commit 6eefab86dabcb1edfccb2fd7a7ed9a2c9e3e40b7
Author: hailin
Date:   Wed Jun 18 13:27:25 2025 +0800

    first commit

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..ba4fa66
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,12 @@
+/intent-system.exe
+/intent-system
+/doc/~$集群管理系统角色和权限明细.xlsx
+/.idea
+/static/static/
+/static/favicon.ico
+/static/index.html
+/node_modules
+/cmd/intent-system
+/cmd/intent-system.exe
+/go.work
+/go.work.local
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..e69de29
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..5af6922
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,28 @@
+FROM golang:1.21 AS builder
+LABEL maintainer="lory"
+
+RUN apt-get update && apt-get install -y ca-certificates make
+ENV SRC_DIR /intent-system
+RUN set -x \
+    && cd /tmp
+
+RUN go env -w GOPROXY=https://goproxy.io
+
+COPY . $SRC_DIR
+RUN cd $SRC_DIR && export GIT_SSL_NO_VERIFY=true && git config --global http.sslVerify "false" && make
+
+FROM ubuntu:22.04
+
+#RUN ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo "Asia/Shanghai" > /etc/timezone && apt-get update && apt-get install -y tzdata
+#ENV TZ Asia/Shanghai
+ENV SRC_DIR /intent-system
+
+
+# Management system main binary
+COPY --from=builder $SRC_DIR/intent-system /usr/local/bin/intent-system
+COPY --from=builder /etc/ssl/certs /etc/ssl/certs
+
+
+ENV HOME_PATH /data
+
+VOLUME $HOME_PATH
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..c01e836
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,50 @@
+#SHELL=/usr/bin/env bash
+
+CLEAN:=
+BINS:=
+DATE_TIME=`date +'%Y%m%d %H:%M:%S'`
+COMMIT_ID=`git rev-parse --short HEAD`
+MANAGER_DIR=${PWD}
+CONSOLE_CODE=/tmp/intent-system-frontend
+
+build:
+	rm -f intent-system
+	go mod tidy && go build -ldflags "-s -w -X 'main.BuildTime=${DATE_TIME}' -X 'main.GitCommit=${COMMIT_ID}'" -o intent-system cmd/main.go
+.PHONY: build
+BINS+=intent-system
+
+nodejs:
+	curl -sL https://deb.nodesource.com/setup_14.x | sudo -E bash - && sudo apt update && sudo apt install -y nodejs build-essential && sudo npm install -g yarn
+.PHONY: nodejs
+
+console:
+	rm -rf ${CONSOLE_CODE} && git clone -b master https://git.your-enterprise.com/intent-system-frontend.git ${CONSOLE_CODE}
+	cd ${CONSOLE_CODE} && git log -2 && npm install && npm run build:prod
+.PHONY: console
+
+docker-test: build
+	docker build --tag intent-system -f Dockerfile.test .
+.PHONY: docker-test
+
+docker:
+	rm -f intent-system
+	docker build --tag intent-system -f Dockerfile .
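+	# Example only (not part of the original Makefile): the freshly built image
+	# can be smoke-tested with the urfave/cli default --version flag, e.g.:
+	#   docker run --rm intent-system intent-system --version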
+.PHONY: docker
+
+# Check that an environment variable is set
+env-%:
+	@ if [ "${${*}}" = "" ]; then \
+		echo "Environment variable $* not set"; \
+		exit 1; \
+	fi
+
+db2go:
+	go install github.com/civet148/db2go@latest
+.PHONY: db2go
+
+models:
+	cd pkg/dal/db2go && ./gen_models.sh
+
+clean:
+	rm -rf $(CLEAN) $(BINS)
+.PHONY: clean
diff --git a/cmd/main.go b/cmd/main.go
new file mode 100644
index 0000000..a9d333a
--- /dev/null
+++ b/cmd/main.go
@@ -0,0 +1,194 @@
+package main
+
+import (
+	"fmt"
+	"intent-system/pkg/api"
+	"intent-system/pkg/config"
+	"intent-system/pkg/itypes"
+	"intent-system/pkg/services"
+	"os"
+	"os/signal"
+
+	"github.com/civet148/log"
+	"github.com/urfave/cli/v2"
+)
+
+const (
+	Version     = "v0.5.11"
+	ProgramName = "intent-system"
+)
+
+var (
+	BuildTime = "2024-05-11"
+	GitCommit = ""
+)
+
+const (
+	CMD_NAME_RUN   = "run"
+	CMD_NAME_START = "start"
+)
+
+const (
+	CMD_FLAG_NAME_DSN            = "dsn"
+	CMD_FLAG_NAME_POSTGRESQL     = "pg"
+	CMD_FLAG_NAME_DEBUG          = "debug"
+	CMD_FLAG_NAME_STATIC         = "static"
+	CMD_FLAG_NAME_DOMAIN         = "domain"
+	CMD_FLAG_NAME_IMAGE_PATH     = "image-path"
+	CMD_FLAG_NAME_IMAGE_PREFIX   = "image-prefix"
+	CMD_FLAG_NAME_GATEWAY_URL    = "gateway-url"
+	CMD_FLAG_NAME_GATEWAY_KEY    = "gateway-key"
+	CMD_FLAG_NAME_GATEWAY_SECRET = "gateway-secret"
+	CMD_FLAG_NAME_SUB_CRON       = "sub-cron"
+)
+
+var manager api.ManagerApi
+
+func init() {
+}
+
+func grace() {
+	// capture Ctrl+C and exit gracefully
+	sigChannel := make(chan os.Signal, 1)
+	signal.Notify(sigChannel, os.Interrupt)
+	go func() {
+		for {
+			select {
+			case s := <-sigChannel:
+				{
+					if s != nil && s == os.Interrupt {
+						fmt.Printf("Ctrl+C signal captured, program exiting...\n")
+						if manager != nil {
+							manager.Close()
+						}
+						close(sigChannel)
+						os.Exit(0)
+					}
+				}
+			}
+		}
+	}()
+}
+
+func main() {
+
+	grace()
+
+	local := []*cli.Command{
+		runCmd,
+	}
+	app := &cli.App{
+		Name:     ProgramName,
+		Version:  fmt.Sprintf("%s %s commit %s", Version, BuildTime, GitCommit),
+		Flags:    []cli.Flag{},
+		Commands: local,
+		Action:   nil,
+	}
+	if err := app.Run(os.Args); err != nil {
+		log.Errorf("exited with error %s", err)
+		os.Exit(1)
+		return
+	}
+}
+
+var runCmd = &cli.Command{
+	Name:      CMD_NAME_RUN,
+	Usage:     "run as a web service",
+	ArgsUsage: "[listen address]",
+	Aliases:   []string{CMD_NAME_START},
+	Flags: []cli.Flag{
+		&cli.BoolFlag{
+			Name:  CMD_FLAG_NAME_DEBUG,
+			Usage: "enable debug log mode",
+		},
+		&cli.StringFlag{
+			Name:    CMD_FLAG_NAME_DSN,
+			Usage:   "data source name of database",
+			Value:   itypes.DEFAULT_DATA_SOURCE_NAME,
+			Aliases: []string{"n"},
+		},
+		//&cli.StringFlag{
+		//	Name:  CMD_FLAG_NAME_STATIC,
+		//	Usage: "frontend static path",
+		//	Value: itypes.DefaultStaticHome,
+		//},
+		&cli.StringFlag{
+			Name:    CMD_FLAG_NAME_IMAGE_PATH,
+			Usage:   "image saving path",
+			Value:   itypes.DefaultImagesHome,
+			Aliases: []string{"i"},
+		},
+		&cli.StringFlag{
+			Name:     CMD_FLAG_NAME_DOMAIN,
+			Usage:    "domain url",
+			Required: true,
+			Aliases:  []string{"d"},
+		},
+		&cli.StringFlag{
+			Name:    CMD_FLAG_NAME_IMAGE_PREFIX,
+			Usage:   "image url prefix",
+			Value:   itypes.DEFAULT_IMAGE_PREFIX,
+			Aliases: []string{"p"},
+		},
+		&cli.StringFlag{
+			Name:     CMD_FLAG_NAME_GATEWAY_URL,
+			Usage:    "sdk gateway url",
+			Aliases:  []string{"g"},
+			Required: true,
+		},
+		&cli.StringFlag{
+			Name:     CMD_FLAG_NAME_GATEWAY_KEY,
+			Usage:    "sdk gateway access key",
+			Aliases:  []string{"k"},
+			Required: true,
+		},
+		&cli.StringFlag{
+			Name:     CMD_FLAG_NAME_GATEWAY_SECRET,
+			Usage:    "sdk gateway access secret",
+			Aliases:  []string{"s"},
+			Required: true,
+		},
+		&cli.StringFlag{
+			Name:     CMD_FLAG_NAME_POSTGRESQL,
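+			// hypothetical example value, matching the format used in deploy/run.sh:
+			//   postgres://user:pass@127.0.0.1:5432/webdb?sslmode=disable&search_path=public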
+ Usage: "Postgresql connection string for news sync", + Required: true, + }, + + &cli.StringFlag{ + Name: CMD_FLAG_NAME_SUB_CRON, + Usage: "cron task for email subscription", + Value: itypes.DEFAULT_SUB_CRON_EMAIL_PUSH, + }, + }, + Action: func(cctx *cli.Context) error { + cfg := &config.Config{ + Version: Version, + HttpAddr: itypes.DEFAULT_HTTP_LISTEN_ADDR, + DSN: cctx.String(CMD_FLAG_NAME_DSN), + Debug: cctx.Bool(CMD_FLAG_NAME_DEBUG), + Domain: cctx.String(CMD_FLAG_NAME_DOMAIN), + Static: cctx.String(CMD_FLAG_NAME_STATIC), + ImagePath: cctx.String(CMD_FLAG_NAME_IMAGE_PATH), + ImagePrefix: cctx.String(CMD_FLAG_NAME_IMAGE_PREFIX), + GatewayUrl: cctx.String(CMD_FLAG_NAME_GATEWAY_URL), + GatewayKey: cctx.String(CMD_FLAG_NAME_GATEWAY_KEY), + GatewaySecret: cctx.String(CMD_FLAG_NAME_GATEWAY_SECRET), + Postgresql: cctx.String(CMD_FLAG_NAME_POSTGRESQL), + SubCron: cctx.String(CMD_FLAG_NAME_SUB_CRON), + } + + cfg.Version = Version + if cfg.Debug { + log.SetLevel("debug") + } + log.Json("configuration", cfg) + if cctx.Args().First() != "" { + cfg.HttpAddr = cctx.Args().First() + } + if err := cfg.Save(); err != nil { + return err + } + //start up as a web server + manager = services.NewManager(cfg) + return manager.Run() + }, +} diff --git a/deploy/init.sql b/deploy/init.sql new file mode 100644 index 0000000..7e5d579 --- /dev/null +++ b/deploy/init.sql @@ -0,0 +1,10 @@ +USE `intent-system`; + +insert into `tag` (`name`, `name_cn`, `is_inherent`, `is_deleted`) values('#AI','#人工智能','1','0'); +insert into `tag` (`name`, `name_cn`, `is_inherent`, `is_deleted`) values('#Blockchain','#区块链','1','0'); + +INSERT INTO `dictionary` (`name`, `config_key`, `config_value`, `remark`, `deleted`) VALUES ('[SMTP] server', 'smtp_server', 'mail.jellydropsllc.com', '', '0'); +INSERT INTO `dictionary` (`name`, `config_key`, `config_value`, `remark`, `deleted`) VALUES ('[SMTP] port', 'smtp_port', '465', '', '0'); +INSERT INTO `dictionary` (`name`, `config_key`, `config_value`, `remark`, `deleted`) VALUES ('[SMTP] email address', 'smtp_name', 'it@jellydropsllc.com', '', '0'); +INSERT INTO `dictionary` (`name`, `config_key`, `config_value`, `remark`, `deleted`) VALUES ('[SMTP] auth code', 'auth_code', 'Z[yj4ri1tWRM', '', '0'); +INSERT INTO `dictionary` (`name`, `config_key`, `config_value`, `remark`, `deleted`) VALUES ('[SMTP] send name', 'send_name', 'Jelly AI', '', '0'); diff --git a/deploy/intent-system.sql b/deploy/intent-system.sql new file mode 100644 index 0000000..93cd62a --- /dev/null +++ b/deploy/intent-system.sql @@ -0,0 +1,409 @@ +/* +SQLyog Trial v13.1.8 (64 bit) +MySQL - 8.0.23 : Database - intent-system +********************************************************************* +*/ + +/*!40101 SET NAMES utf8 */; + +/*!40101 SET SQL_MODE=''*/; + +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; +CREATE DATABASE /*!32312 IF NOT EXISTS*/`intent-system` /*!40100 DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci */ /*!80016 DEFAULT ENCRYPTION='N' */; + +USE `intent-system`; + +/*Table structure for table `casbin_rule` */ + +CREATE TABLE `casbin_rule` ( + `p_type` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '', + `v0` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '', + `v1` varchar(100) CHARACTER SET utf8 COLLATE 
utf8_general_ci NOT NULL DEFAULT '', + `v2` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '', + `v3` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '', + `v4` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '', + `v5` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '', + KEY `IDX_casbin_rule_v0` (`v0`) USING BTREE, + KEY `IDX_casbin_rule_v1` (`v1`) USING BTREE, + KEY `IDX_casbin_rule_v2` (`v2`) USING BTREE, + KEY `IDX_casbin_rule_v3` (`v3`) USING BTREE, + KEY `IDX_casbin_rule_v4` (`v4`) USING BTREE, + KEY `IDX_casbin_rule_v5` (`v5`) USING BTREE, + KEY `IDX_casbin_rule_p_type` (`p_type`) USING BTREE +) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC; + +/*Table structure for table `customer` */ + +CREATE TABLE `customer` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT '用户ID(自增)', + `user_name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '登录名称', + `user_alias` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '账户别名', + `password` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '登录密码(MD5+SALT)', + `first_name` varchar(64) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '姓', + `last_name` varchar(64) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '名', + `title` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '职称', + `company` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '公司名称', + `salt` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'MD5加密盐', + `phone_number` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '联系手机号', + `is_admin` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否为超级管理员(0=普通账户 1=超级管理员)', + `email` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT '' COMMENT '邮箱地址', + `address` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '家庭住址/公司地址', + `remark` varchar(256) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '备注', + `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=未删除 1=已删除)', + `state` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已冻结(1=已启用 2=已冻结)', + `is_subscribed` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已订阅(0=未订阅 1=已订阅)', + `login_ip` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '最近登录IP', + `login_time` bigint NOT NULL DEFAULT '0' COMMENT '最近登录时间', + `create_user` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '创建人', + `edit_user` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '最近编辑人', + `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + `extra_data` json DEFAULT NULL COMMENT '附带数据(JSON)', + PRIMARY KEY (`id`) USING BTREE, + UNIQUE KEY `UNIQ_USER_NAME` (`user_name`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC COMMENT='客户信息表'; + +/*Table structure for table `dictionary` */ + +CREATE TABLE `dictionary` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT '自增ID', + `name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '名称', + `config_key` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL 
COMMENT 'KEY', + `config_value` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'VALUE', + `remark` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '备注', + `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=未删除 1=已删除)', + `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`) USING BTREE, + UNIQUE KEY `key` (`config_key`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC; + +/*Table structure for table `invite_code` */ + +CREATE TABLE `invite_code` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT '自增ID', + `user_id` int NOT NULL COMMENT '注册用户ID', + `user_acc` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '注册账户', + `random_code` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '认证码(5位字母和数字组合)', + `link_url` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '链接URL(保留字段)', + `state` tinyint(1) NOT NULL COMMENT '状态(1=等待校验 2=已校验)', + `expire_time` bigint NOT NULL DEFAULT '0' COMMENT '过期时间(UNIX时间戳)', + `action_type` tinyint(1) NOT NULL DEFAULT '0' COMMENT '操作类型(0=注册 1=重置密码)', + `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=未删除 1=已删除)', + `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`), + UNIQUE KEY `UNIQ_INVITECODE` (`user_acc`,`random_code`,`deleted`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +/*Table structure for table `login` */ + +CREATE TABLE `login` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT '自增ID', + `login_type` tinyint NOT NULL DEFAULT '0' COMMENT '登录类型(0=管理用户 1=注册用户)', + `user_id` int NOT NULL COMMENT '登录用户ID', + `login_ip` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '登录IP', + `login_addr` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '登录地址', + `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`) USING BTREE +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC COMMENT='登录记录表'; + +/*Table structure for table `news` */ + +CREATE TABLE `news` ( + `id` bigint NOT NULL AUTO_INCREMENT COMMENT '自增ID', + `org_id` bigint NOT NULL DEFAULT '0' COMMENT 'AI文章同步ID', + `spider_id` bigint NOT NULL DEFAULT '0' COMMENT '爬虫文章ID', + `tag` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '文章标签(原始标签)', + `category` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '分类', + `main_title` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '主标题', + `sub_title` varchar(512) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '副标题', + `summary` text COLLATE utf8mb4_unicode_ci COMMENT '摘要', + `keywords` text COLLATE utf8mb4_unicode_ci COMMENT '文章关键词', + `seo_keywords` text COLLATE utf8mb4_unicode_ci COMMENT 'SEO关键词', + `tags` json DEFAULT NULL COMMENT '人工打标签(多选)', + `url` varchar(2048) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT '文章链接', + `image_url` 
text COLLATE utf8mb4_unicode_ci COMMENT '图片URL', + `content` longtext COLLATE utf8mb4_unicode_ci COMMENT '文章内容', + `is_hotspot` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否热门(0=否 1=是)', + `is_overwritten` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已被覆盖(0=否 1=是)', + `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=否 1=是)', + `is_replicate` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否为副本(0=否 1=是)', + `state` tinyint(1) NOT NULL DEFAULT '0' COMMENT '状态(0=未发布订阅 1=已发布订阅 2=已推送)', + `language` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '语言(zh-CN=中文 en=英文)', + `data_time` timestamp NOT NULL COMMENT '数据生成时间', + `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '数据创建时间', + `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '数据更新时间', + `extra_data` json DEFAULT NULL COMMENT '附带数据(JSON)', + PRIMARY KEY (`id`), + KEY `INDEX_MAIN_TITLE` (`main_title`), + KEY `INDEX_SUB_TITLE` (`sub_title`), + KEY `INDEX_CREATED_TIME` (`created_time` DESC), + KEY `INDEX_TAG` (`tag`), + KEY `INDEX_HOTSPOT` (`is_hotspot`,`is_overwritten`,`is_deleted`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT='新闻文章数据表(AI编辑)'; + +/*Table structure for table `news_draft` */ + +CREATE TABLE `news_draft` ( + `id` bigint NOT NULL AUTO_INCREMENT COMMENT '自增ID', + `news_id` bigint NOT NULL DEFAULT '0' COMMENT '新闻ID(对应news表id字段)', + `org_id` bigint NOT NULL DEFAULT '0' COMMENT '源新闻ID(对应news表org_id字段)', + `category` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '分类', + `main_title` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '主标题', + `sub_title` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '副标题', + `summary` text COLLATE utf8mb4_unicode_ci COMMENT '摘要', + `keywords` text COLLATE utf8mb4_unicode_ci COMMENT '关键字(JSON数组)', + `seo_keywords` text COLLATE utf8mb4_unicode_ci COMMENT 'SEO关键字(JSON数组)', + `tags` json DEFAULT NULL COMMENT '标签(JSON数组)', + `image_url` text COLLATE utf8mb4_unicode_ci COMMENT '图片URL', + `content` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT '文章内容', + `language` varchar(32) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '语言(zh-CN=中文 en=英文)', + `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=否 1=是)', + `is_replicate` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否为副本(0=否 1=是)', + `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '数据创建时间', + `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '数据更新时间', + `extra_data` json DEFAULT NULL COMMENT '附带数据(JSON)', + PRIMARY KEY (`id`), + KEY `INDEX_CREATED_TIME` (`created_time` DESC), + KEY `INDEX_HOTSPOT` (`is_deleted`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT='草稿箱'; + +/*Table structure for table `news_spider` */ + +CREATE TABLE `news_spider` ( + `id` bigint NOT NULL AUTO_INCREMENT COMMENT '自增ID', + `org_id` bigint NOT NULL DEFAULT '0' COMMENT '新闻同步ID', + `tag` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '文章标签', + `category` varchar(256) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '分类', + `main_title` varchar(256) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '主标题', + `sub_title` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '副标题', + `summary` text CHARACTER SET utf8mb4 
COLLATE utf8mb4_unicode_ci COMMENT '摘要', + `keywords` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT '文章关键词', + `seo_keywords` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT 'SEO关键词', + `url` varchar(2048) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT '文章链接', + `image_url` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT '图片URL', + `content` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT '文章内容', + `is_hotspot` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否热门(0=否 1=是)', + `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=否 1=是)', + `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '数据创建时间', + `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '数据更新时间', + `extra_data` json DEFAULT NULL COMMENT '附带数据(JSON)', + PRIMARY KEY (`id`), + KEY `INDEX_MAIN_TITLE` (`main_title`), + KEY `INDEX_SUB_TITLE` (`sub_title`), + KEY `INDEX_CREATED_TIME` (`created_time` DESC), + KEY `INDEX_TAG` (`tag`), + KEY `INDEX_UPDATED_TIME` (`updated_time` DESC), + KEY `INDEX_NEWS_ID` (`org_id`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT='新闻文章数据表(爬虫)'; + +/*Table structure for table `news_subscribe` */ + +CREATE TABLE `news_subscribe` ( + `id` bigint NOT NULL AUTO_INCREMENT COMMENT '自增ID', + `news_id` bigint NOT NULL COMMENT '订阅推送新闻ID(对应news表id字段)', + `news_subject` varchar(512) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '邮件主题', + `news_url` varchar(1024) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '订阅新闻推送URL', + `is_pushed` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已推送(0=否 1=是)', + `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=否 1=是)', + `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + `extra_data` json DEFAULT NULL COMMENT '附带数据(JSON)', + PRIMARY KEY (`id`), + KEY `INDEX_NEWS_ID` (`news_id`,`is_deleted`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +/*Table structure for table `oper_log` */ + +CREATE TABLE `oper_log` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT '自增ID', + `oper_user` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '操作用户', + `oper_type` tinyint(1) NOT NULL COMMENT '操作类型(1=首页 2=系统管理 3=存储管理 4=资源管理 5=告警中心)', + `oper_time` timestamp NOT NULL COMMENT '操作时间', + `oper_content` varchar(1024) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '操作内容', + `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`) USING BTREE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC; + +/*Table structure for table `privilege` */ + +CREATE TABLE `privilege` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT '自增ID', + `category` tinyint(1) NOT NULL DEFAULT '0' COMMENT '权限分类(保留字段)', + `name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '权限名称', + `label` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '权限标签', + `path` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '权限访问路径', + `children` mediumtext COLLATE utf8mb4_unicode_ci COMMENT '子权限树', + `is_inherent` tinyint(1) NOT NULL DEFAULT 
'0' COMMENT '是否固有权限(0=否 1=是)', + `remark` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '权限备注', + `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=未删除 1=已删除)', + `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`) USING BTREE +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC COMMENT='角色-菜单关系表'; + +/*Table structure for table `question_answer` */ + +CREATE TABLE `question_answer` ( + `id` bigint NOT NULL AUTO_INCREMENT COMMENT '自增ID', + `org_id` bigint NOT NULL DEFAULT '0' COMMENT 'Q&A源ID(同步ID)', + `question` mediumtext CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT '问题', + `answer` mediumtext COLLATE utf8mb4_unicode_ci COMMENT '答案', + `state` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已发布(0=草稿 1=已发布 2=已下架)', + `language` varchar(32) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '语言(zh-CN=中文 en=英文)', + `is_overwritten` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已被覆盖(0=否 1=是)', + `is_replicate` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否为副本(0=否 1=是)', + `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=未删除 1=已删除)', + `data_time` timestamp NOT NULL COMMENT '数据生成时间', + `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `updated_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + `extra_data` json DEFAULT NULL COMMENT '附带数据(JSON)', + PRIMARY KEY (`id`), + KEY `INDEX_CREATED_TIME` (`created_time` DESC), + KEY `INDEX_UPDATED_TIME` (`updated_time` DESC) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +/*Table structure for table `question_draft` */ + +CREATE TABLE `question_draft` ( + `id` bigint NOT NULL AUTO_INCREMENT COMMENT '自增ID', + `qa_id` bigint NOT NULL DEFAULT '0' COMMENT '源ID(对应question_answer表id字段)', + `org_id` bigint NOT NULL DEFAULT '0' COMMENT 'Q&A同步ID', + `question` varchar(1024) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '问题', + `answer` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT '答案', + `language` varchar(32) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '语言(zh-CN=中文 en=英文)', + `is_overwritten` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已被覆盖(0=否 1=是)', + `is_replicate` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否为副本(0=否 1=是)', + `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=未删除 1=已删除)', + `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `updated_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + `extra_data` json DEFAULT NULL COMMENT '附带数据(JSON)', + PRIMARY KEY (`id`), + KEY `INDEX_CREATED_TIME` (`created_time` DESC), + KEY `INDEX_UPDATED_TIME` (`updated_time` DESC) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +/*Table structure for table `role` */ + +CREATE TABLE `role` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT '角色ID(自增)', + `role_name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '角色名称', + `role_alias` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '角色别名', + `create_user` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '创建人', + `edit_user` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci 
NOT NULL DEFAULT '' COMMENT '最近编辑人',
+  `remark` varchar(256) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '备注',
+  `is_inherent` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否固有角色(0=自定义角色 1=平台固有角色)',
+  `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=未删除 1=已删除)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
+  PRIMARY KEY (`id`) USING BTREE,
+  UNIQUE KEY `UNIQ_ROLE_NAME` (`role_name`) COMMENT '角色名称唯一约束'
+) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC COMMENT='角色信息表';
+
+/*Table structure for table `run_config` */
+
+CREATE TABLE `run_config` (
+  `id` int NOT NULL AUTO_INCREMENT COMMENT 'incr id',
+  `config_name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'config name',
+  `config_key` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'config key',
+  `config_value` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT 'config value',
+  `remark` varchar(256) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'remark',
+  `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'is deleted(0=false 1=true)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'created time',
+  PRIMARY KEY (`id`) USING BTREE,
+  UNIQUE KEY `UNIQ_NAME_KEY` (`config_name`,`config_key`) USING BTREE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC COMMENT='run config table';
+
+/*Table structure for table `subscriber` */
+
+CREATE TABLE `subscriber` (
+  `id` bigint NOT NULL AUTO_INCREMENT COMMENT '自增ID',
+  `customer_id` int NOT NULL DEFAULT '0' COMMENT '订阅者ID(对应customer表id字段,可为空)',
+  `email` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '订阅者邮箱',
+  `tags` varchar(1024) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '订阅标签(主题)',
+  `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已取消订阅(0=否 1=是)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
+  `extra_data` json DEFAULT NULL COMMENT '附带数据(JSON)',
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `UNIQ_CUSTOMER_EMAIL` (`email`)
+) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+
+/*Table structure for table `tag` */
+
+CREATE TABLE `tag` (
+  `id` bigint NOT NULL AUTO_INCREMENT COMMENT '自增ID',
+  `name` varchar(32) COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '标签名',
+  `name_cn` varchar(64) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '中文名',
+  `is_inherent` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否为固有标签(0=否 1=是)',
+  `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=未删除 1=已删除)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
+  `extra_data` json DEFAULT NULL COMMENT '附带数据(JSON)',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+
+/*Table structure for table `template` */
+
+CREATE TABLE `template` (
+  `id` int NOT NULL AUTO_INCREMENT COMMENT '自增ID',
+  `template_type` int NOT NULL COMMENT '模板类型(1=订阅欢迎邮件[英文] 2=订阅欢迎邮件[中文])',
+  `subject` varchar(512) COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '主题',
+  `content`
mediumtext CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT '内容', + `language` varchar(64) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '语言(英语=en 中文=zh-CN)', + `editor_user` varchar(64) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '最后编辑人', + `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +/*Table structure for table `user` */ + +CREATE TABLE `user` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT '用户ID(自增)', + `user_name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '登录名称', + `user_alias` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '账户别名', + `password` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '登录密码(MD5+SALT)', + `salt` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'MD5加密盐', + `phone_number` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '联系手机号', + `is_admin` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否为超级管理员(0=普通账户 1=超级管理员)', + `email` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT '' COMMENT '邮箱地址', + `address` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '家庭住址/公司地址', + `remark` varchar(256) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '备注', + `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=未删除 1=已删除)', + `state` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已冻结(1=已启用 2=已冻结)', + `login_ip` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '最近登录IP', + `login_time` bigint NOT NULL DEFAULT '0' COMMENT '最近登录时间', + `create_user` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '创建人', + `edit_user` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '最近编辑人', + `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`) USING BTREE, + UNIQUE KEY `UNIQ_USER_NAME` (`user_name`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC COMMENT='登录账户信息表'; + +/*Table structure for table `user_role` */ + +CREATE TABLE `user_role` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT '自增ID', + `user_name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '用户名', + `role_name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '角色名', + `create_user` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '创建人', + `edit_user` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '最近编辑人', + `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=未删除 1=已删除)', + `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`) USING BTREE, + UNIQUE KEY `UNIQ_USER_NAME` (`user_name`) COMMENT '用户唯一约束' +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci 
ROW_FORMAT=DYNAMIC COMMENT='用户角色关系表';
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
diff --git a/deploy/run.sh b/deploy/run.sh
new file mode 100644
index 0000000..4d7d92b
--- /dev/null
+++ b/deploy/run.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# Image URL and tag (local)
+IMAGE_URL=intent-system:latest
+
+# Container name
+CONTAINER_NAME=intent-system
+
+# Data sources (change to the real database settings in production)
+DSN='mysql://root:123456@127.0.0.1:3306/intent-system?charset=utf8mb4'
+PG='postgres://dong:Pg2#123321@14.17.80.241:5432/webdb?sslmode=disable&search_path=public'
+
+# HTTP listen address of the management service
+LISTEN_ADDR="0.0.0.0:8083"
+
+# Data mount directory
+DATA_DIR=/data/intent-system
+
+# Domain used in subscription email links
+DOMAIN="http://103.39.218.177:3008/blog"
+
+# Image storage domain + prefix
+#IMAGE_PREFIX=https://www.your-enterprise.com/images
+
+# Gateway URL
+GATEWAY_URL="ws://127.0.0.1:12345"
+
+# Gateway access key
+GATEWAY_KEY="bAkYh0JVe2Kph0ot"
+
+# Gateway access secret
+GATEWAY_SECRET="1EWKBne2LCX0TJBXkrOWSzSDkzaQmoR3xuXBrc41JsdjorpM"
+
+# Cron schedule for subscription emails
+SUB_CRON="0 0 * * *"
+
+# Remove any existing container
+docker rm -f "${CONTAINER_NAME}"
+
+# The container-side mount path must be absolute (a literal ~ is not expanded here);
+# /root/.intent-system assumes the service runs as root inside the container.
+docker run -p 8083:8083 -v ${DATA_DIR}:/root/.intent-system --restart always --name "${CONTAINER_NAME}" -d "$IMAGE_URL" \
+    intent-system run --debug -n "${DSN}" --pg "${PG}" -d "${DOMAIN}" -g "${GATEWAY_URL}" -k "${GATEWAY_KEY}" -s "${GATEWAY_SECRET}" --sub-cron "${SUB_CRON}" "$LISTEN_ADDR"
diff --git a/deploy/stop.sh b/deploy/stop.sh
new file mode 100644
index 0000000..ad8133c
--- /dev/null
+++ b/deploy/stop.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+docker rm -f intent-system
\ No newline at end of file
diff --git a/deploy/数据库部署要求.md b/deploy/数据库部署要求.md
new file mode 100644
index 0000000..d08871e
--- /dev/null
+++ b/deploy/数据库部署要求.md
@@ -0,0 +1,165 @@
+# Deployment steps
+
+## 1. Install MySQL 8
+
+### 1.1 Option 1: install MySQL 8 on a physical machine
+
+- *Operating system*: *Ubuntu 20.04 LTS*
+
+If the operating system is not Ubuntu 20, update the official MySQL apt source information first.
+
+#### 1.1.1 Install MySQL via apt
+
+```sh
+$ sudo apt update && sudo apt install mysql-server
+
+```
+#### 1.1.2 Check that the service started after installation
+
+```sh
+$ ps -ef | grep mysqld
+mysql 65492 1 0 17:46 ? 00:00:00 /usr/sbin/mysqld --daemonize --pid-file=/run/mysqld/mysqld.pid
+```
+
+#### 1.1.3 Change the MySQL listen address and port
+
+```sh
+$ sudo netstat -alnt | grep 3306
+tcp 0 0 127.0.0.1:3306 0.0.0.0:* LISTEN
+
+# Edit mysqld.cnf and change bind-address from 127.0.0.1 to 0.0.0.0 or a LAN IP (change the port as needed)
+$ sudo vi /etc/mysql/mysql.conf.d/mysqld.cnf
+
+# Restart the MySQL service
+$ sudo service mysql restart
+```
+
+#### 1.1.4 Create the admin account and grant privileges
+
+```sh
+# After installation MySQL leaves a debian.cnf file under /etc/mysql that can be used to log in locally and change the root password or create users
+$ sudo cat /etc/mysql/debian.cnf
+
+[client]
+host = localhost
+user = debian-sys-maint
+password = xgf1OdcBzRy0LaEP
+socket = /var/run/mysqld/mysqld.sock
+
+# Log in to MySQL locally with the command line below
+$ mysql -udebian-sys-maint -pxgf1OdcBzRy0LaEP mysql
+
+mysql> select host,user,plugin,authentication_string from user;
++-----------+------------------+-----------------------+-------------------------------------------+
+| host      | user             | plugin                | authentication_string                     |
++-----------+------------------+-----------------------+-------------------------------------------+
+| localhost | root             | auth_socket           |                                           |
+| localhost | mysql.session    | mysql_native_password | *THISISNOTAVALIDPASSWORDTHATCANBEUSEDHERE |
+| localhost | mysql.sys        | mysql_native_password | *THISISNOTAVALIDPASSWORDTHATCANBEUSEDHERE |
+| localhost | debian-sys-maint | mysql_native_password | *22CC5F671040F19FF9FB1E5A9B94D2576C4A1A24 |
+| %         | node             | mysql_native_password | *6BB4837EB74329105EE4568DDA7DC67ED2CA2AD9 |
++-----------+------------------+-----------------------+-------------------------------------------+
+
+5 rows in set (0.00 sec)
+
+# Create an admin account that may log in remotely, password 123456 (use a strong password in production)
+mysql> create user 'admin'@'%' identified by '123456';
+Query OK, 0 rows affected (0.00 sec)
+
+# Grant admin all privileges
+mysql> grant all on *.* to 'admin'@'%';
+mysql> flush privileges;
+
+# Change the root password and authentication plugin and enable remote login (optional; if remote login is not needed, only change the password)
+# host='%' enables remote access; omit that clause if you do not want it
+mysql> update user set plugin='mysql_native_password', authentication_string='', host='%' where user='root';
+Query OK, 1 row affected (0.00 sec)
+Rows matched: 1 Changed: 1 Warnings: 0
+
+# Reset the root password and flush privileges
+mysql> alter user 'root'@'%' IDENTIFIED BY '123456'; # works on 8.x (if it fails, a more complex password may be required)
+mysql> flush privileges;
+
+# Inspect the accounts
+mysql> select host,user,plugin,authentication_string from user;
++-----------+------------------+-----------------------+-------------------------------------------+
+| host      | user             | plugin                | authentication_string                     |
++-----------+------------------+-----------------------+-------------------------------------------+
+| %         | root             | mysql_native_password | *6BB4837EB74329105EE4568DDA7DC67ED2CA2AD9 |
+| localhost | mysql.session    | mysql_native_password | *THISISNOTAVALIDPASSWORDTHATCANBEUSEDHERE |
+| localhost | mysql.sys        | mysql_native_password | *THISISNOTAVALIDPASSWORDTHATCANBEUSEDHERE |
+| localhost | debian-sys-maint | mysql_native_password | *22CC5F671040F19FF9FB1E5A9B94D2576C4A1A24 |
+| %         | admin            | mysql_native_password | *6BB4837EB74329105EE4568DDA7DC67ED2CA2AD9 |
++-----------+------------------+-----------------------+-------------------------------------------+
+5 rows in set (0.00 sec)
+
+```
+
+### 1.2 Option 2: run MySQL 8 in Docker
+
+```sh
+# Create the local database directories
+$ sudo mkdir -p /data/mysql/{mysql-files,conf,logs,data}
+
+# Start the container (initial root password: 123456)
+$ docker run -p 3306:3306 -e MYSQL_ROOT_PASSWORD=123456 --restart always \
+    -e TZ=Asia/Shanghai \
+    -v /data/mysql/mysql-files:/var/lib/mysql-files \
+    -v /data/mysql/conf:/etc/mysql \
+    -v /data/mysql/logs:/var/log/mysql \
+    -v /data/mysql/data:/var/lib/mysql \
+    --name mysql -d mysql:8.0.23
+```
+
+```shell
+# Open a mysql terminal (enter the initial password 123456 to reach the MySQL console)
+$ docker exec -it mysql mysql -uroot -p mysql
+
+Enter password:
+Reading table information for completion of table and column names
+You can turn off this feature to get a quicker startup with -A
+
+Welcome to the MySQL monitor.  Commands end with ; or \g.
+Your MySQL connection id is 8
+Server version: 8.0.23 MySQL Community Server - GPL
+
+Copyright (c) 2000, 2021, Oracle and/or its affiliates.
+
+Oracle is a registered trademark of Oracle Corporation and/or its
+affiliates. Other names may be trademarks of their respective
+owners.
+
+Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
+
+# Change the root password to 123456 and enable remote access (any password works or keep the original; the % host is what enables remote root access)
+mysql> USE mysql;
+mysql> ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY '123456';
+mysql> FLUSH PRIVILEGES;
+mysql> exit
+```
+
+## 2. Initialize the data tables
+
+Upload the intent-system.sql and init.sql files to the server's /tmp directory.
+
+```shell script
+# Log in to the MySQL command-line terminal on the server and execute the sql file
+mysql> source /path/to/intent-system.sql
+```
+
+## 3. Lift the MySQL GROUP BY restriction
+
+```bash
+# Open mysqld.cnf and add the line below inside the [mysqld] section (if sql_mode already exists, remove ONLY_FULL_GROUP_BY from it)
+$ sudo vi /etc/mysql/mysql.conf.d/mysqld.cnf
+sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'
+
+# Restart the mysql service
+$ sudo service mysql restart
+```
+
+## 4. Change the server/container time zone
+
+Set the time zone of the server or container running MySQL to UTC+8 (Beijing time).
diff --git a/deps/github.com/anacrolix/torrent/.circleci/config.yml b/deps/github.com/anacrolix/torrent/.circleci/config.yml
new file mode 100644
index 0000000..f25b011
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/.circleci/config.yml
@@ -0,0 +1,64 @@
+version: 2
+jobs:
+  build:
+    machine: true
+    # This would be for if we didn't have machine: true. Could help with circleci local execute, which doesn't support VMs?
+    # docker:
+    #   - image: cimg/go:1.13
+    environment:
+      GO_BRANCH: release-branch.go1.16
+    steps:
+      - run: echo $CIRCLE_WORKING_DIRECTORY
+      - run: echo $PWD
+      - run: echo $GOPATH
+      - run: echo 'export GOPATH=$HOME/go' >> $BASH_ENV
+      - run: echo 'export PATH="$GOPATH/bin:$PATH"' >> $BASH_ENV
+      - run: echo $GOPATH
+      - run: which go || sudo apt install golang-go
+      - run: go version
+      - run: |
+          cd /usr/local
+          sudo mkdir go.local
+          sudo chown `whoami` go.local
+      - restore_cache:
+          key: go-local-
+      - run: |
+          cd /usr/local
+          git clone git://github.com/golang/go go.local || true
+          cd go.local
+          git fetch
+          git checkout "$GO_BRANCH"
+          [[ -x bin/go && `git rev-parse HEAD` == `cat anacrolix.built` ]] && exit
+          cd src
+          ./make.bash || exit
+          git rev-parse HEAD > ../anacrolix.built
+      - save_cache:
+          paths: /usr/local/go.local
+          key: go-local-{{ checksum "/usr/local/go.local/anacrolix.built" }}
+      - run: echo 'export PATH="/usr/local/go.local/bin:$PATH"' >> $BASH_ENV
+      - run: go version
+      - checkout
+      - run: sudo apt-get update
+      - run: sudo apt install fuse pv
+      - restore_cache:
+          keys:
+            - go-pkg-
+      - restore_cache:
+          keys:
+            - go-cache-
+      - run: go get -d ./...
+      - run: go test -v -race ./... -count 2
+      - run: go test -bench . ./...
+      - run: set +e; CGO_ENABLED=0 go test -v ./...; true
+      - run: GOARCH=386 go test ./... -count 2 -bench .
|| true + - run: go install github.com/anacrolix/godo@latest + - save_cache: + key: go-pkg-{{ checksum "go.mod" }} + paths: + - ~/go/pkg + - run: sudo modprobe fuse + - run: fs/test.sh + - save_cache: + key: go-cache-{{ .Revision }} + paths: + - ~/.cache/go-build diff --git a/deps/github.com/anacrolix/torrent/.deepsource.toml b/deps/github.com/anacrolix/torrent/.deepsource.toml new file mode 100644 index 0000000..e72f983 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/.deepsource.toml @@ -0,0 +1,14 @@ +version = 1 + +test_patterns = ["**/*_test.go"] + +[[analyzers]] +name = "go" +enabled = true + + [analyzers.meta] + import_root = "github.com/anacrolix/torrent" + +[[transformers]] +name = "gofmt" +enabled = true \ No newline at end of file diff --git a/deps/github.com/anacrolix/torrent/.github/actions/go-common/action.yml b/deps/github.com/anacrolix/torrent/.github/actions/go-common/action.yml new file mode 100644 index 0000000..2b374de --- /dev/null +++ b/deps/github.com/anacrolix/torrent/.github/actions/go-common/action.yml @@ -0,0 +1,61 @@ +name: 'Common Go' +description: 'Checks out, and handles Go setup and caching' +runs: + using: "composite" + steps: + - name: Set up Go + if: matrix.go-version != 'tip' + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go-version }} + - uses: actions/cache@v2 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + # The OS defines the directories to use, then this is specific to go. The go version could + # affect the dependencies. The job can affect what is actually downloaded, and provides + # collision resistance. Finally, the hash of the go.sum files ensures a new cache is created + # when the dependencies change. Note if this were just a mod cache, we might do this based + # on time or something. + key: ${{ runner.os }}-go-${{ matrix.go-version }}-${{ github.job }}-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go-${{ matrix.go-version }}-${{ github.job }}- + ${{ runner.os }}-go-${{ matrix.go-version }}- + ${{ runner.os }}-go- + - run: | + echo GOTIP_REVISION="`git ls-remote https://github.com/golang/go refs/heads/master | cut -f1`" >> "$GITHUB_ENV" + echo GOTIP_PATH="$HOME/gotip" >> "$GITHUB_ENV" + if: matrix.go-version == 'tip' + shell: bash + - uses: actions/cache@v2 + if: matrix.go-version == 'tip' + with: + path: | + ${{ env.GOTIP_PATH }} + # The build varies by OS (and arch, but I haven't bothered to add that yet.) We always want + # the latest snapshot that works for us, the revision is only used to store differentiate + # builds. 
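+        # Illustrative note (not in the upstream file): once GOTIP_REVISION is
+        # resolved, the exact key looks like gotip-ls-remote-Linux-<sha>, and the
+        # restore-keys below fall back to progressively older gotip builds for
+        # the same OS when no exact match is cached.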
+ key: gotip-ls-remote-${{ runner.os }}-${{ env.GOTIP_REVISION }} + restore-keys: | + gotip-ls-remote-${{ runner.os }}-${{ env.GOTIP_REVISION }} + gotip-ls-remote-${{ runner.os }}- + gotip-env-home-${{ runner.os }}- + gotip-${{ runner.os }}- + - name: Install gotip + if: matrix.go-version == 'tip' + run: | + git clone --depth=1 https://github.com/golang/go "$GOTIP_PATH" || true + cd "$GOTIP_PATH" + git pull + echo "GOROOT=$GOTIP_PATH" >> "$GITHUB_ENV" + echo "$(go env GOPATH)/bin:$PATH" >> "$GITHUB_PATH" + echo "$GOTIP_PATH/bin:$PATH" >> "$GITHUB_PATH" + echo "anacrolix.built:" $(cat anacrolix.built) + [[ -x bin/go && `git rev-parse HEAD` == `cat anacrolix.built` ]] && exit + cd src + ./make.bash || exit + git rev-parse HEAD > ../anacrolix.built + env + shell: bash + diff --git a/deps/github.com/anacrolix/torrent/.github/workflows/codeql-analysis.yml b/deps/github.com/anacrolix/torrent/.github/workflows/codeql-analysis.yml new file mode 100644 index 0000000..31f4653 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/.github/workflows/codeql-analysis.yml @@ -0,0 +1,54 @@ +name: "CodeQL" + +on: + push: + branches: [master, ] + pull_request: + # The branches below must be a subset of the branches above + branches: [master] + schedule: + - cron: '0 10 * * 4' + +jobs: + analyse: + name: Analyse + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + # We must fetch at least the immediate parents so that if this is + # a pull request then we can checkout the head. + fetch-depth: 2 + + # If this run was triggered by a pull request event, then checkout + # the head of the pull request instead of the merge commit. + - run: git checkout HEAD^2 + if: ${{ github.event_name == 'pull_request' }} + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + # Override language selection by uncommenting this and choosing your languages + # with: + # languages: go, javascript, csharp, python, cpp, java + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v2 + + # ℹ️ Command-line programs to run using the OS shell. + # 📚 https://git.io/JvXDl + + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 diff --git a/deps/github.com/anacrolix/torrent/.github/workflows/go.yml b/deps/github.com/anacrolix/torrent/.github/workflows/go.yml new file mode 100644 index 0000000..e95f826 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/.github/workflows/go.yml @@ -0,0 +1,115 @@ +name: Go + +on: [push, pull_request] + +jobs: + + test: + timeout-minutes: 10 + runs-on: ${{ matrix.os }} + strategy: + matrix: + go-version: [ '1.21' ] + os: [windows-latest, macos-latest, ubuntu-latest] + fail-fast: false + steps: + - uses: actions/checkout@v2 + - uses: ./.github/actions/go-common + - run: go test -race -count 2 $(go list ./... | grep -v /fs) + - run: go test -race -count 2 ./fs/... + if: ${{ ! 
contains(matrix.os, 'windows') }} + + test-benchmarks: + runs-on: ubuntu-latest + strategy: + matrix: + go-version: [ '1.21' ] + fail-fast: false + steps: + - uses: actions/checkout@v2 + - uses: ./.github/actions/go-common + - run: go test -race -run @ -bench . -benchtime 2x ./... + + bench: + runs-on: ubuntu-latest + strategy: + matrix: + go-version: [ '1.21' ] + fail-fast: false + steps: + - uses: actions/checkout@v2 + - uses: ./.github/actions/go-common + - run: go test -run @ -bench . ./... + + test-386: + runs-on: ubuntu-latest + strategy: + matrix: + go-version: [ '1.21' ] + fail-fast: false + steps: + - uses: actions/checkout@v2 + - uses: ./.github/actions/go-common + - run: GOARCH=386 go test ./... + - run: GOARCH=386 go test ./... -run @ -bench . + + build-wasm: + runs-on: ubuntu-latest + strategy: + matrix: + go-version: [ '1.21' ] + fail-fast: false + steps: + - uses: actions/checkout@v2 + - uses: ./.github/actions/go-common + - name: Some packages compile for WebAssembly + run: GOOS=js GOARCH=wasm go build . ./storage ./tracker/... + + torrentfs-linux: + timeout-minutes: 5 + runs-on: ${{ matrix.os }} + strategy: + matrix: + go-version: [ '1.21' ] + os: [ubuntu-latest] + fail-fast: false + steps: + - uses: actions/checkout@v2 + - uses: ./.github/actions/go-common + + - name: Install godo + run: | + # Need master for cross-compiling fix + go install -v -x github.com/anacrolix/godo@master + echo $PATH + + - name: Apt packages + run: sudo apt install pv fuse + + - name: torrentfs end-to-end test + # Test on 386 for atomic alignment and other bad 64-bit assumptions + run: GOARCH=386 fs/test.sh + +# Github broke FUSE on MacOS, I'm not sure what the state is. + +# torrentfs-macos: +# timeout-minutes: 15 +# runs-on: ${{ matrix.os }} +# strategy: +# matrix: +# go-version: [ '1.20' ] +# os: [macos-latest] +# fail-fast: false +# steps: +# - uses: actions/checkout@v2 +# - uses: ./.github/actions/go-common +# +# - run: brew install macfuse pv md5sha1sum bash +# +# - name: Install godo +# run: go install -v github.com/anacrolix/godo@master +# +# - name: torrentfs end-to-end test +# run: fs/test.sh +# # Pretty sure macos on GitHub CI has issues with the fuse driver now. +# continue-on-error: true diff --git a/deps/github.com/anacrolix/torrent/.github/workflows/linter.yml b/deps/github.com/anacrolix/torrent/.github/workflows/linter.yml new file mode 100644 index 0000000..b5342ac --- /dev/null +++ b/deps/github.com/anacrolix/torrent/.github/workflows/linter.yml @@ -0,0 +1,37 @@ +name: GolangCI-Lint + +on: + push: + branches: [ '!master' ] + pull_request: + branches: [ '!master' ] + +jobs: + golint: + name: Lint + runs-on: ubuntu-latest + env: + GO111MODULE: on + steps: + - uses: actions/checkout@v2 + - uses: golangci/golangci-lint-action@v2 + with: + version: latest + + # Optional: working directory, useful for monorepos + # working-directory: somedir + + # Optional: golangci-lint command line arguments. + args: -D errcheck,unused,structcheck,deadcode + + # Optional: show only new issues if it's a pull request. The default value is `false`. + only-new-issues: true + + # Optional: if set to true then the action will use pre-installed Go. + skip-go-installation: true + + # Optional: if set to true then the action don't cache or restore ~/go/pkg. + # skip-pkg-cache: true + + # Optional: if set to true then the action don't cache or restore ~/.cache/go-build. 
+ # skip-build-cache: true diff --git a/deps/github.com/anacrolix/torrent/.gitignore b/deps/github.com/anacrolix/torrent/.gitignore new file mode 100644 index 0000000..0c15585 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/.gitignore @@ -0,0 +1,5 @@ +.idea +*-run.gob +.envrc* +.DS_Store +go.work* \ No newline at end of file diff --git a/deps/github.com/anacrolix/torrent/.golangci.yml b/deps/github.com/anacrolix/torrent/.golangci.yml new file mode 100644 index 0000000..eb429fc --- /dev/null +++ b/deps/github.com/anacrolix/torrent/.golangci.yml @@ -0,0 +1,8 @@ +linters-settings: + staticcheck: + go: "1.16" + checks: ["all", "-U1000"] + + govet: + disable: + - composites diff --git a/deps/github.com/anacrolix/torrent/LICENSE b/deps/github.com/anacrolix/torrent/LICENSE new file mode 100644 index 0000000..a612ad9 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. 
"You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. 
You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/deps/github.com/anacrolix/torrent/NOTES.md b/deps/github.com/anacrolix/torrent/NOTES.md new file mode 100644 index 0000000..80da84b --- /dev/null +++ b/deps/github.com/anacrolix/torrent/NOTES.md @@ -0,0 +1,32 @@ +### Literature + +* [arvid on writing a fast piece picker](https://blog.libtorrent.org/2011/11/writing-a-fast-piece-picker/) + + Uses C++ for examples. 
+ +* [On Piece Selection for Streaming BitTorrent](https://www.diva-portal.org/smash/get/diva2:835742/FULLTEXT01.pdf) + + Some simulations by some Swedes on piece selection. + +* [A South American paper on peer-selection strategies for uploading](https://arxiv.org/pdf/1402.2187.pdf) + + Has some useful overviews of piece-selection. + +### Hole-punching + +Holepunching is tracked in Torrent rather than in Client because if we send a rendezvous message and subsequently receive a connect message, we do not know whether a peer sent a rendezvous message to our relay and we're receiving the connect message for their rendezvous or for ours. Relays are not required to respond to rendezvous, so we can't enforce a timeout. If we don't know who sent the rendezvous that triggered a connect, then we don't know what infohash to use in the handshake. Once we send a rendezvous and never receive a reply, we would have to always perform handshakes with our original infohash, or always copy the infohash the remote sends. Handling connects by always being the passive side in the handshake won't work either, since the other side might use the same behaviour and then neither side would initiate. + +If we only perform rendezvous through relays for the same torrent as the relay, then the handshake can be done actively for all connect messages. All connect messages received from a peer can only be for the same torrent for which we are connected to that peer. + +In 2006, approximately 70% of clients were behind NAT (https://web.archive.org/web/20100724011252/http://illuminati.coralcdn.org/stats/). According to https://fosdem.org/2023/schedule/event/network_hole_punching_in_the_wild/, hole punching (in libp2p) combined with relay mechanisms can defeat about 70% of NATs. + +If either or both peers in a potential pair do not have NAT, or are behind full cone NAT, then NAT doesn't matter, at least for BitTorrent: both parties are trying to connect to each other, so a connection will always succeed in at least one direction. + +The chance that 2 peers can connect to each other is 1-(badnat)^2 without relaying, and 1-unrelayable*(badnat)^2 with it, where unrelayable is the chance a pair can't connect even with a relay, and badnat is the chance a peer has a bad NAT (not full cone). For example, if unrelayable is 0.3 per the libp2p study, and badnat is 0.5 (a made-up figure), 92.5% of peers can connect with each other if they use "relay mechanisms", and 75% if they don't. As long as some peers in the swarm are not badnat, they can relay for those that are, and act as super nodes for peers that can't or don't implement hole punching. + +The DHT is a bit different: you can't be an active node if you are a badnat, but you can still query the network to get what you need; you just don't contribute to it. It also doesn't matter what the swarm looks like for a given torrent on the DHT, because you don't have to be in the swarm to host its data. All that matters is that some peers in the DHT aren't badnat, and there are millions of those (for BitTorrent).
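+
+To sanity-check the arithmetic in the paragraph above, here is a throwaway sketch (the 0.3 and 0.5 inputs are the assumed figures from that paragraph, not measured values):
+
+```go
+package main
+
+import "fmt"
+
+func main() {
+	badnat := 0.5      // assumed: chance a peer has a bad (not full cone) NAT
+	unrelayable := 0.3 // assumed: chance a relay can't help, per the libp2p study
+
+	// A direct connection fails only when both peers are badnat.
+	direct := 1 - badnat*badnat
+	// With relay mechanisms, a badnat pair still connects unless it's also unrelayable.
+	relayed := 1 - unrelayable*badnat*badnat
+
+	fmt.Printf("without relays: %.1f%%\n", direct*100) // 75.0%
+	fmt.Printf("with relays: %.1f%%\n", relayed*100)   // 92.5%
+}
+```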
+ +- https://blog.ipfs.tech/2022-01-20-libp2p-hole-punching/ +- https://www.bittorrent.org/beps/bep_0055.html +- https://github.com/anacrolix/torrent/issues/685 +- https://stackoverflow.com/questions/38786438/libutp-%C2%B5tp-and-nat-traversal-udp-hole-punching diff --git a/deps/github.com/anacrolix/torrent/README.md b/deps/github.com/anacrolix/torrent/README.md new file mode 100644 index 0000000..399b936 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/README.md @@ -0,0 +1,102 @@ +# torrent + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/anacrolix/torrent)](https://pkg.go.dev/github.com/anacrolix/torrent) + +This repository implements BitTorrent-related packages and command-line utilities in Go. The emphasis is on use as a library from other projects. It's been used 24/7 in production by downstream services since late 2014. The implementation was specifically created to explore Go's concurrency capabilities, and to include the ability to stream data directly from the BitTorrent network. To this end it [supports seeking, readaheads and other features](https://godoc.org/github.com/anacrolix/torrent#Reader) exposing torrents and their files with the various Go idiomatic `io` package interfaces. This is also demonstrated through [torrentfs](#torrentfs). + +There is [support for protocol encryption, DHT, PEX, uTP, and various extensions](https://godoc.org/github.com/anacrolix/torrent). There are [several data storage backends provided](https://godoc.org/github.com/anacrolix/torrent/storage): blob, file, bolt, mmap, and sqlite, to name a few. You can [write your own](https://godoc.org/github.com/anacrolix/torrent/storage#ClientImpl) to store data for example on S3, or in a database. + +Some noteworthy package dependencies that can be used for other purposes include: + + * [go-libutp](https://github.com/anacrolix/go-libutp) + * [dht](https://github.com/anacrolix/dht) + * [bencode](https://godoc.org/github.com/anacrolix/torrent/bencode) + * [tracker](https://godoc.org/github.com/anacrolix/torrent/tracker) + +## Installation + +Install the library package with `go get github.com/anacrolix/torrent`, or the provided cmds with `go install github.com/anacrolix/torrent/cmd/...@latest`. + +## Library examples + +There are some small [examples](https://godoc.org/github.com/anacrolix/torrent#pkg-examples) in the package documentation. + +## Mentions + + * [@anacrolix](https://github.com/anacrolix) is interviewed about this repo in [Console 32](https://console.substack.com/p/console-32). + +### Downstream projects + +There are several web-frontends, sites, Android clients, storage backends and supporting services among the known public projects: + + * [cove](https://coveapp.info): Personal torrent browser with streaming, DHT search, video transcoding and casting. + * [confluence](https://github.com/anacrolix/confluence): torrent client as a HTTP service + * [Gopeed](https://github.com/GopeedLab/gopeed): Gopeed (full name Go Speed), a high-speed downloader developed by Golang + Flutter, supports (HTTP, BitTorrent, Magnet) protocol, and supports all platforms. + * [Erigon](https://github.com/ledgerwatch/erigon): an implementation of Ethereum (execution layer with embeddable consensus layer), on the efficiency frontier. 
+ * [exatorrent](https://github.com/varbhat/exatorrent): Elegant self-hostable torrent client + * [bitmagnet](https://github.com/bitmagnet-io/bitmagnet): A self-hosted BitTorrent indexer, DHT crawler, content classifier and torrent search engine with web UI, GraphQL API and Servarr stack integration. + * [TorrServer](https://github.com/YouROK/TorrServer): Torrent streaming server over http + * [distribyted](https://github.com/distribyted/distribyted): Distribyted is an alternative torrent client. It can expose torrent files as a standard FUSE, webDAV or HTTP endpoint and download them on demand, allowing random reads using a fixed amount of disk space. + * [Simple Torrent](https://github.com/boypt/simple-torrent): self-hosted HTTP remote torrent client + * [autobrr](https://github.com/autobrr/autobrr): autobrr redefines download automation for torrents and Usenet, drawing inspiration from tools like trackarr, autodl-irssi, and flexget. + * [mabel](https://github.com/smmr-software/mabel): Fancy BitTorrent client for the terminal + * [webtor.io](https://webtor.io/): free cloud BitTorrent-client + * [Android Torrent Client](https://gitlab.com/axet/android-torrent-client): Android torrent client + * [libtorrent](https://gitlab.com/axet/libtorrent): gomobile wrapper + * [Go-PeersToHTTP](https://github.com/WinPooh32/peerstohttp): Simple torrent proxy to http stream controlled over REST-like api + * [CortexFoundation/torrentfs](https://github.com/CortexFoundation/torrentfs): Independent HTTP service for file seeding and P2P file system of cortex full node + * [goTorrent](https://github.com/deranjer/goTorrent): torrenting server with a React web frontend + * [Go Peerflix](https://github.com/Sioro-Neoku/go-peerflix): Start watching the movie while your torrent is still downloading! + * [hTorrent](https://github.com/pojntfx/htorrent): HTTP to BitTorrent gateway with seeking support. + * [Remote-Torrent](https://github.com/BruceWangNo1/remote-torrent): Download Remotely and Retrieve Files Over HTTP + * [Trickl](https://github.com/arranlomas/Trickl): torrent client for android + * [ANT-Downloader](https://github.com/anatasluo/ant): ANT Downloader is a BitTorrent Client developed by golang, angular 7, and electron + * [Elementum](http://elementum.surge.sh/) (up to version 0.0.71) + +## Help + +Communication about the project is primarily through [Discussions](https://github.com/anacrolix/torrent/discussions) and the [issue tracker](https://github.com/anacrolix/torrent/issues). + +## Command packages + +Here I'll describe what some of the packages in `./cmd` do. See [installation](#installation) to make them available. + +### `torrent` + +#### `torrent download` + +Downloads torrents from the command-line. + + $ torrent download 'magnet:?xt=urn:btih:KRWPCX3SJUM4IMM4YF5RPHL6ANPYTQPU' + ... lots of jibber jabber ... + downloading "ubuntu-14.04.2-desktop-amd64.iso": 1.0 GB/1.0 GB, 1989/1992 pieces completed (1 partial) + 2015/04/01 02:08:20 main.go:137: downloaded ALL the torrents + $ md5sum ubuntu-14.04.2-desktop-amd64.iso + 1b305d585b1918f297164add46784116 ubuntu-14.04.2-desktop-amd64.iso + $ echo such amaze + wow + +#### `torrent metainfo magnet` + +Creates a magnet link from a torrent file. Note the extracted trackers, display name, and info hash. 
+ + $ torrent metainfo testdata/debian-10.8.0-amd64-netinst.iso.torrent magnet + magnet:?xt=urn:btih:4090c3c2a394a49974dfbbf2ce7ad0db3cdeddd7&dn=debian-10.8.0-amd64-netinst.iso&tr=http%3A%2F%2Fbttracker.debian.org%3A6969%2Fannounce + +See `torrent metainfo --help` for other metainfo-related commands. + +### `torrentfs` + +torrentfs mounts a FUSE filesystem at `-mountDir`. The contents are the torrents described by the torrent files and magnet links at `-metainfoDir`. Data for read requests is fetched only as required from the torrent network, and stored at `-downloadDir`. + + $ mkdir mnt torrents + $ torrentfs -mountDir=mnt -metainfoDir=torrents & + $ cd torrents + $ wget http://releases.ubuntu.com/14.04.2/ubuntu-14.04.2-desktop-amd64.iso.torrent + $ cd .. + $ ls mnt + ubuntu-14.04.2-desktop-amd64.iso + $ pv mnt/ubuntu-14.04.2-desktop-amd64.iso | md5sum + 996MB 0:04:40 [3.55MB/s] [========================================>] 100% + 1b305d585b1918f297164add46784116 - + diff --git a/deps/github.com/anacrolix/torrent/SECURITY.md b/deps/github.com/anacrolix/torrent/SECURITY.md new file mode 100644 index 0000000..688b1cf --- /dev/null +++ b/deps/github.com/anacrolix/torrent/SECURITY.md @@ -0,0 +1,11 @@ +# Security Policy + +## Supported Versions + +The two most recent minor releases are supported, with older versions receiving updates subject to contributor discretion. +Please also report issues in master, but there are no guarantees of stability there. + +## Reporting a Vulnerability + +All vulnerability reports are welcome. Use your discretion when reporting: post to Discussions, open an Issue, or message a maintainer directly. +For a non-trivial issue, you should receive a response within a week, and more likely within a day or two. diff --git a/deps/github.com/anacrolix/torrent/TODO b/deps/github.com/anacrolix/torrent/TODO new file mode 100644 index 0000000..02f983a --- /dev/null +++ b/deps/github.com/anacrolix/torrent/TODO @@ -0,0 +1,5 @@ + * Make use of sparse file regions in download data for faster hashing. This is available as whence 3 and 4 on some OSs? + * When we're choked and interested, are we not interested if there's no longer anything that we want? + * dht: Randomize triedAddrs bloom filter to allow different Addr sets on each Announce. + * data/blob: Deleting incomplete data triggers io.ErrUnexpectedEOF that isn't recovered from. + * Handle wanted pieces more efficiently; it's slow in fillRequests since the prioritization system was changed. diff --git a/deps/github.com/anacrolix/torrent/analysis/peer-upload-order.go b/deps/github.com/anacrolix/torrent/analysis/peer-upload-order.go new file mode 100644 index 0000000..8713804 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/analysis/peer-upload-order.go @@ -0,0 +1,103 @@ +package analysis + +import ( + "fmt" + "log" + "sync" + + "github.com/elliotchance/orderedmap" + + "github.com/anacrolix/torrent" + pp "github.com/anacrolix/torrent/peer_protocol" +) + +type peerData struct { + requested *orderedmap.OrderedMap + haveDeleted map[torrent.Request]bool +} + +// Tracks the order in which peers upload the requests that we've sent them.
+type PeerUploadOrder struct { + mu sync.Mutex + peers map[*torrent.Peer]*peerData +} + +func (me *PeerUploadOrder) Init() { + me.peers = make(map[*torrent.Peer]*peerData) +} + +func (me *PeerUploadOrder) onNewPeer(p *torrent.Peer) { + me.mu.Lock() + defer me.mu.Unlock() + if _, ok := me.peers[p]; ok { + panic("already have peer") + } + me.peers[p] = &peerData{ + requested: orderedmap.NewOrderedMap(), + haveDeleted: make(map[torrent.Request]bool), + } +} + +func (me *PeerUploadOrder) onSentRequest(event torrent.PeerRequestEvent) { + me.mu.Lock() + defer me.mu.Unlock() + if !me.peers[event.Peer].requested.Set(event.Request, nil) { + panic("duplicate request sent") + } +} + +func (me *PeerUploadOrder) Install(cbs *torrent.Callbacks) { + cbs.NewPeer = append(cbs.NewPeer, me.onNewPeer) + cbs.SentRequest = append(cbs.SentRequest, me.onSentRequest) + cbs.ReceivedRequested = append(cbs.ReceivedRequested, me.onReceivedRequested) + cbs.DeletedRequest = append(cbs.DeletedRequest, me.deletedRequest) +} + +func (me *PeerUploadOrder) report(desc string, req torrent.Request, peer *torrent.Peer) { + peerConn, ok := peer.TryAsPeerConn() + var peerId *torrent.PeerID + if ok { + peerId = &peerConn.PeerID + } + log.Printf("%s: %v, %v", desc, req, peerId) +} + +func (me *PeerUploadOrder) onReceivedRequested(event torrent.PeerMessageEvent) { + req := torrent.Request{ + event.Message.Index, + torrent.ChunkSpec{ + Begin: event.Message.Begin, + Length: pp.Integer(len(event.Message.Piece)), + }, + } + makeLogMsg := func(desc string) string { + peerConn, ok := event.Peer.TryAsPeerConn() + var peerId *torrent.PeerID + if ok { + peerId = &peerConn.PeerID + } + return fmt.Sprintf("%s: %q, %v", desc, peerId, req) + } + me.mu.Lock() + defer me.mu.Unlock() + peerData := me.peers[event.Peer] + if peerData.requested.Front().Key.(torrent.Request) == req { + log.Print(makeLogMsg("got next requested piece")) + } else if _, ok := peerData.requested.Get(req); ok { + log.Print(makeLogMsg(fmt.Sprintf( + "got requested piece but not next (previous delete=%v)", + peerData.haveDeleted[req]))) + } else { + panic(makeLogMsg("got unrequested piece")) + } +} + +func (me *PeerUploadOrder) deletedRequest(event torrent.PeerRequestEvent) { + me.mu.Lock() + defer me.mu.Unlock() + peerData := me.peers[event.Peer] + if !peerData.requested.Delete(event.Request) { + panic("nothing to delete") + } + peerData.haveDeleted[event.Request] = true +} diff --git a/deps/github.com/anacrolix/torrent/bad_storage.go b/deps/github.com/anacrolix/torrent/bad_storage.go new file mode 100644 index 0000000..fc15beb --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bad_storage.go @@ -0,0 +1,56 @@ +package torrent + +import ( + "errors" + "math/rand" + "strings" + + "github.com/anacrolix/torrent/internal/testutil" + "github.com/anacrolix/torrent/metainfo" + "github.com/anacrolix/torrent/storage" +) + +type badStorage struct{} + +var _ storage.ClientImpl = badStorage{} + +func (bs badStorage) OpenTorrent(*metainfo.Info, metainfo.Hash) (storage.TorrentImpl, error) { + return storage.TorrentImpl{ + Piece: bs.Piece, + }, nil +} + +func (bs badStorage) Piece(p metainfo.Piece) storage.PieceImpl { + return badStoragePiece{p} +} + +type badStoragePiece struct { + p metainfo.Piece +} + +var _ storage.PieceImpl = badStoragePiece{} + +func (p badStoragePiece) WriteAt(b []byte, off int64) (int, error) { + return 0, nil +} + +func (p badStoragePiece) Completion() storage.Completion { + return storage.Completion{Complete: true, Ok: true} +} + +func (p badStoragePiece) 
MarkComplete() error { + return errors.New("psyyyyyyyche") +} + +func (p badStoragePiece) MarkNotComplete() error { + return errors.New("psyyyyyyyche") +} + +func (p badStoragePiece) randomlyTruncatedDataString() string { + return testutil.GreetingFileContents[:rand.Intn(14)] +} + +func (p badStoragePiece) ReadAt(b []byte, off int64) (n int, err error) { + r := strings.NewReader(p.randomlyTruncatedDataString()) + return r.ReadAt(b, off+p.p.Offset()) +} diff --git a/deps/github.com/anacrolix/torrent/bencode/README.md b/deps/github.com/anacrolix/torrent/bencode/README.md new file mode 100644 index 0000000..4dbc67b --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/README.md @@ -0,0 +1,38 @@ +Bencode encoding/decoding sub-package. Uses a similar API design to Go's json package. + +## Install + +```sh +go get github.com/anacrolix/torrent +``` + +## Usage + +```go +package main + +import ( + "fmt" + "log" + + "github.com/anacrolix/torrent/bencode" +) + +type Message struct { + Query string `json:"q,omitempty" bencode:"q,omitempty"` +} + +var v Message + +func main() { + // encode + data, err := bencode.Marshal(v) + if err != nil { + log.Fatal(err) + } + + // decode + err = bencode.Unmarshal(data, &v) + if err != nil { + log.Fatal(err) + } + fmt.Println(v) +} +``` diff --git a/deps/github.com/anacrolix/torrent/bencode/api.go b/deps/github.com/anacrolix/torrent/bencode/api.go new file mode 100644 index 0000000..3c379ab --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/api.go @@ -0,0 +1,164 @@ +package bencode + +import ( + "bytes" + "fmt" + "io" + "reflect" + + "github.com/anacrolix/missinggo/expect" +) + +//---------------------------------------------------------------------------- +// Errors +//---------------------------------------------------------------------------- + +// If the marshaler cannot encode a type, it returns this error. A typical +// example of such a type is float32/float64, which has no bencode representation. +type MarshalTypeError struct { + Type reflect.Type +} + +func (e *MarshalTypeError) Error() string { + return "bencode: unsupported type: " + e.Type.String() +} + +// The Unmarshal argument must be a non-nil value of some pointer type. +type UnmarshalInvalidArgError struct { + Type reflect.Type +} + +func (e *UnmarshalInvalidArgError) Error() string { + if e.Type == nil { + return "bencode: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "bencode: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "bencode: Unmarshal(nil " + e.Type.String() + ")" +} + +// The unmarshaler spotted a value that was not appropriate for a given Go value. +type UnmarshalTypeError struct { + BencodeTypeName string + UnmarshalTargetType reflect.Type +} + +// This could probably be a value type, but we may already have users assuming +// that it's passed by pointer. +func (e *UnmarshalTypeError) Error() string { + return fmt.Sprintf( + "can't unmarshal a bencode %v into a %v", + e.BencodeTypeName, + e.UnmarshalTargetType, + ) +} + +// The unmarshaler tried to write to an unexported (therefore unwritable) field. +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "bencode: key \"" + e.Key + "\" led to an unexported field \"" + + e.Field.Name + "\" in type: " + e.Type.String() +} + +// Malformed bencode input; the unmarshaler failed to parse it.
+type SyntaxError struct { + Offset int64 // location of the error + What error // error description +} + +func (e *SyntaxError) Error() string { + return fmt.Sprintf("bencode: syntax error (offset: %d): %s", e.Offset, e.What) +} + +// A non-nil error was returned after calling MarshalBencode on a type which +// implements the Marshaler interface. +type MarshalerError struct { + Type reflect.Type + Err error +} + +func (e *MarshalerError) Error() string { + return "bencode: error calling MarshalBencode for type " + e.Type.String() + ": " + e.Err.Error() +} + +// A non-nil error was returned after calling UnmarshalBencode on a type which +// implements the Unmarshaler interface. +type UnmarshalerError struct { + Type reflect.Type + Err error +} + +func (e *UnmarshalerError) Error() string { + return "bencode: error calling UnmarshalBencode for type " + e.Type.String() + ": " + e.Err.Error() +} + +//---------------------------------------------------------------------------- +// Interfaces +//---------------------------------------------------------------------------- + +// Any type that implements this interface will be marshaled using the +// specified method. +type Marshaler interface { + MarshalBencode() ([]byte, error) +} + +// Any type that implements this interface will be unmarshaled using the +// specified method. +type Unmarshaler interface { + UnmarshalBencode([]byte) error +} + +// Marshal the value 'v' to bencode form, returning the result as a []byte and +// an error if any. +func Marshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + e := Encoder{w: &buf} + err := e.Encode(v) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func MustMarshal(v interface{}) []byte { + b, err := Marshal(v) + expect.Nil(err) + return b +} + +// Unmarshal the bencode value in 'data' into the value pointed to by 'v', returning a non-nil +// error if any. If there are trailing bytes, this results in ErrUnusedTrailingBytes, but the value +// will be valid. It's probably more consistent to use Decoder.Decode if you want to rely on this +// behaviour (inspired by Rust's serde here).
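+// For example, Unmarshal([]byte("i42ee"), &x) sets x to 42 but also returns
+// ErrUnusedTrailingBytes{1} for the single unconsumed trailing byte.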
+func Unmarshal(data []byte, v interface{}) (err error) { + buf := bytes.NewReader(data) + e := Decoder{r: buf} + err = e.Decode(v) + if err == nil && buf.Len() != 0 { + err = ErrUnusedTrailingBytes{buf.Len()} + } + return +} + +type ErrUnusedTrailingBytes struct { + NumUnusedBytes int +} + +func (me ErrUnusedTrailingBytes) Error() string { + return fmt.Sprintf("%d unused trailing bytes", me.NumUnusedBytes) +} + +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: &scanner{r: r}} +} + +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w} +} diff --git a/deps/github.com/anacrolix/torrent/bencode/bench_test.go b/deps/github.com/anacrolix/torrent/bencode/bench_test.go new file mode 100644 index 0000000..5cf1ce8 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/bench_test.go @@ -0,0 +1,45 @@ +package bencode_test + +import ( + "net" + "reflect" + "testing" + + "github.com/anacrolix/dht/v2/krpc" + + "github.com/anacrolix/torrent/bencode" +) + +func marshalAndUnmarshal(tb testing.TB, orig krpc.Msg) (ret krpc.Msg) { + b, err := bencode.Marshal(orig) + if err != nil { + tb.Fatal(err) + } + err = bencode.Unmarshal(b, &ret) + if err != nil { + tb.Fatal(err) + } + // ret.Q = "what" + return +} + +func BenchmarkMarshalThenUnmarshalKrpcMsg(tb *testing.B) { + orig := krpc.Msg{ + T: "420", + Y: "r", + R: &krpc.Return{ + Token: func() *string { t := "re-up"; return &t }(), + }, + IP: krpc.NodeAddr{IP: net.ParseIP("1.2.3.4"), Port: 1337}, + ReadOnly: true, + } + first := marshalAndUnmarshal(tb, orig) + if !reflect.DeepEqual(orig, first) { + tb.Fail() + } + tb.ReportAllocs() + tb.ResetTimer() + for i := 0; i < tb.N; i += 1 { + marshalAndUnmarshal(tb, orig) + } +} diff --git a/deps/github.com/anacrolix/torrent/bencode/both_test.go b/deps/github.com/anacrolix/torrent/bencode/both_test.go new file mode 100644 index 0000000..fdcb90d --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/both_test.go @@ -0,0 +1,76 @@ +package bencode + +import ( + "bytes" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func loadFile(name string, t *testing.T) []byte { + data, err := os.ReadFile(name) + require.NoError(t, err) + return data +} + +func testFileInterface(t *testing.T, filename string) { + data1 := loadFile(filename, t) + + var iface interface{} + err := Unmarshal(data1, &iface) + require.NoError(t, err) + + data2, err := Marshal(iface) + require.NoError(t, err) + + assert.EqualValues(t, data1, data2) +} + +func TestBothInterface(t *testing.T) { + testFileInterface(t, "testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent") + testFileInterface(t, "testdata/continuum.torrent") +} + +type torrentFile struct { + Info struct { + Name string `bencode:"name"` + Length int64 `bencode:"length"` + MD5Sum string `bencode:"md5sum,omitempty"` + PieceLength int64 `bencode:"piece length"` + Pieces string `bencode:"pieces"` + Private bool `bencode:"private,omitempty"` + } `bencode:"info"` + + Announce string `bencode:"announce"` + AnnounceList [][]string `bencode:"announce-list,omitempty"` + CreationDate int64 `bencode:"creation date,omitempty"` + Comment string `bencode:"comment,omitempty"` + CreatedBy string `bencode:"created by,omitempty"` + URLList interface{} `bencode:"url-list,omitempty"` +} + +func testFile(t *testing.T, filename string) { + data1 := loadFile(filename, t) + var f torrentFile + + err := Unmarshal(data1, &f) + if err != nil { + t.Fatal(err) + } + + data2, err := Marshal(&f) + if err != nil { + t.Fatal(err) + } 
+ + if !bytes.Equal(data1, data2) { + println(string(data2)) + t.Fatalf("equality expected") + } +} + +func TestBoth(t *testing.T) { + testFile(t, "testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent") +} diff --git a/deps/github.com/anacrolix/torrent/bencode/bytes.go b/deps/github.com/anacrolix/torrent/bencode/bytes.go new file mode 100644 index 0000000..42a1db2 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/bytes.go @@ -0,0 +1,30 @@ +package bencode + +import ( + "errors" + "fmt" +) + +type Bytes []byte + +var ( + _ Unmarshaler = (*Bytes)(nil) + _ Marshaler = (*Bytes)(nil) + _ Marshaler = Bytes{} +) + +func (me *Bytes) UnmarshalBencode(b []byte) error { + *me = append([]byte(nil), b...) + return nil +} + +func (me Bytes) MarshalBencode() ([]byte, error) { + if len(me) == 0 { + return nil, errors.New("marshalled Bytes should not be zero-length") + } + return me, nil +} + +func (me Bytes) GoString() string { + return fmt.Sprintf("bencode.Bytes(%q)", []byte(me)) +} diff --git a/deps/github.com/anacrolix/torrent/bencode/bytes_test.go b/deps/github.com/anacrolix/torrent/bencode/bytes_test.go new file mode 100644 index 0000000..08b4f98 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/bytes_test.go @@ -0,0 +1,39 @@ +package bencode + +import ( + "testing" + + qt "github.com/frankban/quicktest" +) + +func TestBytesMarshalNil(t *testing.T) { + var b Bytes + Marshal(b) +} + +type structWithBytes struct { + A Bytes + B Bytes +} + +func TestMarshalNilStructBytes(t *testing.T) { + _, err := Marshal(structWithBytes{B: Bytes("i42e")}) + c := qt.New(t) + c.Assert(err, qt.IsNotNil) +} + +type structWithOmitEmptyBytes struct { + A Bytes `bencode:",omitempty"` + B Bytes `bencode:",omitempty"` +} + +func TestMarshalNilStructBytesOmitEmpty(t *testing.T) { + c := qt.New(t) + b, err := Marshal(structWithOmitEmptyBytes{B: Bytes("i42e")}) + c.Assert(err, qt.IsNil) + t.Logf("%q", b) + var s structWithBytes + err = Unmarshal(b, &s) + c.Assert(err, qt.IsNil) + c.Check(s.B, qt.DeepEquals, Bytes("i42e")) +} diff --git a/deps/github.com/anacrolix/torrent/bencode/decode.go b/deps/github.com/anacrolix/torrent/bencode/decode.go new file mode 100644 index 0000000..3839b84 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/decode.go @@ -0,0 +1,752 @@ +package bencode + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "runtime" + "strconv" + "sync" +) + +// The default bencode string length limit. This is a poor attempt to prevent excessive memory +// allocation when parsing, but also leaves the window open to implement a better solution. +const DefaultDecodeMaxStrLen = 1<<27 - 1 // ~128MiB + +type MaxStrLen = int64 + +type Decoder struct { + // Maximum parsed bencode string length. Defaults to DefaultMaxStrLen if zero. + MaxStrLen MaxStrLen + + r interface { + io.ByteScanner + io.Reader + } + // Sum of bytes used to Decode values. + Offset int64 + buf bytes.Buffer +} + +func (d *Decoder) Decode(v interface{}) (err error) { + defer func() { + if err != nil { + return + } + r := recover() + if r == nil { + return + } + _, ok := r.(runtime.Error) + if ok { + panic(r) + } + if err, ok = r.(error); !ok { + panic(r) + } + // Errors thrown from deeper in parsing are unexpected. At value boundaries, errors should + // be returned directly (at least until all the panic nonsense is removed entirely). 
+ if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + pv := reflect.ValueOf(v) + if pv.Kind() != reflect.Ptr || pv.IsNil() { + return &UnmarshalInvalidArgError{reflect.TypeOf(v)} + } + + ok, err := d.parseValue(pv.Elem()) + if err != nil { + return + } + if !ok { + d.throwSyntaxError(d.Offset-1, errors.New("unexpected 'e'")) + } + return +} + +func checkForUnexpectedEOF(err error, offset int64) { + if err == io.EOF { + panic(&SyntaxError{ + Offset: offset, + What: io.ErrUnexpectedEOF, + }) + } +} + +func (d *Decoder) readByte() byte { + b, err := d.r.ReadByte() + if err != nil { + checkForUnexpectedEOF(err, d.Offset) + panic(err) + } + + d.Offset++ + return b +} + +// reads data writing it to 'd.buf' until 'sep' byte is encountered, 'sep' byte +// is consumed, but not included into the 'd.buf' +func (d *Decoder) readUntil(sep byte) { + for { + b := d.readByte() + if b == sep { + return + } + d.buf.WriteByte(b) + } +} + +func checkForIntParseError(err error, offset int64) { + if err != nil { + panic(&SyntaxError{ + Offset: offset, + What: err, + }) + } +} + +func (d *Decoder) throwSyntaxError(offset int64, err error) { + panic(&SyntaxError{ + Offset: offset, + What: err, + }) +} + +// Assume the 'i' is already consumed. Read and validate the rest of an int into the buffer. +func (d *Decoder) readInt() error { + // start := d.Offset - 1 + d.readUntil('e') + if err := d.checkBufferedInt(); err != nil { + return err + } + // if d.buf.Len() == 0 { + // panic(&SyntaxError{ + // Offset: start, + // What: errors.New("empty integer value"), + // }) + // } + return nil +} + +// called when 'i' was consumed, for the integer type in v. +func (d *Decoder) parseInt(v reflect.Value) error { + start := d.Offset - 1 + + if err := d.readInt(); err != nil { + return err + } + s := bytesAsString(d.buf.Bytes()) + + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + checkForIntParseError(err, start) + + if v.OverflowInt(n) { + return &UnmarshalTypeError{ + BencodeTypeName: "int", + UnmarshalTargetType: v.Type(), + } + } + v.SetInt(n) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + n, err := strconv.ParseUint(s, 10, 64) + checkForIntParseError(err, start) + + if v.OverflowUint(n) { + return &UnmarshalTypeError{ + BencodeTypeName: "int", + UnmarshalTargetType: v.Type(), + } + } + v.SetUint(n) + case reflect.Bool: + v.SetBool(s != "0") + default: + return &UnmarshalTypeError{ + BencodeTypeName: "int", + UnmarshalTargetType: v.Type(), + } + } + d.buf.Reset() + return nil +} + +func (d *Decoder) checkBufferedInt() error { + b := d.buf.Bytes() + if len(b) <= 1 { + return nil + } + if b[0] == '-' { + b = b[1:] + } + if b[0] < '1' || b[0] > '9' { + return errors.New("invalid leading digit") + } + return nil +} + +func (d *Decoder) parseStringLength() (int, error) { + // We should have already consumed the first byte of the length into the Decoder buf. + start := d.Offset - 1 + d.readUntil(':') + if err := d.checkBufferedInt(); err != nil { + return 0, err + } + // Really the limit should be the uint size for the platform. But we can't pass in an allocator, + // or limit total memory use in Go, the best we might hope to do is limit the size of a single + // decoded value (by reading it in in-place and then operating on a view). 
+ length, err := strconv.ParseInt(bytesAsString(d.buf.Bytes()), 10, 0) + checkForIntParseError(err, start) + if int64(length) > d.getMaxStrLen() { + err = fmt.Errorf("parsed string length %v exceeds limit (%v)", length, DefaultDecodeMaxStrLen) + } + d.buf.Reset() + return int(length), err +} + +func (d *Decoder) parseString(v reflect.Value) error { + length, err := d.parseStringLength() + if err != nil { + return err + } + defer d.buf.Reset() + read := func(b []byte) { + n, err := io.ReadFull(d.r, b) + d.Offset += int64(n) + if err != nil { + checkForUnexpectedEOF(err, d.Offset) + panic(&SyntaxError{ + Offset: d.Offset, + What: errors.New("unexpected I/O error: " + err.Error()), + }) + } + } + + switch v.Kind() { + case reflect.String: + b := make([]byte, length) + read(b) + v.SetString(bytesAsString(b)) + return nil + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + break + } + b := make([]byte, length) + read(b) + v.SetBytes(b) + return nil + case reflect.Array: + if v.Type().Elem().Kind() != reflect.Uint8 { + break + } + d.buf.Grow(length) + b := d.buf.Bytes()[:length] + read(b) + reflect.Copy(v, reflect.ValueOf(b)) + return nil + case reflect.Bool: + d.buf.Grow(length) + b := d.buf.Bytes()[:length] + read(b) + x, err := strconv.ParseBool(bytesAsString(b)) + if err != nil { + x = length != 0 + } + v.SetBool(x) + return nil + } + // Can't move this into default clause because some cases above fail through to here after + // additional checks. + d.buf.Grow(length) + read(d.buf.Bytes()[:length]) + // I believe we return here to support "ignore_unmarshal_type_error". + return &UnmarshalTypeError{ + BencodeTypeName: "string", + UnmarshalTargetType: v.Type(), + } +} + +// Info for parsing a dict value. +type dictField struct { + Type reflect.Type + Get func(value reflect.Value) func(reflect.Value) + Tags tag +} + +// Returns specifics for parsing a dict field value. +func getDictField(dict reflect.Type, key string) (_ dictField, err error) { + // get valuev as a map value or as a struct field + switch k := dict.Kind(); k { + case reflect.Map: + return dictField{ + Type: dict.Elem(), + Get: func(mapValue reflect.Value) func(reflect.Value) { + return func(value reflect.Value) { + if mapValue.IsNil() { + mapValue.Set(reflect.MakeMap(dict)) + } + // Assigns the value into the map. 
+ // log.Printf("map type: %v", mapValue.Type()) + mapValue.SetMapIndex(reflect.ValueOf(key).Convert(dict.Key()), value) + } + }, + }, nil + case reflect.Struct: + return getStructFieldForKey(dict, key), nil + // if sf.r.PkgPath != "" { + // panic(&UnmarshalFieldError{ + // Key: key, + // Type: dict.Type(), + // Field: sf.r, + // }) + // } + default: + err = fmt.Errorf("can't assign bencode dict items into a %v", k) + return + } +} + +var ( + structFieldsMu sync.Mutex + structFields = map[reflect.Type]map[string]dictField{} +) + +func parseStructFields(struct_ reflect.Type, each func(key string, df dictField)) { + for _i, n := 0, struct_.NumField(); _i < n; _i++ { + i := _i + f := struct_.Field(i) + if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + parseStructFields(t, func(key string, df dictField) { + innerGet := df.Get + df.Get = func(value reflect.Value) func(reflect.Value) { + anonPtr := value.Field(i) + if anonPtr.Kind() == reflect.Ptr && anonPtr.IsNil() { + anonPtr.Set(reflect.New(f.Type.Elem())) + anonPtr = anonPtr.Elem() + } + return innerGet(anonPtr) + } + each(key, df) + }) + continue + } + tagStr := f.Tag.Get("bencode") + if tagStr == "-" { + continue + } + tag := parseTag(tagStr) + key := tag.Key() + if key == "" { + key = f.Name + } + each(key, dictField{f.Type, func(value reflect.Value) func(reflect.Value) { + return value.Field(i).Set + }, tag}) + } +} + +func saveStructFields(struct_ reflect.Type) { + m := make(map[string]dictField) + parseStructFields(struct_, func(key string, sf dictField) { + m[key] = sf + }) + structFields[struct_] = m +} + +func getStructFieldForKey(struct_ reflect.Type, key string) (f dictField) { + structFieldsMu.Lock() + if _, ok := structFields[struct_]; !ok { + saveStructFields(struct_) + } + f, ok := structFields[struct_][key] + structFieldsMu.Unlock() + if !ok { + var discard interface{} + return dictField{ + Type: reflect.TypeOf(discard), + Get: func(reflect.Value) func(reflect.Value) { return func(reflect.Value) {} }, + Tags: nil, + } + } + return +} + +func (d *Decoder) parseDict(v reflect.Value) error { + // At this point 'd' byte was consumed, now read key/value pairs + for { + var keyStr string + keyValue := reflect.ValueOf(&keyStr).Elem() + ok, err := d.parseValue(keyValue) + if err != nil { + return fmt.Errorf("error parsing dict key: %w", err) + } + if !ok { + return nil + } + + df, err := getDictField(v.Type(), keyStr) + if err != nil { + return fmt.Errorf("parsing bencode dict into %v: %w", v.Type(), err) + } + + // now we need to actually parse it + if df.Type == nil { + // Discard the value, there's nowhere to put it. + var if_ interface{} + if_, ok = d.parseValueInterface() + if if_ == nil { + return fmt.Errorf("error parsing value for key %q", keyStr) + } + if !ok { + return fmt.Errorf("missing value for key %q", keyStr) + } + continue + } + setValue := reflect.New(df.Type).Elem() + // log.Printf("parsing into %v", setValue.Type()) + ok, err = d.parseValue(setValue) + if err != nil { + var target *UnmarshalTypeError + if !(errors.As(err, &target) && df.Tags.IgnoreUnmarshalTypeError()) { + return fmt.Errorf("parsing value for key %q: %w", keyStr, err) + } + } + if !ok { + return fmt.Errorf("missing value for key %q", keyStr) + } + df.Get(v)(setValue) + } +} + +func (d *Decoder) parseList(v reflect.Value) error { + switch v.Kind() { + default: + // If the list is a singleton of the expected type, use that value. See + // https://github.com/anacrolix/torrent/issues/297. 
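+		// For example, decoding "l3:fooe" into a plain string yields "foo": the list is
+		// parsed into a one-element slice of the target type and that element is assigned.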
+ l := reflect.New(reflect.SliceOf(v.Type())) + if err := d.parseList(l.Elem()); err != nil { + return err + } + if l.Elem().Len() != 1 { + return &UnmarshalTypeError{ + BencodeTypeName: "list", + UnmarshalTargetType: v.Type(), + } + } + v.Set(l.Elem().Index(0)) + return nil + case reflect.Array, reflect.Slice: + // We can work with this. Normal case, fallthrough. + } + + i := 0 + for ; ; i++ { + if v.Kind() == reflect.Slice && i >= v.Len() { + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + } + + if i < v.Len() { + ok, err := d.parseValue(v.Index(i)) + if err != nil { + return err + } + if !ok { + break + } + } else { + _, ok := d.parseValueInterface() + if !ok { + break + } + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + z := reflect.Zero(v.Type().Elem()) + for n := v.Len(); i < n; i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } + return nil +} + +func (d *Decoder) readOneValue() bool { + b, err := d.r.ReadByte() + if err != nil { + panic(err) + } + if b == 'e' { + d.r.UnreadByte() + return false + } else { + d.Offset++ + d.buf.WriteByte(b) + } + + switch b { + case 'd', 'l': + // read until there is nothing to read + for d.readOneValue() { + } + // consume 'e' as well + b = d.readByte() + d.buf.WriteByte(b) + case 'i': + d.readUntil('e') + d.buf.WriteString("e") + default: + if b >= '0' && b <= '9' { + start := d.buf.Len() - 1 + d.readUntil(':') + length, err := strconv.ParseInt(bytesAsString(d.buf.Bytes()[start:]), 10, 64) + checkForIntParseError(err, d.Offset-1) + + d.buf.WriteString(":") + n, err := io.CopyN(&d.buf, d.r, length) + d.Offset += n + if err != nil { + checkForUnexpectedEOF(err, d.Offset) + panic(&SyntaxError{ + Offset: d.Offset, + What: errors.New("unexpected I/O error: " + err.Error()), + }) + } + break + } + + d.raiseUnknownValueType(b, d.Offset-1) + } + + return true +} + +func (d *Decoder) parseUnmarshaler(v reflect.Value) bool { + if !v.Type().Implements(unmarshalerType) { + if v.Addr().Type().Implements(unmarshalerType) { + v = v.Addr() + } else { + return false + } + } + d.buf.Reset() + if !d.readOneValue() { + return false + } + m := v.Interface().(Unmarshaler) + err := m.UnmarshalBencode(d.buf.Bytes()) + if err != nil { + panic(&UnmarshalerError{v.Type(), err}) + } + return true +} + +// Returns true if there was a value and it's now stored in 'v', otherwise +// there was an end symbol ("e") and no value was stored. +func (d *Decoder) parseValue(v reflect.Value) (bool, error) { + // we support one level of indirection at the moment + if v.Kind() == reflect.Ptr { + // if the pointer is nil, allocate a new element of the type it + // points to + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + + if d.parseUnmarshaler(v) { + return true, nil + } + + // common case: interface{} + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + iface, _ := d.parseValueInterface() + v.Set(reflect.ValueOf(iface)) + return true, nil + } + + b, err := d.r.ReadByte() + if err != nil { + return false, err + } + d.Offset++ + + switch b { + case 'e': + return false, nil + case 'd': + return true, d.parseDict(v) + case 'l': + return true, d.parseList(v) + case 'i': + return true, d.parseInt(v) + default: + if b >= '0' && b <= '9' { + // It's a string. + d.buf.Reset() + // Write the first digit of the length to the buffer. 
+ d.buf.WriteByte(b) + return true, d.parseString(v) + } + + d.raiseUnknownValueType(b, d.Offset-1) + } + panic("unreachable") +} + +// An unknown bencode type character was encountered. +func (d *Decoder) raiseUnknownValueType(b byte, offset int64) { + panic(&SyntaxError{ + Offset: offset, + What: fmt.Errorf("unknown value type %+q", b), + }) +} + +func (d *Decoder) parseValueInterface() (interface{}, bool) { + b, err := d.r.ReadByte() + if err != nil { + panic(err) + } + d.Offset++ + + switch b { + case 'e': + return nil, false + case 'd': + return d.parseDictInterface(), true + case 'l': + return d.parseListInterface(), true + case 'i': + return d.parseIntInterface(), true + default: + if b >= '0' && b <= '9' { + // string + // append first digit of the length to the buffer + d.buf.WriteByte(b) + return d.parseStringInterface(), true + } + + d.raiseUnknownValueType(b, d.Offset-1) + panic("unreachable") + } +} + +// Called after 'i', for an arbitrary integer size. +func (d *Decoder) parseIntInterface() (ret interface{}) { + start := d.Offset - 1 + + if err := d.readInt(); err != nil { + panic(err) + } + n, err := strconv.ParseInt(d.buf.String(), 10, 64) + if ne, ok := err.(*strconv.NumError); ok && ne.Err == strconv.ErrRange { + i := new(big.Int) + _, ok := i.SetString(d.buf.String(), 10) + if !ok { + panic(&SyntaxError{ + Offset: start, + What: errors.New("failed to parse integer"), + }) + } + ret = i + } else { + checkForIntParseError(err, start) + ret = n + } + + d.buf.Reset() + return +} + +func (d *Decoder) readBytes(length int) []byte { + b, err := io.ReadAll(io.LimitReader(d.r, int64(length))) + if err != nil { + panic(err) + } + if len(b) != length { + panic(fmt.Errorf("read %v bytes expected %v", len(b), length)) + } + return b +} + +func (d *Decoder) parseStringInterface() string { + length, err := d.parseStringLength() + if err != nil { + panic(err) + } + b := d.readBytes(int(length)) + d.Offset += int64(len(b)) + if err != nil { + panic(&SyntaxError{Offset: d.Offset, What: err}) + } + return bytesAsString(b) +} + +func (d *Decoder) parseDictInterface() interface{} { + dict := make(map[string]interface{}) + var lastKey string + lastKeyOk := false + for { + start := d.Offset + keyi, ok := d.parseValueInterface() + if !ok { + break + } + + key, ok := keyi.(string) + if !ok { + panic(&SyntaxError{ + Offset: d.Offset, + What: errors.New("non-string key in a dict"), + }) + } + if lastKeyOk && key <= lastKey { + d.throwSyntaxError(start, fmt.Errorf("dict keys unsorted: %q <= %q", key, lastKey)) + } + start = d.Offset + valuei, ok := d.parseValueInterface() + if !ok { + d.throwSyntaxError(start, fmt.Errorf("dict elem missing value [key=%v]", key)) + } + + lastKey = key + lastKeyOk = true + dict[key] = valuei + } + return dict +} + +func (d *Decoder) parseListInterface() (list []interface{}) { + list = []interface{}{} + valuei, ok := d.parseValueInterface() + for ok { + list = append(list, valuei) + valuei, ok = d.parseValueInterface() + } + return +} + +func (d *Decoder) getMaxStrLen() int64 { + if d.MaxStrLen == 0 { + return DefaultDecodeMaxStrLen + } + return d.MaxStrLen +} diff --git a/deps/github.com/anacrolix/torrent/bencode/decode_test.go b/deps/github.com/anacrolix/torrent/bencode/decode_test.go new file mode 100644 index 0000000..4d05d2b --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/decode_test.go @@ -0,0 +1,267 @@ +package bencode + +import ( + "bytes" + "fmt" + "io" + "math/big" + "reflect" + "testing" + + qt "github.com/frankban/quicktest" + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type random_decode_test struct { + data string + expected interface{} +} + +var random_decode_tests = []random_decode_test{ + {"i57e", int64(57)}, + {"i-9223372036854775808e", int64(-9223372036854775808)}, + {"5:hello", "hello"}, + {"29:unicode test проверка", "unicode test проверка"}, + {"d1:ai5e1:b5:helloe", map[string]interface{}{"a": int64(5), "b": "hello"}}, + { + "li5ei10ei15ei20e7:bencodee", + []interface{}{int64(5), int64(10), int64(15), int64(20), "bencode"}, + }, + {"ldedee", []interface{}{map[string]interface{}{}, map[string]interface{}{}}}, + {"le", []interface{}{}}, + {"i604919719469385652980544193299329427705624352086e", func() *big.Int { + ret, _ := big.NewInt(-1).SetString("604919719469385652980544193299329427705624352086", 10) + return ret + }()}, + {"d1:rd6:\xd4/\xe2F\x00\x01i42ee1:t3:\x9a\x87\x011:v4:TR%=1:y1:re", map[string]interface{}{ + "r": map[string]interface{}{"\xd4/\xe2F\x00\x01": int64(42)}, + "t": "\x9a\x87\x01", + "v": "TR%=", + "y": "r", + }}, + {"d0:i420ee", map[string]interface{}{"": int64(420)}}, +} + +func TestRandomDecode(t *testing.T) { + for _, test := range random_decode_tests { + var value interface{} + err := Unmarshal([]byte(test.data), &value) + if err != nil { + t.Error(err, test.data) + continue + } + assert.EqualValues(t, test.expected, value) + } +} + +func TestLoneE(t *testing.T) { + var v int + err := Unmarshal([]byte("e"), &v) + se := err.(*SyntaxError) + require.EqualValues(t, 0, se.Offset) +} + +func TestDecoderConsecutive(t *testing.T) { + d := NewDecoder(bytes.NewReader([]byte("i1ei2e"))) + var i int + err := d.Decode(&i) + require.NoError(t, err) + require.EqualValues(t, 1, i) + err = d.Decode(&i) + require.NoError(t, err) + require.EqualValues(t, 2, i) + err = d.Decode(&i) + require.Equal(t, io.EOF, err) +} + +func TestDecoderConsecutiveDicts(t *testing.T) { + bb := bytes.NewBufferString("d4:herp4:derped3:wat1:ke17:oh baby a triple!") + + d := NewDecoder(bb) + assert.EqualValues(t, "d4:herp4:derped3:wat1:ke17:oh baby a triple!", bb.Bytes()) + assert.EqualValues(t, 0, d.Offset) + + var m map[string]interface{} + + require.NoError(t, d.Decode(&m)) + assert.Len(t, m, 1) + assert.Equal(t, "derp", m["herp"]) + assert.Equal(t, "d3:wat1:ke17:oh baby a triple!", bb.String()) + assert.EqualValues(t, 14, d.Offset) + + require.NoError(t, d.Decode(&m)) + assert.Equal(t, "k", m["wat"]) + assert.Equal(t, "17:oh baby a triple!", bb.String()) + assert.EqualValues(t, 24, d.Offset) + + var s string + require.NoError(t, d.Decode(&s)) + assert.Equal(t, "oh baby a triple!", s) + assert.EqualValues(t, 44, d.Offset) +} + +func check_error(t *testing.T, err error) { + if err != nil { + t.Error(err) + } +} + +func assert_equal(t *testing.T, x, y interface{}) { + if !reflect.DeepEqual(x, y) { + t.Errorf("got: %v (%T), expected: %v (%T)\n", x, x, y, y) + } +} + +type unmarshalerInt struct { + x int +} + +func (me *unmarshalerInt) UnmarshalBencode(data []byte) error { + return Unmarshal(data, &me.x) +} + +type unmarshalerString struct { + x string +} + +func (me *unmarshalerString) UnmarshalBencode(data []byte) error { + me.x = string(data) + return nil +} + +func TestUnmarshalerBencode(t *testing.T) { + var i unmarshalerInt + var ss []unmarshalerString + check_error(t, Unmarshal([]byte("i71e"), &i)) + assert_equal(t, i.x, 71) + check_error(t, Unmarshal([]byte("l5:hello5:fruit3:waye"), &ss)) + assert_equal(t, ss[0].x, "5:hello") + assert_equal(t, ss[1].x, "5:fruit") + 
assert_equal(t, ss[2].x, "3:way") +} + +func TestIgnoreUnmarshalTypeError(t *testing.T) { + s := struct { + Ignore int `bencode:",ignore_unmarshal_type_error"` + Normal int + }{} + require.Error(t, Unmarshal([]byte("d6:Normal5:helloe"), &s)) + assert.NoError(t, Unmarshal([]byte("d6:Ignore5:helloe"), &s)) + qt.Assert(t, Unmarshal([]byte("d6:Ignorei42ee"), &s), qt.IsNil) + assert.EqualValues(t, 42, s.Ignore) +} + +// Test unmarshalling []byte into something that has the same kind but +// different type. +func TestDecodeCustomSlice(t *testing.T) { + type flag byte + var fs3, fs2 []flag + // We do a longer slice then a shorter slice to see if the buffers are + // shared. + d := NewDecoder(bytes.NewBufferString("3:\x01\x10\xff2:\x04\x0f")) + require.NoError(t, d.Decode(&fs3)) + require.NoError(t, d.Decode(&fs2)) + assert.EqualValues(t, []flag{1, 16, 255}, fs3) + assert.EqualValues(t, []flag{4, 15}, fs2) +} + +func TestUnmarshalUnusedBytes(t *testing.T) { + var i int + require.EqualValues(t, ErrUnusedTrailingBytes{1}, Unmarshal([]byte("i42ee"), &i)) + assert.EqualValues(t, 42, i) +} + +func TestUnmarshalByteArray(t *testing.T) { + var ba [2]byte + assert.NoError(t, Unmarshal([]byte("2:hi"), &ba)) + assert.EqualValues(t, "hi", ba[:]) +} + +func TestDecodeDictIntoUnsupported(t *testing.T) { + // Any type that a dict shouldn't be unmarshallable into. + var i int + c := qt.New(t) + err := Unmarshal([]byte("d1:a1:be"), &i) + t.Log(err) + c.Check(err, qt.Not(qt.IsNil)) +} + +func TestUnmarshalDictKeyNotString(t *testing.T) { + // Any type that a dict shouldn't be unmarshallable into. + var i int + c := qt.New(t) + err := Unmarshal([]byte("di42e3:yese"), &i) + t.Log(err) + c.Check(err, qt.Not(qt.IsNil)) +} + +type arbitraryReader struct{} + +func (arbitraryReader) Read(b []byte) (int, error) { + return len(b), nil +} + +func decodeHugeString(t *testing.T, strLen int64, header, tail string, v interface{}, maxStrLen MaxStrLen) error { + r, w := io.Pipe() + go func() { + fmt.Fprintf(w, header, strLen) + io.CopyN(w, arbitraryReader{}, strLen) + w.Write([]byte(tail)) + w.Close() + }() + d := NewDecoder(r) + d.MaxStrLen = maxStrLen + return d.Decode(v) +} + +// Ensure that bencode strings in various places obey the Decoder.MaxStrLen field. +func TestDecodeMaxStrLen(t *testing.T) { + t.Parallel() + c := qt.New(t) + test := func(header, tail string, v interface{}, maxStrLen MaxStrLen) { + strLen := maxStrLen + if strLen == 0 { + strLen = DefaultDecodeMaxStrLen + } + c.Assert(decodeHugeString(t, strLen, header, tail, v, maxStrLen), qt.IsNil) + c.Assert(decodeHugeString(t, strLen+1, header, tail, v, maxStrLen), qt.IsNotNil) + } + test("d%d:", "i0ee", new(interface{}), 0) + test("%d:", "", new(interface{}), DefaultDecodeMaxStrLen) + test("%d:", "", new([]byte), 1) + test("d3:420%d:", "e", new(struct { + Hi []byte `bencode:"420"` + }), 69) +} + +// This is for the "github.com/anacrolix/torrent/metainfo".Info.Private field. 
+func TestDecodeStringIntoBoolPtr(t *testing.T) {
+ var m struct {
+ Private *bool `bencode:"private,omitempty"`
+ }
+ c := qt.New(t)
+ check := func(msg string, expectNil, expectTrue bool) {
+ m.Private = nil
+ c.Check(Unmarshal([]byte(msg), &m), qt.IsNil, qt.Commentf("%q", msg))
+ if expectNil {
+ c.Check(m.Private, qt.IsNil)
+ } else {
+ if c.Check(m.Private, qt.IsNotNil, qt.Commentf("%q", msg)) {
+ c.Check(*m.Private, qt.Equals, expectTrue, qt.Commentf("%q", msg))
+ }
+ }
+ }
+ check("d7:privatei1ee", false, true)
+ check("d7:privatei0ee", false, false)
+ check("d7:privatei42ee", false, true)
+ // A weird case. We could leave the bool unallocated (nil) to indicate the value was bad
+ // (bad serializers aren't uncommon), but that would require reworking the decoder to do it
+ // automatically. If we cared enough we'd write a custom Unmarshaler; if we were worried
+ // enough about performance, this package would warrant a rewrite anyway.
+ check("d7:private0:e", false, false)
+ check("d7:private1:te", false, true)
+ check("d7:private5:falsee", false, false)
+ check("d7:private1:Fe", false, false)
+ check("d7:private11:bunnyfoofooe", false, true)
+} diff --git a/deps/github.com/anacrolix/torrent/bencode/encode.go b/deps/github.com/anacrolix/torrent/bencode/encode.go new file mode 100644 index 0000000..5e80cb1 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/encode.go @@ -0,0 +1,293 @@ +package bencode
+
+import (
+ "io"
+ "math/big"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "sync"
+
+ "github.com/anacrolix/missinggo"
+)
+
+func isEmptyValue(v reflect.Value) bool {
+ return missinggo.IsEmptyValue(v)
+}
+
+type Encoder struct {
+ w io.Writer
+ scratch [64]byte
+}
+
+func (e *Encoder) Encode(v interface{}) (err error) {
+ if v == nil {
+ return
+ }
+ defer func() {
+ if e := recover(); e != nil {
+ if _, ok := e.(runtime.Error); ok {
+ panic(e)
+ }
+ var ok bool
+ err, ok = e.(error)
+ if !ok {
+ panic(e)
+ }
+ }
+ }()
+ e.reflectValue(reflect.ValueOf(v))
+ return nil
+}
+
+type stringValues []reflect.Value
+
+func (sv stringValues) Len() int { return len(sv) }
+func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
+func (sv stringValues) get(i int) string { return sv[i].String() }
+
+func (e *Encoder) write(s []byte) {
+ _, err := e.w.Write(s)
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (e *Encoder) writeString(s string) {
+ for s != "" {
+ n := copy(e.scratch[:], s)
+ s = s[n:]
+ e.write(e.scratch[:n])
+ }
+}
+
+func (e *Encoder) reflectString(s string) {
+ e.writeStringPrefix(int64(len(s)))
+ e.writeString(s)
+}
+
+func (e *Encoder) writeStringPrefix(l int64) {
+ b := strconv.AppendInt(e.scratch[:0], l, 10)
+ e.write(b)
+ e.writeString(":")
+}
+
+func (e *Encoder) reflectByteSlice(s []byte) {
+ e.writeStringPrefix(int64(len(s)))
+ e.write(s)
+}
+
+// Returns true if the value implements the Marshaler interface and marshaling
+// was done successfully.
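+//
+// As a hedged illustration (editor's sketch; the point type is hypothetical,
+// mirroring the dummy type in encode_test.go), a value opts in by
+// implementing MarshalBencode:
+//
+//	type point struct{ x, y int }
+//
+//	func (p point) MarshalBencode() ([]byte, error) {
+//		return []byte(fmt.Sprintf("li%dei%dee", p.x, p.y)), nil // a two-int list
+//	}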
+func (e *Encoder) reflectMarshaler(v reflect.Value) bool { + if !v.Type().Implements(marshalerType) { + if v.Kind() != reflect.Ptr && v.CanAddr() && v.Addr().Type().Implements(marshalerType) { + v = v.Addr() + } else { + return false + } + } + m := v.Interface().(Marshaler) + data, err := m.MarshalBencode() + if err != nil { + panic(&MarshalerError{v.Type(), err}) + } + e.write(data) + return true +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem() + +func (e *Encoder) reflectValue(v reflect.Value) { + if e.reflectMarshaler(v) { + return + } + + if v.Type() == bigIntType { + e.writeString("i") + bi := v.Interface().(big.Int) + e.writeString(bi.String()) + e.writeString("e") + return + } + + switch v.Kind() { + case reflect.Bool: + if v.Bool() { + e.writeString("i1e") + } else { + e.writeString("i0e") + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.writeString("i") + b := strconv.AppendInt(e.scratch[:0], v.Int(), 10) + e.write(b) + e.writeString("e") + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + e.writeString("i") + b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10) + e.write(b) + e.writeString("e") + case reflect.String: + e.reflectString(v.String()) + case reflect.Struct: + e.writeString("d") + for _, ef := range getEncodeFields(v.Type()) { + fieldValue := ef.i(v) + if !fieldValue.IsValid() { + continue + } + if ef.omitEmpty && isEmptyValue(fieldValue) { + continue + } + e.reflectString(ef.tag) + e.reflectValue(fieldValue) + } + e.writeString("e") + case reflect.Map: + if v.Type().Key().Kind() != reflect.String { + panic(&MarshalTypeError{v.Type()}) + } + if v.IsNil() { + e.writeString("de") + break + } + e.writeString("d") + sv := stringValues(v.MapKeys()) + sort.Sort(sv) + for _, key := range sv { + e.reflectString(key.String()) + e.reflectValue(v.MapIndex(key)) + } + e.writeString("e") + case reflect.Slice, reflect.Array: + e.reflectSequence(v) + case reflect.Interface: + e.reflectValue(v.Elem()) + case reflect.Ptr: + if v.IsNil() { + v = reflect.Zero(v.Type().Elem()) + } else { + v = v.Elem() + } + e.reflectValue(v) + default: + panic(&MarshalTypeError{v.Type()}) + } +} + +func (e *Encoder) reflectSequence(v reflect.Value) { + // Use bencode string-type + if v.Type().Elem().Kind() == reflect.Uint8 { + if v.Kind() != reflect.Slice { + // Can't use []byte optimization + if !v.CanAddr() { + e.writeStringPrefix(int64(v.Len())) + for i := 0; i < v.Len(); i++ { + var b [1]byte + b[0] = byte(v.Index(i).Uint()) + e.write(b[:]) + } + return + } + v = v.Slice(0, v.Len()) + } + s := v.Bytes() + e.reflectByteSlice(s) + return + } + if v.IsNil() { + e.writeString("le") + return + } + e.writeString("l") + for i, n := 0, v.Len(); i < n; i++ { + e.reflectValue(v.Index(i)) + } + e.writeString("e") +} + +type encodeField struct { + i func(v reflect.Value) reflect.Value + tag string + omitEmpty bool +} + +type encodeFieldsSortType []encodeField + +func (ef encodeFieldsSortType) Len() int { return len(ef) } +func (ef encodeFieldsSortType) Swap(i, j int) { ef[i], ef[j] = ef[j], ef[i] } +func (ef encodeFieldsSortType) Less(i, j int) bool { return ef[i].tag < ef[j].tag } + +var ( + typeCacheLock sync.RWMutex + encodeFieldsCache = make(map[reflect.Type][]encodeField) +) + +func getEncodeFields(t reflect.Type) []encodeField { + typeCacheLock.RLock() + fs, ok := encodeFieldsCache[t] + typeCacheLock.RUnlock() + if ok { + return fs + } + fs = makeEncodeFields(t) + typeCacheLock.Lock() + defer typeCacheLock.Unlock() + 
encodeFieldsCache[t] = fs + return fs +} + +func makeEncodeFields(t reflect.Type) (fs []encodeField) { + for _i, n := 0, t.NumField(); _i < n; _i++ { + i := _i + f := t.Field(i) + if f.PkgPath != "" { + continue + } + if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + anonEFs := makeEncodeFields(t) + for aefi := range anonEFs { + anonEF := anonEFs[aefi] + bottomField := anonEF + bottomField.i = func(v reflect.Value) reflect.Value { + v = v.Field(i) + if v.Kind() == reflect.Ptr { + if v.IsNil() { + // This will skip serializing this value. + return reflect.Value{} + } + v = v.Elem() + } + return anonEF.i(v) + } + fs = append(fs, bottomField) + } + continue + } + var ef encodeField + ef.i = func(v reflect.Value) reflect.Value { + return v.Field(i) + } + ef.tag = f.Name + + tv := getTag(f.Tag) + if tv.Ignore() { + continue + } + if tv.Key() != "" { + ef.tag = tv.Key() + } + ef.omitEmpty = tv.OmitEmpty() + fs = append(fs, ef) + } + fss := encodeFieldsSortType(fs) + sort.Sort(fss) + return fs +} diff --git a/deps/github.com/anacrolix/torrent/bencode/encode_test.go b/deps/github.com/anacrolix/torrent/bencode/encode_test.go new file mode 100644 index 0000000..b0fabc4 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/encode_test.go @@ -0,0 +1,91 @@ +package bencode + +import ( + "bytes" + "fmt" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" +) + +type random_encode_test struct { + value interface{} + expected string +} + +type random_struct struct { + ABC int `bencode:"abc"` + SkipThisOne string `bencode:"-"` + CDE string +} + +type dummy struct { + a, b, c int +} + +func (d *dummy) MarshalBencode() ([]byte, error) { + var b bytes.Buffer + _, err := fmt.Fprintf(&b, "i%dei%dei%de", d.a+1, d.b+1, d.c+1) + if err != nil { + return nil, err + } + return b.Bytes(), nil +} + +var random_encode_tests = []random_encode_test{ + {int(10), "i10e"}, + {uint(10), "i10e"}, + {"hello, world", "12:hello, world"}, + {true, "i1e"}, + {false, "i0e"}, + {int8(-8), "i-8e"}, + {int16(-16), "i-16e"}, + {int32(32), "i32e"}, + {int64(-64), "i-64e"}, + {uint8(8), "i8e"}, + {uint16(16), "i16e"}, + {uint32(32), "i32e"}, + {uint64(64), "i64e"}, + {random_struct{123, "nono", "hello"}, "d3:CDE5:hello3:abci123ee"}, + {map[string]string{"a": "b", "c": "d"}, "d1:a1:b1:c1:de"}, + {[]byte{1, 2, 3, 4}, "4:\x01\x02\x03\x04"}, + {&[4]byte{1, 2, 3, 4}, "4:\x01\x02\x03\x04"}, + {nil, ""}, + {[]byte{}, "0:"}, + {[]byte(nil), "0:"}, + {"", "0:"}, + {[]int{}, "le"}, + {map[string]int{}, "de"}, + {&dummy{1, 2, 3}, "i2ei3ei4e"}, + {struct { + A *string + }{nil}, "d1:A0:e"}, + {struct { + A *string + }{new(string)}, "d1:A0:e"}, + {struct { + A *string `bencode:",omitempty"` + }{nil}, "de"}, + {struct { + A *string `bencode:",omitempty"` + }{new(string)}, "d1:A0:e"}, + {bigIntFromString("62208002200000000000"), "i62208002200000000000e"}, + {*bigIntFromString("62208002200000000000"), "i62208002200000000000e"}, +} + +func bigIntFromString(s string) *big.Int { + bi, ok := new(big.Int).SetString(s, 10) + if !ok { + panic(s) + } + return bi +} + +func TestRandomEncode(t *testing.T) { + for _, test := range random_encode_tests { + data, err := Marshal(test.value) + assert.NoError(t, err, "%s", test) + assert.EqualValues(t, test.expected, string(data)) + } +} diff --git a/deps/github.com/anacrolix/torrent/bencode/fuzz_test.go b/deps/github.com/anacrolix/torrent/bencode/fuzz_test.go new file mode 100644 index 0000000..d0dce71 --- /dev/null +++ 
b/deps/github.com/anacrolix/torrent/bencode/fuzz_test.go @@ -0,0 +1,53 @@ +//go:build go1.18
+// +build go1.18
+
+package bencode
+
+import (
+ "math/big"
+ "testing"
+
+ qt "github.com/frankban/quicktest"
+ "github.com/google/go-cmp/cmp"
+)
+
+var bencodeInterfaceChecker = qt.CmpEquals(cmp.Comparer(func(a, b *big.Int) bool {
+ return a.Cmp(b) == 0
+}))
+
+func Fuzz(f *testing.F) {
+ for _, ret := range random_encode_tests {
+ f.Add([]byte(ret.expected))
+ }
+ f.Fuzz(func(t *testing.T, b []byte) {
+ c := qt.New(t)
+ var d interface{}
+ err := Unmarshal(b, &d)
+ if err != nil {
+ t.Skip()
+ }
+ b0, err := Marshal(d)
+ c.Assert(err, qt.IsNil)
+ var d0 interface{}
+ err = Unmarshal(b0, &d0)
+ c.Assert(err, qt.IsNil)
+ c.Assert(d0, bencodeInterfaceChecker, d)
+ })
+}
+
+func FuzzInterfaceRoundTrip(f *testing.F) {
+ for _, ret := range random_encode_tests {
+ f.Add([]byte(ret.expected))
+ }
+ f.Fuzz(func(t *testing.T, b []byte) {
+ c := qt.New(t)
+ var d interface{}
+ err := Unmarshal(b, &d)
+ if err != nil {
+ c.Skip(err)
+ }
+ b0, err := Marshal(d)
+ c.Assert(err, qt.IsNil)
+ c.Check(b0, qt.DeepEquals, b)
+ })
+} diff --git a/deps/github.com/anacrolix/torrent/bencode/misc.go b/deps/github.com/anacrolix/torrent/bencode/misc.go new file mode 100644 index 0000000..6690008 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/misc.go @@ -0,0 +1,11 @@ +package bencode
+
+import (
+ "reflect"
+)
+
+// The reflect.Type of an interface can only be obtained via a typed nil
+// pointer and Elem(); Go has no direct type literal for it.
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+) diff --git a/deps/github.com/anacrolix/torrent/bencode/scanner.go b/deps/github.com/anacrolix/torrent/bencode/scanner.go new file mode 100644 index 0000000..967d5a5 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/scanner.go @@ -0,0 +1,38 @@ +package bencode
+
+import (
+ "errors"
+ "io"
+)
+
+// Implements io.ByteScanner over io.Reader, for use in Decoder, to ensure
+// that as little of the undecoded input Reader as possible is consumed.
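+//
+// Illustrative behaviour (editor's sketch, not part of the original source):
+//
+//	s := &scanner{r: strings.NewReader("i42e")}
+//	b, _ := s.ReadByte() // b == 'i'
+//	_ = s.UnreadByte()   // mark the buffered byte as unread
+//	b, _ = s.ReadByte()  // b == 'i' again; no extra read from the Reader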
+type scanner struct { + r io.Reader + b [1]byte // Buffer for ReadByte + unread bool // True if b has been unread, and so should be returned next +} + +func (me *scanner) Read(b []byte) (int, error) { + return me.r.Read(b) +} + +func (me *scanner) ReadByte() (byte, error) { + if me.unread { + me.unread = false + return me.b[0], nil + } + n, err := me.r.Read(me.b[:]) + if n == 1 { + err = nil + } + return me.b[0], err +} + +func (me *scanner) UnreadByte() error { + if me.unread { + return errors.New("byte already unread") + } + me.unread = true + return nil +} diff --git a/deps/github.com/anacrolix/torrent/bencode/string.go b/deps/github.com/anacrolix/torrent/bencode/string.go new file mode 100644 index 0000000..0c6e307 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/string.go @@ -0,0 +1,9 @@ +//go:build !go1.20 + +package bencode + +import "unsafe" + +func bytesAsString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} diff --git a/deps/github.com/anacrolix/torrent/bencode/string_go120.go b/deps/github.com/anacrolix/torrent/bencode/string_go120.go new file mode 100644 index 0000000..1688d9b --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/string_go120.go @@ -0,0 +1,9 @@ +//go:build go1.20 + +package bencode + +import "unsafe" + +func bytesAsString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} diff --git a/deps/github.com/anacrolix/torrent/bencode/tags.go b/deps/github.com/anacrolix/torrent/bencode/tags.go new file mode 100644 index 0000000..d4adeb2 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/tags.go @@ -0,0 +1,44 @@ +package bencode + +import ( + "reflect" + "strings" +) + +func getTag(st reflect.StructTag) tag { + return parseTag(st.Get("bencode")) +} + +type tag []string + +func parseTag(tagStr string) tag { + return strings.Split(tagStr, ",") +} + +func (me tag) Ignore() bool { + return me[0] == "-" +} + +func (me tag) Key() string { + return me[0] +} + +func (me tag) HasOpt(opt string) bool { + if len(me) < 1 { + return false + } + for _, s := range me[1:] { + if s == opt { + return true + } + } + return false +} + +func (me tag) OmitEmpty() bool { + return me.HasOpt("omitempty") +} + +func (me tag) IgnoreUnmarshalTypeError() bool { + return me.HasOpt("ignore_unmarshal_type_error") +} diff --git a/deps/github.com/anacrolix/torrent/bencode/testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent b/deps/github.com/anacrolix/torrent/bencode/testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent new file mode 100644 index 0000000..9ce7748 Binary files /dev/null and b/deps/github.com/anacrolix/torrent/bencode/testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent differ diff --git a/deps/github.com/anacrolix/torrent/bencode/testdata/continuum.torrent b/deps/github.com/anacrolix/torrent/bencode/testdata/continuum.torrent new file mode 100644 index 0000000..ac15b75 Binary files /dev/null and b/deps/github.com/anacrolix/torrent/bencode/testdata/continuum.torrent differ diff --git a/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/Fuzz/65cfcaf31066e15825ace0f8e03701b8729a159063a9ca0884df18a5c9499715 b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/Fuzz/65cfcaf31066e15825ace0f8e03701b8729a159063a9ca0884df18a5c9499715 new file mode 100644 index 0000000..127ed5f --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/Fuzz/65cfcaf31066e15825ace0f8e03701b8729a159063a9ca0884df18a5c9499715 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("i62208002200000000:00{݃y\u007f 
m.\x16\t\fZL\x18'\xad\xe7\xc4e") diff --git a/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/Fuzz/9d85a0638af39a02b96933a448414897e30e595bce25ef2685aaf459f06afaf7 b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/Fuzz/9d85a0638af39a02b96933a448414897e30e595bce25ef2685aaf459f06afaf7 new file mode 100644 index 0000000..d5a0ac5 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/Fuzz/9d85a0638af39a02b96933a448414897e30e595bce25ef2685aaf459f06afaf7 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("0000000000000000000060000000000000D000000000:0000000000000000000000000000000000000000000000000000000000000000000000000000000000\xa1\xcc!\xc0\x04\a^.000") diff --git a/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/1d7fd8d6b4e9380abbaa373f173a59a4d4418d23d3db9af8abd3ea5412c629c5 b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/1d7fd8d6b4e9380abbaa373f173a59a4d4418d23d3db9af8abd3ea5412c629c5 new file mode 100644 index 0000000..87fcc3d --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/1d7fd8d6b4e9380abbaa373f173a59a4d4418d23d3db9af8abd3ea5412c629c5 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("00:") diff --git a/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/2674fedd58e056c9322ff8ed4bd0b23e2d9080499038d4353739f5c81b05fc0a b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/2674fedd58e056c9322ff8ed4bd0b23e2d9080499038d4353739f5c81b05fc0a new file mode 100644 index 0000000..903e02e --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/2674fedd58e056c9322ff8ed4bd0b23e2d9080499038d4353739f5c81b05fc0a @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("d3:000e") diff --git a/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/321f4f280d23ac90ccaf7894a9106ad601e23fd484747898394a12bddba90615 b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/321f4f280d23ac90ccaf7894a9106ad601e23fd484747898394a12bddba90615 new file mode 100644 index 0000000..92d0101 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/321f4f280d23ac90ccaf7894a9106ad601e23fd484747898394a12bddba90615 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("i+0e") diff --git a/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/49dbd4b139383deb718477bee049320c697e6e7ae547e0a4c4ebd4c2cdd25c1b b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/49dbd4b139383deb718477bee049320c697e6e7ae547e0a4c4ebd4c2cdd25c1b new file mode 100644 index 0000000..b2ace7b --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/49dbd4b139383deb718477bee049320c697e6e7ae547e0a4c4ebd4c2cdd25c1b @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("i00e") diff --git a/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/808391fc9a93d89909205a5216675a7e1b7b8ef7b4e4d80ec7b7b5dce6dbbb38 b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/808391fc9a93d89909205a5216675a7e1b7b8ef7b4e4d80ec7b7b5dce6dbbb38 new file mode 100644 index 0000000..ed2aa46 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/808391fc9a93d89909205a5216675a7e1b7b8ef7b4e4d80ec7b7b5dce6dbbb38 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("i-0e") diff --git 
a/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/b83176c6cec6b92f5c66774ae105efbc87c9aee44b9a55dba7f1789d8d862f45 b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/b83176c6cec6b92f5c66774ae105efbc87c9aee44b9a55dba7f1789d8d862f45 new file mode 100644 index 0000000..60cc554 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/b83176c6cec6b92f5c66774ae105efbc87c9aee44b9a55dba7f1789d8d862f45 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("d3:A005:000003:000i0ee") diff --git a/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/c73f26cbd996104c4e39ce4998a08e90a5c437df90e68caeea0650ee3c7e7b42 b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/c73f26cbd996104c4e39ce4998a08e90a5c437df90e68caeea0650ee3c7e7b42 new file mode 100644 index 0000000..7dcf27e --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/c73f26cbd996104c4e39ce4998a08e90a5c437df90e68caeea0650ee3c7e7b42 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("1:") diff --git a/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/eef53fca91deb00d4e30f4f59e17e92d2936cda9f4b260994a830ec27cfb88c3 b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/eef53fca91deb00d4e30f4f59e17e92d2936cda9f4b260994a830ec27cfb88c3 new file mode 100644 index 0000000..384fff7 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bencode/testdata/fuzz/FuzzInterfaceRoundTrip/eef53fca91deb00d4e30f4f59e17e92d2936cda9f4b260994a830ec27cfb88c3 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("d10000000000") diff --git a/deps/github.com/anacrolix/torrent/bep40.go b/deps/github.com/anacrolix/torrent/bep40.go new file mode 100644 index 0000000..9a64355 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bep40.go @@ -0,0 +1,85 @@ +package torrent + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + "net" +) + +var table = crc32.MakeTable(crc32.Castagnoli) + +type peerPriority = uint32 + +func sameSubnet(ones, bits int, a, b net.IP) bool { + mask := net.CIDRMask(ones, bits) + return a.Mask(mask).Equal(b.Mask(mask)) +} + +func ipv4Mask(a, b net.IP) net.IPMask { + if !sameSubnet(16, 32, a, b) { + return net.IPv4Mask(0xff, 0xff, 0x55, 0x55) + } + if !sameSubnet(24, 32, a, b) { + return net.IPv4Mask(0xff, 0xff, 0xff, 0x55) + } + return net.IPv4Mask(0xff, 0xff, 0xff, 0xff) +} + +func mask(prefix, bytes int) net.IPMask { + ret := make(net.IPMask, bytes) + for i := range ret { + ret[i] = 0x55 + } + for i := 0; i < prefix; i++ { + ret[i] = 0xff + } + return ret +} + +func ipv6Mask(a, b net.IP) net.IPMask { + for i := 6; i <= 16; i++ { + if !sameSubnet(i*8, 128, a, b) { + return mask(i, 16) + } + } + panic(fmt.Sprintf("%s %s", a, b)) +} + +func bep40PriorityBytes(a, b IpPort) ([]byte, error) { + if a.IP.Equal(b.IP) { + var ret [4]byte + binary.BigEndian.PutUint16(ret[0:2], a.Port) + binary.BigEndian.PutUint16(ret[2:4], b.Port) + return ret[:], nil + } + if a4, b4 := a.IP.To4(), b.IP.To4(); a4 != nil && b4 != nil { + m := ipv4Mask(a.IP, b.IP) + return append(a4.Mask(m), b4.Mask(m)...), nil + } + if a6, b6 := a.IP.To16(), b.IP.To16(); a6 != nil && b6 != nil { + m := ipv6Mask(a.IP, b.IP) + return append(a6.Mask(m), b6.Mask(m)...), nil + } + return nil, errors.New("incomparable IPs") +} + +func bep40Priority(a, b IpPort) (peerPriority, error) { + bs, err := bep40PriorityBytes(a, b) + if err != nil { + return 0, err + } + i 
:= len(bs) / 2 + _a, _b := bs[:i], bs[i:] + if bytes.Compare(_a, _b) > 0 { + bs = append(_b, _a...) + } + return crc32.Checksum(bs, table), nil +} + +func bep40PriorityIgnoreError(a, b IpPort) peerPriority { + prio, _ := bep40Priority(a, b) + return prio +} diff --git a/deps/github.com/anacrolix/torrent/bep40_test.go b/deps/github.com/anacrolix/torrent/bep40_test.go new file mode 100644 index 0000000..48d5fdd --- /dev/null +++ b/deps/github.com/anacrolix/torrent/bep40_test.go @@ -0,0 +1,34 @@ +package torrent + +import ( + "net" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBep40Priority(t *testing.T) { + assert.EqualValues(t, peerPriority(0xec2d7224), bep40PriorityIgnoreError( + IpPort{IP: net.ParseIP("123.213.32.10"), Port: 0}, + IpPort{IP: net.ParseIP("98.76.54.32"), Port: 0}, + )) + assert.EqualValues(t, peerPriority(0xec2d7224), bep40PriorityIgnoreError( + IpPort{IP: net.ParseIP("98.76.54.32"), Port: 0}, + IpPort{IP: net.ParseIP("123.213.32.10"), Port: 0}, + )) + assert.Equal(t, peerPriority(0x99568189), bep40PriorityIgnoreError( + IpPort{IP: net.ParseIP("123.213.32.10"), Port: 0}, + IpPort{IP: net.ParseIP("123.213.32.234"), Port: 0}, + )) + assert.Equal(t, peerPriority(0x2b41d456), bep40PriorityIgnoreError( + IpPort{IP: net.ParseIP("206.248.98.111"), Port: 0}, + IpPort{IP: net.ParseIP("142.147.89.224"), Port: 0}, + )) + assert.EqualValues(t, "\x00\x00\x00\x00", func() []byte { + b, _ := bep40PriorityBytes( + IpPort{IP: net.ParseIP("123.213.32.234"), Port: 0}, + IpPort{IP: net.ParseIP("123.213.32.234"), Port: 0}, + ) + return b + }()) +} diff --git a/deps/github.com/anacrolix/torrent/callbacks.go b/deps/github.com/anacrolix/torrent/callbacks.go new file mode 100644 index 0000000..f9ba131 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/callbacks.go @@ -0,0 +1,40 @@ +package torrent + +import ( + "github.com/anacrolix/torrent/mse" + pp "github.com/anacrolix/torrent/peer_protocol" +) + +// These are called synchronously, and do not pass ownership of arguments (do not expect to retain +// data after returning from the callback). The Client and other locks may still be held. nil +// functions are not called. +type Callbacks struct { + // Called after a peer connection completes the BitTorrent handshake. The Client lock is not + // held. + CompletedHandshake func(*PeerConn, InfoHash) + ReadMessage func(*PeerConn, *pp.Message) + ReadExtendedHandshake func(*PeerConn, *pp.ExtendedHandshakeMessage) + PeerConnClosed func(*PeerConn) + + // Provides secret keys to be tried against incoming encrypted connections. 
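+ //
+ // A hedged wiring sketch (editor's illustration; mySecretKeys is
+ // hypothetical, and f follows the mse.SecretKeyIter callback shape):
+ //
+ //	cfg.Callbacks.ReceiveEncryptedHandshakeSkeys = func(f func(skey []byte) bool) {
+ //		for _, skey := range mySecretKeys {
+ //			if !f(skey) { // callback returns false to stop iteration
+ //				break
+ //			}
+ //		}
+ //	}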
+ ReceiveEncryptedHandshakeSkeys mse.SecretKeyIter + + ReceivedUsefulData []func(ReceivedUsefulDataEvent) + ReceivedRequested []func(PeerMessageEvent) + DeletedRequest []func(PeerRequestEvent) + SentRequest []func(PeerRequestEvent) + PeerClosed []func(*Peer) + NewPeer []func(*Peer) +} + +type ReceivedUsefulDataEvent = PeerMessageEvent + +type PeerMessageEvent struct { + Peer *Peer + Message *pp.Message +} + +type PeerRequestEvent struct { + Peer *Peer + Request +} diff --git a/deps/github.com/anacrolix/torrent/client-nowasm_test.go b/deps/github.com/anacrolix/torrent/client-nowasm_test.go new file mode 100644 index 0000000..9b93139 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/client-nowasm_test.go @@ -0,0 +1,71 @@ +//go:build !wasm +// +build !wasm + +package torrent + +import ( + "os" + "testing" + + qt "github.com/frankban/quicktest" + "github.com/stretchr/testify/require" + + "github.com/anacrolix/torrent/internal/testutil" + "github.com/anacrolix/torrent/storage" +) + +func TestBoltPieceCompletionClosedWhenClientClosed(t *testing.T) { + cfg := TestingConfig(t) + pc, err := storage.NewBoltPieceCompletion(cfg.DataDir) + require.NoError(t, err) + ci := storage.NewFileWithCompletion(cfg.DataDir, pc) + defer ci.Close() + cfg.DefaultStorage = ci + cl, err := NewClient(cfg) + require.NoError(t, err) + cl.Close() + // And again, https://github.com/anacrolix/torrent/issues/158 + cl, err = NewClient(cfg) + require.NoError(t, err) + cl.Close() +} + +func TestIssue335(t *testing.T) { + dir, mi := testutil.GreetingTestTorrent() + defer func() { + err := os.RemoveAll(dir) + if err != nil { + t.Fatalf("removing torrent dummy data dir: %v", err) + } + }() + logErr := func(f func() error, msg string) { + err := f() + t.Logf("%s: %v", msg, err) + if err != nil { + t.Fail() + } + } + cfg := TestingConfig(t) + cfg.Seed = false + cfg.Debug = true + cfg.DataDir = dir + comp, err := storage.NewBoltPieceCompletion(dir) + c := qt.New(t) + c.Assert(err, qt.IsNil) + defer logErr(comp.Close, "closing bolt piece completion") + mmapStorage := storage.NewMMapWithCompletion(dir, comp) + defer logErr(mmapStorage.Close, "closing mmap storage") + cfg.DefaultStorage = mmapStorage + cl, err := NewClient(cfg) + c.Assert(err, qt.IsNil) + defer cl.Close() + tor, new, err := cl.AddTorrentSpec(TorrentSpecFromMetaInfo(mi)) + c.Assert(err, qt.IsNil) + c.Assert(new, qt.IsTrue) + c.Assert(cl.WaitAll(), qt.IsTrue) + tor.Drop() + _, new, err = cl.AddTorrentSpec(TorrentSpecFromMetaInfo(mi)) + c.Assert(err, qt.IsNil) + c.Assert(new, qt.IsTrue) + c.Assert(cl.WaitAll(), qt.IsTrue) +} diff --git a/deps/github.com/anacrolix/torrent/client-stats.go b/deps/github.com/anacrolix/torrent/client-stats.go new file mode 100644 index 0000000..bfa6994 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/client-stats.go @@ -0,0 +1,52 @@ +package torrent + +import ( + "net/netip" + + g "github.com/anacrolix/generics" +) + +func setAdd[K comparable](m *map[K]struct{}, elem K) { + g.MakeMapIfNilAndSet(m, elem, struct{}{}) +} + +type clientHolepunchAddrSets struct { + undialableWithoutHolepunch map[netip.AddrPort]struct{} + undialableWithoutHolepunchDialedAfterHolepunchConnect map[netip.AddrPort]struct{} + dialableOnlyAfterHolepunch map[netip.AddrPort]struct{} + dialedSuccessfullyAfterHolepunchConnect map[netip.AddrPort]struct{} + probablyOnlyConnectedDueToHolepunch map[netip.AddrPort]struct{} + accepted map[netip.AddrPort]struct{} +} + +type ClientStats struct { + ConnStats + + // Ongoing outgoing dial attempts. 
There may be more than one dial going on per peer address due + // to hole-punch connect requests. The total may not match the sum of attempts for all Torrents + // if a Torrent is dropped while there are outstanding dials. + ActiveHalfOpenAttempts int + + NumPeersUndialableWithoutHolepunch int + // Number of unique peer addresses that were dialed after receiving a holepunch connect message, + // that have previously been undialable without any hole-punching attempts. + NumPeersUndialableWithoutHolepunchDialedAfterHolepunchConnect int + // Number of unique peer addresses that were successfully dialed and connected after a holepunch + // connect message and previously failing to connect without holepunching. + NumPeersDialableOnlyAfterHolepunch int + NumPeersDialedSuccessfullyAfterHolepunchConnect int + NumPeersProbablyOnlyConnectedDueToHolepunch int +} + +func (cl *Client) statsLocked() (stats ClientStats) { + stats.ConnStats = cl.connStats.Copy() + stats.ActiveHalfOpenAttempts = cl.numHalfOpen + + stats.NumPeersUndialableWithoutHolepunch = len(cl.undialableWithoutHolepunch) + stats.NumPeersUndialableWithoutHolepunchDialedAfterHolepunchConnect = len(cl.undialableWithoutHolepunchDialedAfterHolepunchConnect) + stats.NumPeersDialableOnlyAfterHolepunch = len(cl.dialableOnlyAfterHolepunch) + stats.NumPeersDialedSuccessfullyAfterHolepunchConnect = len(cl.dialedSuccessfullyAfterHolepunchConnect) + stats.NumPeersProbablyOnlyConnectedDueToHolepunch = len(cl.probablyOnlyConnectedDueToHolepunch) + + return +} diff --git a/deps/github.com/anacrolix/torrent/client.go b/deps/github.com/anacrolix/torrent/client.go new file mode 100644 index 0000000..62c0d2b --- /dev/null +++ b/deps/github.com/anacrolix/torrent/client.go @@ -0,0 +1,1792 @@ +package torrent + +import ( + "bufio" + "context" + "crypto/rand" + "crypto/sha1" + "encoding/binary" + "encoding/hex" + "errors" + "expvar" + "fmt" + "io" + "math" + "net" + "net/http" + "net/netip" + "sort" + "strconv" + "time" + + "github.com/anacrolix/chansync" + "github.com/anacrolix/chansync/events" + "github.com/anacrolix/dht/v2" + "github.com/anacrolix/dht/v2/krpc" + . "github.com/anacrolix/generics" + g "github.com/anacrolix/generics" + "github.com/anacrolix/log" + "github.com/anacrolix/missinggo/perf" + "github.com/anacrolix/missinggo/v2" + "github.com/anacrolix/missinggo/v2/bitmap" + "github.com/anacrolix/missinggo/v2/pproffd" + "github.com/anacrolix/sync" + "github.com/davecgh/go-spew/spew" + "github.com/dustin/go-humanize" + gbtree "github.com/google/btree" + "github.com/pion/datachannel" + + "github.com/anacrolix/torrent/bencode" + "github.com/anacrolix/torrent/internal/check" + "github.com/anacrolix/torrent/internal/limiter" + "github.com/anacrolix/torrent/iplist" + "github.com/anacrolix/torrent/metainfo" + "github.com/anacrolix/torrent/mse" + pp "github.com/anacrolix/torrent/peer_protocol" + utHolepunch "github.com/anacrolix/torrent/peer_protocol/ut-holepunch" + request_strategy "github.com/anacrolix/torrent/request-strategy" + "github.com/anacrolix/torrent/storage" + "github.com/anacrolix/torrent/tracker" + "github.com/anacrolix/torrent/types/infohash" + "github.com/anacrolix/torrent/webtorrent" +) + +// Clients contain zero or more Torrents. A Client manages a blocklist, the +// TCP/UDP protocol ports, and DHT as desired. +type Client struct { + // An aggregate of stats over all connections. First in struct to ensure 64-bit alignment of + // fields. See #262. 
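+ // (On 32-bit platforms, sync/atomic 64-bit operations require their
+ // operands to be 64-bit aligned; the first word of an allocated struct is
+ // guaranteed to be, hence this field's position.)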
+ connStats ConnStats
+
+ _mu lockWithDeferreds
+ event sync.Cond
+ closed chansync.SetOnce
+
+ config *ClientConfig
+ logger log.Logger
+
+ peerID PeerID
+ defaultStorage *storage.Client
+ onClose []func()
+ dialers []Dialer
+ listeners []Listener
+ dhtServers []DhtServer
+ ipBlockList iplist.Ranger
+
+ // Set of addresses that have our client ID. This will intentionally
+ // include ourselves if we end up trying to connect to our own address
+ // through legitimate channels.
+ dopplegangerAddrs map[string]struct{}
+ badPeerIPs map[netip.Addr]struct{}
+ torrents map[InfoHash]*Torrent
+ pieceRequestOrder map[interface{}]*request_strategy.PieceRequestOrder
+
+ acceptLimiter map[ipStr]int
+ numHalfOpen int
+
+ websocketTrackers websocketTrackers
+
+ activeAnnounceLimiter limiter.Instance
+ httpClient *http.Client
+
+ clientHolepunchAddrSets
+}
+
+type ipStr string
+
+func (cl *Client) BadPeerIPs() (ips []string) {
+ cl.rLock()
+ ips = cl.badPeerIPsLocked()
+ cl.rUnlock()
+ return
+}
+
+func (cl *Client) badPeerIPsLocked() (ips []string) {
+ ips = make([]string, len(cl.badPeerIPs))
+ i := 0
+ for k := range cl.badPeerIPs {
+ ips[i] = k.String()
+ i += 1
+ }
+ return
+}
+
+func (cl *Client) PeerID() PeerID {
+ return cl.peerID
+}
+
+// Returns the port number for the first listener that has one. No longer assumes that all port
+// numbers are the same, due to support for custom listeners. Returns zero if no port number is
+// found.
+func (cl *Client) LocalPort() (port int) {
+ for i := 0; i < len(cl.listeners); i += 1 {
+ if port = addrPortOrZero(cl.listeners[i].Addr()); port != 0 {
+ return
+ }
+ }
+ return
+}
+
+func writeDhtServerStatus(w io.Writer, s DhtServer) {
+ dhtStats := s.Stats()
+ fmt.Fprintf(w, " ID: %x\n", s.ID())
+ spew.Fdump(w, dhtStats)
+}
+
+// Writes out a human-readable status of the client, such as for writing to an
+// HTTP status page.
+func (cl *Client) WriteStatus(_w io.Writer) {
+ cl.rLock()
+ defer cl.rUnlock()
+ w := bufio.NewWriter(_w)
+ defer w.Flush()
+ fmt.Fprintf(w, "Listen port: %d\n", cl.LocalPort())
+ fmt.Fprintf(w, "Peer ID: %+q\n", cl.PeerID())
+ fmt.Fprintf(w, "Extension bits: %v\n", cl.config.Extensions)
+ fmt.Fprintf(w, "Announce key: %x\n", cl.announceKey())
+ fmt.Fprintf(w, "Banned IPs: %d\n", len(cl.badPeerIPsLocked()))
+ cl.eachDhtServer(func(s DhtServer) {
+ fmt.Fprintf(w, "%s DHT server at %s:\n", s.Addr().Network(), s.Addr().String())
+ writeDhtServerStatus(w, s)
+ })
+ dumpStats(w, cl.statsLocked())
+ torrentsSlice := cl.torrentsAsSlice()
+ fmt.Fprintf(w, "# Torrents: %d\n", len(torrentsSlice))
+ fmt.Fprintln(w)
+ sort.Slice(torrentsSlice, func(l, r int) bool {
+ return torrentsSlice[l].infoHash.AsString() < torrentsSlice[r].infoHash.AsString()
+ })
+ for _, t := range torrentsSlice {
+ if t.name() == "" {
+ fmt.Fprint(w, "<unknown name>")
+ } else {
+ fmt.Fprint(w, t.name())
+ }
+ fmt.Fprint(w, "\n")
+ if t.info != nil {
+ fmt.Fprintf(
+ w,
+ "%f%% of %d bytes (%s)",
+ 100*(1-float64(t.bytesMissingLocked())/float64(t.info.TotalLength())),
+ t.length(),
+ humanize.Bytes(uint64(t.length())))
+ } else {
+ w.WriteString("<missing metainfo>")
+ }
+ fmt.Fprint(w, "\n")
+ t.writeStatus(w)
+ fmt.Fprintln(w)
+ }
+}
+
+func (cl *Client) initLogger() {
+ logger := cl.config.Logger
+ if logger.IsZero() {
+ logger = log.Default
+ }
+ if cl.config.Debug {
+ logger = logger.FilterLevel(log.Debug)
+ }
+ cl.logger = logger.WithValues(cl)
+}
+
+func (cl *Client) announceKey() int32 {
+ return int32(binary.BigEndian.Uint32(cl.peerID[16:20]))
+}
+
+// Initializes a bare minimum Client.
*Client and *ClientConfig must not be nil.
+func (cl *Client) init(cfg *ClientConfig) {
+ cl.config = cfg
+ g.MakeMap(&cl.dopplegangerAddrs)
+ cl.torrents = make(map[metainfo.Hash]*Torrent)
+ cl.activeAnnounceLimiter.SlotsPerKey = 2
+ cl.event.L = cl.locker()
+ cl.ipBlockList = cfg.IPBlocklist
+ cl.httpClient = &http.Client{
+ Transport: cfg.WebTransport,
+ }
+ if cl.httpClient.Transport == nil {
+ cl.httpClient.Transport = &http.Transport{
+ Proxy: cfg.HTTPProxy,
+ DialContext: cfg.HTTPDialContext,
+ // I think this value was observed from some webseeds. It seems reasonable to extend it
+ // to other uses of HTTP from the client.
+ MaxConnsPerHost: 10,
+ }
+ }
+}
+
+func NewClient(cfg *ClientConfig) (cl *Client, err error) {
+ if cfg == nil {
+ cfg = NewDefaultClientConfig()
+ cfg.ListenPort = 0
+ }
+ var client Client
+ client.init(cfg)
+ cl = &client
+ go cl.acceptLimitClearer()
+ cl.initLogger()
+ defer func() {
+ if err != nil {
+ cl.Close()
+ cl = nil
+ }
+ }()
+
+ storageImpl := cfg.DefaultStorage
+ if storageImpl == nil {
+ // We'd use mmap by default but HFS+ doesn't support sparse files.
+ storageImplCloser := storage.NewFile(cfg.DataDir)
+ cl.onClose = append(cl.onClose, func() {
+ if err := storageImplCloser.Close(); err != nil {
+ cl.logger.Printf("error closing default storage: %s", err)
+ }
+ })
+ storageImpl = storageImplCloser
+ }
+ cl.defaultStorage = storage.NewClient(storageImpl)
+
+ if cfg.PeerID != "" {
+ missinggo.CopyExact(&cl.peerID, cfg.PeerID)
+ } else {
+ o := copy(cl.peerID[:], cfg.Bep20)
+ _, err = rand.Read(cl.peerID[o:])
+ if err != nil {
+ panic("error generating peer id")
+ }
+ }
+
+ sockets, err := listenAll(cl.listenNetworks(), cl.config.ListenHost, cl.config.ListenPort, cl.firewallCallback, cl.logger)
+ if err != nil {
+ return
+ }
+
+ // Check for panics.
+ cl.LocalPort()
+
+ for _, _s := range sockets {
+ s := _s // Capture the range variable for the closures below (pre-Go 1.22 loop semantics).
+ cl.onClose = append(cl.onClose, func() { go s.Close() }) + if peerNetworkEnabled(parseNetworkString(s.Addr().Network()), cl.config) { + cl.dialers = append(cl.dialers, s) + cl.listeners = append(cl.listeners, s) + if cl.config.AcceptPeerConnections { + go cl.acceptConnections(s) + } + } + } + + go cl.forwardPort() + if !cfg.NoDHT { + for _, s := range sockets { + if pc, ok := s.(net.PacketConn); ok { + ds, err := cl.NewAnacrolixDhtServer(pc) + if err != nil { + panic(err) + } + cl.dhtServers = append(cl.dhtServers, AnacrolixDhtServerWrapper{ds}) + cl.onClose = append(cl.onClose, func() { ds.Close() }) + } + } + } + + cl.websocketTrackers = websocketTrackers{ + PeerId: cl.peerID, + Logger: cl.logger, + GetAnnounceRequest: func(event tracker.AnnounceEvent, infoHash [20]byte) (tracker.AnnounceRequest, error) { + cl.lock() + defer cl.unlock() + t, ok := cl.torrents[infoHash] + if !ok { + return tracker.AnnounceRequest{}, errors.New("torrent not tracked by client") + } + return t.announceRequest(event), nil + }, + Proxy: cl.config.HTTPProxy, + WebsocketTrackerHttpHeader: cl.config.WebsocketTrackerHttpHeader, + ICEServers: cl.config.ICEServers, + DialContext: cl.config.TrackerDialContext, + OnConn: func(dc datachannel.ReadWriteCloser, dcc webtorrent.DataChannelContext) { + cl.lock() + defer cl.unlock() + t, ok := cl.torrents[dcc.InfoHash] + if !ok { + cl.logger.WithDefaultLevel(log.Warning).Printf( + "got webrtc conn for unloaded torrent with infohash %x", + dcc.InfoHash, + ) + dc.Close() + return + } + go t.onWebRtcConn(dc, dcc) + }, + } + + return +} + +func (cl *Client) AddDhtServer(d DhtServer) { + cl.dhtServers = append(cl.dhtServers, d) +} + +// Adds a Dialer for outgoing connections. All Dialers are used when attempting to connect to a +// given address for any Torrent. +func (cl *Client) AddDialer(d Dialer) { + cl.lock() + defer cl.unlock() + cl.dialers = append(cl.dialers, d) + for _, t := range cl.torrents { + t.openNewConns() + } +} + +func (cl *Client) Listeners() []Listener { + return cl.listeners +} + +// Registers a Listener, and starts Accepting on it. You must Close Listeners provided this way +// yourself. +func (cl *Client) AddListener(l Listener) { + cl.listeners = append(cl.listeners, l) + if cl.config.AcceptPeerConnections { + go cl.acceptConnections(l) + } +} + +func (cl *Client) firewallCallback(net.Addr) bool { + cl.rLock() + block := !cl.wantConns() || !cl.config.AcceptPeerConnections + cl.rUnlock() + if block { + torrent.Add("connections firewalled", 1) + } else { + torrent.Add("connections not firewalled", 1) + } + return block +} + +func (cl *Client) listenOnNetwork(n network) bool { + if n.Ipv4 && cl.config.DisableIPv4 { + return false + } + if n.Ipv6 && cl.config.DisableIPv6 { + return false + } + if n.Tcp && cl.config.DisableTCP { + return false + } + if n.Udp && cl.config.DisableUTP && cl.config.NoDHT { + return false + } + return true +} + +func (cl *Client) listenNetworks() (ns []network) { + for _, n := range allPeerNetworks { + if cl.listenOnNetwork(n) { + ns = append(ns, n) + } + } + return +} + +// Creates an anacrolix/dht Server, as would be done internally in NewClient, for the given conn. 
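+//
+// Hedged usage sketch (editor's illustration; the listen address is arbitrary):
+//
+//	pc, _ := net.ListenPacket("udp", ":0")
+//	ds, err := cl.NewAnacrolixDhtServer(pc)
+//	if err == nil {
+//		cl.AddDhtServer(AnacrolixDhtServerWrapper{ds})
+//	}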
+func (cl *Client) NewAnacrolixDhtServer(conn net.PacketConn) (s *dht.Server, err error) { + logger := cl.logger.WithNames("dht", conn.LocalAddr().String()) + cfg := dht.ServerConfig{ + IPBlocklist: cl.ipBlockList, + Conn: conn, + OnAnnouncePeer: cl.onDHTAnnouncePeer, + PublicIP: func() net.IP { + if connIsIpv6(conn) && cl.config.PublicIp6 != nil { + return cl.config.PublicIp6 + } + return cl.config.PublicIp4 + }(), + StartingNodes: cl.config.DhtStartingNodes(conn.LocalAddr().Network()), + OnQuery: cl.config.DHTOnQuery, + Logger: logger, + } + if f := cl.config.ConfigureAnacrolixDhtServer; f != nil { + f(&cfg) + } + s, err = dht.NewServer(&cfg) + if err == nil { + go s.TableMaintainer() + } + return +} + +func (cl *Client) Closed() events.Done { + return cl.closed.Done() +} + +func (cl *Client) eachDhtServer(f func(DhtServer)) { + for _, ds := range cl.dhtServers { + f(ds) + } +} + +// Stops the client. All connections to peers are closed and all activity will come to a halt. +func (cl *Client) Close() (errs []error) { + var closeGroup sync.WaitGroup // For concurrent cleanup to complete before returning + cl.lock() + for _, t := range cl.torrents { + err := t.close(&closeGroup) + if err != nil { + errs = append(errs, err) + } + } + for i := range cl.onClose { + cl.onClose[len(cl.onClose)-1-i]() + } + cl.closed.Set() + cl.unlock() + cl.event.Broadcast() + closeGroup.Wait() // defer is LIFO. We want to Wait() after cl.unlock() + return +} + +func (cl *Client) ipBlockRange(ip net.IP) (r iplist.Range, blocked bool) { + if cl.ipBlockList == nil { + return + } + return cl.ipBlockList.Lookup(ip) +} + +func (cl *Client) ipIsBlocked(ip net.IP) bool { + _, blocked := cl.ipBlockRange(ip) + return blocked +} + +func (cl *Client) wantConns() bool { + if cl.config.AlwaysWantConns { + return true + } + for _, t := range cl.torrents { + if t.wantIncomingConns() { + return true + } + } + return false +} + +// TODO: Apply filters for non-standard networks, particularly rate-limiting. 
+func (cl *Client) rejectAccepted(conn net.Conn) error { + if !cl.wantConns() { + return errors.New("don't want conns right now") + } + ra := conn.RemoteAddr() + if rip := addrIpOrNil(ra); rip != nil { + if cl.config.DisableIPv4Peers && rip.To4() != nil { + return errors.New("ipv4 peers disabled") + } + if cl.config.DisableIPv4 && len(rip) == net.IPv4len { + return errors.New("ipv4 disabled") + } + if cl.config.DisableIPv6 && len(rip) == net.IPv6len && rip.To4() == nil { + return errors.New("ipv6 disabled") + } + if cl.rateLimitAccept(rip) { + return errors.New("source IP accepted rate limited") + } + if cl.badPeerIPPort(rip, missinggo.AddrPort(ra)) { + return errors.New("bad source addr") + } + } + return nil +} + +func (cl *Client) acceptConnections(l Listener) { + for { + conn, err := l.Accept() + torrent.Add("client listener accepts", 1) + if err == nil { + holepunchAddr, holepunchErr := addrPortFromPeerRemoteAddr(conn.RemoteAddr()) + if holepunchErr == nil { + cl.lock() + if g.MapContains(cl.undialableWithoutHolepunch, holepunchAddr) { + setAdd(&cl.accepted, holepunchAddr) + } + if g.MapContains( + cl.undialableWithoutHolepunchDialedAfterHolepunchConnect, + holepunchAddr, + ) { + setAdd(&cl.probablyOnlyConnectedDueToHolepunch, holepunchAddr) + } + cl.unlock() + } + } + conn = pproffd.WrapNetConn(conn) + cl.rLock() + closed := cl.closed.IsSet() + var reject error + if !closed && conn != nil { + reject = cl.rejectAccepted(conn) + } + cl.rUnlock() + if closed { + if conn != nil { + conn.Close() + } + return + } + if err != nil { + log.Fmsg("error accepting connection: %s", err).LogLevel(log.Debug, cl.logger) + continue + } + go func() { + if reject != nil { + torrent.Add("rejected accepted connections", 1) + cl.logger.LazyLog(log.Debug, func() log.Msg { + return log.Fmsg("rejecting accepted conn: %v", reject) + }) + conn.Close() + } else { + go cl.incomingConnection(conn) + } + cl.logger.LazyLog(log.Debug, func() log.Msg { + return log.Fmsg("accepted %q connection at %q from %q", + l.Addr().Network(), + conn.LocalAddr(), + conn.RemoteAddr(), + ) + }) + torrent.Add(fmt.Sprintf("accepted conn remote IP len=%d", len(addrIpOrNil(conn.RemoteAddr()))), 1) + torrent.Add(fmt.Sprintf("accepted conn network=%s", conn.RemoteAddr().Network()), 1) + torrent.Add(fmt.Sprintf("accepted on %s listener", l.Addr().Network()), 1) + }() + } +} + +// Creates the PeerConn.connString for a regular net.Conn PeerConn. +func regularNetConnPeerConnConnString(nc net.Conn) string { + return fmt.Sprintf("%s-%s", nc.LocalAddr(), nc.RemoteAddr()) +} + +func (cl *Client) incomingConnection(nc net.Conn) { + defer nc.Close() + if tc, ok := nc.(*net.TCPConn); ok { + tc.SetLinger(0) + } + remoteAddr, _ := tryIpPortFromNetAddr(nc.RemoteAddr()) + c := cl.newConnection( + nc, + newConnectionOpts{ + outgoing: false, + remoteAddr: nc.RemoteAddr(), + localPublicAddr: cl.publicAddr(remoteAddr.IP), + network: nc.RemoteAddr().Network(), + connString: regularNetConnPeerConnConnString(nc), + }) + defer func() { + cl.lock() + defer cl.unlock() + c.close() + }() + c.Discovery = PeerSourceIncoming + cl.runReceivedConn(c) +} + +// Returns a handle to the given torrent, if it's present in the client. 
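+//
+// Illustrative lookup (editor's sketch; ih stands in for a known infohash):
+//
+//	if t, ok := cl.Torrent(ih); ok {
+//		_ = t // the torrent is loaded; ok is false if it isn't present
+//	}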
+func (cl *Client) Torrent(ih metainfo.Hash) (t *Torrent, ok bool) { + cl.rLock() + defer cl.rUnlock() + t, ok = cl.torrents[ih] + return +} + +func (cl *Client) torrent(ih metainfo.Hash) *Torrent { + return cl.torrents[ih] +} + +type DialResult struct { + Conn net.Conn + Dialer Dialer +} + +func countDialResult(err error) { + if err == nil { + torrent.Add("successful dials", 1) + } else { + torrent.Add("unsuccessful dials", 1) + } +} + +func reducedDialTimeout(minDialTimeout, max time.Duration, halfOpenLimit, pendingPeers int) (ret time.Duration) { + ret = max / time.Duration((pendingPeers+halfOpenLimit)/halfOpenLimit) + if ret < minDialTimeout { + ret = minDialTimeout + } + return +} + +// Returns whether an address is known to connect to a client with our own ID. +func (cl *Client) dopplegangerAddr(addr string) bool { + _, ok := cl.dopplegangerAddrs[addr] + return ok +} + +// Returns a connection over UTP or TCP, whichever is first to connect. +func (cl *Client) dialFirst(ctx context.Context, addr string) (res DialResult) { + return DialFirst(ctx, addr, cl.dialers) +} + +// Returns a connection over UTP or TCP, whichever is first to connect. +func DialFirst(ctx context.Context, addr string, dialers []Dialer) (res DialResult) { + pool := dialPool{ + addr: addr, + } + defer pool.startDrainer() + for _, _s := range dialers { + pool.add(ctx, _s) + } + return pool.getFirst() +} + +func dialFromSocket(ctx context.Context, s Dialer, addr string) net.Conn { + c, err := s.Dial(ctx, addr) + if err != nil { + log.Levelf(log.Debug, "error dialing %q: %v", addr, err) + } + // This is a bit optimistic, but it looks non-trivial to thread this through the proxy code. Set + // it now in case we close the connection forthwith. Note this is also done in the TCP dialer + // code to increase the chance it's done. + if tc, ok := c.(*net.TCPConn); ok { + tc.SetLinger(0) + } + countDialResult(err) + return c +} + +func (cl *Client) noLongerHalfOpen(t *Torrent, addr string, attemptKey outgoingConnAttemptKey) { + path := t.getHalfOpenPath(addr, attemptKey) + if !path.Exists() { + panic("should exist") + } + path.Delete() + cl.numHalfOpen-- + if cl.numHalfOpen < 0 { + panic("should not be possible") + } + for _, t := range cl.torrents { + t.openNewConns() + } +} + +func (cl *Client) countHalfOpenFromTorrents() (count int) { + for _, t := range cl.torrents { + count += t.numHalfOpenAttempts() + } + return +} + +// Performs initiator handshakes and returns a connection. Returns nil *PeerConn if no connection +// for valid reasons. +func (cl *Client) initiateProtocolHandshakes( + ctx context.Context, + nc net.Conn, + t *Torrent, + encryptHeader bool, + newConnOpts newConnectionOpts, +) ( + c *PeerConn, err error, +) { + c = cl.newConnection(nc, newConnOpts) + c.headerEncrypted = encryptHeader + ctx, cancel := context.WithTimeout(ctx, cl.config.HandshakesTimeout) + defer cancel() + dl, ok := ctx.Deadline() + if !ok { + panic(ctx) + } + err = nc.SetDeadline(dl) + if err != nil { + panic(err) + } + err = cl.initiateHandshakes(c, t) + return +} + +func doProtocolHandshakeOnDialResult( + t *Torrent, + obfuscatedHeader bool, + addr PeerRemoteAddr, + dr DialResult, +) ( + c *PeerConn, err error, +) { + cl := t.cl + nc := dr.Conn + addrIpPort, _ := tryIpPortFromNetAddr(addr) + c, err = cl.initiateProtocolHandshakes( + context.Background(), nc, t, obfuscatedHeader, + newConnectionOpts{ + outgoing: true, + remoteAddr: addr, + // It would be possible to retrieve a public IP from the dialer used here? 
+ localPublicAddr: cl.publicAddr(addrIpPort.IP), + network: dr.Dialer.DialerNetwork(), + connString: regularNetConnPeerConnConnString(nc), + }) + if err != nil { + nc.Close() + } + return c, err +} + +// Returns nil connection and nil error if no connection could be established for valid reasons. +func (cl *Client) dialAndCompleteHandshake(opts outgoingConnOpts) (c *PeerConn, err error) { + // It would be better if dial rate limiting could be tested when considering to open connections + // instead. Doing it here means if the limit is low, and the half-open limit is high, we could + // end up with lots of outgoing connection attempts pending that were initiated on stale data. + { + dialReservation := cl.config.DialRateLimiter.Reserve() + if !opts.receivedHolepunchConnect { + if !dialReservation.OK() { + err = errors.New("can't make dial limit reservation") + return + } + time.Sleep(dialReservation.Delay()) + } + } + torrent.Add("establish outgoing connection", 1) + addr := opts.peerInfo.Addr + dialPool := dialPool{ + resCh: make(chan DialResult), + addr: addr.String(), + } + defer dialPool.startDrainer() + dialTimeout := opts.t.getDialTimeoutUnlocked() + { + ctx, cancel := context.WithTimeout(context.Background(), dialTimeout) + defer cancel() + for _, d := range cl.dialers { + dialPool.add(ctx, d) + } + } + holepunchAddr, holepunchAddrErr := addrPortFromPeerRemoteAddr(addr) + headerObfuscationPolicy := opts.HeaderObfuscationPolicy + obfuscatedHeaderFirst := headerObfuscationPolicy.Preferred + firstDialResult := dialPool.getFirst() + if firstDialResult.Conn == nil { + // No dialers worked. Try to initiate a holepunching rendezvous. + if holepunchAddrErr == nil { + cl.lock() + if !opts.receivedHolepunchConnect { + g.MakeMapIfNilAndSet(&cl.undialableWithoutHolepunch, holepunchAddr, struct{}{}) + } + if !opts.skipHolepunchRendezvous { + opts.t.trySendHolepunchRendezvous(holepunchAddr) + } + cl.unlock() + } + err = fmt.Errorf("all initial dials failed") + return + } + if opts.receivedHolepunchConnect && holepunchAddrErr == nil { + cl.lock() + if g.MapContains(cl.undialableWithoutHolepunch, holepunchAddr) { + g.MakeMapIfNilAndSet(&cl.dialableOnlyAfterHolepunch, holepunchAddr, struct{}{}) + } + g.MakeMapIfNil(&cl.dialedSuccessfullyAfterHolepunchConnect) + g.MapInsert(cl.dialedSuccessfullyAfterHolepunchConnect, holepunchAddr, struct{}{}) + cl.unlock() + } + c, err = doProtocolHandshakeOnDialResult( + opts.t, + obfuscatedHeaderFirst, + addr, + firstDialResult, + ) + if err == nil { + torrent.Add("initiated conn with preferred header obfuscation", 1) + return + } + c.logger.Levelf( + log.Debug, + "error doing protocol handshake with header obfuscation %v", + obfuscatedHeaderFirst, + ) + firstDialResult.Conn.Close() + // We should have just tried with the preferred header obfuscation. If it was required, there's nothing else to try. + if headerObfuscationPolicy.RequirePreferred { + return + } + // Reuse the dialer that returned already but failed to handshake. 
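+ // (Having failed with the preferred header obfuscation, and it not being
+ // required, the same dialer is retried once below with the opposite
+ // obfuscation setting.)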
+ {
+ ctx, cancel := context.WithTimeout(context.Background(), dialTimeout)
+ defer cancel()
+ dialPool.add(ctx, firstDialResult.Dialer)
+ }
+ secondDialResult := dialPool.getFirst()
+ if secondDialResult.Conn == nil {
+ return
+ }
+ c, err = doProtocolHandshakeOnDialResult(
+ opts.t,
+ !obfuscatedHeaderFirst,
+ addr,
+ secondDialResult,
+ )
+ if err == nil {
+ torrent.Add("initiated conn with fallback header obfuscation", 1)
+ return
+ }
+ c.logger.Levelf(
+ log.Debug,
+ "error doing protocol handshake with header obfuscation %v",
+ !obfuscatedHeaderFirst,
+ )
+ secondDialResult.Conn.Close()
+ return
+}
+
+type outgoingConnOpts struct {
+ peerInfo PeerInfo
+ t *Torrent
+ // Don't attempt to connect unless a connect message is received after initiating a rendezvous.
+ requireRendezvous bool
+ // Don't send rendezvous requests to eligible relays.
+ skipHolepunchRendezvous bool
+ // Outgoing connection attempt is in response to holepunch connect message.
+ receivedHolepunchConnect bool
+ HeaderObfuscationPolicy HeaderObfuscationPolicy
+}
+
+// Called to dial out and run a connection. The addr we're given is already
+// considered half-open.
+func (cl *Client) outgoingConnection(
+ opts outgoingConnOpts,
+ attemptKey outgoingConnAttemptKey,
+) {
+ c, err := cl.dialAndCompleteHandshake(opts)
+ if err == nil {
+ c.conn.SetWriteDeadline(time.Time{})
+ }
+ cl.lock()
+ defer cl.unlock()
+ // Don't release lock between here and addPeerConn, unless it's for failure.
+ cl.noLongerHalfOpen(opts.t, opts.peerInfo.Addr.String(), attemptKey)
+ if err != nil {
+ if cl.config.Debug {
+ cl.logger.Levelf(
+ log.Debug,
+ "error establishing outgoing connection to %v: %v",
+ opts.peerInfo.Addr,
+ err,
+ )
+ }
+ return
+ }
+ defer c.close()
+ c.Discovery = opts.peerInfo.Source
+ c.trusted = opts.peerInfo.Trusted
+ opts.t.runHandshookConnLoggingErr(c)
+}
+
+// The port number for incoming peer connections. 0 if the client isn't listening.
+func (cl *Client) incomingPeerPort() int {
+ return cl.LocalPort()
+}
+
+func (cl *Client) initiateHandshakes(c *PeerConn, t *Torrent) error {
+ if c.headerEncrypted {
+ var rw io.ReadWriter
+ var err error
+ rw, c.cryptoMethod, err = mse.InitiateHandshake(
+ struct {
+ io.Reader
+ io.Writer
+ }{c.r, c.w},
+ t.infoHash[:],
+ nil,
+ cl.config.CryptoProvides,
+ )
+ c.setRW(rw)
+ if err != nil {
+ return fmt.Errorf("header obfuscation handshake: %w", err)
+ }
+ }
+ ih, err := cl.connBtHandshake(c, &t.infoHash)
+ if err != nil {
+ return fmt.Errorf("bittorrent protocol handshake: %w", err)
+ }
+ if ih != t.infoHash {
+ return errors.New("bittorrent protocol handshake: peer infohash didn't match")
+ }
+ return nil
+}
+
+// Calls f with any secret keys. Note that it takes the Client lock, and so must be used from code
+// that won't also try to take the lock. This saves us copying all the infohashes every time.
+func (cl *Client) forSkeys(f func([]byte) bool) {
+ cl.rLock()
+ defer cl.rUnlock()
+ if false { // Emulate the bug from #114
+ var firstIh InfoHash
+ for ih := range cl.torrents {
+ firstIh = ih
+ break
+ }
+ for range cl.torrents {
+ if !f(firstIh[:]) {
+ break
+ }
+ }
+ return
+ }
+ for ih := range cl.torrents {
+ if !f(ih[:]) {
+ break
+ }
+ }
+}
+
+func (cl *Client) handshakeReceiverSecretKeys() mse.SecretKeyIter {
+ if ret := cl.config.Callbacks.ReceiveEncryptedHandshakeSkeys; ret != nil {
+ return ret
+ }
+ return cl.forSkeys
+}
+
+// Do encryption and bittorrent handshakes as receiver.
+func (cl *Client) receiveHandshakes(c *PeerConn) (t *Torrent, err error) { + defer perf.ScopeTimerErr(&err)() + var rw io.ReadWriter + rw, c.headerEncrypted, c.cryptoMethod, err = handleEncryption(c.rw(), cl.handshakeReceiverSecretKeys(), cl.config.HeaderObfuscationPolicy, cl.config.CryptoSelector) + c.setRW(rw) + if err == nil || err == mse.ErrNoSecretKeyMatch { + if c.headerEncrypted { + torrent.Add("handshakes received encrypted", 1) + } else { + torrent.Add("handshakes received unencrypted", 1) + } + } else { + torrent.Add("handshakes received with error while handling encryption", 1) + } + if err != nil { + if err == mse.ErrNoSecretKeyMatch { + err = nil + } + return + } + if cl.config.HeaderObfuscationPolicy.RequirePreferred && c.headerEncrypted != cl.config.HeaderObfuscationPolicy.Preferred { + err = errors.New("connection does not have required header obfuscation") + return + } + ih, err := cl.connBtHandshake(c, nil) + if err != nil { + return nil, fmt.Errorf("during bt handshake: %w", err) + } + cl.lock() + t = cl.torrents[ih] + cl.unlock() + return +} + +var successfulPeerWireProtocolHandshakePeerReservedBytes expvar.Map + +func init() { + torrent.Set( + "successful_peer_wire_protocol_handshake_peer_reserved_bytes", + &successfulPeerWireProtocolHandshakePeerReservedBytes) +} + +func (cl *Client) connBtHandshake(c *PeerConn, ih *metainfo.Hash) (ret metainfo.Hash, err error) { + res, err := pp.Handshake(c.rw(), ih, cl.peerID, cl.config.Extensions) + if err != nil { + return + } + successfulPeerWireProtocolHandshakePeerReservedBytes.Add( + hex.EncodeToString(res.PeerExtensionBits[:]), 1) + ret = res.Hash + c.PeerExtensionBytes = res.PeerExtensionBits + c.PeerID = res.PeerID + c.completedHandshake = time.Now() + if cb := cl.config.Callbacks.CompletedHandshake; cb != nil { + cb(c, res.Hash) + } + return +} + +func (cl *Client) runReceivedConn(c *PeerConn) { + err := c.conn.SetDeadline(time.Now().Add(cl.config.HandshakesTimeout)) + if err != nil { + panic(err) + } + t, err := cl.receiveHandshakes(c) + if err != nil { + cl.logger.LazyLog(log.Debug, func() log.Msg { + return log.Fmsg( + "error receiving handshakes on %v: %s", c, err, + ).Add( + "network", c.Network, + ) + }) + torrent.Add("error receiving handshake", 1) + cl.lock() + cl.onBadAccept(c.RemoteAddr) + cl.unlock() + return + } + if t == nil { + torrent.Add("received handshake for unloaded torrent", 1) + cl.logger.LazyLog(log.Debug, func() log.Msg { + return log.Fmsg("received handshake for unloaded torrent") + }) + cl.lock() + cl.onBadAccept(c.RemoteAddr) + cl.unlock() + return + } + torrent.Add("received handshake for loaded torrent", 1) + c.conn.SetWriteDeadline(time.Time{}) + cl.lock() + defer cl.unlock() + t.runHandshookConnLoggingErr(c) +} + +// Client lock must be held before entering this. +func (t *Torrent) runHandshookConn(pc *PeerConn) error { + pc.setTorrent(t) + cl := t.cl + for i, b := range cl.config.MinPeerExtensions { + if pc.PeerExtensionBytes[i]&b != b { + return fmt.Errorf("peer did not meet minimum peer extensions: %x", pc.PeerExtensionBytes[:]) + } + } + if pc.PeerID == cl.peerID { + if pc.outgoing { + connsToSelf.Add(1) + addr := pc.RemoteAddr.String() + cl.dopplegangerAddrs[addr] = struct{}{} + } /* else { + // Because the remote address is not necessarily the same as its client's torrent listen + // address, we won't record the remote address as a doppleganger. Instead, the initiator + // can record *us* as the doppleganger. 
+ } */
+ t.logger.Levelf(log.Debug, "local and remote peer ids are the same")
+ return nil
+ }
+ pc.r = deadlineReader{pc.conn, pc.r}
+ completedHandshakeConnectionFlags.Add(pc.connectionFlags(), 1)
+ if connIsIpv6(pc.conn) {
+ torrent.Add("completed handshake over ipv6", 1)
+ }
+ if err := t.addPeerConn(pc); err != nil {
+ return fmt.Errorf("adding connection: %w", err)
+ }
+ defer t.dropConnection(pc)
+ pc.startMessageWriter()
+ pc.sendInitialMessages()
+ pc.initUpdateRequestsTimer()
+ err := pc.mainReadLoop()
+ if err != nil {
+ return fmt.Errorf("main read loop: %w", err)
+ }
+ return nil
+}
+
+func (p *Peer) initUpdateRequestsTimer() {
+ if check.Enabled {
+ if p.updateRequestsTimer != nil {
+ panic(p.updateRequestsTimer)
+ }
+ }
+ if enableUpdateRequestsTimer {
+ p.updateRequestsTimer = time.AfterFunc(math.MaxInt64, p.updateRequestsTimerFunc)
+ }
+}
+
+const peerUpdateRequestsTimerReason = "updateRequestsTimer"
+
+func (c *Peer) updateRequestsTimerFunc() {
+ c.locker().Lock()
+ defer c.locker().Unlock()
+ if c.closed.IsSet() {
+ return
+ }
+ if c.isLowOnRequests() {
+ // If there are no outstanding requests, then a request update should have already run.
+ return
+ }
+ if d := time.Since(c.lastRequestUpdate); d < updateRequestsTimerDuration {
+ // These should be benign; Timer.Stop doesn't guarantee that its function won't run if it
+ // has already been fired.
+ torrent.Add("spurious timer requests updates", 1)
+ return
+ }
+ c.updateRequests(peerUpdateRequestsTimerReason)
+}
+
+// Maximum pending requests we allow peers to send us. If peer requests are buffered on read, this
+// bounds the amount of memory that might be used to cache pending writes: with 16KiB (1<<14)
+// chunks, 1024 pending requests comes to 16MiB (1<<24) cached for sending.
+const localClientReqq = 1024
+
+// See the order given in Transmission's tr_peerMsgsNew.
+func (pc *PeerConn) sendInitialMessages() {
+ t := pc.t
+ cl := t.cl
+ if pc.PeerExtensionBytes.SupportsExtended() && cl.config.Extensions.SupportsExtended() {
+ pc.write(pp.Message{
+ Type: pp.Extended,
+ ExtendedID: pp.HandshakeExtendedID,
+ ExtendedPayload: func() []byte {
+ msg := pp.ExtendedHandshakeMessage{
+ M: map[pp.ExtensionName]pp.ExtensionNumber{
+ pp.ExtensionNameMetadata: metadataExtendedId,
+ utHolepunch.ExtensionName: utHolepunchExtendedId,
+ },
+ V: cl.config.ExtendedHandshakeClientVersion,
+ Reqq: localClientReqq,
+ YourIp: pp.CompactIp(pc.remoteIp()),
+ Encryption: cl.config.HeaderObfuscationPolicy.Preferred || !cl.config.HeaderObfuscationPolicy.RequirePreferred,
+ Port: cl.incomingPeerPort(),
+ MetadataSize: t.metadataSize(),
+ // TODO: We can figure these out specific to the socket used.
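+ // The "ipv4" and "ipv6" extended-handshake keys advertise our
+ // configured public addresses to the peer, per BEP 10.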
+ Ipv4: pp.CompactIp(cl.config.PublicIp4.To4()), + Ipv6: cl.config.PublicIp6.To16(), + } + if !cl.config.DisablePEX { + msg.M[pp.ExtensionNamePex] = pexExtendedId + } + return bencode.MustMarshal(msg) + }(), + }) + } + func() { + if pc.fastEnabled() { + if t.haveAllPieces() { + pc.write(pp.Message{Type: pp.HaveAll}) + pc.sentHaves.AddRange(0, bitmap.BitRange(pc.t.NumPieces())) + return + } else if !t.haveAnyPieces() { + pc.write(pp.Message{Type: pp.HaveNone}) + pc.sentHaves.Clear() + return + } + } + pc.postBitfield() + }() + if pc.PeerExtensionBytes.SupportsDHT() && cl.config.Extensions.SupportsDHT() && cl.haveDhtServer() { + pc.write(pp.Message{ + Type: pp.Port, + Port: cl.dhtPort(), + }) + } +} + +func (cl *Client) dhtPort() (ret uint16) { + if len(cl.dhtServers) == 0 { + return + } + return uint16(missinggo.AddrPort(cl.dhtServers[len(cl.dhtServers)-1].Addr())) +} + +func (cl *Client) haveDhtServer() bool { + return len(cl.dhtServers) > 0 +} + +// Process incoming ut_metadata message. +func (cl *Client) gotMetadataExtensionMsg(payload []byte, t *Torrent, c *PeerConn) error { + var d pp.ExtendedMetadataRequestMsg + err := bencode.Unmarshal(payload, &d) + if _, ok := err.(bencode.ErrUnusedTrailingBytes); ok { + } else if err != nil { + return fmt.Errorf("error unmarshalling bencode: %s", err) + } + piece := d.Piece + switch d.Type { + case pp.DataMetadataExtensionMsgType: + c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.MetadataChunksRead })) + if !c.requestedMetadataPiece(piece) { + return fmt.Errorf("got unexpected piece %d", piece) + } + c.metadataRequests[piece] = false + begin := len(payload) - d.PieceSize() + if begin < 0 || begin >= len(payload) { + return fmt.Errorf("data has bad offset in payload: %d", begin) + } + t.saveMetadataPiece(piece, payload[begin:]) + c.lastUsefulChunkReceived = time.Now() + err = t.maybeCompleteMetadata() + if err != nil { + // Log this at the Torrent-level, as we don't partition metadata by Peer yet, so we + // don't know who to blame. TODO: Also errors can be returned here that aren't related + // to verifying metadata, which should be fixed. This should be tagged with metadata, so + // log consumers can filter for this message. + t.logger.WithDefaultLevel(log.Warning).Printf("error completing metadata: %v", err) + } + return err + case pp.RequestMetadataExtensionMsgType: + if !t.haveMetadataPiece(piece) { + c.write(t.newMetadataExtensionMessage(c, pp.RejectMetadataExtensionMsgType, d.Piece, nil)) + return nil + } + start := (1 << 14) * piece + c.logger.WithDefaultLevel(log.Debug).Printf("sending metadata piece %d", piece) + c.write(t.newMetadataExtensionMessage(c, pp.DataMetadataExtensionMsgType, piece, t.metadataBytes[start:start+t.metadataPieceSize(piece)])) + return nil + case pp.RejectMetadataExtensionMsgType: + return nil + default: + return errors.New("unknown msg_type value") + } +} + +func (cl *Client) badPeerAddr(addr PeerRemoteAddr) bool { + if ipa, ok := tryIpPortFromNetAddr(addr); ok { + return cl.badPeerIPPort(ipa.IP, ipa.Port) + } + return false +} + +// Returns whether the IP address and port are considered "bad". 
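+// An address is considered bad when it is unusable (nil IP or zero port),
+// matches a known doppleganger address, falls within the configured IP block
+// list, or was previously recorded in badPeerIPs (e.g. via banPeerIP).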
+func (cl *Client) badPeerIPPort(ip net.IP, port int) bool { + if port == 0 || ip == nil { + return true + } + if cl.dopplegangerAddr(net.JoinHostPort(ip.String(), strconv.FormatInt(int64(port), 10))) { + return true + } + if _, ok := cl.ipBlockRange(ip); ok { + return true + } + ipAddr, ok := netip.AddrFromSlice(ip) + if !ok { + panic(ip) + } + if _, ok := cl.badPeerIPs[ipAddr]; ok { + return true + } + return false +} + +// Return a Torrent ready for insertion into a Client. +func (cl *Client) newTorrent(ih metainfo.Hash, specStorage storage.ClientImpl) (t *Torrent) { + return cl.newTorrentOpt(AddTorrentOpts{ + InfoHash: ih, + Storage: specStorage, + }) +} + +// Return a Torrent ready for insertion into a Client. +func (cl *Client) newTorrentOpt(opts AddTorrentOpts) (t *Torrent) { + // use provided storage, if provided + storageClient := cl.defaultStorage + if opts.Storage != nil { + storageClient = storage.NewClient(opts.Storage) + } + + t = &Torrent{ + cl: cl, + infoHash: opts.InfoHash, + peers: prioritizedPeers{ + om: gbtree.New(32), + getPrio: func(p PeerInfo) peerPriority { + ipPort := p.addr() + return bep40PriorityIgnoreError(cl.publicAddr(ipPort.IP), ipPort) + }, + }, + conns: make(map[*PeerConn]struct{}, 2*cl.config.EstablishedConnsPerTorrent), + + storageOpener: storageClient, + maxEstablishedConns: cl.config.EstablishedConnsPerTorrent, + + metadataChanged: sync.Cond{ + L: cl.locker(), + }, + webSeeds: make(map[string]*Peer), + gotMetainfoC: make(chan struct{}), + } + t.smartBanCache.Hash = sha1.Sum + t.smartBanCache.Init() + t.networkingEnabled.Set() + t.logger = cl.logger.WithDefaultLevel(log.Debug) + t.sourcesLogger = t.logger.WithNames("sources") + if opts.ChunkSize == 0 { + opts.ChunkSize = defaultChunkSize + } + t.setChunkSize(opts.ChunkSize) + return +} + +// A file-like handle to some torrent data resource. +type Handle interface { + io.Reader + io.Seeker + io.Closer + io.ReaderAt +} + +func (cl *Client) AddTorrentInfoHash(infoHash metainfo.Hash) (t *Torrent, new bool) { + return cl.AddTorrentInfoHashWithStorage(infoHash, nil) +} + +// Adds a torrent by InfoHash with a custom Storage implementation. +// If the torrent already exists then this Storage is ignored and the +// existing torrent returned with `new` set to `false` +func (cl *Client) AddTorrentInfoHashWithStorage(infoHash metainfo.Hash, specStorage storage.ClientImpl) (t *Torrent, new bool) { + cl.lock() + defer cl.unlock() + t, ok := cl.torrents[infoHash] + if ok { + return + } + new = true + + t = cl.newTorrent(infoHash, specStorage) + cl.eachDhtServer(func(s DhtServer) { + if cl.config.PeriodicallyAnnounceTorrentsToDht { + go t.dhtAnnouncer(s) + } + }) + cl.torrents[infoHash] = t + cl.clearAcceptLimits() + t.updateWantPeersEvent() + // Tickle Client.waitAccept, new torrent may want conns. + cl.event.Broadcast() + return +} + +// Adds a torrent by InfoHash with a custom Storage implementation. If the torrent already exists +// then this Storage is ignored and the existing torrent returned with `new` set to `false`. 
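+// Unlike AddTorrentInfoHashWithStorage, this variant also applies InfoBytes
+// and ChunkSize from the options while the client lock is held.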
+func (cl *Client) AddTorrentOpt(opts AddTorrentOpts) (t *Torrent, new bool) { + infoHash := opts.InfoHash + cl.lock() + defer cl.unlock() + t, ok := cl.torrents[infoHash] + if ok { + return + } + new = true + + t = cl.newTorrentOpt(opts) + cl.eachDhtServer(func(s DhtServer) { + if cl.config.PeriodicallyAnnounceTorrentsToDht { + go t.dhtAnnouncer(s) + } + }) + cl.torrents[infoHash] = t + t.setInfoBytesLocked(opts.InfoBytes) + cl.clearAcceptLimits() + t.updateWantPeersEvent() + // Tickle Client.waitAccept, new torrent may want conns. + cl.event.Broadcast() + return +} + +type AddTorrentOpts struct { + InfoHash infohash.T + Storage storage.ClientImpl + ChunkSize pp.Integer + InfoBytes []byte +} + +// Add or merge a torrent spec. Returns new if the torrent wasn't already in the client. See also +// Torrent.MergeSpec. +func (cl *Client) AddTorrentSpec(spec *TorrentSpec) (t *Torrent, new bool, err error) { + t, new = cl.AddTorrentOpt(AddTorrentOpts{ + InfoHash: spec.InfoHash, + Storage: spec.Storage, + ChunkSize: spec.ChunkSize, + }) + modSpec := *spec + if new { + // ChunkSize was already applied by adding a new Torrent, and MergeSpec disallows changing + // it. + modSpec.ChunkSize = 0 + } + err = t.MergeSpec(&modSpec) + if err != nil && new { + t.Drop() + } + return +} + +// The trackers will be merged with the existing ones. If the Info isn't yet known, it will be set. +// spec.DisallowDataDownload/Upload will be read and applied +// The display name is replaced if the new spec provides one. Note that any `Storage` is ignored. +func (t *Torrent) MergeSpec(spec *TorrentSpec) error { + if spec.DisplayName != "" { + t.SetDisplayName(spec.DisplayName) + } + if spec.InfoBytes != nil { + err := t.SetInfoBytes(spec.InfoBytes) + if err != nil { + return err + } + } + cl := t.cl + cl.AddDhtNodes(spec.DhtNodes) + t.UseSources(spec.Sources) + cl.lock() + defer cl.unlock() + t.initialPieceCheckDisabled = spec.DisableInitialPieceCheck + for _, url := range spec.Webseeds { + t.addWebSeed(url) + } + for _, peerAddr := range spec.PeerAddrs { + t.addPeer(PeerInfo{ + Addr: StringAddr(peerAddr), + Source: PeerSourceDirect, + Trusted: true, + }) + } + if spec.ChunkSize != 0 { + panic("chunk size cannot be changed for existing Torrent") + } + t.addTrackers(spec.Trackers) + t.maybeNewConns() + t.dataDownloadDisallowed.SetBool(spec.DisallowDataDownload) + t.dataUploadDisallowed = spec.DisallowDataUpload + return nil +} + +func (cl *Client) dropTorrent(infoHash metainfo.Hash, wg *sync.WaitGroup) (err error) { + t, ok := cl.torrents[infoHash] + if !ok { + err = fmt.Errorf("no such torrent") + return + } + err = t.close(wg) + delete(cl.torrents, infoHash) + return +} + +func (cl *Client) allTorrentsCompleted() bool { + for _, t := range cl.torrents { + if !t.haveInfo() { + return false + } + if !t.haveAllPieces() { + return false + } + } + return true +} + +// Returns true when all torrents are completely downloaded and false if the +// client is stopped before that. +func (cl *Client) WaitAll() bool { + cl.lock() + defer cl.unlock() + for !cl.allTorrentsCompleted() { + if cl.closed.IsSet() { + return false + } + cl.event.Wait() + } + return true +} + +// Returns handles to all the torrents loaded in the Client. 
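+// The returned slice is a snapshot taken under the client's read lock;
+// torrents may be added or dropped immediately afterwards. A usage sketch:
+//
+//	for _, t := range cl.Torrents() {
+//		fmt.Println(t.Name())
+//	}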
+func (cl *Client) Torrents() []*Torrent { + cl.rLock() + defer cl.rUnlock() + return cl.torrentsAsSlice() +} + +func (cl *Client) torrentsAsSlice() (ret []*Torrent) { + for _, t := range cl.torrents { + ret = append(ret, t) + } + return +} + +func (cl *Client) AddMagnet(uri string) (T *Torrent, err error) { + spec, err := TorrentSpecFromMagnetUri(uri) + if err != nil { + return + } + T, _, err = cl.AddTorrentSpec(spec) + return +} + +func (cl *Client) AddTorrent(mi *metainfo.MetaInfo) (T *Torrent, err error) { + ts, err := TorrentSpecFromMetaInfoErr(mi) + if err != nil { + return + } + T, _, err = cl.AddTorrentSpec(ts) + return +} + +func (cl *Client) AddTorrentFromFile(filename string) (T *Torrent, err error) { + mi, err := metainfo.LoadFromFile(filename) + if err != nil { + return + } + return cl.AddTorrent(mi) +} + +func (cl *Client) DhtServers() []DhtServer { + return cl.dhtServers +} + +func (cl *Client) AddDhtNodes(nodes []string) { + for _, n := range nodes { + hmp := missinggo.SplitHostMaybePort(n) + ip := net.ParseIP(hmp.Host) + if ip == nil { + cl.logger.Printf("won't add DHT node with bad IP: %q", hmp.Host) + continue + } + ni := krpc.NodeInfo{ + Addr: krpc.NodeAddr{ + IP: ip, + Port: hmp.Port, + }, + } + cl.eachDhtServer(func(s DhtServer) { + s.AddNode(ni) + }) + } +} + +func (cl *Client) banPeerIP(ip net.IP) { + // We can't take this from string, because it will lose netip's v4on6. net.ParseIP parses v4 + // addresses directly to v4on6, which doesn't compare equal with v4. + ipAddr, ok := netip.AddrFromSlice(ip) + if !ok { + panic(ip) + } + g.MakeMapIfNilAndSet(&cl.badPeerIPs, ipAddr, struct{}{}) + for _, t := range cl.torrents { + t.iterPeers(func(p *Peer) { + if p.remoteIp().Equal(ip) { + t.logger.Levelf(log.Warning, "dropping peer %v with banned ip %v", p, ip) + // Should this be a close? + p.drop() + } + }) + } +} + +type newConnectionOpts struct { + outgoing bool + remoteAddr PeerRemoteAddr + localPublicAddr peerLocalPublicAddr + network string + connString string +} + +func (cl *Client) newConnection(nc net.Conn, opts newConnectionOpts) (c *PeerConn) { + if opts.network == "" { + panic(opts.remoteAddr) + } + c = &PeerConn{ + Peer: Peer{ + outgoing: opts.outgoing, + choking: true, + peerChoking: true, + PeerMaxRequests: 250, + + RemoteAddr: opts.remoteAddr, + localPublicAddr: opts.localPublicAddr, + Network: opts.network, + callbacks: &cl.config.Callbacks, + }, + connString: opts.connString, + conn: nc, + } + c.peerRequestDataAllocLimiter.Max = cl.config.MaxAllocPeerRequestDataPerConn + c.initRequestState() + // TODO: Need to be much more explicit about this, including allowing non-IP bannable addresses. 
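+ // For now only remote addresses that parse as ip:port become bannable;
+ // other address types simply get no bannable address attached.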
+ if opts.remoteAddr != nil { + netipAddrPort, err := netip.ParseAddrPort(opts.remoteAddr.String()) + if err == nil { + c.bannableAddr = Some(netipAddrPort.Addr()) + } + } + c.peerImpl = c + c.logger = cl.logger.WithDefaultLevel(log.Warning) + c.logger = c.logger.WithContextText(fmt.Sprintf("%T %p", c, c)) + c.setRW(connStatsReadWriter{nc, c}) + c.r = &rateLimitedReader{ + l: cl.config.DownloadRateLimiter, + r: c.r, + } + c.logger.Levelf( + log.Debug, + "inited with remoteAddr %v network %v outgoing %t", + opts.remoteAddr, opts.network, opts.outgoing, + ) + for _, f := range cl.config.Callbacks.NewPeer { + f(&c.Peer) + } + return +} + +func (cl *Client) onDHTAnnouncePeer(ih metainfo.Hash, ip net.IP, port int, portOk bool) { + cl.lock() + defer cl.unlock() + t := cl.torrent(ih) + if t == nil { + return + } + t.addPeers([]PeerInfo{{ + Addr: ipPortAddr{ip, port}, + Source: PeerSourceDhtAnnouncePeer, + }}) +} + +func firstNotNil(ips ...net.IP) net.IP { + for _, ip := range ips { + if ip != nil { + return ip + } + } + return nil +} + +func (cl *Client) eachListener(f func(Listener) bool) { + for _, s := range cl.listeners { + if !f(s) { + break + } + } +} + +func (cl *Client) findListener(f func(Listener) bool) (ret Listener) { + for i := 0; i < len(cl.listeners); i += 1 { + if ret = cl.listeners[i]; f(ret) { + return + } + } + return nil +} + +func (cl *Client) publicIp(peer net.IP) net.IP { + // TODO: Use BEP 10 to determine how peers are seeing us. + if peer.To4() != nil { + return firstNotNil( + cl.config.PublicIp4, + cl.findListenerIp(func(ip net.IP) bool { return ip.To4() != nil }), + ) + } + + return firstNotNil( + cl.config.PublicIp6, + cl.findListenerIp(func(ip net.IP) bool { return ip.To4() == nil }), + ) +} + +func (cl *Client) findListenerIp(f func(net.IP) bool) net.IP { + l := cl.findListener( + func(l Listener) bool { + return f(addrIpOrNil(l.Addr())) + }, + ) + if l == nil { + return nil + } + return addrIpOrNil(l.Addr()) +} + +// Our IP as a peer should see it. +func (cl *Client) publicAddr(peer net.IP) IpPort { + return IpPort{IP: cl.publicIp(peer), Port: uint16(cl.incomingPeerPort())} +} + +// ListenAddrs addresses currently being listened to. 
+func (cl *Client) ListenAddrs() (ret []net.Addr) { + cl.lock() + ret = make([]net.Addr, len(cl.listeners)) + for i := 0; i < len(cl.listeners); i += 1 { + ret[i] = cl.listeners[i].Addr() + } + cl.unlock() + return +} + +func (cl *Client) PublicIPs() (ips []net.IP) { + if ip := cl.config.PublicIp4; ip != nil { + ips = append(ips, ip) + } + if ip := cl.config.PublicIp6; ip != nil { + ips = append(ips, ip) + } + return +} + +func (cl *Client) onBadAccept(addr PeerRemoteAddr) { + ipa, ok := tryIpPortFromNetAddr(addr) + if !ok { + return + } + ip := maskIpForAcceptLimiting(ipa.IP) + if cl.acceptLimiter == nil { + cl.acceptLimiter = make(map[ipStr]int) + } + cl.acceptLimiter[ipStr(ip.String())]++ +} + +func maskIpForAcceptLimiting(ip net.IP) net.IP { + if ip4 := ip.To4(); ip4 != nil { + return ip4.Mask(net.CIDRMask(24, 32)) + } + return ip +} + +func (cl *Client) clearAcceptLimits() { + cl.acceptLimiter = nil +} + +func (cl *Client) acceptLimitClearer() { + for { + select { + case <-cl.closed.Done(): + return + case <-time.After(15 * time.Minute): + cl.lock() + cl.clearAcceptLimits() + cl.unlock() + } + } +} + +func (cl *Client) rateLimitAccept(ip net.IP) bool { + if cl.config.DisableAcceptRateLimiting { + return false + } + return cl.acceptLimiter[ipStr(maskIpForAcceptLimiting(ip).String())] > 0 +} + +func (cl *Client) rLock() { + cl._mu.RLock() +} + +func (cl *Client) rUnlock() { + cl._mu.RUnlock() +} + +func (cl *Client) lock() { + cl._mu.Lock() +} + +func (cl *Client) unlock() { + cl._mu.Unlock() +} + +func (cl *Client) locker() *lockWithDeferreds { + return &cl._mu +} + +func (cl *Client) String() string { + return fmt.Sprintf("<%[1]T %[1]p>", cl) +} + +// Returns connection-level aggregate connStats at the Client level. See the comment on +// TorrentStats.ConnStats. +func (cl *Client) ConnStats() ConnStats { + return cl.connStats.Copy() +} + +func (cl *Client) Stats() ClientStats { + cl.rLock() + defer cl.rUnlock() + return cl.statsLocked() +} diff --git a/deps/github.com/anacrolix/torrent/client_test.go b/deps/github.com/anacrolix/torrent/client_test.go new file mode 100644 index 0000000..d2a88e9 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/client_test.go @@ -0,0 +1,909 @@ +package torrent + +import ( + "encoding/binary" + "fmt" + "io" + "net" + "net/netip" + "os" + "path/filepath" + "reflect" + "testing" + "testing/iotest" + "time" + + "github.com/anacrolix/dht/v2" + "github.com/anacrolix/log" + "github.com/anacrolix/missinggo/v2" + "github.com/anacrolix/missinggo/v2/filecache" + "github.com/frankban/quicktest" + qt "github.com/frankban/quicktest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/anacrolix/torrent/bencode" + "github.com/anacrolix/torrent/internal/testutil" + "github.com/anacrolix/torrent/iplist" + "github.com/anacrolix/torrent/metainfo" + "github.com/anacrolix/torrent/storage" +) + +func TestClientDefault(t *testing.T) { + cl, err := NewClient(TestingConfig(t)) + require.NoError(t, err) + require.Empty(t, cl.Close()) +} + +func TestClientNilConfig(t *testing.T) { + // The default config will put crap in the working directory. 
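+ // So run the test from a throwaway directory and restore the original
+ // working directory afterwards.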
+ origDir, _ := os.Getwd() + defer os.Chdir(origDir) + os.Chdir(t.TempDir()) + cl, err := NewClient(nil) + require.NoError(t, err) + require.Empty(t, cl.Close()) +} + +func TestAddDropTorrent(t *testing.T) { + cl, err := NewClient(TestingConfig(t)) + require.NoError(t, err) + defer cl.Close() + dir, mi := testutil.GreetingTestTorrent() + defer os.RemoveAll(dir) + tt, new, err := cl.AddTorrentSpec(TorrentSpecFromMetaInfo(mi)) + require.NoError(t, err) + assert.True(t, new) + tt.SetMaxEstablishedConns(0) + tt.SetMaxEstablishedConns(1) + tt.Drop() +} + +func TestAddTorrentNoSupportedTrackerSchemes(t *testing.T) { + // TODO? + t.SkipNow() +} + +func TestAddTorrentNoUsableURLs(t *testing.T) { + // TODO? + t.SkipNow() +} + +func TestAddPeersToUnknownTorrent(t *testing.T) { + // TODO? + t.SkipNow() +} + +func TestPieceHashSize(t *testing.T) { + assert.Equal(t, 20, pieceHash.Size()) +} + +func TestTorrentInitialState(t *testing.T) { + dir, mi := testutil.GreetingTestTorrent() + defer os.RemoveAll(dir) + var cl Client + cl.init(TestingConfig(t)) + cl.initLogger() + tor := cl.newTorrent( + mi.HashInfoBytes(), + storage.NewFileWithCompletion(t.TempDir(), storage.NewMapPieceCompletion()), + ) + tor.setChunkSize(2) + tor.cl.lock() + err := tor.setInfoBytesLocked(mi.InfoBytes) + tor.cl.unlock() + require.NoError(t, err) + require.Len(t, tor.pieces, 3) + tor.pendAllChunkSpecs(0) + tor.cl.lock() + assert.EqualValues(t, 3, tor.pieceNumPendingChunks(0)) + tor.cl.unlock() + assert.EqualValues(t, ChunkSpec{4, 1}, chunkIndexSpec(2, tor.pieceLength(0), tor.chunkSize)) +} + +func TestReducedDialTimeout(t *testing.T) { + cfg := NewDefaultClientConfig() + for _, _case := range []struct { + Max time.Duration + HalfOpenLimit int + PendingPeers int + ExpectedReduced time.Duration + }{ + {cfg.NominalDialTimeout, 40, 0, cfg.NominalDialTimeout}, + {cfg.NominalDialTimeout, 40, 1, cfg.NominalDialTimeout}, + {cfg.NominalDialTimeout, 40, 39, cfg.NominalDialTimeout}, + {cfg.NominalDialTimeout, 40, 40, cfg.NominalDialTimeout / 2}, + {cfg.NominalDialTimeout, 40, 80, cfg.NominalDialTimeout / 3}, + {cfg.NominalDialTimeout, 40, 4000, cfg.NominalDialTimeout / 101}, + } { + reduced := reducedDialTimeout(cfg.MinDialTimeout, _case.Max, _case.HalfOpenLimit, _case.PendingPeers) + expected := _case.ExpectedReduced + if expected < cfg.MinDialTimeout { + expected = cfg.MinDialTimeout + } + if reduced != expected { + t.Fatalf("expected %s, got %s", _case.ExpectedReduced, reduced) + } + } +} + +func TestAddDropManyTorrents(t *testing.T) { + cl, err := NewClient(TestingConfig(t)) + require.NoError(t, err) + defer cl.Close() + for i := 0; i < 1000; i += 1 { + var spec TorrentSpec + binary.PutVarint(spec.InfoHash[:], int64(i)) + tt, new, err := cl.AddTorrentSpec(&spec) + assert.NoError(t, err) + assert.True(t, new) + defer tt.Drop() + } +} + +func fileCachePieceResourceStorage(fc *filecache.Cache) storage.ClientImpl { + return storage.NewResourcePiecesOpts( + fc.AsResourceProvider(), + storage.ResourcePiecesOpts{ + LeaveIncompleteChunks: true, + }, + ) +} + +func TestMergingTrackersByAddingSpecs(t *testing.T) { + cl, err := NewClient(TestingConfig(t)) + require.NoError(t, err) + defer cl.Close() + spec := TorrentSpec{} + T, new, _ := cl.AddTorrentSpec(&spec) + if !new { + t.FailNow() + } + spec.Trackers = [][]string{{"http://a"}, {"udp://b"}} + _, new, _ = cl.AddTorrentSpec(&spec) + assert.False(t, new) + assert.EqualValues(t, [][]string{{"http://a"}, {"udp://b"}}, T.metainfo.AnnounceList) + // Because trackers are disabled in TestingConfig. 
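+ // No tracker announcers should have been started for the merged tiers.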
+ assert.EqualValues(t, 0, len(T.trackerAnnouncers)) +} + +// We read from a piece which is marked completed, but is missing data. +func TestCompletedPieceWrongSize(t *testing.T) { + cfg := TestingConfig(t) + cfg.DefaultStorage = badStorage{} + cl, err := NewClient(cfg) + require.NoError(t, err) + defer cl.Close() + info := metainfo.Info{ + PieceLength: 15, + Pieces: make([]byte, 20), + Files: []metainfo.FileInfo{ + {Path: []string{"greeting"}, Length: 13}, + }, + } + b, err := bencode.Marshal(info) + require.NoError(t, err) + tt, new, err := cl.AddTorrentSpec(&TorrentSpec{ + InfoBytes: b, + InfoHash: metainfo.HashBytes(b), + }) + require.NoError(t, err) + defer tt.Drop() + assert.True(t, new) + r := tt.NewReader() + defer r.Close() + quicktest.Check(t, iotest.TestReader(r, []byte(testutil.GreetingFileContents)), quicktest.IsNil) +} + +func BenchmarkAddLargeTorrent(b *testing.B) { + cfg := TestingConfig(b) + cfg.DisableTCP = true + cfg.DisableUTP = true + cl, err := NewClient(cfg) + require.NoError(b, err) + defer cl.Close() + b.ReportAllocs() + for i := 0; i < b.N; i += 1 { + t, err := cl.AddTorrentFromFile("testdata/bootstrap.dat.torrent") + if err != nil { + b.Fatal(err) + } + t.Drop() + } +} + +func TestResponsive(t *testing.T) { + seederDataDir, mi := testutil.GreetingTestTorrent() + defer os.RemoveAll(seederDataDir) + cfg := TestingConfig(t) + cfg.Seed = true + cfg.DataDir = seederDataDir + seeder, err := NewClient(cfg) + require.Nil(t, err) + defer seeder.Close() + seederTorrent, _, _ := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi)) + seederTorrent.VerifyData() + leecherDataDir := t.TempDir() + cfg = TestingConfig(t) + cfg.DataDir = leecherDataDir + leecher, err := NewClient(cfg) + require.Nil(t, err) + defer leecher.Close() + leecherTorrent, _, _ := leecher.AddTorrentSpec(func() (ret *TorrentSpec) { + ret = TorrentSpecFromMetaInfo(mi) + ret.ChunkSize = 2 + return + }()) + leecherTorrent.AddClientPeer(seeder) + reader := leecherTorrent.NewReader() + defer reader.Close() + reader.SetReadahead(0) + reader.SetResponsive() + b := make([]byte, 2) + _, err = reader.Seek(3, io.SeekStart) + require.NoError(t, err) + _, err = io.ReadFull(reader, b) + assert.Nil(t, err) + assert.EqualValues(t, "lo", string(b)) + _, err = reader.Seek(11, io.SeekStart) + require.NoError(t, err) + n, err := io.ReadFull(reader, b) + assert.Nil(t, err) + assert.EqualValues(t, 2, n) + assert.EqualValues(t, "d\n", string(b)) +} + +// TestResponsive was the first test to fail if uTP is disabled and TCP sockets dial from the +// listening port. 
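+// The body below mirrors TestResponsive, with uTP disabled on the seeder.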
+func TestResponsiveTcpOnly(t *testing.T) { + seederDataDir, mi := testutil.GreetingTestTorrent() + defer os.RemoveAll(seederDataDir) + cfg := TestingConfig(t) + cfg.DisableUTP = true + cfg.Seed = true + cfg.DataDir = seederDataDir + seeder, err := NewClient(cfg) + require.Nil(t, err) + defer seeder.Close() + seederTorrent, _, _ := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi)) + seederTorrent.VerifyData() + leecherDataDir := t.TempDir() + cfg = TestingConfig(t) + cfg.DataDir = leecherDataDir + leecher, err := NewClient(cfg) + require.Nil(t, err) + defer leecher.Close() + leecherTorrent, _, _ := leecher.AddTorrentSpec(func() (ret *TorrentSpec) { + ret = TorrentSpecFromMetaInfo(mi) + ret.ChunkSize = 2 + return + }()) + leecherTorrent.AddClientPeer(seeder) + reader := leecherTorrent.NewReader() + defer reader.Close() + reader.SetReadahead(0) + reader.SetResponsive() + b := make([]byte, 2) + _, err = reader.Seek(3, io.SeekStart) + require.NoError(t, err) + _, err = io.ReadFull(reader, b) + assert.Nil(t, err) + assert.EqualValues(t, "lo", string(b)) + _, err = reader.Seek(11, io.SeekStart) + require.NoError(t, err) + n, err := io.ReadFull(reader, b) + assert.Nil(t, err) + assert.EqualValues(t, 2, n) + assert.EqualValues(t, "d\n", string(b)) +} + +func TestTorrentDroppedDuringResponsiveRead(t *testing.T) { + seederDataDir, mi := testutil.GreetingTestTorrent() + defer os.RemoveAll(seederDataDir) + cfg := TestingConfig(t) + cfg.Seed = true + cfg.DataDir = seederDataDir + seeder, err := NewClient(cfg) + require.Nil(t, err) + defer seeder.Close() + seederTorrent, _, _ := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi)) + seederTorrent.VerifyData() + leecherDataDir := t.TempDir() + cfg = TestingConfig(t) + cfg.DataDir = leecherDataDir + leecher, err := NewClient(cfg) + require.Nil(t, err) + defer leecher.Close() + leecherTorrent, _, _ := leecher.AddTorrentSpec(func() (ret *TorrentSpec) { + ret = TorrentSpecFromMetaInfo(mi) + ret.ChunkSize = 2 + return + }()) + leecherTorrent.AddClientPeer(seeder) + reader := leecherTorrent.NewReader() + defer reader.Close() + reader.SetReadahead(0) + reader.SetResponsive() + b := make([]byte, 2) + _, err = reader.Seek(3, io.SeekStart) + require.NoError(t, err) + _, err = io.ReadFull(reader, b) + assert.Nil(t, err) + assert.EqualValues(t, "lo", string(b)) + _, err = reader.Seek(11, io.SeekStart) + require.NoError(t, err) + leecherTorrent.Drop() + n, err := reader.Read(b) + assert.EqualError(t, err, "torrent closed") + assert.EqualValues(t, 0, n) +} + +func TestDhtInheritBlocklist(t *testing.T) { + ipl := iplist.New(nil) + require.NotNil(t, ipl) + cfg := TestingConfig(t) + cfg.IPBlocklist = ipl + cfg.NoDHT = false + cl, err := NewClient(cfg) + require.NoError(t, err) + defer cl.Close() + numServers := 0 + cl.eachDhtServer(func(s DhtServer) { + t.Log(s) + assert.Equal(t, ipl, s.(AnacrolixDhtServerWrapper).Server.IPBlocklist()) + numServers++ + }) + assert.EqualValues(t, 2, numServers) +} + +// Check that stuff is merged in subsequent AddTorrentSpec for the same +// infohash. 
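+// The second spec carries full metainfo, so Info() on the torrent returned by
+// the first add transitions from nil to non-nil.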
+func TestAddTorrentSpecMerging(t *testing.T) { + cl, err := NewClient(TestingConfig(t)) + require.NoError(t, err) + defer cl.Close() + dir, mi := testutil.GreetingTestTorrent() + defer os.RemoveAll(dir) + tt, new, err := cl.AddTorrentSpec(&TorrentSpec{ + InfoHash: mi.HashInfoBytes(), + }) + require.NoError(t, err) + require.True(t, new) + require.Nil(t, tt.Info()) + _, new, err = cl.AddTorrentSpec(TorrentSpecFromMetaInfo(mi)) + require.NoError(t, err) + require.False(t, new) + require.NotNil(t, tt.Info()) +} + +func TestTorrentDroppedBeforeGotInfo(t *testing.T) { + dir, mi := testutil.GreetingTestTorrent() + os.RemoveAll(dir) + cl, _ := NewClient(TestingConfig(t)) + defer cl.Close() + tt, _, _ := cl.AddTorrentSpec(&TorrentSpec{ + InfoHash: mi.HashInfoBytes(), + }) + tt.Drop() + assert.EqualValues(t, 0, len(cl.Torrents())) + select { + case <-tt.GotInfo(): + t.FailNow() + default: + } +} + +func writeTorrentData(ts *storage.Torrent, info metainfo.Info, b []byte) { + for i := 0; i < info.NumPieces(); i += 1 { + p := info.Piece(i) + ts.Piece(p).WriteAt(b[p.Offset():p.Offset()+p.Length()], 0) + } +} + +func testAddTorrentPriorPieceCompletion(t *testing.T, alreadyCompleted bool, csf func(*filecache.Cache) storage.ClientImpl) { + fileCacheDir := t.TempDir() + fileCache, err := filecache.NewCache(fileCacheDir) + require.NoError(t, err) + greetingDataTempDir, greetingMetainfo := testutil.GreetingTestTorrent() + defer os.RemoveAll(greetingDataTempDir) + filePieceStore := csf(fileCache) + info, err := greetingMetainfo.UnmarshalInfo() + require.NoError(t, err) + ih := greetingMetainfo.HashInfoBytes() + greetingData, err := storage.NewClient(filePieceStore).OpenTorrent(&info, ih) + require.NoError(t, err) + writeTorrentData(greetingData, info, []byte(testutil.GreetingFileContents)) + // require.Equal(t, len(testutil.GreetingFileContents), written) + // require.NoError(t, err) + for i := 0; i < info.NumPieces(); i++ { + p := info.Piece(i) + if alreadyCompleted { + require.NoError(t, greetingData.Piece(p).MarkComplete()) + } + } + cfg := TestingConfig(t) + // TODO: Disable network option? + cfg.DisableTCP = true + cfg.DisableUTP = true + cfg.DefaultStorage = filePieceStore + cl, err := NewClient(cfg) + require.NoError(t, err) + defer cl.Close() + tt, err := cl.AddTorrent(greetingMetainfo) + require.NoError(t, err) + psrs := tt.PieceStateRuns() + assert.Len(t, psrs, 1) + assert.EqualValues(t, 3, psrs[0].Length) + assert.Equal(t, alreadyCompleted, psrs[0].Complete) + if alreadyCompleted { + r := tt.NewReader() + quicktest.Check(t, iotest.TestReader(r, []byte(testutil.GreetingFileContents)), quicktest.IsNil) + } +} + +func TestAddTorrentPiecesAlreadyCompleted(t *testing.T) { + testAddTorrentPriorPieceCompletion(t, true, fileCachePieceResourceStorage) +} + +func TestAddTorrentPiecesNotAlreadyCompleted(t *testing.T) { + testAddTorrentPriorPieceCompletion(t, false, fileCachePieceResourceStorage) +} + +func TestAddMetainfoWithNodes(t *testing.T) { + cfg := TestingConfig(t) + cfg.ListenHost = func(string) string { return "" } + cfg.NoDHT = false + cfg.DhtStartingNodes = func(string) dht.StartingNodesGetter { return func() ([]dht.Addr, error) { return nil, nil } } + // For now, we want to just jam the nodes into the table, without verifying them first. Also the + // DHT code doesn't support mixing secure and insecure nodes if security is enabled (yet). 
+ // cfg.DHTConfig.NoSecurity = true + cl, err := NewClient(cfg) + require.NoError(t, err) + defer cl.Close() + sum := func() (ret int64) { + cl.eachDhtServer(func(s DhtServer) { + ret += s.Stats().(dht.ServerStats).OutboundQueriesAttempted + }) + return + } + assert.EqualValues(t, 0, sum()) + tt, err := cl.AddTorrentFromFile("metainfo/testdata/issue_65a.torrent") + require.NoError(t, err) + // Nodes are not added or exposed in Torrent's metainfo. We just randomly + // check if the announce-list is here instead. TODO: Add nodes. + assert.Len(t, tt.metainfo.AnnounceList, 5) + // There are 6 nodes in the torrent file. + for sum() != int64(6*len(cl.dhtServers)) { + time.Sleep(time.Millisecond) + } +} + +type testDownloadCancelParams struct { + SetLeecherStorageCapacity bool + LeecherStorageCapacity int64 + Cancel bool +} + +func testDownloadCancel(t *testing.T, ps testDownloadCancelParams) { + greetingTempDir, mi := testutil.GreetingTestTorrent() + defer os.RemoveAll(greetingTempDir) + cfg := TestingConfig(t) + cfg.Seed = true + cfg.DataDir = greetingTempDir + seeder, err := NewClient(cfg) + require.NoError(t, err) + defer seeder.Close() + defer testutil.ExportStatusWriter(seeder, "s", t)() + seederTorrent, _, _ := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi)) + seederTorrent.VerifyData() + leecherDataDir := t.TempDir() + fc, err := filecache.NewCache(leecherDataDir) + require.NoError(t, err) + if ps.SetLeecherStorageCapacity { + fc.SetCapacity(ps.LeecherStorageCapacity) + } + cfg.DefaultStorage = storage.NewResourcePieces(fc.AsResourceProvider()) + cfg.DataDir = leecherDataDir + leecher, err := NewClient(cfg) + require.NoError(t, err) + defer leecher.Close() + defer testutil.ExportStatusWriter(leecher, "l", t)() + leecherGreeting, new, err := leecher.AddTorrentSpec(func() (ret *TorrentSpec) { + ret = TorrentSpecFromMetaInfo(mi) + ret.ChunkSize = 2 + return + }()) + require.NoError(t, err) + assert.True(t, new) + psc := leecherGreeting.SubscribePieceStateChanges() + defer psc.Close() + + leecherGreeting.cl.lock() + leecherGreeting.downloadPiecesLocked(0, leecherGreeting.numPieces()) + if ps.Cancel { + leecherGreeting.cancelPiecesLocked(0, leecherGreeting.NumPieces(), "") + } + leecherGreeting.cl.unlock() + done := make(chan struct{}) + defer close(done) + go leecherGreeting.AddClientPeer(seeder) + completes := make(map[int]bool, 3) + expected := func() map[int]bool { + if ps.Cancel { + return map[int]bool{0: false, 1: false, 2: false} + } else { + return map[int]bool{0: true, 1: true, 2: true} + } + }() + for !reflect.DeepEqual(completes, expected) { + v := <-psc.Values + completes[v.Index] = v.Complete + } +} + +func TestTorrentDownloadAll(t *testing.T) { + testDownloadCancel(t, testDownloadCancelParams{}) +} + +func TestTorrentDownloadAllThenCancel(t *testing.T) { + testDownloadCancel(t, testDownloadCancelParams{ + Cancel: true, + }) +} + +// Ensure that it's an error for a peer to send an invalid have message. 
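+// "Invalid" means a piece index beyond the single-piece torrent constructed
+// below: have(0) is in range and must succeed, while have(1) must error.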
+func TestPeerInvalidHave(t *testing.T) { + cfg := TestingConfig(t) + cfg.DropMutuallyCompletePeers = false + cl, err := NewClient(cfg) + require.NoError(t, err) + defer cl.Close() + info := metainfo.Info{ + PieceLength: 1, + Pieces: make([]byte, 20), + Files: []metainfo.FileInfo{{Length: 1}}, + } + infoBytes, err := bencode.Marshal(info) + require.NoError(t, err) + tt, _new, err := cl.AddTorrentSpec(&TorrentSpec{ + InfoBytes: infoBytes, + InfoHash: metainfo.HashBytes(infoBytes), + Storage: badStorage{}, + }) + require.NoError(t, err) + assert.True(t, _new) + defer tt.Drop() + cn := &PeerConn{Peer: Peer{ + t: tt, + callbacks: &cfg.Callbacks, + }} + tt.conns[cn] = struct{}{} + cn.peerImpl = cn + cl.lock() + defer cl.unlock() + assert.NoError(t, cn.peerSentHave(0)) + assert.Error(t, cn.peerSentHave(1)) +} + +func TestPieceCompletedInStorageButNotClient(t *testing.T) { + greetingTempDir, greetingMetainfo := testutil.GreetingTestTorrent() + defer os.RemoveAll(greetingTempDir) + cfg := TestingConfig(t) + cfg.DataDir = greetingTempDir + seeder, err := NewClient(TestingConfig(t)) + require.NoError(t, err) + defer seeder.Close() + seeder.AddTorrentSpec(&TorrentSpec{ + InfoBytes: greetingMetainfo.InfoBytes, + }) +} + +// Check that when the listen port is 0, all the protocols listened on have +// the same port, and it isn't zero. +func TestClientDynamicListenPortAllProtocols(t *testing.T) { + cl, err := NewClient(TestingConfig(t)) + require.NoError(t, err) + defer cl.Close() + port := cl.LocalPort() + assert.NotEqual(t, 0, port) + cl.eachListener(func(s Listener) bool { + assert.Equal(t, port, missinggo.AddrPort(s.Addr())) + return true + }) +} + +func TestClientDynamicListenTCPOnly(t *testing.T) { + cfg := TestingConfig(t) + cfg.DisableUTP = true + cfg.DisableTCP = false + cl, err := NewClient(cfg) + require.NoError(t, err) + defer cl.Close() + assert.NotEqual(t, 0, cl.LocalPort()) +} + +func TestClientDynamicListenUTPOnly(t *testing.T) { + cfg := TestingConfig(t) + cfg.DisableTCP = true + cfg.DisableUTP = false + cl, err := NewClient(cfg) + require.NoError(t, err) + defer cl.Close() + assert.NotEqual(t, 0, cl.LocalPort()) +} + +func totalConns(tts []*Torrent) (ret int) { + for _, tt := range tts { + tt.cl.lock() + ret += len(tt.conns) + tt.cl.unlock() + } + return +} + +func TestSetMaxEstablishedConn(t *testing.T) { + var tts []*Torrent + ih := testutil.GreetingMetaInfo().HashInfoBytes() + cfg := TestingConfig(t) + cfg.DisableAcceptRateLimiting = true + cfg.DropDuplicatePeerIds = true + for i := 0; i < 3; i += 1 { + cl, err := NewClient(cfg) + require.NoError(t, err) + defer cl.Close() + tt, _ := cl.AddTorrentInfoHash(ih) + tt.SetMaxEstablishedConns(2) + defer testutil.ExportStatusWriter(cl, fmt.Sprintf("%d", i), t)() + tts = append(tts, tt) + } + addPeers := func() { + for _, tt := range tts { + for _, _tt := range tts { + // if tt != _tt { + tt.AddClientPeer(_tt.cl) + // } + } + } + } + waitTotalConns := func(num int) { + for totalConns(tts) != num { + addPeers() + time.Sleep(time.Millisecond) + } + } + addPeers() + waitTotalConns(6) + tts[0].SetMaxEstablishedConns(1) + waitTotalConns(4) + tts[0].SetMaxEstablishedConns(0) + waitTotalConns(2) + tts[0].SetMaxEstablishedConns(1) + addPeers() + waitTotalConns(4) + tts[0].SetMaxEstablishedConns(2) + addPeers() + waitTotalConns(6) +} + +// Creates a file containing its own name as data. Make a metainfo from that, adds it to the given +// client, and returns a magnet link. 
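+// Data is verified and seeding asserted before the magnet link is returned,
+// so a leecher in the same test can fetch it immediately.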
+func makeMagnet(t *testing.T, cl *Client, dir, name string) string { + os.MkdirAll(dir, 0o770) + file, err := os.Create(filepath.Join(dir, name)) + require.NoError(t, err) + file.Write([]byte(name)) + file.Close() + mi := metainfo.MetaInfo{} + mi.SetDefaults() + info := metainfo.Info{PieceLength: 256 * 1024} + err = info.BuildFromFilePath(filepath.Join(dir, name)) + require.NoError(t, err) + mi.InfoBytes, err = bencode.Marshal(info) + require.NoError(t, err) + magnet := mi.Magnet(nil, &info).String() + tr, err := cl.AddTorrent(&mi) + require.NoError(t, err) + require.True(t, tr.Seeding()) + tr.VerifyData() + return magnet +} + +// https://github.com/anacrolix/torrent/issues/114 +func TestMultipleTorrentsWithEncryption(t *testing.T) { + testSeederLeecherPair( + t, + func(cfg *ClientConfig) { + cfg.HeaderObfuscationPolicy.Preferred = true + cfg.HeaderObfuscationPolicy.RequirePreferred = true + }, + func(cfg *ClientConfig) { + cfg.HeaderObfuscationPolicy.RequirePreferred = false + }, + ) +} + +// Test that the leecher can download a torrent in its entirety from the seeder. Note that the +// seeder config is done first. +func testSeederLeecherPair(t *testing.T, seeder, leecher func(*ClientConfig)) { + cfg := TestingConfig(t) + cfg.Seed = true + cfg.DataDir = filepath.Join(cfg.DataDir, "server") + os.Mkdir(cfg.DataDir, 0o755) + seeder(cfg) + server, err := NewClient(cfg) + require.NoError(t, err) + defer server.Close() + defer testutil.ExportStatusWriter(server, "s", t)() + magnet1 := makeMagnet(t, server, cfg.DataDir, "test1") + // Extra torrents are added to test the seeder having to match incoming obfuscated headers + // against more than one torrent. See issue #114 + makeMagnet(t, server, cfg.DataDir, "test2") + for i := 0; i < 100; i++ { + makeMagnet(t, server, cfg.DataDir, fmt.Sprintf("test%d", i+2)) + } + cfg = TestingConfig(t) + cfg.DataDir = filepath.Join(cfg.DataDir, "client") + leecher(cfg) + client, err := NewClient(cfg) + require.NoError(t, err) + defer client.Close() + defer testutil.ExportStatusWriter(client, "c", t)() + tr, err := client.AddMagnet(magnet1) + require.NoError(t, err) + tr.AddClientPeer(server) + <-tr.GotInfo() + tr.DownloadAll() + client.WaitAll() +} + +// This appears to be the situation with the S3 BitTorrent client. +func TestObfuscatedHeaderFallbackSeederDisallowsLeecherPrefers(t *testing.T) { + // Leecher prefers obfuscation, but the seeder does not allow it. + testSeederLeecherPair( + t, + func(cfg *ClientConfig) { + cfg.HeaderObfuscationPolicy.Preferred = false + cfg.HeaderObfuscationPolicy.RequirePreferred = true + }, + func(cfg *ClientConfig) { + cfg.HeaderObfuscationPolicy.Preferred = true + cfg.HeaderObfuscationPolicy.RequirePreferred = false + }, + ) +} + +func TestObfuscatedHeaderFallbackSeederRequiresLeecherPrefersNot(t *testing.T) { + // Leecher prefers no obfuscation, but the seeder enforces it. 
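+ // The connection should only succeed because the leecher falls back to
+ // obfuscated headers after its plaintext preference is refused.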
+ testSeederLeecherPair( + t, + func(cfg *ClientConfig) { + cfg.HeaderObfuscationPolicy.Preferred = true + cfg.HeaderObfuscationPolicy.RequirePreferred = true + }, + func(cfg *ClientConfig) { + cfg.HeaderObfuscationPolicy.Preferred = false + cfg.HeaderObfuscationPolicy.RequirePreferred = false + }, + ) +} + +func TestClientAddressInUse(t *testing.T) { + s, _ := NewUtpSocket("udp", "localhost:50007", nil, log.Default) + if s != nil { + defer s.Close() + } + cfg := TestingConfig(t).SetListenAddr("localhost:50007") + cfg.DisableUTP = false + cl, err := NewClient(cfg) + if err == nil { + assert.Nil(t, cl.Close()) + } + require.Error(t, err) + require.Nil(t, cl) +} + +func TestClientHasDhtServersWhenUtpDisabled(t *testing.T) { + cc := TestingConfig(t) + cc.DisableUTP = true + cc.NoDHT = false + cl, err := NewClient(cc) + require.NoError(t, err) + defer cl.Close() + assert.NotEmpty(t, cl.DhtServers()) +} + +func TestClientDisabledImplicitNetworksButDhtEnabled(t *testing.T) { + cfg := TestingConfig(t) + cfg.DisableTCP = true + cfg.DisableUTP = true + cfg.NoDHT = false + cl, err := NewClient(cfg) + require.NoError(t, err) + defer cl.Close() + assert.Empty(t, cl.listeners) + assert.NotEmpty(t, cl.DhtServers()) +} + +func TestBadPeerIpPort(t *testing.T) { + for _, tc := range []struct { + title string + ip net.IP + port int + expectedOk bool + setup func(*Client) + }{ + {"empty both", nil, 0, true, func(*Client) {}}, + {"empty/nil ip", nil, 6666, true, func(*Client) {}}, + { + "empty port", + net.ParseIP("127.0.0.1/32"), + 0, true, + func(*Client) {}, + }, + { + "in doppleganger addresses", + net.ParseIP("127.0.0.1/32"), + 2322, + true, + func(cl *Client) { + cl.dopplegangerAddrs["10.0.0.1:2322"] = struct{}{} + }, + }, + { + "in IP block list", + net.ParseIP("10.0.0.1"), + 2322, + true, + func(cl *Client) { + cl.ipBlockList = iplist.New([]iplist.Range{ + {First: net.ParseIP("10.0.0.1"), Last: net.ParseIP("10.0.0.255")}, + }) + }, + }, + { + "in bad peer IPs", + net.ParseIP("10.0.0.1"), + 2322, + true, + func(cl *Client) { + ipAddr, ok := netip.AddrFromSlice(net.ParseIP("10.0.0.1")) + require.True(t, ok) + cl.badPeerIPs = map[netip.Addr]struct{}{} + cl.badPeerIPs[ipAddr] = struct{}{} + }, + }, + { + "good", + net.ParseIP("10.0.0.1"), + 2322, + false, + func(cl *Client) {}, + }, + } { + t.Run(tc.title, func(t *testing.T) { + cfg := TestingConfig(t) + cfg.DisableTCP = true + cfg.DisableUTP = true + cfg.NoDHT = false + cl, err := NewClient(cfg) + require.NoError(t, err) + defer cl.Close() + + tc.setup(cl) + require.Equal(t, tc.expectedOk, cl.badPeerIPPort(tc.ip, tc.port)) + }) + } +} + +// https://github.com/anacrolix/torrent/issues/837 +func TestClientConfigSetHandlerNotIgnored(t *testing.T) { + cfg := TestingConfig(t) + cfg.Logger.SetHandlers(log.DiscardHandler) + c := qt.New(t) + cl, err := NewClient(cfg) + c.Assert(err, qt.IsNil) + defer cl.Close() + c.Assert(cl.logger.Handlers, qt.HasLen, 1) + h := cl.logger.Handlers[0].(log.StreamHandler) + c.Check(h.W, qt.Equals, io.Discard) +} diff --git a/deps/github.com/anacrolix/torrent/cmd/magnet-metainfo/main.go b/deps/github.com/anacrolix/torrent/cmd/magnet-metainfo/main.go new file mode 100644 index 0000000..536f7ab --- /dev/null +++ b/deps/github.com/anacrolix/torrent/cmd/magnet-metainfo/main.go @@ -0,0 +1,59 @@ +// Converts magnet URIs and info hashes into torrent metainfo files. 
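+// For each magnet URI given on the command line, it waits for the info to
+// arrive from the swarm, then writes <name>.torrent to the working directory.
+// Usage sketch:
+//
+//	magnet-metainfo 'magnet:?xt=urn:btih:...'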
+package main + +import ( + "log" + "net/http" + "os" + "sync" + + _ "github.com/anacrolix/envpprof" + "github.com/anacrolix/tagflag" + + "github.com/anacrolix/torrent" + "github.com/anacrolix/torrent/bencode" +) + +func main() { + args := struct { + tagflag.StartPos + Magnet []string + }{} + tagflag.Parse(&args) + cl, err := torrent.NewClient(nil) + if err != nil { + log.Fatalf("error creating client: %s", err) + } + http.HandleFunc("/torrent", func(w http.ResponseWriter, r *http.Request) { + cl.WriteStatus(w) + }) + http.HandleFunc("/dht", func(w http.ResponseWriter, r *http.Request) { + for _, ds := range cl.DhtServers() { + ds.WriteStatus(w) + } + }) + wg := sync.WaitGroup{} + for _, arg := range args.Magnet { + t, err := cl.AddMagnet(arg) + if err != nil { + log.Fatalf("error adding magnet to client: %s", err) + } + wg.Add(1) + go func() { + defer wg.Done() + <-t.GotInfo() + mi := t.Metainfo() + t.Drop() + f, err := os.Create(t.Info().Name + ".torrent") + if err != nil { + log.Fatalf("error creating torrent metainfo file: %s", err) + } + defer f.Close() + err = bencode.NewEncoder(f).Encode(mi) + if err != nil { + log.Fatalf("error writing torrent metainfo file: %s", err) + } + }() + } + wg.Wait() +} diff --git a/deps/github.com/anacrolix/torrent/cmd/torrent-pick/main.go b/deps/github.com/anacrolix/torrent/cmd/torrent-pick/main.go new file mode 100644 index 0000000..73a597f --- /dev/null +++ b/deps/github.com/anacrolix/torrent/cmd/torrent-pick/main.go @@ -0,0 +1,189 @@ +// Downloads torrents from the command-line. +package main + +import ( + "bufio" + "fmt" + "io" + "log" + "net" + "net/http" + _ "net/http/pprof" + "os" + "strings" + "time" + + _ "github.com/anacrolix/envpprof" + "github.com/dustin/go-humanize" + "github.com/jessevdk/go-flags" + + "github.com/anacrolix/torrent" + "github.com/anacrolix/torrent/metainfo" +) + +// fmt.Fprintf(os.Stderr, "Usage: %s \n", os.Args[0]) + +func resolvedPeerAddrs(ss []string) (ret []torrent.PeerInfo, err error) { + for _, s := range ss { + var addr *net.TCPAddr + addr, err = net.ResolveTCPAddr("tcp", s) + if err != nil { + return + } + ret = append(ret, torrent.PeerInfo{ + Addr: addr, + }) + } + return +} + +func bytesCompleted(tc *torrent.Client) (ret int64) { + for _, t := range tc.Torrents() { + if t.Info() != nil { + ret += t.BytesCompleted() + } + } + return +} + +// Returns an estimate of the total bytes for all torrents. +func totalBytesEstimate(tc *torrent.Client) (ret int64) { + var noInfo, hadInfo int64 + for _, t := range tc.Torrents() { + info := t.Info() + if info == nil { + noInfo++ + continue + } + ret += info.TotalLength() + hadInfo++ + } + if hadInfo != 0 { + // Treat each torrent without info as the average of those with, + // rounded up. + ret += (noInfo*ret + hadInfo - 1) / hadInfo + } + return +} + +func progressLine(tc *torrent.Client) string { + return fmt.Sprintf("\033[K%s / %s\r", humanize.Bytes(uint64(bytesCompleted(tc))), humanize.Bytes(uint64(totalBytesEstimate(tc)))) +} + +func dstFileName(picked string) string { + parts := strings.Split(picked, "/") + return parts[len(parts)-1] +} + +func main() { + log.SetFlags(log.LstdFlags | log.Lshortfile) + rootGroup := struct { + Client *torrent.ClientConfig `group:"Client Options"` + TestPeers []string `long:"test-peer" description:"address of peer to inject to every torrent"` + Pick string `long:"pick" description:"filename to pick"` + }{ + Client: torrent.NewDefaultClientConfig(), + } + // Don't pass flags.PrintError because it's inconsistent with printing. 
+ // https://github.com/jessevdk/go-flags/issues/132 + parser := flags.NewParser(&rootGroup, flags.HelpFlag|flags.PassDoubleDash) + parser.Usage = "[OPTIONS] (magnet URI or .torrent file path)..." + posArgs, err := parser.Parse() + if err != nil { + fmt.Fprintf(os.Stderr, "%s", "Download from the BitTorrent network.\n\n") + fmt.Println(err) + os.Exit(2) + } + log.Printf("File to pick: %s", rootGroup.Pick) + + testPeers, err := resolvedPeerAddrs(rootGroup.TestPeers) + if err != nil { + log.Fatal(err) + } + + if len(posArgs) == 0 { + fmt.Fprintln(os.Stderr, "no torrents specified") + return + } + + tmpdir, err := os.MkdirTemp("", "torrent-pick-") + if err != nil { + log.Fatal(err) + } + + defer os.RemoveAll(tmpdir) + + rootGroup.Client.DataDir = tmpdir + + client, err := torrent.NewClient(rootGroup.Client) + if err != nil { + log.Fatalf("error creating client: %s", err) + } + http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + client.WriteStatus(w) + }) + defer client.Close() + + dstName := dstFileName(rootGroup.Pick) + + f, err := os.Create(dstName) + if err != nil { + log.Fatal(err) + } + dstWriter := bufio.NewWriter(f) + + done := make(chan struct{}) + for _, arg := range posArgs { + t := func() *torrent.Torrent { + if strings.HasPrefix(arg, "magnet:") { + t, err := client.AddMagnet(arg) + if err != nil { + log.Fatalf("error adding magnet: %s", err) + } + return t + } else { + metaInfo, err := metainfo.LoadFromFile(arg) + if err != nil { + log.Fatal(err) + } + t, err := client.AddTorrent(metaInfo) + if err != nil { + log.Fatal(err) + } + return t + } + }() + t.AddPeers(testPeers) + + go func() { + defer close(done) + <-t.GotInfo() + for _, file := range t.Files() { + if file.DisplayPath() != rootGroup.Pick { + continue + } + file.Download() + srcReader := file.NewReader() + defer srcReader.Close() + io.Copy(dstWriter, srcReader) + return + } + log.Print("file not found") + }() + } + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() +waitDone: + for { + select { + case <-done: + break waitDone + case <-ticker.C: + os.Stdout.WriteString(progressLine(client)) + } + } + if rootGroup.Client.Seed { + select {} + } +} diff --git a/deps/github.com/anacrolix/torrent/cmd/torrent-verify/main.go b/deps/github.com/anacrolix/torrent/cmd/torrent-verify/main.go new file mode 100644 index 0000000..0fbf024 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/cmd/torrent-verify/main.go @@ -0,0 +1,94 @@ +package main + +import ( + "bytes" + "crypto/sha1" + "fmt" + "io" + "log" + "os" + "path/filepath" + + "github.com/anacrolix/tagflag" + "github.com/edsrzf/mmap-go" + + "github.com/anacrolix/torrent/metainfo" + "github.com/anacrolix/torrent/mmap_span" + "github.com/anacrolix/torrent/storage" +) + +func mmapFile(name string) (mm storage.FileMapping, err error) { + f, err := os.Open(name) + if err != nil { + return + } + defer func() { + if err != nil { + f.Close() + } + }() + fi, err := f.Stat() + if err != nil { + return + } + if fi.Size() == 0 { + return + } + reg, err := mmap.MapRegion(f, -1, mmap.RDONLY, mmap.COPY, 0) + if err != nil { + return + } + return storage.WrapFileMapping(reg, f), nil +} + +func verifyTorrent(info *metainfo.Info, root string) error { + span := new(mmap_span.MMapSpan) + for _, file := range info.UpvertedFiles() { + filename := filepath.Join(append([]string{root, info.Name}, file.Path...)...) 
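+ // UpvertedFiles presents single-file torrents in the multi-file shape, so
+ // the joined path above is correct for either layout.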
+ mm, err := mmapFile(filename) + if err != nil { + return err + } + if int64(len(mm.Bytes())) != file.Length { + return fmt.Errorf("file %q has wrong length", filename) + } + span.Append(mm) + } + span.InitIndex() + for i, numPieces := 0, info.NumPieces(); i < numPieces; i += 1 { + p := info.Piece(i) + hash := sha1.New() + _, err := io.Copy(hash, io.NewSectionReader(span, p.Offset(), p.Length())) + if err != nil { + return err + } + good := bytes.Equal(hash.Sum(nil), p.Hash().Bytes()) + if !good { + return fmt.Errorf("hash mismatch at piece %d", i) + } + fmt.Printf("%d: %v: %v\n", i, p.Hash(), good) + } + return nil +} + +func main() { + log.SetFlags(log.Flags() | log.Lshortfile) + flags := struct { + DataDir string + tagflag.StartPos + TorrentFile string + }{} + tagflag.Parse(&flags) + metaInfo, err := metainfo.LoadFromFile(flags.TorrentFile) + if err != nil { + log.Fatal(err) + } + info, err := metaInfo.UnmarshalInfo() + if err != nil { + log.Fatalf("error unmarshalling info: %s", err) + } + err = verifyTorrent(&info, flags.DataDir) + if err != nil { + log.Fatalf("torrent failed verification: %s", err) + } +} diff --git a/deps/github.com/anacrolix/torrent/cmd/torrent/announce.go b/deps/github.com/anacrolix/torrent/cmd/torrent/announce.go new file mode 100644 index 0000000..31676d9 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/cmd/torrent/announce.go @@ -0,0 +1,40 @@ +package main + +import ( + "fmt" + + "github.com/davecgh/go-spew/spew" + + "github.com/anacrolix/torrent" + "github.com/anacrolix/torrent/tracker" + "github.com/anacrolix/torrent/tracker/udp" +) + +type AnnounceCmd struct { + Event udp.AnnounceEvent + Port *uint16 + Tracker string `arg:"positional"` + InfoHash torrent.InfoHash `arg:"positional"` +} + +func announceErr(flags AnnounceCmd) error { + req := tracker.AnnounceRequest{ + InfoHash: flags.InfoHash, + Port: uint16(torrent.NewDefaultClientConfig().ListenPort), + NumWant: -1, + Event: flags.Event, + Left: -1, + } + if flags.Port != nil { + req.Port = *flags.Port + } + response, err := tracker.Announce{ + TrackerUrl: flags.Tracker, + Request: req, + }.Do() + if err != nil { + return fmt.Errorf("doing announce: %w", err) + } + spew.Dump(response) + return nil +} diff --git a/deps/github.com/anacrolix/torrent/cmd/torrent/create.go b/deps/github.com/anacrolix/torrent/cmd/torrent/create.go new file mode 100644 index 0000000..5169a1f --- /dev/null +++ b/deps/github.com/anacrolix/torrent/cmd/torrent/create.go @@ -0,0 +1,70 @@ +package main + +import ( + "os" + + "github.com/anacrolix/bargle" + "github.com/anacrolix/tagflag" + + "github.com/anacrolix/torrent/bencode" + "github.com/anacrolix/torrent/metainfo" +) + +var builtinAnnounceList = [][]string{ + {"http://p4p.arenabg.com:1337/announce"}, + {"udp://tracker.opentrackr.org:1337/announce"}, + {"udp://tracker.openbittorrent.com:6969/announce"}, +} + +func create() (cmd bargle.Command) { + var args struct { + AnnounceList []string `name:"a" help:"extra announce-list tier entry"` + EmptyAnnounceList bool `name:"n" help:"exclude default announce-list entries"` + Comment string `name:"t" help:"comment"` + CreatedBy string `name:"c" help:"created by"` + InfoName *string `name:"i" help:"override info name (defaults to ROOT)"` + PieceLength tagflag.Bytes + Url []string `name:"u" help:"add webseed url"` + Private *bool + Root string `arg:"positional"` + } + cmd = bargle.FromStruct(&args) + cmd.Desc = "Creates a torrent metainfo for the file system rooted at ROOT, and outputs it to stdout" + cmd.DefaultAction = func() (err 
error) { + mi := metainfo.MetaInfo{ + AnnounceList: builtinAnnounceList, + } + if args.EmptyAnnounceList { + mi.AnnounceList = make([][]string, 0) + } + for _, a := range args.AnnounceList { + mi.AnnounceList = append(mi.AnnounceList, []string{a}) + } + mi.SetDefaults() + if len(args.Comment) > 0 { + mi.Comment = args.Comment + } + if len(args.CreatedBy) > 0 { + mi.CreatedBy = args.CreatedBy + } + mi.UrlList = args.Url + info := metainfo.Info{ + PieceLength: args.PieceLength.Int64(), + Private: args.Private, + } + err = info.BuildFromFilePath(args.Root) + if err != nil { + return + } + if args.InfoName != nil { + info.Name = *args.InfoName + } + mi.InfoBytes, err = bencode.Marshal(info) + if err != nil { + return + } + err = mi.Write(os.Stdout) + return + } + return +} diff --git a/deps/github.com/anacrolix/torrent/cmd/torrent/download.go b/deps/github.com/anacrolix/torrent/cmd/torrent/download.go new file mode 100644 index 0000000..cb3ca58 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/cmd/torrent/download.go @@ -0,0 +1,394 @@ +package main + +import ( + "context" + "expvar" + "fmt" + "io" + "net" + "net/http" + "os" + "os/signal" + "strings" + "sync" + "syscall" + "time" + + "github.com/anacrolix/log" + "github.com/anacrolix/tagflag" + "github.com/davecgh/go-spew/spew" + "github.com/dustin/go-humanize" + "golang.org/x/time/rate" + + "github.com/anacrolix/torrent" + "github.com/anacrolix/torrent/iplist" + "github.com/anacrolix/torrent/metainfo" + pp "github.com/anacrolix/torrent/peer_protocol" + "github.com/anacrolix/torrent/storage" +) + +func torrentBar(t *torrent.Torrent, pieceStates bool) { + go func() { + start := time.Now() + if t.Info() == nil { + fmt.Printf("%v: getting torrent info for %q\n", time.Since(start), t.Name()) + <-t.GotInfo() + } + lastStats := t.Stats() + var lastLine string + interval := 3 * time.Second + for range time.Tick(interval) { + var completedPieces, partialPieces int + psrs := t.PieceStateRuns() + for _, r := range psrs { + if r.Complete { + completedPieces += r.Length + } + if r.Partial { + partialPieces += r.Length + } + } + stats := t.Stats() + byteRate := int64(time.Second) + byteRate *= stats.BytesReadUsefulData.Int64() - lastStats.BytesReadUsefulData.Int64() + byteRate /= int64(interval) + line := fmt.Sprintf( + "%v: downloading %q: %s/%s, %d/%d pieces completed (%d partial): %v/s\n", + time.Since(start), + t.Name(), + humanize.Bytes(uint64(t.BytesCompleted())), + humanize.Bytes(uint64(t.Length())), + completedPieces, + t.NumPieces(), + partialPieces, + humanize.Bytes(uint64(byteRate)), + ) + if line != lastLine { + lastLine = line + os.Stdout.WriteString(line) + } + if pieceStates { + fmt.Println(psrs) + } + lastStats = stats + } + }() +} + +type stringAddr string + +func (stringAddr) Network() string { return "" } +func (me stringAddr) String() string { return string(me) } + +func resolveTestPeers(addrs []string) (ret []torrent.PeerInfo) { + for _, ta := range addrs { + ret = append(ret, torrent.PeerInfo{ + Addr: stringAddr(ta), + }) + } + return +} + +func addTorrents(ctx context.Context, client *torrent.Client, flags downloadFlags, wg *sync.WaitGroup) error { + testPeers := resolveTestPeers(flags.TestPeer) + for _, arg := range flags.Torrent { + t, err := func() (*torrent.Torrent, error) { + if strings.HasPrefix(arg, "magnet:") { + t, err := client.AddMagnet(arg) + if err != nil { + return nil, fmt.Errorf("error adding magnet: %w", err) + } + return t, nil + } else if strings.HasPrefix(arg, "http://") || strings.HasPrefix(arg, "https://") { 
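+				// The argument's form decides how the torrent is added:
+				// "magnet:" and "infohash:" prefixes skip metainfo entirely
+				// and resolve via the swarm, while this branch fetches the
+				// bencoded metainfo over HTTP and the final branch reads it
+				// from a local .torrent file.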
+ response, err := http.Get(arg) + if err != nil { + return nil, fmt.Errorf("Error downloading torrent file: %s", err) + } + + metaInfo, err := metainfo.Load(response.Body) + defer response.Body.Close() + if err != nil { + return nil, fmt.Errorf("error loading torrent file %q: %s\n", arg, err) + } + t, err := client.AddTorrent(metaInfo) + if err != nil { + return nil, fmt.Errorf("adding torrent: %w", err) + } + return t, nil + } else if strings.HasPrefix(arg, "infohash:") { + t, _ := client.AddTorrentInfoHash(metainfo.NewHashFromHex(strings.TrimPrefix(arg, "infohash:"))) + return t, nil + } else { + metaInfo, err := metainfo.LoadFromFile(arg) + if err != nil { + return nil, fmt.Errorf("error loading torrent file %q: %s\n", arg, err) + } + t, err := client.AddTorrent(metaInfo) + if err != nil { + return nil, fmt.Errorf("adding torrent: %w", err) + } + return t, nil + } + }() + if err != nil { + return fmt.Errorf("adding torrent for %q: %w", arg, err) + } + if flags.Progress { + torrentBar(t, flags.PieceStates) + } + t.AddPeers(testPeers) + wg.Add(1) + go func() { + defer wg.Done() + select { + case <-ctx.Done(): + return + case <-t.GotInfo(): + } + if flags.SaveMetainfos { + path := fmt.Sprintf("%v.torrent", t.InfoHash().HexString()) + err := writeMetainfoToFile(t.Metainfo(), path) + if err == nil { + log.Printf("wrote %q", path) + } else { + log.Printf("error writing %q: %v", path, err) + } + } + if len(flags.File) == 0 { + t.DownloadAll() + wg.Add(1) + go func() { + defer wg.Done() + waitForPieces(ctx, t, 0, t.NumPieces()) + }() + if flags.LinearDiscard { + r := t.NewReader() + io.Copy(io.Discard, r) + r.Close() + } + } else { + for _, f := range t.Files() { + for _, fileArg := range flags.File { + if f.DisplayPath() == fileArg { + wg.Add(1) + go func() { + defer wg.Done() + waitForPieces(ctx, t, f.BeginPieceIndex(), f.EndPieceIndex()) + }() + f.Download() + if flags.LinearDiscard { + r := f.NewReader() + go func() { + defer r.Close() + io.Copy(io.Discard, r) + }() + } + } + } + } + } + }() + } + return nil +} + +func waitForPieces(ctx context.Context, t *torrent.Torrent, beginIndex, endIndex int) { + sub := t.SubscribePieceStateChanges() + defer sub.Close() + expected := storage.Completion{ + Complete: true, + Ok: true, + } + pending := make(map[int]struct{}) + for i := beginIndex; i < endIndex; i++ { + if t.Piece(i).State().Completion != expected { + pending[i] = struct{}{} + } + } + for { + if len(pending) == 0 { + return + } + select { + case ev := <-sub.Values: + if ev.Completion == expected { + delete(pending, ev.Index) + } + case <-ctx.Done(): + return + } + } +} + +func writeMetainfoToFile(mi metainfo.MetaInfo, path string) error { + f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o640) + if err != nil { + return err + } + defer f.Close() + err = mi.Write(f) + if err != nil { + return err + } + return f.Close() +} + +type downloadFlags struct { + Debug bool + DownloadCmd +} + +type DownloadCmd struct { + SaveMetainfos bool + Mmap bool `help:"memory-map torrent data"` + Seed bool `help:"seed after download is complete"` + Addr string `help:"network listen addr"` + MaxUnverifiedBytes *tagflag.Bytes `help:"maximum number bytes to have pending verification"` + UploadRate *tagflag.Bytes `help:"max piece bytes to send per second"` + DownloadRate *tagflag.Bytes `help:"max bytes per second down from peers"` + PackedBlocklist string + PublicIP net.IP + Progress bool `default:"true"` + PieceStates bool `help:"Output piece state runs at progress intervals."` + Quiet bool 
`help:"discard client logging"` + Stats bool `help:"print stats at termination"` + Dht bool `default:"true"` + PortForward bool `default:"true"` + + TcpPeers bool `default:"true"` + UtpPeers bool `default:"true"` + Webtorrent bool `default:"true"` + DisableWebseeds bool + // Don't progress past handshake for peer connections where the peer doesn't offer the fast + // extension. + RequireFastExtension bool + + Ipv4 bool `default:"true"` + Ipv6 bool `default:"true"` + Pex bool `default:"true"` + + LinearDiscard bool `help:"Read and discard selected regions from start to finish. Useful for testing simultaneous Reader and static file prioritization."` + TestPeer []string `help:"addresses of some starting peers"` + + File []string + Torrent []string `arity:"+" help:"torrent file path or magnet uri" arg:"positional"` +} + +func statsEnabled(flags downloadFlags) bool { + return flags.Stats +} + +func downloadErr(flags downloadFlags) error { + clientConfig := torrent.NewDefaultClientConfig() + clientConfig.DisableWebseeds = flags.DisableWebseeds + clientConfig.DisableTCP = !flags.TcpPeers + clientConfig.DisableUTP = !flags.UtpPeers + clientConfig.DisableIPv4 = !flags.Ipv4 + clientConfig.DisableIPv6 = !flags.Ipv6 + clientConfig.DisableAcceptRateLimiting = true + clientConfig.NoDHT = !flags.Dht + clientConfig.Debug = flags.Debug + clientConfig.Seed = flags.Seed + clientConfig.PublicIp4 = flags.PublicIP.To4() + clientConfig.PublicIp6 = flags.PublicIP + clientConfig.DisablePEX = !flags.Pex + clientConfig.DisableWebtorrent = !flags.Webtorrent + clientConfig.NoDefaultPortForwarding = !flags.PortForward + if flags.PackedBlocklist != "" { + blocklist, err := iplist.MMapPackedFile(flags.PackedBlocklist) + if err != nil { + return fmt.Errorf("loading blocklist: %v", err) + } + defer blocklist.Close() + clientConfig.IPBlocklist = blocklist + } + if flags.Mmap { + clientConfig.DefaultStorage = storage.NewMMap("") + } + if flags.Addr != "" { + clientConfig.SetListenAddr(flags.Addr) + } + if flags.UploadRate != nil { + // TODO: I think the upload rate limit could be much lower. + clientConfig.UploadRateLimiter = rate.NewLimiter(rate.Limit(*flags.UploadRate), 256<<10) + } + if flags.DownloadRate != nil { + clientConfig.DownloadRateLimiter = rate.NewLimiter(rate.Limit(*flags.DownloadRate), 1<<16) + } + { + logger := log.Default.WithNames("main", "client") + if flags.Quiet { + logger = logger.FilterLevel(log.Critical) + } + clientConfig.Logger = logger + } + if flags.RequireFastExtension { + clientConfig.MinPeerExtensions.SetBit(pp.ExtensionBitFast, true) + } + if flags.MaxUnverifiedBytes != nil { + clientConfig.MaxUnverifiedBytes = flags.MaxUnverifiedBytes.Int64() + } + + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + defer cancel() + + client, err := torrent.NewClient(clientConfig) + if err != nil { + return fmt.Errorf("creating client: %w", err) + } + defer client.Close() + + // Write status on the root path on the default HTTP muxer. This will be bound to localhost + // somewhere if GOPPROF is set, thanks to the envpprof import. 
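+	// With envpprof's usual default that would be reachable with something
+	// like (the port is an assumption, envpprof picks it at startup):
+	//
+	//	curl http://localhost:6061/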
+ http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + client.WriteStatus(w) + }) + var wg sync.WaitGroup + err = addTorrents(ctx, client, flags, &wg) + if err != nil { + return fmt.Errorf("adding torrents: %w", err) + } + started := time.Now() + defer outputStats(client, flags) + wg.Wait() + if ctx.Err() == nil { + log.Print("downloaded ALL the torrents") + } else { + err = ctx.Err() + } + clientConnStats := client.ConnStats() + log.Printf( + "average download rate: %v/s", + humanize.Bytes(uint64(float64( + clientConnStats.BytesReadUsefulData.Int64(), + )/time.Since(started).Seconds())), + ) + if flags.Seed { + if len(client.Torrents()) == 0 { + log.Print("no torrents to seed") + } else { + outputStats(client, flags) + <-ctx.Done() + } + } + spew.Dump(expvar.Get("torrent").(*expvar.Map).Get("chunks received")) + spew.Dump(client.ConnStats()) + clStats := client.ConnStats() + sentOverhead := clStats.BytesWritten.Int64() - clStats.BytesWrittenData.Int64() + log.Printf( + "client read %v, %.1f%% was useful data. sent %v non-data bytes", + humanize.Bytes(uint64(clStats.BytesRead.Int64())), + 100*float64(clStats.BytesReadUsefulData.Int64())/float64(clStats.BytesRead.Int64()), + humanize.Bytes(uint64(sentOverhead))) + return err +} + +func outputStats(cl *torrent.Client, args downloadFlags) { + if !statsEnabled(args) { + return + } + expvar.Do(func(kv expvar.KeyValue) { + fmt.Printf("%s: %s\n", kv.Key, kv.Value) + }) + cl.WriteStatus(os.Stdout) +} diff --git a/deps/github.com/anacrolix/torrent/cmd/torrent/main.go b/deps/github.com/anacrolix/torrent/cmd/torrent/main.go new file mode 100644 index 0000000..2c1081b --- /dev/null +++ b/deps/github.com/anacrolix/torrent/cmd/torrent/main.go @@ -0,0 +1,152 @@ +// Downloads torrents from the command-line. 
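+//
+// Example invocations (sketches only; the authoritative flag and argument
+// spellings come from the bargle command definitions below):
+//
+//	torrent download 'magnet:?xt=urn:btih:...'
+//	torrent metainfo some.torrent magnet
+//	torrent create ./payload-dir > payload.torrent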
+package main + +import ( + "context" + "encoding/json" + "fmt" + "io" + stdLog "log" + "net/http" + "os" + "time" + + "github.com/anacrolix/bargle" + "github.com/anacrolix/envpprof" + "github.com/anacrolix/log" + xprometheus "github.com/anacrolix/missinggo/v2/prometheus" + "github.com/davecgh/go-spew/spew" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/sdk/trace" + + "github.com/anacrolix/torrent/bencode" + "github.com/anacrolix/torrent/version" +) + +func init() { + prometheus.MustRegister(xprometheus.NewExpvarCollector()) + http.Handle("/metrics", promhttp.Handler()) +} + +func shutdownTracerProvider(ctx context.Context, tp *trace.TracerProvider) { + started := time.Now() + err := tp.Shutdown(ctx) + elapsed := time.Since(started) + log.Levelf(log.Error, "shutting down tracer provider (took %v): %v", elapsed, err) +} + +func main() { + defer stdLog.SetFlags(stdLog.Flags() | stdLog.Lshortfile) + + ctx := context.Background() + tracingExporter, err := otlptracegrpc.New(ctx) + if err != nil { + log.Fatalf("creating tracing exporter: %v", err) + } + tracerProvider := trace.NewTracerProvider(trace.WithBatcher(tracingExporter)) + defer shutdownTracerProvider(ctx, tracerProvider) + otel.SetTracerProvider(tracerProvider) + + main := bargle.Main{} + main.Defer(envpprof.Stop) + main.Defer(func() { shutdownTracerProvider(ctx, tracerProvider) }) + debug := false + debugFlag := bargle.NewFlag(&debug) + debugFlag.AddLong("debug") + main.Options = append(main.Options, debugFlag.Make()) + main.Positionals = append(main.Positionals, + bargle.Subcommand{Name: "metainfo", Command: metainfoCmd()}, + bargle.Subcommand{Name: "announce", Command: func() bargle.Command { + var ac AnnounceCmd + cmd := bargle.FromStruct(&ac) + cmd.DefaultAction = func() error { + return announceErr(ac) + } + return cmd + }()}, + bargle.Subcommand{Name: "scrape", Command: func() bargle.Command { + var scrapeCfg scrapeCfg + cmd := bargle.FromStruct(&scrapeCfg) + cmd.Desc = "fetch swarm metrics for info-hashes from tracker" + cmd.DefaultAction = func() error { + return scrape(scrapeCfg) + } + return cmd + }()}, + bargle.Subcommand{Name: "download", Command: func() bargle.Command { + var dlc DownloadCmd + cmd := bargle.FromStruct(&dlc) + cmd.DefaultAction = func() error { + return downloadErr(downloadFlags{ + Debug: debug, + DownloadCmd: dlc, + }) + } + return cmd + }()}, + bargle.Subcommand{ + Name: "bencode", + Command: func() (cmd bargle.Command) { + var print func(interface{}) error + cmd.Positionals = append(cmd.Positionals, + bargle.Subcommand{Name: "json", Command: func() (cmd bargle.Command) { + cmd.DefaultAction = func() error { + je := json.NewEncoder(os.Stdout) + je.SetIndent("", " ") + print = je.Encode + return nil + } + return + }()}, + bargle.Subcommand{Name: "spew", Command: func() (cmd bargle.Command) { + cmd.DefaultAction = func() error { + config := spew.NewDefaultConfig() + config.DisableCapacities = true + config.Indent = " " + print = func(v interface{}) error { + config.Dump(v) + return nil + } + return nil + } + return + }()}) + d := bencode.NewDecoder(os.Stdin) + cmd.AfterParseFunc = func(ctx bargle.Context) error { + ctx.AfterParse(func() error { + for i := 0; ; i++ { + var v interface{} + err := d.Decode(&v) + if err == io.EOF { + break + } + if err != nil { + return fmt.Errorf("decoding message index %d: %w", i, err) + } 
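+						// Decoding loops until EOF, so several bencode
+						// values concatenated on stdin are each printed
+						// in turn.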
+ print(v) + } + return nil + }) + return nil + } + cmd.Desc = "reads bencoding from stdin into Go native types and spews the result" + return + }(), + }, + bargle.Subcommand{Name: "version", Command: bargle.Command{ + DefaultAction: func() error { + fmt.Printf("HTTP User-Agent: %q\n", version.DefaultHttpUserAgent) + fmt.Printf("Torrent client version: %q\n", version.DefaultExtendedHandshakeClientVersion) + fmt.Printf("Torrent version prefix: %q\n", version.DefaultBep20Prefix) + return nil + }, + Desc: "prints various protocol default version strings", + }}, + bargle.Subcommand{Name: "serve", Command: serve()}, + bargle.Subcommand{Name: "create", Command: create()}, + ) + main.Run() +} diff --git a/deps/github.com/anacrolix/torrent/cmd/torrent/metainfo.go b/deps/github.com/anacrolix/torrent/cmd/torrent/metainfo.go new file mode 100644 index 0000000..929cb45 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/cmd/torrent/metainfo.go @@ -0,0 +1,132 @@ +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "os" + "strings" + + "github.com/anacrolix/bargle" + "github.com/bradfitz/iter" + + "github.com/anacrolix/torrent/metainfo" +) + +type pprintMetainfoFlags struct { + JustName bool + PieceHashes bool + Files bool +} + +func metainfoCmd() (cmd bargle.Command) { + var metainfoPath string + var mi *metainfo.MetaInfo + // TODO: Test if bargle treats no subcommand as a failure. + cmd.Positionals = append(cmd.Positionals, + &bargle.Positional{ + Name: "torrent file", + Value: &bargle.String{Target: &metainfoPath}, + AfterParseFunc: func(ctx bargle.Context) error { + ctx.AfterParse(func() (err error) { + if strings.HasPrefix(metainfoPath, "http://") || strings.HasPrefix(metainfoPath, "https://") { + response, err := http.Get(metainfoPath) + if err != nil { + return nil + } + mi, err = metainfo.Load(response.Body) + if err != nil { + return nil + } + } else { + mi, err = metainfo.LoadFromFile(metainfoPath) + } + return + }) + return nil + }, + }, + bargle.Subcommand{Name: "magnet", Command: func() (cmd bargle.Command) { + cmd.DefaultAction = func() (err error) { + info, err := mi.UnmarshalInfo() + if err != nil { + return + } + fmt.Fprintf(os.Stdout, "%s\n", mi.Magnet(nil, &info).String()) + return nil + } + return + }()}, + bargle.Subcommand{Name: "pprint", Command: func() (cmd bargle.Command) { + var flags pprintMetainfoFlags + cmd = bargle.FromStruct(&flags) + cmd.DefaultAction = func() (err error) { + err = pprintMetainfo(mi, flags) + if err != nil { + return + } + if !flags.JustName { + os.Stdout.WriteString("\n") + } + return + } + return + }()}, + //bargle.Subcommand{Name: "infohash", Command: func(ctx args.SubCmdCtx) (err error) { + // fmt.Printf("%s: %s\n", mi.HashInfoBytes().HexString(), metainfoPath) + // return nil + //}}, + //bargle.Subcommand{Name: "list-files", Command: func(ctx args.SubCmdCtx) (err error) { + // info, err := mi.UnmarshalInfo() + // if err != nil { + // return fmt.Errorf("unmarshalling info from metainfo at %q: %v", metainfoPath, err) + // } + // for _, f := range info.UpvertedFiles() { + // fmt.Println(f.DisplayPath(&info)) + // } + // return nil + //}}, + ) + return +} + +func pprintMetainfo(metainfo *metainfo.MetaInfo, flags pprintMetainfoFlags) error { + info, err := metainfo.UnmarshalInfo() + if err != nil { + return fmt.Errorf("error unmarshalling info: %s", err) + } + if flags.JustName { + fmt.Printf("%s\n", info.Name) + return nil + } + d := map[string]interface{}{ + "Name": info.Name, + "Name.Utf8": info.NameUtf8, + "NumPieces": 
info.NumPieces(), + "PieceLength": info.PieceLength, + "InfoHash": metainfo.HashInfoBytes().HexString(), + "NumFiles": len(info.UpvertedFiles()), + "TotalLength": info.TotalLength(), + "Announce": metainfo.Announce, + "AnnounceList": metainfo.AnnounceList, + "UrlList": metainfo.UrlList, + } + if len(metainfo.Nodes) > 0 { + d["Nodes"] = metainfo.Nodes + } + if flags.Files { + d["Files"] = info.UpvertedFiles() + } + if flags.PieceHashes { + d["PieceHashes"] = func() (ret []string) { + for i := range iter.N(info.NumPieces()) { + ret = append(ret, hex.EncodeToString(info.Pieces[i*20:(i+1)*20])) + } + return + }() + } + b, _ := json.MarshalIndent(d, "", " ") + _, err = os.Stdout.Write(b) + return err +} diff --git a/deps/github.com/anacrolix/torrent/cmd/torrent/scrape.go b/deps/github.com/anacrolix/torrent/cmd/torrent/scrape.go new file mode 100644 index 0000000..01a59b7 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/cmd/torrent/scrape.go @@ -0,0 +1,31 @@ +package main + +import ( + "context" + "fmt" + + "github.com/davecgh/go-spew/spew" + + "github.com/anacrolix/torrent" + "github.com/anacrolix/torrent/tracker" +) + +type scrapeCfg struct { + Tracker string `arg:"positional"` + InfoHashes []torrent.InfoHash `arity:"+" arg:"positional"` +} + +func scrape(flags scrapeCfg) error { + cc, err := tracker.NewClient(flags.Tracker, tracker.NewClientOpts{}) + if err != nil { + err = fmt.Errorf("creating new tracker client: %w", err) + return err + } + defer cc.Close() + scrapeOut, err := cc.Scrape(context.TODO(), flags.InfoHashes) + if err != nil { + return fmt.Errorf("scraping: %w", err) + } + spew.Dump(scrapeOut) + return nil +} diff --git a/deps/github.com/anacrolix/torrent/cmd/torrent/serve.go b/deps/github.com/anacrolix/torrent/cmd/torrent/serve.go new file mode 100644 index 0000000..bdb1559 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/cmd/torrent/serve.go @@ -0,0 +1,89 @@ +package main + +import ( + "fmt" + "net/http" + "path/filepath" + + "github.com/anacrolix/bargle" + "github.com/anacrolix/log" + + "github.com/anacrolix/torrent" + "github.com/anacrolix/torrent/bencode" + "github.com/anacrolix/torrent/metainfo" + "github.com/anacrolix/torrent/storage" +) + +func serve() (cmd bargle.Command) { + var filePaths []string + cmd.Positionals = append(cmd.Positionals, &bargle.Positional{ + Value: bargle.AutoUnmarshaler(&filePaths), + }) + cmd.Desc = "creates and seeds a torrent from a filepath" + cmd.DefaultAction = func() error { + cfg := torrent.NewDefaultClientConfig() + cfg.Seed = true + cl, err := torrent.NewClient(cfg) + if err != nil { + return fmt.Errorf("new torrent client: %w", err) + } + defer cl.Close() + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + cl.WriteStatus(w) + }) + for _, filePath := range filePaths { + totalLength, err := totalLength(filePath) + if err != nil { + return fmt.Errorf("calculating total length of %q: %v", filePath, err) + } + pieceLength := metainfo.ChoosePieceLength(totalLength) + info := metainfo.Info{ + PieceLength: pieceLength, + } + err = info.BuildFromFilePath(filePath) + if err != nil { + return fmt.Errorf("building info from path %q: %w", filePath, err) + } + for _, fi := range info.Files { + log.Printf("added %q", fi.Path) + } + mi := metainfo.MetaInfo{ + InfoBytes: bencode.MustMarshal(info), + } + pc, err := storage.NewDefaultPieceCompletionForDir(".") + if err != nil { + return fmt.Errorf("new piece completion: %w", err) + } + defer pc.Close() + ih := mi.HashInfoBytes() + to, _ := 
cl.AddTorrentOpt(torrent.AddTorrentOpts{ + InfoHash: ih, + Storage: storage.NewFileOpts(storage.NewFileClientOpts{ + ClientBaseDir: filePath, + FilePathMaker: func(opts storage.FilePathMakerOpts) string { + return filepath.Join(opts.File.Path...) + }, + TorrentDirMaker: nil, + PieceCompletion: pc, + }), + }) + defer to.Drop() + err = to.MergeSpec(&torrent.TorrentSpec{ + InfoBytes: mi.InfoBytes, + Trackers: [][]string{{ + `wss://tracker.btorrent.xyz`, + `wss://tracker.openwebtorrent.com`, + "http://p4p.arenabg.com:1337/announce", + "udp://tracker.opentrackr.org:1337/announce", + "udp://tracker.openbittorrent.com:6969/announce", + }}, + }) + if err != nil { + return fmt.Errorf("setting trackers: %w", err) + } + fmt.Printf("%v: %v\n", to, to.Metainfo().Magnet(&ih, &info)) + } + select {} + } + return +} diff --git a/deps/github.com/anacrolix/torrent/cmd/torrent/total-length.go b/deps/github.com/anacrolix/torrent/cmd/torrent/total-length.go new file mode 100644 index 0000000..52888ee --- /dev/null +++ b/deps/github.com/anacrolix/torrent/cmd/torrent/total-length.go @@ -0,0 +1,21 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" +) + +func totalLength(path string) (totalLength int64, err error) { + err = filepath.Walk(path, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + return nil + } + totalLength += info.Size() + return nil + }) + if err != nil { + return 0, fmt.Errorf("walking path, %w", err) + } + return totalLength, nil +} diff --git a/deps/github.com/anacrolix/torrent/common/upverted_files.go b/deps/github.com/anacrolix/torrent/common/upverted_files.go new file mode 100644 index 0000000..1933e16 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/common/upverted_files.go @@ -0,0 +1,18 @@ +package common + +import ( + "github.com/anacrolix/torrent/metainfo" + "github.com/anacrolix/torrent/segments" +) + +func LengthIterFromUpvertedFiles(fis []metainfo.FileInfo) segments.LengthIter { + i := 0 + return func() (segments.Length, bool) { + if i == len(fis) { + return -1, false + } + l := fis[i].Length + i++ + return l, true + } +} diff --git a/deps/github.com/anacrolix/torrent/config.go b/deps/github.com/anacrolix/torrent/config.go new file mode 100644 index 0000000..e2d0ea1 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/config.go @@ -0,0 +1,249 @@ +package torrent + +import ( + "context" + "net" + "net/http" + "net/url" + "time" + + "github.com/anacrolix/dht/v2" + "github.com/anacrolix/dht/v2/krpc" + "github.com/anacrolix/log" + "github.com/anacrolix/missinggo/v2" + "golang.org/x/time/rate" + + "github.com/anacrolix/torrent/iplist" + "github.com/anacrolix/torrent/mse" + "github.com/anacrolix/torrent/storage" + "github.com/anacrolix/torrent/version" +) + +// Contains config elements that are exclusive to tracker handling. There may be other fields in +// ClientConfig that are also relevant. +type ClientTrackerConfig struct { + // Don't announce to trackers. This only leaves DHT to discover peers. + DisableTrackers bool `long:"disable-trackers"` + // Defines DialContext func to use for HTTP tracker announcements + TrackerDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + // Defines ListenPacket func to use for UDP tracker announcements + TrackerListenPacket func(network, addr string) (net.PacketConn, error) + // Takes a tracker's hostname and requests DNS A and AAAA records. 
+	// Used in case DNS lookups require a special setup (i.e., dns-over-https)
+	LookupTrackerIp func(*url.URL) ([]net.IP, error)
+}
+
+type ClientDhtConfig struct {
+	// Don't create a DHT.
+	NoDHT bool `long:"disable-dht"`
+	DhtStartingNodes func(network string) dht.StartingNodesGetter
+	// Called for each anacrolix/dht Server created for the Client.
+	ConfigureAnacrolixDhtServer func(*dht.ServerConfig)
+	PeriodicallyAnnounceTorrentsToDht bool
+	// OnQuery hook func
+	DHTOnQuery func(query *krpc.Msg, source net.Addr) (propagate bool)
+}
+
+// Probably not safe to modify this after it's given to a Client.
+type ClientConfig struct {
+	ClientTrackerConfig
+	ClientDhtConfig
+
+	// Store torrent file data in this directory unless .DefaultStorage is
+	// specified.
+	DataDir string `long:"data-dir" description:"directory to store downloaded torrent data"`
+	// The address to listen on for new uTP and TCP BitTorrent protocol connections. DHT shares a UDP
+	// socket with uTP unless configured otherwise.
+	ListenHost              func(network string) string
+	ListenPort              int
+	NoDefaultPortForwarding bool
+	UpnpID                  string
+	DisablePEX              bool `long:"disable-pex"`
+
+	// Never send chunks to peers.
+	NoUpload bool `long:"no-upload"`
+	// Disable uploading even when it isn't fair.
+	DisableAggressiveUpload bool `long:"disable-aggressive-upload"`
+	// Upload even after there's nothing in it for us. By default uploading is
+	// not altruistic, we'll only upload to encourage the peer to reciprocate.
+	Seed bool `long:"seed"`
+	// Only applies to chunks uploaded to peers, to maintain responsiveness
+	// communicating local Client state to peers. Each limiter token
+	// represents one byte. The Limiter's burst must be large enough to fit a
+	// whole chunk, which is usually 16 KiB (see TorrentSpec.ChunkSize).
+	UploadRateLimiter *rate.Limiter
+	// Rate limits all reads from connections to peers. Each limiter token
+	// represents one byte. The Limiter's burst must be bigger than the
+	// largest Read performed on the underlying rate-limiting io.Reader
+	// minus one. This is likely to be the larger of the main read loop buffer
+	// (~4096), and the requested chunk size (~16KiB, see
+	// TorrentSpec.ChunkSize).
+	DownloadRateLimiter *rate.Limiter
+	// Maximum unverified bytes across all torrents. Not used if zero.
+	MaxUnverifiedBytes int64
+
+	// User-provided Client peer ID. If not present, one is generated automatically.
+	PeerID string
+	// For the bittorrent protocol.
+	DisableUTP bool
+	// For the bittorrent protocol.
+	DisableTCP bool `long:"disable-tcp"`
+	// Called to instantiate storage for each added torrent. Builtin backends
+	// are in the storage package. If not set, the "file" implementation is
+	// used (and Closed when the Client is Closed).
+	DefaultStorage storage.ClientImpl
+
+	HeaderObfuscationPolicy HeaderObfuscationPolicy
+	// The crypto methods to offer when initiating connections with header obfuscation.
+	CryptoProvides mse.CryptoMethod
+	// Chooses the crypto method to use when receiving connections with header obfuscation.
+	CryptoSelector mse.CryptoSelector
+
+	IPBlocklist      iplist.Ranger
+	DisableIPv6      bool `long:"disable-ipv6"`
+	DisableIPv4      bool
+	DisableIPv4Peers bool
+	// Perform logging and any other behaviour that will help debug.
+	Debug  bool `help:"enable debugging"`
+	Logger log.Logger
+
+	// Used for torrent sources and webseeding if set.
+	WebTransport http.RoundTripper
+	// Defines proxy for HTTP requests, such as for trackers. It's commonly set from the result of
+	// "net/http".ProxyURL(HTTPProxy).
+	HTTPProxy func(*http.Request) (*url.URL, error)
+	// Defines DialContext func to use for HTTP requests, such as for fetching metainfo and webtorrent seeds
+	HTTPDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
+	// HTTPUserAgent changes default UserAgent for HTTP requests
+	HTTPUserAgent string
+	// HttpRequestDirector modifies the request before it's sent.
+	// Useful for adding authentication headers, for example
+	HttpRequestDirector func(*http.Request) error
+	// WebsocketTrackerHttpHeader returns a custom header to be used when dialing a websocket connection
+	// to the tracker. Useful for adding authentication headers
+	WebsocketTrackerHttpHeader func() http.Header
+	// Updated occasionally when there have been some changes to client
+	// behaviour in case other clients are assuming anything of us. See also
+	// `bep20`.
+	ExtendedHandshakeClientVersion string
+	// Peer ID client identifier prefix. We'll update this occasionally to
+	// reflect changes to client behaviour that other clients may depend on.
+	// Also see `extendedHandshakeClientVersion`.
+	Bep20 string
+
+	// Peer dial timeout to use when there are limited peers.
+	NominalDialTimeout time.Duration
+	// Minimum peer dial timeout to use (even if we have lots of peers).
+	MinDialTimeout             time.Duration
+	EstablishedConnsPerTorrent int
+	HalfOpenConnsPerTorrent    int
+	TotalHalfOpenConns         int
+	// Maximum number of peer addresses in reserve.
+	TorrentPeersHighWater int
+	// Minimum number of peers before effort is made to obtain more peers.
+	TorrentPeersLowWater int
+
+	// Limit how long handshake can take. This is to reduce the lingering
+	// impact of a few bad apples. 4s loses 1% of successful handshakes that
+	// are obtained with 60s timeout, and 5% of unsuccessful handshakes.
+	HandshakesTimeout time.Duration
+	// How long between writes before sending a keep alive message on a peer connection that we want
+	// to maintain.
+	KeepAliveTimeout time.Duration
+	// Maximum bytes to buffer per peer connection for peer request data before it is sent.
+	MaxAllocPeerRequestDataPerConn int64
+
+	// The IP addresses as our peers should see them. May differ from the
+	// local interfaces due to NAT or other network configurations.
+	PublicIp4 net.IP
+	PublicIp6 net.IP
+
+	// Accept rate limiting affects excessive connection attempts from IPs that fail during
+	// handshakes or request torrents that we don't have.
+	DisableAcceptRateLimiting bool
+	// Don't add connections that have the same peer ID as an existing
+	// connection for a given Torrent.
+	DropDuplicatePeerIds bool
+	// Drop peers that are complete if we are also complete and have no use for the peer. This is a
+	// bit of a special case, since a peer could also be useless if they're just not interested, or
+	// we don't intend to obtain all of a torrent's data.
+	DropMutuallyCompletePeers bool
+	// Whether to accept peer connections at all.
+	AcceptPeerConnections bool
+	// Whether a Client should want conns without delegating to any attached Torrents. This is
+	// useful when torrents might be added dynamically in callbacks for example.
+	AlwaysWantConns bool
+
+	Extensions PeerExtensionBits
+	// Bits that peers must have set to proceed past handshakes.
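+	// For example, the download command's RequireFastExtension flag insists
+	// on BEP 6 support from peers via:
+	//
+	//	clientConfig.MinPeerExtensions.SetBit(pp.ExtensionBitFast, true)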
+ MinPeerExtensions PeerExtensionBits + + DisableWebtorrent bool + DisableWebseeds bool + + Callbacks Callbacks + + // ICEServers defines a slice describing servers available to be used by + // ICE, such as STUN and TURN servers. + ICEServers []string + + DialRateLimiter *rate.Limiter + + PieceHashersPerTorrent int // default: 2 +} + +func (cfg *ClientConfig) SetListenAddr(addr string) *ClientConfig { + host, port, err := missinggo.ParseHostPort(addr) + if err != nil { + panic(err) + } + cfg.ListenHost = func(string) string { return host } + cfg.ListenPort = port + return cfg +} + +func NewDefaultClientConfig() *ClientConfig { + cc := &ClientConfig{ + HTTPUserAgent: version.DefaultHttpUserAgent, + ExtendedHandshakeClientVersion: version.DefaultExtendedHandshakeClientVersion, + Bep20: version.DefaultBep20Prefix, + UpnpID: version.DefaultUpnpId, + NominalDialTimeout: 20 * time.Second, + MinDialTimeout: 3 * time.Second, + EstablishedConnsPerTorrent: 50, + HalfOpenConnsPerTorrent: 25, + TotalHalfOpenConns: 100, + TorrentPeersHighWater: 500, + TorrentPeersLowWater: 50, + HandshakesTimeout: 4 * time.Second, + KeepAliveTimeout: time.Minute, + MaxAllocPeerRequestDataPerConn: 1 << 20, + ListenHost: func(string) string { return "" }, + UploadRateLimiter: unlimited, + DownloadRateLimiter: unlimited, + DisableAcceptRateLimiting: true, + DropMutuallyCompletePeers: true, + HeaderObfuscationPolicy: HeaderObfuscationPolicy{ + Preferred: true, + RequirePreferred: false, + }, + CryptoSelector: mse.DefaultCryptoSelector, + CryptoProvides: mse.AllSupportedCrypto, + ListenPort: 42069, + Extensions: defaultPeerExtensionBytes(), + AcceptPeerConnections: true, + MaxUnverifiedBytes: 64 << 20, + DialRateLimiter: rate.NewLimiter(10, 10), + PieceHashersPerTorrent: 2, + } + cc.DhtStartingNodes = func(network string) dht.StartingNodesGetter { + return func() ([]dht.Addr, error) { return dht.GlobalBootstrapAddrs(network) } + } + cc.PeriodicallyAnnounceTorrentsToDht = true + return cc +} + +type HeaderObfuscationPolicy struct { + RequirePreferred bool // Whether the value of Preferred is a strict requirement. + Preferred bool // Whether header obfuscation is preferred. +} diff --git a/deps/github.com/anacrolix/torrent/conn_stats.go b/deps/github.com/anacrolix/torrent/conn_stats.go new file mode 100644 index 0000000..0c5bfc7 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/conn_stats.go @@ -0,0 +1,117 @@ +package torrent + +import ( + "encoding/json" + "fmt" + "io" + "reflect" + "sync/atomic" + + pp "github.com/anacrolix/torrent/peer_protocol" +) + +// Various connection-level metrics. At the Torrent level these are aggregates. Chunks are messages +// with data payloads. Data is actual torrent content without any overhead. Useful is something we +// needed locally. Unwanted is something we didn't ask for (but may still be useful). Written is +// things sent to the peer, and Read is stuff received from them. Due to the implementation of +// Count, must be aligned on some platforms: See https://github.com/anacrolix/torrent/issues/262. +type ConnStats struct { + // Total bytes on the wire. Includes handshakes and encryption. + BytesWritten Count + BytesWrittenData Count + + BytesRead Count + BytesReadData Count + BytesReadUsefulData Count + BytesReadUsefulIntendedData Count + + ChunksWritten Count + + ChunksRead Count + ChunksReadUseful Count + ChunksReadWasted Count + + MetadataChunksRead Count + + // Number of pieces data was written to, that subsequently passed verification. 
+ PiecesDirtiedGood Count + // Number of pieces data was written to, that subsequently failed verification. Note that a + // connection may not have been the sole dirtier of a piece. + PiecesDirtiedBad Count +} + +func (me *ConnStats) Copy() (ret ConnStats) { + for i := 0; i < reflect.TypeOf(ConnStats{}).NumField(); i++ { + n := reflect.ValueOf(me).Elem().Field(i).Addr().Interface().(*Count).Int64() + reflect.ValueOf(&ret).Elem().Field(i).Addr().Interface().(*Count).Add(n) + } + return +} + +type Count struct { + n int64 +} + +var _ fmt.Stringer = (*Count)(nil) + +func (me *Count) Add(n int64) { + atomic.AddInt64(&me.n, n) +} + +func (me *Count) Int64() int64 { + return atomic.LoadInt64(&me.n) +} + +func (me *Count) String() string { + return fmt.Sprintf("%v", me.Int64()) +} + +func (me *Count) MarshalJSON() ([]byte, error) { + return json.Marshal(me.n) +} + +func (cs *ConnStats) wroteMsg(msg *pp.Message) { + // TODO: Track messages and not just chunks. + switch msg.Type { + case pp.Piece: + cs.ChunksWritten.Add(1) + cs.BytesWrittenData.Add(int64(len(msg.Piece))) + } +} + +func (cs *ConnStats) receivedChunk(size int64) { + cs.ChunksRead.Add(1) + cs.BytesReadData.Add(size) +} + +func (cs *ConnStats) incrementPiecesDirtiedGood() { + cs.PiecesDirtiedGood.Add(1) +} + +func (cs *ConnStats) incrementPiecesDirtiedBad() { + cs.PiecesDirtiedBad.Add(1) +} + +func add(n int64, f func(*ConnStats) *Count) func(*ConnStats) { + return func(cs *ConnStats) { + p := f(cs) + p.Add(n) + } +} + +type connStatsReadWriter struct { + rw io.ReadWriter + c *PeerConn +} + +func (me connStatsReadWriter) Write(b []byte) (n int, err error) { + n, err = me.rw.Write(b) + me.c.wroteBytes(int64(n)) + return +} + +func (me connStatsReadWriter) Read(b []byte) (n int, err error) { + n, err = me.rw.Read(b) + me.c.readBytes(int64(n)) + return +} diff --git a/deps/github.com/anacrolix/torrent/deferrwl.go b/deps/github.com/anacrolix/torrent/deferrwl.go new file mode 100644 index 0000000..bf95be2 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/deferrwl.go @@ -0,0 +1,36 @@ +package torrent + +import "github.com/anacrolix/sync" + +// Runs deferred actions on Unlock. Note that actions are assumed to be the results of changes that +// would only occur with a write lock at present. The race detector should catch instances of defers +// without the write lock being held. +type lockWithDeferreds struct { + internal sync.RWMutex + unlockActions []func() +} + +func (me *lockWithDeferreds) Lock() { + me.internal.Lock() +} + +func (me *lockWithDeferreds) Unlock() { + unlockActions := me.unlockActions + for i := 0; i < len(unlockActions); i += 1 { + unlockActions[i]() + } + me.unlockActions = unlockActions[:0] + me.internal.Unlock() +} + +func (me *lockWithDeferreds) RLock() { + me.internal.RLock() +} + +func (me *lockWithDeferreds) RUnlock() { + me.internal.RUnlock() +} + +func (me *lockWithDeferreds) Defer(action func()) { + me.unlockActions = append(me.unlockActions, action) +} diff --git a/deps/github.com/anacrolix/torrent/dht.go b/deps/github.com/anacrolix/torrent/dht.go new file mode 100644 index 0000000..77975a2 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/dht.go @@ -0,0 +1,62 @@ +package torrent + +import ( + "io" + "net" + + "github.com/anacrolix/dht/v2" + "github.com/anacrolix/dht/v2/krpc" + peer_store "github.com/anacrolix/dht/v2/peer-store" +) + +// DHT server interface for use by a Torrent or Client. 
It's reasonable for this to make assumptions +// for torrent-use that might not be the default behaviour for the DHT server. +type DhtServer interface { + Stats() interface{} + ID() [20]byte + Addr() net.Addr + AddNode(ni krpc.NodeInfo) error + // This is called asynchronously when receiving PORT messages. + Ping(addr *net.UDPAddr) + Announce(hash [20]byte, port int, impliedPort bool) (DhtAnnounce, error) + WriteStatus(io.Writer) +} + +// Optional interface for DhtServer's that can expose their peer store (if any). +type PeerStorer interface { + PeerStore() peer_store.Interface +} + +type DhtAnnounce interface { + Close() + Peers() <-chan dht.PeersValues +} + +type AnacrolixDhtServerWrapper struct { + *dht.Server +} + +func (me AnacrolixDhtServerWrapper) Stats() interface{} { + return me.Server.Stats() +} + +type anacrolixDhtAnnounceWrapper struct { + *dht.Announce +} + +func (me anacrolixDhtAnnounceWrapper) Peers() <-chan dht.PeersValues { + return me.Announce.Peers +} + +func (me AnacrolixDhtServerWrapper) Announce(hash [20]byte, port int, impliedPort bool) (DhtAnnounce, error) { + ann, err := me.Server.Announce(hash, port, impliedPort) + return anacrolixDhtAnnounceWrapper{ann}, err +} + +func (me AnacrolixDhtServerWrapper) Ping(addr *net.UDPAddr) { + me.Server.PingQueryInput(addr, dht.QueryInput{ + RateLimiting: dht.QueryRateLimiting{NoWaitFirst: true}, + }) +} + +var _ DhtServer = AnacrolixDhtServerWrapper{} diff --git a/deps/github.com/anacrolix/torrent/dial-pool.go b/deps/github.com/anacrolix/torrent/dial-pool.go new file mode 100644 index 0000000..c0c233e --- /dev/null +++ b/deps/github.com/anacrolix/torrent/dial-pool.go @@ -0,0 +1,43 @@ +package torrent + +import ( + "context" +) + +type dialPool struct { + resCh chan DialResult + addr string + left int +} + +func (me *dialPool) getFirst() (res DialResult) { + for me.left > 0 && res.Conn == nil { + res = <-me.resCh + me.left-- + } + return +} + +func (me *dialPool) add(ctx context.Context, dialer Dialer) { + me.left++ + go func() { + me.resCh <- DialResult{ + dialFromSocket(ctx, dialer, me.addr), + dialer, + } + }() +} + +func (me *dialPool) startDrainer() { + go me.drainAndCloseRemainingDials() +} + +func (me *dialPool) drainAndCloseRemainingDials() { + for me.left > 0 { + conn := (<-me.resCh).Conn + me.left-- + if conn != nil { + conn.Close() + } + } +} diff --git a/deps/github.com/anacrolix/torrent/dialer.go b/deps/github.com/anacrolix/torrent/dialer.go new file mode 100644 index 0000000..5cdf3fc --- /dev/null +++ b/deps/github.com/anacrolix/torrent/dialer.go @@ -0,0 +1,12 @@ +package torrent + +import ( + "github.com/anacrolix/torrent/dialer" +) + +type ( + Dialer = dialer.T + NetworkDialer = dialer.WithNetwork +) + +var DefaultNetDialer = &dialer.Default diff --git a/deps/github.com/anacrolix/torrent/dialer/dialer.go b/deps/github.com/anacrolix/torrent/dialer/dialer.go new file mode 100644 index 0000000..5e5dff4 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/dialer/dialer.go @@ -0,0 +1,34 @@ +package dialer + +import ( + "context" + "net" +) + +// Dialers have the network locked in. +type T interface { + Dial(_ context.Context, addr string) (net.Conn, error) + DialerNetwork() string +} + +// An interface to ease wrapping dialers that explicitly include a network parameter. +type WithContext interface { + DialContext(ctx context.Context, network, addr string) (net.Conn, error) +} + +// Used by wrappers of standard library network types. 
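+// A consuming package might pin it to a network like so (a sketch; ctx and
+// addr are placeholders):
+//
+//	var d T = WithNetwork{Network: "tcp", Dialer: Default}
+//	conn, err := d.Dial(ctx, addr)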
+var Default = &net.Dialer{} + +// Adapts a WithContext to the Dial interface in this package. +type WithNetwork struct { + Network string + Dialer WithContext +} + +func (me WithNetwork) DialerNetwork() string { + return me.Network +} + +func (me WithNetwork) Dial(ctx context.Context, addr string) (_ net.Conn, err error) { + return me.Dialer.DialContext(ctx, me.Network, addr) +} diff --git a/deps/github.com/anacrolix/torrent/doc.go b/deps/github.com/anacrolix/torrent/doc.go new file mode 100644 index 0000000..bc90c0d --- /dev/null +++ b/deps/github.com/anacrolix/torrent/doc.go @@ -0,0 +1,33 @@ +/* +Package torrent implements a torrent client. Goals include: + - Configurable data storage, such as file, mmap, and piece-based. + - Downloading on demand: torrent.Reader will request only the data required to + satisfy Reads, which is ideal for streaming and torrentfs. + +BitTorrent features implemented include: + - Protocol obfuscation + - DHT + - uTP + - PEX + - Magnet links + - IP Blocklists + - Some IPv6 + - HTTP and UDP tracker clients + - BEPs: + - 3: Basic BitTorrent protocol + - 5: DHT + - 6: Fast Extension (have all/none only) + - 7: IPv6 Tracker Extension + - 9: ut_metadata + - 10: Extension protocol + - 11: PEX + - 12: Multitracker metadata extension + - 15: UDP Tracker Protocol + - 20: Peer ID convention ("-GTnnnn-") + - 23: Tracker Returns Compact Peer Lists + - 29: uTorrent transport protocol + - 41: UDP Tracker Protocol Extensions + - 42: DHT Security extension + - 43: Read-only DHT Nodes +*/ +package torrent diff --git a/deps/github.com/anacrolix/torrent/example_test.go b/deps/github.com/anacrolix/torrent/example_test.go new file mode 100644 index 0000000..54cb719 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/example_test.go @@ -0,0 +1,25 @@ +package torrent_test + +import ( + "log" + + "github.com/anacrolix/torrent" +) + +func Example() { + c, _ := torrent.NewClient(nil) + defer c.Close() + t, _ := c.AddMagnet("magnet:?xt=urn:btih:ZOCMZQIPFFW7OLLMIC5HUB6BPCSDEOQU") + <-t.GotInfo() + t.DownloadAll() + c.WaitAll() + log.Print("ermahgerd, torrent downloaded") +} + +func Example_fileReader() { + var f torrent.File + // Accesses the parts of the torrent pertaining to f. Data will be + // downloaded as required, per the configuration of the torrent.Reader. + r := f.NewReader() + defer r.Close() +} diff --git a/deps/github.com/anacrolix/torrent/file.go b/deps/github.com/anacrolix/torrent/file.go new file mode 100644 index 0000000..bea4b13 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/file.go @@ -0,0 +1,206 @@ +package torrent + +import ( + "github.com/RoaringBitmap/roaring" + "github.com/anacrolix/missinggo/v2/bitmap" + + "github.com/anacrolix/torrent/metainfo" +) + +// Provides access to regions of torrent data that correspond to its files. +type File struct { + t *Torrent + path string + offset int64 + length int64 + fi metainfo.FileInfo + displayPath string + prio piecePriority +} + +func (f *File) Torrent() *Torrent { + return f.t +} + +// Data for this file begins this many bytes into the Torrent. +func (f *File) Offset() int64 { + return f.offset +} + +// The FileInfo from the metainfo.Info to which this file corresponds. +func (f File) FileInfo() metainfo.FileInfo { + return f.fi +} + +// The file's path components joined by '/'. +func (f File) Path() string { + return f.path +} + +// The file's length in bytes. +func (f *File) Length() int64 { + return f.length +} + +// Number of bytes of the entire file we have completed. 
This is the sum of +// completed pieces, and dirtied chunks of incomplete pieces. +func (f *File) BytesCompleted() (n int64) { + f.t.cl.rLock() + n = f.bytesCompletedLocked() + f.t.cl.rUnlock() + return +} + +func (f *File) bytesCompletedLocked() int64 { + return f.length - f.bytesLeft() +} + +func fileBytesLeft( + torrentUsualPieceSize int64, + fileFirstPieceIndex int, + fileEndPieceIndex int, + fileTorrentOffset int64, + fileLength int64, + torrentCompletedPieces *roaring.Bitmap, + pieceSizeCompletedFn func(pieceIndex int) int64, +) (left int64) { + if fileLength == 0 { + return + } + + noCompletedMiddlePieces := roaring.New() + noCompletedMiddlePieces.AddRange(bitmap.BitRange(fileFirstPieceIndex), bitmap.BitRange(fileEndPieceIndex)) + noCompletedMiddlePieces.AndNot(torrentCompletedPieces) + noCompletedMiddlePieces.Iterate(func(pieceIndex uint32) bool { + i := int(pieceIndex) + pieceSizeCompleted := pieceSizeCompletedFn(i) + if i == fileFirstPieceIndex { + beginOffset := fileTorrentOffset % torrentUsualPieceSize + beginSize := torrentUsualPieceSize - beginOffset + beginDownLoaded := pieceSizeCompleted - beginOffset + if beginDownLoaded < 0 { + beginDownLoaded = 0 + } + left += beginSize - beginDownLoaded + } else if i == fileEndPieceIndex-1 { + endSize := (fileTorrentOffset + fileLength) % torrentUsualPieceSize + if endSize == 0 { + endSize = torrentUsualPieceSize + } + endDownloaded := pieceSizeCompleted + if endDownloaded > endSize { + endDownloaded = endSize + } + left += endSize - endDownloaded + } else { + left += torrentUsualPieceSize - pieceSizeCompleted + } + return true + }) + + if left > fileLength { + left = fileLength + } + // + //numPiecesSpanned := f.EndPieceIndex() - f.BeginPieceIndex() + //completedMiddlePieces := f.t._completedPieces.Clone() + //completedMiddlePieces.RemoveRange(0, bitmap.BitRange(f.BeginPieceIndex()+1)) + //completedMiddlePieces.RemoveRange(bitmap.BitRange(f.EndPieceIndex()-1), bitmap.ToEnd) + //left += int64(numPiecesSpanned-2-pieceIndex(completedMiddlePieces.GetCardinality())) * torrentUsualPieceSize + return +} + +func (f *File) bytesLeft() (left int64) { + return fileBytesLeft(int64(f.t.usualPieceSize()), f.BeginPieceIndex(), f.EndPieceIndex(), f.offset, f.length, &f.t._completedPieces, func(pieceIndex int) int64 { + return int64(f.t.piece(pieceIndex).numDirtyBytes()) + }) +} + +// The relative file path for a multi-file torrent, and the torrent name for a +// single-file torrent. Dir separators are '/'. +func (f *File) DisplayPath() string { + return f.displayPath +} + +// The download status of a piece that comprises part of a File. +type FilePieceState struct { + Bytes int64 // Bytes within the piece that are part of this File. + PieceState +} + +// Returns the state of pieces in this file. +func (f *File) State() (ret []FilePieceState) { + f.t.cl.rLock() + defer f.t.cl.rUnlock() + pieceSize := int64(f.t.usualPieceSize()) + off := f.offset % pieceSize + remaining := f.length + for i := pieceIndex(f.offset / pieceSize); ; i++ { + if remaining == 0 { + break + } + len1 := pieceSize - off + if len1 > remaining { + len1 = remaining + } + ps := f.t.pieceState(i) + ret = append(ret, FilePieceState{len1, ps}) + off = 0 + remaining -= len1 + } + return +} + +// Requests that all pieces containing data in the file be downloaded. 
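+// A sketch of the pick-and-stream pattern used by cmd/torrent-pick, where
+// target and dst are placeholders:
+//
+//	for _, file := range t.Files() {
+//		if file.DisplayPath() != target {
+//			continue
+//		}
+//		file.Download()
+//		r := file.NewReader()
+//		defer r.Close()
+//		io.Copy(dst, r)
+//	}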
+func (f *File) Download() { + f.SetPriority(PiecePriorityNormal) +} + +func byteRegionExclusivePieces(off, size, pieceSize int64) (begin, end int) { + begin = int((off + pieceSize - 1) / pieceSize) + end = int((off + size) / pieceSize) + return +} + +// Deprecated: Use File.SetPriority. +func (f *File) Cancel() { + f.SetPriority(PiecePriorityNone) +} + +func (f *File) NewReader() Reader { + return f.t.newReader(f.Offset(), f.Length()) +} + +// Sets the minimum priority for pieces in the File. +func (f *File) SetPriority(prio piecePriority) { + f.t.cl.lock() + if prio != f.prio { + f.prio = prio + f.t.updatePiecePriorities(f.BeginPieceIndex(), f.EndPieceIndex(), "File.SetPriority") + } + f.t.cl.unlock() +} + +// Returns the priority per File.SetPriority. +func (f *File) Priority() (prio piecePriority) { + f.t.cl.rLock() + prio = f.prio + f.t.cl.rUnlock() + return +} + +// Returns the index of the first piece containing data for the file. +func (f *File) BeginPieceIndex() int { + if f.t.usualPieceSize() == 0 { + return 0 + } + return pieceIndex(f.offset / int64(f.t.usualPieceSize())) +} + +// Returns the index of the piece after the last one containing data for the file. +func (f *File) EndPieceIndex() int { + if f.t.usualPieceSize() == 0 { + return 0 + } + return pieceIndex((f.offset + f.length + int64(f.t.usualPieceSize()) - 1) / int64(f.t.usualPieceSize())) +} diff --git a/deps/github.com/anacrolix/torrent/file_test.go b/deps/github.com/anacrolix/torrent/file_test.go new file mode 100644 index 0000000..2f57bcf --- /dev/null +++ b/deps/github.com/anacrolix/torrent/file_test.go @@ -0,0 +1,118 @@ +package torrent + +import ( + "testing" + + "github.com/RoaringBitmap/roaring" + "github.com/stretchr/testify/assert" +) + +func TestFileExclusivePieces(t *testing.T) { + for _, _case := range []struct { + off, size, pieceSize int64 + begin, end int + }{ + {0, 2, 2, 0, 1}, + {1, 2, 2, 1, 1}, + {1, 4, 2, 1, 2}, + } { + begin, end := byteRegionExclusivePieces(_case.off, _case.size, _case.pieceSize) + assert.EqualValues(t, _case.begin, begin) + assert.EqualValues(t, _case.end, end) + } +} + +type testFileBytesLeft struct { + usualPieceSize int64 + firstPieceIndex int + endPieceIndex int + fileOffset int64 + fileLength int64 + completedPieces roaring.Bitmap + expected int64 + name string +} + +func (me testFileBytesLeft) Run(t *testing.T) { + t.Run(me.name, func(t *testing.T) { + assert.EqualValues(t, me.expected, fileBytesLeft(me.usualPieceSize, me.firstPieceIndex, me.endPieceIndex, me.fileOffset, me.fileLength, &me.completedPieces, func(pieceIndex int) int64 { + return 0 + })) + }) +} + +func TestFileBytesLeft(t *testing.T) { + testFileBytesLeft{ + usualPieceSize: 3, + firstPieceIndex: 1, + endPieceIndex: 1, + fileOffset: 1, + fileLength: 0, + expected: 0, + name: "ZeroLengthFile", + }.Run(t) + + testFileBytesLeft{ + usualPieceSize: 2, + firstPieceIndex: 1, + endPieceIndex: 2, + fileOffset: 1, + fileLength: 1, + expected: 1, + name: "EndOfSecondPiece", + }.Run(t) + + testFileBytesLeft{ + usualPieceSize: 3, + firstPieceIndex: 0, + endPieceIndex: 1, + fileOffset: 1, + fileLength: 1, + expected: 1, + name: "FileInFirstPiece", + }.Run(t) + + testFileBytesLeft{ + usualPieceSize: 3, + firstPieceIndex: 0, + endPieceIndex: 1, + fileOffset: 1, + fileLength: 1, + expected: 1, + name: "LandLocked", + }.Run(t) + + testFileBytesLeft{ + usualPieceSize: 3, + firstPieceIndex: 1, + endPieceIndex: 3, + fileOffset: 4, + fileLength: 4, + expected: 4, + name: "TwoPieces", + }.Run(t) + + testFileBytesLeft{ + 
usualPieceSize: 3, + firstPieceIndex: 1, + endPieceIndex: 4, + fileOffset: 5, + fileLength: 7, + expected: 7, + name: "ThreePieces", + }.Run(t) + + testFileBytesLeft{ + usualPieceSize: 3, + firstPieceIndex: 1, + endPieceIndex: 4, + fileOffset: 5, + fileLength: 7, + expected: 0, + completedPieces: func() (ret roaring.Bitmap) { + ret.AddRange(0, 5) + return + }(), + name: "ThreePiecesCompletedAll", + }.Run(t) +} diff --git a/deps/github.com/anacrolix/torrent/fs/TODO b/deps/github.com/anacrolix/torrent/fs/TODO new file mode 100644 index 0000000..9ab12b5 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/fs/TODO @@ -0,0 +1 @@ + * Reinstate InitAsyncRead, or find out if it's worth it. Upstream made it a PITA to apply it automatically. diff --git a/deps/github.com/anacrolix/torrent/fs/cmd/torrentfs/main.go b/deps/github.com/anacrolix/torrent/fs/cmd/torrentfs/main.go new file mode 100644 index 0000000..d35f5c2 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/fs/cmd/torrentfs/main.go @@ -0,0 +1,158 @@ +// Mounts a FUSE filesystem backed by torrents and magnet links. +package main + +import ( + "fmt" + "net" + "net/http" + _ "net/http/pprof" + "os" + "os/signal" + "os/user" + "path/filepath" + "syscall" + "time" + + "github.com/anacrolix/envpprof" + _ "github.com/anacrolix/envpprof" + "github.com/anacrolix/fuse" + fusefs "github.com/anacrolix/fuse/fs" + "github.com/anacrolix/log" + "github.com/anacrolix/tagflag" + + "github.com/anacrolix/torrent" + torrentfs "github.com/anacrolix/torrent/fs" + "github.com/anacrolix/torrent/util/dirwatch" +) + +var logger = log.Default.WithNames("main") + +var args = struct { + MetainfoDir string `help:"torrent files in this location describe the contents of the mounted filesystem"` + DownloadDir string `help:"location to save torrent data"` + MountDir string `help:"location the torrent contents are made available"` + + DisableTrackers bool + TestPeer *net.TCPAddr + ReadaheadBytes tagflag.Bytes + ListenAddr *net.TCPAddr +}{ + MetainfoDir: func() string { + _user, err := user.Current() + if err != nil { + panic(err) + } + return filepath.Join(_user.HomeDir, ".config/transmission/torrents") + }(), + ReadaheadBytes: 10 << 20, + ListenAddr: &net.TCPAddr{}, +} + +func exitSignalHandlers(fs *torrentfs.TorrentFS) { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) + for { + <-c + fs.Destroy() + err := fuse.Unmount(args.MountDir) + if err != nil { + log.Print(err) + } + } +} + +func addTestPeer(client *torrent.Client) { + for _, t := range client.Torrents() { + t.AddPeers([]torrent.PeerInfo{{ + Addr: args.TestPeer, + }}) + } +} + +func main() { + defer envpprof.Stop() + err := mainErr() + if err != nil { + logger.Levelf(log.Error, "error in main: %v", err) + os.Exit(1) + } +} + +func mainErr() error { + tagflag.Parse(&args) + if args.MountDir == "" { + os.Stderr.WriteString("y u no specify mountpoint?\n") + os.Exit(2) + } + conn, err := fuse.Mount(args.MountDir, fuse.ReadOnly()) + if err != nil { + return fmt.Errorf("mounting: %w", err) + } + defer fuse.Unmount(args.MountDir) + // TODO: Think about the ramifications of exiting not due to a signal. + defer conn.Close() + cfg := torrent.NewDefaultClientConfig() + cfg.DataDir = args.DownloadDir + cfg.DisableTrackers = args.DisableTrackers + cfg.NoUpload = true // Ensure that downloads are responsive. 
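+	// args.ListenAddr defaults to an empty net.TCPAddr, which stringifies to
+	// ":0", so the client binds an ephemeral port unless one was specified.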
+	cfg.SetListenAddr(args.ListenAddr.String())
+	client, err := torrent.NewClient(cfg)
+	if err != nil {
+		return fmt.Errorf("creating torrent client: %w", err)
+	}
+	// This is naturally exported via GOPPROF=http.
+	http.DefaultServeMux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+		client.WriteStatus(w)
+	})
+	dw, err := dirwatch.New(args.MetainfoDir)
+	if err != nil {
+		return fmt.Errorf("watching torrent dir: %w", err)
+	}
+	dw.Logger = dw.Logger.FilterLevel(log.Info)
+	go func() {
+		for ev := range dw.Events {
+			switch ev.Change {
+			case dirwatch.Added:
+				if ev.TorrentFilePath != "" {
+					_, err := client.AddTorrentFromFile(ev.TorrentFilePath)
+					if err != nil {
+						log.Printf("error adding torrent from file %q to client: %v", ev.TorrentFilePath, err)
+					}
+				} else if ev.MagnetURI != "" {
+					_, err := client.AddMagnet(ev.MagnetURI)
+					if err != nil {
+						log.Printf("error adding magnet: %s", err)
+					}
+				}
+			case dirwatch.Removed:
+				T, ok := client.Torrent(ev.InfoHash)
+				if !ok {
+					break
+				}
+				T.Drop()
+			}
+		}
+	}()
+	fs := torrentfs.New(client)
+	go exitSignalHandlers(fs)
+
+	if args.TestPeer != nil {
+		go func() {
+			for {
+				addTestPeer(client)
+				time.Sleep(10 * time.Second)
+			}
+		}()
+	}
+
+	logger.Levelf(log.Debug, "serving fuse fs")
+	if err := fusefs.Serve(conn, fs); err != nil {
+		return fmt.Errorf("serving fuse fs: %w", err)
+	}
+	logger.Levelf(log.Debug, "fuse fs completed successfully. waiting for conn ready")
+	<-conn.Ready
+	if err := conn.MountError; err != nil {
+		return fmt.Errorf("mount error: %w", err)
+	}
+	return nil
+}
diff --git a/deps/github.com/anacrolix/torrent/fs/file_handle.go b/deps/github.com/anacrolix/torrent/fs/file_handle.go
new file mode 100644
index 0000000..ce5ded0
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/fs/file_handle.go
@@ -0,0 +1,89 @@
+package torrentfs
+
+import (
+	"context"
+	"io"
+
+	"github.com/anacrolix/fuse"
+	"github.com/anacrolix/fuse/fs"
+	"github.com/anacrolix/missinggo/v2"
+
+	"github.com/anacrolix/torrent"
+)
+
+type fileHandle struct {
+	fn fileNode
+	tf *torrent.File
+}
+
+var _ interface {
+	fs.HandleReader
+	fs.HandleReleaser
+} = fileHandle{}
+
+func (me fileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
+	torrentfsReadRequests.Add(1)
+	if req.Dir {
+		panic("read on directory")
+	}
+	r := me.tf.NewReader()
+	defer r.Close()
+	pos, err := r.Seek(req.Offset, io.SeekStart)
+	if err != nil {
+		panic(err)
+	}
+	if pos != req.Offset {
+		panic("seek failed")
+	}
+	resp.Data = resp.Data[:req.Size]
+	readDone := make(chan struct{})
+	ctx, cancel := context.WithCancel(ctx)
+	var readErr error
+	go func() {
+		defer close(readDone)
+		me.fn.FS.mu.Lock()
+		me.fn.FS.blockedReads++
+		me.fn.FS.event.Broadcast()
+		me.fn.FS.mu.Unlock()
+		var n int
+		r := missinggo.ContextedReader{r, ctx}
+		// log.Printf("reading %v bytes at %v", len(resp.Data), req.Offset)
+		if true {
+			// A user reported that on freebsd 12.2, the system requires that reads are
+			// completely filled. Their system only asks for 64KiB at a time. I've seen systems that
+			// can demand up to 16MiB at a time, so this gets tricky. For now, I'll restore the old
+			// behaviour from before 2a7352a, which nobody reported problems with.
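+			// io.ReadFull below keeps reading until resp.Data is full or an
+			// error occurs; a short read at end-of-file surfaces as
+			// io.ErrUnexpectedEOF, which is cleared so the kernel sees a
+			// plain short read at EOF rather than an error.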
+ n, readErr = io.ReadFull(r, resp.Data) + if readErr == io.ErrUnexpectedEOF { + readErr = nil + } + } else { + n, readErr = r.Read(resp.Data) + if readErr == io.EOF { + readErr = nil + } + } + resp.Data = resp.Data[:n] + }() + defer func() { + <-readDone + me.fn.FS.mu.Lock() + me.fn.FS.blockedReads-- + me.fn.FS.event.Broadcast() + me.fn.FS.mu.Unlock() + }() + defer cancel() + + select { + case <-readDone: + return readErr + case <-me.fn.FS.destroyed: + return fuse.EIO + case <-ctx.Done(): + return fuse.EINTR + } +} + +func (me fileHandle) Release(context.Context, *fuse.ReleaseRequest) error { + return nil +} diff --git a/deps/github.com/anacrolix/torrent/fs/filenode.go b/deps/github.com/anacrolix/torrent/fs/filenode.go new file mode 100644 index 0000000..28a433e --- /dev/null +++ b/deps/github.com/anacrolix/torrent/fs/filenode.go @@ -0,0 +1,27 @@ +package torrentfs + +import ( + "context" + + "github.com/anacrolix/fuse" + fusefs "github.com/anacrolix/fuse/fs" + + "github.com/anacrolix/torrent" +) + +type fileNode struct { + node + f *torrent.File +} + +var _ fusefs.NodeOpener = fileNode{} + +func (fn fileNode) Attr(ctx context.Context, attr *fuse.Attr) error { + attr.Size = uint64(fn.f.Length()) + attr.Mode = defaultMode + return nil +} + +func (fn fileNode) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fusefs.Handle, error) { + return fileHandle{fn, fn.f}, nil +} diff --git a/deps/github.com/anacrolix/torrent/fs/test.sh b/deps/github.com/anacrolix/torrent/fs/test.sh new file mode 100644 index 0000000..5374ed8 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/fs/test.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +echo $BASH_VERSION +set -eux +repopath="$(cd "$(dirname "$0")/.."; pwd)" +debian_file=debian-10.8.0-amd64-netinst.iso +mkdir -p mnt torrents +# I think the timing can cause torrents to not get added correctly to the torrentfs client, so add +# them first and start the fs afterwards. +pushd torrents +cp "$repopath/testdata/$debian_file.torrent" . +godo -v -- "$repopath/cmd/torrent" metainfo "$repopath/testdata/sintel.torrent" magnet > sintel.magnet +popd +#file="$debian_file" +file=Sintel/Sintel.mp4 + +GOPPROF=http godo -v -- "$repopath/fs/cmd/torrentfs" -mountDir=mnt -metainfoDir=torrents & +torrentfs_pid=$! +trap "kill $torrentfs_pid" EXIT + +check_file() { + while [ ! -e "mnt/$file" ]; do sleep 1; done + pv -f "mnt/$file" | md5sum -c <(cat <<-EOF + 083e808d56aa7b146f513b3458658292 - + EOF + ) +} + +( check_file ) & +check_file_pid=$! + +trap "kill $torrentfs_pid $check_file_pid" EXIT +wait -n +status=$? 
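+# Whichever background job exits first (the filesystem or the checksum
+# check) supplies the status; unmount before propagating it below.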
+sudo umount mnt +trap - EXIT +echo "wait returned" $status +exit $status diff --git a/deps/github.com/anacrolix/torrent/fs/torrentfs.go b/deps/github.com/anacrolix/torrent/fs/torrentfs.go new file mode 100644 index 0000000..5e0b75e --- /dev/null +++ b/deps/github.com/anacrolix/torrent/fs/torrentfs.go @@ -0,0 +1,215 @@ +package torrentfs + +import ( + "context" + "expvar" + "os" + "strings" + "sync" + + "github.com/anacrolix/fuse" + fusefs "github.com/anacrolix/fuse/fs" + + "github.com/anacrolix/torrent" + "github.com/anacrolix/torrent/metainfo" +) + +const ( + defaultMode = 0o555 +) + +var torrentfsReadRequests = expvar.NewInt("torrentfsReadRequests") + +type TorrentFS struct { + Client *torrent.Client + destroyed chan struct{} + mu sync.Mutex + blockedReads int + event sync.Cond +} + +var ( + _ fusefs.FSDestroyer = &TorrentFS{} + + _ fusefs.NodeForgetter = rootNode{} + _ fusefs.HandleReadDirAller = rootNode{} + _ fusefs.HandleReadDirAller = dirNode{} +) + +// Is a directory node that lists all torrents and handles destruction of the +// filesystem. +type rootNode struct { + fs *TorrentFS +} + +type node struct { + path string + metadata *metainfo.Info + FS *TorrentFS + t *torrent.Torrent +} + +type dirNode struct { + node +} + +var _ fusefs.HandleReadDirAller = dirNode{} + +func isSubPath(parent, child string) bool { + if parent == "" { + return len(child) > 0 + } + if !strings.HasPrefix(child, parent) { + return false + } + extra := child[len(parent):] + if extra == "" { + return false + } + // Not just a file with more stuff on the end. + return extra[0] == '/' +} + +func (dn dirNode) ReadDirAll(ctx context.Context) (des []fuse.Dirent, err error) { + names := map[string]bool{} + for _, fi := range dn.metadata.Files { + filePathname := strings.Join(fi.Path, "/") + if !isSubPath(dn.path, filePathname) { + continue + } + var name string + if dn.path == "" { + name = fi.Path[0] + } else { + dirPathname := strings.Split(dn.path, "/") + name = fi.Path[len(dirPathname)] + } + if names[name] { + continue + } + names[name] = true + de := fuse.Dirent{ + Name: name, + } + if len(fi.Path) == len(dn.path)+1 { + de.Type = fuse.DT_File + } else { + de.Type = fuse.DT_Dir + } + des = append(des, de) + } + return +} + +func (dn dirNode) Lookup(_ context.Context, name string) (fusefs.Node, error) { + dir := false + var file *torrent.File + var fullPath string + if dn.path != "" { + fullPath = dn.path + "/" + name + } else { + fullPath = name + } + for _, f := range dn.t.Files() { + if f.DisplayPath() == fullPath { + file = f + } + if isSubPath(fullPath, f.DisplayPath()) { + dir = true + } + } + n := dn.node + n.path = fullPath + if dir && file != nil { + panic("both dir and file") + } + if file != nil { + return fileNode{n, file}, nil + } + if dir { + return dirNode{n}, nil + } + return nil, fuse.ENOENT +} + +func (dn dirNode) Attr(ctx context.Context, attr *fuse.Attr) error { + attr.Mode = os.ModeDir | defaultMode + return nil +} + +func (rn rootNode) Lookup(ctx context.Context, name string) (_node fusefs.Node, err error) { + for _, t := range rn.fs.Client.Torrents() { + info := t.Info() + if t.Name() != name || info == nil { + continue + } + __node := node{ + metadata: info, + FS: rn.fs, + t: t, + } + if !info.IsDir() { + _node = fileNode{__node, t.Files()[0]} + } else { + _node = dirNode{__node} + } + break + } + if _node == nil { + err = fuse.ENOENT + } + return +} + +func (rn rootNode) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error) { + for _, t := range rn.fs.Client.Torrents() { 
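+		// Torrents whose metainfo hasn't arrived yet have no known name or
+		// file layout, so they're skipped until Info() returns non-nil.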
+ info := t.Info() + if info == nil { + continue + } + dirents = append(dirents, fuse.Dirent{ + Name: info.Name, + Type: func() fuse.DirentType { + if !info.IsDir() { + return fuse.DT_File + } else { + return fuse.DT_Dir + } + }(), + }) + } + return +} + +func (rn rootNode) Attr(ctx context.Context, attr *fuse.Attr) error { + attr.Mode = os.ModeDir | defaultMode + return nil +} + +// TODO(anacrolix): Why should rootNode implement this? +func (rn rootNode) Forget() { + rn.fs.Destroy() +} + +func (tfs *TorrentFS) Root() (fusefs.Node, error) { + return rootNode{tfs}, nil +} + +func (tfs *TorrentFS) Destroy() { + tfs.mu.Lock() + select { + case <-tfs.destroyed: + default: + close(tfs.destroyed) + } + tfs.mu.Unlock() +} + +func New(cl *torrent.Client) *TorrentFS { + fs := &TorrentFS{ + Client: cl, + destroyed: make(chan struct{}), + } + fs.event.L = &fs.mu + return fs +} diff --git a/deps/github.com/anacrolix/torrent/fs/torrentfs_test.go b/deps/github.com/anacrolix/torrent/fs/torrentfs_test.go new file mode 100644 index 0000000..097f1bb --- /dev/null +++ b/deps/github.com/anacrolix/torrent/fs/torrentfs_test.go @@ -0,0 +1,240 @@ +package torrentfs + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "net" + _ "net/http/pprof" + "os" + "path/filepath" + "testing" + "time" + + _ "github.com/anacrolix/envpprof" + "github.com/anacrolix/fuse" + fusefs "github.com/anacrolix/fuse/fs" + "github.com/anacrolix/missinggo/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/anacrolix/torrent" + "github.com/anacrolix/torrent/internal/testutil" + "github.com/anacrolix/torrent/metainfo" + "github.com/anacrolix/torrent/storage" +) + +func init() { + log.SetFlags(log.Flags() | log.Lshortfile) +} + +func TestTCPAddrString(t *testing.T) { + l, err := net.Listen("tcp4", "localhost:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + c, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + ras := c.RemoteAddr().String() + ta := &net.TCPAddr{ + IP: net.IPv4(127, 0, 0, 1), + Port: missinggo.AddrPort(l.Addr()), + } + s := ta.String() + if ras != s { + t.FailNow() + } +} + +type testLayout struct { + BaseDir string + MountDir string + Completed string + Metainfo *metainfo.MetaInfo +} + +func (tl *testLayout) Destroy() error { + return os.RemoveAll(tl.BaseDir) +} + +func newGreetingLayout(t *testing.T) (tl testLayout, err error) { + tl.BaseDir = t.TempDir() + tl.Completed = filepath.Join(tl.BaseDir, "completed") + os.Mkdir(tl.Completed, 0o777) + tl.MountDir = filepath.Join(tl.BaseDir, "mnt") + os.Mkdir(tl.MountDir, 0o777) + testutil.CreateDummyTorrentData(tl.Completed) + tl.Metainfo = testutil.GreetingMetaInfo() + return +} + +// Unmount without first killing the FUSE connection while there are FUSE +// operations blocked inside the filesystem code. 
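+// Destroy closes the destroyed channel, which wakes the Read blocked inside
+// fileHandle with fuse.EIO so the kernel can complete the outstanding request
+// and the unmount can proceed.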
+func TestUnmountWedged(t *testing.T) { + layout, err := newGreetingLayout(t) + require.NoError(t, err) + defer func() { + err := layout.Destroy() + if err != nil { + t.Log(err) + } + }() + cfg := torrent.NewDefaultClientConfig() + cfg.DataDir = filepath.Join(layout.BaseDir, "incomplete") + cfg.DisableTrackers = true + cfg.NoDHT = true + cfg.DisableTCP = true + cfg.DisableUTP = true + client, err := torrent.NewClient(cfg) + require.NoError(t, err) + defer client.Close() + tt, err := client.AddTorrent(layout.Metainfo) + require.NoError(t, err) + fs := New(client) + fuseConn, err := fuse.Mount(layout.MountDir) + if err != nil { + switch err.Error() { + case "cannot locate OSXFUSE": + fallthrough + case "fusermount: exit status 1": + t.Skip(err) + } + t.Fatal(err) + } + go func() { + server := fusefs.New(fuseConn, &fusefs.Config{ + Debug: func(msg interface{}) { + t.Log(msg) + }, + }) + server.Serve(fs) + }() + <-fuseConn.Ready + if err := fuseConn.MountError; err != nil { + t.Fatalf("mount error: %s", err) + } + ctx, cancel := context.WithCancel(context.Background()) + // Read the greeting file, though it will never be available. This should + // "wedge" FUSE, requiring the fs object to be forcibly destroyed. The + // read call will return with a FS error. + go func() { + <-ctx.Done() + fs.mu.Lock() + fs.event.Broadcast() + fs.mu.Unlock() + }() + go func() { + defer cancel() + _, err := ioutil.ReadFile(filepath.Join(layout.MountDir, tt.Info().Name)) + require.Error(t, err) + }() + + // Wait until the read has blocked inside the filesystem code. + fs.mu.Lock() + for fs.blockedReads != 1 && ctx.Err() == nil { + fs.event.Wait() + } + fs.mu.Unlock() + + fs.Destroy() + + for { + err = fuse.Unmount(layout.MountDir) + if err != nil { + t.Logf("error unmounting: %s", err) + time.Sleep(time.Millisecond) + } else { + break + } + } + + err = fuseConn.Close() + assert.NoError(t, err) +} + +func TestDownloadOnDemand(t *testing.T) { + layout, err := newGreetingLayout(t) + require.NoError(t, err) + defer layout.Destroy() + cfg := torrent.NewDefaultClientConfig() + cfg.DataDir = layout.Completed + cfg.DisableTrackers = true + cfg.NoDHT = true + cfg.Seed = true + cfg.ListenPort = 0 + cfg.ListenHost = torrent.LoopbackListenHost + seeder, err := torrent.NewClient(cfg) + require.NoError(t, err) + defer seeder.Close() + defer testutil.ExportStatusWriter(seeder, "s", t)() + // Just to mix things up, the seeder starts with the data, but the leecher + // starts with the metainfo. + seederTorrent, err := seeder.AddMagnet(fmt.Sprintf("magnet:?xt=urn:btih:%s", layout.Metainfo.HashInfoBytes().HexString())) + require.NoError(t, err) + go func() { + // Wait until we get the metainfo, then check for the data. 
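+		// VerifyData re-hashes the seeder's pieces against the metainfo so the
+		// pre-existing files in the completed directory are marked complete
+		// and can be served to the leecher.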
+		<-seederTorrent.GotInfo()
+		seederTorrent.VerifyData()
+	}()
+	cfg = torrent.NewDefaultClientConfig()
+	cfg.DisableTrackers = true
+	cfg.NoDHT = true
+	cfg.DisableTCP = true
+	cfg.DefaultStorage = storage.NewMMap(filepath.Join(layout.BaseDir, "download"))
+	cfg.ListenHost = torrent.LoopbackListenHost
+	cfg.ListenPort = 0
+	leecher, err := torrent.NewClient(cfg)
+	require.NoError(t, err)
+	defer testutil.ExportStatusWriter(leecher, "l", t)()
+	defer leecher.Close()
+	leecherTorrent, err := leecher.AddTorrent(layout.Metainfo)
+	require.NoError(t, err)
+	leecherTorrent.AddClientPeer(seeder)
+	fs := New(leecher)
+	defer fs.Destroy()
+	root, _ := fs.Root()
+	node, _ := root.(fusefs.NodeStringLookuper).Lookup(context.Background(), "greeting")
+	var attr fuse.Attr
+	node.Attr(context.Background(), &attr)
+	size := attr.Size
+	data := make([]byte, size)
+	h, err := node.(fusefs.NodeOpener).Open(context.TODO(), nil, nil)
+	require.NoError(t, err)
+
+	// torrent.Reader.Read no longer tries to fill the entire read buffer, so this is a ReadFull for
+	// fusefs.
+	var n int
+	for n < len(data) {
+		resp := fuse.ReadResponse{Data: data[n:]}
+		err := h.(fusefs.HandleReader).Read(context.Background(), &fuse.ReadRequest{
+			Size:   int(size) - n,
+			Offset: int64(n),
+		}, &resp)
+		assert.NoError(t, err)
+		n += len(resp.Data)
+	}
+
+	assert.EqualValues(t, testutil.GreetingFileContents, data)
+}
+
+func TestIsSubPath(t *testing.T) {
+	for _, case_ := range []struct {
+		parent, child string
+		is            bool
+	}{
+		{"", "", false},
+		{"", "/", true},
+		{"", "a", true},
+		{"a/b", "a/bc", false},
+		{"a/b", "a/b", false},
+		{"a/b", "a/b/c", true},
+		{"a/b", "a//b", false},
+	} {
+		assert.Equal(t, case_.is, isSubPath(case_.parent, case_.child))
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/fs/unwedge-tests.sh b/deps/github.com/anacrolix/torrent/fs/unwedge-tests.sh
new file mode 100644
index 0000000..322a280
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/fs/unwedge-tests.sh
@@ -0,0 +1,5 @@
+shopt -s nullglob
+for a in "${TMPDIR:-/tmp}"/torrentfs*; do
+	sudo umount -f "$a/mnt"
+	rm -r -- "$a"
+done
diff --git a/deps/github.com/anacrolix/torrent/global.go b/deps/github.com/anacrolix/torrent/global.go
new file mode 100644
index 0000000..5a5bddb
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/global.go
@@ -0,0 +1,65 @@
+package torrent
+
+import (
+	"crypto"
+	"expvar"
+
+	pp "github.com/anacrolix/torrent/peer_protocol"
+)
+
+const (
+	pieceHash        = crypto.SHA1
+	defaultChunkSize = 0x4000 // 16KiB
+
+	// Arbitrary maximum of "metadata_size" (see https://www.bittorrent.org/beps/bep_0009.html)
+	// libtorrent-rasterbar uses 4MiB at last check. TODO: Add links to values used by other
+	// implementations here. I saw 14143527 in the metainfo for
+	// 3597f16e239aeb8f8524a1a1c4e4725a0a96b470. Large values for legitimate torrents should be
+	// recorded here for consideration.
+	maxMetadataSize uint32 = 16 * 1024 * 1024
+)
+
+// These are our extended message IDs. Peers will use these values to
+// select which extension a message is intended for.
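+//
+// In the BEP 10 extension handshake these end up advertised as, roughly,
+// {"m": {"ut_metadata": 1, "ut_pex": 2, ...}}; ID 0 is reserved for the
+// handshake message itself and for deleting previously advertised keys.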
+const ( + metadataExtendedId = iota + 1 // 0 is reserved for deleting keys + pexExtendedId + utHolepunchExtendedId +) + +func defaultPeerExtensionBytes() PeerExtensionBits { + return pp.NewPeerExtensionBytes(pp.ExtensionBitDht, pp.ExtensionBitLtep, pp.ExtensionBitFast) +} + +func init() { + torrent.Set("peers supporting extension", &peersSupportingExtension) + torrent.Set("chunks received", &chunksReceived) +} + +// I could move a lot of these counters to their own file, but I suspect they +// may be attached to a Client someday. +var ( + torrent = expvar.NewMap("torrent") + peersSupportingExtension expvar.Map + chunksReceived expvar.Map + + pieceHashedCorrect = expvar.NewInt("pieceHashedCorrect") + pieceHashedNotCorrect = expvar.NewInt("pieceHashedNotCorrect") + + completedHandshakeConnectionFlags = expvar.NewMap("completedHandshakeConnectionFlags") + // Count of connections to peer with same client ID. + connsToSelf = expvar.NewInt("connsToSelf") + receivedKeepalives = expvar.NewInt("receivedKeepalives") + // Requests received for pieces we don't have. + requestsReceivedForMissingPieces = expvar.NewInt("requestsReceivedForMissingPieces") + requestedChunkLengths = expvar.NewMap("requestedChunkLengths") + + messageTypesReceived = expvar.NewMap("messageTypesReceived") + + // Track the effectiveness of Torrent.connPieceInclinationPool. + pieceInclinationsReused = expvar.NewInt("pieceInclinationsReused") + pieceInclinationsNew = expvar.NewInt("pieceInclinationsNew") + pieceInclinationsPut = expvar.NewInt("pieceInclinationsPut") + + concurrentChunkWrites = expvar.NewInt("torrentConcurrentChunkWrites") +) diff --git a/deps/github.com/anacrolix/torrent/go.mod b/deps/github.com/anacrolix/torrent/go.mod new file mode 100644 index 0000000..6812000 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/go.mod @@ -0,0 +1,127 @@ +module github.com/anacrolix/torrent + +go 1.20 + +require ( + github.com/RoaringBitmap/roaring v1.2.3 + github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 + github.com/alexflint/go-arg v1.4.3 + github.com/anacrolix/bargle v0.0.0-20220630015206-d7a4d433886a + github.com/anacrolix/chansync v0.3.0 + github.com/anacrolix/dht/v2 v2.19.2-0.20221121215055-066ad8494444 + github.com/anacrolix/envpprof v1.3.0 + github.com/anacrolix/fuse v0.2.0 + github.com/anacrolix/generics v0.0.0-20230816105729-c755655aee45 + github.com/anacrolix/go-libutp v1.3.1 + github.com/anacrolix/log v0.14.6-0.20231202035202-ed7a02cad0b4 + github.com/anacrolix/missinggo v1.3.0 + github.com/anacrolix/missinggo/perf v1.0.0 + github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9 + github.com/anacrolix/multiless v0.3.0 + github.com/anacrolix/squirrel v0.6.0 + github.com/anacrolix/sync v0.5.1 + github.com/anacrolix/tagflag v1.3.0 + github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 + github.com/anacrolix/utp v0.1.0 + github.com/bahlo/generic-list-go v0.2.0 + github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 + github.com/davecgh/go-spew v1.1.1 + github.com/dustin/go-humanize v1.0.0 + github.com/edsrzf/mmap-go v1.1.0 + github.com/elliotchance/orderedmap v1.4.0 + github.com/frankban/quicktest v1.14.6 + github.com/fsnotify/fsnotify v1.5.4 + github.com/go-llsqlite/adapter v0.0.0-20230927005056-7f5ce7f0c916 + github.com/google/btree v1.1.2 + github.com/google/go-cmp v0.5.9 + github.com/gorilla/websocket v1.5.0 + github.com/jessevdk/go-flags v1.5.0 + github.com/pion/datachannel v1.5.2 + github.com/pion/logging v0.2.2 + github.com/pion/webrtc/v3 v3.1.42 + 
github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.12.2 + github.com/stretchr/testify v1.8.1 + github.com/tidwall/btree v1.6.0 + go.etcd.io/bbolt v1.3.6 + go.opentelemetry.io/otel v1.8.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.8.0 + go.opentelemetry.io/otel/sdk v1.8.0 + go.opentelemetry.io/otel/trace v1.8.0 + golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df + golang.org/x/sys v0.15.0 + golang.org/x/time v0.0.0-20220609170525-579cf78fd858 +) + +require ( + github.com/alecthomas/atomic v0.1.0-alpha2 // indirect + github.com/alexflint/go-scalar v1.1.0 // indirect + github.com/anacrolix/mmsg v1.0.0 // indirect + github.com/anacrolix/stm v0.4.0 // indirect + github.com/benbjohnson/immutable v0.3.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bits-and-blooms/bitset v1.2.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/go-llsqlite/crawshaw v0.4.0 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/huandu/xstrings v1.3.2 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mschoch/smat v0.2.0 // indirect + github.com/pion/dtls/v2 v2.2.4 // indirect + github.com/pion/ice/v2 v2.2.6 // indirect + github.com/pion/interceptor v0.1.11 // indirect + github.com/pion/mdns v0.0.5 // indirect + github.com/pion/randutil v0.1.0 // indirect + github.com/pion/rtcp v1.2.9 // indirect + github.com/pion/rtp v1.7.13 // indirect + github.com/pion/sctp v1.8.2 // indirect + github.com/pion/sdp/v3 v3.0.5 // indirect + github.com/pion/srtp/v2 v2.0.9 // indirect + github.com/pion/stun v0.3.5 // indirect + github.com/pion/transport v0.13.1 // indirect + github.com/pion/transport/v2 v2.0.0 // indirect + github.com/pion/turn/v2 v2.0.8 // indirect + github.com/pion/udp v0.1.4 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.35.0 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + github.com/rogpeppe/go-internal v1.9.0 // indirect + github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 // indirect + github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0 // indirect + go.opentelemetry.io/proto/otlp v0.18.0 // indirect + golang.org/x/crypto v0.17.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect + google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect + google.golang.org/grpc v1.46.2 // indirect + google.golang.org/protobuf v1.28.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + modernc.org/libc v1.22.3 // indirect + modernc.org/mathutil v1.5.0 // indirect + modernc.org/memory v1.5.0 // indirect + modernc.org/sqlite v1.21.1 // indirect + zombiezen.com/go/sqlite v0.13.1 
// indirect +) + +retract ( + // Doesn't signal interest to peers if choked when piece priorities change. + v1.39.0 + // peer-requesting doesn't scale + [v1.34.0, v1.38.1] + // Indefinite outgoing requests on storage write errors. https://github.com/anacrolix/torrent/issues/889 + [v1.29.0, v1.53.2] +) diff --git a/deps/github.com/anacrolix/torrent/go.sum b/deps/github.com/anacrolix/torrent/go.sum new file mode 100644 index 0000000..ecf8519 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/go.sum @@ -0,0 +1,908 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797/go.mod h1:sXBiorCo8c46JlQV3oXPKINnZ8mcqnye1EkVkqsectk= +crawshaw.io/sqlite v0.3.2/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4= 
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmGfU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w= +github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= +github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= +github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVOVmhWBY= +github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 h1:byYvvbfSo3+9efR4IeReh77gVs4PnNDR3AMOE9NJ7a0= +github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0/go.mod h1:q37NoqncT41qKc048STsifIt69LfUJ8SrWWcz/yam5k= +github.com/alecthomas/assert/v2 v2.0.0-alpha3 h1:pcHeMvQ3OMstAWgaeaXIAL8uzB9xMm2zlxt+/4ml8lk= +github.com/alecthomas/atomic v0.1.0-alpha2 h1:dqwXmax66gXvHhsOS4pGPZKqYOlTkapELkLb3MNdlH8= +github.com/alecthomas/atomic v0.1.0-alpha2/go.mod h1:zD6QGEyw49HIq19caJDc2NMXAy8rNi9ROrxtMXATfyI= +github.com/alecthomas/repr v0.0.0-20210801044451-80ca428c5142 h1:8Uy0oSf5co/NZXje7U1z8Mpep++QJOldL2hs/sBQf48= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexflint/go-arg v1.4.3 h1:9rwwEBpMXfKQKceuZfYcwuc/7YY7tWJbFsgG5cAU/uo= +github.com/alexflint/go-arg v1.4.3/go.mod h1:3PZ/wp/8HuqRZMUUgu7I+e1qcpUbvmS258mRXkFH4IA= +github.com/alexflint/go-scalar v1.1.0 h1:aaAouLLzI9TChcPXotr6gUhq+Scr8rl0P9P4PnltbhM= +github.com/alexflint/go-scalar v1.1.0/go.mod h1:LoFvNMqS1CPrMVltza4LvnGKhaSpc3oyLEBUZVhhS2o= +github.com/anacrolix/bargle v0.0.0-20220630015206-d7a4d433886a h1:KCP9QvHlLoUQBOaTf/YCuOzG91Ym1cPB6S68O4Q3puo= +github.com/anacrolix/bargle v0.0.0-20220630015206-d7a4d433886a/go.mod h1:9xUiZbkh+94FbiIAL1HXpAIBa832f3Mp07rRPl5c5RQ= +github.com/anacrolix/chansync v0.3.0 h1:lRu9tbeuw3wl+PhMu/r+JJCRu5ArFXIluOgdF0ao6/U= +github.com/anacrolix/chansync v0.3.0/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k= +github.com/anacrolix/dht/v2 v2.19.2-0.20221121215055-066ad8494444 h1:8V0K09lrGoeT2KRJNOtspA7q+OMxGwQqK/Ug0IiaaRE= +github.com/anacrolix/dht/v2 v2.19.2-0.20221121215055-066ad8494444/go.mod h1:MctKM1HS5YYDb3F30NGJxLE+QPuqWoT5ReW/4jt8xew= +github.com/anacrolix/envpprof 
v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= +github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= +github.com/anacrolix/envpprof v1.1.0/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= +github.com/anacrolix/envpprof v1.3.0 h1:WJt9bpuT7A/CDCxPOv/eeZqHWlle/Y0keJUvc6tcJDk= +github.com/anacrolix/envpprof v1.3.0/go.mod h1:7QIG4CaX1uexQ3tqd5+BRa/9e2D02Wcertl6Yh0jCB0= +github.com/anacrolix/fuse v0.2.0 h1:pc+To78kI2d/WUjIyrsdqeJQAesuwpGxlI3h1nAv3Do= +github.com/anacrolix/fuse v0.2.0/go.mod h1:Kfu02xBwnySDpH3N23BmrP3MDfwAQGRLUCj6XyeOvBQ= +github.com/anacrolix/generics v0.0.0-20230816105729-c755655aee45 h1:Kmcl3I9K2+5AdnnR7hvrnVT0TLeFWWMa9bxnm55aVIg= +github.com/anacrolix/generics v0.0.0-20230816105729-c755655aee45/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8= +github.com/anacrolix/go-libutp v1.3.1 h1:idJzreNLl+hNjGC3ZnUOjujEaryeOGgkwHLqSGoige0= +github.com/anacrolix/go-libutp v1.3.1/go.mod h1:heF41EC8kN0qCLMokLBVkB8NXiLwx3t8R8810MTNI5o= +github.com/anacrolix/log v0.3.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= +github.com/anacrolix/log v0.6.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= +github.com/anacrolix/log v0.10.1-0.20220123034749-3920702c17f8/go.mod h1:GmnE2c0nvz8pOIPUSC9Rawgefy1sDXqposC2wgtBZE4= +github.com/anacrolix/log v0.13.1/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68= +github.com/anacrolix/log v0.14.6-0.20231202035202-ed7a02cad0b4 h1:CdVK9IoqoqklXQQ4+L2aew64xsz14KdOD+rnKdTQajg= +github.com/anacrolix/log v0.14.6-0.20231202035202-ed7a02cad0b4/go.mod h1:1OmJESOtxQGNMlUO5rcv96Vpp9mfMqXXbe2RdinFLdY= +github.com/anacrolix/lsan v0.0.0-20211126052245-807000409a62 h1:P04VG6Td13FHMgS5ZBcJX23NPC/fiC4cp9bXwYujdYM= +github.com/anacrolix/lsan v0.0.0-20211126052245-807000409a62/go.mod h1:66cFKPCO7Sl4vbFnAaSq7e4OXtdMhRSBagJGWgmpJbM= +github.com/anacrolix/missinggo v0.0.0-20180725070939-60ef2fbf63df/go.mod h1:kwGiTUTZ0+p4vAz3VbAI5a30t2YbvemcmspjKwrAz5s= +github.com/anacrolix/missinggo v1.1.0/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo= +github.com/anacrolix/missinggo v1.1.2-0.20190815015349-b888af804467/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo= +github.com/anacrolix/missinggo v1.2.1/go.mod h1:J5cMhif8jPmFoC3+Uvob3OXXNIhOUikzMt+uUjeM21Y= +github.com/anacrolix/missinggo v1.3.0 h1:06HlMsudotL7BAELRZs0yDZ4yVXsHXGi323QBjAVASw= +github.com/anacrolix/missinggo v1.3.0/go.mod h1:bqHm8cE8xr+15uVfMG3BFui/TxyB6//H5fwlq/TeqMc= +github.com/anacrolix/missinggo/perf v1.0.0 h1:7ZOGYziGEBytW49+KmYGTaNfnwUqP1HBsy6BqESAJVw= +github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5urunoLS0Cbvb4V0uMQ= +github.com/anacrolix/missinggo/v2 v2.2.0/go.mod h1:o0jgJoYOyaoYQ4E2ZMISVa9c88BbUBVQQW4QeRkNCGY= +github.com/anacrolix/missinggo/v2 v2.5.1/go.mod h1:WEjqh2rmKECd0t1VhQkLGTdIWXO6f6NLjp5GlMZ+6FA= +github.com/anacrolix/missinggo/v2 v2.5.2/go.mod h1:yNvsLrtZYRYCOI+KRH/JM8TodHjtIE/bjOGhQaLOWIE= +github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9 h1:W/oGeHhYwxueeiDjQfmK9G+X9M2xJgfTtow62v0TWAs= +github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9/go.mod h1:mIEtp9pgaXqt8VQ3NQxFOod/eQ1H0D1XsZzKUQfwtac= +github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb/go.mod h1:x2/ErsYUmT77kezS63+wzZp8E3byYB0gzirM/WMBLfw= +github.com/anacrolix/mmsg v1.0.0 h1:btC7YLjOn29aTUAExJiVUhQOuf/8rhm+/nWCMAnL3Hg= +github.com/anacrolix/mmsg v1.0.0/go.mod h1:x8kRaJY/dCrY9Al0PEcj1mb/uFHwP6GCJ9fLl4thEPc= +github.com/anacrolix/multiless v0.3.0 
h1:5Bu0DZncjE4e06b9r1Ap2tUY4Au0NToBP5RpuEngSis= +github.com/anacrolix/multiless v0.3.0/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= +github.com/anacrolix/squirrel v0.6.0 h1:ovfWW42wcGzrVYYI9s56pEYzfeTwtXxCCvSd+KwvUEA= +github.com/anacrolix/squirrel v0.6.0/go.mod h1:60vdNPUbK1jYWePp39Wqn9whHm12Yb9JEuwOXzLMDuY= +github.com/anacrolix/stm v0.2.0/go.mod h1:zoVQRvSiGjGoTmbM0vSLIiaKjWtNPeTvXUSdJQA4hsg= +github.com/anacrolix/stm v0.4.0 h1:tOGvuFwaBjeu1u9X1eIh9TX8OEedEiEQ1se1FjhFnXY= +github.com/anacrolix/stm v0.4.0/go.mod h1:GCkwqWoAsP7RfLW+jw+Z0ovrt2OO7wRzcTtFYMYY5t8= +github.com/anacrolix/sync v0.0.0-20180808010631-44578de4e778/go.mod h1:s735Etp3joe/voe2sdaXLcqDdJSay1O0OPnM0ystjqk= +github.com/anacrolix/sync v0.3.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= +github.com/anacrolix/sync v0.5.1 h1:FbGju6GqSjzVoTgcXTUKkF041lnZkG5P0C3T5RL3SGc= +github.com/anacrolix/sync v0.5.1/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= +github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= +github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= +github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= +github.com/anacrolix/tagflag v1.3.0 h1:5NI+9CniDnEH0BWA4UcQbERyFPjKJqZnVkItGVIDy/s= +github.com/anacrolix/tagflag v1.3.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= +github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 h1:QAVZ3pN/J4/UziniAhJR2OZ9Ox5kOY2053tBbbqUPYA= +github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96/go.mod h1:Wa6n8cYIdaG35x15aH3Zy6d03f7P728QfdcDeD/IEOs= +github.com/anacrolix/utp v0.1.0 h1:FOpQOmIwYsnENnz7tAGohA+r6iXpRjrq8ssKSre2Cp4= +github.com/anacrolix/utp v0.1.0/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNaBJ2Dk= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= +github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= +github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= +github.com/benbjohnson/immutable v0.3.0 h1:TVRhuZx2wG9SZ0LRdqlbs9S5BZ6Y24hJEHTCgWHZEIw= +github.com/benbjohnson/immutable v0.3.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.2.2 h1:J5gbX05GpMdBjCvQ9MteIg2KKDExr7DrgK+Yc15FvIk= +github.com/bits-and-blooms/bitset v1.2.2/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= +github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= +github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8= +github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod 
h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= +github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= +github.com/elliotchance/orderedmap v1.4.0 
h1:wZtfeEONCbx6in1CZyE6bELEt/vFayMvsxqI5SgsR+A= +github.com/elliotchance/orderedmap v1.4.0/go.mod h1:wsDwEaX5jEoyhbs7x93zk2H/qv0zwuhg4inXhDkYqys= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.9.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-llsqlite/adapter v0.0.0-20230927005056-7f5ce7f0c916 h1:OyQmpAN302wAopDgwVjgs2HkFawP9ahIEqkUYz7V7CA= +github.com/go-llsqlite/adapter v0.0.0-20230927005056-7f5ce7f0c916/go.mod h1:DADrR88ONKPPeSGjFp5iEN55Arx3fi2qXZeKCYDpbmU= 
+github.com/go-llsqlite/crawshaw v0.4.0 h1:L02s2jZBBJj80xm1VkkdyB/JlQ/Fi0kLbNHfXA8yrec= +github.com/go-llsqlite/crawshaw v0.4.0/go.mod h1:/YJdV7uBQaYDE0fwe4z3wwJIZBJxdYzd38ICggWqtaE= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod 
h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/huandu/xstrings v1.3.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= +github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod 
h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= +github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= +github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod 
h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pion/datachannel v1.5.2 h1:piB93s8LGmbECrpO84DnkIVWasRMk3IimbcXkTQLE6E= +github.com/pion/datachannel v1.5.2/go.mod h1:FTGQWaHrdCwIJ1rw6xBIfZVkslikjShim5yr05XFuCQ= +github.com/pion/dtls/v2 v2.1.3/go.mod h1:o6+WvyLDAlXF7YiPB/RlskRoeK+/JtuaZa5emwQcWus= +github.com/pion/dtls/v2 v2.1.5/go.mod h1:BqCE7xPZbPSubGasRoDFJeTsyJtdD1FanJYL0JGheqY= +github.com/pion/dtls/v2 v2.2.4 h1:YSfYwDQgrxMYXLBc/m7PFY5BVtWlNm/DN4qoU2CbcWg= +github.com/pion/dtls/v2 v2.2.4/go.mod h1:WGKfxqhrddne4Kg3p11FUMJrynkOY4lb25zHNO49wuw= +github.com/pion/ice/v2 v2.2.6 h1:R/vaLlI1J2gCx141L5PEwtuGAGcyS6e7E0hDeJFq5Ig= +github.com/pion/ice/v2 v2.2.6/go.mod h1:SWuHiOGP17lGromHTFadUe1EuPgFh/oCU6FCMZHooVE= +github.com/pion/interceptor v0.1.11 h1:00U6OlqxA3FFB50HSg25J/8cWi7P6FbSzw4eFn24Bvs= +github.com/pion/interceptor v0.1.11/go.mod h1:tbtKjZY14awXd7Bq0mmWvgtHB5MDaRN7HV3OZ/uy7s8= +github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/mdns v0.0.5 h1:Q2oj/JB3NqfzY9xGZ1fPzZzK7sDSD8rZPOvcIQ10BCw= +github.com/pion/mdns v0.0.5/go.mod h1:UgssrvdD3mxpi8tMxAXbsppL3vJ4Jipw1mTCW+al01g= +github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= +github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= +github.com/pion/rtcp v1.2.9 h1:1ujStwg++IOLIEoOiIQ2s+qBuJ1VN81KW+9pMPsif+U= +github.com/pion/rtcp v1.2.9/go.mod h1:qVPhiCzAm4D/rxb6XzKeyZiQK69yJpbUDJSF7TgrqNo= +github.com/pion/rtp v1.7.13 h1:qcHwlmtiI50t1XivvoawdCGTP4Uiypzfrsap+bijcoA= +github.com/pion/rtp v1.7.13/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko= +github.com/pion/sctp v1.8.0/go.mod h1:xFe9cLMZ5Vj6eOzpyiKjT9SwGM4KpK/8Jbw5//jc+0s= +github.com/pion/sctp v1.8.2 h1:yBBCIrUMJ4yFICL3RIvR4eh/H2BTTvlligmSTy+3kiA= +github.com/pion/sctp v1.8.2/go.mod h1:xFe9cLMZ5Vj6eOzpyiKjT9SwGM4KpK/8Jbw5//jc+0s= +github.com/pion/sdp/v3 v3.0.5 h1:ouvI7IgGl+V4CrqskVtr3AaTrPvPisEOxwgpdktctkU= +github.com/pion/sdp/v3 v3.0.5/go.mod h1:iiFWFpQO8Fy3S5ldclBkpXqmWy02ns78NOKoLLL0YQw= +github.com/pion/srtp/v2 v2.0.9 h1:JJq3jClmDFBPX/F5roEb0U19jSU7eUhyDqR/NZ34EKQ= +github.com/pion/srtp/v2 v2.0.9/go.mod h1:5TtM9yw6lsH0ppNCehB/EjEUli7VkUgKSPJqWVqbhQ4= +github.com/pion/stun v0.3.5 h1:uLUCBCkQby4S1cf6CGuR9QrVOKcvUwFeemaC865QHDg= +github.com/pion/stun v0.3.5/go.mod h1:gDMim+47EeEtfWogA37n6qXZS88L5V6LqFcf+DZA2UA= +github.com/pion/transport v0.12.2/go.mod h1:N3+vZQD9HlDP5GWkZ85LohxNsDcNgofQmyL6ojX5d8Q= +github.com/pion/transport v0.12.3/go.mod h1:OViWW9SP2peE/HbwBvARicmAVnesphkNkCVZIWJ6q9A= +github.com/pion/transport v0.13.0/go.mod h1:yxm9uXpK9bpBBWkITk13cLo1y5/ur5VQpG22ny6EP7g= +github.com/pion/transport v0.13.1 h1:/UH5yLeQtwm2VZIPjxwnNFxjS4DFhyLfS4GlfuKUzfA= 
+github.com/pion/transport v0.13.1/go.mod h1:EBxbqzyv+ZrmDb82XswEE0BjfQFtuw1Nu6sjnjWCsGg= +github.com/pion/transport/v2 v2.0.0 h1:bsMYyqHCbkvHwj+eNCFBuxtlKndKfyGI2vaQmM3fIE4= +github.com/pion/transport/v2 v2.0.0/go.mod h1:HS2MEBJTwD+1ZI2eSXSvHJx/HnzQqRy2/LXxt6eVMHc= +github.com/pion/turn/v2 v2.0.8 h1:KEstL92OUN3k5k8qxsXHpr7WWfrdp7iJZHx99ud8muw= +github.com/pion/turn/v2 v2.0.8/go.mod h1:+y7xl719J8bAEVpSXBXvTxStjJv3hbz9YFflvkpcGPw= +github.com/pion/udp v0.1.1/go.mod h1:6AFo+CMdKQm7UiA0eUPA8/eVCTx8jBIITLZHc9DWX5M= +github.com/pion/udp v0.1.4 h1:OowsTmu1Od3sD6i3fQUJxJn2fEvJO6L1TidgadtbTI8= +github.com/pion/udp v0.1.4/go.mod h1:G8LDo56HsFwC24LIcnT4YIDU5qcB6NepqqjP0keL2us= +github.com/pion/webrtc/v3 v3.1.42 h1:wJEQFIXVanptnQcHOLTuIo4AtGB2+mG2x4OhIhnITOA= +github.com/pion/webrtc/v3 v3.1.42/go.mod h1:ffD9DulDrPxyWvDPUIPAOSAWx9GUlOExiJPf7cCcMLA= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= 
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.35.0 h1:Eyr+Pw2VymWejHqCugNaQXkAi6KayVNxaHeu6khmFBE= +github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 h1:Lt9DzQALzHoDwMBGJ6v8ObDPR0dzr2a6sXTB1Fq7IHs= +github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA= +github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= +github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= 
+github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= +github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= +github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= +github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/otel v1.8.0 h1:zcvBFizPbpa1q7FehvFiHbQwGzmPILebO0tyqIR5Djg= +go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0 h1:ao8CJIShCaIbaMsGxy+jp2YHSudketpDgDRcbirov78= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0 h1:LrHL1A3KqIgAgi6mK7Q0aczmzU414AONAGT5xtnp+uo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0/go.mod h1:w8aZL87GMOvOBa2lU/JlVXE1q4chk/0FX+8ai4513bw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.8.0 h1:00hCSGLIxdYK/Z7r8GkaX0QIlfvgU3tmnLlQvcnix6U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.8.0/go.mod h1:twhIvtDQW2sWP1O2cT1N8nkSBgKCRZv2z6COTTBrf8Q= +go.opentelemetry.io/otel/sdk v1.8.0 h1:xwu69/fNuwbSHWe/0PGS888RmjWY181OmcXDQKu7ZQk= +go.opentelemetry.io/otel/sdk v1.8.0/go.mod h1:uPSfc+yfDH2StDM/Rm35WE8gXSNdvCg023J6HeGNO0c= +go.opentelemetry.io/otel/trace v1.8.0 h1:cSy0DF9eGI5WIfNwZ1q2iUyGj00tGzP24dE1lOlHrfY= +go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.18.0 h1:W5hyXNComRa23tGpKwG+FRAc4rfF6ZUg1JReK+QHS80= +go.opentelemetry.io/proto/otlp v0.18.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod 
h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME= +golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220531201128-c960675eff93/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= 
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod 
h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= 
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod 
h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +modernc.org/libc v1.22.3 h1:D/g6O5ftAfavceqlLOFwaZuA5KYafKwmr30A6iSqoyY= +modernc.org/libc v1.22.3/go.mod h1:MQrloYP209xa2zHome2a8HLiLm6k0UT8CoHpV74tOFw= +modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= +modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/sqlite v1.21.1 h1:GyDFqNnESLOhwwDRaHGdp2jKLDzpyT/rNLglX3ZkMSU= +modernc.org/sqlite v1.21.1/go.mod h1:XwQ0wZPIh1iKb5mkvCJ3szzbhk+tykC8ZWqTRTgYRwI= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +zombiezen.com/go/sqlite v0.13.1 h1:qDzxyWWmMtSSEH5qxamqBFmqA2BLSSbtODi3ojaE02o= +zombiezen.com/go/sqlite v0.13.1/go.mod h1:Ht/5Rg3Ae2hoyh1I7gbWtWAl89CNocfqeb/aAMTkJr4= diff --git a/deps/github.com/anacrolix/torrent/handshake.go b/deps/github.com/anacrolix/torrent/handshake.go new file mode 100644 index 0000000..b38a708 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/handshake.go @@ -0,0 +1,72 @@ +package torrent + +import ( + "bytes" + "fmt" + "io" + "net" + "time" + + "github.com/anacrolix/torrent/mse" + pp "github.com/anacrolix/torrent/peer_protocol" +) + +// Wraps a raw connection and provides the interface we want for using the +// connection in the message loop. +type deadlineReader struct { + nc net.Conn + r io.Reader +} + +func (r deadlineReader) Read(b []byte) (int, error) { + // Keep-alives should be received every 2 mins. Give a bit of gracetime. + err := r.nc.SetReadDeadline(time.Now().Add(150 * time.Second)) + if err != nil { + return 0, fmt.Errorf("error setting read deadline: %s", err) + } + return r.r.Read(b) +} + +// Handles stream encryption for inbound connections. +func handleEncryption( + rw io.ReadWriter, + skeys mse.SecretKeyIter, + policy HeaderObfuscationPolicy, + selector mse.CryptoSelector, +) ( + ret io.ReadWriter, + headerEncrypted bool, + cryptoMethod mse.CryptoMethod, + err error, +) { + // Tries to start an unencrypted stream. + if !policy.RequirePreferred || !policy.Preferred { + var protocol [len(pp.Protocol)]byte + _, err = io.ReadFull(rw, protocol[:]) + if err != nil { + return + } + // Put the protocol back into the stream. 
+ rw = struct { + io.Reader + io.Writer + }{ + io.MultiReader(bytes.NewReader(protocol[:]), rw), + rw, + } + if string(protocol[:]) == pp.Protocol { + ret = rw + return + } + if policy.RequirePreferred { + // We are here because we require unencrypted connections. + err = fmt.Errorf("unexpected protocol string %q and header obfuscation disabled", protocol) + return + } + } + headerEncrypted = true + ret, cryptoMethod, err = mse.ReceiveHandshake(rw, skeys, selector) + return +} + +type PeerExtensionBits = pp.PeerExtensionBits diff --git a/deps/github.com/anacrolix/torrent/handshake_test.go b/deps/github.com/anacrolix/torrent/handshake_test.go new file mode 100644 index 0000000..8c2c6d2 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/handshake_test.go @@ -0,0 +1,15 @@ +package torrent + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDefaultExtensionBytes(t *testing.T) { + pex := defaultPeerExtensionBytes() + assert.True(t, pex.SupportsDHT()) + assert.True(t, pex.SupportsExtended()) + assert.False(t, pex.GetBit(63)) + assert.Panics(t, func() { pex.GetBit(64) }) +} diff --git a/deps/github.com/anacrolix/torrent/internal/alloclim/alloclim_test.go b/deps/github.com/anacrolix/torrent/internal/alloclim/alloclim_test.go new file mode 100644 index 0000000..5952804 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/internal/alloclim/alloclim_test.go @@ -0,0 +1,93 @@ +package alloclim + +import ( + "context" + "testing" + "time" + + _ "github.com/anacrolix/envpprof" + qt "github.com/frankban/quicktest" +) + +func TestReserveOverMax(t *testing.T) { + c := qt.New(t) + l := &Limiter{Max: 10} + r := l.Reserve(20) + c.Assert(r.Wait(context.Background()), qt.IsNotNil) +} + +func TestImmediateAllow(t *testing.T) { + c := qt.New(t) + l := &Limiter{Max: 10} + r := l.Reserve(10) + c.Assert(r.Wait(context.Background()), qt.IsNil) +} + +func TestSimpleSequence(t *testing.T) { + c := qt.New(t) + l := &Limiter{Max: 10} + rs := make([]*Reservation, 0) + rs = append(rs, l.Reserve(6)) + rs = append(rs, l.Reserve(5)) + rs = append(rs, l.Reserve(5)) + c.Assert(rs[0].Wait(context.Background()), qt.IsNil) + ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Nanosecond)) + c.Assert(rs[1].Wait(ctx), qt.Equals, context.DeadlineExceeded) + go cancel() + ctx, cancel = context.WithCancel(context.Background()) + go cancel() + c.Assert(rs[2].Wait(ctx), qt.Equals, context.Canceled) + go rs[0].Release() + ctx, cancel = context.WithDeadline(context.Background(), time.Now().Add(time.Second)) + c.Assert(rs[1].Wait(ctx), qt.IsNil) + go rs[1].Release() + c.Assert(rs[2].Wait(ctx), qt.IsNil) + go rs[2].Release() + go cancel() + rs[2].Release() + rs[1].Release() + c.Assert(l.Value(), qt.Equals, l.Max) +} + +func TestSequenceWithCancel(t *testing.T) { + c := qt.New(t) + l := &Limiter{Max: 10} + rs := make([]*Reservation, 0) + rs = append(rs, l.Reserve(6)) + rs = append(rs, l.Reserve(6)) + rs = append(rs, l.Reserve(4)) + rs = append(rs, l.Reserve(4)) + c.Assert(rs[0].Cancel(), qt.IsFalse) + c.Assert(func() { rs[1].Release() }, qt.PanicMatches, "not resolved") + c.Assert(rs[1].Cancel(), qt.IsTrue) + c.Assert(rs[2].Wait(context.Background()), qt.IsNil) + rs[0].Release() + c.Assert(rs[3].Wait(context.Background()), qt.IsNil) + c.Assert(l.Value(), qt.Equals, int64(2)) + rs[1].Release() + rs[2].Release() + rs[3].Release() + c.Assert(l.Value(), qt.Equals, l.Max) +} + +func TestCancelWhileWaiting(t *testing.T) { + c := qt.New(t) + l := &Limiter{Max: 10} + rs := make([]*Reservation, 
0)
+    rs = append(rs, l.Reserve(6))
+    rs = append(rs, l.Reserve(6))
+    rs = append(rs, l.Reserve(4))
+    rs = append(rs, l.Reserve(4))
+    go rs[1].Cancel()
+    err := rs[1].Wait(context.Background())
+    c.Assert(err, qt.IsNotNil)
+    err = rs[2].Wait(context.Background())
+    c.Assert(err, qt.IsNil)
+    ctx, cancel := context.WithCancel(context.Background())
+    go cancel()
+    err = rs[3].Wait(ctx)
+    c.Assert(err, qt.Equals, context.Canceled)
+    rs[0].Drop()
+    err = rs[3].Wait(ctx)
+    c.Assert(err, qt.IsNil)
+}
diff --git a/deps/github.com/anacrolix/torrent/internal/alloclim/l.go b/deps/github.com/anacrolix/torrent/internal/alloclim/l.go
new file mode 100644
index 0000000..98be1a1
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/internal/alloclim/l.go
@@ -0,0 +1,80 @@
+package alloclim
+
+import "sync"
+
+// Manages reservations sharing a common allocation limit.
+type Limiter struct {
+    // Maximum outstanding allocation space.
+    Max      int64
+    initOnce sync.Once
+    mu       sync.Mutex
+    // Current unallocated space.
+    value int64
+    // Reservations waiting, in the order they arrived.
+    waiting []*Reservation
+}
+
+func (me *Limiter) initValue() {
+    me.value = me.Max
+}
+
+func (me *Limiter) init() {
+    me.initOnce.Do(func() {
+        me.initValue()
+    })
+}
+
+func (me *Limiter) Reserve(n int64) *Reservation {
+    r := &Reservation{
+        l: me,
+        n: n,
+    }
+    me.init()
+    me.mu.Lock()
+    if n <= me.value {
+        me.value -= n
+        r.granted.Set()
+    } else {
+        me.waiting = append(me.waiting, r)
+    }
+    me.mu.Unlock()
+    return r
+}
+
+func (me *Limiter) doWakesLocked() {
+    for {
+        if len(me.waiting) == 0 {
+            break
+        }
+        r := me.waiting[0]
+        switch {
+        case r.cancelled.IsSet():
+        case r.n <= me.value:
+            if r.wake() {
+                me.value -= r.n
+            }
+        default:
+            return
+        }
+        me.waiting = me.waiting[1:]
+    }
+}
+
+func (me *Limiter) doWakes() {
+    me.mu.Lock()
+    me.doWakesLocked()
+    me.mu.Unlock()
+}
+
+func (me *Limiter) addValue(n int64) {
+    me.mu.Lock()
+    me.value += n
+    me.doWakesLocked()
+    me.mu.Unlock()
+}
+
+func (me *Limiter) Value() int64 {
+    me.mu.Lock()
+    defer me.mu.Unlock()
+    return me.value
+}
diff --git a/deps/github.com/anacrolix/torrent/internal/alloclim/r.go b/deps/github.com/anacrolix/torrent/internal/alloclim/r.go
new file mode 100644
index 0000000..71a4dd7
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/internal/alloclim/r.go
@@ -0,0 +1,101 @@
+package alloclim
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "sync"
+
+    "github.com/anacrolix/chansync"
+    "github.com/anacrolix/log"
+)
+
+type Reservation struct {
+    l           *Limiter
+    n           int64
+    releaseOnce sync.Once
+    mu          sync.Mutex
+    granted     chansync.SetOnce
+    cancelled   chansync.SetOnce
+}
+
+// Releases the alloc claim if the reservation has been granted. Does nothing if it was cancelled.
+// Otherwise panics.
+func (me *Reservation) Release() {
+    me.mu.Lock()
+    defer me.mu.Unlock()
+    switch {
+    default:
+        panic("not resolved")
+    case me.cancelled.IsSet():
+        return
+    case me.granted.IsSet():
+    }
+    me.releaseOnce.Do(func() {
+        me.l.addValue(me.n)
+    })
+}
+
+// Cancels the reservation, returning false if it was already granted. You must still release it if
+// that's the case. See Drop.
+func (me *Reservation) Cancel() bool {
+    me.mu.Lock()
+    defer me.mu.Unlock()
+    if me.granted.IsSet() {
+        return false
+    }
+    if me.cancelled.Set() {
+        go me.l.doWakes()
+    }
+    return true
+}
+
+// If the reservation is granted, release it, otherwise cancel the reservation.
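// An illustrative lifecycle sketch (assuming a Limiter l, a size n and a
// context ctx; none of these names come from this package):
//
//    r := l.Reserve(n)
//    if err := r.Wait(ctx); err != nil {
//        r.Drop() // not granted, or we stopped waiting: cancel or release as needed
//        return err
//    }
//    defer r.Release() // granted: hand the space back when done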
+func (me *Reservation) Drop() { + me.mu.Lock() + defer me.mu.Unlock() + if me.granted.IsSet() { + me.releaseOnce.Do(func() { + me.l.addValue(me.n) + }) + return + } + if me.cancelled.Set() { + go me.l.doWakes() + } +} + +func (me *Reservation) wake() bool { + me.mu.Lock() + defer me.mu.Unlock() + if me.cancelled.IsSet() { + return false + } + return me.granted.Set() +} + +func (me *Reservation) Wait(ctx context.Context) error { + if me.n > me.l.Max { + return log.WithLevel( + log.Warning, + fmt.Errorf("reservation for %v exceeds limiter max %v", me.n, me.l.Max), + ) + } + select { + case <-ctx.Done(): + case <-me.granted.Done(): + case <-me.cancelled.Done(): + } + defer me.mu.Unlock() + me.mu.Lock() + switch { + case me.granted.IsSet(): + return nil + case me.cancelled.IsSet(): + return errors.New("reservation cancelled") + case ctx.Err() != nil: + return ctx.Err() + default: + panic("unexpected") + } +} diff --git a/deps/github.com/anacrolix/torrent/internal/check/check.go b/deps/github.com/anacrolix/torrent/internal/check/check.go new file mode 100644 index 0000000..aa75e59 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/internal/check/check.go @@ -0,0 +1,5 @@ +package check + +// A flag for doing extra checks at runtime that are potentially expensive. Should be enabled for +// testing and debugging. +var Enabled = false diff --git a/deps/github.com/anacrolix/torrent/internal/check/check_testing.go b/deps/github.com/anacrolix/torrent/internal/check/check_testing.go new file mode 100644 index 0000000..3ec404e --- /dev/null +++ b/deps/github.com/anacrolix/torrent/internal/check/check_testing.go @@ -0,0 +1,11 @@ +//go:build go1.21 + +package check + +import "testing" + +func init() { + if testing.Testing() { + Enabled = true + } +} diff --git a/deps/github.com/anacrolix/torrent/internal/cmd/issue-464/main.go b/deps/github.com/anacrolix/torrent/internal/cmd/issue-464/main.go new file mode 100644 index 0000000..fbac1b4 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/internal/cmd/issue-464/main.go @@ -0,0 +1,48 @@ +package main + +import ( + "fmt" + "io" + "log" + + "github.com/anacrolix/torrent" +) + +const testMagnet = "magnet:?xt=urn:btih:a88fda5954e89178c372716a6a78b8180ed4dad3&ws=https%3A%2F%2Fwebtorrent.io%2Ftorrents%2F" + +func main() { + err := mainErr() + if err != nil { + log.Fatalf("error in main: %v", err) + } +} + +func mainErr() error { + cfg := torrent.NewDefaultClientConfig() + // We could disable non-webseed peer types here, to force any errors. + client, _ := torrent.NewClient(cfg) + + // Add directly from metainfo, because we want to force webseeding to serve data, and webseeding + // won't get us the metainfo. + t, err := client.AddTorrentFromFile("testdata/The WIRED CD - Rip. Sample. Mash. 
Share.torrent") + if err != nil { + return err + } + <-t.GotInfo() + + fmt.Println("GOT INFO") + + f := t.Files()[0] + + r := f.NewReader() + + r.Seek(5, io.SeekStart) + buf := make([]byte, 5) + n, err := r.Read(buf) + + fmt.Println("END", n, buf, err) + + t.DownloadAll() + client.WaitAll() + return nil +} diff --git a/deps/github.com/anacrolix/torrent/internal/cmd/issue-465/main.go b/deps/github.com/anacrolix/torrent/internal/cmd/issue-465/main.go new file mode 100644 index 0000000..5407535 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/internal/cmd/issue-465/main.go @@ -0,0 +1,70 @@ +package main + +import ( + "errors" + "fmt" + "net/http" + "os" + + "github.com/anacrolix/torrent" + "github.com/anacrolix/torrent/metainfo" +) + +func main() { + if err := dlTorrents("."); err != nil { + fmt.Fprintf(os.Stderr, "fatal error: %v\n", err) + os.Exit(1) + } +} + +func dlTorrents(dir string) error { + conf := torrent.NewDefaultClientConfig() + conf.DataDir = dir + cl, err := torrent.NewClient(conf) + if err != nil { + return err + } + http.HandleFunc("/torrentClientStatus", func(w http.ResponseWriter, r *http.Request) { + cl.WriteStatus(w) + }) + ids := []string{ + "urlteam_2021-02-03-21-17-02", + "urlteam_2021-02-02-11-17-02", + "urlteam_2021-01-31-11-17-02", + "urlteam_2021-01-30-21-17-01", + "urlteam_2021-01-29-21-17-01", + "urlteam_2021-01-28-11-17-01", + "urlteam_2021-01-27-11-17-02", + "urlteam_2021-01-26-11-17-02", + "urlteam_2021-01-25-03-17-02", + "urlteam_2021-01-24-03-17-02", + } + for _, id := range ids { + t, err := addTorrentFromURL(cl, fmt.Sprintf("https://archive.org/download/%s/%s_archive.torrent", id, id)) + if err != nil { + return fmt.Errorf("downloading metainfo for %q: %w", id, err) + } + t.DownloadAll() + } + if !cl.WaitAll() { + return errors.New("client stopped early") + } + return nil +} + +func addTorrentFromURL(cl *torrent.Client, url string) (*torrent.Torrent, error) { + fmt.Printf("Adding torrent: %s\n", url) + resp, err := http.Get(url) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("status %s", resp.Status) + } + defer resp.Body.Close() + meta, err := metainfo.Load(resp.Body) + if err != nil { + return nil, err + } + return cl.AddTorrent(meta) +} diff --git a/deps/github.com/anacrolix/torrent/internal/limiter/limiter.go b/deps/github.com/anacrolix/torrent/internal/limiter/limiter.go new file mode 100644 index 0000000..1fd29db --- /dev/null +++ b/deps/github.com/anacrolix/torrent/internal/limiter/limiter.go @@ -0,0 +1,64 @@ +package limiter + +import "sync" + +type Key = interface{} + +// Manages resources with a limited number of concurrent slots for use for each key. +type Instance struct { + SlotsPerKey int + + mu sync.Mutex + // Limits concurrent use of a resource. Push into the channel to use a slot, and receive to free + // up a slot. + active map[Key]*activeValueType +} + +type activeValueType struct { + ch chan struct{} + refs int +} + +type ActiveValueRef struct { + v *activeValueType + k Key + i *Instance +} + +// Returns the limiting channel. Send to it to obtain a slot, and receive to release the slot. +func (me ActiveValueRef) C() chan struct{} { + return me.v.ch +} + +// Drop the reference to a key, this allows keys to be reclaimed when they're no longer in use. +func (me ActiveValueRef) Drop() { + me.i.mu.Lock() + defer me.i.mu.Unlock() + me.v.refs-- + if me.v.refs == 0 { + delete(me.i.active, me.k) + } +} + +// Get a reference to the values for a key. 
You should make sure to call Drop exactly once on the +// returned value when done. +func (i *Instance) GetRef(key Key) ActiveValueRef { + i.mu.Lock() + defer i.mu.Unlock() + if i.active == nil { + i.active = make(map[Key]*activeValueType) + } + v, ok := i.active[key] + if !ok { + v = &activeValueType{ + ch: make(chan struct{}, i.SlotsPerKey), + } + i.active[key] = v + } + v.refs++ + return ActiveValueRef{ + v: v, + k: key, + i: i, + } +} diff --git a/deps/github.com/anacrolix/torrent/internal/nestedmaps/nestedmaps.go b/deps/github.com/anacrolix/torrent/internal/nestedmaps/nestedmaps.go new file mode 100644 index 0000000..62ebdcc --- /dev/null +++ b/deps/github.com/anacrolix/torrent/internal/nestedmaps/nestedmaps.go @@ -0,0 +1,73 @@ +package nestedmaps + +type next[NK comparable, M ~map[NK]NV, NV any] struct { + last Path[M] + key NK +} + +func (me next[NK, CV, NV]) Exists() bool { + _, ok := me.last.Get()[me.key] + return ok +} + +func (me next[NK, CV, NV]) Get() NV { + return me.last.Get()[me.key] +} + +func (me next[NK, CV, NV]) Set(value NV) { + if me.last.Get() == nil { + me.last.Set(make(CV)) + } + me.last.Get()[me.key] = value +} + +func (me next[NK, CV, NV]) Delete() { + m := me.last.Get() + delete(m, me.key) + if len(m) == 0 { + me.last.Delete() + } +} + +func Next[K comparable, M ~map[K]V, V any]( + last Path[M], + key K, +) Path[V] { + ret := next[K, M, V]{} + ret.last = last + ret.key = key + return ret +} + +type root[K comparable, V any, M ~map[K]V] struct { + m *M +} + +func (me root[K, V, M]) Exists() bool { + return *me.m != nil +} + +func (me root[K, V, M]) Get() M { + return *me.m +} + +func (me root[K, V, M]) Set(value M) { + *me.m = value +} + +func (me root[K, V, M]) Delete() { + *me.m = nil +} + +func Begin[K comparable, M ~map[K]V, V any](m *M) Path[M] { + ret := root[K, V, M]{} + ret.m = m + return ret +} + +type Path[V any] interface { + Set(V) + Get() V + Exists() bool + Delete() +} diff --git a/deps/github.com/anacrolix/torrent/internal/nestedmaps/nestedmaps_test.go b/deps/github.com/anacrolix/torrent/internal/nestedmaps/nestedmaps_test.go new file mode 100644 index 0000000..97916af --- /dev/null +++ b/deps/github.com/anacrolix/torrent/internal/nestedmaps/nestedmaps_test.go @@ -0,0 +1,41 @@ +package nestedmaps + +import ( + "testing" + + g "github.com/anacrolix/generics" + qt "github.com/frankban/quicktest" +) + +func TestNestedMaps(t *testing.T) { + c := qt.New(t) + var nest map[string]map[*int]map[byte][]int64 + intKey := g.PtrTo(420) + var root = Begin(&nest) + var first = Next(root, "answer") + var second = Next(first, intKey) + var last = Next(second, 69) + c.Assert(root.Exists(), qt.IsFalse) + c.Assert(first.Exists(), qt.IsFalse) + c.Assert(second.Exists(), qt.IsFalse) + c.Assert(last.Exists(), qt.IsFalse) + last.Set([]int64{4, 8, 15, 16, 23, 42}) + c.Assert(root.Exists(), qt.IsTrue) + c.Assert(first.Exists(), qt.IsTrue) + c.Assert(second.Exists(), qt.IsTrue) + c.Assert(last.Exists(), qt.IsTrue) + c.Assert(Next(second, 70).Exists(), qt.IsFalse) + secondIntKey := g.PtrTo(1337) + secondPath := Next(Next(Next(Begin(&nest), "answer"), secondIntKey), 42) + secondPath.Set(nil) + c.Assert(secondPath.Exists(), qt.IsTrue) + last.Delete() + c.Assert(last.Exists(), qt.IsFalse) + c.Assert(second.Exists(), qt.IsFalse) + c.Assert(root.Exists(), qt.IsTrue) + c.Assert(first.Exists(), qt.IsTrue) + // See if we get panics deleting an already deleted item. 
+    last.Delete()
+    secondPath.Delete()
+    c.Assert(root.Exists(), qt.IsFalse)
+}
diff --git a/deps/github.com/anacrolix/torrent/internal/panicif/panicif.go b/deps/github.com/anacrolix/torrent/internal/panicif/panicif.go
new file mode 100644
index 0000000..c12d09c
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/internal/panicif/panicif.go
@@ -0,0 +1,21 @@
+package panicif
+
+import "fmt"
+
+func NotEqual[T comparable](a, b T) {
+    if a != b {
+        panic(fmt.Sprintf("%v != %v", a, b))
+    }
+}
+
+func False(b bool) {
+    if !b {
+        panic("is false")
+    }
+}
+
+func True(b bool) {
+    if b {
+        panic("is true")
+    }
+}
diff --git a/deps/github.com/anacrolix/torrent/internal/testutil/greeting.go b/deps/github.com/anacrolix/torrent/internal/testutil/greeting.go
new file mode 100644
index 0000000..6544483
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/internal/testutil/greeting.go
@@ -0,0 +1,50 @@
+// Package testutil contains stuff for testing torrent-related behaviour.
+//
+// "greeting" is a single-file torrent of a file called "greeting" that
+// contains "hello, world\n".
+
+package testutil
+
+import (
+    "os"
+    "path/filepath"
+
+    "github.com/anacrolix/torrent/metainfo"
+)
+
+var Greeting = Torrent{
+    Files: []File{{
+        Data: GreetingFileContents,
+    }},
+    Name: GreetingFileName,
+}
+
+const (
+    // A null in the middle triggers an error if SQLite stores data as text instead of blob.
+    GreetingFileContents = "hello,\x00world\n"
+    GreetingFileName     = "greeting"
+)
+
+func CreateDummyTorrentData(dirName string) string {
+    f, _ := os.Create(filepath.Join(dirName, "greeting"))
+    defer f.Close()
+    f.WriteString(GreetingFileContents)
+    return f.Name()
+}
+
+func GreetingMetaInfo() *metainfo.MetaInfo {
+    return Greeting.Metainfo(5)
+}
+
+// Gives a temporary directory containing the completed "greeting" torrent,
+// and a corresponding metainfo describing it. The temporary directory can be
+// cleaned away with os.RemoveAll.
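// An illustrative use in a test, assuming the caller owns cleanup and that
// client is some torrent client under test (a hypothetical name):
//
//    dir, mi := GreetingTestTorrent()
//    defer os.RemoveAll(dir)
//    tor, _ := client.AddTorrent(mi)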
+func GreetingTestTorrent() (tempDir string, metaInfo *metainfo.MetaInfo) { + tempDir, err := os.MkdirTemp(os.TempDir(), "") + if err != nil { + panic(err) + } + CreateDummyTorrentData(tempDir) + metaInfo = GreetingMetaInfo() + return +} diff --git a/deps/github.com/anacrolix/torrent/internal/testutil/spec.go b/deps/github.com/anacrolix/torrent/internal/testutil/spec.go new file mode 100644 index 0000000..63e4a74 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/internal/testutil/spec.go @@ -0,0 +1,67 @@ +package testutil + +import ( + "io" + "strings" + + "github.com/anacrolix/missinggo/expect" + + "github.com/anacrolix/torrent/bencode" + "github.com/anacrolix/torrent/metainfo" +) + +type File struct { + Name string + Data string +} + +type Torrent struct { + Files []File + Name string +} + +func (t *Torrent) IsDir() bool { + return len(t.Files) == 1 && t.Files[0].Name == "" +} + +func (t *Torrent) GetFile(name string) *File { + if t.IsDir() && t.Name == name { + return &t.Files[0] + } + for _, f := range t.Files { + if f.Name == name { + return &f + } + } + return nil +} + +func (t *Torrent) Info(pieceLength int64) metainfo.Info { + info := metainfo.Info{ + Name: t.Name, + PieceLength: pieceLength, + } + if t.IsDir() { + info.Length = int64(len(t.Files[0].Data)) + } else { + for _, f := range t.Files { + info.Files = append(info.Files, metainfo.FileInfo{ + Path: []string{f.Name}, + Length: int64(len(f.Data)), + }) + } + } + err := info.GeneratePieces(func(fi metainfo.FileInfo) (io.ReadCloser, error) { + return io.NopCloser(strings.NewReader(t.GetFile(strings.Join(fi.Path, "/")).Data)), nil + }) + expect.Nil(err) + return info +} + +func (t *Torrent) Metainfo(pieceLength int64) *metainfo.MetaInfo { + mi := metainfo.MetaInfo{} + var err error + mi.InfoBytes, err = bencode.Marshal(t.Info(pieceLength)) + expect.Nil(err) + return &mi +} diff --git a/deps/github.com/anacrolix/torrent/internal/testutil/status_writer.go b/deps/github.com/anacrolix/torrent/internal/testutil/status_writer.go new file mode 100644 index 0000000..bcc5c2b --- /dev/null +++ b/deps/github.com/anacrolix/torrent/internal/testutil/status_writer.go @@ -0,0 +1,55 @@ +package testutil + +import ( + "fmt" + "io" + "net/http" + "sync" + "testing" + + _ "github.com/anacrolix/envpprof" +) + +type StatusWriter interface { + WriteStatus(io.Writer) +} + +// The key is the route pattern. The value is nil when the resource is released. 
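// Registration with net/http is permanent (the default mux has no way to
// remove a handler), so releasing only nils the map entry and the handler
// serves 404 until the same pattern is exported again. An illustrative use,
// assuming sw implements StatusWriter and "dht" is an arbitrary path:
//
//    release := ExportStatusWriter(sw, "dht", t)
//    defer release()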
+var ( + mu sync.Mutex + sws = map[string]StatusWriter{} +) + +func ExportStatusWriter(sw StatusWriter, path string, t testing.TB) (release func()) { + pattern := fmt.Sprintf("/%s/%s", t.Name(), path) + t.Logf("exporting status path %q", pattern) + release = func() { + mu.Lock() + defer mu.Unlock() + sws[pattern] = nil + } + mu.Lock() + defer mu.Unlock() + if curSw, ok := sws[pattern]; ok { + if curSw != nil { + panic(fmt.Sprintf("%q still in use", pattern)) + } + sws[pattern] = sw + return + } + http.HandleFunc( + pattern, + func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + sw := sws[pattern] + mu.Unlock() + if sw == nil { + http.NotFound(w, r) + return + } + sw.WriteStatus(w) + }, + ) + sws[pattern] = sw + return +} diff --git a/deps/github.com/anacrolix/torrent/iplist/cidr.go b/deps/github.com/anacrolix/torrent/iplist/cidr.go new file mode 100644 index 0000000..e131964 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/iplist/cidr.go @@ -0,0 +1,41 @@ +package iplist + +import ( + "bufio" + "io" + "net" +) + +func ParseCIDRListReader(r io.Reader) (ret []Range, err error) { + s := bufio.NewScanner(r) + for s.Scan() { + err = func() (err error) { + _, in, err := net.ParseCIDR(s.Text()) + if err != nil { + return + } + ret = append(ret, Range{ + First: in.IP, + Last: IPNetLast(in), + }) + return + }() + if err != nil { + return + } + } + return +} + +// Returns the last, inclusive IP in a net.IPNet. +func IPNetLast(in *net.IPNet) (last net.IP) { + n := len(in.IP) + if n != len(in.Mask) { + panic("wat") + } + last = make(net.IP, n) + for i := 0; i < n; i++ { + last[i] = in.IP[i] | ^in.Mask[i] + } + return +} diff --git a/deps/github.com/anacrolix/torrent/iplist/cidr_test.go b/deps/github.com/anacrolix/torrent/iplist/cidr_test.go new file mode 100644 index 0000000..ad904f5 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/iplist/cidr_test.go @@ -0,0 +1,41 @@ +package iplist + +import ( + "bytes" + "net" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestIPNetLast(t *testing.T) { + _, in, err := net.ParseCIDR("138.255.252.0/22") + require.NoError(t, err) + assert.EqualValues(t, []byte{138, 255, 252, 0}, in.IP) + assert.EqualValues(t, []byte{255, 255, 252, 0}, in.Mask) + assert.EqualValues(t, []byte{138, 255, 255, 255}, IPNetLast(in)) + _, in, err = net.ParseCIDR("2400:cb00::/31") + require.NoError(t, err) + assert.EqualValues(t, []byte{0x24, 0, 0xcb, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, in.IP) + assert.EqualValues(t, []byte{255, 255, 255, 254, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, in.Mask) + assert.EqualValues(t, []byte{0x24, 0, 0xcb, 1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, IPNetLast(in)) +} + +func TestParseCIDRList(t *testing.T) { + r := bytes.NewBufferString(`2400:cb00::/32 +2405:8100::/32 +2405:b500::/32 +2606:4700::/32 +2803:f800::/32 +2c0f:f248::/32 +2a06:98c0::/29 +`) + rs, err := ParseCIDRListReader(r) + require.NoError(t, err) + require.Len(t, rs, 7) + assert.EqualValues(t, Range{ + First: net.IP{0x28, 3, 0xf8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + Last: net.IP{0x28, 3, 0xf8, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + }, rs[4]) +} diff --git a/deps/github.com/anacrolix/torrent/iplist/cmd/iplist/main.go b/deps/github.com/anacrolix/torrent/iplist/cmd/iplist/main.go new file mode 100644 index 0000000..7117b46 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/iplist/cmd/iplist/main.go @@ -0,0 +1,33 @@ +package main + +import ( + "fmt" 
+ "log" + "net" + "os" + + "github.com/anacrolix/tagflag" + + "github.com/anacrolix/torrent/iplist" +) + +func main() { + flags := struct { + tagflag.StartPos + Ips []net.IP + }{} + tagflag.Parse(&flags) + il, err := iplist.NewFromReader(os.Stdin) + if err != nil { + log.Fatalf("error loading ip list: %s", err) + } + log.Printf("loaded %d ranges", il.NumRanges()) + for _, ip := range flags.Ips { + r, ok := il.Lookup(ip) + if ok { + fmt.Printf("%s is in %v\n", ip, r) + } else { + fmt.Printf("%s not found\n", ip) + } + } +} diff --git a/deps/github.com/anacrolix/torrent/iplist/cmd/pack-blocklist/main.go b/deps/github.com/anacrolix/torrent/iplist/cmd/pack-blocklist/main.go new file mode 100644 index 0000000..9a97503 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/iplist/cmd/pack-blocklist/main.go @@ -0,0 +1,27 @@ +// Takes P2P blocklist text format in stdin, and outputs the packed format +// from the iplist package. +package main + +import ( + "bufio" + "os" + + "github.com/anacrolix/missinggo/v2" + "github.com/anacrolix/tagflag" + + "github.com/anacrolix/torrent/iplist" +) + +func main() { + tagflag.Parse(nil) + l, err := iplist.NewFromReader(os.Stdin) + if err != nil { + missinggo.Fatal(err) + } + wb := bufio.NewWriter(os.Stdout) + defer wb.Flush() + err = l.WritePacked(wb) + if err != nil { + missinggo.Fatal(err) + } +} diff --git a/deps/github.com/anacrolix/torrent/iplist/iplist.go b/deps/github.com/anacrolix/torrent/iplist/iplist.go new file mode 100644 index 0000000..d6d70a9 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/iplist/iplist.go @@ -0,0 +1,185 @@ +// Package iplist handles the P2P Plaintext Format described by +// https://en.wikipedia.org/wiki/PeerGuardian#P2P_plaintext_format. +package iplist + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "net" + "sort" +) + +// An abstraction of IP list implementations. +type Ranger interface { + // Return a Range containing the IP. + Lookup(net.IP) (r Range, ok bool) + // If your ranges hurt, use this. + NumRanges() int +} + +type IPList struct { + ranges []Range +} + +type Range struct { + First, Last net.IP + Description string +} + +func (r Range) String() string { + return fmt.Sprintf("%s-%s: %s", r.First, r.Last, r.Description) +} + +// Create a new IP list. The given ranges must already sorted by the lower +// bound IP in each range. Behaviour is undefined for lists of overlapping +// ranges. +func New(initSorted []Range) *IPList { + return &IPList{ + ranges: initSorted, + } +} + +func (ipl *IPList) NumRanges() int { + if ipl == nil { + return 0 + } + return len(ipl.ranges) +} + +// Return the range the given IP is in. ok if false if no range is found. +func (ipl *IPList) Lookup(ip net.IP) (r Range, ok bool) { + if ipl == nil { + return + } + // TODO: Perhaps all addresses should be converted to IPv6, if the future + // of IP is to always be backwards compatible. But this will cost 4x the + // memory for IPv4 addresses? + v4 := ip.To4() + if v4 != nil { + r, ok = ipl.lookup(v4) + if ok { + return + } + } + v6 := ip.To16() + if v6 != nil { + return ipl.lookup(v6) + } + if v4 == nil && v6 == nil { + r = Range{ + Description: "bad IP", + } + ok = true + } + return +} + +// Return a range that contains ip, or nil. +func lookup( + first func(i int) net.IP, + full func(i int) Range, + n int, + ip net.IP, +) ( + r Range, ok bool, +) { + // Find the index of the first range for which the following range exceeds + // it. 
+ i := sort.Search(n, func(i int) bool { + if i+1 >= n { + return true + } + return bytes.Compare(ip, first(i+1)) < 0 + }) + if i == n { + return + } + r = full(i) + ok = bytes.Compare(r.First, ip) <= 0 && bytes.Compare(ip, r.Last) <= 0 + return +} + +// Return the range the given IP is in. Returns nil if no range is found. +func (ipl *IPList) lookup(ip net.IP) (Range, bool) { + return lookup(func(i int) net.IP { + return ipl.ranges[i].First + }, func(i int) Range { + return ipl.ranges[i] + }, len(ipl.ranges), ip) +} + +func minifyIP(ip *net.IP) { + v4 := ip.To4() + if v4 != nil { + *ip = append(make([]byte, 0, 4), v4...) + } +} + +// Parse a line of the PeerGuardian Text Lists (P2P) Format. Returns !ok but +// no error if a line doesn't contain a range but isn't erroneous, such as +// comment and blank lines. +func ParseBlocklistP2PLine(l []byte) (r Range, ok bool, err error) { + l = bytes.TrimSpace(l) + if len(l) == 0 || bytes.HasPrefix(l, []byte("#")) { + return + } + // TODO: Check this when IPv6 blocklists are available. + colon := bytes.LastIndexAny(l, ":") + if colon == -1 { + err = errors.New("missing colon") + return + } + hyphen := bytes.IndexByte(l[colon+1:], '-') + if hyphen == -1 { + err = errors.New("missing hyphen") + return + } + hyphen += colon + 1 + r.Description = string(l[:colon]) + r.First = net.ParseIP(string(l[colon+1 : hyphen])) + minifyIP(&r.First) + r.Last = net.ParseIP(string(l[hyphen+1:])) + minifyIP(&r.Last) + if r.First == nil || r.Last == nil || len(r.First) != len(r.Last) { + err = errors.New("bad IP range") + return + } + ok = true + return +} + +// Creates an IPList from a line-delimited P2P Plaintext file. +func NewFromReader(f io.Reader) (ret *IPList, err error) { + var ranges []Range + // There's a lot of similar descriptions, so we maintain a pool and reuse + // them to reduce memory overhead. + uniqStrs := make(map[string]string) + scanner := bufio.NewScanner(f) + lineNum := 1 + for scanner.Scan() { + r, ok, lineErr := ParseBlocklistP2PLine(scanner.Bytes()) + if lineErr != nil { + err = fmt.Errorf("error parsing line %d: %s", lineNum, lineErr) + return + } + lineNum++ + if !ok { + continue + } + if s, ok := uniqStrs[r.Description]; ok { + r.Description = s + } else { + uniqStrs[r.Description] = r.Description + } + ranges = append(ranges, r) + } + err = scanner.Err() + if err != nil { + return + } + ret = New(ranges) + return +} diff --git a/deps/github.com/anacrolix/torrent/iplist/iplist_test.go b/deps/github.com/anacrolix/torrent/iplist/iplist_test.go new file mode 100644 index 0000000..4e36e0a --- /dev/null +++ b/deps/github.com/anacrolix/torrent/iplist/iplist_test.go @@ -0,0 +1,124 @@ +package iplist + +import ( + "bufio" + "bytes" + "net" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + // Note the shared description "eff". The overlapping ranges at 1.2.8.2 + // will cause problems. Don't overlap your ranges. 
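// For reference, a line like "a:1.2.4.0-1.2.4.255" parses via
// ParseBlocklistP2PLine above into Range{First: 1.2.4.0, Last: 1.2.4.255,
// Description: "a"}: everything before the last colon is the description,
// which is why "something:more detail" below survives intact.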
+    sample = `
+# List distributed by iblocklist.com
+
+a:1.2.4.0-1.2.4.255
+b:1.2.8.0-1.2.8.255
+eff:1.2.8.2-1.2.8.2
+something:more detail:86.59.95.195-86.59.95.195
+eff:127.0.0.0-127.0.0.1`
+    packedSample []byte
+)

+func init() {
+    var buf bytes.Buffer
+    list, err := NewFromReader(strings.NewReader(sample))
+    if err != nil {
+        panic(err)
+    }
+    err = list.WritePacked(&buf)
+    if err != nil {
+        panic(err)
+    }
+    packedSample = buf.Bytes()
+}
+
+func TestIPv4RangeLen(t *testing.T) {
+    ranges, _ := sampleRanges(t)
+    for i := 0; i < 3; i += 1 {
+        if len(ranges[i].First) != 4 {
+            t.FailNow()
+        }
+        if len(ranges[i].Last) != 4 {
+            t.FailNow()
+        }
+    }
+}
+
+func sampleRanges(tb testing.TB) (ranges []Range, err error) {
+    scanner := bufio.NewScanner(strings.NewReader(sample))
+    for scanner.Scan() {
+        r, ok, err := ParseBlocklistP2PLine(scanner.Bytes())
+        if err != nil {
+            tb.Fatal(err)
+        }
+        if ok {
+            ranges = append(ranges, r)
+        }
+    }
+    err = scanner.Err()
+    return
+}
+
+func BenchmarkParseP2pBlocklist(b *testing.B) {
+    for i := 0; i < b.N; i++ {
+        sampleRanges(b)
+    }
+}
+
+func lookupOk(r Range, ok bool) bool {
+    return ok
+}
+
+func TestBadIP(t *testing.T) {
+    for _, iplist := range []Ranger{
+        // New(nil),
+        NewFromPacked([]byte("\x00\x00\x00\x00\x00\x00\x00\x00")),
+    } {
+        assert.False(t, lookupOk(iplist.Lookup(net.IP(make([]byte, 4)))), "%v", iplist)
+        assert.False(t, lookupOk(iplist.Lookup(net.IP(make([]byte, 16)))))
+        assert.Panics(t, func() { iplist.Lookup(nil) })
+        assert.Panics(t, func() { iplist.Lookup(net.IP(make([]byte, 5))) })
+    }
+}
+
+func testLookuperSimple(t *testing.T, iplist Ranger) {
+    for _, _case := range []struct {
+        IP   string
+        Hit  bool
+        Desc string
+    }{
+        {"1.2.3.255", false, ""},
+        {"1.2.8.0", true, "b"},
+        {"1.2.4.255", true, "a"},
+        // Try to roll over to the next octet on the parse. Note the final
+        // octet is overbounds in the next case.
+        // {"1.2.7.256", true, "bad IP"},
+        {"1.2.8.1", true, "b"},
+        {"1.2.8.2", true, "eff"},
+    } {
+        ip := net.ParseIP(_case.IP)
+        require.NotNil(t, ip, _case.IP)
+        r, ok := iplist.Lookup(ip)
+        assert.Equal(t, _case.Hit, ok, "%s", _case)
+        if !_case.Hit {
+            continue
+        }
+        assert.Equal(t, _case.Desc, r.Description, "%T", iplist)
+    }
+}
+
+func TestSimple(t *testing.T) {
+    ranges, err := sampleRanges(t)
+    require.NoError(t, err)
+    require.Len(t, ranges, 5)
+    iplist := New(ranges)
+    testLookuperSimple(t, iplist)
+    packed := NewFromPacked(packedSample)
+    testLookuperSimple(t, packed)
+}
diff --git a/deps/github.com/anacrolix/torrent/iplist/packed.go b/deps/github.com/anacrolix/torrent/iplist/packed.go
new file mode 100644
index 0000000..5ae1fae
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/iplist/packed.go
@@ -0,0 +1,145 @@
+//go:build !wasm
+// +build !wasm
+
+package iplist
+
+import (
+    "encoding/binary"
+    "fmt"
+    "io"
+    "net"
+    "os"
+
+    "github.com/edsrzf/mmap-go"
+)
+
+// The packed format is an 8 byte integer of the number of ranges. Then 44
+// bytes per range, consisting of a 16 byte packed IP being the lower bound IP
+// of the range, then 16 bytes of the upper, inclusive bound, 8 bytes for the
+// offset of the description from the end of the packed ranges, and 4 bytes
+// for the length of the description. After these packed ranges are the
+// concatenated descriptions.
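// As a worked example of that layout: with a single range, bytes [0, 8) hold
// the range count, [8, 24) the 16-byte First IP, [24, 40) the Last IP,
// [40, 48) the description offset and [48, 52) the description length, and
// the description text itself begins at byte 52.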
+ +const ( + packedRangesOffset = 8 + packedRangeLen = 44 +) + +func (ipl *IPList) WritePacked(w io.Writer) (err error) { + descOffsets := make(map[string]int64, len(ipl.ranges)) + descs := make([]string, 0, len(ipl.ranges)) + var nextOffset int64 + // This is a little monadic, no? + write := func(b []byte, expectedLen int) { + if err != nil { + return + } + var n int + n, err = w.Write(b) + if err != nil { + return + } + if n != expectedLen { + panic(n) + } + } + var b [8]byte + binary.LittleEndian.PutUint64(b[:], uint64(len(ipl.ranges))) + write(b[:], 8) + for _, r := range ipl.ranges { + write(r.First.To16(), 16) + write(r.Last.To16(), 16) + descOff, ok := descOffsets[r.Description] + if !ok { + descOff = nextOffset + descOffsets[r.Description] = descOff + descs = append(descs, r.Description) + nextOffset += int64(len(r.Description)) + } + binary.LittleEndian.PutUint64(b[:], uint64(descOff)) + write(b[:], 8) + binary.LittleEndian.PutUint32(b[:], uint32(len(r.Description))) + write(b[:4], 4) + } + for _, d := range descs { + write([]byte(d), len(d)) + } + return +} + +func NewFromPacked(b []byte) PackedIPList { + ret := PackedIPList(b) + minLen := packedRangesOffset + ret.len()*packedRangeLen + if len(b) < minLen { + panic(fmt.Sprintf("packed len %d < %d", len(b), minLen)) + } + return ret +} + +type PackedIPList []byte + +var _ Ranger = PackedIPList{} + +func (pil PackedIPList) len() int { + return int(binary.LittleEndian.Uint64(pil[:8])) +} + +func (pil PackedIPList) NumRanges() int { + return pil.len() +} + +func (pil PackedIPList) getFirst(i int) net.IP { + off := packedRangesOffset + packedRangeLen*i + return net.IP(pil[off : off+16]) +} + +func (pil PackedIPList) getRange(i int) (ret Range) { + rOff := packedRangesOffset + packedRangeLen*i + last := pil[rOff+16 : rOff+32] + descOff := int(binary.LittleEndian.Uint64(pil[rOff+32:])) + descLen := int(binary.LittleEndian.Uint32(pil[rOff+40:])) + descOff += packedRangesOffset + packedRangeLen*pil.len() + ret = Range{ + pil.getFirst(i), + net.IP(last), + string(pil[descOff : descOff+descLen]), + } + return +} + +func (pil PackedIPList) Lookup(ip net.IP) (r Range, ok bool) { + ip16 := ip.To16() + if ip16 == nil { + panic(ip) + } + return lookup(pil.getFirst, pil.getRange, pil.len(), ip16) +} + +type closerFunc func() error + +func (me closerFunc) Close() error { + return me() +} + +func MMapPackedFile(filename string) ( + ret interface { + Ranger + io.Closer + }, + err error, +) { + f, err := os.Open(filename) + if err != nil { + return + } + defer f.Close() + mm, err := mmap.Map(f, mmap.RDONLY, 0) + if err != nil { + return + } + ret = struct { + Ranger + io.Closer + }{NewFromPacked(mm), closerFunc(mm.Unmap)} + return +} diff --git a/deps/github.com/anacrolix/torrent/iplist/packed_test.go b/deps/github.com/anacrolix/torrent/iplist/packed_test.go new file mode 100644 index 0000000..abc9e29 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/iplist/packed_test.go @@ -0,0 +1,35 @@ +package iplist + +import ( + "bytes" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +// The active ingredients in the sample P2P blocklist file contents `sample`, +// for reference: +// +// a:1.2.4.0-1.2.4.255 +// b:1.2.8.0-1.2.8.255 +// eff:1.2.8.2-1.2.8.2 +// something:more detail:86.59.95.195-86.59.95.195 +// eff:127.0.0.0-127.0.0.1` + +func TestWritePacked(t *testing.T) { + l, err := NewFromReader(strings.NewReader(sample)) + require.NoError(t, err) + var buf bytes.Buffer + err = l.WritePacked(&buf) + require.NoError(t, err) + 
require.Equal(t, + "\x05\x00\x00\x00\x00\x00\x00\x00"+ + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x01\x02\x04\x00"+"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x01\x02\x04\xff"+"\x00\x00\x00\x00\x00\x00\x00\x00"+"\x01\x00\x00\x00"+ + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x01\x02\x08\x00"+"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x01\x02\x08\xff"+"\x01\x00\x00\x00\x00\x00\x00\x00"+"\x01\x00\x00\x00"+ + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x01\x02\x08\x02"+"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x01\x02\x08\x02"+"\x02\x00\x00\x00\x00\x00\x00\x00"+"\x03\x00\x00\x00"+ + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x56\x3b\x5f\xc3"+"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x56\x3b\x5f\xc3"+"\x05\x00\x00\x00\x00\x00\x00\x00"+"\x15\x00\x00\x00"+ + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x7f\x00\x00\x00"+"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x7f\x00\x00\x01"+"\x02\x00\x00\x00\x00\x00\x00\x00"+"\x03\x00\x00\x00"+ + "abeffsomething:more detail", + buf.String()) +} diff --git a/deps/github.com/anacrolix/torrent/ipport.go b/deps/github.com/anacrolix/torrent/ipport.go new file mode 100644 index 0000000..a85a97f --- /dev/null +++ b/deps/github.com/anacrolix/torrent/ipport.go @@ -0,0 +1,71 @@ +package torrent + +import ( + "net" + "strconv" +) + +// Extracts the port as an integer from an address string. +func addrPortOrZero(addr net.Addr) int { + switch raw := addr.(type) { + case *net.UDPAddr: + return raw.Port + case *net.TCPAddr: + return raw.Port + default: + // Consider a unix socket on Windows with a name like "C:notanint". + _, port, err := net.SplitHostPort(addr.String()) + if err != nil { + return 0 + } + i64, err := strconv.ParseUint(port, 0, 16) + if err != nil { + return 0 + } + return int(i64) + } +} + +func addrIpOrNil(addr net.Addr) net.IP { + if addr == nil { + return nil + } + switch raw := addr.(type) { + case *net.UDPAddr: + return raw.IP + case *net.TCPAddr: + return raw.IP + default: + host, _, err := net.SplitHostPort(addr.String()) + if err != nil { + return nil + } + return net.ParseIP(host) + } +} + +type ipPortAddr struct { + IP net.IP + Port int +} + +func (ipPortAddr) Network() string { + return "" +} + +func (me ipPortAddr) String() string { + return net.JoinHostPort(me.IP.String(), strconv.FormatInt(int64(me.Port), 10)) +} + +func tryIpPortFromNetAddr(addr PeerRemoteAddr) (ipPortAddr, bool) { + ok := true + host, port, err := net.SplitHostPort(addr.String()) + if err != nil { + ok = false + } + portI64, err := strconv.ParseInt(port, 10, 0) + if err != nil { + ok = false + } + return ipPortAddr{net.ParseIP(host), int(portI64)}, ok +} diff --git a/deps/github.com/anacrolix/torrent/issue211_test.go b/deps/github.com/anacrolix/torrent/issue211_test.go new file mode 100644 index 0000000..a76be07 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/issue211_test.go @@ -0,0 +1,41 @@ +//go:build !wasm +// +build !wasm + +package torrent + +import ( + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/time/rate" + + "github.com/anacrolix/torrent/internal/testutil" + "github.com/anacrolix/torrent/storage" +) + +func TestDropTorrentWithMmapStorageWhileHashing(t *testing.T) { + cfg := TestingConfig(t) + // Ensure the data is present when the torrent is added, and not obtained + // over the network as the test runs. 
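// A zero-rate, zero-burst limiter never hands out tokens, so the client
// cannot fetch pieces from the network; the read below must be satisfied
// entirely by the data already on disk.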
+ cfg.DownloadRateLimiter = rate.NewLimiter(0, 0) + cl, err := NewClient(cfg) + require.NoError(t, err) + defer cl.Close() + + td, mi := testutil.GreetingTestTorrent() + mms := storage.NewMMap(td) + defer mms.Close() + tt, new, err := cl.AddTorrentSpec(&TorrentSpec{ + Storage: mms, + InfoHash: mi.HashInfoBytes(), + InfoBytes: mi.InfoBytes, + }) + require.NoError(t, err) + assert.True(t, new) + + r := tt.NewReader() + go tt.Drop() + io.Copy(io.Discard, r) +} diff --git a/deps/github.com/anacrolix/torrent/issue97_test.go b/deps/github.com/anacrolix/torrent/issue97_test.go new file mode 100644 index 0000000..ee8107c --- /dev/null +++ b/deps/github.com/anacrolix/torrent/issue97_test.go @@ -0,0 +1,28 @@ +package torrent + +import ( + "testing" + + "github.com/anacrolix/log" + "github.com/stretchr/testify/require" + + "github.com/anacrolix/torrent/internal/testutil" + "github.com/anacrolix/torrent/storage" +) + +func TestHashPieceAfterStorageClosed(t *testing.T) { + td := t.TempDir() + cs := storage.NewFile(td) + defer cs.Close() + tt := &Torrent{ + storageOpener: storage.NewClient(cs), + logger: log.Default, + chunkSize: defaultChunkSize, + } + mi := testutil.GreetingMetaInfo() + info, err := mi.UnmarshalInfo() + require.NoError(t, err) + require.NoError(t, tt.setInfo(&info)) + require.NoError(t, tt.storage.Close()) + tt.hashPiece(0) +} diff --git a/deps/github.com/anacrolix/torrent/listen.go b/deps/github.com/anacrolix/torrent/listen.go new file mode 100644 index 0000000..3840cc1 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/listen.go @@ -0,0 +1,11 @@ +package torrent + +import "strings" + +func LoopbackListenHost(network string) string { + if strings.IndexByte(network, '4') != -1 { + return "127.0.0.1" + } else { + return "::1" + } +} diff --git a/deps/github.com/anacrolix/torrent/logonce/logonce.go b/deps/github.com/anacrolix/torrent/logonce/logonce.go new file mode 100644 index 0000000..7d44edc --- /dev/null +++ b/deps/github.com/anacrolix/torrent/logonce/logonce.go @@ -0,0 +1,47 @@ +// Package logonce implements an io.Writer facade that only performs distinct +// writes. This can be used by log.Loggers as they're guaranteed to make a +// single Write method call for each message. This is useful for loggers that +// print useful information about unexpected conditions that aren't fatal in +// code. +package logonce + +import ( + "io" + "log" + "os" +) + +// A default logger similar to the default logger in the log package. +var Stderr *log.Logger + +func init() { + // This should emulate the default logger in the log package where + // possible. No time flag so that messages don't differ by time. Code + // debug information is useful. 
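// Because a message from a given call site always produces identical bytes
// (no timestamp, stable file:line prefix), something like
//
//    logonce.Stderr.Printf("weird condition")
//
// in a hot loop emits a single line for the life of the process. The message
// text here is only an example.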
+ Stderr = log.New(Writer(os.Stderr), "logonce: ", log.Lshortfile) +} + +type writer struct { + w io.Writer + writes map[string]struct{} +} + +func (w writer) Write(p []byte) (n int, err error) { + s := string(p) + if _, ok := w.writes[s]; ok { + return + } + n, err = w.w.Write(p) + if n != len(s) { + s = string(p[:n]) + } + w.writes[s] = struct{}{} + return +} + +func Writer(w io.Writer) io.Writer { + return writer{ + w: w, + writes: make(map[string]struct{}), + } +} diff --git a/deps/github.com/anacrolix/torrent/main_test.go b/deps/github.com/anacrolix/torrent/main_test.go new file mode 100644 index 0000000..578d992 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/main_test.go @@ -0,0 +1,21 @@ +package torrent + +import ( + "log" + "os" + "testing" + + _ "github.com/anacrolix/envpprof" + analog "github.com/anacrolix/log" +) + +func init() { + log.SetFlags(log.LstdFlags | log.Lshortfile) + analog.DefaultTimeFormatter = analog.TimeFormatSecondsSinceInit +} + +func TestMain(m *testing.M) { + code := m.Run() + // select {} + os.Exit(code) +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/README b/deps/github.com/anacrolix/torrent/metainfo/README new file mode 100644 index 0000000..6da37b8 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/README @@ -0,0 +1 @@ +A library for manipulating ".torrent" files. diff --git a/deps/github.com/anacrolix/torrent/metainfo/announcelist.go b/deps/github.com/anacrolix/torrent/metainfo/announcelist.go new file mode 100644 index 0000000..f19af14 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/announcelist.go @@ -0,0 +1,35 @@ +package metainfo + +type AnnounceList [][]string + +func (al AnnounceList) Clone() (ret AnnounceList) { + for _, tier := range al { + ret = append(ret, append([]string(nil), tier...)) + } + return +} + +// Whether the AnnounceList should be preferred over a single URL announce. +func (al AnnounceList) OverridesAnnounce(announce string) bool { + for _, tier := range al { + for _, url := range tier { + if url != "" || announce == "" { + return true + } + } + } + return false +} + +func (al AnnounceList) DistinctValues() (ret []string) { + seen := make(map[string]struct{}) + for _, tier := range al { + for _, v := range tier { + if _, ok := seen[v]; !ok { + seen[v] = struct{}{} + ret = append(ret, v) + } + } + } + return +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/fileinfo.go b/deps/github.com/anacrolix/torrent/metainfo/fileinfo.go new file mode 100644 index 0000000..2a5ea01 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/fileinfo.go @@ -0,0 +1,35 @@ +package metainfo + +import "strings" + +// Information specific to a single file inside the MetaInfo structure. 
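// For a multi-file torrent, Path holds the path components below the torrent
// root, e.g. []string{"sub", "file.txt"} for sub/file.txt (an example path,
// not from this code); in the single-file form the file has no Path and is
// named by the info's Name field.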
+type FileInfo struct { + Length int64 `bencode:"length"` // BEP3 + Path []string `bencode:"path"` // BEP3 + PathUtf8 []string `bencode:"path.utf-8,omitempty"` +} + +func (fi *FileInfo) DisplayPath(info *Info) string { + if info.IsDir() { + return strings.Join(fi.BestPath(), "/") + } else { + return info.BestName() + } +} + +func (me FileInfo) Offset(info *Info) (ret int64) { + for _, fi := range info.UpvertedFiles() { + if me.DisplayPath(info) == fi.DisplayPath(info) { + return + } + ret += fi.Length + } + panic("not found") +} + +func (fi FileInfo) BestPath() []string { + if len(fi.PathUtf8) != 0 { + return fi.PathUtf8 + } + return fi.Path +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/fuzz_test.go b/deps/github.com/anacrolix/torrent/metainfo/fuzz_test.go new file mode 100644 index 0000000..c01ab6c --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/fuzz_test.go @@ -0,0 +1,47 @@ +//go:build go1.18 +// +build go1.18 + +package metainfo + +import ( + "os" + "path/filepath" + "testing" + + "github.com/anacrolix/torrent/bencode" +) + +func Fuzz(f *testing.F) { + // Is there an OS-agnostic version of Glob? + matches, err := filepath.Glob(filepath.FromSlash("testdata/*.torrent")) + if err != nil { + f.Fatal(err) + } + for _, m := range matches { + b, err := os.ReadFile(m) + if err != nil { + f.Fatal(err) + } + f.Logf("adding %q", m) + f.Add(b) + } + f.Fuzz(func(t *testing.T, b []byte) { + var mi MetaInfo + err := bencode.Unmarshal(b, &mi) + if err != nil { + t.Skip(err) + } + _, err = bencode.Marshal(mi) + if err != nil { + panic(err) + } + info, err := mi.UnmarshalInfo() + if err != nil { + t.Skip(err) + } + _, err = bencode.Marshal(info) + if err != nil { + panic(err) + } + }) +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/hash.go b/deps/github.com/anacrolix/torrent/metainfo/hash.go new file mode 100644 index 0000000..39daf6f --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/hash.go @@ -0,0 +1,16 @@ +package metainfo + +import ( + "github.com/anacrolix/torrent/types/infohash" +) + +// This type has been moved to allow avoiding importing everything in metainfo to get at it. + +const HashSize = infohash.Size + +type Hash = infohash.T + +var ( + NewHashFromHex = infohash.FromHexString + HashBytes = infohash.HashBytes +) diff --git a/deps/github.com/anacrolix/torrent/metainfo/info.go b/deps/github.com/anacrolix/torrent/metainfo/info.go new file mode 100644 index 0000000..1ee2704 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/info.go @@ -0,0 +1,165 @@ +package metainfo + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/anacrolix/missinggo/v2/slices" +) + +// The info dictionary. +type Info struct { + PieceLength int64 `bencode:"piece length"` // BEP3 + Pieces []byte `bencode:"pieces"` // BEP3 + Name string `bencode:"name"` // BEP3 + NameUtf8 string `bencode:"name.utf-8,omitempty"` + Length int64 `bencode:"length,omitempty"` // BEP3, mutually exclusive with Files + Private *bool `bencode:"private,omitempty"` // BEP27 + // TODO: Document this field. + Source string `bencode:"source,omitempty"` + Files []FileInfo `bencode:"files,omitempty"` // BEP3, mutually exclusive with Length +} + +// The Info.Name field is "advisory". For multi-file torrents it's usually a suggested directory +// name. There are situations where we don't want a directory (like using the contents of a torrent +// as the immediate contents of a directory), or the name is invalid. 
Transmission will inject the +// name of the torrent file if it doesn't like the name, resulting in a different infohash +// (https://github.com/transmission/transmission/issues/1775). To work around these situations, we +// will use a sentinel name for compatibility with Transmission and to signal to our own client that +// we intended to have no directory name. By exposing it in the API we can check for references to +// this behaviour within this implementation. +const NoName = "-" + +// This is a helper that sets Files and Pieces from a root path and its children. +func (info *Info) BuildFromFilePath(root string) (err error) { + info.Name = func() string { + b := filepath.Base(root) + switch b { + case ".", "..", string(filepath.Separator): + return NoName + default: + return b + } + }() + info.Files = nil + err = filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + if fi.IsDir() { + // Directories are implicit in torrent files. + return nil + } else if path == root { + // The root is a file. + info.Length = fi.Size() + return nil + } + relPath, err := filepath.Rel(root, path) + if err != nil { + return fmt.Errorf("error getting relative path: %s", err) + } + info.Files = append(info.Files, FileInfo{ + Path: strings.Split(relPath, string(filepath.Separator)), + Length: fi.Size(), + }) + return nil + }) + if err != nil { + return + } + slices.Sort(info.Files, func(l, r FileInfo) bool { + return strings.Join(l.Path, "/") < strings.Join(r.Path, "/") + }) + if info.PieceLength == 0 { + info.PieceLength = ChoosePieceLength(info.TotalLength()) + } + err = info.GeneratePieces(func(fi FileInfo) (io.ReadCloser, error) { + return os.Open(filepath.Join(root, strings.Join(fi.Path, string(filepath.Separator)))) + }) + if err != nil { + err = fmt.Errorf("error generating pieces: %s", err) + } + return +} + +// Concatenates all the files in the torrent into w. open is a function that +// gets at the contents of the given file. +func (info *Info) writeFiles(w io.Writer, open func(fi FileInfo) (io.ReadCloser, error)) error { + for _, fi := range info.UpvertedFiles() { + r, err := open(fi) + if err != nil { + return fmt.Errorf("error opening %v: %s", fi, err) + } + wn, err := io.CopyN(w, r, fi.Length) + r.Close() + if wn != fi.Length { + return fmt.Errorf("error copying %v: %s", fi, err) + } + } + return nil +} + +// Sets Pieces (the block of piece hashes in the Info) by using the passed +// function to get at the torrent data. +func (info *Info) GeneratePieces(open func(fi FileInfo) (io.ReadCloser, error)) (err error) { + if info.PieceLength == 0 { + return errors.New("piece length must be non-zero") + } + pr, pw := io.Pipe() + go func() { + err := info.writeFiles(pw, open) + pw.CloseWithError(err) + }() + defer pr.Close() + info.Pieces, err = GeneratePieces(pr, info.PieceLength, nil) + return +} + +func (info *Info) TotalLength() (ret int64) { + if info.IsDir() { + for _, fi := range info.Files { + ret += fi.Length + } + } else { + ret = info.Length + } + return +} + +func (info *Info) NumPieces() int { + return len(info.Pieces) / 20 +} + +func (info *Info) IsDir() bool { + return len(info.Files) != 0 +} + +// The files field, converted up from the old single-file in the parent info +// dict if necessary. This is a helper to avoid having to conditionally handle +// single and multi-file torrent infos. 
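// For example, a single-file info with Length 42 (an arbitrary value)
// upconverts to the one-element slice []FileInfo{{Length: 42, Path: nil}},
// so callers can treat both torrent shapes uniformly.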
+func (info *Info) UpvertedFiles() []FileInfo { + if len(info.Files) == 0 { + return []FileInfo{{ + Length: info.Length, + // Callers should determine that Info.Name is the basename, and + // thus a regular file. + Path: nil, + }} + } + return info.Files +} + +func (info *Info) Piece(index int) Piece { + return Piece{info, pieceIndex(index)} +} + +func (info Info) BestName() string { + if info.NameUtf8 != "" { + return info.NameUtf8 + } + return info.Name +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/info_test.go b/deps/github.com/anacrolix/torrent/metainfo/info_test.go new file mode 100644 index 0000000..d65ac2f --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/info_test.go @@ -0,0 +1,16 @@ +package metainfo + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/anacrolix/torrent/bencode" +) + +func TestMarshalInfo(t *testing.T) { + var info Info + b, err := bencode.Marshal(info) + assert.NoError(t, err) + assert.EqualValues(t, "d4:name0:12:piece lengthi0e6:pieces0:e", string(b)) +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/magnet.go b/deps/github.com/anacrolix/torrent/metainfo/magnet.go new file mode 100644 index 0000000..48dc148 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/magnet.go @@ -0,0 +1,120 @@ +package metainfo + +import ( + "encoding/base32" + "encoding/hex" + "errors" + "fmt" + "net/url" + "strings" +) + +// Magnet link components. +type Magnet struct { + InfoHash Hash // Expected in this implementation + Trackers []string // "tr" values + DisplayName string // "dn" value, if not empty + Params url.Values // All other values, such as "x.pe", "as", "xs" etc. +} + +const xtPrefix = "urn:btih:" + +func (m Magnet) String() string { + // Deep-copy m.Params + vs := make(url.Values, len(m.Params)+len(m.Trackers)+2) + for k, v := range m.Params { + vs[k] = append([]string(nil), v...) + } + + for _, tr := range m.Trackers { + vs.Add("tr", tr) + } + if m.DisplayName != "" { + vs.Add("dn", m.DisplayName) + } + + // Transmission and Deluge both expect "urn:btih:" to be unescaped. Deluge wants it to be at the + // start of the magnet link. The InfoHash field is expected to be BitTorrent in this + // implementation. + u := url.URL{ + Scheme: "magnet", + RawQuery: "xt=" + xtPrefix + m.InfoHash.HexString(), + } + if len(vs) != 0 { + u.RawQuery += "&" + vs.Encode() + } + return u.String() +} + +// Deprecated: Use ParseMagnetUri. 
+var ParseMagnetURI = ParseMagnetUri + +// ParseMagnetUri parses Magnet-formatted URIs into a Magnet instance +func ParseMagnetUri(uri string) (m Magnet, err error) { + u, err := url.Parse(uri) + if err != nil { + err = fmt.Errorf("error parsing uri: %w", err) + return + } + if u.Scheme != "magnet" { + err = fmt.Errorf("unexpected scheme %q", u.Scheme) + return + } + q := u.Query() + xt := q.Get("xt") + m.InfoHash, err = parseInfohash(q.Get("xt")) + if err != nil { + err = fmt.Errorf("error parsing infohash %q: %w", xt, err) + return + } + dropFirst(q, "xt") + m.DisplayName = q.Get("dn") + dropFirst(q, "dn") + m.Trackers = q["tr"] + delete(q, "tr") + if len(q) == 0 { + q = nil + } + m.Params = q + return +} + +func parseInfohash(xt string) (ih Hash, err error) { + if !strings.HasPrefix(xt, xtPrefix) { + err = errors.New("bad xt parameter prefix") + return + } + encoded := xt[len(xtPrefix):] + decode := func() func(dst, src []byte) (int, error) { + switch len(encoded) { + case 40: + return hex.Decode + case 32: + return base32.StdEncoding.Decode + } + return nil + }() + if decode == nil { + err = fmt.Errorf("unhandled xt parameter encoding (encoded length %d)", len(encoded)) + return + } + n, err := decode(ih[:], []byte(encoded)) + if err != nil { + err = fmt.Errorf("error decoding xt: %w", err) + return + } + if n != 20 { + panic(n) + } + return +} + +func dropFirst(vs url.Values, key string) { + sl := vs[key] + switch len(sl) { + case 0, 1: + vs.Del(key) + default: + vs[key] = sl[1:] + } +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/magnet_test.go b/deps/github.com/anacrolix/torrent/metainfo/magnet_test.go new file mode 100644 index 0000000..24ab15b --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/magnet_test.go @@ -0,0 +1,114 @@ +package metainfo + +import ( + "encoding/hex" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + exampleMagnetURI = `magnet:?xt=urn:btih:51340689c960f0778a4387aef9b4b52fd08390cd&dn=Shit+Movie+%281985%29+1337p+-+Eru&tr=http%3A%2F%2Fhttp.was.great%21&tr=udp%3A%2F%2Fanti.piracy.honeypot%3A6969` + exampleMagnet = Magnet{ + DisplayName: "Shit Movie (1985) 1337p - Eru", + Trackers: []string{ + "http://http.was.great!", + "udp://anti.piracy.honeypot:6969", + }, + } +) + +func init() { + hex.Decode(exampleMagnet.InfoHash[:], []byte("51340689c960f0778a4387aef9b4b52fd08390cd")) +} + +// Converting from our Magnet type to URL string. 
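// String always emits the infohash as unescaped "xt=urn:btih:<hex>" at the
// front of the query, and ParseMagnetUri accepts that form, so a
// String-then-parse round trip (as tested below) reproduces the same Magnet
// value.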
+func TestMagnetString(t *testing.T) { + m, err := ParseMagnetUri(exampleMagnet.String()) + require.NoError(t, err) + assert.EqualValues(t, exampleMagnet, m) +} + +func TestParseMagnetURI(t *testing.T) { + var uri string + var m Magnet + var err error + + // parsing the legit Magnet URI with btih-formatted xt should not return errors + uri = "magnet:?xt=urn:btih:ZOCMZQIPFFW7OLLMIC5HUB6BPCSDEOQU" + _, err = ParseMagnetUri(uri) + if err != nil { + t.Errorf("Attempting parsing the proper Magnet btih URI:\"%v\" failed with err: %v", uri, err) + } + + // Checking if the magnet instance struct is built correctly from parsing + m, err = ParseMagnetUri(exampleMagnetURI) + assert.EqualValues(t, exampleMagnet, m) + assert.NoError(t, err) + + // empty string URI case + _, err = ParseMagnetUri("") + if err == nil { + t.Errorf("Parsing empty string as URI should have returned an error but didn't") + } + + // only BTIH (BitTorrent info hash)-formatted magnet links are currently supported + // must return error correctly when encountering other URN formats + uri = "magnet:?xt=urn:sha1:YNCKHTQCWBTRNJIV4WNAE52SJUQCZO5C" + _, err = ParseMagnetUri(uri) + if err == nil { + t.Errorf("Magnet URI with non-BTIH URNs (like \"%v\") are not supported and should return an error", uri) + } + + // resilience to the broken hash + uri = "magnet:?xt=urn:btih:this hash is really broken" + _, err = ParseMagnetUri(uri) + if err == nil { + t.Errorf("Failed to detect broken Magnet URI: %v", uri) + } +} + +func TestMagnetize(t *testing.T) { + mi, err := LoadFromFile("../testdata/bootstrap.dat.torrent") + require.NoError(t, err) + + info, err := mi.UnmarshalInfo() + require.NoError(t, err) + m := mi.Magnet(nil, &info) + + assert.EqualValues(t, "bootstrap.dat", m.DisplayName) + + ih := [20]byte{ + 54, 113, 155, 162, 206, 207, 159, 59, 215, 197, + 171, 251, 122, 136, 233, 57, 97, 27, 83, 108, + } + + if m.InfoHash != ih { + t.Errorf("Magnet infohash is incorrect") + } + + trackers := []string{ + "udp://tracker.openbittorrent.com:80", + "udp://tracker.openbittorrent.com:80", + "udp://tracker.publicbt.com:80", + "udp://coppersurfer.tk:6969/announce", + "udp://open.demonii.com:1337", + "http://bttracker.crunchbanglinux.org:6969/announce", + } + + for _, expected := range trackers { + if !contains(m.Trackers, expected) { + t.Errorf("Magnet does not contain expected tracker: %s", expected) + } + } +} + +func contains(haystack []string, needle string) bool { + for _, s := range haystack { + if s == needle { + return true + } + } + return false +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/metainfo.go b/deps/github.com/anacrolix/torrent/metainfo/metainfo.go new file mode 100644 index 0000000..93f9103 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/metainfo.go @@ -0,0 +1,98 @@ +package metainfo + +import ( + "bufio" + "io" + "net/url" + "os" + "time" + + "github.com/anacrolix/torrent/bencode" +) + +type MetaInfo struct { + InfoBytes bencode.Bytes `bencode:"info,omitempty"` // BEP 3 + Announce string `bencode:"announce,omitempty"` // BEP 3 + AnnounceList AnnounceList `bencode:"announce-list,omitempty"` // BEP 12 + Nodes []Node `bencode:"nodes,omitempty,ignore_unmarshal_type_error"` // BEP 5 + // Where's this specified? 
Mentioned at + // https://wiki.theory.org/index.php/BitTorrentSpecification: (optional) the creation time of + // the torrent, in standard UNIX epoch format (integer, seconds since 1-Jan-1970 00:00:00 UTC) + CreationDate int64 `bencode:"creation date,omitempty,ignore_unmarshal_type_error"` + Comment string `bencode:"comment,omitempty"` + CreatedBy string `bencode:"created by,omitempty"` + Encoding string `bencode:"encoding,omitempty"` + UrlList UrlList `bencode:"url-list,omitempty"` // BEP 19 WebSeeds +} + +// Load a MetaInfo from an io.Reader. Returns a non-nil error in case of +// failure. +func Load(r io.Reader) (*MetaInfo, error) { + var mi MetaInfo + d := bencode.NewDecoder(r) + err := d.Decode(&mi) + if err != nil { + return nil, err + } + return &mi, nil +} + +// Convenience function for loading a MetaInfo from a file. +func LoadFromFile(filename string) (*MetaInfo, error) { + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + var buf bufio.Reader + buf.Reset(f) + return Load(&buf) +} + +func (mi MetaInfo) UnmarshalInfo() (info Info, err error) { + err = bencode.Unmarshal(mi.InfoBytes, &info) + return +} + +func (mi MetaInfo) HashInfoBytes() (infoHash Hash) { + return HashBytes(mi.InfoBytes) +} + +// Encode to bencoded form. +func (mi MetaInfo) Write(w io.Writer) error { + return bencode.NewEncoder(w).Encode(mi) +} + +// Set good default values in preparation for creating a new MetaInfo file. +func (mi *MetaInfo) SetDefaults() { + mi.CreatedBy = "github.com/anacrolix/torrent" + mi.CreationDate = time.Now().Unix() +} + +// Creates a Magnet from a MetaInfo. Optional infohash and parsed info can be provided. +func (mi MetaInfo) Magnet(infoHash *Hash, info *Info) (m Magnet) { + m.Trackers = append(m.Trackers, mi.UpvertedAnnounceList().DistinctValues()...) + if info != nil { + m.DisplayName = info.BestName() + } + if infoHash != nil { + m.InfoHash = *infoHash + } else { + m.InfoHash = mi.HashInfoBytes() + } + m.Params = make(url.Values) + m.Params["ws"] = mi.UrlList + return +} + +// Returns the announce list converted from the old single announce field if +// necessary. 
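+//
+// For example (illustrative value), a MetaInfo carrying only Announce:
+//
+//	mi := MetaInfo{Announce: "udp://tracker.example:80"}
+//	mi.UpvertedAnnounceList() // [][]string{{"udp://tracker.example:80"}}
+//
+// while an AnnounceList that overrides Announce is returned as-is.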
+func (mi *MetaInfo) UpvertedAnnounceList() AnnounceList { + if mi.AnnounceList.OverridesAnnounce(mi.Announce) { + return mi.AnnounceList + } + if mi.Announce != "" { + return [][]string{{mi.Announce}} + } + return nil +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/metainfo_test.go b/deps/github.com/anacrolix/torrent/metainfo/metainfo_test.go new file mode 100644 index 0000000..335631f --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/metainfo_test.go @@ -0,0 +1,162 @@ +package metainfo + +import ( + "io" + "os" + "path" + "path/filepath" + "strings" + "testing" + + "github.com/anacrolix/missinggo/v2" + qt "github.com/frankban/quicktest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/anacrolix/torrent/bencode" +) + +func testFile(t *testing.T, filename string) { + mi, err := LoadFromFile(filename) + require.NoError(t, err) + info, err := mi.UnmarshalInfo() + require.NoError(t, err) + + if len(info.Files) == 1 { + t.Logf("Single file: %s (length: %d)\n", info.Name, info.Files[0].Length) + } else { + t.Logf("Multiple files: %s\n", info.Name) + for _, f := range info.Files { + t.Logf(" - %s (length: %d)\n", path.Join(f.Path...), f.Length) + } + } + + for _, group := range mi.AnnounceList { + for _, tracker := range group { + t.Logf("Tracker: %s\n", tracker) + } + } + + b, err := bencode.Marshal(&info) + require.NoError(t, err) + assert.EqualValues(t, string(b), string(mi.InfoBytes)) +} + +func TestFile(t *testing.T) { + testFile(t, "testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent") + testFile(t, "testdata/continuum.torrent") + testFile(t, "testdata/23516C72685E8DB0C8F15553382A927F185C4F01.torrent") + testFile(t, "testdata/trackerless.torrent") +} + +// Ensure that the correct number of pieces are generated when hashing files. 
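+// The expected count is ceil(totalLength / pieceLength): e.g. 1 MiB of file
+// data in 256 KiB pieces hashes to 4 pieces, and one extra byte pushes it to 5
+// (see the cases below).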
+func TestNumPieces(t *testing.T) { + for _, _case := range []struct { + PieceLength int64 + Files []FileInfo + NumPieces int + }{ + {256 * 1024, []FileInfo{{Length: 1024*1024 + -1}}, 4}, + {256 * 1024, []FileInfo{{Length: 1024 * 1024}}, 4}, + {256 * 1024, []FileInfo{{Length: 1024*1024 + 1}}, 5}, + {5, []FileInfo{{Length: 1}, {Length: 12}}, 3}, + {5, []FileInfo{{Length: 4}, {Length: 12}}, 4}, + } { + info := Info{ + Files: _case.Files, + PieceLength: _case.PieceLength, + } + err := info.GeneratePieces(func(fi FileInfo) (io.ReadCloser, error) { + return io.NopCloser(missinggo.ZeroReader), nil + }) + assert.NoError(t, err) + assert.EqualValues(t, _case.NumPieces, info.NumPieces()) + } +} + +func touchFile(path string) (err error) { + f, err := os.Create(path) + if err != nil { + return + } + err = f.Close() + return +} + +func TestBuildFromFilePathOrder(t *testing.T) { + td := t.TempDir() + require.NoError(t, touchFile(filepath.Join(td, "b"))) + require.NoError(t, touchFile(filepath.Join(td, "a"))) + info := Info{ + PieceLength: 1, + } + require.NoError(t, info.BuildFromFilePath(td)) + assert.EqualValues(t, []FileInfo{{ + Path: []string{"a"}, + }, { + Path: []string{"b"}, + }}, info.Files) +} + +func testUnmarshal(t *testing.T, input string, expected *MetaInfo) { + var actual MetaInfo + err := bencode.Unmarshal([]byte(input), &actual) + if expected == nil { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.EqualValues(t, *expected, actual) +} + +func TestUnmarshal(t *testing.T) { + testUnmarshal(t, `de`, &MetaInfo{}) + testUnmarshal(t, `d4:infoe`, nil) + testUnmarshal(t, `d4:infoabce`, nil) + testUnmarshal(t, `d4:infodee`, &MetaInfo{InfoBytes: []byte("de")}) +} + +func TestMetainfoWithListURLList(t *testing.T) { + mi, err := LoadFromFile("testdata/SKODAOCTAVIA336x280_archive.torrent") + require.NoError(t, err) + assert.Len(t, mi.UrlList, 3) + qt.Assert(t, mi.Magnet(nil, nil).String(), qt.ContentEquals, + strings.Join([]string{ + "magnet:?xt=urn:btih:d4b197dff199aad447a9a352e31528adbbd97922", + "tr=http%3A%2F%2Fbt1.archive.org%3A6969%2Fannounce", + "tr=http%3A%2F%2Fbt2.archive.org%3A6969%2Fannounce", + "ws=https%3A%2F%2Farchive.org%2Fdownload%2F", + "ws=http%3A%2F%2Fia601600.us.archive.org%2F26%2Fitems%2F", + "ws=http%3A%2F%2Fia801600.us.archive.org%2F26%2Fitems%2F", + }, "&")) +} + +func TestMetainfoWithStringURLList(t *testing.T) { + mi, err := LoadFromFile("testdata/flat-url-list.torrent") + require.NoError(t, err) + assert.Len(t, mi.UrlList, 1) + qt.Assert(t, mi.Magnet(nil, nil).String(), qt.ContentEquals, + strings.Join([]string{ + "magnet:?xt=urn:btih:9da24e606e4ed9c7b91c1772fb5bf98f82bd9687", + "tr=http%3A%2F%2Fbt1.archive.org%3A6969%2Fannounce", + "tr=http%3A%2F%2Fbt2.archive.org%3A6969%2Fannounce", + "ws=https%3A%2F%2Farchive.org%2Fdownload%2F", + }, "&")) +} + +// https://github.com/anacrolix/torrent/issues/247 +// +// The decoder buffer wasn't cleared before starting the next dict item after +// a syntax error on a field with the ignore_unmarshal_type_error tag. +func TestStringCreationDate(t *testing.T) { + var mi MetaInfo + assert.NoError(t, bencode.Unmarshal([]byte("d13:creation date23:29.03.2018 22:18:14 UTC4:infodee"), &mi)) +} + +// See https://github.com/anacrolix/torrent/issues/843. 
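+// "d5:nodes0:e" encodes nodes as an empty *string* rather than a list; the
+// ignore_unmarshal_type_error tag on Nodes should swallow the type mismatch
+// instead of failing the whole unmarshal.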
+func TestUnmarshalEmptyStringNodes(t *testing.T) { + var mi MetaInfo + c := qt.New(t) + err := bencode.Unmarshal([]byte("d5:nodes0:e"), &mi) + c.Assert(err, qt.IsNil) +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/nodes.go b/deps/github.com/anacrolix/torrent/metainfo/nodes.go new file mode 100644 index 0000000..06c3b3f --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/nodes.go @@ -0,0 +1,38 @@ +package metainfo + +import ( + "fmt" + "net" + "strconv" + + "github.com/anacrolix/torrent/bencode" +) + +type Node string + +var _ bencode.Unmarshaler = (*Node)(nil) + +func (n *Node) UnmarshalBencode(b []byte) (err error) { + var iface interface{} + err = bencode.Unmarshal(b, &iface) + if err != nil { + return + } + switch v := iface.(type) { + case string: + *n = Node(v) + case []interface{}: + func() { + defer func() { + r := recover() + if r != nil { + err = r.(error) + } + }() + *n = Node(net.JoinHostPort(v[0].(string), strconv.FormatInt(v[1].(int64), 10))) + }() + default: + err = fmt.Errorf("unsupported type: %T", iface) + } + return +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/nodes_test.go b/deps/github.com/anacrolix/torrent/metainfo/nodes_test.go new file mode 100644 index 0000000..adebbb3 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/nodes_test.go @@ -0,0 +1,74 @@ +package metainfo + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/anacrolix/torrent/bencode" +) + +func testFileNodesMatch(t *testing.T, file string, nodes []Node) { + mi, err := LoadFromFile(file) + require.NoError(t, err) + assert.EqualValues(t, nodes, mi.Nodes) +} + +func TestNodesListStrings(t *testing.T) { + testFileNodesMatch(t, "testdata/trackerless.torrent", []Node{ + "udp://tracker.openbittorrent.com:80", + "udp://tracker.openbittorrent.com:80", + }) +} + +func TestNodesListPairsBEP5(t *testing.T) { + testFileNodesMatch(t, "testdata/issue_65a.torrent", []Node{ + "185.34.3.132:5680", + "185.34.3.103:12340", + "94.209.253.165:47232", + "78.46.103.11:34319", + "195.154.162.70:55011", + "185.34.3.137:3732", + }) + testFileNodesMatch(t, "testdata/issue_65b.torrent", []Node{ + "95.211.203.130:6881", + "84.72.116.169:6889", + "204.83.98.77:7000", + "101.187.175.163:19665", + "37.187.118.32:6881", + "83.128.223.71:23865", + }) +} + +func testMarshalMetainfo(t *testing.T, expected string, mi *MetaInfo) { + b, err := bencode.Marshal(*mi) + assert.NoError(t, err) + assert.EqualValues(t, expected, string(b)) +} + +func TestMarshalMetainfoNodes(t *testing.T) { + testMarshalMetainfo(t, "d4:infodee", &MetaInfo{InfoBytes: []byte("de")}) + testMarshalMetainfo(t, "d4:infod2:hi5:theree5:nodesl12:1.2.3.4:555514:not a hostportee", &MetaInfo{ + Nodes: []Node{"1.2.3.4:5555", "not a hostport"}, + InfoBytes: []byte("d2:hi5:theree"), + }) +} + +func TestUnmarshalBadMetainfoNodes(t *testing.T) { + var mi MetaInfo + // Should barf on the integer in the nodes list. + err := bencode.Unmarshal([]byte("d5:nodesl1:ai42eee"), &mi) + require.Error(t, err) +} + +func TestMetainfoEmptyInfoBytes(t *testing.T) { + var buf bytes.Buffer + require.NoError(t, (&MetaInfo{ + // Include a non-empty field that comes after "info". 
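+		// (Guards that an omitted/empty info value doesn't corrupt the
+		// field encoded after it.)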
+ UrlList: []string{"hello"}, + }).Write(&buf)) + var mi MetaInfo + require.NoError(t, bencode.Unmarshal(buf.Bytes(), &mi)) +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/piece-length.go b/deps/github.com/anacrolix/torrent/metainfo/piece-length.go new file mode 100644 index 0000000..183b4d8 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/piece-length.go @@ -0,0 +1,55 @@ +// From https://github.com/jackpal/Taipei-Torrent + +// Copyright (c) 2010 Jack Palevich. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package metainfo + +// For more context on why these numbers, see http://wiki.vuze.com/w/Torrent_Piece_Size +const ( + minimumPieceLength = 16 * 1024 + targetPieceCountLog2 = 10 + targetPieceCountMin = 1 << targetPieceCountLog2 +) + +// Target piece count should be < targetPieceCountMax +const targetPieceCountMax = targetPieceCountMin << 1 + +// Choose a good piecelength. +func ChoosePieceLength(totalLength int64) (pieceLength int64) { + // Must be a power of 2. + // Must be a multiple of 16KB + // Prefer to provide around 1024..2048 pieces. + pieceLength = minimumPieceLength + pieces := totalLength / pieceLength + for pieces >= targetPieceCountMax { + pieceLength <<= 1 + pieces >>= 1 + } + return +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/piece.go b/deps/github.com/anacrolix/torrent/metainfo/piece.go new file mode 100644 index 0000000..d889538 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/piece.go @@ -0,0 +1,28 @@ +package metainfo + +type Piece struct { + Info *Info // Can we embed the fields here instead, or is it something to do with saving memory? 
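+	// i is the zero-based piece index within Info; Length() special-cases
+	// the final, possibly shorter, piece.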
+ i pieceIndex +} + +type pieceIndex = int + +func (p Piece) Length() int64 { + if int(p.i) == p.Info.NumPieces()-1 { + return p.Info.TotalLength() - int64(p.i)*p.Info.PieceLength + } + return p.Info.PieceLength +} + +func (p Piece) Offset() int64 { + return int64(p.i) * p.Info.PieceLength +} + +func (p Piece) Hash() (ret Hash) { + copy(ret[:], p.Info.Pieces[p.i*HashSize:(p.i+1)*HashSize]) + return +} + +func (p Piece) Index() pieceIndex { + return p.i +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/piece_key.go b/deps/github.com/anacrolix/torrent/metainfo/piece_key.go new file mode 100644 index 0000000..6ddf065 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/piece_key.go @@ -0,0 +1,7 @@ +package metainfo + +// Uniquely identifies a piece. +type PieceKey struct { + InfoHash Hash + Index pieceIndex +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/pieces.go b/deps/github.com/anacrolix/torrent/metainfo/pieces.go new file mode 100644 index 0000000..812f3f4 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/pieces.go @@ -0,0 +1,22 @@ +package metainfo + +import ( + "crypto/sha1" + "io" +) + +func GeneratePieces(r io.Reader, pieceLength int64, b []byte) ([]byte, error) { + for { + h := sha1.New() + written, err := io.CopyN(h, r, pieceLength) + if written > 0 { + b = h.Sum(b) + } + if err == io.EOF { + return b, nil + } + if err != nil { + return b, err + } + } +} diff --git a/deps/github.com/anacrolix/torrent/metainfo/testdata/23516C72685E8DB0C8F15553382A927F185C4F01.torrent b/deps/github.com/anacrolix/torrent/metainfo/testdata/23516C72685E8DB0C8F15553382A927F185C4F01.torrent new file mode 100644 index 0000000..492908c Binary files /dev/null and b/deps/github.com/anacrolix/torrent/metainfo/testdata/23516C72685E8DB0C8F15553382A927F185C4F01.torrent differ diff --git a/deps/github.com/anacrolix/torrent/metainfo/testdata/SKODAOCTAVIA336x280_archive.torrent b/deps/github.com/anacrolix/torrent/metainfo/testdata/SKODAOCTAVIA336x280_archive.torrent new file mode 100644 index 0000000..40ec11f Binary files /dev/null and b/deps/github.com/anacrolix/torrent/metainfo/testdata/SKODAOCTAVIA336x280_archive.torrent differ diff --git a/deps/github.com/anacrolix/torrent/metainfo/testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent b/deps/github.com/anacrolix/torrent/metainfo/testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent new file mode 100644 index 0000000..9ce7748 Binary files /dev/null and b/deps/github.com/anacrolix/torrent/metainfo/testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent differ diff --git a/deps/github.com/anacrolix/torrent/metainfo/testdata/continuum.torrent b/deps/github.com/anacrolix/torrent/metainfo/testdata/continuum.torrent new file mode 100644 index 0000000..ac15b75 Binary files /dev/null and b/deps/github.com/anacrolix/torrent/metainfo/testdata/continuum.torrent differ diff --git a/deps/github.com/anacrolix/torrent/metainfo/testdata/flat-url-list.torrent b/deps/github.com/anacrolix/torrent/metainfo/testdata/flat-url-list.torrent new file mode 100644 index 0000000..216ab38 Binary files /dev/null and b/deps/github.com/anacrolix/torrent/metainfo/testdata/flat-url-list.torrent differ diff --git a/deps/github.com/anacrolix/torrent/metainfo/testdata/issue_65a.torrent b/deps/github.com/anacrolix/torrent/metainfo/testdata/issue_65a.torrent new file mode 100644 index 0000000..8fc13ec Binary files /dev/null and b/deps/github.com/anacrolix/torrent/metainfo/testdata/issue_65a.torrent differ diff --git 
a/deps/github.com/anacrolix/torrent/metainfo/testdata/issue_65b.torrent b/deps/github.com/anacrolix/torrent/metainfo/testdata/issue_65b.torrent new file mode 100644 index 0000000..47b4619 Binary files /dev/null and b/deps/github.com/anacrolix/torrent/metainfo/testdata/issue_65b.torrent differ diff --git a/deps/github.com/anacrolix/torrent/metainfo/testdata/trackerless.torrent b/deps/github.com/anacrolix/torrent/metainfo/testdata/trackerless.torrent new file mode 100644 index 0000000..6537276 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/testdata/trackerless.torrent @@ -0,0 +1 @@ +d7:comment19:This is just a test10:created by12:Johnny Bravo13:creation datei1430648794e8:encoding5:UTF-84:infod6:lengthi1128e4:name12:testfile.bin12:piece lengthi32768e6:pieces20:Ո =Ui^栰E?e5:nodesl35:udp://tracker.openbittorrent.com:8035:udp://tracker.openbittorrent.com:80ee diff --git a/deps/github.com/anacrolix/torrent/metainfo/urllist.go b/deps/github.com/anacrolix/torrent/metainfo/urllist.go new file mode 100644 index 0000000..ed7c36d --- /dev/null +++ b/deps/github.com/anacrolix/torrent/metainfo/urllist.go @@ -0,0 +1,25 @@ +package metainfo + +import ( + "github.com/anacrolix/torrent/bencode" +) + +type UrlList []string + +var _ bencode.Unmarshaler = (*UrlList)(nil) + +func (me *UrlList) UnmarshalBencode(b []byte) error { + if len(b) == 0 { + return nil + } + if b[0] == 'l' { + var l []string + err := bencode.Unmarshal(b, &l) + *me = l + return err + } + var s string + err := bencode.Unmarshal(b, &s) + *me = []string{s} + return err +} diff --git a/deps/github.com/anacrolix/torrent/misc.go b/deps/github.com/anacrolix/torrent/misc.go new file mode 100644 index 0000000..7d3007e --- /dev/null +++ b/deps/github.com/anacrolix/torrent/misc.go @@ -0,0 +1,194 @@ +package torrent + +import ( + "errors" + "net" + + "github.com/RoaringBitmap/roaring" + "github.com/anacrolix/missinggo/v2" + "golang.org/x/time/rate" + + "github.com/anacrolix/torrent/metainfo" + pp "github.com/anacrolix/torrent/peer_protocol" + "github.com/anacrolix/torrent/types" + "github.com/anacrolix/torrent/types/infohash" +) + +type ( + Request = types.Request + ChunkSpec = types.ChunkSpec + piecePriority = types.PiecePriority +) + +const ( + PiecePriorityNormal = types.PiecePriorityNormal + PiecePriorityNone = types.PiecePriorityNone + PiecePriorityNow = types.PiecePriorityNow + PiecePriorityReadahead = types.PiecePriorityReadahead + PiecePriorityNext = types.PiecePriorityNext + PiecePriorityHigh = types.PiecePriorityHigh +) + +func newRequest(index, begin, length pp.Integer) Request { + return Request{index, ChunkSpec{begin, length}} +} + +func newRequestFromMessage(msg *pp.Message) Request { + switch msg.Type { + case pp.Request, pp.Cancel, pp.Reject: + return newRequest(msg.Index, msg.Begin, msg.Length) + case pp.Piece: + return newRequest(msg.Index, msg.Begin, pp.Integer(len(msg.Piece))) + default: + panic(msg.Type) + } +} + +// The size in bytes of a metadata extension piece. +func metadataPieceSize(totalSize, piece int) int { + ret := totalSize - piece*(1<<14) + if ret > 1<<14 { + ret = 1 << 14 + } + return ret +} + +// Return the request that would include the given offset into the torrent data. 
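+// The chunk length is clamped first to the piece boundary and then to the end
+// of the torrent: e.g. torrentLength=13, pieceSize=5, a chunk size of at least
+// a whole piece, and offset=11 yield Request{2, ChunkSpec{0, 3}}
+// (cf. TestTorrentOffsetRequest).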
+func torrentOffsetRequest( + torrentLength, pieceSize, chunkSize, offset int64, +) ( + r Request, ok bool, +) { + if offset < 0 || offset >= torrentLength { + return + } + r.Index = pp.Integer(offset / pieceSize) + r.Begin = pp.Integer(offset % pieceSize / chunkSize * chunkSize) + r.Length = pp.Integer(chunkSize) + pieceLeft := pp.Integer(pieceSize - int64(r.Begin)) + if r.Length > pieceLeft { + r.Length = pieceLeft + } + torrentLeft := torrentLength - int64(r.Index)*pieceSize - int64(r.Begin) + if int64(r.Length) > torrentLeft { + r.Length = pp.Integer(torrentLeft) + } + ok = true + return +} + +func torrentRequestOffset(torrentLength, pieceSize int64, r Request) (off int64) { + off = int64(r.Index)*pieceSize + int64(r.Begin) + if off < 0 || off >= torrentLength { + panic("invalid Request") + } + return +} + +func validateInfo(info *metainfo.Info) error { + if len(info.Pieces)%20 != 0 { + return errors.New("pieces has invalid length") + } + if info.PieceLength == 0 { + if info.TotalLength() != 0 { + return errors.New("zero piece length") + } + } else { + if int((info.TotalLength()+info.PieceLength-1)/info.PieceLength) != info.NumPieces() { + return errors.New("piece count and file lengths are at odds") + } + } + return nil +} + +func chunkIndexSpec(index, pieceLength, chunkSize pp.Integer) ChunkSpec { + ret := ChunkSpec{pp.Integer(index) * chunkSize, chunkSize} + if ret.Begin+ret.Length > pieceLength { + ret.Length = pieceLength - ret.Begin + } + return ret +} + +func connLessTrusted(l, r *Peer) bool { + return l.trust().Less(r.trust()) +} + +func connIsIpv6(nc interface { + LocalAddr() net.Addr +}, +) bool { + ra := nc.LocalAddr() + rip := addrIpOrNil(ra) + return rip.To4() == nil && rip.To16() != nil +} + +func clamp(min, value, max int64) int64 { + if min > max { + panic("harumph") + } + if value < min { + value = min + } + if value > max { + value = max + } + return value +} + +func max(as ...int64) int64 { + ret := as[0] + for _, a := range as[1:] { + if a > ret { + ret = a + } + } + return ret +} + +func maxInt(as ...int) int { + ret := as[0] + for _, a := range as[1:] { + if a > ret { + ret = a + } + } + return ret +} + +func min(as ...int64) int64 { + ret := as[0] + for _, a := range as[1:] { + if a < ret { + ret = a + } + } + return ret +} + +func minInt(as ...int) int { + ret := as[0] + for _, a := range as[1:] { + if a < ret { + ret = a + } + } + return ret +} + +var unlimited = rate.NewLimiter(rate.Inf, 0) + +type ( + pieceIndex = int + // Deprecated: Use infohash.T directly to avoid unnecessary imports. 
+ InfoHash = infohash.T + IpPort = missinggo.IpPort +) + +func boolSliceToBitmap(slice []bool) (rb roaring.Bitmap) { + for i, b := range slice { + if b { + rb.AddInt(i) + } + } + return +} diff --git a/deps/github.com/anacrolix/torrent/misc_test.go b/deps/github.com/anacrolix/torrent/misc_test.go new file mode 100644 index 0000000..d8c0c7a --- /dev/null +++ b/deps/github.com/anacrolix/torrent/misc_test.go @@ -0,0 +1,47 @@ +package torrent + +import ( + "reflect" + "strings" + "testing" + + "github.com/anacrolix/missinggo/iter" + "github.com/anacrolix/missinggo/v2/bitmap" + "github.com/davecgh/go-spew/spew" + "github.com/stretchr/testify/assert" +) + +func TestTorrentOffsetRequest(t *testing.T) { + check := func(tl, ps, off int64, expected Request, ok bool) { + req, _ok := torrentOffsetRequest(tl, ps, defaultChunkSize, off) + assert.Equal(t, _ok, ok) + assert.Equal(t, req, expected) + } + check(13, 5, 0, newRequest(0, 0, 5), true) + check(13, 5, 3, newRequest(0, 0, 5), true) + check(13, 5, 11, newRequest(2, 0, 3), true) + check(13, 5, 13, Request{}, false) +} + +func BenchmarkIterBitmapsDistinct(t *testing.B) { + t.ReportAllocs() + for i := 0; i < t.N; i += 1 { + var skip, first, second bitmap.Bitmap + skip.Add(1) + first.Add(1, 0, 3) + second.Add(1, 2, 0) + skipCopy := skip.Copy() + t.StartTimer() + output := iter.ToSlice(iterBitmapsDistinct(&skipCopy, first, second)) + t.StopTimer() + assert.Equal(t, []interface{}{0, 3, 2}, output) + assert.Equal(t, []bitmap.BitIndex{1}, skip.ToSortedSlice()) + } +} + +func TestSpewConnStats(t *testing.T) { + s := spew.Sdump(ConnStats{}) + t.Logf("\n%s", s) + lines := strings.Count(s, "\n") + assert.EqualValues(t, 2+reflect.ValueOf(ConnStats{}).NumField(), lines) +} diff --git a/deps/github.com/anacrolix/torrent/mmap_span/mmap_span.go b/deps/github.com/anacrolix/torrent/mmap_span/mmap_span.go new file mode 100644 index 0000000..22c394f --- /dev/null +++ b/deps/github.com/anacrolix/torrent/mmap_span/mmap_span.go @@ -0,0 +1,107 @@ +package mmap_span + +import ( + "fmt" + "io" + "sync" + + "github.com/anacrolix/torrent/segments" +) + +type Mmap interface { + Flush() error + Unmap() error + Bytes() []byte +} + +type MMapSpan struct { + mu sync.RWMutex + mMaps []Mmap + segmentLocater segments.Index +} + +func (ms *MMapSpan) Append(mMap Mmap) { + ms.mMaps = append(ms.mMaps, mMap) +} + +func (ms *MMapSpan) Flush() (errs []error) { + ms.mu.RLock() + defer ms.mu.RUnlock() + for _, mMap := range ms.mMaps { + err := mMap.Flush() + if err != nil { + errs = append(errs, err) + } + } + return +} + +func (ms *MMapSpan) Close() (errs []error) { + ms.mu.Lock() + defer ms.mu.Unlock() + for _, mMap := range ms.mMaps { + err := mMap.Unmap() + if err != nil { + errs = append(errs, err) + } + } + // This is for issue 211. 
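+	// Dropping the mmaps and rebuilding an (empty) index means later
+	// ReadAt/WriteAt calls see a zero-length span rather than touching
+	// unmapped memory.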
+ ms.mMaps = nil + ms.InitIndex() + return +} + +func (me *MMapSpan) InitIndex() { + i := 0 + me.segmentLocater = segments.NewIndex(func() (segments.Length, bool) { + if i == len(me.mMaps) { + return -1, false + } + l := int64(len(me.mMaps[i].Bytes())) + i++ + return l, true + }) + // log.Printf("made mmapspan index: %v", me.segmentLocater) +} + +func (ms *MMapSpan) ReadAt(p []byte, off int64) (n int, err error) { + // log.Printf("reading %v bytes at %v", len(p), off) + ms.mu.RLock() + defer ms.mu.RUnlock() + n = ms.locateCopy(func(a, b []byte) (_, _ []byte) { return a, b }, p, off) + if n != len(p) { + err = io.EOF + } + return +} + +func copyBytes(dst, src []byte) int { + return copy(dst, src) +} + +func (ms *MMapSpan) locateCopy(copyArgs func(remainingArgument, mmapped []byte) (dst, src []byte), p []byte, off int64) (n int) { + ms.segmentLocater.Locate(segments.Extent{off, int64(len(p))}, func(i int, e segments.Extent) bool { + mMapBytes := ms.mMaps[i].Bytes()[e.Start:] + // log.Printf("got segment %v: %v, copying %v, %v", i, e, len(p), len(mMapBytes)) + _n := copyBytes(copyArgs(p, mMapBytes)) + p = p[_n:] + n += _n + + if segments.Int(_n) != e.Length { + panic(fmt.Sprintf("did %d bytes, expected to do %d", _n, e.Length)) + } + return true + }) + return +} + +func (ms *MMapSpan) WriteAt(p []byte, off int64) (n int, err error) { + // log.Printf("writing %v bytes at %v", len(p), off) + ms.mu.RLock() + defer ms.mu.RUnlock() + n = ms.locateCopy(func(a, b []byte) (_, _ []byte) { return b, a }, p, off) + if n != len(p) { + err = io.ErrShortWrite + } + return +} diff --git a/deps/github.com/anacrolix/torrent/mse/cmd/mse/main.go b/deps/github.com/anacrolix/torrent/mse/cmd/mse/main.go new file mode 100644 index 0000000..7d10a26 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/mse/cmd/mse/main.go @@ -0,0 +1,93 @@ +package main + +import ( + "fmt" + "io" + "log" + "net" + "os" + "sync" + + "github.com/alexflint/go-arg" + + "github.com/anacrolix/torrent/mse" +) + +func main() { + err := mainErr() + if err != nil { + log.Fatalf("fatal error: %v", err) + } +} + +func mainErr() error { + args := struct { + CryptoMethod mse.CryptoMethod + Dial *struct { + Network string `arg:"positional"` + Address string `arg:"positional"` + SecretKey string `arg:"positional"` + InitialPayload []byte + } `arg:"subcommand"` + Listen *struct { + Network string `arg:"positional"` + Address string `arg:"positional"` + SecretKeys []string `arg:"positional"` + } `arg:"subcommand"` + }{ + CryptoMethod: mse.AllSupportedCrypto, + } + p := arg.MustParse(&args) + if args.Dial != nil { + cn, err := net.Dial(args.Dial.Network, args.Dial.Address) + if err != nil { + return fmt.Errorf("dialing: %w", err) + } + defer cn.Close() + rw, _, err := mse.InitiateHandshake(cn, []byte(args.Dial.SecretKey), args.Dial.InitialPayload, args.CryptoMethod) + if err != nil { + return fmt.Errorf("initiating handshake: %w", err) + } + doStreaming(rw) + } + if args.Listen != nil { + l, err := net.Listen(args.Listen.Network, args.Listen.Address) + if err != nil { + return fmt.Errorf("listening: %w", err) + } + defer l.Close() + cn, err := l.Accept() + l.Close() + if err != nil { + return fmt.Errorf("accepting: %w", err) + } + defer cn.Close() + rw, _, err := mse.ReceiveHandshake(cn, func(f func([]byte) bool) { + for _, sk := range args.Listen.SecretKeys { + f([]byte(sk)) + } + }, mse.DefaultCryptoSelector) + if err != nil { + log.Fatalf("error receiving: %v", err) + } + doStreaming(rw) + } + if p.Subcommand() == nil { + p.Fail("missing subcommand") 
+ } + return nil +} + +func doStreaming(rw io.ReadWriter) { + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + log.Println(io.Copy(rw, os.Stdin)) + }() + go func() { + defer wg.Done() + log.Println(io.Copy(os.Stdout, rw)) + }() + wg.Wait() +} diff --git a/deps/github.com/anacrolix/torrent/mse/mse.go b/deps/github.com/anacrolix/torrent/mse/mse.go new file mode 100644 index 0000000..c3a9f3d --- /dev/null +++ b/deps/github.com/anacrolix/torrent/mse/mse.go @@ -0,0 +1,595 @@ +// https://wiki.vuze.com/w/Message_Stream_Encryption + +package mse + +import ( + "bytes" + "crypto/rand" + "crypto/rc4" + "crypto/sha1" + "encoding/binary" + "errors" + "expvar" + "fmt" + "io" + "math" + "math/big" + "strconv" + "sync" + + "github.com/anacrolix/missinggo/perf" +) + +const ( + maxPadLen = 512 + + CryptoMethodPlaintext CryptoMethod = 1 // After header obfuscation, drop into plaintext + CryptoMethodRC4 CryptoMethod = 2 // After header obfuscation, use RC4 for the rest of the stream + AllSupportedCrypto = CryptoMethodPlaintext | CryptoMethodRC4 +) + +type CryptoMethod uint32 + +var ( + // Prime P according to the spec, and G, the generator. + p, g big.Int + // The rand.Int max arg for use in newPadLen() + newPadLenMax big.Int + // For use in initer's hashes + req1 = []byte("req1") + req2 = []byte("req2") + req3 = []byte("req3") + // Verification constant "VC" which is all zeroes in the bittorrent + // implementation. + vc [8]byte + // Zero padding + zeroPad [512]byte + // Tracks counts of received crypto_provides + cryptoProvidesCount = expvar.NewMap("mseCryptoProvides") +) + +func init() { + p.SetString("0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A63A36210000000000090563", 0) + g.SetInt64(2) + newPadLenMax.SetInt64(maxPadLen + 1) +} + +func hash(parts ...[]byte) []byte { + h := sha1.New() + for _, p := range parts { + n, err := h.Write(p) + if err != nil { + panic(err) + } + if n != len(p) { + panic(n) + } + } + return h.Sum(nil) +} + +func newEncrypt(initer bool, s, skey []byte) (c *rc4.Cipher) { + c, err := rc4.NewCipher(hash([]byte(func() string { + if initer { + return "keyA" + } else { + return "keyB" + } + }()), s, skey)) + if err != nil { + panic(err) + } + var burnSrc, burnDst [1024]byte + c.XORKeyStream(burnDst[:], burnSrc[:]) + return +} + +type cipherReader struct { + c *rc4.Cipher + r io.Reader + mu sync.Mutex + be []byte +} + +func (cr *cipherReader) Read(b []byte) (n int, err error) { + var be []byte + cr.mu.Lock() + if len(cr.be) >= len(b) { + be = cr.be + cr.be = nil + cr.mu.Unlock() + } else { + cr.mu.Unlock() + be = make([]byte, len(b)) + } + n, err = cr.r.Read(be[:len(b)]) + cr.c.XORKeyStream(b[:n], be[:n]) + cr.mu.Lock() + if len(be) > len(cr.be) { + cr.be = be + } + cr.mu.Unlock() + return +} + +func newCipherReader(c *rc4.Cipher, r io.Reader) io.Reader { + return &cipherReader{c: c, r: r} +} + +type cipherWriter struct { + c *rc4.Cipher + w io.Writer + b []byte +} + +func (cr *cipherWriter) Write(b []byte) (n int, err error) { + be := func() []byte { + if len(cr.b) < len(b) { + return make([]byte, len(b)) + } else { + ret := cr.b + cr.b = nil + return ret + } + }() + cr.c.XORKeyStream(be, b) + n, err = cr.w.Write(be[:len(b)]) + if n != len(b) { + // The cipher will have advanced beyond the callers stream position. + // We can't use the cipher anymore. 
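+		// Nil out the cipher so any subsequent Write panics rather than
+		// emitting bytes encrypted with a misaligned keystream.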
+ cr.c = nil + } + if len(be) > len(cr.b) { + cr.b = be + } + return +} + +func newX() big.Int { + var X big.Int + X.SetBytes(func() []byte { + var b [20]byte + _, err := rand.Read(b[:]) + if err != nil { + panic(err) + } + return b[:] + }()) + return X +} + +func paddedLeft(b []byte, _len int) []byte { + if len(b) == _len { + return b + } + ret := make([]byte, _len) + if n := copy(ret[_len-len(b):], b); n != len(b) { + panic(n) + } + return ret +} + +// Calculate, and send Y, our public key. +func (h *handshake) postY(x *big.Int) error { + var y big.Int + y.Exp(&g, x, &p) + return h.postWrite(paddedLeft(y.Bytes(), 96)) +} + +func (h *handshake) establishS() error { + x := newX() + h.postY(&x) + var b [96]byte + _, err := io.ReadFull(h.conn, b[:]) + if err != nil { + return fmt.Errorf("error reading Y: %w", err) + } + var Y, S big.Int + Y.SetBytes(b[:]) + S.Exp(&Y, &x, &p) + sBytes := S.Bytes() + copy(h.s[96-len(sBytes):96], sBytes) + return nil +} + +func newPadLen() int64 { + i, err := rand.Int(rand.Reader, &newPadLenMax) + if err != nil { + panic(err) + } + ret := i.Int64() + if ret < 0 || ret > maxPadLen { + panic(ret) + } + return ret +} + +// Manages state for both initiating and receiving handshakes. +type handshake struct { + conn io.ReadWriter + s [96]byte + initer bool // Whether we're initiating or receiving. + skeys SecretKeyIter // Skeys we'll accept if receiving. + skey []byte // Skey we're initiating with. + ia []byte // Initial payload. Only used by the initiator. + // Return the bit for the crypto method the receiver wants to use. + chooseMethod CryptoSelector + // Sent to the receiver. + cryptoProvides CryptoMethod + + writeMu sync.Mutex + writes [][]byte + writeErr error + writeCond sync.Cond + writeClose bool + + writerMu sync.Mutex + writerCond sync.Cond + writerDone bool +} + +func (h *handshake) finishWriting() { + h.writeMu.Lock() + h.writeClose = true + h.writeCond.Broadcast() + h.writeMu.Unlock() + + h.writerMu.Lock() + for !h.writerDone { + h.writerCond.Wait() + } + h.writerMu.Unlock() +} + +func (h *handshake) writer() { + defer func() { + h.writerMu.Lock() + h.writerDone = true + h.writerCond.Broadcast() + h.writerMu.Unlock() + }() + for { + h.writeMu.Lock() + for { + if len(h.writes) != 0 { + break + } + if h.writeClose { + h.writeMu.Unlock() + return + } + h.writeCond.Wait() + } + b := h.writes[0] + h.writes = h.writes[1:] + h.writeMu.Unlock() + _, err := h.conn.Write(b) + if err != nil { + h.writeMu.Lock() + h.writeErr = err + h.writeMu.Unlock() + return + } + } +} + +func (h *handshake) postWrite(b []byte) error { + h.writeMu.Lock() + defer h.writeMu.Unlock() + if h.writeErr != nil { + return h.writeErr + } + h.writes = append(h.writes, b) + h.writeCond.Signal() + return nil +} + +func xor(a, b []byte) (ret []byte) { + max := len(a) + if max > len(b) { + max = len(b) + } + ret = make([]byte, max) + xorInPlace(ret, a, b) + return +} + +func xorInPlace(dst, a, b []byte) { + for i := range dst { + dst[i] = a[i] ^ b[i] + } +} + +func marshal(w io.Writer, data ...interface{}) (err error) { + for _, data := range data { + err = binary.Write(w, binary.BigEndian, data) + if err != nil { + break + } + } + return +} + +func unmarshal(r io.Reader, data ...interface{}) (err error) { + for _, data := range data { + err = binary.Read(r, binary.BigEndian, data) + if err != nil { + break + } + } + return +} + +// Looking for b at the end of a. 
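+// Returns the length of the longest prefix of b that is also a suffix of a:
+// e.g. ("hello", "lo") -> 2, ("hello", "llo") -> 3, ("hello", "helloooo!") -> 5
+// (cf. TestSuffixMatchLen).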
+func suffixMatchLen(a, b []byte) int { + if len(b) > len(a) { + b = b[:len(a)] + } + // i is how much of b to try to match + for i := len(b); i > 0; i-- { + // j is how many chars we've compared + j := 0 + for ; j < i; j++ { + if b[i-1-j] != a[len(a)-1-j] { + goto shorter + } + } + return j + shorter: + } + return 0 +} + +// Reads from r until b has been seen. Keeps the minimum amount of data in +// memory. +func readUntil(r io.Reader, b []byte) error { + b1 := make([]byte, len(b)) + i := 0 + for { + _, err := io.ReadFull(r, b1[i:]) + if err != nil { + return err + } + i = suffixMatchLen(b1, b) + if i == len(b) { + break + } + if copy(b1, b1[len(b1)-i:]) != i { + panic("wat") + } + } + return nil +} + +type readWriter struct { + io.Reader + io.Writer +} + +func (h *handshake) newEncrypt(initer bool) *rc4.Cipher { + return newEncrypt(initer, h.s[:], h.skey) +} + +func (h *handshake) initerSteps() (ret io.ReadWriter, selected CryptoMethod, err error) { + h.postWrite(hash(req1, h.s[:])) + h.postWrite(xor(hash(req2, h.skey), hash(req3, h.s[:]))) + buf := &bytes.Buffer{} + padLen := uint16(newPadLen()) + if len(h.ia) > math.MaxUint16 { + err = errors.New("initial payload too large") + return + } + err = marshal(buf, vc[:], h.cryptoProvides, padLen, zeroPad[:padLen], uint16(len(h.ia)), h.ia) + if err != nil { + return + } + e := h.newEncrypt(true) + be := make([]byte, buf.Len()) + e.XORKeyStream(be, buf.Bytes()) + h.postWrite(be) + bC := h.newEncrypt(false) + var eVC [8]byte + bC.XORKeyStream(eVC[:], vc[:]) + // Read until the all zero VC. At this point we've only read the 96 byte + // public key, Y. There is potentially 512 byte padding, between us and + // the 8 byte verification constant. + err = readUntil(io.LimitReader(h.conn, 520), eVC[:]) + if err != nil { + if err == io.EOF { + err = errors.New("failed to synchronize on VC") + } else { + err = fmt.Errorf("error reading until VC: %s", err) + } + return + } + r := newCipherReader(bC, h.conn) + var method CryptoMethod + err = unmarshal(r, &method, &padLen) + if err != nil { + return + } + _, err = io.CopyN(io.Discard, r, int64(padLen)) + if err != nil { + return + } + selected = method & h.cryptoProvides + switch selected { + case CryptoMethodRC4: + ret = readWriter{r, &cipherWriter{e, h.conn, nil}} + case CryptoMethodPlaintext: + ret = h.conn + default: + err = fmt.Errorf("receiver chose unsupported method: %x", method) + } + return +} + +var ErrNoSecretKeyMatch = errors.New("no skey matched") + +func (h *handshake) receiverSteps() (ret io.ReadWriter, chosen CryptoMethod, err error) { + // There is up to 512 bytes of padding, then the 20 byte hash. 
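+	// Hence the 532-byte read limit below: maxPadLen (512) plus the
+	// 20-byte hash.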
+ err = readUntil(io.LimitReader(h.conn, 532), hash(req1, h.s[:])) + if err != nil { + if err == io.EOF { + err = errors.New("failed to synchronize on S hash") + } + return + } + var b [20]byte + _, err = io.ReadFull(h.conn, b[:]) + if err != nil { + return + } + expectedHash := hash(req3, h.s[:]) + eachHash := sha1.New() + var sum, xored [sha1.Size]byte + err = ErrNoSecretKeyMatch + h.skeys(func(skey []byte) bool { + eachHash.Reset() + eachHash.Write(req2) + eachHash.Write(skey) + eachHash.Sum(sum[:0]) + xorInPlace(xored[:], sum[:], expectedHash) + if bytes.Equal(xored[:], b[:]) { + h.skey = skey + err = nil + return false + } + return true + }) + if err != nil { + return + } + r := newCipherReader(newEncrypt(true, h.s[:], h.skey), h.conn) + var ( + vc [8]byte + provides CryptoMethod + padLen uint16 + ) + + err = unmarshal(r, vc[:], &provides, &padLen) + if err != nil { + return + } + cryptoProvidesCount.Add(strconv.FormatUint(uint64(provides), 16), 1) + chosen = h.chooseMethod(provides) + _, err = io.CopyN(io.Discard, r, int64(padLen)) + if err != nil { + return + } + var lenIA uint16 + unmarshal(r, &lenIA) + if lenIA != 0 { + h.ia = make([]byte, lenIA) + unmarshal(r, h.ia) + } + buf := &bytes.Buffer{} + w := cipherWriter{h.newEncrypt(false), buf, nil} + padLen = uint16(newPadLen()) + err = marshal(&w, &vc, uint32(chosen), padLen, zeroPad[:padLen]) + if err != nil { + return + } + err = h.postWrite(buf.Bytes()) + if err != nil { + return + } + switch chosen { + case CryptoMethodRC4: + ret = readWriter{ + io.MultiReader(bytes.NewReader(h.ia), r), + &cipherWriter{w.c, h.conn, nil}, + } + case CryptoMethodPlaintext: + ret = readWriter{ + io.MultiReader(bytes.NewReader(h.ia), h.conn), + h.conn, + } + default: + err = errors.New("chosen crypto method is not supported") + } + return +} + +func (h *handshake) Do() (ret io.ReadWriter, method CryptoMethod, err error) { + h.writeCond.L = &h.writeMu + h.writerCond.L = &h.writerMu + go h.writer() + defer func() { + h.finishWriting() + if err == nil { + err = h.writeErr + } + }() + err = h.establishS() + if err != nil { + err = fmt.Errorf("error while establishing secret: %w", err) + return + } + pad := make([]byte, newPadLen()) + io.ReadFull(rand.Reader, pad) + err = h.postWrite(pad) + if err != nil { + return + } + if h.initer { + ret, method, err = h.initerSteps() + } else { + ret, method, err = h.receiverSteps() + } + return +} + +func InitiateHandshake( + rw io.ReadWriter, skey, initialPayload []byte, cryptoProvides CryptoMethod, +) ( + ret io.ReadWriter, method CryptoMethod, err error, +) { + h := handshake{ + conn: rw, + initer: true, + skey: skey, + ia: initialPayload, + cryptoProvides: cryptoProvides, + } + defer perf.ScopeTimerErr(&err)() + return h.Do() +} + +type HandshakeResult struct { + io.ReadWriter + CryptoMethod + error + SecretKey []byte +} + +func ReceiveHandshake(rw io.ReadWriter, skeys SecretKeyIter, selectCrypto CryptoSelector) (io.ReadWriter, CryptoMethod, error) { + res := ReceiveHandshakeEx(rw, skeys, selectCrypto) + return res.ReadWriter, res.CryptoMethod, res.error +} + +func ReceiveHandshakeEx(rw io.ReadWriter, skeys SecretKeyIter, selectCrypto CryptoSelector) (ret HandshakeResult) { + h := handshake{ + conn: rw, + initer: false, + skeys: skeys, + chooseMethod: selectCrypto, + } + ret.ReadWriter, ret.CryptoMethod, ret.error = h.Do() + ret.SecretKey = h.skey + return +} + +// A function that given a function, calls it with secret keys until it +// returns false or exhausted. 
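+// The tests wrap a plain slice of keys like so (see sliceIter in mse_test.go):
+//
+//	func(cb func([]byte) bool) {
+//		for _, sk := range skeys {
+//			if !cb(sk) {
+//				break
+//			}
+//		}
+//	}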
+type SecretKeyIter func(callback func(skey []byte) (more bool)) + +func DefaultCryptoSelector(provided CryptoMethod) CryptoMethod { + // We prefer plaintext for performance reasons. + if provided&CryptoMethodPlaintext != 0 { + return CryptoMethodPlaintext + } + return CryptoMethodRC4 +} + +type CryptoSelector func(CryptoMethod) CryptoMethod diff --git a/deps/github.com/anacrolix/torrent/mse/mse_test.go b/deps/github.com/anacrolix/torrent/mse/mse_test.go new file mode 100644 index 0000000..f7f7fe7 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/mse/mse_test.go @@ -0,0 +1,278 @@ +package mse + +import ( + "bytes" + "crypto/rand" + "crypto/rc4" + "io" + "net" + "sync" + "testing" + + _ "github.com/anacrolix/envpprof" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func sliceIter(skeys [][]byte) SecretKeyIter { + return func(callback func([]byte) bool) { + for _, sk := range skeys { + if !callback(sk) { + break + } + } + } +} + +func TestReadUntil(t *testing.T) { + test := func(data, until string, leftover int, expectedErr error) { + r := bytes.NewReader([]byte(data)) + err := readUntil(r, []byte(until)) + if err != expectedErr { + t.Fatal(err) + } + if r.Len() != leftover { + t.Fatal(r.Len()) + } + } + test("feakjfeafeafegbaabc00", "abc", 2, nil) + test("feakjfeafeafegbaadc00", "abc", 0, io.EOF) +} + +func TestSuffixMatchLen(t *testing.T) { + test := func(a, b string, expected int) { + actual := suffixMatchLen([]byte(a), []byte(b)) + if actual != expected { + t.Fatalf("expected %d, got %d for %q and %q", expected, actual, a, b) + } + } + test("hello", "world", 0) + test("hello", "lo", 2) + test("hello", "llo", 3) + test("hello", "hell", 0) + test("hello", "helloooo!", 5) + test("hello", "lol!", 2) + test("hello", "mondo", 0) + test("mongo", "webscale", 0) + test("sup", "person", 1) +} + +func handshakeTest(t testing.TB, ia []byte, aData, bData string, cryptoProvides CryptoMethod, cryptoSelect CryptoSelector) { + a, b := net.Pipe() + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + defer wg.Done() + a, cm, err := InitiateHandshake(a, []byte("yep"), ia, cryptoProvides) + require.NoError(t, err) + assert.Equal(t, cryptoSelect(cryptoProvides), cm) + go a.Write([]byte(aData)) + + var msg [20]byte + n, _ := a.Read(msg[:]) + if n != len(bData) { + t.FailNow() + } + // t.Log(string(msg[:n])) + }() + go func() { + defer wg.Done() + res := ReceiveHandshakeEx(b, sliceIter([][]byte{[]byte("nope"), []byte("yep"), []byte("maybe")}), cryptoSelect) + require.NoError(t, res.error) + assert.EqualValues(t, "yep", res.SecretKey) + b := res.ReadWriter + assert.Equal(t, cryptoSelect(cryptoProvides), res.CryptoMethod) + go b.Write([]byte(bData)) + // Need to be exact here, as there are several reads, and net.Pipe is most synchronous. 
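+		// The receiver should observe the initial payload followed by
+		// aData, in that order, hence the exact-length read below.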
+ msg := make([]byte, len(ia)+len(aData)) + n, _ := io.ReadFull(b, msg) + if n != len(msg) { + t.FailNow() + } + // t.Log(string(msg[:n])) + }() + wg.Wait() + a.Close() + b.Close() +} + +func allHandshakeTests(t testing.TB, provides CryptoMethod, selector CryptoSelector) { + handshakeTest(t, []byte("jump the gun, "), "hello world", "yo dawg", provides, selector) + handshakeTest(t, nil, "hello world", "yo dawg", provides, selector) + handshakeTest(t, []byte{}, "hello world", "yo dawg", provides, selector) +} + +func TestHandshakeDefault(t *testing.T) { + allHandshakeTests(t, AllSupportedCrypto, DefaultCryptoSelector) + t.Logf("crypto provides encountered: %s", cryptoProvidesCount) +} + +func TestHandshakeSelectPlaintext(t *testing.T) { + allHandshakeTests(t, AllSupportedCrypto, func(CryptoMethod) CryptoMethod { return CryptoMethodPlaintext }) +} + +func BenchmarkHandshakeDefault(b *testing.B) { + for i := 0; i < b.N; i += 1 { + allHandshakeTests(b, AllSupportedCrypto, DefaultCryptoSelector) + } +} + +type trackReader struct { + r io.Reader + n int64 +} + +func (tr *trackReader) Read(b []byte) (n int, err error) { + n, err = tr.r.Read(b) + tr.n += int64(n) + return +} + +func TestReceiveRandomData(t *testing.T) { + tr := trackReader{rand.Reader, 0} + _, _, err := ReceiveHandshake(readWriter{&tr, io.Discard}, nil, DefaultCryptoSelector) + // No skey matches + require.Error(t, err) + // Establishing S, and then reading the maximum padding for giving up on + // synchronizing. + require.EqualValues(t, 96+532, tr.n) +} + +func fillRand(t testing.TB, bs ...[]byte) { + for _, b := range bs { + _, err := rand.Read(b) + require.NoError(t, err) + } +} + +func readAndWrite(rw io.ReadWriter, r, w []byte) error { + var wg sync.WaitGroup + wg.Add(1) + var wErr error + go func() { + defer wg.Done() + _, wErr = rw.Write(w) + }() + _, err := io.ReadFull(rw, r) + if err != nil { + return err + } + wg.Wait() + return wErr +} + +func benchmarkStream(t *testing.B, crypto CryptoMethod) { + ia := make([]byte, 0x1000) + a := make([]byte, 1<<20) + b := make([]byte, 1<<20) + fillRand(t, ia, a, b) + t.StopTimer() + t.SetBytes(int64(len(ia) + len(a) + len(b))) + t.ResetTimer() + for i := 0; i < t.N; i += 1 { + ac, bc := net.Pipe() + ar := make([]byte, len(b)) + br := make([]byte, len(ia)+len(a)) + t.StartTimer() + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer ac.Close() + defer wg.Done() + rw, _, err := InitiateHandshake(ac, []byte("cats"), ia, crypto) + require.NoError(t, err) + require.NoError(t, readAndWrite(rw, ar, a)) + }() + func() { + defer bc.Close() + rw, _, err := ReceiveHandshake(bc, sliceIter([][]byte{[]byte("cats")}), func(CryptoMethod) CryptoMethod { return crypto }) + require.NoError(t, err) + require.NoError(t, readAndWrite(rw, br, b)) + }() + wg.Wait() + t.StopTimer() + if !bytes.Equal(ar, b) { + t.Fatalf("A read the wrong bytes") + } + if !bytes.Equal(br[:len(ia)], ia) { + t.Fatalf("B read the wrong IA") + } + if !bytes.Equal(br[len(ia):], a) { + t.Fatalf("B read the wrong A") + } + // require.Equal(t, b, ar) + // require.Equal(t, ia, br[:len(ia)]) + // require.Equal(t, a, br[len(ia):]) + } +} + +func BenchmarkStreamRC4(t *testing.B) { + benchmarkStream(t, CryptoMethodRC4) +} + +func BenchmarkStreamPlaintext(t *testing.B) { + benchmarkStream(t, CryptoMethodPlaintext) +} + +func BenchmarkPipeRC4(t *testing.B) { + key := make([]byte, 20) + n, _ := rand.Read(key) + require.Equal(t, len(key), n) + var buf bytes.Buffer + c, err := rc4.NewCipher(key) + require.NoError(t, err) + r := cipherReader{ 
+		c: c,
+		r: &buf,
+	}
+	c, err = rc4.NewCipher(key)
+	require.NoError(t, err)
+	w := cipherWriter{
+		c: c,
+		w: &buf,
+	}
+	a := make([]byte, 0x1000)
+	n, _ = io.ReadFull(rand.Reader, a)
+	require.Equal(t, len(a), n)
+	b := make([]byte, len(a))
+	t.SetBytes(int64(len(a)))
+	t.ResetTimer()
+	for i := 0; i < t.N; i += 1 {
+		n, _ = w.Write(a)
+		if n != len(a) {
+			t.FailNow()
+		}
+		n, _ = r.Read(b)
+		if n != len(b) {
+			t.FailNow()
+		}
+		if !bytes.Equal(a, b) {
+			t.FailNow()
+		}
+	}
+}
+
+func BenchmarkSkeysReceive(b *testing.B) {
+	var skeys [][]byte
+	for i := 0; i < 100000; i += 1 {
+		skeys = append(skeys, make([]byte, 20))
+	}
+	fillRand(b, skeys...)
+	initSkey := skeys[len(skeys)/2]
+	// c := qt.New(b)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i += 1 {
+		initiator, receiver := net.Pipe()
+		go func() {
+			_, _, err := InitiateHandshake(initiator, initSkey, nil, AllSupportedCrypto)
+			if err != nil {
+				panic(err)
+			}
+		}()
+		res := ReceiveHandshakeEx(receiver, sliceIter(skeys), DefaultCryptoSelector)
+		if res.error != nil {
+			panic(res.error)
+		}
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/netip-addrport.go b/deps/github.com/anacrolix/torrent/netip-addrport.go
new file mode 100644
index 0000000..e438db7
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/netip-addrport.go
@@ -0,0 +1,52 @@
+package torrent
+
+import (
+	"fmt"
+	"net/netip"
+
+	"github.com/anacrolix/dht/v2/krpc"
+)
+
+type addrPorter interface {
+	AddrPort() netip.AddrPort
+}
+
+func ipv4AddrPortFromKrpcNodeAddr(na krpc.NodeAddr) (_ netip.AddrPort, err error) {
+	ip4 := na.IP.To4()
+	if ip4 == nil {
+		err = fmt.Errorf("not an ipv4 address: %v", na.IP)
+		return
+	}
+	addr := netip.AddrFrom4(*(*[4]byte)(ip4))
+	addrPort := netip.AddrPortFrom(addr, uint16(na.Port))
+	return addrPort, nil
+}
+
+func ipv6AddrPortFromKrpcNodeAddr(na krpc.NodeAddr) (_ netip.AddrPort, err error) {
+	ip6 := na.IP.To16()
+	if ip6 == nil {
+		err = fmt.Errorf("not an ipv6 address: %v", na.IP)
+		return
+	}
+	addr := netip.AddrFrom16(*(*[16]byte)(ip6))
+	addrPort := netip.AddrPortFrom(addr, uint16(na.Port))
+	return addrPort, nil
+}
+
+func addrPortFromPeerRemoteAddr(pra PeerRemoteAddr) (netip.AddrPort, error) {
+	switch v := pra.(type) {
+	case addrPorter:
+		return v.AddrPort(), nil
+	case netip.AddrPort:
+		return v, nil
+	default:
+		return netip.ParseAddrPort(pra.String())
+	}
+}
+
+func krpcNodeAddrFromAddrPort(addrPort netip.AddrPort) krpc.NodeAddr {
+	return krpc.NodeAddr{
+		IP:   addrPort.Addr().AsSlice(),
+		Port: int(addrPort.Port()),
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/network_test.go b/deps/github.com/anacrolix/torrent/network_test.go
new file mode 100644
index 0000000..a1fd880
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/network_test.go
@@ -0,0 +1,81 @@
+package torrent
+
+import (
+	"net"
+	"testing"
+
+	"github.com/anacrolix/log"
+	"github.com/anacrolix/missinggo/v2"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func testListenerNetwork(
+	t *testing.T,
+	listenFunc func(net, addr string) (net.Listener, error),
+	expectedNet, givenNet, addr string, validIp4 bool,
+) {
+	l, err := listenFunc(givenNet, addr)
+	require.NoError(t, err)
+	defer l.Close()
+	assert.EqualValues(t, expectedNet, l.Addr().Network())
+	ip := missinggo.AddrIP(l.Addr())
+	assert.Equal(t, validIp4, ip.To4() != nil, ip)
+}
+
+func listenUtpListener(net, addr string) (l net.Listener, err error) {
+	l, err = NewUtpSocket(net, addr, nil, log.Default)
+	return
+}
+
+func
testAcceptedConnAddr( + t *testing.T, + network string, valid4 bool, + dial func(addr string) (net.Conn, error), + listen func() (net.Listener, error), +) { + l, err := listen() + require.NoError(t, err) + defer l.Close() + done := make(chan struct{}) + defer close(done) + go func() { + c, err := dial(l.Addr().String()) + require.NoError(t, err) + <-done + c.Close() + }() + c, err := l.Accept() + require.NoError(t, err) + defer c.Close() + assert.EqualValues(t, network, c.RemoteAddr().Network()) + assert.Equal(t, valid4, missinggo.AddrIP(c.RemoteAddr()).To4() != nil) +} + +func listenClosure(rawListenFunc func(string, string) (net.Listener, error), network, addr string) func() (net.Listener, error) { + return func() (net.Listener, error) { + return rawListenFunc(network, addr) + } +} + +func dialClosure(f func(net, addr string) (net.Conn, error), network string) func(addr string) (net.Conn, error) { + return func(addr string) (net.Conn, error) { + return f(network, addr) + } +} + +func TestListenLocalhostNetwork(t *testing.T) { + testListenerNetwork(t, net.Listen, "tcp", "tcp", "0.0.0.0:0", false) + testListenerNetwork(t, net.Listen, "tcp", "tcp", "[::1]:0", false) + testListenerNetwork(t, listenUtpListener, "udp", "udp6", "[::1]:0", false) + testListenerNetwork(t, listenUtpListener, "udp", "udp6", "[::]:0", false) + testListenerNetwork(t, listenUtpListener, "udp", "udp4", "localhost:0", true) + + testAcceptedConnAddr( + t, + "tcp", + false, + dialClosure(net.Dial, "tcp"), + listenClosure(net.Listen, "tcp6", "localhost:0"), + ) +} diff --git a/deps/github.com/anacrolix/torrent/networks.go b/deps/github.com/anacrolix/torrent/networks.go new file mode 100644 index 0000000..068a9a5 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/networks.go @@ -0,0 +1,57 @@ +package torrent + +import "strings" + +var allPeerNetworks = func() (ret []network) { + for _, s := range []string{"tcp4", "tcp6", "udp4", "udp6"} { + ret = append(ret, parseNetworkString(s)) + } + return +}() + +type network struct { + Ipv4 bool + Ipv6 bool + Udp bool + Tcp bool +} + +func (n network) String() (ret string) { + a := func(b bool, s string) { + if b { + ret += s + } + } + a(n.Udp, "udp") + a(n.Tcp, "tcp") + a(n.Ipv4, "4") + a(n.Ipv6, "6") + return +} + +func parseNetworkString(network string) (ret network) { + c := func(s string) bool { + return strings.Contains(network, s) + } + ret.Ipv4 = c("4") + ret.Ipv6 = c("6") + ret.Udp = c("udp") + ret.Tcp = c("tcp") + return +} + +func peerNetworkEnabled(n network, cfg *ClientConfig) bool { + if cfg.DisableUTP && n.Udp { + return false + } + if cfg.DisableTCP && n.Tcp { + return false + } + if cfg.DisableIPv6 && n.Ipv6 { + return false + } + if cfg.DisableIPv4 && n.Ipv4 { + return false + } + return true +} diff --git a/deps/github.com/anacrolix/torrent/ordered-bitmap.go b/deps/github.com/anacrolix/torrent/ordered-bitmap.go new file mode 100644 index 0000000..7410671 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/ordered-bitmap.go @@ -0,0 +1,59 @@ +package torrent + +import ( + g "github.com/anacrolix/generics" + list "github.com/bahlo/generic-list-go" + + "github.com/anacrolix/torrent/typed-roaring" +) + +type orderedBitmap[T typedRoaring.BitConstraint] struct { + bitmap typedRoaring.Bitmap[T] + // There should be way more efficient ways to do this. 
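+	// order preserves insertion order for Iterate; elements maps each value
+	// to its list node so CheckedRemove can unlink it in O(1).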
+ order list.List[T] + elements map[T]*list.Element[T] +} + +func (o *orderedBitmap[T]) IterateSnapshot(f func(T) bool) { + o.bitmap.Clone().Iterate(f) +} + +func (o *orderedBitmap[T]) IsEmpty() bool { + return o.bitmap.IsEmpty() +} + +func (o *orderedBitmap[T]) GetCardinality() uint64 { + return uint64(o.order.Len()) +} + +func (o *orderedBitmap[T]) Contains(index T) bool { + return o.bitmap.Contains(index) +} + +func (o *orderedBitmap[T]) Add(index T) { + o.bitmap.Add(index) + if _, ok := o.elements[index]; !ok { + g.MakeMapIfNilAndSet(&o.elements, index, o.order.PushBack(index)) + } +} + +func (o *orderedBitmap[T]) Rank(index T) uint64 { + return o.bitmap.Rank(index) +} + +func (o *orderedBitmap[T]) Iterate(f func(T) bool) { + for e := o.order.Front(); e != nil; e = e.Next() { + if !f(e.Value) { + return + } + } +} + +func (o *orderedBitmap[T]) CheckedRemove(index T) bool { + if !o.bitmap.CheckedRemove(index) { + return false + } + o.order.Remove(o.elements[index]) + delete(o.elements, index) + return true +} diff --git a/deps/github.com/anacrolix/torrent/otel.go b/deps/github.com/anacrolix/torrent/otel.go new file mode 100644 index 0000000..5dddd6a --- /dev/null +++ b/deps/github.com/anacrolix/torrent/otel.go @@ -0,0 +1,3 @@ +package torrent + +const tracerName = "anacrolix.torrent" diff --git a/deps/github.com/anacrolix/torrent/peer-conn-msg-writer.go b/deps/github.com/anacrolix/torrent/peer-conn-msg-writer.go new file mode 100644 index 0000000..1bacc59 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/peer-conn-msg-writer.go @@ -0,0 +1,130 @@ +package torrent + +import ( + "bytes" + "io" + "time" + + "github.com/anacrolix/chansync" + "github.com/anacrolix/log" + "github.com/anacrolix/sync" + + pp "github.com/anacrolix/torrent/peer_protocol" +) + +func (pc *PeerConn) initMessageWriter() { + w := &pc.messageWriter + *w = peerConnMsgWriter{ + fillWriteBuffer: func() { + pc.locker().Lock() + defer pc.locker().Unlock() + if pc.closed.IsSet() { + return + } + pc.fillWriteBuffer() + }, + closed: &pc.closed, + logger: pc.logger, + w: pc.w, + keepAlive: func() bool { + pc.locker().RLock() + defer pc.locker().RUnlock() + return pc.useful() + }, + writeBuffer: new(bytes.Buffer), + } +} + +func (pc *PeerConn) startMessageWriter() { + pc.initMessageWriter() + go pc.messageWriterRunner() +} + +func (pc *PeerConn) messageWriterRunner() { + defer pc.locker().Unlock() + defer pc.close() + defer pc.locker().Lock() + pc.messageWriter.run(pc.t.cl.config.KeepAliveTimeout) +} + +type peerConnMsgWriter struct { + // Must not be called with the local mutex held, as it will call back into the write method. + fillWriteBuffer func() + closed *chansync.SetOnce + logger log.Logger + w io.Writer + keepAlive func() bool + + mu sync.Mutex + writeCond chansync.BroadcastCond + // Pointer so we can swap with the "front buffer". + writeBuffer *bytes.Buffer +} + +// Routine that writes to the peer. Some of what to write is buffered by +// activity elsewhere in the Client, and some is determined locally when the +// connection is writable. 
+func (cn *peerConnMsgWriter) run(keepAliveTimeout time.Duration) { + lastWrite := time.Now() + keepAliveTimer := time.NewTimer(keepAliveTimeout) + frontBuf := new(bytes.Buffer) + for { + if cn.closed.IsSet() { + return + } + cn.fillWriteBuffer() + keepAlive := cn.keepAlive() + cn.mu.Lock() + if cn.writeBuffer.Len() == 0 && time.Since(lastWrite) >= keepAliveTimeout && keepAlive { + cn.writeBuffer.Write(pp.Message{Keepalive: true}.MustMarshalBinary()) + torrent.Add("written keepalives", 1) + } + if cn.writeBuffer.Len() == 0 { + writeCond := cn.writeCond.Signaled() + cn.mu.Unlock() + select { + case <-cn.closed.Done(): + case <-writeCond: + case <-keepAliveTimer.C: + } + continue + } + // Flip the buffers. + frontBuf, cn.writeBuffer = cn.writeBuffer, frontBuf + cn.mu.Unlock() + if frontBuf.Len() == 0 { + panic("expected non-empty front buffer") + } + var err error + for frontBuf.Len() != 0 { + // Limit write size for WebRTC. See https://github.com/pion/datachannel/issues/59. + next := frontBuf.Next(1<<16 - 1) + var n int + n, err = cn.w.Write(next) + if err == nil && n != len(next) { + panic("expected full write") + } + if err != nil { + break + } + } + if err != nil { + cn.logger.WithDefaultLevel(log.Debug).Printf("error writing: %v", err) + return + } + lastWrite = time.Now() + keepAliveTimer.Reset(keepAliveTimeout) + } +} + +func (cn *peerConnMsgWriter) write(msg pp.Message) bool { + cn.mu.Lock() + defer cn.mu.Unlock() + cn.writeBuffer.Write(msg.MustMarshalBinary()) + cn.writeCond.Broadcast() + return !cn.writeBufferFull() +} + +func (cn *peerConnMsgWriter) writeBufferFull() bool { + return cn.writeBuffer.Len() >= writeBufferHighWaterLen +} diff --git a/deps/github.com/anacrolix/torrent/peer-impl.go b/deps/github.com/anacrolix/torrent/peer-impl.go new file mode 100644 index 0000000..f9f9096 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/peer-impl.go @@ -0,0 +1,37 @@ +package torrent + +import ( + "github.com/RoaringBitmap/roaring" + + "github.com/anacrolix/torrent/metainfo" +) + +// Contains implementation details that differ between peer types, like Webseeds and regular +// BitTorrent protocol connections. Some methods are underlined so as to avoid collisions with +// legacy PeerConn methods. +type peerImpl interface { + // Trigger the actual request state to get updated + handleUpdateRequests() + writeInterested(interested bool) bool + + // _cancel initiates cancellation of a request and returns acked if it expects the cancel to be + // handled by a follow-up event. + _cancel(RequestIndex) (acked bool) + _request(Request) bool + connectionFlags() string + onClose() + onGotInfo(*metainfo.Info) + // Drop connection. This may be a no-op if there is no connection. + drop() + // Rebuke the peer + ban() + String() string + peerImplStatusLines() []string + + // All if the peer should have everything, known if we know that for a fact. For example, we can + // guess at how many pieces are in a torrent, and assume they have all pieces based on them + // having sent haves for everything, but we don't know for sure. But if they send a have-all + // message, then it's clear that they do. 
+ peerHasAllPieces() (all, known bool) + peerPieces() *roaring.Bitmap +} diff --git a/deps/github.com/anacrolix/torrent/peer.go b/deps/github.com/anacrolix/torrent/peer.go new file mode 100644 index 0000000..608ccb1 --- /dev/null +++ b/deps/github.com/anacrolix/torrent/peer.go @@ -0,0 +1,877 @@ +package torrent + +import ( + "errors" + "fmt" + "io" + "net" + "strings" + "time" + + "github.com/RoaringBitmap/roaring" + "github.com/anacrolix/chansync" + . "github.com/anacrolix/generics" + "github.com/anacrolix/log" + "github.com/anacrolix/missinggo/iter" + "github.com/anacrolix/missinggo/v2/bitmap" + "github.com/anacrolix/multiless" + + "github.com/anacrolix/torrent/internal/alloclim" + "github.com/anacrolix/torrent/mse" + pp "github.com/anacrolix/torrent/peer_protocol" + request_strategy "github.com/anacrolix/torrent/request-strategy" + typedRoaring "github.com/anacrolix/torrent/typed-roaring" +) + +type ( + Peer struct { + // First to ensure 64-bit alignment for atomics. See #262. + _stats ConnStats + + t *Torrent + + peerImpl + callbacks *Callbacks + + outgoing bool + Network string + RemoteAddr PeerRemoteAddr + // The local address as observed by the remote peer. WebRTC seems to get this right without needing hints from the + // config. + localPublicAddr peerLocalPublicAddr + bannableAddr Option[bannableAddr] + // True if the connection is operating over MSE obfuscation. + headerEncrypted bool + cryptoMethod mse.CryptoMethod + Discovery PeerSource + trusted bool + closed chansync.SetOnce + // Set true after we've added our ConnStats generated during handshake to + // other ConnStat instances as determined when the *Torrent became known. + reconciledHandshakeStats bool + + lastMessageReceived time.Time + completedHandshake time.Time + lastUsefulChunkReceived time.Time + lastChunkSent time.Time + + // Stuff controlled by the local peer. + needRequestUpdate string + requestState request_strategy.PeerRequestState + updateRequestsTimer *time.Timer + lastRequestUpdate time.Time + peakRequests maxRequests + lastBecameInterested time.Time + priorInterest time.Duration + + lastStartedExpectingToReceiveChunks time.Time + cumulativeExpectedToReceiveChunks time.Duration + _chunksReceivedWhileExpecting int64 + + choking bool + piecesReceivedSinceLastRequestUpdate maxRequests + maxPiecesReceivedBetweenRequestUpdates maxRequests + // Chunks that we might reasonably expect to receive from the peer. Due to latency, buffering, + // and implementation differences, we may receive chunks that are no longer in the set of + // requests actually want. This could use a roaring.BSI if the memory use becomes noticeable. + validReceiveChunks map[RequestIndex]int + // Indexed by metadata piece, set to true if posted and pending a + // response. + metadataRequests []bool + sentHaves bitmap.Bitmap + + // Stuff controlled by the remote peer. + peerInterested bool + peerChoking bool + peerRequests map[Request]*peerRequestState + PeerPrefersEncryption bool // as indicated by 'e' field in extension handshake + // The highest possible number of pieces the torrent could have based on + // communication with the peer. Generally only useful until we have the + // torrent info. + peerMinPieces pieceIndex + // Pieces we've accepted chunks for from the peer. + peerTouchedPieces map[pieceIndex]struct{} + peerAllowedFast typedRoaring.Bitmap[pieceIndex] + + PeerMaxRequests maxRequests // Maximum pending requests the peer allows. 
+ + logger log.Logger + } + + PeerSource string + + peerRequestState struct { + data []byte + allocReservation *alloclim.Reservation + } + + PeerRemoteAddr interface { + String() string + } + + peerRequests = orderedBitmap[RequestIndex] +) + +const ( + PeerSourceUtHolepunch = "C" + PeerSourceTracker = "Tr" + PeerSourceIncoming = "I" + PeerSourceDhtGetPeers = "Hg" // Peers we found by searching a DHT. + PeerSourceDhtAnnouncePeer = "Ha" // Peers that were announced to us by a DHT. + PeerSourcePex = "X" + // The peer was given directly, such as through a magnet link. + PeerSourceDirect = "M" +) + +// Returns the Torrent a Peer belongs to. Shouldn't change for the lifetime of the Peer. May be nil +// if we are the receiving end of a connection and the handshake hasn't been received or accepted +// yet. +func (p *Peer) Torrent() *Torrent { + return p.t +} + +func (p *Peer) initRequestState() { + p.requestState.Requests = &peerRequests{} +} + +func (cn *Peer) updateExpectingChunks() { + if cn.expectingChunks() { + if cn.lastStartedExpectingToReceiveChunks.IsZero() { + cn.lastStartedExpectingToReceiveChunks = time.Now() + } + } else { + if !cn.lastStartedExpectingToReceiveChunks.IsZero() { + cn.cumulativeExpectedToReceiveChunks += time.Since(cn.lastStartedExpectingToReceiveChunks) + cn.lastStartedExpectingToReceiveChunks = time.Time{} + } + } +} + +func (cn *Peer) expectingChunks() bool { + if cn.requestState.Requests.IsEmpty() { + return false + } + if !cn.requestState.Interested { + return false + } + if !cn.peerChoking { + return true + } + haveAllowedFastRequests := false + cn.peerAllowedFast.Iterate(func(i pieceIndex) bool { + haveAllowedFastRequests = roaringBitmapRangeCardinality[RequestIndex]( + cn.requestState.Requests, + cn.t.pieceRequestIndexOffset(i), + cn.t.pieceRequestIndexOffset(i+1), + ) == 0 + return !haveAllowedFastRequests + }) + return haveAllowedFastRequests +} + +func (cn *Peer) remoteChokingPiece(piece pieceIndex) bool { + return cn.peerChoking && !cn.peerAllowedFast.Contains(piece) +} + +func (cn *Peer) cumInterest() time.Duration { + ret := cn.priorInterest + if cn.requestState.Interested { + ret += time.Since(cn.lastBecameInterested) + } + return ret +} + +func (cn *Peer) locker() *lockWithDeferreds { + return cn.t.cl.locker() +} + +func (cn *PeerConn) supportsExtension(ext pp.ExtensionName) bool { + _, ok := cn.PeerExtensionIDs[ext] + return ok +} + +// The best guess at number of pieces in the torrent for this peer. +func (cn *Peer) bestPeerNumPieces() pieceIndex { + if cn.t.haveInfo() { + return cn.t.numPieces() + } + return cn.peerMinPieces +} + +func (cn *Peer) completedString() string { + have := pieceIndex(cn.peerPieces().GetCardinality()) + if all, _ := cn.peerHasAllPieces(); all { + have = cn.bestPeerNumPieces() + } + return fmt.Sprintf("%d/%d", have, cn.bestPeerNumPieces()) +} + +func eventAgeString(t time.Time) string { + if t.IsZero() { + return "never" + } + return fmt.Sprintf("%.2fs ago", time.Since(t).Seconds()) +} + +// Inspired by https://github.com/transmission/transmission/wiki/Peer-Status-Text. 
+func (cn *Peer) statusFlags() (ret string) { + c := func(b byte) { + ret += string([]byte{b}) + } + if cn.requestState.Interested { + c('i') + } + if cn.choking { + c('c') + } + c('-') + ret += cn.connectionFlags() + c('-') + if cn.peerInterested { + c('i') + } + if cn.peerChoking { + c('c') + } + return +} + +func (cn *Peer) downloadRate() float64 { + num := cn._stats.BytesReadUsefulData.Int64() + if num == 0 { + return 0 + } + return float64(num) / cn.totalExpectingTime().Seconds() +} + +func (p *Peer) DownloadRate() float64 { + p.locker().RLock() + defer p.locker().RUnlock() + + return p.downloadRate() +} + +func (cn *Peer) iterContiguousPieceRequests(f func(piece pieceIndex, count int)) { + var last Option[pieceIndex] + var count int + next := func(item Option[pieceIndex]) { + if item == last { + count++ + } else { + if count != 0 { + f(last.Value, count) + } + last = item + count = 1 + } + } + cn.requestState.Requests.Iterate(func(requestIndex request_strategy.RequestIndex) bool { + next(Some(cn.t.pieceIndexOfRequestIndex(requestIndex))) + return true + }) + next(None[pieceIndex]()) +} + +func (cn *Peer) writeStatus(w io.Writer) { + // \t isn't preserved in
+	// <pre> blocks?
+	if cn.closed.IsSet() {
+		fmt.Fprint(w, "CLOSED: ")
+	}
+	fmt.Fprintln(w, strings.Join(cn.peerImplStatusLines(), "\n"))
+	prio, err := cn.peerPriority()
+	prioStr := fmt.Sprintf("%08x", prio)
+	if err != nil {
+		prioStr += ": " + err.Error()
+	}
+	fmt.Fprintf(w, "bep40-prio: %v\n", prioStr)
+	fmt.Fprintf(w, "last msg: %s, connected: %s, last helpful: %s, itime: %s, etime: %s\n",
+		eventAgeString(cn.lastMessageReceived),
+		eventAgeString(cn.completedHandshake),
+		eventAgeString(cn.lastHelpful()),
+		cn.cumInterest(),
+		cn.totalExpectingTime(),
+	)
+	fmt.Fprintf(w,
+		"%s completed, %d pieces touched, good chunks: %v/%v:%v reqq: %d+%v/(%d/%d):%d/%d, flags: %s, dr: %.1f KiB/s\n",
+		cn.completedString(),
+		len(cn.peerTouchedPieces),
+		&cn._stats.ChunksReadUseful,
+		&cn._stats.ChunksRead,
+		&cn._stats.ChunksWritten,
+		cn.requestState.Requests.GetCardinality(),
+		cn.requestState.Cancelled.GetCardinality(),
+		cn.nominalMaxRequests(),
+		cn.PeerMaxRequests,
+		len(cn.peerRequests),
+		localClientReqq,
+		cn.statusFlags(),
+		cn.downloadRate()/(1<<10),
+	)
+	fmt.Fprintf(w, "requested pieces:")
+	cn.iterContiguousPieceRequests(func(piece pieceIndex, count int) {
+		fmt.Fprintf(w, " %v(%v)", piece, count)
+	})
+	fmt.Fprintf(w, "\n")
+}
+
+func (p *Peer) close() {
+	if !p.closed.Set() {
+		return
+	}
+	if p.updateRequestsTimer != nil {
+		p.updateRequestsTimer.Stop()
+	}
+	for _, prs := range p.peerRequests {
+		prs.allocReservation.Drop()
+	}
+	p.peerImpl.onClose()
+	if p.t != nil {
+		p.t.decPeerPieceAvailability(p)
+	}
+	for _, f := range p.callbacks.PeerClosed {
+		f(p)
+	}
+}
+
+// Returns whether the peer definitely has a piece, for the purposes of requesting. It's not
+// sufficient that we merely think they have everything (all=true); known must be true too.
+func (cn *Peer) peerHasPiece(piece pieceIndex) bool {
+	if all, known := cn.peerHasAllPieces(); all && known {
+		return true
+	}
+	return cn.peerPieces().ContainsInt(piece)
+}
+
+// 64KiB, but temporarily less to work around an issue with WebRTC. TODO: Update when
+// https://github.com/pion/datachannel/issues/59 is fixed.
+const (
+	writeBufferHighWaterLen = 1 << 15
+	writeBufferLowWaterLen  = writeBufferHighWaterLen / 2
+)
+
+var (
+	interestedMsgLen = len(pp.Message{Type: pp.Interested}.MustMarshalBinary())
+	requestMsgLen    = len(pp.Message{Type: pp.Request}.MustMarshalBinary())
+	// This is the maximum request count that could fit in the write buffer if it's at or below the
+	// low water mark when we run maybeUpdateActualRequestState.
+	maxLocalToRemoteRequests = (writeBufferHighWaterLen - writeBufferLowWaterLen - interestedMsgLen) / requestMsgLen
+)
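+
+// Sanity-checking the constant above (assuming the standard wire format of a
+// 4-byte length prefix plus payload): an interested message is 4+1 = 5 bytes
+// and a request is 4+1+12 = 17 bytes, so maxLocalToRemoteRequests works out
+// to (32768 - 16384 - 5) / 17 = 963.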
+
+// The actual value to use as the maximum outbound requests.
+func (cn *Peer) nominalMaxRequests() maxRequests {
+	return maxInt(1, minInt(cn.PeerMaxRequests, cn.peakRequests*2, maxLocalToRemoteRequests))
+}
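+
+// For example, a peer advertising reqq=250 with peakRequests=10 gets
+// min(250, 20, maxLocalToRemoteRequests) = 20, and the outer maxInt keeps at
+// least one request in flight even when the other terms reach zero.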
+
+func (cn *Peer) totalExpectingTime() (ret time.Duration) {
+	ret = cn.cumulativeExpectedToReceiveChunks
+	if !cn.lastStartedExpectingToReceiveChunks.IsZero() {
+		ret += time.Since(cn.lastStartedExpectingToReceiveChunks)
+	}
+	return
+}
+
+func (cn *Peer) setInterested(interested bool) bool {
+	if cn.requestState.Interested == interested {
+		return true
+	}
+	cn.requestState.Interested = interested
+	if interested {
+		cn.lastBecameInterested = time.Now()
+	} else if !cn.lastBecameInterested.IsZero() {
+		cn.priorInterest += time.Since(cn.lastBecameInterested)
+	}
+	cn.updateExpectingChunks()
+	// log.Printf("%p: setting interest: %v", cn, interested)
+	return cn.writeInterested(interested)
+}
+
+// The function takes a message to be sent, and returns true if more messages
+// are okay.
+type messageWriter func(pp.Message) bool
+
+// This function seems to be used only by Peer.request. It's all logic checks, so maybe we can no-op it
+// when we want to go fast.
+func (cn *Peer) shouldRequest(r RequestIndex) error {
+	err := cn.t.checkValidReceiveChunk(cn.t.requestIndexToRequest(r))
+	if err != nil {
+		return err
+	}
+	pi := cn.t.pieceIndexOfRequestIndex(r)
+	if cn.requestState.Cancelled.Contains(r) {
+		return errors.New("request is cancelled and waiting acknowledgement")
+	}
+	if !cn.peerHasPiece(pi) {
+		return errors.New("requesting piece peer doesn't have")
+	}
+	if !cn.t.peerIsActive(cn) {
+		panic("requesting but not in active conns")
+	}
+	if cn.closed.IsSet() {
+		panic("requesting when connection is closed")
+	}
+	if cn.t.hashingPiece(pi) {
+		panic("piece is being hashed")
+	}
+	if cn.t.pieceQueuedForHash(pi) {
+		panic("piece is queued for hash")
+	}
+	if cn.peerChoking && !cn.peerAllowedFast.Contains(pi) {
+		// This could occur if we made a request with the fast extension, and then got choked and
+		// haven't had the request rejected yet.
+		if !cn.requestState.Requests.Contains(r) {
+			panic("peer choking and piece not allowed fast")
+		}
+	}
+	return nil
+}
+
+func (cn *Peer) mustRequest(r RequestIndex) bool {
+	more, err := cn.request(r)
+	if err != nil {
+		panic(err)
+	}
+	return more
+}
+
+func (cn *Peer) request(r RequestIndex) (more bool, err error) {
+	if err := cn.shouldRequest(r); err != nil {
+		panic(err)
+	}
+	if cn.requestState.Requests.Contains(r) {
+		return true, nil
+	}
+	if maxRequests(cn.requestState.Requests.GetCardinality()) >= cn.nominalMaxRequests() {
+		return true, errors.New("too many outstanding requests")
+	}
+	cn.requestState.Requests.Add(r)
+	if cn.validReceiveChunks == nil {
+		cn.validReceiveChunks = make(map[RequestIndex]int)
+	}
+	cn.validReceiveChunks[r]++
+	cn.t.requestState[r] = requestState{
+		peer: cn,
+		when: time.Now(),
+	}
+	cn.updateExpectingChunks()
+	ppReq := cn.t.requestIndexToRequest(r)
+	for _, f := range cn.callbacks.SentRequest {
+		f(PeerRequestEvent{cn, ppReq})
+	}
+	return cn.peerImpl._request(ppReq), nil
+}
+
+func (me *Peer) cancel(r RequestIndex) {
+	if !me.deleteRequest(r) {
+		panic("request not existing should have been guarded")
+	}
+	if me._cancel(r) {
+		// Record that we expect to get a cancel ack.
+		if !me.requestState.Cancelled.CheckedAdd(r) {
+			panic("request already cancelled")
+		}
+	}
+	me.decPeakRequests()
+	if me.isLowOnRequests() {
+		me.updateRequests("Peer.cancel")
+	}
+}
+
+// Sets a reason to update requests, and if there wasn't already one, handles it.
+func (cn *Peer) updateRequests(reason string) {
+	if cn.needRequestUpdate != "" {
+		return
+	}
+	cn.needRequestUpdate = reason
+	cn.handleUpdateRequests()
+}
+
+// Emits the indices in the Bitmaps bms in order, never repeating any index.
+// skip is mutated during execution, and its initial values will never be
+// emitted.
+func iterBitmapsDistinct(skip *bitmap.Bitmap, bms ...bitmap.Bitmap) iter.Func {
+	return func(cb iter.Callback) {
+		for _, bm := range bms {
+			if !iter.All(
+				func(_i interface{}) bool {
+					i := _i.(int)
+					if skip.Contains(bitmap.BitIndex(i)) {
+						return true
+					}
+					skip.Add(bitmap.BitIndex(i))
+					return cb(i)
+				},
+				bm.Iter,
+			) {
+				return
+			}
+		}
+	}
+}
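+
+// For instance, with bms = [{1, 2}, {2, 3}] and skip initially {2}, the
+// iterator emits 1 then 3: 2 is suppressed everywhere, and each emitted index
+// is added to skip so later bitmaps can't repeat it.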
+
+// After handshake, we know what Torrent and Client stats to include for a
+// connection.
+func (cn *Peer) postHandshakeStats(f func(*ConnStats)) {
+	t := cn.t
+	f(&t.stats)
+	f(&t.cl.connStats)
+}
+
+// All ConnStats that include this connection. Some objects are not known
+// until the handshake is complete, after which it's expected to reconcile the
+// differences.
+func (cn *Peer) allStats(f func(*ConnStats)) {
+	f(&cn._stats)
+	if cn.reconciledHandshakeStats {
+		cn.postHandshakeStats(f)
+	}
+}
+
+func (cn *Peer) readBytes(n int64) {
+	cn.allStats(add(n, func(cs *ConnStats) *Count { return &cs.BytesRead }))
+}
+
+func (c *Peer) lastHelpful() (ret time.Time) {
+	ret = c.lastUsefulChunkReceived
+	if c.t.seeding() && c.lastChunkSent.After(ret) {
+		ret = c.lastChunkSent
+	}
+	return
+}
+
+// Returns whether any part of the chunk would lie outside a piece of the given length.
+func chunkOverflowsPiece(cs ChunkSpec, pieceLength pp.Integer) bool {
+	switch {
+	default:
+		return false
+	case cs.Begin+cs.Length > pieceLength:
+	// Check for integer overflow
+	case cs.Begin > pp.IntegerMax-cs.Length:
+	}
+	return true
+}
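+
+// For example, with pieceLength = 32768, the chunk {Begin: 16384, Length:
+// 16384} fits exactly while {Begin: 16384, Length: 16385} overflows. The
+// second case matters because Begin and Length are uint32s: {Begin:
+// IntegerMax, Length: 2} would wrap Begin+Length around to 1 and slip past
+// the first comparison.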
+
+func runSafeExtraneous(f func()) {
+	if true {
+		go f()
+	} else {
+		f()
+	}
+}
+
+// Returns true if it was valid to reject the request.
+func (c *Peer) remoteRejectedRequest(r RequestIndex) bool {
+	if c.deleteRequest(r) {
+		c.decPeakRequests()
+	} else if !c.requestState.Cancelled.CheckedRemove(r) {
+		return false
+	}
+	if c.isLowOnRequests() {
+		c.updateRequests("Peer.remoteRejectedRequest")
+	}
+	c.decExpectedChunkReceive(r)
+	return true
+}
+
+func (c *Peer) decExpectedChunkReceive(r RequestIndex) {
+	count := c.validReceiveChunks[r]
+	if count == 1 {
+		delete(c.validReceiveChunks, r)
+	} else if count > 1 {
+		c.validReceiveChunks[r] = count - 1
+	} else {
+		panic(r)
+	}
+}
+
+func (c *Peer) doChunkReadStats(size int64) {
+	c.allStats(func(cs *ConnStats) { cs.receivedChunk(size) })
+}
+
+// Handle a received chunk from a peer.
+func (c *Peer) receiveChunk(msg *pp.Message) error {
+	chunksReceived.Add("total", 1)
+
+	ppReq := newRequestFromMessage(msg)
+	t := c.t
+	err := t.checkValidReceiveChunk(ppReq)
+	if err != nil {
+		err = log.WithLevel(log.Warning, err)
+		return err
+	}
+	req := c.t.requestIndexFromRequest(ppReq)
+
+	if c.bannableAddr.Ok {
+		t.smartBanCache.RecordBlock(c.bannableAddr.Value, req, msg.Piece)
+	}
+
+	if c.peerChoking {
+		chunksReceived.Add("while choked", 1)
+	}
+
+	if c.validReceiveChunks[req] <= 0 {
+		chunksReceived.Add("unexpected", 1)
+		return errors.New("received unexpected chunk")
+	}
+	c.decExpectedChunkReceive(req)
+
+	if c.peerChoking && c.peerAllowedFast.Contains(pieceIndex(ppReq.Index)) {
+		chunksReceived.Add("due to allowed fast", 1)
+	}
+
+	// The request needs to be deleted immediately to prevent cancels occurring asynchronously when
+	// we have actually already received the piece, while we have the Client unlocked to write the
+	// data out.
+	intended := false
+	{
+		if c.requestState.Requests.Contains(req) {
+			for _, f := range c.callbacks.ReceivedRequested {
+				f(PeerMessageEvent{c, msg})
+			}
+		}
+		// Request has been satisfied.
+		if c.deleteRequest(req) || c.requestState.Cancelled.CheckedRemove(req) {
+			intended = true
+			if !c.peerChoking {
+				c._chunksReceivedWhileExpecting++
+			}
+			if c.isLowOnRequests() {
+				c.updateRequests("Peer.receiveChunk deleted request")
+			}
+		} else {
+			chunksReceived.Add("unintended", 1)
+		}
+	}
+
+	cl := t.cl
+
+	// Do we actually want this chunk?
+	if t.haveChunk(ppReq) {
+		// panic(fmt.Sprintf("%+v", ppReq))
+		chunksReceived.Add("redundant", 1)
+		c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadWasted }))
+		return nil
+	}
+
+	piece := &t.pieces[ppReq.Index]
+
+	c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadUseful }))
+	c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulData }))
+	if intended {
+		c.piecesReceivedSinceLastRequestUpdate++
+		c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulIntendedData }))
+	}
+	for _, f := range c.t.cl.config.Callbacks.ReceivedUsefulData {
+		f(ReceivedUsefulDataEvent{c, msg})
+	}
+	c.lastUsefulChunkReceived = time.Now()
+
+	// Need to record that it hasn't been written yet, before we attempt to do
+	// anything with it.
+	piece.incrementPendingWrites()
+	// Record that we have the chunk, so we aren't trying to download it while
+	// waiting for it to be written to storage.
+	piece.unpendChunkIndex(chunkIndexFromChunkSpec(ppReq.ChunkSpec, t.chunkSize))
+
+	// Cancel pending requests for this chunk from *other* peers.
+	if p := t.requestingPeer(req); p != nil {
+		if p == c {
+			panic("should not be pending request from conn that just received it")
+		}
+		p.cancel(req)
+	}
+
+	err = func() error {
+		cl.unlock()
+		defer cl.lock()
+		concurrentChunkWrites.Add(1)
+		defer concurrentChunkWrites.Add(-1)
+		// Write the chunk out. Note that the upper bound on chunk writing concurrency will be the
+		// number of connections. We write inline with receiving the chunk (with this lock dance),
+		// because we want to handle errors synchronously and I haven't thought of a nice way to
+		// defer any concurrency to the storage and have that notify the client of errors. TODO: Do
+		// that instead.
+		return t.writeChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
+	}()
+
+	piece.decrementPendingWrites()
+
+	if err != nil {
+		c.logger.WithDefaultLevel(log.Error).Printf("writing received chunk %v: %v", req, err)
+		t.pendRequest(req)
+		// Necessary to pass TestReceiveChunkStorageFailureSeederFastExtensionDisabled. I think a
+		// request update runs while we're writing the chunk that just failed. Then we never do a
+		// fresh update after pending the failed request.
+		c.updateRequests("Peer.receiveChunk error writing chunk")
+		t.onWriteChunkErr(err)
+		return nil
+	}
+
+	c.onDirtiedPiece(pieceIndex(ppReq.Index))
+
+	// We need to ensure the piece is only queued once, so only the last chunk writer gets this job.
+	if t.pieceAllDirty(pieceIndex(ppReq.Index)) && piece.pendingWrites == 0 {
+		t.queuePieceCheck(pieceIndex(ppReq.Index))
+		// We don't pend all chunks here anymore because we don't want code dependent on the dirty
+		// chunk status (such as the haveChunk call above) to have to check all the various other
+		// piece states like queued for hash, hashing etc. This does mean that we need to be sure
+		// that chunk pieces are pended at an appropriate time later however.
+	}
+
+	cl.event.Broadcast()
+	// We do this because we've written a chunk, and may change PieceState.Partial.
+	t.publishPieceChange(pieceIndex(ppReq.Index))
+
+	return nil
+}
+
+func (c *Peer) onDirtiedPiece(piece pieceIndex) {
+	if c.peerTouchedPieces == nil {
+		c.peerTouchedPieces = make(map[pieceIndex]struct{})
+	}
+	c.peerTouchedPieces[piece] = struct{}{}
+	ds := &c.t.pieces[piece].dirtiers
+	if *ds == nil {
+		*ds = make(map[*Peer]struct{})
+	}
+	(*ds)[c] = struct{}{}
+}
+
+func (cn *Peer) netGoodPiecesDirtied() int64 {
+	return cn._stats.PiecesDirtiedGood.Int64() - cn._stats.PiecesDirtiedBad.Int64()
+}
+
+func (c *Peer) peerHasWantedPieces() bool {
+	if all, _ := c.peerHasAllPieces(); all {
+		return !c.t.haveAllPieces() && !c.t._pendingPieces.IsEmpty()
+	}
+	if !c.t.haveInfo() {
+		return !c.peerPieces().IsEmpty()
+	}
+	return c.peerPieces().Intersects(&c.t._pendingPieces)
+}
+
+// Returns true if an outstanding request is removed. Cancelled requests should be handled
+// separately.
+func (c *Peer) deleteRequest(r RequestIndex) bool {
+	if !c.requestState.Requests.CheckedRemove(r) {
+		return false
+	}
+	for _, f := range c.callbacks.DeletedRequest {
+		f(PeerRequestEvent{c, c.t.requestIndexToRequest(r)})
+	}
+	c.updateExpectingChunks()
+	if c.t.requestingPeer(r) != c {
+		panic("only one peer should have a given request at a time")
+	}
+	delete(c.t.requestState, r)
+	// c.t.iterPeers(func(p *Peer) {
+	// 	if p.isLowOnRequests() {
+	// 		p.updateRequests("Peer.deleteRequest")
+	// 	}
+	// })
+	return true
+}
+
+func (c *Peer) deleteAllRequests(reason string) {
+	if c.requestState.Requests.IsEmpty() {
+		return
+	}
+	c.requestState.Requests.IterateSnapshot(func(x RequestIndex) bool {
+		if !c.deleteRequest(x) {
+			panic("request should exist")
+		}
+		return true
+	})
+	c.assertNoRequests()
+	c.t.iterPeers(func(p *Peer) {
+		if p.isLowOnRequests() {
+			p.updateRequests(reason)
+		}
+	})
+	return
+}
+
+func (c *Peer) assertNoRequests() {
+	if !c.requestState.Requests.IsEmpty() {
+		panic(c.requestState.Requests.GetCardinality())
+	}
+}
+
+func (c *Peer) cancelAllRequests() {
+	c.requestState.Requests.IterateSnapshot(func(x RequestIndex) bool {
+		c.cancel(x)
+		return true
+	})
+	c.assertNoRequests()
+	return
+}
+
+func (c *Peer) peerPriority() (peerPriority, error) {
+	return bep40Priority(c.remoteIpPort(), c.localPublicAddr)
+}
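+
+// Per BEP 40 (canonical peer priority), both endpoints hash the masked,
+// ordered address pair with crc32-c, so each side derives the same priority
+// for the connection, which can then inform which connections to keep.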
+
+func (c *Peer) remoteIp() net.IP {
+	host, _, _ := net.SplitHostPort(c.RemoteAddr.String())
+	return net.ParseIP(host)
+}
+
+func (c *Peer) remoteIpPort() IpPort {
+	ipa, _ := tryIpPortFromNetAddr(c.RemoteAddr)
+	return IpPort{ipa.IP, uint16(ipa.Port)}
+}
+
+func (c *Peer) trust() connectionTrust {
+	return connectionTrust{c.trusted, c.netGoodPiecesDirtied()}
+}
+
+type connectionTrust struct {
+	Implicit            bool
+	NetGoodPiecesDirted int64
+}
+
+func (l connectionTrust) Less(r connectionTrust) bool {
+	return multiless.New().Bool(l.Implicit, r.Implicit).Int64(l.NetGoodPiecesDirted, r.NetGoodPiecesDirted).Less()
+}
+
+// Returns a new Bitmap that includes bits for all pieces the peer could have based on their claims.
+func (cn *Peer) newPeerPieces() *roaring.Bitmap {
+	// TODO: Can we use copy on write?
+	ret := cn.peerPieces().Clone()
+	if all, _ := cn.peerHasAllPieces(); all {
+		if cn.t.haveInfo() {
+			ret.AddRange(0, bitmap.BitRange(cn.t.numPieces()))
+		} else {
+			ret.AddRange(0, bitmap.ToEnd)
+		}
+	}
+	return ret
+}
+
+func (cn *Peer) stats() *ConnStats {
+	return &cn._stats
+}
+
+func (p *Peer) TryAsPeerConn() (*PeerConn, bool) {
+	pc, ok := p.peerImpl.(*PeerConn)
+	return pc, ok
+}
+
+func (p *Peer) uncancelledRequests() uint64 {
+	return p.requestState.Requests.GetCardinality()
+}
+
+type peerLocalPublicAddr = IpPort
+
+func (p *Peer) isLowOnRequests() bool {
+	return p.requestState.Requests.IsEmpty() && p.requestState.Cancelled.IsEmpty()
+}
+
+func (p *Peer) decPeakRequests() {
+	// // This can occur when peak requests are altered by the update request timer to be lower than
+	// // the actual number of outstanding requests. Let's let it go negative and see what happens. I
+	// // wonder what happens if maxRequests is not signed.
+	// if p.peakRequests < 1 {
+	// 	panic(p.peakRequests)
+	// }
+	p.peakRequests--
+}
diff --git a/deps/github.com/anacrolix/torrent/peer_info.go b/deps/github.com/anacrolix/torrent/peer_info.go
new file mode 100644
index 0000000..e7b1b7c
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_info.go
@@ -0,0 +1,44 @@
+package torrent
+
+import (
+	"github.com/anacrolix/dht/v2/krpc"
+
+	"github.com/anacrolix/torrent/peer_protocol"
+)
+
+// Peer connection info, handed about publicly.
+type PeerInfo struct {
+	Id     [20]byte
+	Addr   PeerRemoteAddr
+	Source PeerSource
+	// Peer is known to support encryption.
+	SupportsEncryption bool
+	peer_protocol.PexPeerFlags
+	// Whether we can ignore poor or bad behaviour from the peer.
+	Trusted bool
+}
+
+func (me PeerInfo) equal(other PeerInfo) bool {
+	return me.Id == other.Id &&
+		me.Addr.String() == other.Addr.String() &&
+		me.Source == other.Source &&
+		me.SupportsEncryption == other.SupportsEncryption &&
+		me.PexPeerFlags == other.PexPeerFlags &&
+		me.Trusted == other.Trusted
+}
+
+// Generate PeerInfo from peer exchange
+func (me *PeerInfo) FromPex(na krpc.NodeAddr, fs peer_protocol.PexPeerFlags) {
+	me.Addr = ipPortAddr{append([]byte(nil), na.IP...), na.Port}
+	me.Source = PeerSourcePex
+	// If they prefer encryption, they must support it.
+	if fs.Get(peer_protocol.PexPrefersEncryption) {
+		me.SupportsEncryption = true
+	}
+	me.PexPeerFlags = fs
+}
+
+func (me PeerInfo) addr() IpPort {
+	ipPort, _ := tryIpPortFromNetAddr(me.Addr)
+	return IpPort{ipPort.IP, uint16(ipPort.Port)}
+}
diff --git a/deps/github.com/anacrolix/torrent/peer_infos.go b/deps/github.com/anacrolix/torrent/peer_infos.go
new file mode 100644
index 0000000..f3da64e
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_infos.go
@@ -0,0 +1,35 @@
+package torrent
+
+import (
+	"github.com/anacrolix/dht/v2/krpc"
+
+	"github.com/anacrolix/torrent/peer_protocol"
+	"github.com/anacrolix/torrent/tracker"
+)
+
+// Helper type used to bulk-manage PeerInfos.
+type peerInfos []PeerInfo
+
+func (me *peerInfos) AppendFromPex(nas []krpc.NodeAddr, fs []peer_protocol.PexPeerFlags) {
+	for i, na := range nas {
+		var p PeerInfo
+		var f peer_protocol.PexPeerFlags
+		if i < len(fs) {
+			f = fs[i]
+		}
+		p.FromPex(na, f)
+		*me = append(*me, p)
+	}
+}
+
+func (ret peerInfos) AppendFromTracker(ps []tracker.Peer) peerInfos {
+	for _, p := range ps {
+		_p := PeerInfo{
+			Addr:   ipPortAddr{p.IP, p.Port},
+			Source: PeerSourceTracker,
+		}
+		copy(_p.Id[:], p.ID)
+		ret = append(ret, _p)
+	}
+	return ret
+}
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/compactip.go b/deps/github.com/anacrolix/torrent/peer_protocol/compactip.go
new file mode 100644
index 0000000..7dddc53
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/compactip.go
@@ -0,0 +1,22 @@
+package peer_protocol
+
+import (
+	"net"
+
+	"github.com/anacrolix/torrent/bencode"
+)
+
+// Marshals to the smallest compact byte representation.
+type CompactIp net.IP
+
+var _ bencode.Marshaler = CompactIp{}
+
+func (me CompactIp) MarshalBencode() ([]byte, error) {
+	return bencode.Marshal(func() []byte {
+		if ip4 := net.IP(me).To4(); ip4 != nil {
+			return ip4
+		} else {
+			return me
+		}
+	}())
+}
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/decoder.go b/deps/github.com/anacrolix/torrent/peer_protocol/decoder.go
new file mode 100644
index 0000000..9dfe125
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/decoder.go
@@ -0,0 +1,137 @@
+package peer_protocol
+
+import (
+	"bufio"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/pkg/errors"
+)
+
+type Decoder struct {
+	R *bufio.Reader
+	// This must return *[]byte where the slices can fit data for piece messages. I think we store
+	// *[]byte in the pool to avoid an extra allocation every time we put the slice back into the
+	// pool. The chunk size should not change for the life of the decoder.
+	Pool      *sync.Pool
+	MaxLength Integer // TODO: Should this include the length header or not?
+}
+
+// io.EOF is returned if the source terminates cleanly on a message boundary.
+func (d *Decoder) Decode(msg *Message) (err error) {
+	var length Integer
+	err = length.Read(d.R)
+	if err != nil {
+		return fmt.Errorf("reading message length: %w", err)
+	}
+	if length > d.MaxLength {
+		return errors.New("message too long")
+	}
+	if length == 0 {
+		msg.Keepalive = true
+		return
+	}
+	r := d.R
+	readByte := func() (byte, error) {
+		length--
+		return d.R.ReadByte()
+	}
+	// From this point onwards, EOF is unexpected
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
+	c, err := readByte()
+	if err != nil {
+		return
+	}
+	msg.Type = MessageType(c)
+	// Can return directly in cases when err is not nil, or length is known to be zero.
+	switch msg.Type {
+	case Choke, Unchoke, Interested, NotInterested, HaveAll, HaveNone:
+	case Have, AllowedFast, Suggest:
+		length -= 4
+		err = msg.Index.Read(r)
+	case Request, Cancel, Reject:
+		for _, data := range []*Integer{&msg.Index, &msg.Begin, &msg.Length} {
+			err = data.Read(r)
+			if err != nil {
+				break
+			}
+		}
+		length -= 12
+	case Bitfield:
+		b := make([]byte, length)
+		_, err = io.ReadFull(r, b)
+		length = 0
+		msg.Bitfield = unmarshalBitfield(b)
+		return
+	case Piece:
+		for _, pi := range []*Integer{&msg.Index, &msg.Begin} {
+			err := pi.Read(r)
+			if err != nil {
+				return err
+			}
+		}
+		length -= 8
+		dataLen := int64(length)
+		if d.Pool == nil {
+			msg.Piece = make([]byte, dataLen)
+		} else {
+			msg.Piece = *d.Pool.Get().(*[]byte)
+			if int64(cap(msg.Piece)) < dataLen {
+				return errors.New("piece data longer than expected")
+			}
+			msg.Piece = msg.Piece[:dataLen]
+		}
+		_, err = io.ReadFull(r, msg.Piece)
+		length = 0
+		return
+	case Extended:
+		var b byte
+		b, err = readByte()
+		if err != nil {
+			break
+		}
+		msg.ExtendedID = ExtensionNumber(b)
+		msg.ExtendedPayload = make([]byte, length)
+		_, err = io.ReadFull(r, msg.ExtendedPayload)
+		length = 0
+		return
+	case Port:
+		err = binary.Read(r, binary.BigEndian, &msg.Port)
+		length -= 2
+	default:
+		err = fmt.Errorf("unknown message type %#v", c)
+	}
+	if err == nil && length != 0 {
+		err = fmt.Errorf("%v unused bytes in message type %v", length, msg.Type)
+	}
+	return
+}
+
+func readByte(r io.Reader) (b byte, err error) {
+	var arr [1]byte
+	n, err := r.Read(arr[:])
+	b = arr[0]
+	if n == 1 {
+		err = nil
+		return
+	}
+	if err == nil {
+		// A zero-byte read with a nil error shouldn't happen here.
+		panic("read no bytes and no error")
+	}
+	return
+}
+
+func unmarshalBitfield(b []byte) (bf []bool) {
+	for _, c := range b {
+		for i := 7; i >= 0; i-- {
+			bf = append(bf, (c>>uint(i))&1 == 1)
+		}
+	}
+	return
+}
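+
+// Bits are taken most-significant first, matching the BitTorrent bitfield
+// encoding: the single byte 0xa0 unmarshals to
+// [true, false, true, false, false, false, false, false].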
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/decoder_test.go b/deps/github.com/anacrolix/torrent/peer_protocol/decoder_test.go
new file mode 100644
index 0000000..39b54c1
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/decoder_test.go
@@ -0,0 +1,93 @@
+package peer_protocol
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"sync"
+	"testing"
+
+	qt "github.com/frankban/quicktest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func BenchmarkDecodePieces(t *testing.B) {
+	const pieceLen = 1 << 14
+	inputMsg := Message{
+		Type:  Piece,
+		Index: 0,
+		Begin: 1,
+		Piece: make([]byte, pieceLen),
+	}
+	b := inputMsg.MustMarshalBinary()
+	t.SetBytes(int64(len(b)))
+	var r bytes.Reader
+	// Try to somewhat emulate what torrent.Client would do. But the goal is to get decoding as fast
+	// as possible and let consumers apply their own adjustments.
+	d := Decoder{
+		R:         bufio.NewReaderSize(&r, 1<<10),
+		MaxLength: 1 << 18,
+		Pool: &sync.Pool{
+			New: func() interface{} {
+				b := make([]byte, pieceLen)
+				return &b
+			},
+		},
+	}
+	c := qt.New(t)
+	t.ReportAllocs()
+	t.ResetTimer()
+	for i := 0; i < t.N; i += 1 {
+		r.Reset(b)
+		var msg Message
+		err := d.Decode(&msg)
+		if err != nil {
+			t.Fatal(err)
+		}
+		// This is very expensive, and should be discovered in tests rather than a benchmark.
+		if false {
+			c.Assert(msg, qt.DeepEquals, inputMsg)
+		}
+		// WWJD
+		d.Pool.Put(&msg.Piece)
+	}
+}
+
+func TestDecodeShortPieceEOF(t *testing.T) {
+	r, w := io.Pipe()
+	go func() {
+		w.Write(Message{Type: Piece, Piece: make([]byte, 1)}.MustMarshalBinary())
+		w.Close()
+	}()
+	d := Decoder{
+		R:         bufio.NewReader(r),
+		MaxLength: 1 << 15,
+		Pool: &sync.Pool{New: func() interface{} {
+			b := make([]byte, 2)
+			return &b
+		}},
+	}
+	var m Message
+	require.NoError(t, d.Decode(&m))
+	assert.Len(t, m.Piece, 1)
+	assert.ErrorIs(t, d.Decode(&m), io.EOF)
+}
+
+func TestDecodeOverlongPiece(t *testing.T) {
+	r, w := io.Pipe()
+	go func() {
+		w.Write(Message{Type: Piece, Piece: make([]byte, 3)}.MustMarshalBinary())
+		w.Close()
+	}()
+	d := Decoder{
+		R:         bufio.NewReader(r),
+		MaxLength: 1 << 15,
+		Pool: &sync.Pool{New: func() interface{} {
+			b := make([]byte, 2)
+			return &b
+		}},
+	}
+	var m Message
+	require.Error(t, d.Decode(&m))
+}
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/extended.go b/deps/github.com/anacrolix/torrent/peer_protocol/extended.go
new file mode 100644
index 0000000..8bc5181
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/extended.go
@@ -0,0 +1,40 @@
+package peer_protocol
+
+import (
+	"net"
+)
+
+// http://www.bittorrent.org/beps/bep_0010.html
+type (
+	ExtendedHandshakeMessage struct {
+		M    map[ExtensionName]ExtensionNumber `bencode:"m"`
+		V    string                            `bencode:"v,omitempty"`
+		Reqq int                               `bencode:"reqq,omitempty"`
+		// The only mention of this I can find is in https://www.bittorrent.org/beps/bep_0011.html
+		// for bit 0x01.
+		Encryption bool `bencode:"e"`
+		// BEP 9
+		MetadataSize int `bencode:"metadata_size,omitempty"`
+		// The local client port. It would be redundant for the receiving side of
+		// a connection to send this.
+		Port   int       `bencode:"p,omitempty"`
+		YourIp CompactIp `bencode:"yourip,omitempty"`
+		Ipv4   CompactIp `bencode:"ipv4,omitempty"`
+		Ipv6   net.IP    `bencode:"ipv6,omitempty"`
+	}
+
+	ExtensionName   string
+	ExtensionNumber int
+)
+
+const (
+	// http://www.bittorrent.org/beps/bep_0011.html
+	ExtensionNamePex ExtensionName = "ut_pex"
+
+	ExtensionDeleteNumber ExtensionNumber = 0
+)
+
+func (me *ExtensionNumber) UnmarshalBinary(b []byte) error {
+	*me = ExtensionNumber(b[0])
+	return nil
+}
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/fuzz_test.go b/deps/github.com/anacrolix/torrent/peer_protocol/fuzz_test.go
new file mode 100644
index 0000000..5241504
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/fuzz_test.go
@@ -0,0 +1,65 @@
+//go:build go1.18
+// +build go1.18
+
+package peer_protocol
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"io"
+	"testing"
+
+	qt "github.com/frankban/quicktest"
+)
+
+func FuzzDecoder(f *testing.F) {
+	f.Add([]byte("\x00\x00\x00\x00"))
+	f.Add([]byte("\x00\x00\x00\x01\x00"))
+	f.Add([]byte("\x00\x00\x00\x03\x14\x00"))
+	f.Add([]byte("\x00\x00\x00\x01\x07"))
+	f.Fuzz(func(t *testing.T, b []byte) {
+		t.Logf("%q", b)
+		c := qt.New(t)
+		d := Decoder{
+			R:         bufio.NewReader(bytes.NewReader(b)),
+			MaxLength: 0x100,
+		}
+		var ms []Message
+		for {
+			var m Message
+			err := d.Decode(&m)
+			t.Log(err)
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			if err == nil {
+				c.Assert(m, qt.Not(qt.Equals), Message{})
+				ms = append(ms, m)
+				continue
+			} else {
+				t.Skip(err)
+			}
+		}
+		var buf bytes.Buffer
+		for _, m := range ms {
+			buf.Write(m.MustMarshalBinary())
+		}
+		if len(b) == 0 {
+			c.Assert(buf.Bytes(), qt.HasLen, 0)
+		} else {
+			c.Assert(buf.Bytes(), qt.DeepEquals, b)
+		}
+	})
+}
+
+func FuzzMessageMarshalBinary(f *testing.F) {
+	f.Fuzz(func(t *testing.T, b []byte) {
+		var m Message
+		if err := m.UnmarshalBinary(b); err != nil {
+			t.Skip(err)
+		}
+		b0 := m.MustMarshalBinary()
+		qt.Assert(t, b0, qt.DeepEquals, b)
+	})
+}
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/handshake.go b/deps/github.com/anacrolix/torrent/peer_protocol/handshake.go
new file mode 100644
index 0000000..a6f648c
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/handshake.go
@@ -0,0 +1,188 @@
+package peer_protocol
+
+import (
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"math/bits"
+	"strconv"
+	"strings"
+	"unsafe"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+type ExtensionBit uint
+
+// https://www.bittorrent.org/beps/bep_0004.html
+// https://wiki.theory.org/BitTorrentSpecification.html#Reserved_Bytes
+const (
+	ExtensionBitDht                          = 0 // http://www.bittorrent.org/beps/bep_0005.html
+	ExtensionBitFast                         = 2 // http://www.bittorrent.org/beps/bep_0006.html
+	ExtensionBitV2                           = 7 // "Hybrid torrent legacy to v2 upgrade"
+	ExtensionBitAzureusExtensionNegotiation1 = 16
+	ExtensionBitAzureusExtensionNegotiation2 = 17
+	// LibTorrent Extension Protocol, http://www.bittorrent.org/beps/bep_0010.html
+	ExtensionBitLtep = 20
+	// https://wiki.theory.org/BitTorrent_Location-aware_Protocol_1
+	ExtensionBitLocationAwareProtocol    = 43
+	ExtensionBitAzureusMessagingProtocol = 63 // https://www.bittorrent.org/beps/bep_0004.html
+)
+
+func handshakeWriter(w io.Writer, bb <-chan []byte, done chan<- error) {
+	var err error
+	for b := range bb {
+		_, err = w.Write(b)
+		if err != nil {
+			break
+		}
+	}
+	done <- err
+}
+
+type (
+	PeerExtensionBits [8]byte
+)
+
+var bitTags = []struct {
+	bit ExtensionBit
+	tag string
+}{
+	// Ordered by their bit position left to right.
+	{ExtensionBitAzureusMessagingProtocol, "amp"},
+	{ExtensionBitLocationAwareProtocol, "loc"},
+	{ExtensionBitLtep, "ltep"},
+	{ExtensionBitAzureusExtensionNegotiation2, "azen2"},
+	{ExtensionBitAzureusExtensionNegotiation1, "azen1"},
+	{ExtensionBitV2, "v2"},
+	{ExtensionBitFast, "fast"},
+	{ExtensionBitDht, "dht"},
+}
+
+func (pex PeerExtensionBits) String() string {
+	pexHex := hex.EncodeToString(pex[:])
+	tags := make([]string, 0, len(bitTags)+1)
+	for _, bitTag := range bitTags {
+		if pex.GetBit(bitTag.bit) {
+			tags = append(tags, bitTag.tag)
+			pex.SetBit(bitTag.bit, false)
+		}
+	}
+	unknownCount := bits.OnesCount64(*(*uint64)((unsafe.Pointer(&pex[0]))))
+	if unknownCount != 0 {
+		tags = append(tags, fmt.Sprintf("%v unknown", unknownCount))
+	}
+	return fmt.Sprintf("%v (%s)", pexHex, strings.Join(tags, ", "))
+}
+
+func NewPeerExtensionBytes(bits ...ExtensionBit) (ret PeerExtensionBits) {
+	for _, b := range bits {
+		ret.SetBit(b, true)
+	}
+	return
+}
+
+func (pex PeerExtensionBits) SupportsExtended() bool {
+	return pex.GetBit(ExtensionBitLtep)
+}
+
+func (pex PeerExtensionBits) SupportsDHT() bool {
+	return pex.GetBit(ExtensionBitDht)
+}
+
+func (pex PeerExtensionBits) SupportsFast() bool {
+	return pex.GetBit(ExtensionBitFast)
+}
+
+func (pex *PeerExtensionBits) SetBit(bit ExtensionBit, on bool) {
+	if on {
+		pex[7-bit/8] |= 1 << (bit % 8)
+	} else {
+		pex[7-bit/8] &^= 1 << (bit % 8)
+	}
+}
+
+func (pex PeerExtensionBits) GetBit(bit ExtensionBit) bool {
+	return pex[7-bit/8]&(1<<(bit%8)) != 0
+}
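+
+// The 7-bit/8 indexing treats the 8 reserved handshake bytes as one
+// big-endian 64-bit field, with bit 0 as the least significant bit of the
+// last byte. For example ExtensionBitLtep (20) lands in pex[7-20/8] = pex[5]
+// with mask 1<<(20%8) = 0x10, matching BEP 10's reserved[5] & 0x10, and
+// ExtensionBitDht (0) is the familiar trailing 0x01 byte.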
+
+type HandshakeResult struct {
+	PeerExtensionBits
+	PeerID [20]byte
+	metainfo.Hash
+}
+
+// ih is nil if we expect the peer to declare the InfoHash, such as when the peer initiated the
+// connection. Returns res describing the successful handshake, and a non-nil err if there was an
+// unexpected condition other than the peer simply abandoning the handshake.
+func Handshake(
+	sock io.ReadWriter, ih *metainfo.Hash, peerID [20]byte, extensions PeerExtensionBits,
+) (
+	res HandshakeResult, err error,
+) {
+	// Bytes to be sent to the peer. Should never block the sender.
+	postCh := make(chan []byte, 4)
+	// A single error value sent when the writer completes.
+	writeDone := make(chan error, 1)
+	// Performs writes to the socket and ensures posts don't block.
+	go handshakeWriter(sock, postCh, writeDone)
+
+	defer func() {
+		close(postCh) // Done writing.
+		if err != nil {
+			return
+		}
+		// Wait until writes complete before returning from handshake.
+		err = <-writeDone
+		if err != nil {
+			err = fmt.Errorf("error writing: %w", err)
+		}
+	}()
+
+	post := func(bb []byte) {
+		select {
+		case postCh <- bb:
+		default:
+			panic("mustn't block while posting")
+		}
+	}
+
+	post([]byte(Protocol))
+	post(extensions[:])
+	if ih != nil { // We already know what we want.
+		post(ih[:])
+		post(peerID[:])
+	}
+	var b [68]byte
+	_, err = io.ReadFull(sock, b[:68])
+	if err != nil {
+		return res, fmt.Errorf("while reading: %w", err)
+	}
+	if string(b[:20]) != Protocol {
+		return res, errors.New("unexpected protocol string")
+	}
+
+	copyExact := func(dst, src []byte) {
+		if dstLen, srcLen := uint64(len(dst)), uint64(len(src)); dstLen != srcLen {
+			panic("dst len " + strconv.FormatUint(dstLen, 10) + " != src len " + strconv.FormatUint(srcLen, 10))
+		}
+		copy(dst, src)
+	}
+	copyExact(res.PeerExtensionBits[:], b[20:28])
+	copyExact(res.Hash[:], b[28:48])
+	copyExact(res.PeerID[:], b[48:68])
+	// peerExtensions.Add(res.PeerExtensionBits.String(), 1)
+
+	// TODO: Maybe we can just drop peers here if we're not interested. This
+	// could prevent them trying to reconnect, falsely believing there was
+	// just a problem.
+	if ih == nil { // We were waiting for the peer to tell us what they wanted.
+		post(res.Hash[:])
+		post(peerID[:])
+	}
+
+	return
+}
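+
+// The fixed-size exchange read above is the standard 68-byte handshake: a
+// 20-byte protocol string ("\x13" + "BitTorrent protocol"), 8 reserved
+// extension bytes, the 20-byte infohash, then the 20-byte peer ID.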
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/int.go b/deps/github.com/anacrolix/torrent/peer_protocol/int.go
new file mode 100644
index 0000000..ebcf603
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/int.go
@@ -0,0 +1,50 @@
+package peer_protocol
+
+import (
+	"encoding/binary"
+	"io"
+	"math"
+
+	"github.com/pkg/errors"
+)
+
+type (
+	// An alias for the underlying type of Integer. This is needed for fuzzing.
+	IntegerKind = uint32
+	Integer     IntegerKind
+)
+
+const IntegerMax = math.MaxUint32
+
+func (i *Integer) UnmarshalBinary(b []byte) error {
+	if len(b) != 4 {
+		return errors.New("expected 4 bytes")
+	}
+	*i = Integer(binary.BigEndian.Uint32(b))
+	return nil
+}
+
+func (i *Integer) Read(r io.Reader) error {
+	var b [4]byte
+	n, err := io.ReadFull(r, b[:])
+	if err == nil {
+		if n != 4 {
+			panic(n)
+		}
+		return i.UnmarshalBinary(b[:])
+	}
+	return err
+}
+
+// It's perfectly fine to cast these to an int. TODO: Or is it?
+func (i Integer) Int() int {
+	return int(i)
+}
+
+func (i Integer) Uint64() uint64 {
+	return uint64(i)
+}
+
+func (i Integer) Uint32() uint32 {
+	return uint32(i)
+}
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/messagetype_string.go b/deps/github.com/anacrolix/torrent/peer_protocol/messagetype_string.go
new file mode 100644
index 0000000..7be19f4
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/messagetype_string.go
@@ -0,0 +1,30 @@
+// Code generated by "stringer -type=MessageType"; DO NOT EDIT.
+
+package peer_protocol
+
+import "strconv"
+
+const (
+	_MessageType_name_0 = "ChokeUnchokeInterestedNotInterestedHaveBitfieldRequestPieceCancelPort"
+	_MessageType_name_1 = "SuggestHaveAllHaveNoneRejectAllowedFast"
+	_MessageType_name_2 = "Extended"
+)
+
+var (
+	_MessageType_index_0 = [...]uint8{0, 5, 12, 22, 35, 39, 47, 54, 59, 65, 69}
+	_MessageType_index_1 = [...]uint8{0, 7, 14, 22, 28, 39}
+)
+
+func (i MessageType) String() string {
+	switch {
+	case i <= 9:
+		return _MessageType_name_0[_MessageType_index_0[i]:_MessageType_index_0[i+1]]
+	case 13 <= i && i <= 17:
+		i -= 13
+		return _MessageType_name_1[_MessageType_index_1[i]:_MessageType_index_1[i+1]]
+	case i == 20:
+		return _MessageType_name_2
+	default:
+		return "MessageType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/metadata.go b/deps/github.com/anacrolix/torrent/peer_protocol/metadata.go
new file mode 100644
index 0000000..c480091
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/metadata.go
@@ -0,0 +1,42 @@
+package peer_protocol
+
+import (
+	"github.com/anacrolix/torrent/bencode"
+)
+
+const (
+	// http://bittorrent.org/beps/bep_0009.html. Note that there's an
+	// LT_metadata, but I've never implemented it.
+	ExtensionNameMetadata = "ut_metadata"
+)
+
+type (
+	ExtendedMetadataRequestMsg struct {
+		Piece     int                            `bencode:"piece"`
+		TotalSize int                            `bencode:"total_size"`
+		Type      ExtendedMetadataRequestMsgType `bencode:"msg_type"`
+	}
+
+	ExtendedMetadataRequestMsgType int
+)
+
+func MetadataExtensionRequestMsg(peerMetadataExtensionId ExtensionNumber, piece int) Message {
+	return Message{
+		Type:       Extended,
+		ExtendedID: peerMetadataExtensionId,
+		ExtendedPayload: bencode.MustMarshal(ExtendedMetadataRequestMsg{
+			Piece: piece,
+			Type:  RequestMetadataExtensionMsgType,
+		}),
+	}
+}
+
+// Returns the expected piece size for this request message. This is needed to determine the offset
+// into an extension message payload at which the requested metadata piece data starts.
+func (me ExtendedMetadataRequestMsg) PieceSize() int {
+	ret := me.TotalSize - me.Piece*(1<<14)
+	if ret > 1<<14 {
+		ret = 1 << 14
+	}
+	return ret
+}
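+
+// Metadata pieces are 16KiB (1<<14) except possibly the last. For example,
+// with TotalSize = 50000, piece 0 has size 16384 while piece 3 (the last) has
+// 50000 - 3*16384 = 848 bytes.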
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/msg.go b/deps/github.com/anacrolix/torrent/peer_protocol/msg.go
new file mode 100644
index 0000000..f1b1f10
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/msg.go
@@ -0,0 +1,139 @@
+package peer_protocol
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"encoding/binary"
+	"fmt"
+)
+
+// This is a lazy union representing all the possible fields for messages. Go doesn't have ADTs, and
+// I chose not to use type-assertions.
+type Message struct {
+	Keepalive            bool
+	Type                 MessageType
+	Index, Begin, Length Integer
+	Piece                []byte
+	Bitfield             []bool
+	ExtendedID           ExtensionNumber
+	ExtendedPayload      []byte
+	Port                 uint16
+}
+
+var _ interface {
+	encoding.BinaryUnmarshaler
+	encoding.BinaryMarshaler
+} = (*Message)(nil)
+
+func MakeCancelMessage(piece, offset, length Integer) Message {
+	return Message{
+		Type:   Cancel,
+		Index:  piece,
+		Begin:  offset,
+		Length: length,
+	}
+}
+
+func (msg Message) RequestSpec() (ret RequestSpec) {
+	return RequestSpec{
+		msg.Index,
+		msg.Begin,
+		func() Integer {
+			if msg.Type == Piece {
+				return Integer(len(msg.Piece))
+			} else {
+				return msg.Length
+			}
+		}(),
+	}
+}
+
+func (msg Message) MustMarshalBinary() []byte {
+	b, err := msg.MarshalBinary()
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
+
+func (msg Message) MarshalBinary() (data []byte, err error) {
+	var buf bytes.Buffer
+	if !msg.Keepalive {
+		err = buf.WriteByte(byte(msg.Type))
+		if err != nil {
+			return
+		}
+		switch msg.Type {
+		case Choke, Unchoke, Interested, NotInterested, HaveAll, HaveNone:
+		case Have, AllowedFast, Suggest:
+			err = binary.Write(&buf, binary.BigEndian, msg.Index)
+		case Request, Cancel, Reject:
+			for _, i := range []Integer{msg.Index, msg.Begin, msg.Length} {
+				err = binary.Write(&buf, binary.BigEndian, i)
+				if err != nil {
+					break
+				}
+			}
+		case Bitfield:
+			_, err = buf.Write(marshalBitfield(msg.Bitfield))
+		case Piece:
+			for _, i := range []Integer{msg.Index, msg.Begin} {
+				err = binary.Write(&buf, binary.BigEndian, i)
+				if err != nil {
+					return
+				}
+			}
+			n, err := buf.Write(msg.Piece)
+			if err != nil {
+				break
+			}
+			if n != len(msg.Piece) {
+				panic(n)
+			}
+		case Extended:
+			err = buf.WriteByte(byte(msg.ExtendedID))
+			if err != nil {
+				return
+			}
+			_, err = buf.Write(msg.ExtendedPayload)
+		case Port:
+			err = binary.Write(&buf, binary.BigEndian, msg.Port)
+		default:
+			err = fmt.Errorf("unknown message type: %v", msg.Type)
+		}
+	}
+	data = make([]byte, 4+buf.Len())
+	binary.BigEndian.PutUint32(data, uint32(buf.Len()))
+	if buf.Len() != copy(data[4:], buf.Bytes()) {
+		panic("bad copy")
+	}
+	return
+}
+
+func marshalBitfield(bf []bool) (b []byte) {
+	b = make([]byte, (len(bf)+7)/8)
+	for i, have := range bf {
+		if !have {
+			continue
+		}
+		c := b[i/8]
+		c |= 1 << uint(7-i%8)
+		b[i/8] = c
+	}
+	return
+}
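+
+// The inverse of the decoder's unmarshalBitfield: [true, false, true] packs
+// into the single byte 0xa0, with bit 7 (most significant) standing for
+// index 0.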
+
+func (me *Message) UnmarshalBinary(b []byte) error {
+	d := Decoder{
+		R: bufio.NewReader(bytes.NewReader(b)),
+	}
+	err := d.Decode(me)
+	if err != nil {
+		return err
+	}
+	if d.R.Buffered() != 0 {
+		return fmt.Errorf("%d trailing bytes", d.R.Buffered())
+	}
+	return nil
+}
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/pex.go b/deps/github.com/anacrolix/torrent/peer_protocol/pex.go
new file mode 100644
index 0000000..466548a
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/pex.go
@@ -0,0 +1,49 @@
+package peer_protocol
+
+import (
+	"github.com/anacrolix/dht/v2/krpc"
+
+	"github.com/anacrolix/torrent/bencode"
+)
+
+type PexMsg struct {
+	Added       krpc.CompactIPv4NodeAddrs `bencode:"added"`
+	AddedFlags  []PexPeerFlags            `bencode:"added.f"`
+	Added6      krpc.CompactIPv6NodeAddrs `bencode:"added6"`
+	Added6Flags []PexPeerFlags            `bencode:"added6.f"`
+	Dropped     krpc.CompactIPv4NodeAddrs `bencode:"dropped"`
+	Dropped6    krpc.CompactIPv6NodeAddrs `bencode:"dropped6"`
+}
+
+func (m *PexMsg) Len() int {
+	return len(m.Added) + len(m.Added6) + len(m.Dropped) + len(m.Dropped6)
+}
+
+func (m *PexMsg) Message(pexExtendedId ExtensionNumber) Message {
+	payload := bencode.MustMarshal(m)
+	return Message{
+		Type:            Extended,
+		ExtendedID:      pexExtendedId,
+		ExtendedPayload: payload,
+	}
+}
+
+// Unmarshals and returns a PEX message.
+func LoadPexMsg(b []byte) (ret PexMsg, err error) {
+	err = bencode.Unmarshal(b, &ret)
+	return
+}
+
+type PexPeerFlags byte
+
+func (me PexPeerFlags) Get(f PexPeerFlags) bool {
+	return me&f == f
+}
+
+const (
+	PexPrefersEncryption PexPeerFlags = 1 << iota
+	PexSeedUploadOnly
+	PexSupportsUtp
+	PexHolepunchSupport
+	PexOutgoingConn
+)
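+
+// examplePexAdded is an illustrative sketch added alongside this vendored file, not upstream
+// code. It shows how an added peer is paired index-wise with a flags byte and then wrapped in
+// an Extended message; a real extension number comes from the peer's extended handshake, and
+// the 7 here is only a placeholder.
+func examplePexAdded(addr krpc.NodeAddr) Message {
+	var pm PexMsg
+	pm.Added = append(pm.Added, addr)
+	pm.AddedFlags = append(pm.AddedFlags, PexPrefersEncryption|PexSupportsUtp)
+	return pm.Message(7)
+}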
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/pex_test.go b/deps/github.com/anacrolix/torrent/peer_protocol/pex_test.go
new file mode 100644
index 0000000..5e5e96c
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/pex_test.go
@@ -0,0 +1,64 @@
+package peer_protocol
+
+import (
+	"bufio"
+	"bytes"
+	"net"
+	"testing"
+
+	"github.com/anacrolix/dht/v2/krpc"
+	"github.com/stretchr/testify/require"
+
+	"github.com/anacrolix/torrent/bencode"
+)
+
+func TestUnmarshalPex(t *testing.T) {
+	var pem PexMsg
+	err := bencode.Unmarshal([]byte("d5:added12:\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0ce"), &pem)
+	require.NoError(t, err)
+	require.EqualValues(t, 2, len(pem.Added))
+	require.EqualValues(t, 1286, pem.Added[0].Port)
+	require.EqualValues(t, 0x100*0xb+0xc, pem.Added[1].Port)
+}
+
+func TestEmptyPexMsg(t *testing.T) {
+	pm := PexMsg{}
+	b, err := bencode.Marshal(pm)
+	t.Logf("%q", b)
+	require.NoError(t, err)
+	require.NoError(t, bencode.Unmarshal(b, &pm))
+}
+
+func TestMarshalPexMessage(t *testing.T) {
+	addr := krpc.NodeAddr{IP: net.IP{127, 0, 0, 1}, Port: 0x55aa}
+	f := PexPrefersEncryption | PexOutgoingConn
+	pm := new(PexMsg)
+	pm.Added = append(pm.Added, addr)
+	pm.AddedFlags = append(pm.AddedFlags, f)
+
+	_, err := bencode.Marshal(pm)
+	require.NoError(t, err)
+
+	pexExtendedId := ExtensionNumber(7)
+	msg := pm.Message(pexExtendedId)
+	expected := []byte("\x00\x00\x00\x4c\x14\x07d5:added6:\x7f\x00\x00\x01\x55\xaa7:added.f1:\x116:added60:8:added6.f0:7:dropped0:8:dropped60:e")
+	b, err := msg.MarshalBinary()
+	require.NoError(t, err)
+	require.EqualValues(t, b, expected)
+
+	msg = Message{}
+	dec := Decoder{
+		R:         bufio.NewReader(bytes.NewReader(b)),
+		MaxLength: 128,
+	}
+	pmOut := PexMsg{}
+	err = dec.Decode(&msg)
+	require.NoError(t, err)
+	require.EqualValues(t, Extended, msg.Type)
+	require.EqualValues(t, pexExtendedId, msg.ExtendedID)
+	err = bencode.Unmarshal(msg.ExtendedPayload, &pmOut)
+	require.NoError(t, err)
+	require.EqualValues(t, len(pm.Added), len(pmOut.Added))
+	require.EqualValues(t, pm.Added[0].IP, pmOut.Added[0].IP)
+	require.EqualValues(t, pm.Added[0].Port, pmOut.Added[0].Port)
+}
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/protocol.go b/deps/github.com/anacrolix/torrent/peer_protocol/protocol.go
new file mode 100644
index 0000000..bfeb6a0
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/protocol.go
@@ -0,0 +1,52 @@
+package peer_protocol
+
+const (
+	Protocol = "\x13BitTorrent protocol"
+)
+
+type MessageType byte
+
+//go:generate stringer -type=MessageType
+
+func (mt MessageType) FastExtension() bool {
+	return mt >= Suggest && mt <= AllowedFast
+}
+
+func (mt *MessageType) UnmarshalBinary(b []byte) error {
+	*mt = MessageType(b[0])
+	return nil
+}
+
+const (
+	// BEP 3
+	Choke         MessageType = 0
+	Unchoke       MessageType = 1
+	Interested    MessageType = 2
+	NotInterested MessageType = 3
+	Have          MessageType = 4
+	Bitfield      MessageType = 5
+	Request       MessageType = 6
+	Piece         MessageType = 7
+	Cancel        MessageType = 8
+
+	// BEP 5
+	Port MessageType = 9
+
+	// BEP 6 - Fast extension
+	Suggest     MessageType = 0x0d // 13
+	HaveAll     MessageType = 0x0e // 14
+	HaveNone    MessageType = 0x0f // 15
+	Reject      MessageType = 0x10 // 16
+	AllowedFast MessageType = 0x11 // 17
+
+	// BEP 10
+	Extended MessageType = 20
+)
+
+const (
+	HandshakeExtendedID = 0
+
+	RequestMetadataExtensionMsgType ExtendedMetadataRequestMsgType = 0
+	DataMetadataExtensionMsgType    ExtendedMetadataRequestMsgType = 1
+	RejectMetadataExtensionMsgType  ExtendedMetadataRequestMsgType = 2
+)
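+
+// exampleFastExtensionRange is an illustrative sketch added alongside this vendored file, not
+// upstream code. It demonstrates that FastExtension covers exactly the BEP 6 message IDs
+// 0x0d through 0x11 declared above.
+func exampleFastExtensionRange() {
+	for _, mt := range []MessageType{Suggest, HaveAll, HaveNone, Reject, AllowedFast} {
+		if !mt.FastExtension() {
+			panic(mt)
+		}
+	}
+	if Cancel.FastExtension() || Extended.FastExtension() {
+		panic("non-fast message type reported as fast")
+	}
+}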
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/protocol_test.go b/deps/github.com/anacrolix/torrent/peer_protocol/protocol_test.go
new file mode 100644
index 0000000..df01a1a
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/protocol_test.go
@@ -0,0 +1,154 @@
+package peer_protocol
+
+import (
+	"bufio"
+	"bytes"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestBinaryReadSliceOfPointers(t *testing.T) {
+	var msg Message
+	r := bytes.NewBufferString("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00")
+	if r.Len() != 12 {
+		t.Fatalf("expected 12 bytes left, but there are %d", r.Len())
+	}
+	for _, data := range []*Integer{&msg.Index, &msg.Begin, &msg.Length} {
+		err := data.Read(r)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+	if r.Len() != 0 {
+		t.FailNow()
+	}
+}
+
+func TestConstants(t *testing.T) {
+	assert.EqualValues(t, 3, NotInterested)
+	assert.EqualValues(t, 14, HaveAll)
+}
+
+func TestBitfieldEncode(t *testing.T) {
+	bf := make([]bool, 37)
+	bf[2] = true
+	bf[7] = true
+	bf[32] = true
+	s := string(marshalBitfield(bf))
+	const expected = "\x21\x00\x00\x00\x80"
+	if s != expected {
+		t.Fatalf("got %#v, expected %#v", s, expected)
+	}
+}
+
+func TestBitfieldUnmarshal(t *testing.T) {
+	bf := unmarshalBitfield([]byte("\x81\x06"))
+	expected := make([]bool, 16)
+	expected[0] = true
+	expected[7] = true
+	expected[13] = true
+	expected[14] = true
+	if len(bf) != len(expected) {
+		t.FailNow()
+	}
+	for i := range expected {
+		if bf[i] != expected[i] {
+			t.FailNow()
+		}
+	}
+}
+
+func TestHaveEncode(t *testing.T) {
+	actualBytes, err := Message{
+		Type:  Have,
+		Index: 42,
+	}.MarshalBinary()
+	if err != nil {
+		t.Fatal(err)
+	}
+	actualString := string(actualBytes)
+	expected := "\x00\x00\x00\x05\x04\x00\x00\x00\x2a"
+	if actualString != expected {
+		t.Fatalf("expected %#v, got %#v", expected, actualString)
+	}
+}
+
+func TestShortRead(t *testing.T) {
+	dec := Decoder{
+		R:         bufio.NewReader(bytes.NewBufferString("\x00\x00\x00\x02\x00!")),
+		MaxLength: 2,
+	}
+	msg := new(Message)
+	err := dec.Decode(msg)
+	if !strings.Contains(err.Error(), "1 unused bytes in message type Choke") {
+		t.Fatal(err)
+	}
+}
+
+func TestUnexpectedEOF(t *testing.T) {
+	msg := new(Message)
+	for _, stream := range []string{
+		"\x00\x00\x00",     // Header truncated.
+		"\x00\x00\x00\x01", // Expecting 1 more byte.
+		// Request with wrong length, and too short anyway.
+		"\x00\x00\x00\x06\x06\x00\x00\x00\x00\x00",
+		// Request truncated.
+		"\x00\x00\x00\x0b\x06\x00\x00\x00\x00\x00",
+	} {
+		dec := Decoder{
+			R:         bufio.NewReader(bytes.NewBufferString(stream)),
+			MaxLength: 42,
+		}
+		err := dec.Decode(msg)
+		if err == nil {
+			t.Fatalf("expected an error decoding %q", stream)
+		}
+	}
+}
+
+func TestMarshalKeepalive(t *testing.T) {
+	b, err := (Message{
+		Keepalive: true,
+	}).MarshalBinary()
+	if err != nil {
+		t.Fatalf("error marshalling keepalive: %s", err)
+	}
+	bs := string(b)
+	const expected = "\x00\x00\x00\x00"
+	if bs != expected {
+		t.Fatalf("marshalled keepalive is %q, expected %q", bs, expected)
+	}
+}
+
+func TestMarshalPortMsg(t *testing.T) {
+	b, err := (Message{
+		Type: Port,
+		Port: 0xaabb,
+	}).MarshalBinary()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(b) != "\x00\x00\x00\x03\x09\xaa\xbb" {
+		t.FailNow()
+	}
+}
+
+func TestUnmarshalPortMsg(t *testing.T) {
+	var m Message
+	d := Decoder{
+		R:         bufio.NewReader(bytes.NewBufferString("\x00\x00\x00\x03\x09\xaa\xbb")),
+		MaxLength: 8,
+	}
+	err := d.Decode(&m)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if m.Type != Port {
+		t.FailNow()
+	}
+	if m.Port != 0xaabb {
+		t.FailNow()
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/reqspec.go b/deps/github.com/anacrolix/torrent/peer_protocol/reqspec.go
new file mode 100644
index 0000000..f9989a2
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/reqspec.go
@@ -0,0 +1,11 @@
+package peer_protocol
+
+import "fmt"
+
+type RequestSpec struct {
+	Index, Begin, Length Integer
+}
+
+func (me RequestSpec) String() string {
+	return fmt.Sprintf("{%d %d %d}", me.Index, me.Begin, me.Length)
+}
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/18f327bd85f3ab06 b/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/18f327bd85f3ab06
new file mode 100644
index 0000000..d214fc2
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/18f327bd85f3ab06
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("\x00\x00\x000\a")
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/252f96643f6de0fc b/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/252f96643f6de0fc
new file mode 100644
index 0000000..2d3ac2e
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/252f96643f6de0fc
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("\x00\x00\x000\a00000000")
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/44a1b6410e7ce227 b/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/44a1b6410e7ce227
new file mode 100644
index 0000000..a6bf562
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/44a1b6410e7ce227
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("\x00\x00\x00\x05\x110000")
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/52452abe5ed3cb64 b/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/52452abe5ed3cb64
new file mode 100644
index 0000000..6109993
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/52452abe5ed3cb64
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("\x00\x00\x000\x05")
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/9d2ec002df4eda28 b/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/9d2ec002df4eda28
new file mode 100644
index 0000000..6345fd4
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/9d2ec002df4eda28
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("\x00\x00\x003\a\x17\b\x92\xf3\x02\xd5\x1896%~\xd2Q\x84b\x18")
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/aceaaae6cd039fb5 b/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/aceaaae6cd039fb5
new file mode 100644
index 0000000..3a76846
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/aceaaae6cd039fb5
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("\x00\x00\x00\x01\x10")
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/eb13c84d13ebb034 b/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/eb13c84d13ebb034
new file mode 100644
index 0000000..89e0e61
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/testdata/fuzz/FuzzDecoder/eb13c84d13ebb034
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("\x00\x00\x000\x14")
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/ut-holepunch/err-code.go b/deps/github.com/anacrolix/torrent/peer_protocol/ut-holepunch/err-code.go
new file mode 100644
index 0000000..7cc61db
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/ut-holepunch/err-code.go
@@ -0,0 +1,31 @@
+package utHolepunch
+
+import (
+	"fmt"
+)
+
+type ErrCode uint32
+
+var _ error = ErrCode(0)
+
+const (
+	NoSuchPeer ErrCode = iota + 1
+	NotConnected
+	NoSupport
+	NoSelf
+)
+
+func (ec ErrCode) Error() string {
+	switch ec {
+	case NoSuchPeer:
+		return "target endpoint is invalid"
+	case NotConnected:
+		return "the relaying peer is not connected to the target peer"
+	case NoSupport:
+		return "the target peer does not support the holepunch extension"
+	case NoSelf:
+		return "the target endpoint belongs to the relaying peer"
+	default:
+		return fmt.Sprintf("error code %d", ec)
+	}
+}
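+
+// exampleErrCodeAsError is an illustrative sketch added alongside this vendored file, not
+// upstream code. ErrCode satisfies error directly (see the assertion above), so a holepunch
+// failure can be returned and formatted like any other error value.
+func exampleErrCodeAsError() string {
+	var err error = NoSupport
+	return err.Error() // "the target peer does not support the holepunch extension"
+}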
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/ut-holepunch/err-code_test.go b/deps/github.com/anacrolix/torrent/peer_protocol/ut-holepunch/err-code_test.go
new file mode 100644
index 0000000..4553810
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/ut-holepunch/err-code_test.go
@@ -0,0 +1,10 @@
+package utHolepunch
+
+import (
+	"math/rand"
+	"testing"
+)
+
+func TestUnknownErrCodeError(t *testing.T) {
+	ErrCode(rand.Uint32()).Error()
+}
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/ut-holepunch/ut-holepunch.go b/deps/github.com/anacrolix/torrent/peer_protocol/ut-holepunch/ut-holepunch.go
new file mode 100644
index 0000000..3051fc0
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/ut-holepunch/ut-holepunch.go
@@ -0,0 +1,97 @@
+package utHolepunch
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"net/netip"
+)
+
+const ExtensionName = "ut_holepunch"
+
+type (
+	Msg struct {
+		MsgType  MsgType
+		AddrPort netip.AddrPort
+		ErrCode  ErrCode
+	}
+	MsgType  byte
+	AddrType byte
+)
+
+const (
+	Rendezvous MsgType = iota
+	Connect
+	Error
+)
+
+func (me MsgType) String() string {
+	switch me {
+	case Rendezvous:
+		return "rendezvous"
+	case Connect:
+		return "connect"
+	case Error:
+		return "error"
+	default:
+		return fmt.Sprintf("unknown %d", me)
+	}
+}
+
+const (
+	Ipv4 AddrType = iota
+	Ipv6
+)
+
+func (m *Msg) UnmarshalBinary(b []byte) error {
+	if len(b) < 12 {
+		return fmt.Errorf("buffer too small to be valid")
+	}
+	m.MsgType = MsgType(b[0])
+	b = b[1:]
+	addrType := AddrType(b[0])
+	b = b[1:]
+	var addr netip.Addr
+	switch addrType {
+	case Ipv4:
+		addr = netip.AddrFrom4(*(*[4]byte)(b[:4]))
+		b = b[4:]
+	case Ipv6:
+		if len(b) < 22 {
+			return fmt.Errorf("not enough bytes")
+		}
+		addr = netip.AddrFrom16(*(*[16]byte)(b[:16]))
+		b = b[16:]
+	default:
+		return fmt.Errorf("unhandled addr type value %v", addrType)
+	}
+	port := binary.BigEndian.Uint16(b[:])
+	b = b[2:]
+	m.AddrPort = netip.AddrPortFrom(addr, port)
+	m.ErrCode = ErrCode(binary.BigEndian.Uint32(b[:]))
+	b = b[4:]
+	if len(b) != 0 {
+		return fmt.Errorf("%v trailing unused bytes", len(b))
+	}
+	return nil
+}
+
+func (m *Msg) MarshalBinary() (_ []byte, err error) {
+	var buf bytes.Buffer
+	buf.Grow(24)
+	buf.WriteByte(byte(m.MsgType))
+	addr := m.AddrPort.Addr()
+	switch {
+	case addr.Is4():
+		buf.WriteByte(byte(Ipv4))
+	case addr.Is6():
+		buf.WriteByte(byte(Ipv6))
+	default:
+		err = fmt.Errorf("unhandled addr type: %v", addr)
+		return
+	}
+	buf.Write(addr.AsSlice())
+	binary.Write(&buf, binary.BigEndian, m.AddrPort.Port())
+	binary.Write(&buf, binary.BigEndian, m.ErrCode)
+	return buf.Bytes(), nil
+}
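+
+// exampleConnectWireSize is an illustrative sketch added alongside this vendored file, not
+// upstream code. It checks the layout MarshalBinary produces: 1 message-type byte, 1
+// address-type byte, 4 or 16 address bytes, a 2-byte big-endian port, and a 4-byte big-endian
+// error code — 12 bytes for IPv4, 24 for IPv6.
+func exampleConnectWireSize() int {
+	m := Msg{MsgType: Connect, AddrPort: netip.MustParseAddrPort("1.2.3.4:6881")}
+	b, err := m.MarshalBinary()
+	if err != nil || len(b) != 12 {
+		panic("unexpected encoding")
+	}
+	return len(b)
+}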
diff --git a/deps/github.com/anacrolix/torrent/peer_protocol/ut-holepunch/ut-holepunch_test.go b/deps/github.com/anacrolix/torrent/peer_protocol/ut-holepunch/ut-holepunch_test.go
new file mode 100644
index 0000000..7221e1f
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peer_protocol/ut-holepunch/ut-holepunch_test.go
@@ -0,0 +1,63 @@
+package utHolepunch
+
+import (
+	"bytes"
+	"net/netip"
+	"testing"
+
+	qt "github.com/frankban/quicktest"
+)
+
+var exampleMsgs = []Msg{
+	{
+		MsgType:  Rendezvous,
+		AddrPort: netip.MustParseAddrPort("[1234::1]:42069"),
+		ErrCode:  16777216,
+	},
+	{
+		MsgType:  Connect,
+		AddrPort: netip.MustParseAddrPort("1.2.3.4:42069"),
+		ErrCode:  16777216,
+	},
+}
+
+func TestUnmarshalMsg(t *testing.T) {
+	c := qt.New(t)
+	for _, m := range exampleMsgs {
+		b, err := m.MarshalBinary()
+		c.Assert(err, qt.IsNil)
+		expectedLen := 24
+		if m.AddrPort.Addr().Is4() {
+			expectedLen = 12
+		}
+		c.Check(b, qt.HasLen, expectedLen)
+		var um Msg
+		err = um.UnmarshalBinary(b)
+		c.Assert(err, qt.IsNil)
+		c.Check(um, qt.Equals, m)
+	}
+}
+
+func FuzzMsg(f *testing.F) {
+	for _, m := range exampleMsgs {
+		emb, err := m.MarshalBinary()
+		if err != nil {
+			f.Fatal(err)
+		}
+		f.Add(emb)
+	}
+	f.Fuzz(func(t *testing.T, b []byte) {
+		var m Msg
+		err := m.UnmarshalBinary(b)
+		if err != nil {
+			t.SkipNow()
+		}
+		mb, err := m.MarshalBinary()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !bytes.Equal(b, mb) {
+			t.FailNow()
+		}
+	})
+}
diff --git a/deps/github.com/anacrolix/torrent/peerconn.go b/deps/github.com/anacrolix/torrent/peerconn.go
new file mode 100644
index 0000000..ee47dd1
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peerconn.go
@@ -0,0 +1,1146 @@
+package torrent
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"net"
+	"net/netip"
+	"strconv"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"github.com/RoaringBitmap/roaring"
+	"github.com/anacrolix/generics"
+	. "github.com/anacrolix/generics"
+	"github.com/anacrolix/log"
+	"github.com/anacrolix/missinggo/v2/bitmap"
+	"github.com/anacrolix/multiless"
+	"golang.org/x/exp/maps"
+	"golang.org/x/time/rate"
+
+	"github.com/anacrolix/torrent/bencode"
+	"github.com/anacrolix/torrent/internal/alloclim"
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/anacrolix/torrent/mse"
+	pp "github.com/anacrolix/torrent/peer_protocol"
+	utHolepunch "github.com/anacrolix/torrent/peer_protocol/ut-holepunch"
+)
+
+// Maintains the state of a BitTorrent-protocol based connection with a peer.
+type PeerConn struct {
+	Peer
+
+	// A string that should identify the PeerConn's net.Conn endpoints. The net.Conn could
+	// be wrapping WebRTC, uTP, or TCP etc. Used in writing the conn status for peers.
+	connString string
+
+	// See BEP 3 etc.
+	PeerID             PeerID
+	PeerExtensionBytes pp.PeerExtensionBits
+	PeerListenPort     int
+
+	// The actual Conn, used for closing, and setting socket options. Do not use methods on this
+	// while holding any mutexes.
+	conn net.Conn
+	// The Reader and Writer for this Conn, with hooks installed for stats,
+	// limiting, deadlines etc.
+	w io.Writer
+	r io.Reader
+
+	messageWriter peerConnMsgWriter
+
+	PeerExtensionIDs map[pp.ExtensionName]pp.ExtensionNumber
+	PeerClientName   atomic.Value
+	uploadTimer      *time.Timer
+	pex              pexConnState
+
+	// The pieces the peer has claimed to have.
+	_peerPieces roaring.Bitmap
+	// The peer has everything. This can occur due to a special message, when
+	// we may not even know the number of pieces in the torrent yet.
+	peerSentHaveAll bool
+
+	peerRequestDataAllocLimiter alloclim.Limiter
+
+	outstandingHolepunchingRendezvous map[netip.AddrPort]struct{}
+}
+
+func (cn *PeerConn) pexStatus() string {
+	if !cn.bitExtensionEnabled(pp.ExtensionBitLtep) {
+		return "extended protocol disabled"
+	}
+	if cn.PeerExtensionIDs == nil {
+		return "pending extended handshake"
+	}
+	if !cn.supportsExtension(pp.ExtensionNamePex) {
+		return "unsupported"
+	}
+	if true {
+		return fmt.Sprintf(
+			"%v conns, %v unsent events",
+			len(cn.pex.remoteLiveConns),
+			cn.pex.numPending(),
+		)
+	} else {
+		// This alternative branch prints out the remote live conn addresses.
+		return fmt.Sprintf(
+			"%v conns, %v unsent events",
+			strings.Join(generics.SliceMap(
+				maps.Keys(cn.pex.remoteLiveConns),
+				func(from netip.AddrPort) string {
+					return from.String()
+				}), ","),
+			cn.pex.numPending(),
+		)
+	}
+}
+
+func (cn *PeerConn) peerImplStatusLines() []string {
+	return []string{
+		cn.connString,
+		fmt.Sprintf("peer id: %+q", cn.PeerID),
+		fmt.Sprintf("extensions: %v", cn.PeerExtensionBytes),
+		fmt.Sprintf("ltep extensions: %v", cn.PeerExtensionIDs),
+		fmt.Sprintf("pex: %s", cn.pexStatus()),
+	}
+}
+
+// Returns true if the connection is over IPv6.
+func (cn *PeerConn) ipv6() bool {
+	ip := cn.remoteIp()
+	if ip.To4() != nil {
+		return false
+	}
+	return len(ip) == net.IPv6len
+}
+
+// Returns true if the dialer/initiator has the higher client peer ID. See
+// https://github.com/arvidn/libtorrent/blame/272828e1cc37b042dfbbafa539222d8533e99755/src/bt_peer_connection.cpp#L3536-L3557.
+// As far as I can tell, Transmission just keeps the oldest connection.
+func (cn *PeerConn) isPreferredDirection() bool {
+	// True if our client peer ID is higher than the remote's peer ID.
+	return bytes.Compare(cn.PeerID[:], cn.t.cl.peerID[:]) < 0 == cn.outgoing
+}
+
+// Returns whether the left connection should be preferred over the right one,
+// considering only their networking properties. If ok is false, we can't
+// decide.
+func (l *PeerConn) hasPreferredNetworkOver(r *PeerConn) bool {
+	var ml multiless.Computation
+	ml = ml.Bool(r.isPreferredDirection(), l.isPreferredDirection())
+	ml = ml.Bool(l.utp(), r.utp())
+	ml = ml.Bool(r.ipv6(), l.ipv6())
+	return ml.Less()
+}
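+
+// exampleNetworkPreference is an illustrative sketch added alongside this vendored file, not
+// upstream code. The multiless computation above applies its rules in order: direction
+// preference dominates transport choice, which dominates address family. In each Bool(x, y),
+// false orders before true, and Less() reports whether the accumulated left-hand side wins.
+func exampleNetworkPreference(lPreferred, lUtp, lIpv6, rPreferred, rUtp, rIpv6 bool) bool {
+	var ml multiless.Computation
+	ml = ml.Bool(rPreferred, lPreferred) // left wins if it is the preferred direction and right is not
+	ml = ml.Bool(lUtp, rUtp)             // left wins if it avoids uTP and right does not
+	ml = ml.Bool(rIpv6, lIpv6)           // left wins if it is IPv6 and right is not
+	return ml.Less()
+}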
+
+func (cn *PeerConn) peerHasAllPieces() (all, known bool) {
+	if cn.peerSentHaveAll {
+		return true, true
+	}
+	if !cn.t.haveInfo() {
+		return false, false
+	}
+	return cn._peerPieces.GetCardinality() == uint64(cn.t.numPieces()), true
+}
+
+func (cn *PeerConn) onGotInfo(info *metainfo.Info) {
+	cn.setNumPieces(info.NumPieces())
+}
+
+// Clamp the peer's claimed pieces to the torrent's actual piece count, dropping any claims at
+// or beyond num, such as padding bits from a badly sized BITFIELD or out-of-range HAVE messages.
+func (cn *PeerConn) setNumPieces(num pieceIndex) {
+	cn._peerPieces.RemoveRange(bitmap.BitRange(num), bitmap.ToEnd)
+	cn.peerPiecesChanged()
+}
+
+func (cn *PeerConn) peerPieces() *roaring.Bitmap {
+	return &cn._peerPieces
+}
+
+func (cn *PeerConn) connectionFlags() (ret string) {
+	c := func(b byte) {
+		ret += string([]byte{b})
+	}
+	if cn.cryptoMethod == mse.CryptoMethodRC4 {
+		c('E')
+	} else if cn.headerEncrypted {
+		c('e')
+	}
+	ret += string(cn.Discovery)
+	if cn.utp() {
+		c('U')
+	}
+	return
+}
+
+func (cn *PeerConn) utp() bool {
+	return parseNetworkString(cn.Network).Udp
+}
+
+func (cn *PeerConn) onClose() {
+	if cn.pex.IsEnabled() {
+		cn.pex.Close()
+	}
+	cn.tickleWriter()
+	if cn.conn != nil {
+		go cn.conn.Close()
+	}
+	if cb := cn.callbacks.PeerConnClosed; cb != nil {
+		cb(cn)
+	}
+}
+
+// Writes a message into the write buffer. Returns whether it's okay to keep writing. Writing is
+// done asynchronously, so it may be that we're not able to honour backpressure from this method.
+func (cn *PeerConn) write(msg pp.Message) bool {
+	torrent.Add(fmt.Sprintf("messages written of type %s", msg.Type.String()), 1)
+	// We don't need to track bytes here because the connection's Writer has that behaviour injected
+	// (although there's some delay between us buffering the message and the connection writer
+	// flushing it out).
+	notFull := cn.messageWriter.write(msg)
+	// Last I checked only Piece messages affect stats, and we don't write those.
+	cn.wroteMsg(&msg)
+	cn.tickleWriter()
+	return notFull
+}
+
+func (cn *PeerConn) requestMetadataPiece(index int) {
+	eID := cn.PeerExtensionIDs[pp.ExtensionNameMetadata]
+	if eID == pp.ExtensionDeleteNumber {
+		return
+	}
+	if index < len(cn.metadataRequests) && cn.metadataRequests[index] {
+		return
+	}
+	cn.logger.WithDefaultLevel(log.Debug).Printf("requesting metadata piece %d", index)
+	cn.write(pp.MetadataExtensionRequestMsg(eID, index))
+	for index >= len(cn.metadataRequests) {
+		cn.metadataRequests = append(cn.metadataRequests, false)
+	}
+	cn.metadataRequests[index] = true
+}
+
+func (cn *PeerConn) requestedMetadataPiece(index int) bool {
+	return index < len(cn.metadataRequests) && cn.metadataRequests[index]
+}
+
+func (cn *PeerConn) onPeerSentCancel(r Request) {
+	if _, ok := cn.peerRequests[r]; !ok {
+		torrent.Add("unexpected cancels received", 1)
+		return
+	}
+	if cn.fastEnabled() {
+		cn.reject(r)
+	} else {
+		delete(cn.peerRequests, r)
+	}
+}
+
+func (cn *PeerConn) choke(msg messageWriter) (more bool) {
+	if cn.choking {
+		return true
+	}
+	cn.choking = true
+	more = msg(pp.Message{
+		Type: pp.Choke,
+	})
+	if !cn.fastEnabled() {
+		cn.deleteAllPeerRequests()
+	}
+	return
+}
+
+func (cn *PeerConn) deleteAllPeerRequests() {
+	for _, state := range cn.peerRequests {
+		state.allocReservation.Drop()
+	}
+	cn.peerRequests = nil
+}
+
+func (cn *PeerConn) unchoke(msg func(pp.Message) bool) bool {
+	if !cn.choking {
+		return true
+	}
+	cn.choking = false
+	return msg(pp.Message{
+		Type: pp.Unchoke,
+	})
+}
+
+func (pc *PeerConn) writeInterested(interested bool) bool {
+	return pc.write(pp.Message{
+		Type: func() pp.MessageType {
+			if interested {
+				return pp.Interested
+			} else {
+				return pp.NotInterested
+			}
+		}(),
+	})
+}
+
+func (me *PeerConn) _request(r Request) bool {
+	return me.write(pp.Message{
+		Type:   pp.Request,
+		Index:  r.Index,
+		Begin:  r.Begin,
+		Length: r.Length,
+	})
+}
+
+func (me *PeerConn) _cancel(r RequestIndex) bool {
+	me.write(makeCancelMessage(me.t.requestIndexToRequest(r)))
+	return me.remoteRejectsCancels()
+}
+
+// Whether we should expect a reject message after sending a cancel.
+func (me *PeerConn) remoteRejectsCancels() bool {
+	if !me.fastEnabled() {
+		return false
+	}
+	if me.remoteIsTransmission() {
+		// Transmission did not send rejects for received cancels. See
+		// https://github.com/transmission/transmission/pull/2275. Fixed in 4.0.0-beta.1 onward in
+		// https://github.com/transmission/transmission/commit/76719bf34c255da4fca991c2ad3fa4b65d2154b1.
+		// Peer ID prefix scheme described
+		// https://github.com/transmission/transmission/blob/7ec7607bbcf0fa99bd4b157b9b0f0c411d59f45d/CMakeLists.txt#L128-L149.
+		return me.PeerID[3] >= '4'
+	}
+	return true
+}
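+
+// exampleTransmissionVersionByte is an illustrative sketch added alongside this vendored
+// file, not upstream code. Azureus-style Transmission peer IDs look like "-TR4050-...", so
+// index 3 holds the major-version digit that remoteRejectsCancels compares against '4'.
+func exampleTransmissionVersionByte() byte {
+	id := []byte("-TR4050-abcdefghijkl")
+	return id[3] // '4': this client sends rejects for received cancels
+}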
+
+func (cn *PeerConn) fillWriteBuffer() {
+	if cn.messageWriter.writeBuffer.Len() > writeBufferLowWaterLen {
+		// Fully committing to our max requests requires sufficient space (see
+		// maxLocalToRemoteRequests). Flush what we have instead. We also always prefer making
+		// requests over doing PEX or uploading, so we short-circuit before handling those. Any update
+		// request reason will not be cleared, so we'll come right back here when there's space. We
+		// can't do this in maybeUpdateActualRequestState because it's a method on Peer and has no
+		// knowledge of write buffers.
+		return
+	}
+	cn.maybeUpdateActualRequestState()
+	if cn.pex.IsEnabled() {
+		if flow := cn.pex.Share(cn.write); !flow {
+			return
+		}
+	}
+	cn.upload(cn.write)
+}
+
+func (cn *PeerConn) have(piece pieceIndex) {
+	if cn.sentHaves.Get(bitmap.BitIndex(piece)) {
+		return
+	}
+	cn.write(pp.Message{
+		Type:  pp.Have,
+		Index: pp.Integer(piece),
+	})
+	cn.sentHaves.Add(bitmap.BitIndex(piece))
+}
+
+func (cn *PeerConn) postBitfield() {
+	if cn.sentHaves.Len() != 0 {
+		panic("bitfield must be first have-related message sent")
+	}
+	if !cn.t.haveAnyPieces() {
+		return
+	}
+	cn.write(pp.Message{
+		Type:     pp.Bitfield,
+		Bitfield: cn.t.bitfield(),
+	})
+	cn.sentHaves = bitmap.Bitmap{cn.t._completedPieces.Clone()}
+}
+
+func (cn *PeerConn) handleUpdateRequests() {
+	// The writer determines the request state as needed when it can write.
+	cn.tickleWriter()
+}
+
+func (cn *PeerConn) raisePeerMinPieces(newMin pieceIndex) {
+	if newMin > cn.peerMinPieces {
+		cn.peerMinPieces = newMin
+	}
+}
+
+func (cn *PeerConn) peerSentHave(piece pieceIndex) error {
+	if cn.t.haveInfo() && piece >= cn.t.numPieces() || piece < 0 {
+		return errors.New("invalid piece")
+	}
+	if cn.peerHasPiece(piece) {
+		return nil
+	}
+	cn.raisePeerMinPieces(piece + 1)
+	if !cn.peerHasPiece(piece) {
+		cn.t.incPieceAvailability(piece)
+	}
+	cn._peerPieces.Add(uint32(piece))
+	if cn.t.wantPieceIndex(piece) {
+		cn.updateRequests("have")
+	}
+	cn.peerPiecesChanged()
+	return nil
+}
+
+func (cn *PeerConn) peerSentBitfield(bf []bool) error {
+	if len(bf)%8 != 0 {
+		panic("expected bitfield length divisible by 8")
+	}
+	// We know that the last byte means that at most the last 7 bits are wasted.
+	cn.raisePeerMinPieces(pieceIndex(len(bf) - 7))
+	if cn.t.haveInfo() && len(bf) > int(cn.t.numPieces()) {
+		// Ignore known excess pieces.
+		bf = bf[:cn.t.numPieces()]
+	}
+	bm := boolSliceToBitmap(bf)
+	if cn.t.haveInfo() && pieceIndex(bm.GetCardinality()) == cn.t.numPieces() {
+		cn.onPeerHasAllPieces()
+		return nil
+	}
+	if !bm.IsEmpty() {
+		cn.raisePeerMinPieces(pieceIndex(bm.Maximum()) + 1)
+	}
+	shouldUpdateRequests := false
+	if cn.peerSentHaveAll {
+		if !cn.t.deleteConnWithAllPieces(&cn.Peer) {
+			panic(cn)
+		}
+		cn.peerSentHaveAll = false
+		if !cn._peerPieces.IsEmpty() {
+			panic("if peer has all, we expect no individual peer pieces to be set")
+		}
+	} else {
+		bm.Xor(&cn._peerPieces)
+	}
+	cn.peerSentHaveAll = false
+	// bm is now 'on' for pieces that are changing
+	bm.Iterate(func(x uint32) bool {
+		pi := pieceIndex(x)
+		if cn._peerPieces.Contains(x) {
+			// Then we must be losing this piece
+			cn.t.decPieceAvailability(pi)
+		} else {
+			if !shouldUpdateRequests && cn.t.wantPieceIndex(pieceIndex(x)) {
+				shouldUpdateRequests = true
+			}
+			// We must be gaining this piece
+			cn.t.incPieceAvailability(pieceIndex(x))
+		}
+		return true
+	})
+	// Apply the changes. If we had everything previously, this should be empty, so xor is the same
+	// as or.
+	cn._peerPieces.Xor(&bm)
+	if shouldUpdateRequests {
+		cn.updateRequests("bitfield")
+	}
+	// We didn't guard this before; I see no reason to do it now.
+	cn.peerPiecesChanged()
+	return nil
+}
+
+func (cn *PeerConn) onPeerHasAllPieces() {
+	t := cn.t
+	if t.haveInfo() {
+		cn._peerPieces.Iterate(func(x uint32) bool {
+			t.decPieceAvailability(pieceIndex(x))
+			return true
+		})
+	}
+	t.addConnWithAllPieces(&cn.Peer)
+	cn.peerSentHaveAll = true
+	cn._peerPieces.Clear()
+	if !cn.t._pendingPieces.IsEmpty() {
+		cn.updateRequests("Peer.onPeerHasAllPieces")
+	}
+	cn.peerPiecesChanged()
+}
+
+func (cn *PeerConn) onPeerSentHaveAll() error {
+	cn.onPeerHasAllPieces()
+	return nil
+}
+
+func (cn *PeerConn) peerSentHaveNone() error {
+	if !cn.peerSentHaveAll {
+		cn.t.decPeerPieceAvailability(&cn.Peer)
+	}
+	cn._peerPieces.Clear()
+	cn.peerSentHaveAll = false
+	cn.peerPiecesChanged()
+	return nil
+}
+
+func (c *PeerConn) requestPendingMetadata() {
+	if c.t.haveInfo() {
+		return
+	}
+	if c.PeerExtensionIDs[pp.ExtensionNameMetadata] == 0 {
+		// Peer doesn't support this.
+		return
+	}
+	// Request metadata pieces that we don't have in a random order.
+	var pending []int
+	for index := 0; index < c.t.metadataPieceCount(); index++ {
+		if !c.t.haveMetadataPiece(index) && !c.requestedMetadataPiece(index) {
+			pending = append(pending, index)
+		}
+	}
+	rand.Shuffle(len(pending), func(i, j int) { pending[i], pending[j] = pending[j], pending[i] })
+	for _, i := range pending {
+		c.requestMetadataPiece(i)
+	}
+}
+
+func (cn *PeerConn) wroteMsg(msg *pp.Message) {
+	torrent.Add(fmt.Sprintf("messages written of type %s", msg.Type.String()), 1)
+	if msg.Type == pp.Extended {
+		for name, id := range cn.PeerExtensionIDs {
+			if id != msg.ExtendedID {
+				continue
+			}
+			torrent.Add(fmt.Sprintf("Extended messages written for protocol %q", name), 1)
+		}
+	}
+	cn.allStats(func(cs *ConnStats) { cs.wroteMsg(msg) })
+}
+
+func (cn *PeerConn) wroteBytes(n int64) {
+	cn.allStats(add(n, func(cs *ConnStats) *Count { return &cs.BytesWritten }))
+}
+
+func (c *PeerConn) fastEnabled() bool {
+	return c.PeerExtensionBytes.SupportsFast() && c.t.cl.config.Extensions.SupportsFast()
+}
+
+func (c *PeerConn) reject(r Request) {
+	if !c.fastEnabled() {
+		panic("fast not enabled")
+	}
+	c.write(r.ToMsg(pp.Reject))
+	// It is possible to reject a request before it is added to peer requests due to being invalid.
+	if state, ok := c.peerRequests[r]; ok {
+		state.allocReservation.Drop()
+		delete(c.peerRequests, r)
+	}
+}
+
+func (c *PeerConn) maximumPeerRequestChunkLength() (_ Option[int]) {
+	uploadRateLimiter := c.t.cl.config.UploadRateLimiter
+	if uploadRateLimiter.Limit() == rate.Inf {
+		return
+	}
+	return Some(uploadRateLimiter.Burst())
+}
+
+// startFetch is for testing purposes currently.
+func (c *PeerConn) onReadRequest(r Request, startFetch bool) error {
+	requestedChunkLengths.Add(strconv.FormatUint(r.Length.Uint64(), 10), 1)
+	if _, ok := c.peerRequests[r]; ok {
+		torrent.Add("duplicate requests received", 1)
+		if c.fastEnabled() {
+			return errors.New("received duplicate request with fast enabled")
+		}
+		return nil
+	}
+	if c.choking {
+		torrent.Add("requests received while choking", 1)
+		if c.fastEnabled() {
+			torrent.Add("requests rejected while choking", 1)
+			c.reject(r)
+		}
+		return nil
+	}
+	// TODO: What if they've already requested this?
+	if len(c.peerRequests) >= localClientReqq {
+		torrent.Add("requests received while queue full", 1)
+		if c.fastEnabled() {
+			c.reject(r)
+		}
+		// BEP 6 says we may close here if we choose.
+		return nil
+	}
+	if opt := c.maximumPeerRequestChunkLength(); opt.Ok && int(r.Length) > opt.Value {
+		err := fmt.Errorf("peer requested chunk too long (%v)", r.Length)
+		c.logger.Levelf(log.Warning, err.Error())
+		if c.fastEnabled() {
+			c.reject(r)
+			return nil
+		} else {
+			return err
+		}
+	}
+	if !c.t.havePiece(pieceIndex(r.Index)) {
+		// TODO: Tell the peer we don't have the piece, and reject this request.
+		requestsReceivedForMissingPieces.Add(1)
+		return fmt.Errorf("peer requested piece we don't have: %v", r.Index.Int())
+	}
+	pieceLength := c.t.pieceLength(pieceIndex(r.Index))
+	// Check this after we know we have the piece, so that the piece length will be known.
+	if chunkOverflowsPiece(r.ChunkSpec, pieceLength) {
+		torrent.Add("bad requests received", 1)
+		return errors.New("chunk overflows piece")
+	}
+	if c.peerRequests == nil {
+		c.peerRequests = make(map[Request]*peerRequestState, localClientReqq)
+	}
+	value := &peerRequestState{
+		allocReservation: c.peerRequestDataAllocLimiter.Reserve(int64(r.Length)),
+	}
+	c.peerRequests[r] = value
+	if startFetch {
+		// TODO: Limit peer request data read concurrency.
+		go c.peerRequestDataReader(r, value)
+	}
+	return nil
+}
+
+func (c *PeerConn) peerRequestDataReader(r Request, prs *peerRequestState) {
+	// Should we depend on Torrent closure here? I think it's okay to get cancelled from elsewhere,
+	// or fail to read and then clean up. Also, we used to hang here if the reservation was never
+	// dropped; that has since been fixed.
+	ctx := context.Background()
+	err := prs.allocReservation.Wait(ctx)
+	if err != nil {
+		c.logger.WithDefaultLevel(log.Debug).Levelf(log.ErrorLevel(err), "waiting for alloc limit reservation: %v", err)
+		return
+	}
+	b, err := c.readPeerRequestData(r)
+	c.locker().Lock()
+	defer c.locker().Unlock()
+	if err != nil {
+		c.peerRequestDataReadFailed(err, r)
+	} else {
+		if b == nil {
+			panic("data must be non-nil to trigger send")
+		}
+		torrent.Add("peer request data read successes", 1)
+		prs.data = b
+		// This might be required for the error case too (#752 and #753).
+		c.tickleWriter()
+	}
+}
+
+// If this is maintained correctly, we might be able to support optional synchronous reading for
+// chunk sending, the way it used to work.
+func (c *PeerConn) peerRequestDataReadFailed(err error, r Request) {
+	torrent.Add("peer request data read failures", 1)
+	logLevel := log.Warning
+	if c.t.hasStorageCap() {
+		// It's expected that pieces might drop. See
+		// https://github.com/anacrolix/torrent/issues/702#issuecomment-1000953313.
+		logLevel = log.Debug
+	}
+	c.logger.Levelf(logLevel, "error reading chunk for peer Request %v: %v", r, err)
+	if c.t.closed.IsSet() {
+		return
+	}
+	i := pieceIndex(r.Index)
+	if c.t.pieceComplete(i) {
+		// There used to be more code here that just duplicated the following break. Piece
+		// completions are currently cached, so I'm not sure how helpful this update is, except to
+		// pull any completion changes pushed to the storage backend in failed reads that got us
+		// here.
+		c.t.updatePieceCompletion(i)
+	}
+	// We've probably dropped a piece from storage, but there's no way to communicate this to the
+	// peer. If they ask for it again, we kick them allowing us to send them updated piece states if
+	// we reconnect. TODO: Instead, we could just try to update them with Bitfield or HaveNone and
+	// if they kick us for breaking protocol, on reconnect we will be compliant again (at least
+	// initially).
+	if c.fastEnabled() {
+		c.reject(r)
+	} else {
+		if c.choking {
+			// If fast isn't enabled, I think we would have wiped all peer requests when we last
+			// choked, and requests while we're choking would be ignored. It's possible that a
+			// peer request data read completed concurrently with it being deleted elsewhere.
+			c.logger.WithDefaultLevel(log.Warning).Printf("already choking peer, requests might not be rejected correctly")
+		}
+		// Choking a non-fast peer should cause them to flush all their requests.
+		c.choke(c.write)
+	}
+}
+
+func (c *PeerConn) readPeerRequestData(r Request) ([]byte, error) {
+	b := make([]byte, r.Length)
+	p := c.t.info.Piece(int(r.Index))
+	n, err := c.t.readAt(b, p.Offset()+int64(r.Begin))
+	if n == len(b) {
+		if err == io.EOF {
+			err = nil
+		}
+	} else {
+		if err == nil {
+			panic("expected error")
+		}
+	}
+	return b, err
+}
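+
+// exampleFullReadAt is an illustrative sketch added alongside this vendored file, not
+// upstream code. It shows the convention readPeerRequestData relies on: an io.ReaderAt may
+// return io.EOF together with a completely filled buffer, and that counts as success.
+func exampleFullReadAt(r io.ReaderAt, off int64, n int) ([]byte, error) {
+	b := make([]byte, n)
+	read, err := r.ReadAt(b, off)
+	if read == n && err == io.EOF {
+		err = nil // a full read ending exactly at EOF is not an error
+	}
+	return b[:read], err
+}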
+
+func (c *PeerConn) logProtocolBehaviour(level log.Level, format string, arg ...interface{}) {
+	c.logger.WithContextText(fmt.Sprintf(
+		"peer id %q, ext v %q", c.PeerID, c.PeerClientName.Load(),
+	)).SkipCallers(1).Levelf(level, format, arg...)
+}
+
+// Processes incoming BitTorrent wire-protocol messages. The client lock is held upon entry and
+// exit. Returning will end the connection.
+func (c *PeerConn) mainReadLoop() (err error) {
+	defer func() {
+		if err != nil {
+			torrent.Add("connection.mainReadLoop returned with error", 1)
+		} else {
+			torrent.Add("connection.mainReadLoop returned with no error", 1)
+		}
+	}()
+	t := c.t
+	cl := t.cl
+
+	decoder := pp.Decoder{
+		R:         bufio.NewReaderSize(c.r, 1<<17),
+		MaxLength: 4 * pp.Integer(max(int64(t.chunkSize), defaultChunkSize)),
+		Pool:      &t.chunkPool,
+	}
+	for {
+		var msg pp.Message
+		func() {
+			cl.unlock()
+			defer cl.lock()
+			err = decoder.Decode(&msg)
+		}()
+		if cb := c.callbacks.ReadMessage; cb != nil && err == nil {
+			cb(c, &msg)
+		}
+		if t.closed.IsSet() || c.closed.IsSet() {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+		c.lastMessageReceived = time.Now()
+		if msg.Keepalive {
+			receivedKeepalives.Add(1)
+			continue
+		}
+		messageTypesReceived.Add(msg.Type.String(), 1)
+		if msg.Type.FastExtension() && !c.fastEnabled() {
+			runSafeExtraneous(func() { torrent.Add("fast messages received when extension is disabled", 1) })
+			return fmt.Errorf("received fast extension message (type=%v) but extension is disabled", msg.Type)
+		}
+		switch msg.Type {
+		case pp.Choke:
+			if c.peerChoking {
+				break
+			}
+			if !c.fastEnabled() {
+				c.deleteAllRequests("choked by non-fast PeerConn")
+			} else {
+			// We don't decrement pending requests here; let's wait for the peer to either
+			// reject or satisfy the outstanding requests. Additionally, some peers may unchoke
+			// us and resume where they left off, and we don't want to have piled on to those
+			// chunks in the meantime. I think a peer's ability to abuse this should be limited:
+			// they could let us request a lot of stuff, then choke us and never reject, but
+			// they're only a single peer, and our chunk balancing should smooth over this abuse.
+			}
+			c.peerChoking = true
+			c.updateExpectingChunks()
+		case pp.Unchoke:
+			if !c.peerChoking {
+				// Some clients do this for some reason. Transmission doesn't error on this, so we
+				// won't for consistency.
+				c.logProtocolBehaviour(log.Debug, "received unchoke when already unchoked")
+				break
+			}
+			c.peerChoking = false
+			preservedCount := 0
+			c.requestState.Requests.Iterate(func(x RequestIndex) bool {
+				if !c.peerAllowedFast.Contains(c.t.pieceIndexOfRequestIndex(x)) {
+					preservedCount++
+				}
+				return true
+			})
+			if preservedCount != 0 {
+				// TODO: Yes this is a debug log but I'm not happy with the state of the logging lib
+				// right now.
+				c.logger.Levelf(log.Debug,
+					"%v requests were preserved while being choked (fast=%v)",
+					preservedCount,
+					c.fastEnabled())
+
+				torrent.Add("requestsPreservedThroughChoking", int64(preservedCount))
+			}
+			if !c.t._pendingPieces.IsEmpty() {
+				c.updateRequests("unchoked")
+			}
+			c.updateExpectingChunks()
+		case pp.Interested:
+			c.peerInterested = true
+			c.tickleWriter()
+		case pp.NotInterested:
+			c.peerInterested = false
+			// We don't clear their requests since it isn't clear in the spec.
+			// We'll probably choke them for this, which will clear them if
+			// appropriate, and is clearly specified.
+		case pp.Have:
+			err = c.peerSentHave(pieceIndex(msg.Index))
+		case pp.Bitfield:
+			err = c.peerSentBitfield(msg.Bitfield)
+		case pp.Request:
+			r := newRequestFromMessage(&msg)
+			err = c.onReadRequest(r, true)
+			if err != nil {
+				err = fmt.Errorf("on reading request %v: %w", r, err)
+			}
+		case pp.Piece:
+			c.doChunkReadStats(int64(len(msg.Piece)))
+			err = c.receiveChunk(&msg)
+			if len(msg.Piece) == int(t.chunkSize) {
+				t.chunkPool.Put(&msg.Piece)
+			}
+			if err != nil {
+				err = fmt.Errorf("receiving chunk: %w", err)
+			}
+		case pp.Cancel:
+			req := newRequestFromMessage(&msg)
+			c.onPeerSentCancel(req)
+		case pp.Port:
+			ipa, ok := tryIpPortFromNetAddr(c.RemoteAddr)
+			if !ok {
+				break
+			}
+			pingAddr := net.UDPAddr{
+				IP:   ipa.IP,
+				Port: ipa.Port,
+			}
+			if msg.Port != 0 {
+				pingAddr.Port = int(msg.Port)
+			}
+			cl.eachDhtServer(func(s DhtServer) {
+				go s.Ping(&pingAddr)
+			})
+		case pp.Suggest:
+			torrent.Add("suggests received", 1)
+			log.Fmsg("peer suggested piece %d", msg.Index).AddValues(c, msg.Index).LogLevel(log.Debug, c.t.logger)
+			c.updateRequests("suggested")
+		case pp.HaveAll:
+			err = c.onPeerSentHaveAll()
+		case pp.HaveNone:
+			err = c.peerSentHaveNone()
+		case pp.Reject:
+			req := newRequestFromMessage(&msg)
+			if !c.remoteRejectedRequest(c.t.requestIndexFromRequest(req)) {
+				err = fmt.Errorf("received invalid reject for request %v", req)
+				c.logger.Levelf(log.Debug, "%v", err)
+			}
+		case pp.AllowedFast:
+			torrent.Add("allowed fasts received", 1)
+			log.Fmsg("peer allowed fast: %d", msg.Index).AddValues(c).LogLevel(log.Debug, c.t.logger)
+			c.updateRequests("PeerConn.mainReadLoop allowed fast")
+		case pp.Extended:
+			err = c.onReadExtendedMsg(msg.ExtendedID, msg.ExtendedPayload)
+		default:
+			err = fmt.Errorf("received unknown message type: %#v", msg.Type)
+		}
+		if err != nil {
+			return err
+		}
+	}
+}
+
+func (c *PeerConn) onReadExtendedMsg(id pp.ExtensionNumber, payload []byte) (err error) {
+	defer func() {
+		// TODO: Should we still do this?
+		if err != nil {
+			// These clients use their own extension IDs for outgoing message
+			// types, which is incorrect.
+			if bytes.HasPrefix(c.PeerID[:], []byte("-SD0100-")) || strings.HasPrefix(string(c.PeerID[:]), "-XL0012-") {
+				err = nil
+			}
+		}
+	}()
+	t := c.t
+	cl := t.cl
+	switch id {
+	case pp.HandshakeExtendedID:
+		var d pp.ExtendedHandshakeMessage
+		if err := bencode.Unmarshal(payload, &d); err != nil {
+			c.logger.Printf("error parsing extended handshake message %q: %s", payload, err)
+			return fmt.Errorf("unmarshalling extended handshake payload: %w", err)
+		}
+		if cb := c.callbacks.ReadExtendedHandshake; cb != nil {
+			cb(c, &d)
+		}
+		// c.logger.WithDefaultLevel(log.Debug).Printf("received extended handshake message:\n%s", spew.Sdump(d))
+		if d.Reqq != 0 {
+			c.PeerMaxRequests = d.Reqq
+		}
+		c.PeerClientName.Store(d.V)
+		if c.PeerExtensionIDs == nil {
+			c.PeerExtensionIDs = make(map[pp.ExtensionName]pp.ExtensionNumber, len(d.M))
+		}
+		c.PeerListenPort = d.Port
+		c.PeerPrefersEncryption = d.Encryption
+		for name, id := range d.M {
+			if _, ok := c.PeerExtensionIDs[name]; !ok {
+				peersSupportingExtension.Add(
+					// expvar.Var.String must produce valid JSON. "ut_payme\xeet_address" was being
+					// entered here which caused problems later when unmarshalling.
+					strconv.Quote(string(name)),
+					1)
+			}
+			c.PeerExtensionIDs[name] = id
+		}
+		if d.MetadataSize != 0 {
+			if err = t.setMetadataSize(d.MetadataSize); err != nil {
+				return fmt.Errorf("setting metadata size to %d: %w", d.MetadataSize, err)
+			}
+		}
+		c.requestPendingMetadata()
+		if !t.cl.config.DisablePEX {
+			t.pex.Add(c) // we learnt enough now
+			// This checks the extension is supported internally.
+			c.pex.Init(c)
+		}
+		return nil
+	case metadataExtendedId:
+		err := cl.gotMetadataExtensionMsg(payload, t, c)
+		if err != nil {
+			return fmt.Errorf("handling metadata extension message: %w", err)
+		}
+		return nil
+	case pexExtendedId:
+		if !c.pex.IsEnabled() {
+			return nil // or hang-up maybe?
+		}
+		err = c.pex.Recv(payload)
+		if err != nil {
+			err = fmt.Errorf("receiving pex message: %w", err)
+		}
+		return
+	case utHolepunchExtendedId:
+		var msg utHolepunch.Msg
+		err = msg.UnmarshalBinary(payload)
+		if err != nil {
+			err = fmt.Errorf("unmarshalling ut_holepunch message: %w", err)
+			return
+		}
+		err = c.t.handleReceivedUtHolepunchMsg(msg, c)
+		return
+	default:
+		return fmt.Errorf("unexpected extended message ID: %v", id)
+	}
+}
+
+// Set both the Reader and Writer for the connection from a single ReadWriter.
+func (cn *PeerConn) setRW(rw io.ReadWriter) {
+	cn.r = rw
+	cn.w = rw
+}
+
+// Returns the Reader and Writer as a combined ReadWriter.
+func (cn *PeerConn) rw() io.ReadWriter {
+	return struct {
+		io.Reader
+		io.Writer
+	}{cn.r, cn.w}
+}
+
+func (c *PeerConn) uploadAllowed() bool {
+	if c.t.cl.config.NoUpload {
+		return false
+	}
+	if c.t.dataUploadDisallowed {
+		return false
+	}
+	if c.t.seeding() {
+		return true
+	}
+	if !c.peerHasWantedPieces() {
+		return false
+	}
+	// Don't let our uploads run more than 100 KiB ahead of our downloads.
+	if c._stats.BytesWrittenData.Int64() >= c._stats.BytesReadData.Int64()+100<<10 {
+		return false
+	}
+	return true
+}
+
+func (c *PeerConn) setRetryUploadTimer(delay time.Duration) {
+	if c.uploadTimer == nil {
+		c.uploadTimer = time.AfterFunc(delay, c.tickleWriter)
+	} else {
+		c.uploadTimer.Reset(delay)
+	}
+}
+
+// Also handles choking and unchoking of the remote peer.
+func (c *PeerConn) upload(msg func(pp.Message) bool) bool {
+	// Breaking or completing this loop means we don't want to upload to the peer anymore, and we
+	// choke them.
+another:
+	for c.uploadAllowed() {
+		// We want to upload to the peer.
+		if !c.unchoke(msg) {
+			return false
+		}
+		for r, state := range c.peerRequests {
+			if state.data == nil {
+				continue
+			}
+			res := c.t.cl.config.UploadRateLimiter.ReserveN(time.Now(), int(r.Length))
+			if !res.OK() {
+				panic(fmt.Sprintf("upload rate limiter burst size < %d", r.Length))
+			}
+			delay := res.Delay()
+			if delay > 0 {
+				res.Cancel()
+				c.setRetryUploadTimer(delay)
+				// Hard to say what to return here.
+				return true
+			}
+			more := c.sendChunk(r, msg, state)
+			delete(c.peerRequests, r)
+			if !more {
+				return false
+			}
+			goto another
+		}
+		return true
+	}
+	return c.choke(msg)
+}
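+
+// exampleReserveOrRetry is an illustrative sketch added alongside this vendored file, not
+// upstream code. It isolates the rate-limiter pattern used in upload above: reserve capacity
+// for a chunk, and if the reservation cannot be satisfied immediately, cancel it and schedule
+// a retry after the indicated delay rather than blocking the writer goroutine.
+func exampleReserveOrRetry(l *rate.Limiter, n int, retry func(time.Duration)) bool {
+	res := l.ReserveN(time.Now(), n)
+	if !res.OK() {
+		panic("rate limiter burst is smaller than n")
+	}
+	if delay := res.Delay(); delay > 0 {
+		res.Cancel()
+		retry(delay)
+		return false
+	}
+	return true
+}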
+
+func (cn *PeerConn) drop() {
+	cn.t.dropConnection(cn)
+}
+
+func (cn *PeerConn) ban() {
+	cn.t.cl.banPeerIP(cn.remoteIp())
+}
+
+// This is called when something has changed that should wake the writer, such as putting stuff into
+// the writeBuffer, or changing some state that the writer can act on.
+func (c *PeerConn) tickleWriter() {
+	c.messageWriter.writeCond.Broadcast()
+}
+
+func (c *PeerConn) sendChunk(r Request, msg func(pp.Message) bool, state *peerRequestState) (more bool) {
+	c.lastChunkSent = time.Now()
+	state.allocReservation.Release()
+	return msg(pp.Message{
+		Type:  pp.Piece,
+		Index: r.Index,
+		Begin: r.Begin,
+		Piece: state.data,
+	})
+}
+
+func (c *PeerConn) setTorrent(t *Torrent) {
+	if c.t != nil {
+		panic("connection already associated with a torrent")
+	}
+	c.t = t
+	c.logger.WithDefaultLevel(log.Debug).Printf("set torrent=%v", t)
+	t.reconcileHandshakeStats(c)
+}
+
+func (c *PeerConn) pexPeerFlags() pp.PexPeerFlags {
+	f := pp.PexPeerFlags(0)
+	if c.PeerPrefersEncryption {
+		f |= pp.PexPrefersEncryption
+	}
+	if c.outgoing {
+		f |= pp.PexOutgoingConn
+	}
+	if c.utp() {
+		f |= pp.PexSupportsUtp
+	}
+	return f
+}
+
+// This returns the address to use if we want to dial the peer again. It incorporates the peer's
+// advertised listen port.
+func (c *PeerConn) dialAddr() PeerRemoteAddr {
+	if c.outgoing || c.PeerListenPort == 0 {
+		return c.RemoteAddr
+	}
+	addrPort, err := addrPortFromPeerRemoteAddr(c.RemoteAddr)
+	if err != nil {
+		c.logger.Levelf(
+			log.Warning,
+			"error parsing %q for alternate dial port: %v",
+			c.RemoteAddr,
+			err,
+		)
+		return c.RemoteAddr
+	}
+	return netip.AddrPortFrom(addrPort.Addr(), uint16(c.PeerListenPort))
+}
+
+func (c *PeerConn) pexEvent(t pexEventType) (_ pexEvent, err error) {
+	f := c.pexPeerFlags()
+	dialAddr := c.dialAddr()
+	addr, err := addrPortFromPeerRemoteAddr(dialAddr)
+	if err != nil || !addr.IsValid() {
+		err = fmt.Errorf("parsing dial addr %q: %w", dialAddr, err)
+		return
+	}
+	return pexEvent{t, addr, f, nil}, nil
+}
+
+func (pc *PeerConn) String() string {
+	return fmt.Sprintf("%T %p [id=%+q, exts=%v, v=%q]", pc, pc, pc.PeerID, pc.PeerExtensionBytes, pc.PeerClientName.Load())
+}
+
+// Returns the pieces the peer could have based on their claims. If we don't know how many pieces
+// are in the torrent, it could be a very large range if the peer has sent HaveAll.
+func (pc *PeerConn) PeerPieces() *roaring.Bitmap {
+	pc.locker().RLock()
+	defer pc.locker().RUnlock()
+	return pc.newPeerPieces()
+}
+
+func (pc *PeerConn) remoteIsTransmission() bool {
+	return bytes.HasPrefix(pc.PeerID[:], []byte("-TR")) && pc.PeerID[7] == '-'
+}
+
+func (pc *PeerConn) remoteDialAddrPort() (netip.AddrPort, error) {
+	dialAddr := pc.dialAddr()
+	return addrPortFromPeerRemoteAddr(dialAddr)
+}
+
+func (pc *PeerConn) bitExtensionEnabled(bit pp.ExtensionBit) bool {
+	return pc.t.cl.config.Extensions.GetBit(bit) && pc.PeerExtensionBytes.GetBit(bit)
+}
+
+func (cn *PeerConn) peerPiecesChanged() {
+	cn.t.maybeDropMutuallyCompletePeer(cn)
+}
+
+// Returns whether the connection could be useful to us. We're seeding and
+// they want data, we don't have metainfo and they can provide it, etc.
+func (c *PeerConn) useful() bool {
+	t := c.t
+	if c.closed.IsSet() {
+		return false
+	}
+	if !t.haveInfo() {
+		return c.supportsExtension("ut_metadata")
+	}
+	if t.seeding() && c.peerInterested {
+		return true
+	}
+	if c.peerHasWantedPieces() {
+		return true
+	}
+	return false
+}
diff --git a/deps/github.com/anacrolix/torrent/peerconn_test.go b/deps/github.com/anacrolix/torrent/peerconn_test.go
new file mode 100644
index 0000000..e294b6b
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peerconn_test.go
@@ -0,0 +1,368 @@
+package torrent
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"testing"
+
+	"github.com/frankban/quicktest"
+	qt "github.com/frankban/quicktest"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/time/rate"
+
+	"github.com/anacrolix/torrent/metainfo"
+	pp "github.com/anacrolix/torrent/peer_protocol"
+	"github.com/anacrolix/torrent/storage"
+)
+
+// Ensure that no race exists between sending a bitfield, and a subsequent
+// Have that would potentially alter it.
+func TestSendBitfieldThenHave(t *testing.T) {
+	var cl Client
+	cl.init(TestingConfig(t))
+	cl.initLogger()
+	c := cl.newConnection(nil, newConnectionOpts{network: "io.Pipe"})
+	c.setTorrent(cl.newTorrent(metainfo.Hash{}, nil))
+	if err := c.t.setInfo(&metainfo.Info{Pieces: make([]byte, metainfo.HashSize*3)}); err != nil {
+		t.Log(err)
+	}
+	r, w := io.Pipe()
+	// c.r = r
+	c.w = w
+	c.startMessageWriter()
+	c.locker().Lock()
+	c.t._completedPieces.Add(1)
+	c.postBitfield( /*[]bool{false, true, false}*/ )
+	c.locker().Unlock()
+	c.locker().Lock()
+	c.have(2)
+	c.locker().Unlock()
+	b := make([]byte, 15)
+	n, err := io.ReadFull(r, b)
+	c.locker().Lock()
+	// This will cause connection.writer to terminate.
+	c.closed.Set()
+	c.locker().Unlock()
+	require.NoError(t, err)
+	require.EqualValues(t, 15, n)
+	// Here we see that the bitfield doesn't have piece 2 set, as that should
+	// arrive in the following Have message.
+	require.EqualValues(t, "\x00\x00\x00\x02\x05@\x00\x00\x00\x05\x04\x00\x00\x00\x02", string(b))
+}
+
+type torrentStorage struct {
+	writeSem sync.Mutex
+}
+
+func (me *torrentStorage) Close() error { return nil }
+
+func (me *torrentStorage) Piece(mp metainfo.Piece) storage.PieceImpl {
+	return me
+}
+
+func (me *torrentStorage) Completion() storage.Completion {
+	return storage.Completion{}
+}
+
+func (me *torrentStorage) MarkComplete() error {
+	return nil
+}
+
+func (me *torrentStorage) MarkNotComplete() error {
+	return nil
+}
+
+func (me *torrentStorage) ReadAt([]byte, int64) (int, error) {
+	panic("shouldn't be called")
+}
+
+func (me *torrentStorage) WriteAt(b []byte, _ int64) (int, error) {
+	if len(b) != defaultChunkSize {
+		panic(len(b))
+	}
+	me.writeSem.Unlock()
+	return len(b), nil
+}
+
+func BenchmarkConnectionMainReadLoop(b *testing.B) {
+	c := quicktest.New(b)
+	var cl Client
+	cl.init(&ClientConfig{
+		DownloadRateLimiter: unlimited,
+	})
+	cl.initLogger()
+	ts := &torrentStorage{}
+	t := cl.newTorrent(metainfo.Hash{}, nil)
+	t.initialPieceCheckDisabled = true
+	require.NoError(b, t.setInfo(&metainfo.Info{
+		Pieces:      make([]byte, 20),
+		Length:      1 << 20,
+		PieceLength: 1 << 20,
+	}))
+	t.storage = &storage.Torrent{TorrentImpl: storage.TorrentImpl{Piece: ts.Piece, Close: ts.Close}}
+	t.onSetInfo()
+	t._pendingPieces.Add(0)
+	r, w := net.Pipe()
+	cn := cl.newConnection(r, newConnectionOpts{
+		outgoing:   true,
+		remoteAddr: r.RemoteAddr(),
+		network:    r.RemoteAddr().Network(),
+		connString: regularNetConnPeerConnConnString(r),
+	})
+	cn.setTorrent(t)
+	mrlErrChan := make(chan error)
+	msg := pp.Message{
+		Type:  pp.Piece,
+		Piece: make([]byte, defaultChunkSize),
+	}
+	go func() {
+		cl.lock()
+		err := cn.mainReadLoop()
+		if err != nil {
+			mrlErrChan <- err
+		}
+		close(mrlErrChan)
+	}()
+	wb := msg.MustMarshalBinary()
+	b.SetBytes(int64(len(msg.Piece)))
+	go func() {
+		ts.writeSem.Lock()
+		for i := 0; i < b.N; i += 1 {
+			cl.lock()
+			// The chunk must be written to storage every time, to ensure the
+			// writeSem is unlocked.
+			t.pendAllChunkSpecs(0)
+			cn.validReceiveChunks = map[RequestIndex]int{
+				t.requestIndexFromRequest(newRequestFromMessage(&msg)): 1,
+			}
+			cl.unlock()
+			n, err := w.Write(wb)
+			require.NoError(b, err)
+			require.EqualValues(b, len(wb), n)
+			ts.writeSem.Lock()
+		}
+		if err := w.Close(); err != nil {
+			panic(err)
+		}
+	}()
+	mrlErr := <-mrlErrChan
+	if mrlErr != nil && !errors.Is(mrlErr, io.EOF) {
+		c.Fatal(mrlErr)
+	}
+	c.Assert(cn._stats.ChunksReadUseful.Int64(), quicktest.Equals, int64(b.N))
+}
+
+func TestConnPexPeerFlags(t *testing.T) {
+	var (
+		tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
+		udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
+	)
+	testcases := []struct {
+		conn *PeerConn
+		f    pp.PexPeerFlags
+	}{
+		{&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: false}}, 0},
+		{&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: true}}, pp.PexPrefersEncryption},
+		{&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: false}}, pp.PexOutgoingConn},
+		{&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: true}}, pp.PexOutgoingConn | pp.PexPrefersEncryption},
+		{&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}}, pp.PexSupportsUtp},
+		{&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn | pp.PexSupportsUtp},
+		{&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn},
+		{&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network()}}, 0},
+	}
+	for i, tc := range testcases {
+		f := tc.conn.pexPeerFlags()
+		require.EqualValues(t, tc.f, f, i)
+	}
+}
+
+func TestConnPexEvent(t *testing.T) {
+	c := qt.New(t)
+	var (
+		udpAddr     = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
+		tcpAddr     = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
+		dialTcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4747}
+		dialUdpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4747}
+	)
+	testcases := []struct {
+		t pexEventType
+		c *PeerConn
+		e pexEvent
+	}{
+		{
+			pexAdd,
+			&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}},
+			pexEvent{pexAdd, udpAddr.AddrPort(), pp.PexSupportsUtp, nil},
+		},
+		{
+			pexDrop,
+			&PeerConn{
+				Peer:           Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true},
+				PeerListenPort: dialTcpAddr.Port,
+			},
+			pexEvent{pexDrop, tcpAddr.AddrPort(), pp.PexOutgoingConn, nil},
+		},
+		{
+			pexAdd,
+			&PeerConn{
+				Peer:           Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network()},
+				PeerListenPort: dialTcpAddr.Port,
+			},
+			pexEvent{pexAdd, dialTcpAddr.AddrPort(), 0, nil},
+		},
+		{
+			pexDrop,
+			&PeerConn{
+				Peer:           Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()},
+				PeerListenPort: dialUdpAddr.Port,
+			},
+			pexEvent{pexDrop, dialUdpAddr.AddrPort(), pp.PexSupportsUtp, nil},
+		},
+	}
+	for i, tc := range testcases {
+		c.Run(fmt.Sprintf("%v", i), func(c *qt.C) {
+			e, err := tc.c.pexEvent(tc.t)
+			c.Assert(err, qt.IsNil)
+			c.Check(e, qt.Equals, tc.e)
+		})
+	}
+}
+
+func TestHaveAllThenBitfield(t *testing.T) {
+	c := qt.New(t)
+	cl := newTestingClient(t)
+	tt := cl.newTorrentForTesting()
+	// cl.newConnection()
+	pc := PeerConn{
+		Peer: Peer{t: tt},
+	}
+	pc.initRequestState()
+	pc.peerImpl = &pc
+	tt.conns[&pc] = struct{}{}
+	c.Assert(pc.onPeerSentHaveAll(), qt.IsNil)
+	c.Check(pc.t.connsWithAllPieces, qt.DeepEquals, map[*Peer]struct{}{&pc.Peer: {}})
+	pc.peerSentBitfield([]bool{false, false, true, false, true, true, false, false})
+	c.Check(pc.peerMinPieces, qt.Equals, 6)
+	c.Check(pc.t.connsWithAllPieces, qt.HasLen, 0)
+	c.Assert(pc.t.setInfo(&metainfo.Info{
+		PieceLength: 0,
+		Pieces:      make([]byte, pieceHash.Size()*7),
+	}), qt.IsNil)
+	pc.t.onSetInfo()
+	c.Check(tt.numPieces(), qt.Equals, 7)
+	c.Check(tt.pieceAvailabilityRuns(), qt.DeepEquals, []pieceAvailabilityRun{
+		// The last element of the bitfield is irrelevant, as the Torrent actually only has 7
+		// pieces.
+		{2, 0}, {1, 1}, {1, 0}, {2, 1}, {1, 0},
+	})
+}
+
+func TestApplyRequestStateWriteBufferConstraints(t *testing.T) {
+	c := qt.New(t)
+	c.Check(interestedMsgLen, qt.Equals, 5)
+	c.Check(requestMsgLen, qt.Equals, 17)
+	c.Check(maxLocalToRemoteRequests >= 8, qt.IsTrue)
+	c.Logf("max local to remote requests: %v", maxLocalToRemoteRequests)
+}
+
+func peerConnForPreferredNetworkDirection(
+	localPeerId, remotePeerId int,
+	outgoing, utp, ipv6 bool,
+) *PeerConn {
+	pc := PeerConn{}
+	pc.outgoing = outgoing
+	if utp {
+		pc.Network = "udp"
+	}
+	if ipv6 {
+		pc.RemoteAddr = &net.TCPAddr{IP: net.ParseIP("::420")}
+	} else {
+		pc.RemoteAddr = &net.TCPAddr{IP: net.IPv4(1, 2, 3, 4)}
+	}
+	binary.BigEndian.PutUint64(pc.PeerID[:], uint64(remotePeerId))
+	cl := Client{}
+	binary.BigEndian.PutUint64(cl.peerID[:], uint64(localPeerId))
+	pc.t = &Torrent{cl: &cl}
+	return &pc
+}
+
+func TestPreferredNetworkDirection(t *testing.T) {
+	pc := peerConnForPreferredNetworkDirection
+	c := qt.New(t)
+
+	// Prefer outgoing to lower peer ID
+
+	c.Check(
+		pc(1, 2, true, false, false).hasPreferredNetworkOver(pc(1, 2, false, false, false)),
+		qt.IsFalse,
+	)
+	c.Check(
+		pc(1, 2, false, false, false).hasPreferredNetworkOver(pc(1, 2, true, false, false)),
+		qt.IsTrue,
+	)
+	c.Check(
+		pc(2, 1, false, false, false).hasPreferredNetworkOver(pc(2, 1, true, false, false)),
+		qt.IsFalse,
+	)
+
+	// Don't prefer uTP
+	c.Check(
+		pc(1, 2, false, true, false).hasPreferredNetworkOver(pc(1, 2, false, false, false)),
+		qt.IsFalse,
+	)
+	// Prefer IPv6
+	c.Check(
+		pc(1, 2, false, false, false).hasPreferredNetworkOver(pc(1, 2, false, false, true)),
+		qt.IsFalse,
+	)
+	// No difference
+	c.Check(
+		pc(1, 2, false, false, false).hasPreferredNetworkOver(pc(1, 2, false, false, false)),
+		qt.IsFalse,
+	)
+}
+
+func TestReceiveLargeRequest(t *testing.T) {
+	c := qt.New(t)
+	cl := newTestingClient(t)
+	pc := cl.newConnection(nil, newConnectionOpts{network: "test"})
+	tor := cl.newTorrentForTesting()
+	tor.info = &metainfo.Info{PieceLength: 3 << 20}
+	pc.setTorrent(tor)
+	tor._completedPieces.Add(0)
+	pc.PeerExtensionBytes.SetBit(pp.ExtensionBitFast, true)
+	pc.choking = false
+	pc.initMessageWriter()
+	req := Request{}
+	req.Length = defaultChunkSize
+	c.Assert(pc.fastEnabled(), qt.IsTrue)
+	c.Check(pc.onReadRequest(req, false), qt.IsNil)
+	c.Check(pc.peerRequests, qt.HasLen, 1)
+	req.Length = 2 << 20
+	c.Check(pc.onReadRequest(req, false), qt.IsNil)
+	c.Check(pc.peerRequests, qt.HasLen, 2)
+	pc.peerRequests = nil
+	pc.t.cl.config.UploadRateLimiter = rate.NewLimiter(1, defaultChunkSize)
+	req.Length = defaultChunkSize
+	c.Check(pc.onReadRequest(req, false), qt.IsNil)
+	c.Check(pc.peerRequests, qt.HasLen, 1)
+	req.Length = 2 << 20
+	c.Check(pc.onReadRequest(req, false), qt.IsNil)
+	c.Check(pc.messageWriter.writeBuffer.Len(), qt.Equals, 17)
+}
+
+func TestChunkOverflowsPiece(t *testing.T) {
+	c := qt.New(t)
+	check := func(begin, length, limit pp.Integer, expected bool) {
+		c.Check(chunkOverflowsPiece(ChunkSpec{begin, length}, limit), qt.Equals, expected)
+	}
+	check(2, 3, 1, true)
+	check(2, pp.IntegerMax, 1, true)
+	check(2, pp.IntegerMax, 3, true)
+	check(2, pp.IntegerMax, pp.IntegerMax, true)
+	check(2, pp.IntegerMax-2, pp.IntegerMax, false)
+}
diff --git a/deps/github.com/anacrolix/torrent/peerid.go b/deps/github.com/anacrolix/torrent/peerid.go
new file mode 100644
index 0000000..301c0e9
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peerid.go
@@ -0,0 +1,5 @@
+package torrent
+
+import "github.com/anacrolix/torrent/types"
+
+type PeerID = types.PeerID
diff --git a/deps/github.com/anacrolix/torrent/peerid_test.go b/deps/github.com/anacrolix/torrent/peerid_test.go
new file mode 100644
index 0000000..bcf0999
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/peerid_test.go
@@ -0,0 +1,16 @@
+package torrent
+
+// func TestPeerIdString(t *testing.T) {
+// 	for _, _case := range []struct {
+// 		id string
+// 		s  string
+// 	}{
+// 		{"\x1cNJ}\x9c\xc7\xc4o\x94<\x9b\x8c\xc2!I\x1c\a\xec\x98n", "\"\x1cNJ}\x9c\xc7\xc4o\x94<\x9b\x8c\xc2!I\x1c\a\xec\x98n\""},
+// 		{"-FD51W\xe4-LaZMk0N8ZLA7", "-FD51W\xe4-LaZMk0N8ZLA7"},
+// 	} {
+// 		var pi PeerID
+// 		missinggo.CopyExact(&pi, _case.id)
+// 		assert.EqualValues(t, _case.s, pi.String())
+// 		assert.EqualValues(t, fmt.Sprintf("%q", _case.s), fmt.Sprintf("%q", pi))
+// 	}
+// }
diff --git a/deps/github.com/anacrolix/torrent/pending-requests.go b/deps/github.com/anacrolix/torrent/pending-requests.go
new file mode 100644
index 0000000..dcb1faf
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/pending-requests.go
@@ -0,0 +1,50 @@
+package torrent
+
+import (
+	rbm "github.com/RoaringBitmap/roaring"
+	roaring "github.com/RoaringBitmap/roaring/BitSliceIndexing"
+)
+
+type pendingRequests struct {
+	m *roaring.BSI
+}
+
+func (p *pendingRequests) Dec(r RequestIndex) {
+	_r := uint64(r)
+	prev, _ := p.m.GetValue(_r)
+	if prev <= 0 {
+		panic(prev)
+	}
+	p.m.SetValue(_r, prev-1)
+}
+
+func (p *pendingRequests) Inc(r RequestIndex) {
+	_r := uint64(r)
+	prev, _ := p.m.GetValue(_r)
+	p.m.SetValue(_r, prev+1)
+}
+
+func (p *pendingRequests) Init(maxIndex RequestIndex) {
+	p.m = roaring.NewDefaultBSI()
+}
+
+var allBits rbm.Bitmap
+
+func init() {
+	allBits.AddRange(0, rbm.MaxRange)
+}
+
+func (p *pendingRequests) AssertEmpty() {
+	if p.m == nil {
+		panic(p.m)
+	}
+	sum, _ := p.m.Sum(&allBits)
+	if sum != 0 {
+		panic(sum)
+	}
+}
+
+func (p *pendingRequests) Get(r RequestIndex) int {
+	count, _ := p.m.GetValue(uint64(r))
+	return int(count)
+}
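+
+// Illustrative usage sketch (not part of the upstream API): the BSI behaves
+// like a compressed map[RequestIndex]count, so duplicate requests across
+// peers can be tracked like this:
+//
+//	var p pendingRequests
+//	p.Init(0)    // maxIndex is currently unused; a default BSI is allocated
+//	p.Inc(7)     // request 7 assigned to peer A
+//	p.Inc(7)     // request 7 also assigned to peer B
+//	p.Dec(7)     // peer A cancelled
+//	_ = p.Get(7) // 1: peer B still holds it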
diff --git a/deps/github.com/anacrolix/torrent/pending-requests_test.go b/deps/github.com/anacrolix/torrent/pending-requests_test.go
new file mode 100644
index 0000000..6c9572e
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/pending-requests_test.go
@@ -0,0 +1,12 @@
+package torrent
+
+// // Ensure that cmp.Diff will detect errors as required.
+// func TestPendingRequestsDiff(t *testing.T) {
+// 	var a, b pendingRequests
+// 	c := qt.New(t)
+// 	diff := func() string { return cmp.Diff(a.m, b.m) }
+// 	c.Check(diff(), qt.ContentEquals, "")
+// 	a.m = []int{1, 3}
+// 	b.m = []int{1, 2, 3}
+// 	c.Check(diff(), qt.Not(qt.Equals), "")
+// }
diff --git a/deps/github.com/anacrolix/torrent/pex.go b/deps/github.com/anacrolix/torrent/pex.go
new file mode 100644
index 0000000..a0a5f49
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/pex.go
@@ -0,0 +1,246 @@
+package torrent
+
+import (
+	"net"
+	"net/netip"
+	"sync"
+	"time"
+
+	pp "github.com/anacrolix/torrent/peer_protocol"
+)
+
+type pexEventType int
+
+const (
+	pexAdd pexEventType = iota
+	pexDrop
+)
+
+// internal, based on BEP11
+const (
+	pexTargAdded = 25 // put drops on hold when the number of alive connections is lower than this
+	pexMaxHold   = 25 // length of the drop hold-back buffer
+	pexMaxDelta  = 50 // upper bound on added+added6 and dropped+dropped6 in a single PEX message
+)
+
+// represents a single connection (t=pexAdd) or disconnection (t=pexDrop) event
+type pexEvent struct {
+	t    pexEventType
+	addr netip.AddrPort
+	f    pp.PexPeerFlags
+	next *pexEvent // event feed list
+}
+
+// facilitates efficient de-duplication while generating PEX messages
+type pexMsgFactory struct {
+	msg     pp.PexMsg
+	added   map[netip.AddrPort]struct{}
+	dropped map[netip.AddrPort]struct{}
+}
+
+func (me *pexMsgFactory) DeltaLen() int {
+	return int(max(
+		int64(len(me.added)),
+		int64(len(me.dropped))))
+}
+
+type addrKey = netip.AddrPort
+
+// Returns the key to use to identify a given addr in the factory.
+func (me *pexMsgFactory) addrKey(addr netip.AddrPort) addrKey {
+	return addr
+}
+
+// Adds the event to the pending message. If a drop for the same address is
+// already queued, the pair cancels out instead of consuming the delta limit.
+func (me *pexMsgFactory) add(e pexEvent) {
+	key := me.addrKey(e.addr)
+	if _, ok := me.added[key]; ok {
+		return
+	}
+	if me.added == nil {
+		me.added = make(map[addrKey]struct{}, pexMaxDelta)
+	}
+	addr := krpcNodeAddrFromAddrPort(e.addr)
+	m := &me.msg
+	switch {
+	case addr.IP.To4() != nil:
+		if _, ok := me.dropped[key]; ok {
+			if i := m.Dropped.Index(addr); i >= 0 {
+				m.Dropped = append(m.Dropped[:i], m.Dropped[i+1:]...)
+			}
+			delete(me.dropped, key)
+			return
+		}
+		m.Added = append(m.Added, addr)
+		m.AddedFlags = append(m.AddedFlags, e.f)
+	case len(addr.IP) == net.IPv6len:
+		if _, ok := me.dropped[key]; ok {
+			if i := m.Dropped6.Index(addr); i >= 0 {
+				m.Dropped6 = append(m.Dropped6[:i], m.Dropped6[i+1:]...)
+			}
+			delete(me.dropped, key)
+			return
+		}
+		m.Added6 = append(m.Added6, addr)
+		m.Added6Flags = append(m.Added6Flags, e.f)
+	default:
+		panic(addr)
+	}
+	me.added[key] = struct{}{}
+}
+
+// Records a drop in the pending message. If an add for the same address is
+// already queued, the pair cancels out instead of consuming the delta limit.
+func (me *pexMsgFactory) drop(e pexEvent) {
+	addr := krpcNodeAddrFromAddrPort(e.addr)
+	key := me.addrKey(e.addr)
+	if me.dropped == nil {
+		me.dropped = make(map[addrKey]struct{}, pexMaxDelta)
+	}
+	if _, ok := me.dropped[key]; ok {
+		return
+	}
+	m := &me.msg
+	switch {
+	case addr.IP.To4() != nil:
+		if _, ok := me.added[key]; ok {
+			if i := m.Added.Index(addr); i >= 0 {
+				m.Added = append(m.Added[:i], m.Added[i+1:]...)
+				m.AddedFlags = append(m.AddedFlags[:i], m.AddedFlags[i+1:]...)
+			}
+			delete(me.added, key)
+			return
+		}
+		m.Dropped = append(m.Dropped, addr)
+	case len(addr.IP) == net.IPv6len:
+		if _, ok := me.added[key]; ok {
+			if i := m.Added6.Index(addr); i >= 0 {
+				m.Added6 = append(m.Added6[:i], m.Added6[i+1:]...)
+				m.Added6Flags = append(m.Added6Flags[:i], m.Added6Flags[i+1:]...)
+			}
+			delete(me.added, key)
+			return
+		}
+		m.Dropped6 = append(m.Dropped6, addr)
+	}
+	me.dropped[key] = struct{}{}
+}
+
+func (me *pexMsgFactory) append(event pexEvent) {
+	switch event.t {
+	case pexAdd:
+		me.add(event)
+	case pexDrop:
+		me.drop(event)
+	default:
+		panic(event.t)
+	}
+}
+
+func (me *pexMsgFactory) PexMsg() *pp.PexMsg {
+	return &me.msg
+}
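+
+// Illustrative sketch: because adds and drops for the same address cancel
+// out, a connect/disconnect pair observed between two messages costs no
+// delta budget. The address below is hypothetical.
+//
+//	var f pexMsgFactory
+//	ap := netip.MustParseAddrPort("10.0.0.1:6881")
+//	f.append(pexEvent{t: pexAdd, addr: ap})
+//	f.append(pexEvent{t: pexDrop, addr: ap})
+//	f.PexMsg().Len() // 0: the events cancelled out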
+
+// Per-torrent PEX state
+type pexState struct {
+	sync.RWMutex
+	tail *pexEvent  // event feed list
+	hold []pexEvent // delayed drops
+	// Torrent-wide cooldown deadline on inbound. This exists to prevent PEX from drowning out other
+	// peer address sources, until that is fixed.
+	rest time.Time
+	nc   int           // net number of alive conns
+	msg0 pexMsgFactory // initial message
+}
+
+// Reset wipes the state clean, releasing resources. Called from Torrent.Close().
+func (s *pexState) Reset() {
+	s.Lock()
+	defer s.Unlock()
+	s.tail = nil
+	s.hold = nil
+	s.nc = 0
+	s.rest = time.Time{}
+	s.msg0 = pexMsgFactory{}
+}
+
+func (s *pexState) append(e *pexEvent) {
+	if s.tail != nil {
+		s.tail.next = e
+	}
+	s.tail = e
+	s.msg0.append(*e)
+}
+
+func (s *pexState) Add(c *PeerConn) {
+	e, err := c.pexEvent(pexAdd)
+	if err != nil {
+		return
+	}
+	s.Lock()
+	defer s.Unlock()
+	s.nc++
+	if s.nc >= pexTargAdded {
+		for _, e := range s.hold {
+			ne := e
+			s.append(&ne)
+		}
+		s.hold = s.hold[:0]
+	}
+	c.pex.Listed = true
+	s.append(&e)
+}
+
+func (s *pexState) Drop(c *PeerConn) {
+	if !c.pex.Listed {
+		// skip connections which were not previously Added
+		return
+	}
+	e, err := c.pexEvent(pexDrop)
+	if err != nil {
+		return
+	}
+	s.Lock()
+	defer s.Unlock()
+	s.nc--
+	if s.nc < pexTargAdded && len(s.hold) < pexMaxHold {
+		s.hold = append(s.hold, e)
+	} else {
+		s.append(&e)
+	}
+}
+
+// Generate a PEX message based on the event feed.
+// Also returns a pointer to pass to the subsequent calls
+// to produce incremental deltas.
+func (s *pexState) Genmsg(start *pexEvent) (pp.PexMsg, *pexEvent) {
+	s.RLock()
+	defer s.RUnlock()
+	if start == nil {
+		return *s.msg0.PexMsg(), s.tail
+	}
+	var msg pexMsgFactory
+	last := start
+	for e := start.next; e != nil; e = e.next {
+		if msg.DeltaLen() >= pexMaxDelta {
+			break
+		}
+		msg.append(*e)
+		last = e
+	}
+	return *msg.PexMsg(), last
+}
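+
+// Illustrative call pattern (t is a hypothetical *Torrent): pass nil to get
+// the full state plus a cursor, then feed the cursor back in to receive only
+// the events that arrived in between.
+//
+//	msg, last := t.pex.Genmsg(nil)
+//	// ... more Add/Drop events occur ...
+//	delta, last2 := t.pex.Genmsg(last)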
+
+// The same as Genmsg but just counts up the distinct events that haven't been sent.
+func (s *pexState) numPending(start *pexEvent) (num int) {
+	s.RLock()
+	defer s.RUnlock()
+	if start == nil {
+		return s.msg0.PexMsg().Len()
+	}
+	for e := start.next; e != nil; e = e.next {
+		num++
+	}
+	return
+}
diff --git a/deps/github.com/anacrolix/torrent/pex_test.go b/deps/github.com/anacrolix/torrent/pex_test.go
new file mode 100644
index 0000000..089e0df
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/pex_test.go
@@ -0,0 +1,327 @@
+package torrent
+
+import (
+	"net"
+	"testing"
+
+	"github.com/anacrolix/dht/v2/krpc"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	pp "github.com/anacrolix/torrent/peer_protocol"
+)
+
+var (
+	addrs6 = []net.Addr{
+		&net.TCPAddr{IP: net.IPv6loopback, Port: 4747},
+		&net.TCPAddr{IP: net.IPv6loopback, Port: 4748},
+		&net.TCPAddr{IP: net.IPv6loopback, Port: 4749},
+		&net.TCPAddr{IP: net.IPv6loopback, Port: 4750},
+	}
+	addrs4 = []net.Addr{
+		&net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 4747},
+		&net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 4748},
+		&net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 4749},
+		&net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 4750},
+	}
+	addrs = []net.Addr{
+		addrs6[0],
+		addrs6[1],
+		addrs4[0],
+		addrs4[1],
+	}
+)
+
+func TestPexReset(t *testing.T) {
+	s := &pexState{}
+	conns := []PeerConn{
+		{Peer: Peer{RemoteAddr: addrs[0]}},
+		{Peer: Peer{RemoteAddr: addrs[1]}},
+		{Peer: Peer{RemoteAddr: addrs[2]}},
+	}
+	s.Add(&conns[0])
+	s.Add(&conns[1])
+	s.Drop(&conns[0])
+	s.Reset()
+	targ := new(pexState)
+	require.EqualValues(t, targ, s)
+}
+
+func krpcNodeAddrFromNetAddr(addr net.Addr) krpc.NodeAddr {
+	addrPort, err := addrPortFromPeerRemoteAddr(addr)
+	if err != nil {
+		panic(err)
+	}
+	return krpcNodeAddrFromAddrPort(addrPort)
+}
+
+var testcases = []struct {
+	name   string
+	in     *pexState
+	targ   pp.PexMsg
+	update func(*pexState)
+	targ1  pp.PexMsg
+}{
+	{
+		name: "empty",
+		in:   &pexState{},
+		targ: pp.PexMsg{},
+	},
+	{
+		name: "add0",
+		in: func() *pexState {
+			s := new(pexState)
+			nullAddr := &net.TCPAddr{}
+			s.Add(&PeerConn{Peer: Peer{RemoteAddr: nullAddr}})
+			return s
+		}(),
+		targ: pp.PexMsg{},
+	},
+	{
+		name: "drop0",
+		in: func() *pexState {
+			nullAddr := &net.TCPAddr{}
+			s := new(pexState)
+			s.Drop(&PeerConn{Peer: Peer{RemoteAddr: nullAddr}, pex: pexConnState{Listed: true}})
+			return s
+		}(),
+		targ: pp.PexMsg{},
+	},
+	{
+		name: "add4",
+		in: func() *pexState {
+			s := new(pexState)
+			s.Add(&PeerConn{Peer: Peer{RemoteAddr: addrs[0]}})
+			s.Add(&PeerConn{Peer: Peer{RemoteAddr: addrs[1], outgoing: true}})
+			s.Add(&PeerConn{Peer: Peer{RemoteAddr: addrs[2], outgoing: true}})
+			s.Add(&PeerConn{Peer: Peer{RemoteAddr: addrs[3]}})
+			return s
+		}(),
+		targ: pp.PexMsg{
+			Added: krpc.CompactIPv4NodeAddrs{
+				krpcNodeAddrFromNetAddr(addrs[2]),
+				krpcNodeAddrFromNetAddr(addrs[3]),
+			},
+			AddedFlags: []pp.PexPeerFlags{pp.PexOutgoingConn, 0},
+			Added6: krpc.CompactIPv6NodeAddrs{
+				krpcNodeAddrFromNetAddr(addrs[0]),
+				krpcNodeAddrFromNetAddr(addrs[1]),
+			},
+			Added6Flags: []pp.PexPeerFlags{0, pp.PexOutgoingConn},
+		},
+	},
+	{
+		name: "drop2",
+		in: func() *pexState {
+			s := &pexState{nc: pexTargAdded + 2}
+			s.Drop(&PeerConn{Peer: Peer{RemoteAddr: addrs[0]}, pex: pexConnState{Listed: true}})
+			s.Drop(&PeerConn{Peer: Peer{RemoteAddr: addrs[2]}, pex: pexConnState{Listed: true}})
+			return s
+		}(),
+		targ: pp.PexMsg{
+			Dropped: krpc.CompactIPv4NodeAddrs{
+				krpcNodeAddrFromNetAddr(addrs[2]),
+			},
+			Dropped6: krpc.CompactIPv6NodeAddrs{
+				krpcNodeAddrFromNetAddr(addrs[0]),
+			},
+		},
+	},
+	{
+		name: "add2drop1",
+		in: func() *pexState {
+			conns := []PeerConn{
+				{Peer: Peer{RemoteAddr: addrs[0]}},
+				{Peer: Peer{RemoteAddr: addrs[1]}},
+				{Peer: Peer{RemoteAddr: addrs[2]}},
+			}
+			s := &pexState{nc: pexTargAdded}
+			s.Add(&conns[0])
+			s.Add(&conns[1])
+			s.Drop(&conns[0])
+			s.Drop(&conns[2]) // to be ignored: it wasn't added
+			return s
+		}(),
+		targ: pp.PexMsg{
+			Added6: krpc.CompactIPv6NodeAddrs{
+				krpcNodeAddrFromNetAddr(addrs[1]),
+			},
+			Added6Flags: []pp.PexPeerFlags{0},
+		},
+	},
+	{
+		name: "delayed",
+		in: func() *pexState {
+			conns := []PeerConn{
+				{Peer: Peer{RemoteAddr: addrs[0]}},
+				{Peer: Peer{RemoteAddr: addrs[1]}},
+				{Peer: Peer{RemoteAddr: addrs[2]}},
+			}
+			s := new(pexState)
+			s.Add(&conns[0])
+			s.Add(&conns[1])
+			s.Add(&conns[2])
+			s.Drop(&conns[0]) // on hold: s.nc < pexTargAdded
+			s.Drop(&conns[2])
+			s.Drop(&conns[1])
+			return s
+		}(),
+		targ: pp.PexMsg{
+			Added: krpc.CompactIPv4NodeAddrs{
+				krpcNodeAddrFromNetAddr(addrs[2]),
+			},
+			AddedFlags: []pp.PexPeerFlags{0},
+			Added6: krpc.CompactIPv6NodeAddrs{
+				krpcNodeAddrFromNetAddr(addrs[0]),
+				krpcNodeAddrFromNetAddr(addrs[1]),
+			},
+			Added6Flags: []pp.PexPeerFlags{0, 0},
+		},
+	},
+	{
+		name: "unheld",
+		in: func() *pexState {
+			conns := []PeerConn{
+				{Peer: Peer{RemoteAddr: addrs[0]}},
+				{Peer: Peer{RemoteAddr: addrs[1]}},
+			}
+			s := &pexState{nc: pexTargAdded - 1}
+			s.Add(&conns[0])
+			s.Drop(&conns[0]) // on hold: s.nc < pexTargAdded
+			s.Add(&conns[1])  // unholds the above
+			return s
+		}(),
+		targ: pp.PexMsg{
+			Added6: krpc.CompactIPv6NodeAddrs{
+				krpcNodeAddrFromNetAddr(addrs[1]),
+			},
+			Added6Flags: []pp.PexPeerFlags{0},
+		},
+	},
+	{
+		name: "followup",
+		in: func() *pexState {
+			s := new(pexState)
+			s.Add(&PeerConn{Peer: Peer{RemoteAddr: addrs[0]}})
+			return s
+		}(),
+		targ: pp.PexMsg{
+			Added6: krpc.CompactIPv6NodeAddrs{
+				krpcNodeAddrFromNetAddr(addrs[0]),
+			},
+			Added6Flags: []pp.PexPeerFlags{0},
+		},
+		update: func(s *pexState) {
+			s.Add(&PeerConn{Peer: Peer{RemoteAddr: addrs[1]}})
+		},
+		targ1: pp.PexMsg{
+			Added6: krpc.CompactIPv6NodeAddrs{
+				krpcNodeAddrFromNetAddr(addrs[1]),
+			},
+			Added6Flags: []pp.PexPeerFlags{0},
+		},
+	},
+}
+
+// Represents the contents of a PexMsg in a way that supports equivalence checking in tests. This is
+// necessary because pexMsgFactory uses maps, so the ordering of the resultant
+// PexMsg isn't deterministic. Since the flags live in a separate array, plain
+// ElementsMatch from testify isn't enough on its own: the ordering *does*
+// still matter between an added addr and its flags.
+type comparablePexMsg struct {
+	added, added6           []krpc.NodeAddr
+	addedFlags, added6Flags []pp.PexPeerFlags
+	dropped, dropped6       []krpc.NodeAddr
+}
+
+// Such Rust-inspired.
+func (me *comparablePexMsg) From(f pp.PexMsg) {
+	me.added = f.Added
+	me.addedFlags = f.AddedFlags
+	me.added6 = f.Added6
+	me.added6Flags = f.Added6Flags
+	me.dropped = f.Dropped
+	me.dropped6 = f.Dropped6
+}
+
+// For PexMsg created by pexMsgFactory, this is as good as it can get without using data structures
+// in pexMsgFactory that preserve insert ordering.
+func (actual comparablePexMsg) AssertEqual(t *testing.T, expected comparablePexMsg) {
+	assert.ElementsMatch(t, expected.added, actual.added)
+	assert.ElementsMatch(t, expected.addedFlags, actual.addedFlags)
+	assert.ElementsMatch(t, expected.added6, actual.added6)
+	assert.ElementsMatch(t, expected.added6Flags, actual.added6Flags)
+	assert.ElementsMatch(t, expected.dropped, actual.dropped)
+	assert.ElementsMatch(t, expected.dropped6, actual.dropped6)
+}
+
+func assertPexMsgsEqual(t *testing.T, expected, actual pp.PexMsg) {
+	var ec, ac comparablePexMsg
+	ec.From(expected)
+	ac.From(actual)
+	ac.AssertEqual(t, ec)
+}
+
+func TestPexGenmsg0(t *testing.T) {
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			s := *tc.in
+			m, last := s.Genmsg(nil)
+			assertPexMsgsEqual(t, tc.targ, m)
+			if tc.update != nil {
+				tc.update(&s)
+				m1, last := s.Genmsg(last)
+				assertPexMsgsEqual(t, tc.targ1, m1)
+				assert.NotNil(t, last)
+			}
+		})
+	}
+}
+
+// generate 𝑛 distinct values of net.Addr
+func addrgen(n int) chan net.Addr {
+	c := make(chan net.Addr)
+	go func() {
+		defer close(c)
+		for i := 4747; i < 65535 && n > 0; i++ {
+			c <- &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: i}
+			n--
+		}
+	}()
+	return c
+}
+
+func TestPexInitialNoCutoff(t *testing.T) {
+	const n = 2 * pexMaxDelta
+	var s pexState
+
+	c := addrgen(n)
+	for addr := range c {
+		s.Add(&PeerConn{Peer: Peer{RemoteAddr: addr}})
+	}
+	m, _ := s.Genmsg(nil)
+
+	require.EqualValues(t, n, len(m.Added))
+	require.EqualValues(t, n, len(m.AddedFlags))
+	require.EqualValues(t, 0, len(m.Added6))
+	require.EqualValues(t, 0, len(m.Added6Flags))
+	require.EqualValues(t, 0, len(m.Dropped))
+	require.EqualValues(t, 0, len(m.Dropped6))
+}
+
+func benchmarkPexInitialN(b *testing.B, npeers int) {
+	for i := 0; i < b.N; i++ {
+		var s pexState
+		c := addrgen(npeers)
+		for addr := range c {
+			s.Add(&PeerConn{Peer: Peer{RemoteAddr: addr}})
+			s.Genmsg(nil)
+		}
+	}
+}
+
+// obtain at least 5 points, e.g. to plot a graph
+func BenchmarkPexInitial4(b *testing.B)   { benchmarkPexInitialN(b, 4) }
+func BenchmarkPexInitial50(b *testing.B)  { benchmarkPexInitialN(b, 50) }
+func BenchmarkPexInitial100(b *testing.B) { benchmarkPexInitialN(b, 100) }
+func BenchmarkPexInitial200(b *testing.B) { benchmarkPexInitialN(b, 200) }
+func BenchmarkPexInitial400(b *testing.B) { benchmarkPexInitialN(b, 400) }
diff --git a/deps/github.com/anacrolix/torrent/pexconn.go b/deps/github.com/anacrolix/torrent/pexconn.go
new file mode 100644
index 0000000..9254f5e
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/pexconn.go
@@ -0,0 +1,168 @@
+package torrent
+
+import (
+	"fmt"
+	"net/netip"
+	"time"
+
+	g "github.com/anacrolix/generics"
+	"github.com/anacrolix/log"
+
+	pp "github.com/anacrolix/torrent/peer_protocol"
+)
+
+const (
+	pexRetryDelay = 10 * time.Second
+	pexInterval   = 1 * time.Minute
+)
+
+// per-connection PEX state
+type pexConnState struct {
+	enabled bool
+	xid     pp.ExtensionNumber
+	last    *pexEvent
+	timer   *time.Timer
+	gate    chan struct{}
+	readyfn func()
+	torrent *Torrent
+	Listed  bool
+	info    log.Logger
+	dbg     log.Logger
+	// Running record of live connections the remote end of the connection purports to have.
+	remoteLiveConns map[netip.AddrPort]g.Option[pp.PexPeerFlags]
+	lastRecv        time.Time
+}
+
+func (s *pexConnState) IsEnabled() bool {
+	return s.enabled
+}
+
+// Init is called from the reader goroutine upon completion of the extended handshake.
+func (s *pexConnState) Init(c *PeerConn) {
+	xid, ok := c.PeerExtensionIDs[pp.ExtensionNamePex]
+	if !ok || xid == 0 || c.t.cl.config.DisablePEX {
+		return
+	}
+	s.xid = xid
+	s.last = nil
+	s.torrent = c.t
+	s.info = c.t.cl.logger.WithDefaultLevel(log.Info)
+	s.dbg = c.logger.WithDefaultLevel(log.Debug)
+	s.readyfn = c.tickleWriter
+	s.gate = make(chan struct{}, 1)
+	s.timer = time.AfterFunc(0, func() {
+		s.gate <- struct{}{}
+		s.readyfn() // wake up the writer
+	})
+	s.enabled = true
+}
+
+// schedule next PEX message
+func (s *pexConnState) sched(delay time.Duration) {
+	s.timer.Reset(delay)
+}
+
+// generate next PEX message for the peer; returns nil if nothing yet to send
+func (s *pexConnState) genmsg() *pp.PexMsg {
+	tx, last := s.torrent.pex.Genmsg(s.last)
+	if tx.Len() == 0 {
+		return nil
+	}
+	s.last = last
+	return &tx
+}
+
+func (s *pexConnState) numPending() int {
+	if s.torrent == nil {
+		return 0
+	}
+	return s.torrent.pex.numPending(s.last)
+}
+
+// Share is called from the writer goroutine when it is woken up with the write buffers empty.
+// Returns whether there's more room on the send buffer to write to.
+func (s *pexConnState) Share(postfn messageWriter) bool {
+	select {
+	case <-s.gate:
+		if tx := s.genmsg(); tx != nil {
+			s.dbg.Print("sending PEX message: ", tx)
+			flow := postfn(tx.Message(s.xid))
+			s.sched(pexInterval)
+			return flow
+		} else {
+			// no PEX to send this time - try again shortly
+			s.sched(pexRetryDelay)
+		}
+	default:
+	}
+	return true
+}
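+
+// The timer/gate pair above is a simple token scheme: the timer deposits one
+// token into gate and wakes the writer; Share consumes the token and either
+// posts a PEX message and re-arms the timer for pexInterval, or re-arms it
+// for pexRetryDelay when there is nothing to send yet.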
+
+func (s *pexConnState) updateRemoteLiveConns(rx pp.PexMsg) (errs []error) {
+	for _, dropped := range rx.Dropped {
+		addrPort, _ := ipv4AddrPortFromKrpcNodeAddr(dropped)
+		delete(s.remoteLiveConns, addrPort)
+	}
+	for _, dropped := range rx.Dropped6 {
+		addrPort, _ := ipv6AddrPortFromKrpcNodeAddr(dropped)
+		delete(s.remoteLiveConns, addrPort)
+	}
+	for i, added := range rx.Added {
+		addr := netip.AddrFrom4(*(*[4]byte)(added.IP.To4()))
+		addrPort := netip.AddrPortFrom(addr, uint16(added.Port))
+		flags := g.SliceGet(rx.AddedFlags, i)
+		g.MakeMapIfNilAndSet(&s.remoteLiveConns, addrPort, flags)
+	}
+	for i, added := range rx.Added6 {
+		addr := netip.AddrFrom16(*(*[16]byte)(added.IP.To16()))
+		addrPort := netip.AddrPortFrom(addr, uint16(added.Port))
+		flags := g.SliceGet(rx.Added6Flags, i)
+		g.MakeMapIfNilAndSet(&s.remoteLiveConns, addrPort, flags)
+	}
+	return
+}
+
+// Recv is called from the reader goroutine
+func (s *pexConnState) Recv(payload []byte) error {
+	rx, err := pp.LoadPexMsg(payload)
+	if err != nil {
+		return fmt.Errorf("unmarshalling pex message: %w", err)
+	}
+	s.dbg.Printf("received pex message: %v", rx)
+	torrent.Add("pex added peers received", int64(len(rx.Added)))
+	torrent.Add("pex added6 peers received", int64(len(rx.Added6)))
+
+	// "Clients must batch updates to send no more than 1 PEX message per minute."
+	timeSinceLastRecv := time.Since(s.lastRecv)
+	if timeSinceLastRecv < 45*time.Second {
+		return fmt.Errorf("last received only %v ago", timeSinceLastRecv)
+	}
+	s.lastRecv = time.Now()
+	s.updateRemoteLiveConns(rx)
+
+	var peers peerInfos
+	peers.AppendFromPex(rx.Added6, rx.Added6Flags)
+	peers.AppendFromPex(rx.Added, rx.AddedFlags)
+	if time.Now().Before(s.torrent.pex.rest) {
+		s.dbg.Printf("in cooldown period, incoming PEX discarded")
+		return nil
+	}
+	added := s.torrent.addPeers(peers)
+	s.dbg.Printf("got %v peers over pex, added %v", len(peers), added)
+
+	if len(peers) > 0 {
+		s.torrent.pex.rest = time.Now().Add(pexInterval)
+	}
+
+	// one day we may also want to:
+	// - handle drops somehow
+	// - detect malicious peers
+
+	return nil
+}
+
+func (s *pexConnState) Close() {
+	if s.timer != nil {
+		s.timer.Stop()
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/pexconn_test.go b/deps/github.com/anacrolix/torrent/pexconn_test.go
new file mode 100644
index 0000000..f8b9c9e
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/pexconn_test.go
@@ -0,0 +1,61 @@
+package torrent
+
+import (
+	"net"
+	"testing"
+
+	"github.com/anacrolix/dht/v2/krpc"
+	"github.com/stretchr/testify/require"
+
+	"github.com/anacrolix/torrent/metainfo"
+	pp "github.com/anacrolix/torrent/peer_protocol"
+)
+
+func TestPexConnState(t *testing.T) {
+	var cl Client
+	cl.init(TestingConfig(t))
+	cl.initLogger()
+	torrent := cl.newTorrent(metainfo.Hash{}, nil)
+	addr := &net.TCPAddr{IP: net.IPv6loopback, Port: 4747}
+	c := cl.newConnection(nil, newConnectionOpts{
+		remoteAddr: addr,
+		network:    addr.Network(),
+	})
+	c.PeerExtensionIDs = make(map[pp.ExtensionName]pp.ExtensionNumber)
+	c.PeerExtensionIDs[pp.ExtensionNamePex] = pexExtendedId
+	c.messageWriter.mu.Lock()
+	c.setTorrent(torrent)
+	if err := torrent.addPeerConn(c); err != nil {
+		t.Log(err)
+	}
+
+	connWriteCond := c.messageWriter.writeCond.Signaled()
+	c.pex.Init(c)
+	require.True(t, c.pex.IsEnabled(), "should get enabled")
+	defer c.pex.Close()
+
+	var out pp.Message
+	writerCalled := false
+	testWriter := func(m pp.Message) bool {
+		writerCalled = true
+		out = m
+		return true
+	}
+	<-connWriteCond
+	c.pex.Share(testWriter)
+	require.True(t, writerCalled)
+	require.EqualValues(t, pp.Extended, out.Type)
+	require.EqualValues(t, pexExtendedId, out.ExtendedID)
+
+	x, err := pp.LoadPexMsg(out.ExtendedPayload)
+	require.NoError(t, err)
+	targx := pp.PexMsg{
+		Added:      krpc.CompactIPv4NodeAddrs(nil),
+		AddedFlags: []pp.PexPeerFlags{},
+		Added6: krpc.CompactIPv6NodeAddrs{
+			krpcNodeAddrFromNetAddr(addr),
+		},
+		Added6Flags: []pp.PexPeerFlags{0},
+	}
+	require.EqualValues(t, targx, x)
+}
diff --git a/deps/github.com/anacrolix/torrent/piece.go b/deps/github.com/anacrolix/torrent/piece.go
new file mode 100644
index 0000000..e08b260
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/piece.go
@@ -0,0 +1,257 @@
+package torrent
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/anacrolix/chansync"
+	"github.com/anacrolix/missinggo/v2/bitmap"
+
+	"github.com/anacrolix/torrent/metainfo"
+	pp "github.com/anacrolix/torrent/peer_protocol"
+	"github.com/anacrolix/torrent/storage"
+)
+
+type Piece struct {
+	// The completed piece SHA1 hash, from the metainfo "pieces" field.
+	hash  *metainfo.Hash
+	t     *Torrent
+	index pieceIndex
+	files []*File
+
+	readerCond chansync.BroadcastCond
+
+	numVerifies         int64
+	hashing             bool
+	marking             bool
+	storageCompletionOk bool
+
+	publicPieceState PieceState
+	priority         piecePriority
+	// Availability adjustment for this piece relative to len(Torrent.connsWithAllPieces). This is
+	// incremented for a piece when a peer has that piece, Torrent.haveInfo is true, and the Peer
+	// isn't recorded in Torrent.connsWithAllPieces.
+	relativeAvailability int
+
+	// This can be locked when the Client lock is taken, but probably not vice versa.
+	pendingWritesMutex sync.Mutex
+	pendingWrites      int
+	noPendingWrites    sync.Cond
+
+	// Connections that have written data to this piece since its last check.
+	// This can include connections that have closed.
+	dirtiers map[*Peer]struct{}
+}
+
+func (p *Piece) String() string {
+	return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
+}
+
+func (p *Piece) Info() metainfo.Piece {
+	return p.t.info.Piece(int(p.index))
+}
+
+func (p *Piece) Storage() storage.Piece {
+	return p.t.storage.Piece(p.Info())
+}
+
+func (p *Piece) Flush() {
+	if p.t.storage.Flush != nil {
+		_ = p.t.storage.Flush()
+	}
+}
+
+func (p *Piece) pendingChunkIndex(chunkIndex chunkIndexType) bool {
+	return !p.chunkIndexDirty(chunkIndex)
+}
+
+func (p *Piece) pendingChunk(cs ChunkSpec, chunkSize pp.Integer) bool {
+	return p.pendingChunkIndex(chunkIndexFromChunkSpec(cs, chunkSize))
+}
+
+func (p *Piece) hasDirtyChunks() bool {
+	return p.numDirtyChunks() != 0
+}
+
+func (p *Piece) numDirtyChunks() chunkIndexType {
+	return chunkIndexType(roaringBitmapRangeCardinality[RequestIndex](
+		&p.t.dirtyChunks,
+		p.requestIndexOffset(),
+		p.t.pieceRequestIndexOffset(p.index+1)))
+}
+
+func (p *Piece) unpendChunkIndex(i chunkIndexType) {
+	p.t.dirtyChunks.Add(p.requestIndexOffset() + i)
+	p.t.updatePieceRequestOrder(p.index)
+	p.readerCond.Broadcast()
+}
+
+func (p *Piece) pendChunkIndex(i RequestIndex) {
+	p.t.dirtyChunks.Remove(p.requestIndexOffset() + i)
+	p.t.updatePieceRequestOrder(p.index)
+}
+
+func (p *Piece) numChunks() chunkIndexType {
+	return p.t.pieceNumChunks(p.index)
+}
+
+func (p *Piece) incrementPendingWrites() {
+	p.pendingWritesMutex.Lock()
+	p.pendingWrites++
+	p.pendingWritesMutex.Unlock()
+}
+
+func (p *Piece) decrementPendingWrites() {
+	p.pendingWritesMutex.Lock()
+	if p.pendingWrites == 0 {
+		panic("assertion")
+	}
+	p.pendingWrites--
+	if p.pendingWrites == 0 {
+		p.noPendingWrites.Broadcast()
+	}
+	p.pendingWritesMutex.Unlock()
+}
+
+func (p *Piece) waitNoPendingWrites() {
+	p.pendingWritesMutex.Lock()
+	for p.pendingWrites != 0 {
+		p.noPendingWrites.Wait()
+	}
+	p.pendingWritesMutex.Unlock()
+}
+
+func (p *Piece) chunkIndexDirty(chunk chunkIndexType) bool {
+	return p.t.dirtyChunks.Contains(p.requestIndexOffset() + chunk)
+}
+
+func (p *Piece) chunkIndexSpec(chunk chunkIndexType) ChunkSpec {
+	return chunkIndexSpec(pp.Integer(chunk), p.length(), p.chunkSize())
+}
+
+func (p *Piece) numDirtyBytes() (ret pp.Integer) {
+	// defer func() {
+	// 	if ret > p.length() {
+	// 		panic("too many dirty bytes")
+	// 	}
+	// }()
+	numRegularDirtyChunks := p.numDirtyChunks()
+	if p.chunkIndexDirty(p.numChunks() - 1) {
+		numRegularDirtyChunks--
+		ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
+	}
+	ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
+	return
+}
+
+func (p *Piece) length() pp.Integer {
+	return p.t.pieceLength(p.index)
+}
+
+func (p *Piece) chunkSize() pp.Integer {
+	return p.t.chunkSize
+}
+
+func (p *Piece) lastChunkIndex() chunkIndexType {
+	return p.numChunks() - 1
+}
+
+func (p *Piece) bytesLeft() (ret pp.Integer) {
+	if p.t.pieceComplete(p.index) {
+		return 0
+	}
+	return p.length() - p.numDirtyBytes()
+}
+
+// Forces the piece data to be rehashed.
+func (p *Piece) VerifyData() {
+	p.t.cl.lock()
+	defer p.t.cl.unlock()
+	target := p.numVerifies + 1
+	if p.hashing {
+		target++
+	}
+	// log.Printf("target: %d", target)
+	p.t.queuePieceCheck(p.index)
+	for {
+		// log.Printf("got %d verifies", p.numVerifies)
+		if p.numVerifies >= target {
+			break
+		}
+		p.t.cl.event.Wait()
+	}
+	// log.Print("done")
+}
+
+func (p *Piece) queuedForHash() bool {
+	return p.t.piecesQueuedForHash.Get(bitmap.BitIndex(p.index))
+}
+
+func (p *Piece) torrentBeginOffset() int64 {
+	return int64(p.index) * p.t.info.PieceLength
+}
+
+func (p *Piece) torrentEndOffset() int64 {
+	return p.torrentBeginOffset() + int64(p.length())
+}
+
+func (p *Piece) SetPriority(prio piecePriority) {
+	p.t.cl.lock()
+	defer p.t.cl.unlock()
+	p.priority = prio
+	p.t.updatePiecePriority(p.index, "Piece.SetPriority")
+}
+
+func (p *Piece) purePriority() (ret piecePriority) {
+	for _, f := range p.files {
+		ret.Raise(f.prio)
+	}
+	if p.t.readerNowPieces().Contains(bitmap.BitIndex(p.index)) {
+		ret.Raise(PiecePriorityNow)
+	}
+	// if t._readerNowPieces.Contains(piece - 1) {
+	// 	return PiecePriorityNext
+	// }
+	if p.t.readerReadaheadPieces().Contains(bitmap.BitIndex(p.index)) {
+		ret.Raise(PiecePriorityReadahead)
+	}
+	ret.Raise(p.priority)
+	return
+}
+
+func (p *Piece) uncachedPriority() (ret piecePriority) {
+	if p.hashing || p.marking || p.t.pieceComplete(p.index) || p.queuedForHash() {
+		return PiecePriorityNone
+	}
+	return p.purePriority()
+}
+
+// Tells the Client to refetch the completion status from storage, updating priority etc. if
+// necessary. Might be useful if you know the state of the piece data has changed externally.
+func (p *Piece) UpdateCompletion() {
+	p.t.cl.lock()
+	defer p.t.cl.unlock()
+	p.t.updatePieceCompletion(p.index)
+}
+
+func (p *Piece) completion() (ret storage.Completion) {
+	ret.Complete = p.t.pieceComplete(p.index)
+	ret.Ok = p.storageCompletionOk
+	return
+}
+
+func (p *Piece) allChunksDirty() bool {
+	return p.numDirtyChunks() == p.numChunks()
+}
+
+func (p *Piece) State() PieceState {
+	return p.t.PieceState(p.index)
+}
+
+func (p *Piece) requestIndexOffset() RequestIndex {
+	return p.t.pieceRequestIndexOffset(p.index)
+}
+
+func (p *Piece) availability() int {
+	return len(p.t.connsWithAllPieces) + p.relativeAvailability
+}
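+
+// Worked example (illustrative): with 3 connections recorded in
+// connsWithAllPieces (seeders) and 2 partial peers that have each announced
+// this piece, relativeAvailability is 2 and availability() reports 5.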
diff --git a/deps/github.com/anacrolix/torrent/piecestate.go b/deps/github.com/anacrolix/torrent/piecestate.go
new file mode 100644
index 0000000..089adca
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/piecestate.go
@@ -0,0 +1,27 @@
+package torrent
+
+import (
+	"github.com/anacrolix/torrent/storage"
+)
+
+// The current state of a piece.
+type PieceState struct {
+	Priority piecePriority
+	storage.Completion
+	// The piece is being hashed, or is queued for hash. Deprecated: Use the Hashing and
+	// QueuedForHash fields instead.
+	Checking bool
+
+	Hashing       bool
+	QueuedForHash bool
+	// The piece state is being marked in the storage.
+	Marking bool
+
+	// Some of the piece has been obtained.
+	Partial bool
+}
+
+// Represents a series of consecutive pieces with the same state.
+type PieceStateRun struct {
+	PieceState
+	Length int // How many consecutive pieces have this state.
+}
diff --git a/deps/github.com/anacrolix/torrent/portfwd.go b/deps/github.com/anacrolix/torrent/portfwd.go
new file mode 100644
index 0000000..0136578
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/portfwd.go
@@ -0,0 +1,45 @@
+package torrent
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/anacrolix/log"
+	"github.com/anacrolix/upnp"
+)
+
+const UpnpDiscoverLogTag = "upnp-discover"
+
+func (cl *Client) addPortMapping(d upnp.Device, proto upnp.Protocol, internalPort int, upnpID string) {
+	logger := cl.logger.WithContextText(fmt.Sprintf("UPnP device at %v: mapping internal %v port %v", d.GetLocalIPAddress(), proto, internalPort))
+	externalPort, err := d.AddPortMapping(proto, internalPort, internalPort, upnpID, 0)
+	if err != nil {
+		logger.WithDefaultLevel(log.Warning).Printf("error: %v", err)
+		return
+	}
+	level := log.Info
+	if externalPort != internalPort {
+		level = log.Warning
+	}
+	logger.WithDefaultLevel(level).Printf("success: external port %v", externalPort)
+}
+
+func (cl *Client) forwardPort() {
+	cl.lock()
+	defer cl.unlock()
+	if cl.config.NoDefaultPortForwarding {
+		return
+	}
+	cl.unlock()
+	ds := upnp.Discover(0, 2*time.Second, cl.logger.WithValues(UpnpDiscoverLogTag))
+	cl.lock()
+	cl.logger.WithDefaultLevel(log.Debug).Printf("discovered %d upnp devices", len(ds))
+	port := cl.incomingPeerPort()
+	id := cl.config.UpnpID
+	cl.unlock()
+	for _, d := range ds {
+		go cl.addPortMapping(d, upnp.TCP, port, id)
+		go cl.addPortMapping(d, upnp.UDP, port, id)
+	}
+	cl.lock()
+}
diff --git a/deps/github.com/anacrolix/torrent/prioritized-peers.go b/deps/github.com/anacrolix/torrent/prioritized-peers.go
new file mode 100644
index 0000000..443d720
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/prioritized-peers.go
@@ -0,0 +1,82 @@
+package torrent
+
+import (
+	"hash/maphash"
+
+	"github.com/anacrolix/multiless"
+	"github.com/google/btree"
+)
+
+// Peers are stored with their priority at insertion. Their priority may
+// change if our apparent IP changes; we don't currently handle that.
+type prioritizedPeersItem struct {
+	prio peerPriority
+	p    PeerInfo
+}
+
+var hashSeed = maphash.MakeSeed()
+
+func (me prioritizedPeersItem) addrHash() int64 {
+	var h maphash.Hash
+	h.SetSeed(hashSeed)
+	h.WriteString(me.p.Addr.String())
+	return int64(h.Sum64())
+}
+
+func (me prioritizedPeersItem) Less(than btree.Item) bool {
+	other := than.(prioritizedPeersItem)
+	return multiless.New().Bool(
+		me.p.Trusted, other.p.Trusted).Uint32(
+		me.prio, other.prio).Int64(
+		me.addrHash(), other.addrHash(),
+	).Less()
+}
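+
+// Ordering sketch: PopMax yields the most preferred peer. A trusted peer
+// always outranks an untrusted one; ties fall back to the BEP 40 canonical
+// priority (higher wins), and finally to a seeded hash of the address so
+// equal-priority peers get a stable but arbitrary order.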
+
+type prioritizedPeers struct {
+	om      *btree.BTree
+	getPrio func(PeerInfo) peerPriority
+}
+
+func (me *prioritizedPeers) Each(f func(PeerInfo)) {
+	me.om.Ascend(func(i btree.Item) bool {
+		f(i.(prioritizedPeersItem).p)
+		return true
+	})
+}
+
+func (me *prioritizedPeers) Len() int {
+	if me == nil || me.om == nil {
+		return 0
+	}
+	return me.om.Len()
+}
+
+// Returns true if a peer is replaced.
+func (me *prioritizedPeers) Add(p PeerInfo) bool {
+	return me.om.ReplaceOrInsert(prioritizedPeersItem{me.getPrio(p), p}) != nil
+}
+
+// Returns true if a peer is replaced.
+func (me *prioritizedPeers) AddReturningReplacedPeer(p PeerInfo) (ret PeerInfo, ok bool) {
+	item := me.om.ReplaceOrInsert(prioritizedPeersItem{me.getPrio(p), p})
+	if item == nil {
+		return
+	}
+	ret = item.(prioritizedPeersItem).p
+	ok = true
+	return
+}
+
+func (me *prioritizedPeers) DeleteMin() (ret prioritizedPeersItem, ok bool) {
+	i := me.om.DeleteMin()
+	if i == nil {
+		return
+	}
+	ret = i.(prioritizedPeersItem)
+	ok = true
+	return
+}
+
+func (me *prioritizedPeers) PopMax() PeerInfo {
+	return me.om.DeleteMax().(prioritizedPeersItem).p
+}
diff --git a/deps/github.com/anacrolix/torrent/prioritized-peers_test.go b/deps/github.com/anacrolix/torrent/prioritized-peers_test.go
new file mode 100644
index 0000000..5e61c25
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/prioritized-peers_test.go
@@ -0,0 +1,55 @@
+package torrent
+
+import (
+	"net"
+	"testing"
+
+	"github.com/google/btree"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestPrioritizedPeers(t *testing.T) {
+	pp := prioritizedPeers{
+		om: btree.New(3),
+		getPrio: func(p PeerInfo) peerPriority {
+			return bep40PriorityIgnoreError(p.addr(), IpPort{IP: net.ParseIP("0.0.0.0")})
+		},
+	}
+	_, ok := pp.DeleteMin()
+	assert.Panics(t, func() { pp.PopMax() })
+	assert.False(t, ok)
+	ps := []PeerInfo{
+		{Addr: ipPortAddr{IP: net.ParseIP("1.2.3.4")}},
+		{Addr: ipPortAddr{IP: net.ParseIP("1::2")}},
+		{Addr: ipPortAddr{IP: net.ParseIP("")}},
+		{Addr: ipPortAddr{IP: net.ParseIP("")}, Trusted: true},
+	}
+	for i, p := range ps {
+		t.Logf("peer %d priority: %08x trusted: %t\n", i, pp.getPrio(p), p.Trusted)
+		assert.False(t, pp.Add(p))
+		assert.True(t, pp.Add(p))
+		assert.Equal(t, i+1, pp.Len())
+	}
+	pop := func(expected *PeerInfo) {
+		if expected == nil {
+			assert.Panics(t, func() { pp.PopMax() })
+		} else {
+			assert.Equal(t, *expected, pp.PopMax())
+		}
+	}
+	min := func(expected *PeerInfo) {
+		i, ok := pp.DeleteMin()
+		if expected == nil {
+			assert.False(t, ok)
+		} else {
+			assert.True(t, ok)
+			assert.Equal(t, *expected, i.p)
+		}
+	}
+	pop(&ps[3])
+	pop(&ps[1])
+	min(&ps[2])
+	pop(&ps[0])
+	min(nil)
+	pop(nil)
+}
diff --git a/deps/github.com/anacrolix/torrent/protocol.go b/deps/github.com/anacrolix/torrent/protocol.go
new file mode 100644
index 0000000..82e36d3
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/protocol.go
@@ -0,0 +1,9 @@
+package torrent
+
+import (
+	pp "github.com/anacrolix/torrent/peer_protocol"
+)
+
+func makeCancelMessage(r Request) pp.Message {
+	return pp.MakeCancelMessage(r.Index, r.Begin, r.Length)
+}
diff --git a/deps/github.com/anacrolix/torrent/ratelimitreader.go b/deps/github.com/anacrolix/torrent/ratelimitreader.go
new file mode 100644
index 0000000..7d9e6d8
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/ratelimitreader.go
@@ -0,0 +1,53 @@
+package torrent
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"time"
+
+	"golang.org/x/time/rate"
+)
+
+type rateLimitedReader struct {
+	l *rate.Limiter
+	r io.Reader
+
+	// This is the time of the last Read's reservation.
+	lastRead time.Time
+}
+
+func (me *rateLimitedReader) Read(b []byte) (n int, err error) {
+	const oldStyle = false // Retained for future reference.
+	if oldStyle {
+		// Wait until we can read at all.
+		if err := me.l.WaitN(context.Background(), 1); err != nil {
+			panic(err)
+		}
+		// Limit the read to within the burst.
+		if me.l.Limit() != rate.Inf && len(b) > me.l.Burst() {
+			b = b[:me.l.Burst()]
+		}
+		n, err = me.r.Read(b)
+		// Pay the piper.
+		now := time.Now()
+		me.lastRead = now
+		if !me.l.ReserveN(now, n-1).OK() {
+			panic(fmt.Sprintf("burst exceeded?: %d", n-1))
+		}
+	} else {
+		// Limit the read to within the burst.
+		if me.l.Limit() != rate.Inf && len(b) > me.l.Burst() {
+			b = b[:me.l.Burst()]
+		}
+		n, err = me.r.Read(b)
+		now := time.Now()
+		r := me.l.ReserveN(now, n)
+		if !r.OK() {
+			panic(n)
+		}
+		me.lastRead = now
+		time.Sleep(r.Delay())
+	}
+	return
+}
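+
+// Illustrative wiring (conn is a hypothetical net.Conn): a 1 MiB/s limit with
+// a 64 KiB burst. Read trims each call to the burst, so ReserveN is never
+// asked for more than the limiter can ever grant.
+//
+//	rlr := rateLimitedReader{
+//		l: rate.NewLimiter(1<<20, 64<<10),
+//		r: conn,
+//	}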
diff --git a/deps/github.com/anacrolix/torrent/reader.go b/deps/github.com/anacrolix/torrent/reader.go
new file mode 100644
index 0000000..4b20206
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/reader.go
@@ -0,0 +1,332 @@
+package torrent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/anacrolix/log"
+	"github.com/anacrolix/missinggo/v2"
+)
+
+// Accesses Torrent data via a Client. Reads block until the data is available. Seeks and readahead
+// also drive Client behaviour. Not safe for concurrent use.
+type Reader interface {
+	io.ReadSeekCloser
+	missinggo.ReadContexter
+	// Configure the number of bytes ahead of a read that should also be prioritized in preparation
+	// for further reads. Overridden by non-nil readahead func, see SetReadaheadFunc.
+	SetReadahead(int64)
+	// If non-nil, the provided function is called when the implementation needs to know the
+	// readahead for the current reader. Calls occur during Reads and Seeks, and while the Client is
+	// locked.
+	SetReadaheadFunc(ReadaheadFunc)
+	// Don't wait for pieces to complete and be verified. Read calls return as soon as they can when
+	// the underlying chunks become available.
+	SetResponsive()
+}
+
+// Piece range by piece index, [begin, end).
+type pieceRange struct {
+	begin, end pieceIndex
+}
+
+type ReadaheadContext struct {
+	ContiguousReadStartPos int64
+	CurrentPos             int64
+}
+
+// Returns the desired readahead for a Reader.
+type ReadaheadFunc func(ReadaheadContext) int64
+
+type reader struct {
+	t *Torrent
+	// Adjust the read/seek window to handle Readers locked to File extents and the like.
+	offset, length int64
+
+	// Function to dynamically calculate readahead. If nil, readahead is static.
+	readaheadFunc ReadaheadFunc
+
+	// Required when modifying pos and readahead.
+	mu sync.Locker
+
+	readahead, pos int64
+	// Position that reads have continued contiguously from.
+	contiguousReadStartPos int64
+	// The cached piece range this reader wants downloaded. The zero value corresponds to nothing.
+	// We cache this so that changes can be detected, and bubbled up to the Torrent only as
+	// required.
+	pieces pieceRange
+
+	// Reads have been initiated since the last seek. This is used to prevent readaheads occurring
+	// after a seek or with a new reader at the starting position.
+	reading    bool
+	responsive bool
+}
+
+var _ io.ReadSeekCloser = (*reader)(nil)
+
+func (r *reader) SetResponsive() {
+	r.responsive = true
+	r.t.cl.event.Broadcast()
+}
+
+// Disable responsive mode. TODO: Remove?
+func (r *reader) SetNonResponsive() {
+	r.responsive = false
+	r.t.cl.event.Broadcast()
+}
+
+func (r *reader) SetReadahead(readahead int64) {
+	r.mu.Lock()
+	r.readahead = readahead
+	r.readaheadFunc = nil
+	r.posChanged()
+	r.mu.Unlock()
+}
+
+func (r *reader) SetReadaheadFunc(f ReadaheadFunc) {
+	r.mu.Lock()
+	r.readaheadFunc = f
+	r.posChanged()
+	r.mu.Unlock()
+}
+
+// How many bytes are available to read. Max is the most we could require.
+func (r *reader) available(off, max int64) (ret int64) {
+	off += r.offset
+	for max > 0 {
+		req, ok := r.t.offsetRequest(off)
+		if !ok {
+			break
+		}
+		if !r.responsive && !r.t.pieceComplete(pieceIndex(req.Index)) {
+			break
+		}
+		if !r.t.haveChunk(req) {
+			break
+		}
+		len1 := int64(req.Length) - (off - r.t.requestOffset(req))
+		max -= len1
+		ret += len1
+		off += len1
+	}
+	// Ensure that ret hasn't exceeded our original max.
+	if max < 0 {
+		ret += max
+	}
+	return
+}
+
+// Calculates the pieces this reader wants downloaded, ignoring the cached value at r.pieces.
+func (r *reader) piecesUncached() (ret pieceRange) {
+	ra := r.readahead
+	if r.readaheadFunc != nil {
+		ra = r.readaheadFunc(ReadaheadContext{
+			ContiguousReadStartPos: r.contiguousReadStartPos,
+			CurrentPos:             r.pos,
+		})
+	}
+	if ra < 1 {
+		// Needs to be at least 1, because [x, x) means we don't want
+		// anything.
+		ra = 1
+	}
+	if !r.reading {
+		ra = 0
+	}
+	if ra > r.length-r.pos {
+		ra = r.length - r.pos
+	}
+	ret.begin, ret.end = r.t.byteRegionPieces(r.torrentOffset(r.pos), ra)
+	return
+}
+
+func (r *reader) Read(b []byte) (n int, err error) {
+	return r.ReadContext(context.Background(), b)
+}
+
+func (r *reader) ReadContext(ctx context.Context, b []byte) (n int, err error) {
+	if len(b) > 0 {
+		r.reading = true
+		// TODO: Rework reader piece priorities so we don't have to push updates in to the Client
+		// and take the lock here.
+		r.mu.Lock()
+		r.posChanged()
+		r.mu.Unlock()
+	}
+	n, err = r.readOnceAt(ctx, b, r.pos)
+	if n == 0 {
+		if err == nil && len(b) > 0 {
+			panic("expected error")
+		} else {
+			return
+		}
+	}
+
+	r.mu.Lock()
+	r.pos += int64(n)
+	r.posChanged()
+	r.mu.Unlock()
+	if r.pos >= r.length {
+		err = io.EOF
+	} else if err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+	return
+}
+
+var closedChan = make(chan struct{})
+
+func init() {
+	close(closedChan)
+}
+
+// Wait until some data should be available to read. Tickles the client if it isn't. Returns how
+// much should be readable without blocking.
+func (r *reader) waitAvailable(ctx context.Context, pos, wanted int64, wait bool) (avail int64, err error) {
+	t := r.t
+	for {
+		r.t.cl.rLock()
+		avail = r.available(pos, wanted)
+		readerCond := t.piece(int((r.offset + pos) / t.info.PieceLength)).readerCond.Signaled()
+		r.t.cl.rUnlock()
+		if avail != 0 {
+			return
+		}
+		var dontWait <-chan struct{}
+		if !wait || wanted == 0 {
+			dontWait = closedChan
+		}
+		select {
+		case <-r.t.closed.Done():
+			err = errors.New("torrent closed")
+			return
+		case <-ctx.Done():
+			err = ctx.Err()
+			return
+		case <-r.t.dataDownloadDisallowed.On():
+			err = errors.New("torrent data downloading disabled")
+			return
+		case <-r.t.networkingEnabled.Off():
+			err = errors.New("torrent networking disabled")
+			return
+		case <-dontWait:
+			return
+		case <-readerCond:
+		}
+	}
+}
+
+// Adds the reader's torrent offset to the reader object offset (for example the reader might be
+// constrained to a particular file within the torrent).
+func (r *reader) torrentOffset(readerPos int64) int64 {
+	return r.offset + readerPos
+}
+
+// Performs at most one successful read to torrent storage.
+func (r *reader) readOnceAt(ctx context.Context, b []byte, pos int64) (n int, err error) {
+	if pos >= r.length {
+		err = io.EOF
+		return
+	}
+	for {
+		var avail int64
+		avail, err = r.waitAvailable(ctx, pos, int64(len(b)), n == 0)
+		if avail == 0 {
+			return
+		}
+		firstPieceIndex := pieceIndex(r.torrentOffset(pos) / r.t.info.PieceLength)
+		firstPieceOffset := r.torrentOffset(pos) % r.t.info.PieceLength
+		b1 := missinggo.LimitLen(b, avail)
+		n, err = r.t.readAt(b1, r.torrentOffset(pos))
+		if n != 0 {
+			err = nil
+			return
+		}
+		if r.t.closed.IsSet() {
+			err = fmt.Errorf("reading from closed torrent: %w", err)
+			return
+		}
+		r.t.cl.lock()
+		// I think there's a panic here caused by the Client being closed before obtaining this
+		// lock. TestDropTorrentWithMmapStorageWhileHashing seems to trigger it occasionally in CI.
+		func() {
+			// Just add exceptions already.
+			defer r.t.cl.unlock()
+			if r.t.closed.IsSet() {
+				// Can't update because Torrent's piece order is removed from Client.
+				return
+			}
+			// TODO: Just reset pieces in the readahead window. This might help
+			// prevent thrashing with small caches and file and piece priorities.
+			r.log(log.Fstr("error reading torrent %s piece %d offset %d, %d bytes: %v",
+				r.t.infoHash.HexString(), firstPieceIndex, firstPieceOffset, len(b1), err))
+			if !r.t.updatePieceCompletion(firstPieceIndex) {
+				r.log(log.Fstr("piece %d completion unchanged", firstPieceIndex))
+			}
+			// Update the rest of the piece completions in the readahead window, without alerting to
+			// changes (since only the first piece, the one above, could have generated the read error
+			// we're currently handling).
+			if r.pieces.begin != firstPieceIndex {
+				panic(fmt.Sprint(r.pieces.begin, firstPieceIndex))
+			}
+			for index := r.pieces.begin + 1; index < r.pieces.end; index++ {
+				r.t.updatePieceCompletion(index)
+			}
+		}()
+	}
+}
+
+// Close removes the reader from the torrent, so it no longer influences piece priorities.
+func (r *reader) Close() error {
+	r.t.cl.lock()
+	r.t.deleteReader(r)
+	r.t.cl.unlock()
+	return nil
+}
+
+func (r *reader) posChanged() {
+	to := r.piecesUncached()
+	from := r.pieces
+	if to == from {
+		return
+	}
+	r.pieces = to
+	// log.Printf("reader pos changed %v->%v", from, to)
+	r.t.readerPosChanged(from, to)
+}
+
+func (r *reader) Seek(off int64, whence int) (newPos int64, err error) {
+	switch whence {
+	case io.SeekStart:
+		newPos = off
+		r.mu.Lock()
+	case io.SeekCurrent:
+		r.mu.Lock()
+		newPos = r.pos + off
+	case io.SeekEnd:
+		newPos = r.length + off
+		r.mu.Lock()
+	default:
+		return 0, errors.New("bad whence")
+	}
+	if newPos != r.pos {
+		r.reading = false
+		r.pos = newPos
+		r.contiguousReadStartPos = newPos
+		r.posChanged()
+	}
+	r.mu.Unlock()
+	return
+}
+
+func (r *reader) log(m log.Msg) {
+	r.t.logger.LogLevel(log.Debug, m.Skip(1))
+}
+
+// Implementation inspired by https://news.ycombinator.com/item?id=27019613.
+func defaultReadaheadFunc(r ReadaheadContext) int64 {
+	return r.CurrentPos - r.ContiguousReadStartPos
+}
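+
+// Illustrative sketch: capping readahead at 4 MiB while keeping the default
+// doubling behaviour. NewReader and the 4 MiB cap are assumptions made for
+// the example, not part of this file.
+//
+//	r := t.NewReader()
+//	r.SetReadaheadFunc(func(ctx ReadaheadContext) int64 {
+//		ra := ctx.CurrentPos - ctx.ContiguousReadStartPos
+//		if ra > 4<<20 {
+//			ra = 4 << 20
+//		}
+//		return ra
+//	})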
diff --git a/deps/github.com/anacrolix/torrent/reader_test.go b/deps/github.com/anacrolix/torrent/reader_test.go
new file mode 100644
index 0000000..c1017a0
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/reader_test.go
@@ -0,0 +1,26 @@
+package torrent
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/anacrolix/torrent/internal/testutil"
+)
+
+func TestReaderReadContext(t *testing.T) {
+	cl, err := NewClient(TestingConfig(t))
+	require.NoError(t, err)
+	defer cl.Close()
+	tt, err := cl.AddTorrent(testutil.GreetingMetaInfo())
+	require.NoError(t, err)
+	defer tt.Drop()
+	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Millisecond))
+	defer cancel()
+	r := tt.Files()[0].NewReader()
+	defer r.Close()
+	_, err = r.ReadContext(ctx, make([]byte, 1))
+	require.EqualValues(t, context.DeadlineExceeded, err)
+}
diff --git a/deps/github.com/anacrolix/torrent/request-strategy-impls.go b/deps/github.com/anacrolix/torrent/request-strategy-impls.go
new file mode 100644
index 0000000..0b05ed4
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/request-strategy-impls.go
@@ -0,0 +1,77 @@
+package torrent
+
+import (
+	"github.com/anacrolix/torrent/metainfo"
+	request_strategy "github.com/anacrolix/torrent/request-strategy"
+	"github.com/anacrolix/torrent/storage"
+)
+
+type requestStrategyInput struct {
+	cl      *Client
+	capFunc storage.TorrentCapacity
+}
+
+func (r requestStrategyInput) Torrent(ih metainfo.Hash) request_strategy.Torrent {
+	return requestStrategyTorrent{r.cl.torrents[ih]}
+}
+
+func (r requestStrategyInput) Capacity() (int64, bool) {
+	if r.capFunc == nil {
+		return 0, false
+	}
+	return (*r.capFunc)()
+}
+
+func (r requestStrategyInput) MaxUnverifiedBytes() int64 {
+	return r.cl.config.MaxUnverifiedBytes
+}
+
+var _ request_strategy.Input = requestStrategyInput{}
+
+// Returns what is necessary to run request_strategy.GetRequestablePieces for primaryTorrent.
+func (cl *Client) getRequestStrategyInput(primaryTorrent *Torrent) (input request_strategy.Input) {
+	return requestStrategyInput{
+		cl:      cl,
+		capFunc: primaryTorrent.storage.Capacity,
+	}
+}
+
+func (t *Torrent) getRequestStrategyInput() request_strategy.Input {
+	return t.cl.getRequestStrategyInput(t)
+}
+
+type requestStrategyTorrent struct {
+	t *Torrent
+}
+
+func (r requestStrategyTorrent) IgnorePiece(i int) bool {
+	if r.t.ignorePieceForRequests(i) {
+		return true
+	}
+	if r.t.pieceNumPendingChunks(i) == 0 {
+		return true
+	}
+
+	return false
+}
+
+func (r requestStrategyTorrent) PieceLength() int64 {
+	return r.t.info.PieceLength
+}
+
+var _ request_strategy.Torrent = requestStrategyTorrent{}
+
+type requestStrategyPiece struct {
+	t *Torrent
+	i pieceIndex
+}
+
+func (r requestStrategyPiece) Request() bool {
+	return !r.t.ignorePieceForRequests(r.i)
+}
+
+func (r requestStrategyPiece) NumPendingChunks() int {
+	return int(r.t.pieceNumPendingChunks(r.i))
+}
+
+var _ request_strategy.Piece = requestStrategyPiece{}
diff --git a/deps/github.com/anacrolix/torrent/request-strategy/ajwerner-btree.go b/deps/github.com/anacrolix/torrent/request-strategy/ajwerner-btree.go
new file mode 100644
index 0000000..183e2b9
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/request-strategy/ajwerner-btree.go
@@ -0,0 +1,44 @@
+package requestStrategy
+
+import (
+	"github.com/ajwerner/btree"
+)
+
+type ajwernerBtree struct {
+	btree btree.Set[pieceRequestOrderItem]
+}
+
+var _ Btree = (*ajwernerBtree)(nil)
+
+func NewAjwernerBtree() *ajwernerBtree {
+	return &ajwernerBtree{
+		btree: btree.MakeSet(func(t, t2 pieceRequestOrderItem) int {
+			return pieceOrderLess(&t, &t2).OrderingInt()
+		}),
+	}
+}
+
+func mustValue[V any](b bool, panicValue V) {
+	if !b {
+		panic(panicValue)
+	}
+}
+
+func (a *ajwernerBtree) Delete(item pieceRequestOrderItem) {
+	mustValue(a.btree.Delete(item), item)
+}
+
+func (a *ajwernerBtree) Add(item pieceRequestOrderItem) {
+	_, overwrote := a.btree.Upsert(item)
+	mustValue(!overwrote, item)
+}
+
+func (a *ajwernerBtree) Scan(f func(pieceRequestOrderItem) bool) {
+	it := a.btree.Iterator()
+	for it.First(); it.Valid(); it.Next() {
+		if !f(it.Cur()) {
+			break
+		}
+	}
+}
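+
+// A minimal usage sketch (not upstream code): Scan yields items in comparator
+// order and stops when the callback returns false, so collecting a bounded
+// prefix looks like this.
+func firstNItems(b Btree, n int) (items []pieceRequestOrderItem) {
+	if n <= 0 {
+		return
+	}
+	b.Scan(func(it pieceRequestOrderItem) bool {
+		items = append(items, it)
+		return len(items) < n
+	})
+	return
+}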
diff --git a/deps/github.com/anacrolix/torrent/request-strategy/order.go b/deps/github.com/anacrolix/torrent/request-strategy/order.go
new file mode 100644
index 0000000..df656f6
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/request-strategy/order.go
@@ -0,0 +1,99 @@
+package requestStrategy
+
+import (
+	"bytes"
+	"expvar"
+
+	g "github.com/anacrolix/generics"
+	"github.com/anacrolix/multiless"
+
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/anacrolix/torrent/types"
+)
+
+type (
+	RequestIndex  uint32
+	ChunkIndex    = RequestIndex
+	Request       = types.Request
+	pieceIndex    = types.PieceIndex
+	piecePriority = types.PiecePriority
+	// This can be made into a type-param later, will be great for testing.
+	ChunkSpec = types.ChunkSpec
+)
+
+func pieceOrderLess(i, j *pieceRequestOrderItem) multiless.Computation {
+	return multiless.New().Int(
+		int(j.state.Priority), int(i.state.Priority),
+		// TODO: Should we match on complete here to prevent churn when availability changes?
+	).Bool(
+		j.state.Partial, i.state.Partial,
+	).Int(
+		// If this is done with relative availability, do we lose some determinism? If completeness
+		// is used, would that push this far enough down?
+		i.state.Availability, j.state.Availability,
+	).Int(
+		i.key.Index, j.key.Index,
+	).Lazy(func() multiless.Computation {
+		return multiless.New().Cmp(bytes.Compare(
+			i.key.InfoHash[:],
+			j.key.InfoHash[:],
+		))
+	})
+}
+
+var packageExpvarMap = expvar.NewMap("request-strategy")
+
+// Calls f with requestable pieces in order.
+func GetRequestablePieces(
+	input Input, pro *PieceRequestOrder,
+	f func(ih metainfo.Hash, pieceIndex int, orderState PieceRequestOrderState),
+) {
+	// Storage capacity left for this run. A nil pointer means no capacity limit.
+	var storageLeft *int64
+	if cap, ok := input.Capacity(); ok {
+		storageLeft = &cap
+	}
+	var allTorrentsUnverifiedBytes int64
+	var lastItem g.Option[pieceRequestOrderItem]
+	pro.tree.Scan(func(_i pieceRequestOrderItem) bool {
+		// Check that scan emits pieces in priority order.
+		if lastItem.Ok {
+			if _i.Less(&lastItem.Value) {
+				panic("scan not in order")
+			}
+		}
+		lastItem.Set(_i)
+
+		ih := _i.key.InfoHash
+		t := input.Torrent(ih)
+		pieceLength := t.PieceLength()
+		if storageLeft != nil {
+			if *storageLeft < pieceLength {
+				return false
+			}
+			*storageLeft -= pieceLength
+		}
+		if t.IgnorePiece(_i.key.Index) {
+			// TODO: Clarify exactly what is verified. Stuff that's being hashed should be
+			// considered unverified and hold up further requests.
+			return true
+		}
+		if input.MaxUnverifiedBytes() != 0 && allTorrentsUnverifiedBytes+pieceLength > input.MaxUnverifiedBytes() {
+			return true
+		}
+		allTorrentsUnverifiedBytes += pieceLength
+		f(ih, _i.key.Index, _i.state)
+		return true
+	})
+}
+
+type Input interface {
+	Torrent(metainfo.Hash) Torrent
+	// Storage capacity, shared among all Torrents with the same storage.TorrentCapacity pointer in
+	// their storage.Torrent references.
+	Capacity() (cap int64, capped bool)
+	// Across all the Torrents. This might be partitioned by storage capacity key now.
+	MaxUnverifiedBytes() int64
+}
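+
+// A minimal sketch of an alternative Input with caller-supplied torrents;
+// illustrative only, the torrent package wires up its own implementation.
+type fixedInput struct {
+	torrents map[metainfo.Hash]Torrent
+	capacity int64
+}
+
+func (f fixedInput) Torrent(ih metainfo.Hash) Torrent { return f.torrents[ih] }
+
+// A zero capacity is treated as "no limit" in this sketch.
+func (f fixedInput) Capacity() (int64, bool) { return f.capacity, f.capacity != 0 }
+
+func (f fixedInput) MaxUnverifiedBytes() int64 { return 0 }
+
+var _ Input = fixedInput{}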
diff --git a/deps/github.com/anacrolix/torrent/request-strategy/peer.go b/deps/github.com/anacrolix/torrent/request-strategy/peer.go
new file mode 100644
index 0000000..4176188
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/request-strategy/peer.go
@@ -0,0 +1,32 @@
+package requestStrategy
+
+import (
+	typedRoaring "github.com/anacrolix/torrent/typed-roaring"
+)
+
+type PeerRequestState struct {
+	Interested bool
+	Requests   PeerRequests
+	// Cancelled and awaiting a response.
+	Cancelled typedRoaring.Bitmap[RequestIndex]
+}
+
+// A set of request indices iterable by order added.
+type PeerRequests interface {
+	// Can be more efficient than GetCardinality.
+	IsEmpty() bool
+	// See roaring.Bitmap.GetCardinality.
+	GetCardinality() uint64
+	Contains(RequestIndex) bool
+	// Should not adjust iteration order if item already exists, although I don't think that usage
+	// exists.
+	Add(RequestIndex)
+	// See roaring.Bitmap.Rank.
+	Rank(RequestIndex) uint64
+	// Must yield in order items were added.
+	Iterate(func(RequestIndex) bool)
+	// See roaring.Bitmap.CheckedRemove.
+	CheckedRemove(RequestIndex) bool
+	// Iterate a snapshot of the values. It is safe to mutate the underlying data structure.
+	IterateSnapshot(func(RequestIndex) bool)
+}
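+
+// A minimal sketch of one way to satisfy the insertion-order contract above,
+// pairing a membership set with an append-only slice; the package's real
+// implementation lives elsewhere. Only Add and Iterate are shown.
+type orderedRequests struct {
+	present map[RequestIndex]struct{}
+	order   []RequestIndex
+}
+
+func (o *orderedRequests) Add(r RequestIndex) {
+	if _, ok := o.present[r]; ok {
+		// Re-adding must not disturb iteration order.
+		return
+	}
+	if o.present == nil {
+		o.present = make(map[RequestIndex]struct{})
+	}
+	o.present[r] = struct{}{}
+	o.order = append(o.order, r)
+}
+
+func (o *orderedRequests) Iterate(f func(RequestIndex) bool) {
+	for _, r := range o.order {
+		if !f(r) {
+			return
+		}
+	}
+}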
diff --git a/deps/github.com/anacrolix/torrent/request-strategy/piece-request-order.go b/deps/github.com/anacrolix/torrent/request-strategy/piece-request-order.go
new file mode 100644
index 0000000..3056741
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/request-strategy/piece-request-order.go
@@ -0,0 +1,81 @@
+package requestStrategy
+
+import "github.com/anacrolix/torrent/metainfo"
+
+type Btree interface {
+	Delete(pieceRequestOrderItem)
+	Add(pieceRequestOrderItem)
+	Scan(func(pieceRequestOrderItem) bool)
+}
+
+func NewPieceOrder(btree Btree, cap int) *PieceRequestOrder {
+	return &PieceRequestOrder{
+		tree: btree,
+		keys: make(map[PieceRequestOrderKey]PieceRequestOrderState, cap),
+	}
+}
+
+type PieceRequestOrder struct {
+	tree Btree
+	keys map[PieceRequestOrderKey]PieceRequestOrderState
+}
+
+type PieceRequestOrderKey struct {
+	InfoHash metainfo.Hash
+	Index    int
+}
+
+type PieceRequestOrderState struct {
+	Priority     piecePriority
+	Partial      bool
+	Availability int
+}
+
+type pieceRequestOrderItem struct {
+	key   PieceRequestOrderKey
+	state PieceRequestOrderState
+}
+
+func (me *pieceRequestOrderItem) Less(otherConcrete *pieceRequestOrderItem) bool {
+	return pieceOrderLess(me, otherConcrete).Less()
+}
+
+func (me *PieceRequestOrder) Add(key PieceRequestOrderKey, state PieceRequestOrderState) {
+	if _, ok := me.keys[key]; ok {
+		panic(key)
+	}
+	me.tree.Add(pieceRequestOrderItem{key, state})
+	me.keys[key] = state
+}
+
+func (me *PieceRequestOrder) Update(
+	key PieceRequestOrderKey,
+	state PieceRequestOrderState,
+) {
+	oldState, ok := me.keys[key]
+	if !ok {
+		panic("key should have been added already")
+	}
+	if state == oldState {
+		return
+	}
+	me.tree.Delete(pieceRequestOrderItem{key, oldState})
+	me.tree.Add(pieceRequestOrderItem{key, state})
+	me.keys[key] = state
+}
+
+func (me *PieceRequestOrder) existingItemForKey(key PieceRequestOrderKey) pieceRequestOrderItem {
+	return pieceRequestOrderItem{
+		key:   key,
+		state: me.keys[key],
+	}
+}
+
+func (me *PieceRequestOrder) Delete(key PieceRequestOrderKey) {
+	me.tree.Delete(pieceRequestOrderItem{key, me.keys[key]})
+	delete(me.keys, key)
+}
+
+func (me *PieceRequestOrder) Len() int {
+	return len(me.keys)
+}
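+
+// A minimal lifecycle sketch with illustrative values: Add must precede Update,
+// and Delete must be passed a key that is still present.
+func examplePieceRequestOrderLifecycle() {
+	pro := NewPieceOrder(NewAjwernerBtree(), 8)
+	key := PieceRequestOrderKey{Index: 0}
+	pro.Add(key, PieceRequestOrderState{Priority: 1})
+	pro.Update(key, PieceRequestOrderState{Priority: 1, Availability: 1})
+	pro.Delete(key)
+}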
diff --git a/deps/github.com/anacrolix/torrent/request-strategy/piece-request-order_test.go b/deps/github.com/anacrolix/torrent/request-strategy/piece-request-order_test.go
new file mode 100644
index 0000000..ee5fb39
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/request-strategy/piece-request-order_test.go
@@ -0,0 +1,106 @@
+package requestStrategy
+
+import (
+	"testing"
+
+	"github.com/bradfitz/iter"
+)
+
+func benchmarkPieceRequestOrder[B Btree](
+	b *testing.B,
+	// Initialize the next run, and return a Btree
+	newBtree func() B,
+	// Set any path hinting for the specified piece
+	hintForPiece func(index int),
+	numPieces int,
+) {
+	b.ResetTimer()
+	b.ReportAllocs()
+	for range iter.N(b.N) {
+		pro := NewPieceOrder(newBtree(), numPieces)
+		state := PieceRequestOrderState{}
+		doPieces := func(m func(PieceRequestOrderKey)) {
+			for i := range iter.N(numPieces) {
+				key := PieceRequestOrderKey{
+					Index: i,
+				}
+				hintForPiece(i)
+				m(key)
+			}
+		}
+		doPieces(func(key PieceRequestOrderKey) {
+			pro.Add(key, state)
+		})
+		state.Availability++
+		doPieces(func(key PieceRequestOrderKey) {
+			pro.Update(key, state)
+		})
+		pro.tree.Scan(func(item pieceRequestOrderItem) bool {
+			return true
+		})
+		doPieces(func(key PieceRequestOrderKey) {
+			state.Priority = piecePriority(key.Index / 4)
+			pro.Update(key, state)
+		})
+		pro.tree.Scan(func(item pieceRequestOrderItem) bool {
+			return item.key.Index < 1000
+		})
+		state.Priority = 0
+		state.Availability++
+		doPieces(func(key PieceRequestOrderKey) {
+			pro.Update(key, state)
+		})
+		pro.tree.Scan(func(item pieceRequestOrderItem) bool {
+			return item.key.Index < 1000
+		})
+		state.Availability--
+		doPieces(func(key PieceRequestOrderKey) {
+			pro.Update(key, state)
+		})
+		doPieces(pro.Delete)
+		if pro.Len() != 0 {
+			b.FailNow()
+		}
+	}
+}
+
+func zero[T any](t *T) {
+	var zt T
+	*t = zt
+}
+
+func BenchmarkPieceRequestOrder(b *testing.B) {
+	const numPieces = 2000
+	b.Run("TidwallBtree", func(b *testing.B) {
+		b.Run("NoPathHints", func(b *testing.B) {
+			benchmarkPieceRequestOrder(b, NewTidwallBtree, func(int) {}, numPieces)
+		})
+		b.Run("SharedPathHint", func(b *testing.B) {
+			var pathHint PieceRequestOrderPathHint
+			var btree *tidwallBtree
+			benchmarkPieceRequestOrder(
+				b, func() *tidwallBtree {
+					zero(&pathHint)
+					btree = NewTidwallBtree()
+					btree.PathHint = &pathHint
+					return btree
+				}, func(int) {}, numPieces,
+			)
+		})
+		b.Run("PathHintPerPiece", func(b *testing.B) {
+			pathHints := make([]PieceRequestOrderPathHint, numPieces)
+			var btree *tidwallBtree
+			benchmarkPieceRequestOrder(
+				b, func() *tidwallBtree {
+					btree = NewTidwallBtree()
+					return btree
+				}, func(index int) {
+					btree.PathHint = &pathHints[index]
+				}, numPieces,
+			)
+		})
+	})
+	b.Run("AjwernerBtree", func(b *testing.B) {
+		benchmarkPieceRequestOrder(b, NewAjwernerBtree, func(index int) {}, numPieces)
+	})
+}
diff --git a/deps/github.com/anacrolix/torrent/request-strategy/piece.go b/deps/github.com/anacrolix/torrent/request-strategy/piece.go
new file mode 100644
index 0000000..b858dff
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/request-strategy/piece.go
@@ -0,0 +1,12 @@
+package requestStrategy
+
+type ChunksIterFunc func(func(ChunkIndex))
+
+type ChunksIter interface {
+	Iter(func(ci ChunkIndex))
+}
+
+type Piece interface {
+	Request() bool
+	NumPendingChunks() int
+}
diff --git a/deps/github.com/anacrolix/torrent/request-strategy/tidwall-btree.go b/deps/github.com/anacrolix/torrent/request-strategy/tidwall-btree.go
new file mode 100644
index 0000000..f7eabcd
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/request-strategy/tidwall-btree.go
@@ -0,0 +1,37 @@
+package requestStrategy
+
+import (
+	"github.com/tidwall/btree"
+)
+
+type tidwallBtree struct {
+	tree     *btree.BTreeG[pieceRequestOrderItem]
+	PathHint *btree.PathHint
+}
+
+func (me *tidwallBtree) Scan(f func(pieceRequestOrderItem) bool) {
+	me.tree.Scan(f)
+}
+
+func NewTidwallBtree() *tidwallBtree {
+	return &tidwallBtree{
+		tree: btree.NewBTreeGOptions(
+			func(a, b pieceRequestOrderItem) bool {
+				return a.Less(&b)
+			},
+			btree.Options{NoLocks: true, Degree: 64}),
+	}
+}
+
+func (me *tidwallBtree) Add(item pieceRequestOrderItem) {
+	if _, ok := me.tree.SetHint(item, me.PathHint); ok {
+		panic("shouldn't already have this")
+	}
+}
+
+type PieceRequestOrderPathHint = btree.PathHint
+
+func (me *tidwallBtree) Delete(item pieceRequestOrderItem) {
+	_, deleted := me.tree.DeleteHint(item, me.PathHint)
+	mustValue(deleted, item)
+}
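+
+// Note on path hints: a btree.PathHint caches the descent path of the previous
+// operation, so passing the same hint to SetHint/DeleteHint speeds up runs of
+// operations on nearby keys. The benchmarks in piece-request-order_test.go
+// compare no hint, a shared hint, and a hint per piece.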
diff --git a/deps/github.com/anacrolix/torrent/request-strategy/torrent.go b/deps/github.com/anacrolix/torrent/request-strategy/torrent.go
new file mode 100644
index 0000000..5bc438e
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/request-strategy/torrent.go
@@ -0,0 +1,6 @@
+package requestStrategy
+
+type Torrent interface {
+	IgnorePiece(int) bool
+	PieceLength() int64
+}
diff --git a/deps/github.com/anacrolix/torrent/requesting.go b/deps/github.com/anacrolix/torrent/requesting.go
new file mode 100644
index 0000000..b70f264
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/requesting.go
@@ -0,0 +1,313 @@
+package torrent
+
+import (
+	"context"
+	"encoding/gob"
+	"fmt"
+	"reflect"
+	"runtime/pprof"
+	"time"
+	"unsafe"
+
+	"github.com/anacrolix/generics/heap"
+	"github.com/anacrolix/log"
+	"github.com/anacrolix/multiless"
+
+	requestStrategy "github.com/anacrolix/torrent/request-strategy"
+	typedRoaring "github.com/anacrolix/torrent/typed-roaring"
+)
+
+type (
+	// Since we have to store all the requests in memory, we can't reasonably exceed what could be
+	// indexed with the memory space available.
+	maxRequests = int
+)
+
+func (t *Torrent) requestStrategyPieceOrderState(i int) requestStrategy.PieceRequestOrderState {
+	return requestStrategy.PieceRequestOrderState{
+		Priority:     t.piece(i).purePriority(),
+		Partial:      t.piecePartiallyDownloaded(i),
+		Availability: t.piece(i).availability(),
+	}
+}
+
+func init() {
+	gob.Register(peerId{})
+}
+
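+// peerId pairs a Peer with its raw pointer value so it can be gob-encoded: the
+// methods below round-trip the pointer bits directly rather than encoding the
+// Peer's fields.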
+type peerId struct {
+	*Peer
+	ptr uintptr
+}
+
+func (p peerId) Uintptr() uintptr {
+	return p.ptr
+}
+
+func (p peerId) GobEncode() (b []byte, _ error) {
+	*(*reflect.SliceHeader)(unsafe.Pointer(&b)) = reflect.SliceHeader{
+		Data: uintptr(unsafe.Pointer(&p.ptr)),
+		Len:  int(unsafe.Sizeof(p.ptr)),
+		Cap:  int(unsafe.Sizeof(p.ptr)),
+	}
+	return
+}
+
+func (p *peerId) GobDecode(b []byte) error {
+	if uintptr(len(b)) != unsafe.Sizeof(p.ptr) {
+		panic(len(b))
+	}
+	ptr := unsafe.Pointer(&b[0])
+	p.ptr = *(*uintptr)(ptr)
+	dst := reflect.SliceHeader{
+		Data: uintptr(unsafe.Pointer(&p.Peer)),
+		Len:  int(unsafe.Sizeof(p.Peer)),
+		Cap:  int(unsafe.Sizeof(p.Peer)),
+	}
+	copy(*(*[]byte)(unsafe.Pointer(&dst)), b)
+	return nil
+}
+
+type (
+	RequestIndex   = requestStrategy.RequestIndex
+	chunkIndexType = requestStrategy.ChunkIndex
+)
+
+type desiredPeerRequests struct {
+	requestIndexes []RequestIndex
+	peer           *Peer
+	pieceStates    []requestStrategy.PieceRequestOrderState
+}
+
+func (p *desiredPeerRequests) lessByValue(leftRequest, rightRequest RequestIndex) bool {
+	t := p.peer.t
+	leftPieceIndex := t.pieceIndexOfRequestIndex(leftRequest)
+	rightPieceIndex := t.pieceIndexOfRequestIndex(rightRequest)
+	ml := multiless.New()
+	// Push requests that can't be served right now to the end. But we don't throw them away unless
+	// there's a better alternative. This is for when we're using the fast extension and get choked
+	// but our requests could still be good when we get unchoked.
+	if p.peer.peerChoking {
+		ml = ml.Bool(
+			!p.peer.peerAllowedFast.Contains(leftPieceIndex),
+			!p.peer.peerAllowedFast.Contains(rightPieceIndex),
+		)
+	}
+	leftPiece := &p.pieceStates[leftPieceIndex]
+	rightPiece := &p.pieceStates[rightPieceIndex]
+	// Putting this first means we can steal requests from lesser-performing peers for our first few
+	// new requests.
+	priority := func() piecePriority {
+		// Technically we would be happy with the cached priority here, except we don't actually
+		// cache it anymore, and Torrent.piecePriority just does another lookup of *Piece to resolve
+		// the priority through Piece.purePriority, which is probably slower.
+		leftPriority := leftPiece.Priority
+		rightPriority := rightPiece.Priority
+		ml = ml.Int(
+			-int(leftPriority),
+			-int(rightPriority),
+		)
+		if !ml.Ok() {
+			if leftPriority != rightPriority {
+				panic("expected equal")
+			}
+		}
+		return leftPriority
+	}()
+	if ml.Ok() {
+		return ml.MustLess()
+	}
+	leftRequestState := t.requestState[leftRequest]
+	rightRequestState := t.requestState[rightRequest]
+	leftPeer := leftRequestState.peer
+	rightPeer := rightRequestState.peer
+	// Prefer chunks already requested from this peer.
+	ml = ml.Bool(rightPeer == p.peer, leftPeer == p.peer)
+	// Prefer unrequested chunks.
+	ml = ml.Bool(rightPeer == nil, leftPeer == nil)
+	if ml.Ok() {
+		return ml.MustLess()
+	}
+	if leftPeer != nil {
+		// The right peer should also be set, or we'd have resolved the computation by now.
+		ml = ml.Uint64(
+			rightPeer.requestState.Requests.GetCardinality(),
+			leftPeer.requestState.Requests.GetCardinality(),
+		)
+		// Neither lastRequested should be zero here: both requests have an owning peer,
+		// which implies they were actually requested at some point.
+		leftLast := leftRequestState.when
+		rightLast := rightRequestState.when
+		if leftLast.IsZero() || rightLast.IsZero() {
+			panic("expected non-zero last requested times")
+		}
+		// We want the most-recently requested on the left. Clients like Transmission serve requests
+		// in received order, so the most recently-requested is the one that has the longest until
+		// it will be served and therefore is the best candidate to cancel.
+		ml = ml.CmpInt64(rightLast.Sub(leftLast).Nanoseconds())
+	}
+	ml = ml.Int(
+		leftPiece.Availability,
+		rightPiece.Availability)
+	if priority == PiecePriorityReadahead {
+		// TODO: For readahead in particular, it would be even better to consider distance from the
+		// reader position so that reads earlier in a torrent don't starve reads later in the
+		// torrent. This would probably require reconsideration of how readahead priority works.
+		ml = ml.Int(leftPieceIndex, rightPieceIndex)
+	} else {
+		ml = ml.Int(t.pieceRequestOrder[leftPieceIndex], t.pieceRequestOrder[rightPieceIndex])
+	}
+	return ml.Less()
+}
+
+type desiredRequestState struct {
+	Requests   desiredPeerRequests
+	Interested bool
+}
+
+func (p *Peer) getDesiredRequestState() (desired desiredRequestState) {
+	t := p.t
+	if !t.haveInfo() {
+		return
+	}
+	if t.closed.IsSet() {
+		return
+	}
+	if t.dataDownloadDisallowed.Bool() {
+		return
+	}
+	input := t.getRequestStrategyInput()
+	requestHeap := desiredPeerRequests{
+		peer:           p,
+		pieceStates:    t.requestPieceStates,
+		requestIndexes: t.requestIndexes,
+	}
+	// Caller-provided allocation for roaring bitmap iteration.
+	var it typedRoaring.Iterator[RequestIndex]
+	requestStrategy.GetRequestablePieces(
+		input,
+		t.getPieceRequestOrder(),
+		func(ih InfoHash, pieceIndex int, pieceExtra requestStrategy.PieceRequestOrderState) {
+			if ih != t.infoHash {
+				return
+			}
+			if !p.peerHasPiece(pieceIndex) {
+				return
+			}
+			requestHeap.pieceStates[pieceIndex] = pieceExtra
+			allowedFast := p.peerAllowedFast.Contains(pieceIndex)
+			t.iterUndirtiedRequestIndexesInPiece(&it, pieceIndex, func(r requestStrategy.RequestIndex) {
+				if !allowedFast {
+					// We must signal interest to request this. TODO: We could set interested if the
+					// peer's pieces (minus the allowed fast set) overlap with our missing pieces
+					// while there are any readers or pending pieces.
+					desired.Interested = true
+					// We can make, or keep sustaining, a request here if we're not choked, or if we
+					// made the request previously (presumably while unchoked) and the peer hasn't
+					// responded yet (the request was retained because we're using the fast
+					// extension).
+					if p.peerChoking && !p.requestState.Requests.Contains(r) {
+						// We can't request this right now.
+						return
+					}
+				}
+				if p.requestState.Cancelled.Contains(r) {
+					// Can't re-request while awaiting acknowledgement.
+					return
+				}
+				requestHeap.requestIndexes = append(requestHeap.requestIndexes, r)
+			})
+		},
+	)
+	t.assertPendingRequests()
+	desired.Requests = requestHeap
+	return
+}
+
+func (p *Peer) maybeUpdateActualRequestState() {
+	if p.closed.IsSet() {
+		return
+	}
+	if p.needRequestUpdate == "" {
+		return
+	}
+	if p.needRequestUpdate == peerUpdateRequestsTimerReason {
+		since := time.Since(p.lastRequestUpdate)
+		if since < updateRequestsTimerDuration {
+			panic(since)
+		}
+	}
+	pprof.Do(
+		context.Background(),
+		pprof.Labels("update request", p.needRequestUpdate),
+		func(_ context.Context) {
+			next := p.getDesiredRequestState()
+			p.applyRequestState(next)
+			p.t.requestIndexes = next.Requests.requestIndexes[:0]
+		},
+	)
+}
+
+// Transmit/action the request state to the peer.
+func (p *Peer) applyRequestState(next desiredRequestState) {
+	current := &p.requestState
+	if !p.setInterested(next.Interested) {
+		return
+	}
+	more := true
+	requestHeap := heap.InterfaceForSlice(&next.Requests.requestIndexes, next.Requests.lessByValue)
+	heap.Init(requestHeap)
+
+	t := p.t
+	originalRequestCount := current.Requests.GetCardinality()
+	for {
+		if requestHeap.Len() == 0 {
+			break
+		}
+		numPending := maxRequests(current.Requests.GetCardinality() + current.Cancelled.GetCardinality())
+		if numPending >= p.nominalMaxRequests() {
+			break
+		}
+		req := heap.Pop(requestHeap)
+		existing := t.requestingPeer(req)
+		if existing != nil && existing != p {
+			// Don't steal from the poor.
+			diff := int64(current.Requests.GetCardinality()) + 1 - (int64(existing.uncancelledRequests()) - 1)
+			// Steal a request that leaves us with one more request than the existing peer
+			// connection if the stealer more recently received a chunk.
+			if diff > 1 || (diff == 1 && p.lastUsefulChunkReceived.Before(existing.lastUsefulChunkReceived)) {
+				continue
+			}
+			t.cancelRequest(req)
+		}
+		more = p.mustRequest(req)
+		if !more {
+			break
+		}
+	}
+	if !more {
+		// This might fail if we incorrectly determine that we can fit up to the maximum allowed
+		// requests into the available write buffer space. We don't want that to happen because it
+		// makes our peak requests dependent on how much was already in the buffer.
+		panic(fmt.Sprintf(
+			"couldn't fill apply entire request state [newRequests=%v]",
+			current.Requests.GetCardinality()-originalRequestCount))
+	}
+	newPeakRequests := maxRequests(current.Requests.GetCardinality() - originalRequestCount)
+	// log.Printf(
+	// 	"requests %v->%v (peak %v->%v) reason %q (peer %v)",
+	// 	originalRequestCount, current.Requests.GetCardinality(), p.peakRequests, newPeakRequests, p.needRequestUpdate, p)
+	p.peakRequests = newPeakRequests
+	p.needRequestUpdate = ""
+	p.lastRequestUpdate = time.Now()
+	if enableUpdateRequestsTimer {
+		p.updateRequestsTimer.Reset(updateRequestsTimerDuration)
+	}
+}
+
+// This could be set to 10s to match the unchoke/request update interval recommended by some
+// specifications. I've set it shorter to trigger it more often for testing for now.
+const (
+	updateRequestsTimerDuration = 3 * time.Second
+	enableUpdateRequestsTimer   = false
+)
diff --git a/deps/github.com/anacrolix/torrent/requesting_test.go b/deps/github.com/anacrolix/torrent/requesting_test.go
new file mode 100644
index 0000000..6c791a5
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/requesting_test.go
@@ -0,0 +1,78 @@
+package torrent
+
+import (
+	"testing"
+
+	"github.com/bradfitz/iter"
+	qt "github.com/frankban/quicktest"
+
+	pp "github.com/anacrolix/torrent/peer_protocol"
+)
+
+func keysAsSlice(m map[Request]struct{}) (sl []Request) {
+	for k := range m {
+		sl = append(sl, k)
+	}
+	return
+}
+
+func makeTypicalRequests() map[Request]struct{} {
+	m := make(map[Request]struct{})
+	for p := pp.Integer(0); p < 4; p++ {
+		for c := pp.Integer(0); c < 16; c++ {
+			m[Request{p, ChunkSpec{c * defaultChunkSize, defaultChunkSize}}] = struct{}{}
+		}
+	}
+	return m
+}
+
+func TestLogExampleRequestMapOrdering(t *testing.T) {
+	for k := range makeTypicalRequests() {
+		t.Log(k)
+	}
+}
+
+func TestRequestMapOrderingPersistent(t *testing.T) {
+	m := makeTypicalRequests()
+	// Shows that map order is persistent across separate range statements.
+	qt.Assert(t, keysAsSlice(m), qt.ContentEquals, keysAsSlice(m))
+}
+
+func TestRequestMapOrderAcrossInstances(t *testing.T) {
+	// This shows that different map instances with the same contents can have the same range order.
+	qt.Assert(t, keysAsSlice(makeTypicalRequests()), qt.ContentEquals, keysAsSlice(makeTypicalRequests()))
+}
+
+// Added for testing repeating loop iteration after shuffling in Peer.applyRequestState.
+func TestForLoopRepeatItem(t *testing.T) {
+	t.Run("ExplicitLoopVar", func(t *testing.T) {
+		once := false
+		var seen []int
+		for i := 0; i < 4; i++ {
+			seen = append(seen, i)
+			if !once && i == 2 {
+				once = true
+				i--
+				// Will i++ still run?
+				continue
+			}
+		}
+		// We can mutate i and it's observed by the loop. No special treatment of the loop var.
+		qt.Assert(t, seen, qt.DeepEquals, []int{0, 1, 2, 2, 3})
+	})
+	t.Run("Range", func(t *testing.T) {
+		once := false
+		var seen []int
+		for i := range iter.N(4) {
+			seen = append(seen, i)
+			if !once && i == 2 {
+				once = true
+				// Can we actually modify the next value of i produced by the range?
+				i--
+				continue
+			}
+		}
+		// Range ignores any mutation to i.
+		qt.Assert(t, seen, qt.DeepEquals, []int{0, 1, 2, 3})
+	})
+}
diff --git a/deps/github.com/anacrolix/torrent/reuse_test.go b/deps/github.com/anacrolix/torrent/reuse_test.go
new file mode 100644
index 0000000..5da4ab8
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/reuse_test.go
@@ -0,0 +1,79 @@
+package torrent
+
+import (
+	"context"
+	"net"
+	"sync/atomic"
+	"syscall"
+	"testing"
+
+	"github.com/anacrolix/log"
+	qt "github.com/frankban/quicktest"
+)
+
+// Show that multiple connections from the same local TCP port to the same remote port will fail.
+func TestTcpPortReuseIsABadIdea(t *testing.T) {
+	remote, err := net.Listen("tcp", "localhost:0")
+	c := qt.New(t)
+	c.Assert(err, qt.IsNil)
+	defer remote.Close()
+	dialer := net.Dialer{}
+	// Show that we can't duplicate an existing connection even with various socket options.
+	dialer.Control = func(network, address string, c syscall.RawConn) (err error) {
+		return c.Control(func(fd uintptr) {
+			err = setReusePortSockOpts(fd)
+		})
+	}
+	// Tie up a local port to the remote.
+	first, err := dialer.Dial("tcp", remote.Addr().String())
+	c.Assert(err, qt.IsNil)
+	defer first.Close()
+	// Show that dialling the remote with the same local port fails.
+	dialer.LocalAddr = first.LocalAddr()
+	_, err = dialer.Dial("tcp", remote.Addr().String())
+	c.Assert(err, qt.IsNotNil)
+	// Show that not fixing the local port again allows connections to succeed.
+	dialer.LocalAddr = nil
+	second, err := dialer.Dial("tcp", remote.Addr().String())
+	c.Assert(err, qt.IsNil)
+	second.Close()
+}
+
+// Show that multiple connections from the same local utp socket to the same remote port will
+// succeed. This is necessary for ut_holepunch to work.
+func TestUtpLocalPortIsReusable(t *testing.T) {
+	const network = "udp"
+	c := qt.New(t)
+	remote, err := NewUtpSocket(network, "localhost:0", nil, log.Default)
+	c.Assert(err, qt.IsNil)
+	defer remote.Close()
+	var remoteAccepts int32
+	doneAccepting := make(chan struct{})
+	go func() {
+		defer close(doneAccepting)
+		for {
+			c, err := remote.Accept()
+			if err != nil {
+				if atomic.LoadInt32(&remoteAccepts) != 2 {
+					t.Logf("error accepting on remote: %v", err)
+				}
+				break
+			}
+			// This is not a leak, bugger off.
+			defer c.Close()
+			atomic.AddInt32(&remoteAccepts, 1)
+		}
+	}()
+	local, err := NewUtpSocket(network, "localhost:0", nil, log.Default)
+	c.Assert(err, qt.IsNil)
+	defer local.Close()
+	first, err := local.DialContext(context.Background(), network, remote.Addr().String())
+	c.Assert(err, qt.IsNil)
+	defer first.Close()
+	second, err := local.DialContext(context.Background(), network, remote.Addr().String())
+	c.Assert(err, qt.IsNil)
+	defer second.Close()
+	remote.Close()
+	<-doneAccepting
+	c.Assert(atomic.LoadInt32(&remoteAccepts), qt.Equals, int32(2))
+}
diff --git a/deps/github.com/anacrolix/torrent/rlreader_test.go b/deps/github.com/anacrolix/torrent/rlreader_test.go
new file mode 100644
index 0000000..6bf25c4
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/rlreader_test.go
@@ -0,0 +1,128 @@
+package torrent
+
+import (
+	"io"
+	"log"
+	"math/rand"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/time/rate"
+)
+
+func writeN(ws []io.Writer, n int) error {
+	b := make([]byte, n)
+	for _, w := range ws[1:] {
+		n1 := rand.Intn(n)
+		wn, err := w.Write(b[:n1])
+		if wn != n1 {
+			if err == nil {
+				panic(n1)
+			}
+			return err
+		}
+		n -= n1
+	}
+	wn, err := ws[0].Write(b[:n])
+	if wn != n {
+		if err == nil {
+			panic(n)
+		}
+	}
+	return err
+}
+
+func TestRateLimitReaders(t *testing.T) {
+	const (
+		numReaders     = 2
+		bytesPerSecond = 100
+		burst          = 5
+		readSize       = 6
+		writeRounds    = 10
+		bytesPerRound  = 12
+	)
+	control := rate.NewLimiter(bytesPerSecond, burst)
+	shared := rate.NewLimiter(bytesPerSecond, burst)
+	var (
+		ws []io.Writer
+		cs []io.Closer
+	)
+	wg := sync.WaitGroup{}
+	type read struct {
+		N int
+		// When the read was allowed.
+		At time.Time
+	}
+	reads := make(chan read)
+	done := make(chan struct{})
+	for i := 0; i < numReaders; i += 1 {
+		r, w := io.Pipe()
+		ws = append(ws, w)
+		cs = append(cs, w)
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			r := rateLimitedReader{
+				l: shared,
+				r: r,
+			}
+			b := make([]byte, readSize)
+			for {
+				n, err := r.Read(b)
+				select {
+				case reads <- read{n, r.lastRead}:
+				case <-done:
+					return
+				}
+				if err == io.EOF {
+					return
+				}
+				if err != nil {
+					panic(err)
+				}
+			}
+		}()
+	}
+	closeAll := func() {
+		for _, c := range cs {
+			c.Close()
+		}
+	}
+	defer func() {
+		close(done)
+		closeAll()
+		wg.Wait()
+	}()
+	written := 0
+	go func() {
+		for i := 0; i < writeRounds; i += 1 {
+			err := writeN(ws, bytesPerRound)
+			if err != nil {
+				log.Printf("error writing: %s", err)
+				break
+			}
+			written += bytesPerRound
+		}
+		closeAll()
+		wg.Wait()
+		close(reads)
+	}()
+	totalBytesRead := 0
+	started := time.Now()
+	for r := range reads {
+		totalBytesRead += r.N
+		require.False(t, r.At.IsZero())
+		// Copy what the reader should have done with its reservation.
+		res := control.ReserveN(r.At, r.N)
+		// If we don't have to wait with the control, the reader has gone too
+		// fast.
+		if res.Delay() > 0 {
+			log.Printf("%d bytes not allowed at %s", r.N, time.Since(started))
+			t.FailNow()
+		}
+	}
+	assert.EqualValues(t, writeRounds*bytesPerRound, totalBytesRead)
+}
diff --git a/deps/github.com/anacrolix/torrent/roaring.go b/deps/github.com/anacrolix/torrent/roaring.go
new file mode 100644
index 0000000..8e39416
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/roaring.go
@@ -0,0 +1,16 @@
+package torrent
+
+import (
+	"github.com/anacrolix/torrent/typed-roaring"
+)
+
+// Return the number of bits set in the range. To do this we need the rank of the item before the
+// first, and the rank of the last item. An off-by-one minefield. Hopefully I haven't missed
+// something in roaring's API that provides this.
+func roaringBitmapRangeCardinality[T typedRoaring.BitConstraint](bm interface{ Rank(T) uint64 }, start, end T) (card uint64) {
+	card = bm.Rank(end - 1)
+	if start != 0 {
+		card -= bm.Rank(start - 1)
+	}
+	return
+}
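+
+// Worked example: with bits {2, 3, 7} set, the cardinality of [2, 5) is
+// Rank(4) - Rank(1) = 2 - 0 = 2, counting bits 2 and 3.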
diff --git a/deps/github.com/anacrolix/torrent/segments/index.go b/deps/github.com/anacrolix/torrent/segments/index.go
new file mode 100644
index 0000000..6717dcb
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/segments/index.go
@@ -0,0 +1,45 @@
+package segments
+
+import (
+	"sort"
+)
+
+func NewIndex(segments LengthIter) (ret Index) {
+	var start Length
+	for l, ok := segments(); ok; l, ok = segments() {
+		ret.segments = append(ret.segments, Extent{start, l})
+		start += l
+	}
+	return
+}
+
+type Index struct {
+	segments []Extent
+}
+
+func (me Index) iterSegments() func() (Length, bool) {
+	return func() (Length, bool) {
+		if len(me.segments) == 0 {
+			return 0, false
+		} else {
+			l := me.segments[0].Length
+			me.segments = me.segments[1:]
+			return l, true
+		}
+	}
+}
+
+func (me Index) Locate(e Extent, output Callback) bool {
+	first := sort.Search(len(me.segments), func(i int) bool {
+		_e := me.segments[i]
+		return _e.End() > e.Start
+	})
+	if first == len(me.segments) {
+		return false
+	}
+	e.Start -= me.segments[first].Start
+	me.segments = me.segments[first:]
+	return Scan(me.iterSegments(), e, func(i int, e Extent) bool {
+		return output(i+first, e)
+	})
+}
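+
+// A minimal usage sketch with illustrative lengths: locating a 20-byte extent
+// that straddles two segments of 100 and 50 bytes visits (0, {90, 10}) and
+// then (1, {0, 10}).
+func exampleLocate() {
+	ls := []Length{100, 50}
+	idx := NewIndex(func() (Length, bool) {
+		if len(ls) == 0 {
+			return 0, false
+		}
+		l := ls[0]
+		ls = ls[1:]
+		return l, true
+	})
+	idx.Locate(Extent{Start: 90, Length: 20}, func(i int, e Extent) bool {
+		return true
+	})
+}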
diff --git a/deps/github.com/anacrolix/torrent/segments/segments.go b/deps/github.com/anacrolix/torrent/segments/segments.go
new file mode 100644
index 0000000..90e77ce
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/segments/segments.go
@@ -0,0 +1,63 @@
+package segments
+
+type Int = int64
+
+type Length = Int
+
+func min(i Int, rest ...Int) Int {
+	ret := i
+	for _, i := range rest {
+		if i < ret {
+			ret = i
+		}
+	}
+	return ret
+}
+
+type Extent struct {
+	Start, Length Int
+}
+
+func (e Extent) End() Int {
+	return e.Start + e.Length
+}
+
+type (
+	Callback   = func(int, Extent) bool
+	LengthIter = func() (Length, bool)
+)
+
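+// Scan walks haystack, invoking callback with each overlapping segment index
+// and the needle's extent relative to that segment. It returns false only if
+// the haystack runs out before the needle is fully located; a callback
+// returning false also ends the scan.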
+func Scan(haystack LengthIter, needle Extent, callback Callback) bool {
+	i := 0
+	for needle.Length != 0 {
+		l, ok := haystack()
+		if !ok {
+			return false
+		}
+		if needle.Start < l || needle.Start == l && l == 0 {
+			e1 := Extent{
+				Start:  needle.Start,
+				Length: min(l, needle.End()) - needle.Start,
+			}
+			if e1.Length >= 0 {
+				if !callback(i, e1) {
+					return true
+				}
+				needle.Start = 0
+				needle.Length -= e1.Length
+			}
+		} else {
+			needle.Start -= l
+		}
+		i++
+	}
+	return true
+}
+
+func LocaterFromLengthIter(li LengthIter) Locater {
+	return func(e Extent, c Callback) bool {
+		return Scan(li, e, c)
+	}
+}
+
+type Locater func(Extent, Callback) bool
diff --git a/deps/github.com/anacrolix/torrent/segments/segments_test.go b/deps/github.com/anacrolix/torrent/segments/segments_test.go
new file mode 100644
index 0000000..9ce9164
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/segments/segments_test.go
@@ -0,0 +1,92 @@
+package segments
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func LengthIterFromSlice(ls []Length) LengthIter {
+	return func() (Length, bool) {
+		switch len(ls) {
+		case 0:
+			return -1, false
+		default:
+			l := ls[0]
+			ls = ls[1:]
+			return l, true
+		}
+	}
+}
+
+type ScanCallbackValue struct {
+	Index int
+	Extent
+}
+
+type collectExtents []ScanCallbackValue
+
+func (me *collectExtents) scanCallback(i int, e Extent) bool {
+	*me = append(*me, ScanCallbackValue{
+		Index:  i,
+		Extent: e,
+	})
+	return true
+}
+
+type newLocater func(LengthIter) Locater
+
+func assertLocate(t *testing.T, nl newLocater, ls []Length, needle Extent, firstExpectedIndex int, expectedExtents []Extent) {
+	var actual collectExtents
+	var expected collectExtents
+	for i, e := range expectedExtents {
+		expected.scanCallback(firstExpectedIndex+i, e)
+	}
+	nl(LengthIterFromSlice(ls))(needle, actual.scanCallback)
+	assert.EqualValues(t, expected, actual)
+}
+
+func testLocater(t *testing.T, newLocater newLocater) {
+	assertLocate(t, newLocater,
+		[]Length{1, 0, 2, 0, 3},
+		Extent{2, 2},
+		2,
+		[]Extent{{1, 1}, {0, 0}, {0, 1}})
+	assertLocate(t, newLocater,
+		[]Length{1, 0, 2, 0, 3},
+		Extent{6, 2},
+		2,
+		[]Extent{})
+	assertLocate(t, newLocater,
+		[]Length{1652, 1514, 1554, 1618, 1546, 129241752, 1537}, // 128737588
+		Extent{0, 16384},
+		0,
+		[]Extent{
+			{0, 1652},
+			{0, 1514},
+			{0, 1554},
+			{0, 1618},
+			{0, 1546},
+			{0, 8500},
+		})
+	assertLocate(t, newLocater,
+		[]Length{1652, 1514, 1554, 1618, 1546, 129241752, 1537, 1536, 1551}, // 128737588
+		Extent{129236992, 16384},
+		5,
+		[]Extent{
+			{129229108, 12644},
+			{0, 1537},
+			{0, 1536},
+			{0, 667},
+		})
+}
+
+func TestScan(t *testing.T) {
+	testLocater(t, LocaterFromLengthIter)
+}
+
+func TestIndex(t *testing.T) {
+	testLocater(t, func(li LengthIter) Locater {
+		return NewIndex(li).Locate
+	})
+}
diff --git a/deps/github.com/anacrolix/torrent/smartban.go b/deps/github.com/anacrolix/torrent/smartban.go
new file mode 100644
index 0000000..034a702
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/smartban.go
@@ -0,0 +1,56 @@
+package torrent
+
+import (
+	"bytes"
+	"crypto/sha1"
+	"net/netip"
+
+	g "github.com/anacrolix/generics"
+
+	"github.com/anacrolix/torrent/smartban"
+)
+
+type bannableAddr = netip.Addr
+
+type smartBanCache = smartban.Cache[bannableAddr, RequestIndex, [sha1.Size]byte]
+
+type blockCheckingWriter struct {
+	cache        *smartBanCache
+	requestIndex RequestIndex
+	// Peers that didn't match blocks written now.
+	badPeers    map[bannableAddr]struct{}
+	blockBuffer bytes.Buffer
+	chunkSize   int
+}
+
+func (me *blockCheckingWriter) checkBlock() {
+	b := me.blockBuffer.Next(me.chunkSize)
+	for _, peer := range me.cache.CheckBlock(me.requestIndex, b) {
+		g.MakeMapIfNilAndSet(&me.badPeers, peer, struct{}{})
+	}
+	me.requestIndex++
+}
+
+func (me *blockCheckingWriter) checkFullBlocks() {
+	for me.blockBuffer.Len() >= me.chunkSize {
+		me.checkBlock()
+	}
+}
+
+func (me *blockCheckingWriter) Write(b []byte) (int, error) {
+	n, err := me.blockBuffer.Write(b)
+	if err != nil {
+		// bytes.Buffer.Write should never fail.
+		panic(err)
+	}
+	me.checkFullBlocks()
+	return n, err
+}
+
+// Check any remaining block data. Terminal pieces or piece sizes that don't divide into the chunk
+// size cleanly may leave fragments that should be checked.
+func (me *blockCheckingWriter) Flush() {
+	for me.blockBuffer.Len() != 0 {
+		me.checkBlock()
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/smartban/smartban.go b/deps/github.com/anacrolix/torrent/smartban/smartban.go
new file mode 100644
index 0000000..96e9b75
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/smartban/smartban.go
@@ -0,0 +1,51 @@
+package smartban
+
+import (
+	"sync"
+)
+
+type Cache[Peer, BlockKey, Hash comparable] struct {
+	Hash func([]byte) Hash
+
+	lock   sync.RWMutex
+	blocks map[BlockKey]map[Peer]Hash
+}
+
+type Block[Key any] struct {
+	Key  Key
+	Data []byte
+}
+
+func (me *Cache[Peer, BlockKey, Hash]) Init() {
+	me.blocks = make(map[BlockKey]map[Peer]Hash)
+}
+
+func (me *Cache[Peer, BlockKey, Hash]) RecordBlock(peer Peer, key BlockKey, data []byte) {
+	hash := me.Hash(data)
+	me.lock.Lock()
+	defer me.lock.Unlock()
+	peers := me.blocks[key]
+	if peers == nil {
+		peers = make(map[Peer]Hash)
+		me.blocks[key] = peers
+	}
+	peers[peer] = hash
+}
+
+func (me *Cache[Peer, BlockKey, Hash]) CheckBlock(key BlockKey, data []byte) (bad []Peer) {
+	correct := me.Hash(data)
+	me.lock.RLock()
+	defer me.lock.RUnlock()
+	for peer, hash := range me.blocks[key] {
+		if hash != correct {
+			bad = append(bad, peer)
+		}
+	}
+	return
+}
+
+func (me *Cache[Peer, BlockKey, Hash]) ForgetBlock(key BlockKey) {
+	me.lock.Lock()
+	defer me.lock.Unlock()
+	delete(me.blocks, key)
+}
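+
+// A minimal usage sketch with a toy hash (illustrative only; callers supply a
+// real hash such as sha1.Sum):
+func exampleCacheUsage() {
+	var c Cache[string, int, byte]
+	c.Hash = func(b []byte) (sum byte) {
+		for _, x := range b {
+			sum += x
+		}
+		return
+	}
+	c.Init()
+	c.RecordBlock("peerA", 0, []byte("good"))
+	c.RecordBlock("peerB", 0, []byte("bad"))
+	// Once block 0 is verified, peers whose recorded hash mismatches are returned.
+	_ = c.CheckBlock(0, []byte("good")) // -> ["peerB"]
+}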
diff --git a/deps/github.com/anacrolix/torrent/socket.go b/deps/github.com/anacrolix/torrent/socket.go
new file mode 100644
index 0000000..2d4ea86
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/socket.go
@@ -0,0 +1,184 @@
+package torrent
+
+import (
+	"context"
+	"net"
+	"strconv"
+	"syscall"
+
+	"github.com/anacrolix/log"
+	"github.com/anacrolix/missinggo/perf"
+	"github.com/anacrolix/missinggo/v2"
+	"github.com/pkg/errors"
+)
+
+type Listener interface {
+	// Accept waits for and returns the next connection to the listener.
+	Accept() (net.Conn, error)
+
+	// Addr returns the listener's network address.
+	Addr() net.Addr
+}
+
+type socket interface {
+	Listener
+	Dialer
+	Close() error
+}
+
+func listen(n network, addr string, f firewallCallback, logger log.Logger) (socket, error) {
+	switch {
+	case n.Tcp:
+		return listenTcp(n.String(), addr)
+	case n.Udp:
+		return listenUtp(n.String(), addr, f, logger)
+	default:
+		panic(n)
+	}
+}
+
+// Dialing TCP from a local port limits us to a single outgoing TCP connection to each remote
+// client. Instead, this should be a last resort if we need to use holepunching, and only then to
+// connect to other clients that actually try to holepunch TCP.
+const dialTcpFromListenPort = false
+
+var tcpListenConfig = net.ListenConfig{
+	Control: func(network, address string, c syscall.RawConn) (err error) {
+		controlErr := c.Control(func(fd uintptr) {
+			if dialTcpFromListenPort {
+				err = setReusePortSockOpts(fd)
+			}
+		})
+		if err != nil {
+			return
+		}
+		err = controlErr
+		return
+	},
+	// BitTorrent connections manage their own keep-alives.
+	KeepAlive: -1,
+}
+
+func listenTcp(network, address string) (s socket, err error) {
+	l, err := tcpListenConfig.Listen(context.Background(), network, address)
+	if err != nil {
+		return
+	}
+	netDialer := net.Dialer{
+		// We don't want fallback, as we explicitly manage the IPv4/IPv6 distinction ourselves,
+		// although it's probably not triggered as I think the network is already constrained to
+		// tcp4 or tcp6 at this point.
+		FallbackDelay: -1,
+		// BitTorrent connections manage their own keepalives.
+		KeepAlive: tcpListenConfig.KeepAlive,
+		Control: func(network, address string, c syscall.RawConn) (err error) {
+			controlErr := c.Control(func(fd uintptr) {
+				err = setSockNoLinger(fd)
+				if err != nil {
+					// Failing to disable linger is undesirable, but not fatal.
+					log.Levelf(log.Debug, "error setting linger socket option on tcp socket: %v", err)
+					err = nil
+				}
+				// This is no longer required I think, see
+				// https://github.com/anacrolix/torrent/discussions/856. I added this originally to
+				// allow dialling out from the client's listen port, but that doesn't really work. I
+				// think Linux older than ~2013 doesn't support SO_REUSEPORT.
+				if dialTcpFromListenPort {
+					err = setReusePortSockOpts(fd)
+				}
+			})
+			if err == nil {
+				err = controlErr
+			}
+			return
+		},
+	}
+	if dialTcpFromListenPort {
+		netDialer.LocalAddr = l.Addr()
+	}
+	s = tcpSocket{
+		Listener: l,
+		NetworkDialer: NetworkDialer{
+			Network: network,
+			Dialer:  &netDialer,
+		},
+	}
+	return
+}
+
+type tcpSocket struct {
+	net.Listener
+	NetworkDialer
+}
+
+func listenAll(networks []network, getHost func(string) string, port int, f firewallCallback, logger log.Logger) ([]socket, error) {
+	if len(networks) == 0 {
+		return nil, nil
+	}
+	var nahs []networkAndHost
+	for _, n := range networks {
+		nahs = append(nahs, networkAndHost{n, getHost(n.String())})
+	}
+	for {
+		ss, retry, err := listenAllRetry(nahs, port, f, logger)
+		if !retry {
+			return ss, err
+		}
+	}
+}
+
+type networkAndHost struct {
+	Network network
+	Host    string
+}
+
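+// When port is 0 the first listener binds an ephemeral port and the remaining
+// networks must bind that same port; if another process takes it first,
+// listenAllRetry reports retry=true so listenAll can start over with a fresh
+// ephemeral port.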
+func listenAllRetry(nahs []networkAndHost, port int, f firewallCallback, logger log.Logger) (ss []socket, retry bool, err error) {
+	ss = make([]socket, 1, len(nahs))
+	portStr := strconv.FormatInt(int64(port), 10)
+	ss[0], err = listen(nahs[0].Network, net.JoinHostPort(nahs[0].Host, portStr), f, logger)
+	if err != nil {
+		return nil, false, errors.Wrap(err, "first listen")
+	}
+	defer func() {
+		if err != nil || retry {
+			for _, s := range ss {
+				s.Close()
+			}
+			ss = nil
+		}
+	}()
+	portStr = strconv.FormatInt(int64(missinggo.AddrPort(ss[0].Addr())), 10)
+	for _, nah := range nahs[1:] {
+		s, err := listen(nah.Network, net.JoinHostPort(nah.Host, portStr), f, logger)
+		if err != nil {
+			return ss,
+				missinggo.IsAddrInUse(err) && port == 0,
+				errors.Wrap(err, "subsequent listen")
+		}
+		ss = append(ss, s)
+	}
+	return
+}
+
+// This isn't aliased from go-libutp since that assumes CGO.
+type firewallCallback func(net.Addr) bool
+
+func listenUtp(network, addr string, fc firewallCallback, logger log.Logger) (socket, error) {
+	us, err := NewUtpSocket(network, addr, fc, logger)
+	return utpSocketSocket{us, network}, err
+}
+
+// utpSocket wrapper, additionally wrapped for the torrent package's socket interface.
+type utpSocketSocket struct {
+	utpSocket
+	network string
+}
+
+func (me utpSocketSocket) DialerNetwork() string {
+	return me.network
+}
+
+func (me utpSocketSocket) Dial(ctx context.Context, addr string) (conn net.Conn, err error) {
+	defer perf.ScopeTimerErr(&err)()
+	return me.utpSocket.DialContext(ctx, me.network, addr)
+}
diff --git a/deps/github.com/anacrolix/torrent/sockopts.go b/deps/github.com/anacrolix/torrent/sockopts.go
new file mode 100644
index 0000000..54f307d
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/sockopts.go
@@ -0,0 +1,10 @@
+//go:build !wasm
+
+package torrent
+
+import "syscall"
+
+var lingerOffVal = syscall.Linger{
+	Onoff:  0,
+	Linger: 0,
+}
diff --git a/deps/github.com/anacrolix/torrent/sockopts_unix.go b/deps/github.com/anacrolix/torrent/sockopts_unix.go
new file mode 100644
index 0000000..52ec9e8
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/sockopts_unix.go
@@ -0,0 +1,29 @@
+//go:build !windows && !wasm
+
+package torrent
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+func setReusePortSockOpts(fd uintptr) (err error) {
+	// I would use libp2p/go-reuseport to do this here, but no surprise it's
+	// implemented incorrectly.
+
+	// Looks like we can get away with just REUSEPORT at least on Darwin, and probably by
+	// extension BSDs and Linux.
+	if false {
+		err = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
+		if err != nil {
+			return
+		}
+	}
+	err = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1)
+	return
+}
+
+func setSockNoLinger(fd uintptr) (err error) {
+	return syscall.SetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, &lingerOffVal)
+}
diff --git a/deps/github.com/anacrolix/torrent/sockopts_wasm.go b/deps/github.com/anacrolix/torrent/sockopts_wasm.go
new file mode 100644
index 0000000..9705b91
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/sockopts_wasm.go
@@ -0,0 +1,12 @@
+package torrent
+
+// It's possible that we either need a JS-specific way to allow port reuse, or to fall back to
+// dialling TCP without forcing the local address to match the listener's. If the fallback is
+// implemented, this should probably return an error to trigger it.
+func setReusePortSockOpts(fd uintptr) error {
+	return nil
+}
+
+func setSockNoLinger(fd uintptr) error {
+	return nil
+}
diff --git a/deps/github.com/anacrolix/torrent/sockopts_windows.go b/deps/github.com/anacrolix/torrent/sockopts_windows.go
new file mode 100644
index 0000000..c3c0ab0
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/sockopts_windows.go
@@ -0,0 +1,15 @@
+package torrent
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/windows"
+)
+
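+// Windows has no SO_REUSEPORT; SO_REUSEADDR is the closest analogue, which is
+// why it's used here.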
+func setReusePortSockOpts(fd uintptr) (err error) {
+	return windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_REUSEADDR, 1)
+}
+
+func setSockNoLinger(fd uintptr) (err error) {
+	return syscall.SetsockoptLinger(syscall.Handle(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, &lingerOffVal)
+}
diff --git a/deps/github.com/anacrolix/torrent/sources.go b/deps/github.com/anacrolix/torrent/sources.go
new file mode 100644
index 0000000..ed5ecbf
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/sources.go
@@ -0,0 +1,80 @@
+package torrent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/anacrolix/log"
+
+	"github.com/anacrolix/torrent/bencode"
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+// Add HTTP endpoints that serve the metainfo. They will be used if the torrent info hasn't been
+// obtained yet. The Client's HTTP client is used to fetch them.
+func (t *Torrent) UseSources(sources []string) {
+	select {
+	case <-t.Closed():
+		return
+	case <-t.GotInfo():
+		return
+	default:
+	}
+	for _, s := range sources {
+		_, loaded := t.activeSources.LoadOrStore(s, struct{}{})
+		if loaded {
+			continue
+		}
+		s := s
+		go func() {
+			err := t.useActiveTorrentSource(s)
+			_, loaded := t.activeSources.LoadAndDelete(s)
+			if !loaded {
+				panic(s)
+			}
+			level := log.Debug
+			if err != nil && !errors.Is(err, context.Canceled) {
+				level = log.Warning
+			}
+			t.logger.Levelf(level, "used torrent source %q [err=%v]", s, err)
+		}()
+	}
+}
+
+func (t *Torrent) useActiveTorrentSource(source string) error {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	go func() {
+		select {
+		case <-t.GotInfo():
+		case <-t.Closed():
+		case <-ctx.Done():
+		}
+		cancel()
+	}()
+	mi, err := getTorrentSource(ctx, source, t.cl.httpClient)
+	if err != nil {
+		return err
+	}
+	return t.MergeSpec(TorrentSpecFromMetaInfo(&mi))
+}
+
+func getTorrentSource(ctx context.Context, source string, hc *http.Client) (mi metainfo.MetaInfo, err error) {
+	var req *http.Request
+	if req, err = http.NewRequestWithContext(ctx, http.MethodGet, source, nil); err != nil {
+		return
+	}
+	var resp *http.Response
+	if resp, err = hc.Do(req); err != nil {
+		return
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		err = fmt.Errorf("unexpected response status code: %v", resp.StatusCode)
+		return
+	}
+	err = bencode.NewDecoder(resp.Body).Decode(&mi)
+	return
+}
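+
+// A minimal usage sketch (hypothetical client and URL) for wiring a metainfo
+// source onto a magnet-only torrent:
+//
+//	t, _ := cl.AddMagnet("magnet:?xt=urn:btih:...")
+//	t.UseSources([]string{"https://example.com/meta.torrent"})
+//	<-t.GotInfo()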
diff --git a/deps/github.com/anacrolix/torrent/spec.go b/deps/github.com/anacrolix/torrent/spec.go
new file mode 100644
index 0000000..8cce3cb
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/spec.go
@@ -0,0 +1,90 @@
+package torrent
+
+import (
+	"fmt"
+
+	"github.com/anacrolix/torrent/metainfo"
+	pp "github.com/anacrolix/torrent/peer_protocol"
+	"github.com/anacrolix/torrent/storage"
+)
+
+// Specifies a new torrent for adding to a client, or additions to an existing Torrent. There are
+// constructor functions for magnet URIs and torrent metainfo files. TODO: This type should be
+// dismantled into a new Torrent option type, and separate Torrent mutate method(s).
+type TorrentSpec struct {
+	// The tiered tracker URIs.
+	Trackers [][]string
+	// TODO: Move into a "new" Torrent opt type.
+	InfoHash  metainfo.Hash
+	InfoBytes []byte
+	// The name to use if the Name field from the Info isn't available.
+	DisplayName string
+	// WebSeed URLs. For additional options add the URLs separately with Torrent.AddWebSeeds
+	// instead.
+	Webseeds  []string
+	DhtNodes  []string
+	PeerAddrs []string
+	// The combination of the "xs" and "as" fields in magnet links, for now.
+	Sources []string
+
+	// The chunk size to use for outbound requests. Defaults to 16KiB if not set. Can only be set
+	// for new Torrents. TODO: Move into a "new" Torrent opt type.
+	ChunkSize pp.Integer
+	// TODO: Move into a "new" Torrent opt type.
+	Storage storage.ClientImpl
+
+	DisableInitialPieceCheck bool
+
+	// Whether to allow data download or upload
+	DisallowDataUpload   bool
+	DisallowDataDownload bool
+}
+
+func TorrentSpecFromMagnetUri(uri string) (spec *TorrentSpec, err error) {
+	m, err := metainfo.ParseMagnetUri(uri)
+	if err != nil {
+		return
+	}
+	spec = &TorrentSpec{
+		Trackers:    [][]string{m.Trackers},
+		DisplayName: m.DisplayName,
+		InfoHash:    m.InfoHash,
+		Webseeds:    m.Params["ws"],
+		Sources:     append(m.Params["xs"], m.Params["as"]...),
+		PeerAddrs:   m.Params["x.pe"], // BEP 9
+		// TODO: What's the parameter for DHT nodes?
+	}
+	return
+}
+
+// The error will be from unmarshalling the info bytes. The TorrentSpec is still filled out as much
+// as possible in this case.
+func TorrentSpecFromMetaInfoErr(mi *metainfo.MetaInfo) (*TorrentSpec, error) {
+	info, err := mi.UnmarshalInfo()
+	if err != nil {
+		err = fmt.Errorf("unmarshalling info: %w", err)
+	}
+	return &TorrentSpec{
+		Trackers:    mi.UpvertedAnnounceList(),
+		InfoHash:    mi.HashInfoBytes(),
+		InfoBytes:   mi.InfoBytes,
+		DisplayName: info.Name,
+		Webseeds:    mi.UrlList,
+		DhtNodes: func() (ret []string) {
+			ret = make([]string, 0, len(mi.Nodes))
+			for _, node := range mi.Nodes {
+				ret = append(ret, string(node))
+			}
+			return
+		}(),
+	}, err
+}
+
+// Panics if there was anything missing from the metainfo.
+func TorrentSpecFromMetaInfo(mi *metainfo.MetaInfo) *TorrentSpec {
+	ts, err := TorrentSpecFromMetaInfoErr(mi)
+	if err != nil {
+		panic(err)
+	}
+	return ts
+}
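+
+// A minimal sketch of the two constructor paths (placeholder URI; mi is an
+// assumed *metainfo.MetaInfo):
+//
+//	spec, err := TorrentSpecFromMagnetUri("magnet:?xt=urn:btih:...")
+//	// or, when the metainfo is already in hand:
+//	spec = TorrentSpecFromMetaInfo(mi)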
diff --git a/deps/github.com/anacrolix/torrent/stats.go b/deps/github.com/anacrolix/torrent/stats.go
new file mode 100644
index 0000000..90144bf
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/stats.go
@@ -0,0 +1,12 @@
+package torrent
+
+import (
+	"io"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+func dumpStats[T any](w io.Writer, stats T) {
+	spew.Fdump(w, stats)
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/bolt-piece-completion.go b/deps/github.com/anacrolix/torrent/storage/bolt-piece-completion.go
new file mode 100644
index 0000000..442f57c
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/bolt-piece-completion.go
@@ -0,0 +1,97 @@
+//go:build !noboltdb && !wasm
+// +build !noboltdb,!wasm
+
+package storage
+
+import (
+	"encoding/binary"
+	"os"
+	"path/filepath"
+	"time"
+
+	"go.etcd.io/bbolt"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+const (
+	boltDbCompleteValue   = "c"
+	boltDbIncompleteValue = "i"
+)
+
+var completionBucketKey = []byte("completion")
+
+type boltPieceCompletion struct {
+	db *bbolt.DB
+}
+
+var _ PieceCompletion = (*boltPieceCompletion)(nil)
+
+func NewBoltPieceCompletion(dir string) (ret PieceCompletion, err error) {
+	if err = os.MkdirAll(dir, 0o750); err != nil {
+		return
+	}
+	p := filepath.Join(dir, ".torrent.bolt.db")
+	db, err := bbolt.Open(p, 0o660, &bbolt.Options{
+		Timeout: time.Second,
+	})
+	if err != nil {
+		return
+	}
+	db.NoSync = true
+	ret = &boltPieceCompletion{db}
+	return
+}
+
+func (me boltPieceCompletion) Get(pk metainfo.PieceKey) (cn Completion, err error) {
+	err = me.db.View(func(tx *bbolt.Tx) error {
+		cb := tx.Bucket(completionBucketKey)
+		if cb == nil {
+			return nil
+		}
+		ih := cb.Bucket(pk.InfoHash[:])
+		if ih == nil {
+			return nil
+		}
+		var key [4]byte
+		binary.BigEndian.PutUint32(key[:], uint32(pk.Index))
+		cn.Ok = true
+		switch string(ih.Get(key[:])) {
+		case boltDbCompleteValue:
+			cn.Complete = true
+		case boltDbIncompleteValue:
+			cn.Complete = false
+		default:
+			cn.Ok = false
+		}
+		return nil
+	})
+	return
+}
+
+func (me boltPieceCompletion) Set(pk metainfo.PieceKey, b bool) error {
+	if c, err := me.Get(pk); err == nil && c.Ok && c.Complete == b {
+		return nil
+	}
+	return me.db.Update(func(tx *bbolt.Tx) error {
+		c, err := tx.CreateBucketIfNotExists(completionBucketKey)
+		if err != nil {
+			return err
+		}
+		ih, err := c.CreateBucketIfNotExists(pk.InfoHash[:])
+		if err != nil {
+			return err
+		}
+		var key [4]byte
+		binary.BigEndian.PutUint32(key[:], uint32(pk.Index))
+		return ih.Put(key[:], []byte(func() string {
+			if b {
+				return boltDbCompleteValue
+			} else {
+				return boltDbIncompleteValue
+			}
+		}()))
+	})
+}
+
+func (me *boltPieceCompletion) Close() error {
+	return me.db.Close()
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/bolt-piece-completion_test.go b/deps/github.com/anacrolix/torrent/storage/bolt-piece-completion_test.go
new file mode 100644
index 0000000..3a778a8
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/bolt-piece-completion_test.go
@@ -0,0 +1,36 @@
+package storage
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+func TestBoltPieceCompletion(t *testing.T) {
+	td := t.TempDir()
+
+	pc, err := NewBoltPieceCompletion(td)
+	require.NoError(t, err)
+	defer pc.Close()
+
+	pk := metainfo.PieceKey{}
+
+	b, err := pc.Get(pk)
+	require.NoError(t, err)
+	assert.False(t, b.Ok)
+
+	require.NoError(t, pc.Set(pk, false))
+
+	b, err = pc.Get(pk)
+	require.NoError(t, err)
+	assert.Equal(t, Completion{Complete: false, Ok: true}, b)
+
+	require.NoError(t, pc.Set(pk, true))
+
+	b, err = pc.Get(pk)
+	require.NoError(t, err)
+	assert.Equal(t, Completion{Complete: true, Ok: true}, b)
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/bolt-piece.go b/deps/github.com/anacrolix/torrent/storage/bolt-piece.go
new file mode 100644
index 0000000..67e03bd
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/bolt-piece.go
@@ -0,0 +1,112 @@
+//go:build !noboltdb && !wasm
+// +build !noboltdb,!wasm
+
+package storage
+
+import (
+	"encoding/binary"
+	"io"
+
+	"go.etcd.io/bbolt"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+type boltPiece struct {
+	db  *bbolt.DB
+	p   metainfo.Piece
+	ih  metainfo.Hash
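+	// key is the 20-byte infohash followed by the 4-byte big-endian piece index.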
+	key [24]byte
+}
+
+var (
+	_             PieceImpl = (*boltPiece)(nil)
+	dataBucketKey           = []byte("data")
+)
+
+func (me *boltPiece) pc() PieceCompletionGetSetter {
+	return boltPieceCompletion{me.db}
+}
+
+func (me *boltPiece) pk() metainfo.PieceKey {
+	return metainfo.PieceKey{me.ih, me.p.Index()}
+}
+
+func (me *boltPiece) Completion() Completion {
+	c, err := me.pc().Get(me.pk())
+	switch err {
+	case bbolt.ErrDatabaseNotOpen:
+		return Completion{}
+	case nil:
+	default:
+		panic(err)
+	}
+	return c
+}
+
+func (me *boltPiece) MarkComplete() error {
+	return me.pc().Set(me.pk(), true)
+}
+
+func (me *boltPiece) MarkNotComplete() error {
+	return me.pc().Set(me.pk(), false)
+}
+
+func (me *boltPiece) ReadAt(b []byte, off int64) (n int, err error) {
+	err = me.db.View(func(tx *bbolt.Tx) error {
+		db := tx.Bucket(dataBucketKey)
+		if db == nil {
+			return io.EOF
+		}
+		ci := off / chunkSize
+		off %= chunkSize
+		for len(b) != 0 {
+			ck := me.chunkKey(int(ci))
+			_b := db.Get(ck[:])
+			// If the chunk is the wrong size, assume it's missing as we can't rely on the data.
+			if len(_b) != chunkSize {
+				return io.EOF
+			}
+			n1 := copy(b, _b[off:])
+			off = 0
+			ci++
+			b = b[n1:]
+			n += n1
+		}
+		return nil
+	})
+	return
+}
+
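+// chunkKey extends the piece key with a 2-byte big-endian chunk index, yielding
+// the bbolt key for a single 16 KiB chunk of piece data.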
+func (me *boltPiece) chunkKey(index int) (ret [26]byte) {
+	copy(ret[:], me.key[:])
+	binary.BigEndian.PutUint16(ret[24:], uint16(index))
+	return
+}
+
+func (me *boltPiece) WriteAt(b []byte, off int64) (n int, err error) {
+	err = me.db.Update(func(tx *bbolt.Tx) error {
+		db, err := tx.CreateBucketIfNotExists(dataBucketKey)
+		if err != nil {
+			return err
+		}
+		ci := off / chunkSize
+		off %= chunkSize
+		for len(b) != 0 {
+			_b := make([]byte, chunkSize)
+			ck := me.chunkKey(int(ci))
+			copy(_b, db.Get(ck[:]))
+			n1 := copy(_b[off:], b)
+			if err := db.Put(ck[:], _b); err != nil {
+				return err
+			}
+			if n1 > len(b) {
+				break
+			}
+			b = b[n1:]
+			off = 0
+			ci++
+			n += n1
+		}
+		return nil
+	})
+	return
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/bolt-piece_test.go b/deps/github.com/anacrolix/torrent/storage/bolt-piece_test.go
new file mode 100644
index 0000000..8c55848
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/bolt-piece_test.go
@@ -0,0 +1,12 @@
+package storage_test
+
+import (
+	"testing"
+
+	"github.com/anacrolix/torrent/storage"
+	"github.com/anacrolix/torrent/test"
+)
+
+func TestBoltLeecherStorage(t *testing.T) {
+	test.TestLeecherStorage(t, test.LeecherStorageTestCase{"Boltdb", storage.NewBoltDB, 0})
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/bolt.go b/deps/github.com/anacrolix/torrent/storage/bolt.go
new file mode 100644
index 0000000..945b249
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/bolt.go
@@ -0,0 +1,64 @@
+//go:build !noboltdb && !wasm
+// +build !noboltdb,!wasm
+
+package storage
+
+import (
+	"encoding/binary"
+	"path/filepath"
+	"time"
+
+	"github.com/anacrolix/missinggo/expect"
+	"go.etcd.io/bbolt"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+const (
+	// Chosen to match the usual chunk size in a torrent client. This way, most chunk writes are to
+	// exactly one full item in bbolt DB.
+	chunkSize = 1 << 14
+)
+
+type boltClient struct {
+	db *bbolt.DB
+}
+
+type boltTorrent struct {
+	cl *boltClient
+	ih metainfo.Hash
+}
+
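+// NewBoltDB opens or creates "bolt.db" under filePath and returns piece
+// storage backed by it. It panics (via expect.Nil) if the database cannot be
+// opened.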
+func NewBoltDB(filePath string) ClientImplCloser {
+	db, err := bbolt.Open(filepath.Join(filePath, "bolt.db"), 0o600, &bbolt.Options{
+		Timeout: time.Second,
+	})
+	expect.Nil(err)
+	db.NoSync = true
+	return &boltClient{db}
+}
+
+func (me *boltClient) Close() error {
+	return me.db.Close()
+}
+
+func (me *boltClient) OpenTorrent(_ *metainfo.Info, infoHash metainfo.Hash) (TorrentImpl, error) {
+	t := &boltTorrent{me, infoHash}
+	return TorrentImpl{
+		Piece: t.Piece,
+		Close: t.Close,
+	}, nil
+}
+
+func (me *boltTorrent) Piece(p metainfo.Piece) PieceImpl {
+	ret := &boltPiece{
+		p:  p,
+		db: me.cl.db,
+		ih: me.ih,
+	}
+	copy(ret.key[:], me.ih[:])
+	binary.BigEndian.PutUint32(ret.key[20:], uint32(p.Index()))
+	return ret
+}
+
+func (boltTorrent) Close() error { return nil }
diff --git a/deps/github.com/anacrolix/torrent/storage/default-dir-piece-completion-boltdb.go b/deps/github.com/anacrolix/torrent/storage/default-dir-piece-completion-boltdb.go
new file mode 100644
index 0000000..3ac6a77
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/default-dir-piece-completion-boltdb.go
@@ -0,0 +1,11 @@
+// Bolt piece completion is available, and sqlite is not.
+//go:build !noboltdb && (!cgo || nosqlite) && !wasm
+// +build !noboltdb
+// +build !cgo nosqlite
+// +build !wasm
+
+package storage
+
+func NewDefaultPieceCompletionForDir(dir string) (PieceCompletion, error) {
+	return NewBoltPieceCompletion(dir)
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/default-dir-piece-completion-other.go b/deps/github.com/anacrolix/torrent/storage/default-dir-piece-completion-other.go
new file mode 100644
index 0000000..3cd42fb
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/default-dir-piece-completion-other.go
@@ -0,0 +1,14 @@
+// Bolt piece completion is not available, and neither is sqlite.
+//go:build (!cgo || nosqlite) && (noboltdb || wasm)
+// +build !cgo nosqlite
+// +build noboltdb wasm
+
+package storage
+
+import (
+	"errors"
+)
+
+func NewDefaultPieceCompletionForDir(dir string) (PieceCompletion, error) {
+	return nil, errors.New("no piece completion backend available: built without boltdb and sqlite support")
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/disabled/disabled.go b/deps/github.com/anacrolix/torrent/storage/disabled/disabled.go
new file mode 100644
index 0000000..f511222
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/disabled/disabled.go
@@ -0,0 +1,52 @@
+package disabled
+
+import (
+	"errors"
+
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/anacrolix/torrent/storage"
+)
+
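+// Client is a storage.ClientImpl whose pieces reject all reads and writes,
+// for use when storing torrent data is disabled.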
+type Client struct{}
+
+func (c Client) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (storage.TorrentImpl, error) {
+	capFunc := func() (int64, bool) {
+		return 0, true
+	}
+	return storage.TorrentImpl{
+		Piece: func(piece metainfo.Piece) storage.PieceImpl {
+			return Piece{}
+		},
+		Close: func() error {
+			return nil
+		},
+		Capacity: &capFunc,
+	}, nil
+}
+
+type Piece struct{}
+
+func (Piece) ReadAt(p []byte, off int64) (n int, err error) {
+	err = errors.New("disabled")
+	return
+}
+
+func (Piece) WriteAt(p []byte, off int64) (n int, err error) {
+	err = errors.New("disabled")
+	return
+}
+
+func (Piece) MarkComplete() error {
+	return errors.New("disabled")
+}
+
+func (Piece) MarkNotComplete() error {
+	return errors.New("disabled")
+}
+
+func (Piece) Completion() storage.Completion {
+	return storage.Completion{
+		Complete: false,
+		Ok:       true,
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/doc.go b/deps/github.com/anacrolix/torrent/storage/doc.go
new file mode 100644
index 0000000..5ba6a0a
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/doc.go
@@ -0,0 +1,2 @@
+// Package storage implements storage backends for package torrent.
+package storage
diff --git a/deps/github.com/anacrolix/torrent/storage/file-deprecated.go b/deps/github.com/anacrolix/torrent/storage/file-deprecated.go
new file mode 100644
index 0000000..4560b9d
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/file-deprecated.go
@@ -0,0 +1,34 @@
+package storage
+
+import (
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+func NewFileWithCompletion(baseDir string, completion PieceCompletion) ClientImplCloser {
+	return NewFileWithCustomPathMakerAndCompletion(baseDir, nil, completion)
+}
+
+// File storage with data partitioned by infohash.
+func NewFileByInfoHash(baseDir string) ClientImplCloser {
+	return NewFileWithCustomPathMaker(baseDir, infoHashPathMaker)
+}
+
+// Deprecated: Use NewFileOpts instead. Allows passing a function to determine the path for storing
+// torrent data. The function is responsible for sanitizing the info if it uses some part of it
+// (for example sanitizing info.Name).
+func NewFileWithCustomPathMaker(baseDir string, pathMaker func(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string) ClientImplCloser {
+	return NewFileWithCustomPathMakerAndCompletion(baseDir, pathMaker, pieceCompletionForDir(baseDir))
+}
+
+// Deprecated: Use NewFileOpts instead. Allows passing a custom PieceCompletion.
+func NewFileWithCustomPathMakerAndCompletion(
+	baseDir string,
+	pathMaker TorrentDirFilePathMaker,
+	completion PieceCompletion,
+) ClientImplCloser {
+	return NewFileOpts(NewFileClientOpts{
+		ClientBaseDir:   baseDir,
+		TorrentDirMaker: pathMaker,
+		PieceCompletion: completion,
+	})
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/file-misc.go b/deps/github.com/anacrolix/torrent/storage/file-misc.go
new file mode 100644
index 0000000..8966ecb
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/file-misc.go
@@ -0,0 +1,34 @@
+package storage
+
+import "github.com/anacrolix/torrent/metainfo"
+
+type requiredLength struct {
+	fileIndex int
+	length    int64
+}
+
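+// extentCompleteRequiredLengths returns, for the torrent byte extent [off, off+n),
+// the minimum length each constituent file must have on disk for the extent to be
+// fully present.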
+func extentCompleteRequiredLengths(info *metainfo.Info, off, n int64) (ret []requiredLength) {
+	if n == 0 {
+		return
+	}
+	for i, fi := range info.UpvertedFiles() {
+		if off >= fi.Length {
+			off -= fi.Length
+			continue
+		}
+		n1 := n
+		if off+n1 > fi.Length {
+			n1 = fi.Length - off
+		}
+		ret = append(ret, requiredLength{
+			fileIndex: i,
+			length:    off + n1,
+		})
+		n -= n1
+		if n == 0 {
+			return
+		}
+		off = 0
+	}
+	panic("extent exceeds torrent bounds")
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/file-misc_test.go b/deps/github.com/anacrolix/torrent/storage/file-misc_test.go
new file mode 100644
index 0000000..f74196d
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/file-misc_test.go
@@ -0,0 +1,37 @@
+package storage
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+func TestExtentCompleteRequiredLengths(t *testing.T) {
+	info := &metainfo.Info{
+		Files: []metainfo.FileInfo{
+			{Path: []string{"a"}, Length: 2},
+			{Path: []string{"b"}, Length: 3},
+		},
+	}
+	assert.Empty(t, extentCompleteRequiredLengths(info, 0, 0))
+	assert.EqualValues(t, []requiredLength{
+		{fileIndex: 0, length: 1},
+	}, extentCompleteRequiredLengths(info, 0, 1))
+	assert.EqualValues(t, []requiredLength{
+		{fileIndex: 0, length: 2},
+	}, extentCompleteRequiredLengths(info, 0, 2))
+	assert.EqualValues(t, []requiredLength{
+		{fileIndex: 0, length: 2},
+		{fileIndex: 1, length: 1},
+	}, extentCompleteRequiredLengths(info, 0, 3))
+	assert.EqualValues(t, []requiredLength{
+		{fileIndex: 1, length: 2},
+	}, extentCompleteRequiredLengths(info, 2, 2))
+	assert.EqualValues(t, []requiredLength{
+		{fileIndex: 1, length: 3},
+	}, extentCompleteRequiredLengths(info, 4, 1))
+	assert.Len(t, extentCompleteRequiredLengths(info, 5, 0), 0)
+	assert.Panics(t, func() { extentCompleteRequiredLengths(info, 6, 1) })
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/file-paths.go b/deps/github.com/anacrolix/torrent/storage/file-paths.go
new file mode 100644
index 0000000..8d338f8
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/file-paths.go
@@ -0,0 +1,38 @@
+package storage
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+// Determines the filepath to be used for each file in a torrent.
+type FilePathMaker func(opts FilePathMakerOpts) string
+
+// Determines the directory for a given torrent within a storage client.
+type TorrentDirFilePathMaker func(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string
+
+// Info passed to a FilePathMaker.
+type FilePathMakerOpts struct {
+	Info *metainfo.Info
+	File *metainfo.FileInfo
+}
+
+// defaultPathMaker just returns the storage client's base directory.
+func defaultPathMaker(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string {
+	return baseDir
+}
+
+func infoHashPathMaker(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string {
+	return filepath.Join(baseDir, infoHash.HexString())
+}
+
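+// isSubFilepath reports whether sub lies within base after lexical path resolution.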
+func isSubFilepath(base, sub string) bool {
+	rel, err := filepath.Rel(base, sub)
+	if err != nil {
+		return false
+	}
+	return rel != ".." && !strings.HasPrefix(rel, ".."+string(os.PathSeparator))
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/file-piece.go b/deps/github.com/anacrolix/torrent/storage/file-piece.go
new file mode 100644
index 0000000..4777201
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/file-piece.go
@@ -0,0 +1,59 @@
+package storage
+
+import (
+	"io"
+	"log"
+	"os"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+type filePieceImpl struct {
+	*fileTorrentImpl
+	p metainfo.Piece
+	io.WriterAt
+	io.ReaderAt
+}
+
+var _ PieceImpl = (*filePieceImpl)(nil)
+
+func (me *filePieceImpl) pieceKey() metainfo.PieceKey {
+	return metainfo.PieceKey{me.infoHash, me.p.Index()}
+}
+
+func (fs *filePieceImpl) Completion() Completion {
+	c, err := fs.completion.Get(fs.pieceKey())
+	if err != nil {
+		log.Printf("error getting piece completion: %s", err)
+		c.Ok = false
+		return c
+	}
+
+	verified := true
+	if c.Complete {
+		// If it's allegedly complete, check that its constituent files have the necessary length.
+		for _, fi := range extentCompleteRequiredLengths(fs.p.Info, fs.p.Offset(), fs.p.Length()) {
+			s, err := os.Stat(fs.files[fi.fileIndex].path)
+			if err != nil || s.Size() < fi.length {
+				verified = false
+				break
+			}
+		}
+	}
+
+	if !verified {
+		// The completion was wrong, fix it.
+		c.Complete = false
+		fs.completion.Set(fs.pieceKey(), false)
+	}
+
+	return c
+}
+
+func (fs *filePieceImpl) MarkComplete() error {
+	return fs.completion.Set(fs.pieceKey(), true)
+}
+
+func (fs *filePieceImpl) MarkNotComplete() error {
+	return fs.completion.Set(fs.pieceKey(), false)
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/file.go b/deps/github.com/anacrolix/torrent/storage/file.go
new file mode 100644
index 0000000..b873964
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/file.go
@@ -0,0 +1,212 @@
+package storage
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/anacrolix/missinggo/v2"
+
+	"github.com/anacrolix/torrent/common"
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/anacrolix/torrent/segments"
+)
+
+// File-based storage for torrents, that isn't yet bound to a particular torrent.
+type fileClientImpl struct {
+	opts NewFileClientOpts
+}
+
+// All torrent data is stored in baseDir; each torrent's info name is used as its directory.
+func NewFile(baseDir string) ClientImplCloser {
+	return NewFileWithCompletion(baseDir, pieceCompletionForDir(baseDir))
+}
+
+type NewFileClientOpts struct {
+	// The base directory for all downloads.
+	ClientBaseDir   string
+	FilePathMaker   FilePathMaker
+	TorrentDirMaker TorrentDirFilePathMaker
+	PieceCompletion PieceCompletion
+}
+
+// NewFileOpts creates a new ClientImplCloser that stores files using the OS native filesystem.
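+//
+// A minimal usage sketch (illustrative; the base directory is an assumption):
+//
+//	client := NewFileOpts(NewFileClientOpts{
+//		ClientBaseDir: "/data/torrents",
+//	})
+//	defer client.Close()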
+func NewFileOpts(opts NewFileClientOpts) ClientImplCloser {
+	if opts.TorrentDirMaker == nil {
+		opts.TorrentDirMaker = defaultPathMaker
+	}
+	if opts.FilePathMaker == nil {
+		opts.FilePathMaker = func(opts FilePathMakerOpts) string {
+			var parts []string
+			if opts.Info.Name != metainfo.NoName {
+				parts = append(parts, opts.Info.Name)
+			}
+			return filepath.Join(append(parts, opts.File.Path...)...)
+		}
+	}
+	if opts.PieceCompletion == nil {
+		opts.PieceCompletion = pieceCompletionForDir(opts.ClientBaseDir)
+	}
+	return fileClientImpl{opts}
+}
+
+func (me fileClientImpl) Close() error {
+	return me.opts.PieceCompletion.Close()
+}
+
+func (fs fileClientImpl) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (_ TorrentImpl, err error) {
+	dir := fs.opts.TorrentDirMaker(fs.opts.ClientBaseDir, info, infoHash)
+	upvertedFiles := info.UpvertedFiles()
+	files := make([]file, 0, len(upvertedFiles))
+	for i, fileInfo := range upvertedFiles {
+		filePath := filepath.Join(dir, fs.opts.FilePathMaker(FilePathMakerOpts{
+			Info: info,
+			File: &fileInfo,
+		}))
+		if !isSubFilepath(dir, filePath) {
+			err = fmt.Errorf("file %v: path %q is not sub path of %q", i, filePath, dir)
+			return
+		}
+		f := file{
+			path:   filePath,
+			length: fileInfo.Length,
+		}
+		if f.length == 0 {
+			err = CreateNativeZeroLengthFile(f.path)
+			if err != nil {
+				err = fmt.Errorf("creating zero length file: %w", err)
+				return
+			}
+		}
+		files = append(files, f)
+	}
+	t := &fileTorrentImpl{
+		files,
+		segments.NewIndex(common.LengthIterFromUpvertedFiles(upvertedFiles)),
+		infoHash,
+		fs.opts.PieceCompletion,
+	}
+	return TorrentImpl{
+		Piece: t.Piece,
+		Close: t.Close,
+	}, nil
+}
+
+type file struct {
+	// The safe, OS-local file path.
+	path   string
+	length int64
+}
+
+type fileTorrentImpl struct {
+	files          []file
+	segmentLocater segments.Index
+	infoHash       metainfo.Hash
+	completion     PieceCompletion
+}
+
+func (fts *fileTorrentImpl) Piece(p metainfo.Piece) PieceImpl {
+	// Create a view onto the file-based torrent storage.
+	_io := fileTorrentImplIO{fts}
+	// Return the appropriate segments of this.
+	return &filePieceImpl{
+		fts,
+		p,
+		missinggo.NewSectionWriter(_io, p.Offset(), p.Length()),
+		io.NewSectionReader(_io, p.Offset(), p.Length()),
+	}
+}
+
+func (fs *fileTorrentImpl) Close() error {
+	return nil
+}
+
+// A helper to create zero-length files, which would otherwise never appear in file-oriented
+// storage since no writes ever occur to them (no torrent data is associated with a zero-length
+// file). The caller should ensure the file name provided is safe/sanitized.
+func CreateNativeZeroLengthFile(name string) error {
+	if err := os.MkdirAll(filepath.Dir(name), 0o777); err != nil {
+		return err
+	}
+	var f io.Closer
+	f, err := os.Create(name)
+	if err != nil {
+		return err
+	}
+	return f.Close()
+}
+
+// Exposes file-based storage of a torrent, as one big ReadWriterAt.
+type fileTorrentImplIO struct {
+	fts *fileTorrentImpl
+}
+
+// Returns EOF on short or missing file.
+func (fst *fileTorrentImplIO) readFileAt(file file, b []byte, off int64) (n int, err error) {
+	f, err := os.Open(file.path)
+	if os.IsNotExist(err) {
+		// File missing is treated the same as a short file.
+		err = io.EOF
+		return
+	}
+	if err != nil {
+		return
+	}
+	defer f.Close()
+	// Limit the read to within the expected bounds of this file.
+	if int64(len(b)) > file.length-off {
+		b = b[:file.length-off]
+	}
+	for off < file.length && len(b) != 0 {
+		n1, err1 := f.ReadAt(b, off)
+		b = b[n1:]
+		n += n1
+		off += int64(n1)
+		if n1 == 0 {
+			err = err1
+			break
+		}
+	}
+	return
+}
+
+// Returns io.EOF when the requested extent isn't fully backed by data on disk, including for
+// short or missing files part-way through the torrent.
+func (fst fileTorrentImplIO) ReadAt(b []byte, off int64) (n int, err error) {
+	fst.fts.segmentLocater.Locate(segments.Extent{off, int64(len(b))}, func(i int, e segments.Extent) bool {
+		n1, err1 := fst.readFileAt(fst.fts.files[i], b[:e.Length], e.Start)
+		n += n1
+		b = b[n1:]
+		err = err1
+		return err == nil // && int64(n1) == e.Length
+	})
+	if len(b) != 0 && err == nil {
+		err = io.EOF
+	}
+	return
+}
+
+func (fst fileTorrentImplIO) WriteAt(p []byte, off int64) (n int, err error) {
+	// log.Printf("write at %v: %v bytes", off, len(p))
+	fst.fts.segmentLocater.Locate(segments.Extent{off, int64(len(p))}, func(i int, e segments.Extent) bool {
+		name := fst.fts.files[i].path
+		os.MkdirAll(filepath.Dir(name), 0o777)
+		var f *os.File
+		f, err = os.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0o666)
+		if err != nil {
+			return false
+		}
+		var n1 int
+		n1, err = f.WriteAt(p[:e.Length], e.Start)
+		// log.Printf("%v %v wrote %v: %v", i, e, n1, err)
+		closeErr := f.Close()
+		n += n1
+		p = p[n1:]
+		if err == nil {
+			err = closeErr
+		}
+		if err == nil && int64(n1) != e.Length {
+			err = io.ErrShortWrite
+		}
+		return err == nil
+	})
+	return
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/file_test.go b/deps/github.com/anacrolix/torrent/storage/file_test.go
new file mode 100644
index 0000000..a6c69fa
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/file_test.go
@@ -0,0 +1,42 @@
+package storage
+
+import (
+	"bytes"
+	"io"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/anacrolix/missinggo/v2"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+func TestShortFile(t *testing.T) {
+	td := t.TempDir()
+	s := NewFile(td)
+	defer s.Close()
+	info := &metainfo.Info{
+		Name:        "a",
+		Length:      2,
+		PieceLength: missinggo.MiB,
+	}
+	ts, err := s.OpenTorrent(info, metainfo.Hash{})
+	assert.NoError(t, err)
+	f, err := os.Create(filepath.Join(td, "a"))
+	require.NoError(t, err)
+	err = f.Truncate(1)
+	require.NoError(t, err)
+	f.Close()
+	var buf bytes.Buffer
+	p := info.Piece(0)
+	n, err := io.Copy(&buf, io.NewSectionReader(ts.Piece(p), 0, p.Length()))
+	assert.EqualValues(t, 1, n)
+	switch err {
+	case nil, io.EOF:
+	default:
+		t.Errorf("expected nil or EOF error from truncated piece, got %v", err)
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/interface.go b/deps/github.com/anacrolix/torrent/storage/interface.go
new file mode 100644
index 0000000..9e8de06
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/interface.go
@@ -0,0 +1,60 @@
+package storage
+
+import (
+	"io"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+type ClientImplCloser interface {
+	ClientImpl
+	Close() error
+}
+
+// Represents data storage for an unspecified torrent.
+type ClientImpl interface {
+	OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (TorrentImpl, error)
+}
+
+type TorrentCapacity *func() (cap int64, capped bool)
+
+// Data storage bound to a torrent.
+type TorrentImpl struct {
+	Piece func(p metainfo.Piece) PieceImpl
+	Close func() error
+	Flush func() error
+	// Storages that share the same space will provide equal pointers. The function is called once
+	// to determine the shared capacity for torrents sharing the same function pointer, and the
+	// value is mutated in place.
+	Capacity TorrentCapacity
+}
+
+// Interacts with torrent piece data. Optional interfaces to implement include:
+//
+//	io.WriterTo, such as when a piece supports a more efficient way to write out incomplete chunks.
+//	SelfHashing, such as when a piece supports a more efficient way to hash its contents.
+type PieceImpl interface {
+	// These interfaces are not as strict as normally required. They can
+	// assume that the parameters are appropriate for the dimensions of the
+	// piece.
+	io.ReaderAt
+	io.WriterAt
+	// Called when the client believes the piece data will pass a hash check.
+	// The storage can move or mark the piece data as read-only as it sees
+	// fit.
+	MarkComplete() error
+	MarkNotComplete() error
+	// Returns the completion state of the piece.
+	Completion() Completion
+}
+
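+// Completion is the stored completion state of a piece. Ok reports whether the
+// state is known; Err carries any error from the completion backend.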
+type Completion struct {
+	Complete bool
+	Ok       bool
+	Err      error
+}
+
+// Allows a storage backend to override hashing (i.e., if it can do it more efficiently than the
+// torrent client can).
+type SelfHashing interface {
+	SelfHash() (metainfo.Hash, error)
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/issue95_test.go b/deps/github.com/anacrolix/torrent/storage/issue95_test.go
new file mode 100644
index 0000000..9237079
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/issue95_test.go
@@ -0,0 +1,51 @@
+package storage
+
+import (
+	"testing"
+
+	"github.com/anacrolix/missinggo/v2/resource"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+// Two different torrents opened from the same storage. Closing one should not
+// break the piece completion on the other.
+func testIssue95(t *testing.T, c ClientImpl) {
+	i1 := &metainfo.Info{
+		Files:  []metainfo.FileInfo{{Path: []string{"a"}}},
+		Pieces: make([]byte, 20),
+	}
+	t1, err := c.OpenTorrent(i1, metainfo.HashBytes([]byte("a")))
+	require.NoError(t, err)
+	defer t1.Close()
+	i2 := &metainfo.Info{
+		Files:  []metainfo.FileInfo{{Path: []string{"a"}}},
+		Pieces: make([]byte, 20),
+	}
+	t2, err := c.OpenTorrent(i2, metainfo.HashBytes([]byte("b")))
+	require.NoError(t, err)
+	defer t2.Close()
+	t2p := t2.Piece(i2.Piece(0))
+	assert.NoError(t, t1.Close())
+	assert.NotPanics(t, func() { t2p.Completion() })
+}
+
+func TestIssue95File(t *testing.T) {
+	td := t.TempDir()
+	cs := NewFile(td)
+	defer cs.Close()
+	testIssue95(t, cs)
+}
+
+func TestIssue95MMap(t *testing.T) {
+	td := t.TempDir()
+	cs := NewMMap(td)
+	defer cs.Close()
+	testIssue95(t, cs)
+}
+
+func TestIssue95ResourcePieces(t *testing.T) {
+	testIssue95(t, NewResourcePieces(resource.OSFileProvider{}))
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/issue96_test.go b/deps/github.com/anacrolix/torrent/storage/issue96_test.go
new file mode 100644
index 0000000..726c11c
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/issue96_test.go
@@ -0,0 +1,37 @@
+package storage
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+func testMarkedCompleteMissingOnRead(t *testing.T, csf func(string) ClientImplCloser) {
+	td := t.TempDir()
+	cic := csf(td)
+	defer cic.Close()
+	cs := NewClient(cic)
+	info := &metainfo.Info{
+		PieceLength: 1,
+		Files:       []metainfo.FileInfo{{Path: []string{"a"}, Length: 1}},
+	}
+	ts, err := cs.OpenTorrent(info, metainfo.Hash{})
+	require.NoError(t, err)
+	p := ts.Piece(info.Piece(0))
+	require.NoError(t, p.MarkComplete())
+	// require.False(t, p.GetIsComplete())
+	n, err := p.ReadAt(make([]byte, 1), 0)
+	require.Error(t, err)
+	require.EqualValues(t, 0, n)
+	require.False(t, p.Completion().Complete)
+}
+
+func TestMarkedCompleteMissingOnReadFile(t *testing.T) {
+	testMarkedCompleteMissingOnRead(t, NewFile)
+}
+
+func TestMarkedCompleteMissingOnReadFileBoltDB(t *testing.T) {
+	testMarkedCompleteMissingOnRead(t, NewBoltDB)
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/map-piece-completion.go b/deps/github.com/anacrolix/torrent/storage/map-piece-completion.go
new file mode 100644
index 0000000..afb1e97
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/map-piece-completion.go
@@ -0,0 +1,34 @@
+package storage
+
+import (
+	"sync"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+type mapPieceCompletion struct {
+	// TODO: Generics
+	m sync.Map
+}
+
+var _ PieceCompletion = (*mapPieceCompletion)(nil)
+
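+// NewMapPieceCompletion returns an in-memory PieceCompletion that is not
+// persisted across process restarts.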
+func NewMapPieceCompletion() PieceCompletion {
+	return &mapPieceCompletion{}
+}
+
+func (*mapPieceCompletion) Close() error { return nil }
+
+func (me *mapPieceCompletion) Get(pk metainfo.PieceKey) (c Completion, err error) {
+	v, ok := me.m.Load(pk)
+	if ok {
+		c.Complete = v.(bool)
+	}
+	c.Ok = ok
+	return
+}
+
+func (me *mapPieceCompletion) Set(pk metainfo.PieceKey, b bool) error {
+	me.m.Store(pk, b)
+	return nil
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/mark-complete_test.go b/deps/github.com/anacrolix/torrent/storage/mark-complete_test.go
new file mode 100644
index 0000000..7e50832
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/mark-complete_test.go
@@ -0,0 +1,30 @@
+package storage_test
+
+import (
+	"testing"
+
+	"github.com/anacrolix/torrent/storage"
+	test_storage "github.com/anacrolix/torrent/storage/test"
+)
+
+func BenchmarkMarkComplete(b *testing.B) {
+	bench := func(b *testing.B, ci storage.ClientImpl) {
+		test_storage.BenchmarkPieceMarkComplete(
+			b, ci, test_storage.DefaultPieceSize, test_storage.DefaultNumPieces, 0)
+	}
+	b.Run("File", func(b *testing.B) {
+		ci := storage.NewFile(b.TempDir())
+		b.Cleanup(func() { ci.Close() })
+		bench(b, ci)
+	})
+	b.Run("Mmap", func(b *testing.B) {
+		ci := storage.NewMMap(b.TempDir())
+		b.Cleanup(func() { ci.Close() })
+		bench(b, ci)
+	})
+	b.Run("BoltDb", func(b *testing.B) {
+		ci := storage.NewBoltDB(b.TempDir())
+		b.Cleanup(func() { ci.Close() })
+		bench(b, ci)
+	})
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/mmap.go b/deps/github.com/anacrolix/torrent/storage/mmap.go
new file mode 100644
index 0000000..1851c32
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/mmap.go
@@ -0,0 +1,230 @@
+//go:build !wasm
+// +build !wasm
+
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/anacrolix/missinggo/v2"
+	"github.com/edsrzf/mmap-go"
+
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/anacrolix/torrent/mmap_span"
+)
+
+type mmapClientImpl struct {
+	baseDir string
+	pc      PieceCompletion
+}
+
+// TODO: Support all the same native filepath configuration that NewFileOpts provides.
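+//
+// A minimal usage sketch (illustrative; the directory is an assumption):
+//
+//	cl := NewMMap("/data/torrents")
+//	defer cl.Close()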
+func NewMMap(baseDir string) ClientImplCloser {
+	return NewMMapWithCompletion(baseDir, pieceCompletionForDir(baseDir))
+}
+
+func NewMMapWithCompletion(baseDir string, completion PieceCompletion) *mmapClientImpl {
+	return &mmapClientImpl{
+		baseDir: baseDir,
+		pc:      completion,
+	}
+}
+
+func (s *mmapClientImpl) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (_ TorrentImpl, err error) {
+	span, err := mMapTorrent(info, s.baseDir)
+	t := &mmapTorrentStorage{
+		infoHash: infoHash,
+		span:     span,
+		pc:       s.pc,
+	}
+	return TorrentImpl{Piece: t.Piece, Close: t.Close, Flush: t.Flush}, err
+}
+
+func (s *mmapClientImpl) Close() error {
+	return s.pc.Close()
+}
+
+type mmapTorrentStorage struct {
+	infoHash metainfo.Hash
+	span     *mmap_span.MMapSpan
+	pc       PieceCompletionGetSetter
+}
+
+func (ts *mmapTorrentStorage) Piece(p metainfo.Piece) PieceImpl {
+	return mmapStoragePiece{
+		pc:       ts.pc,
+		p:        p,
+		ih:       ts.infoHash,
+		ReaderAt: io.NewSectionReader(ts.span, p.Offset(), p.Length()),
+		WriterAt: missinggo.NewSectionWriter(ts.span, p.Offset(), p.Length()),
+	}
+}
+
+func (ts *mmapTorrentStorage) Close() error {
+	errs := ts.span.Close()
+	if len(errs) > 0 {
+		return errs[0]
+	}
+	return nil
+}
+
+func (ts *mmapTorrentStorage) Flush() error {
+	errs := ts.span.Flush()
+	if len(errs) > 0 {
+		return errs[0]
+	}
+	return nil
+}
+
+type mmapStoragePiece struct {
+	pc PieceCompletionGetSetter
+	p  metainfo.Piece
+	ih metainfo.Hash
+	io.ReaderAt
+	io.WriterAt
+}
+
+func (me mmapStoragePiece) pieceKey() metainfo.PieceKey {
+	return metainfo.PieceKey{me.ih, me.p.Index()}
+}
+
+func (sp mmapStoragePiece) Completion() Completion {
+	c, err := sp.pc.Get(sp.pieceKey())
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+func (sp mmapStoragePiece) MarkComplete() error {
+	sp.pc.Set(sp.pieceKey(), true)
+	return nil
+}
+
+func (sp mmapStoragePiece) MarkNotComplete() error {
+	sp.pc.Set(sp.pieceKey(), false)
+	return nil
+}
+
+func mMapTorrent(md *metainfo.Info, location string) (mms *mmap_span.MMapSpan, err error) {
+	mms = &mmap_span.MMapSpan{}
+	defer func() {
+		if err != nil {
+			mms.Close()
+		}
+	}()
+	for _, miFile := range md.UpvertedFiles() {
+		var safeName string
+		safeName, err = ToSafeFilePath(append([]string{md.Name}, miFile.Path...)...)
+		if err != nil {
+			return
+		}
+		fileName := filepath.Join(location, safeName)
+		var mm FileMapping
+		mm, err = mmapFile(fileName, miFile.Length)
+		if err != nil {
+			err = fmt.Errorf("file %q: %s", miFile.DisplayPath(md), err)
+			return
+		}
+		mms.Append(mm)
+	}
+	mms.InitIndex()
+	return
+}
+
+func mmapFile(name string, size int64) (_ FileMapping, err error) {
+	dir := filepath.Dir(name)
+	err = os.MkdirAll(dir, 0o750)
+	if err != nil {
+		err = fmt.Errorf("making directory %q: %s", dir, err)
+		return
+	}
+	var file *os.File
+	file, err = os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0o666)
+	if err != nil {
+		return
+	}
+	defer func() {
+		if err != nil {
+			file.Close()
+		}
+	}()
+	var fi os.FileInfo
+	fi, err = file.Stat()
+	if err != nil {
+		return
+	}
+	if fi.Size() < size {
+		// I think this is necessary on HFS+. Maybe Linux will SIGBUS too if
+		// you overmap a file but I'm not sure.
+		err = file.Truncate(size)
+		if err != nil {
+			return
+		}
+	}
+	return func() (ret mmapWithFile, err error) {
+		ret.f = file
+		if size == 0 {
+			// Can't mmap() regions with length 0.
+			return
+		}
+		intLen := int(size)
+		if int64(intLen) != size {
+			err = errors.New("size too large for system")
+			return
+		}
+		ret.mmap, err = mmap.MapRegion(file, intLen, mmap.RDWR, 0, 0)
+		if err != nil {
+			err = fmt.Errorf("error mapping region: %s", err)
+			return
+		}
+		if int64(len(ret.mmap)) != size {
+			panic(len(ret.mmap))
+		}
+		return
+	}()
+}
+
+// Combines a mmapped region and file into a storage Mmap abstraction, which handles closing the
+// mmap file handle.
+func WrapFileMapping(region mmap.MMap, file *os.File) FileMapping {
+	return mmapWithFile{
+		f:    file,
+		mmap: region,
+	}
+}
+
+type FileMapping = mmap_span.Mmap
+
+// Handles closing the mmap's file handle (needed on Windows). Could be specialized per OS.
+type mmapWithFile struct {
+	f    *os.File
+	mmap mmap.MMap
+}
+
+func (m mmapWithFile) Flush() error {
+	return m.mmap.Flush()
+}
+
+func (m mmapWithFile) Unmap() (err error) {
+	if m.mmap != nil {
+		err = m.mmap.Unmap()
+	}
+	fileErr := m.f.Close()
+	if err == nil {
+		err = fileErr
+	}
+	return
+}
+
+func (m mmapWithFile) Bytes() []byte {
+	if m.mmap == nil {
+		return nil
+	}
+	return m.mmap
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/mmap_test.go b/deps/github.com/anacrolix/torrent/storage/mmap_test.go
new file mode 100644
index 0000000..54260ec
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/mmap_test.go
@@ -0,0 +1,25 @@
+package storage
+
+import (
+	"testing"
+
+	qt "github.com/frankban/quicktest"
+
+	"github.com/anacrolix/torrent/internal/testutil"
+)
+
+func TestMmapWindows(t *testing.T) {
+	c := qt.New(t)
+	dir, mi := testutil.GreetingTestTorrent()
+	cs := NewMMap(dir)
+	defer func() {
+		c.Check(cs.Close(), qt.IsNil)
+	}()
+	info, err := mi.UnmarshalInfo()
+	c.Assert(err, qt.IsNil)
+	ts, err := cs.OpenTorrent(&info, mi.HashInfoBytes())
+	c.Assert(err, qt.IsNil)
+	defer func() {
+		c.Check(ts.Close(), qt.IsNil)
+	}()
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/piece-completion.go b/deps/github.com/anacrolix/torrent/storage/piece-completion.go
new file mode 100644
index 0000000..bc646bd
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/piece-completion.go
@@ -0,0 +1,27 @@
+package storage
+
+import (
+	"github.com/anacrolix/log"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+type PieceCompletionGetSetter interface {
+	Get(metainfo.PieceKey) (Completion, error)
+	Set(_ metainfo.PieceKey, complete bool) error
+}
+
+// Implementations track the completion of pieces and must be concurrent-safe.
+type PieceCompletion interface {
+	PieceCompletionGetSetter
+	Close() error
+}
+
+func pieceCompletionForDir(dir string) (ret PieceCompletion) {
+	ret, err := NewDefaultPieceCompletionForDir(dir)
+	if err != nil {
+		log.Printf("couldn't open piece completion db in %q: %s", dir, err)
+		ret = NewMapPieceCompletion()
+	}
+	return
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/piece-resource.go b/deps/github.com/anacrolix/torrent/storage/piece-resource.go
new file mode 100644
index 0000000..5327f31
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/piece-resource.go
@@ -0,0 +1,279 @@
+package storage
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"path"
+	"sort"
+	"strconv"
+	"sync"
+
+	"github.com/anacrolix/missinggo/v2/resource"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
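+// piecePerResource stores piece data as resources under the provider: incomplete
+// chunks live at "incompleted/<piece hash>/<chunk offset>" and are moved to
+// "completed/<piece hash>" when the piece is marked complete.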
+type piecePerResource struct {
+	rp   PieceProvider
+	opts ResourcePiecesOpts
+}
+
+type ResourcePiecesOpts struct {
+	// After marking a piece complete, don't bother deleting its incomplete blobs.
+	LeaveIncompleteChunks bool
+	// Sized puts require being able to stream from a statement executed on another connection.
+	// Without them, we buffer the entire read and then put that.
+	NoSizedPuts bool
+	Capacity    *int64
+}
+
+func NewResourcePieces(p PieceProvider) ClientImpl {
+	return NewResourcePiecesOpts(p, ResourcePiecesOpts{})
+}
+
+func NewResourcePiecesOpts(p PieceProvider, opts ResourcePiecesOpts) ClientImpl {
+	return &piecePerResource{
+		rp:   p,
+		opts: opts,
+	}
+}
+
+type piecePerResourceTorrentImpl struct {
+	piecePerResource
+	locks []sync.RWMutex
+}
+
+func (piecePerResourceTorrentImpl) Close() error {
+	return nil
+}
+
+func (s piecePerResource) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (TorrentImpl, error) {
+	t := piecePerResourceTorrentImpl{
+		s,
+		make([]sync.RWMutex, info.NumPieces()),
+	}
+	return TorrentImpl{Piece: t.Piece, Close: t.Close}, nil
+}
+
+func (s piecePerResourceTorrentImpl) Piece(p metainfo.Piece) PieceImpl {
+	return piecePerResourcePiece{
+		mp:               p,
+		piecePerResource: s.piecePerResource,
+		mu:               &s.locks[p.Index()],
+	}
+}
+
+type PieceProvider interface {
+	resource.Provider
+}
+
+type ConsecutiveChunkReader interface {
+	ReadConsecutiveChunks(prefix string) (io.ReadCloser, error)
+}
+
+type piecePerResourcePiece struct {
+	mp metainfo.Piece
+	piecePerResource
+	// This protects operations that move complete/incomplete pieces around, which can trigger read
+	// errors that may cause callers to do more drastic things.
+	mu *sync.RWMutex
+}
+
+var _ io.WriterTo = piecePerResourcePiece{}
+
+func (s piecePerResourcePiece) WriteTo(w io.Writer) (int64, error) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	if s.mustIsComplete() {
+		r, err := s.completed().Get()
+		if err != nil {
+			return 0, fmt.Errorf("getting complete instance: %w", err)
+		}
+		defer r.Close()
+		return io.Copy(w, r)
+	}
+	if ccr, ok := s.rp.(ConsecutiveChunkReader); ok {
+		return s.writeConsecutiveIncompleteChunks(ccr, w)
+	}
+	return io.Copy(w, io.NewSectionReader(s, 0, s.mp.Length()))
+}
+
+func (s piecePerResourcePiece) writeConsecutiveIncompleteChunks(ccw ConsecutiveChunkReader, w io.Writer) (int64, error) {
+	r, err := ccw.ReadConsecutiveChunks(s.incompleteDirPath() + "/")
+	if err != nil {
+		return 0, err
+	}
+	defer r.Close()
+	return io.Copy(w, r)
+}
+
+// Returns whether the piece is complete. Completion.Ok should be true, because this storage is
+// the definitive source of truth for its own pieces.
+func (s piecePerResourcePiece) mustIsComplete() bool {
+	completion := s.Completion()
+	if !completion.Ok {
+		panic("must know complete definitively")
+	}
+	return completion.Complete
+}
+
+func (s piecePerResourcePiece) Completion() Completion {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	fi, err := s.completed().Stat()
+	return Completion{
+		Complete: err == nil && fi.Size() == s.mp.Length(),
+		Ok:       true,
+	}
+}
+
+type SizedPutter interface {
+	PutSized(io.Reader, int64) error
+}
+
+func (s piecePerResourcePiece) MarkComplete() error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	incompleteChunks := s.getChunks()
+	r, err := func() (io.ReadCloser, error) {
+		if ccr, ok := s.rp.(ConsecutiveChunkReader); ok {
+			return ccr.ReadConsecutiveChunks(s.incompleteDirPath() + "/")
+		}
+		return io.NopCloser(io.NewSectionReader(incompleteChunks, 0, s.mp.Length())), nil
+	}()
+	if err != nil {
+		return fmt.Errorf("getting incomplete chunks reader: %w", err)
+	}
+	defer r.Close()
+	completedInstance := s.completed()
+	err = func() error {
+		if sp, ok := completedInstance.(SizedPutter); ok && !s.opts.NoSizedPuts {
+			return sp.PutSized(r, s.mp.Length())
+		} else {
+			return completedInstance.Put(r)
+		}
+	}()
+	if err == nil && !s.opts.LeaveIncompleteChunks {
+		// I think we do this synchronously here since we don't want callers to act on the completed
+		// piece if we're concurrently still deleting chunks. The caller may decide to start
+		// downloading chunks again and won't expect us to delete them. It seems to be much faster
+		// to let the resource provider do this if possible.
+		var wg sync.WaitGroup
+		for _, c := range incompleteChunks {
+			wg.Add(1)
+			go func(c chunk) {
+				defer wg.Done()
+				c.instance.Delete()
+			}(c)
+		}
+		wg.Wait()
+	}
+	return err
+}
+
+func (s piecePerResourcePiece) MarkNotComplete() error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.completed().Delete()
+}
+
+func (s piecePerResourcePiece) ReadAt(b []byte, off int64) (int, error) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	if s.mustIsComplete() {
+		return s.completed().ReadAt(b, off)
+	}
+	return s.getChunks().ReadAt(b, off)
+}
+
+func (s piecePerResourcePiece) WriteAt(b []byte, off int64) (n int, err error) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	i, err := s.rp.NewInstance(path.Join(s.incompleteDirPath(), strconv.FormatInt(off, 10)))
+	if err != nil {
+		panic(err)
+	}
+	r := bytes.NewReader(b)
+	if sp, ok := i.(SizedPutter); ok {
+		err = sp.PutSized(r, r.Size())
+	} else {
+		err = i.Put(r)
+	}
+	n = len(b) - r.Len()
+	return
+}
+
+type chunk struct {
+	offset   int64
+	instance resource.Instance
+}
+
+type chunks []chunk
+
+func (me chunks) ReadAt(b []byte, off int64) (int, error) {
+	for {
+		if len(me) == 0 {
+			return 0, io.EOF
+		}
+		if me[0].offset <= off {
+			break
+		}
+		me = me[1:]
+	}
+	n, err := me[0].instance.ReadAt(b, off-me[0].offset)
+	if n == len(b) {
+		return n, nil
+	}
+	if err == nil || err == io.EOF {
+		n_, err := me[1:].ReadAt(b[n:], off+int64(n))
+		return n + n_, err
+	}
+	return n, err
+}
+
+func (s piecePerResourcePiece) getChunks() (chunks chunks) {
+	names, err := s.incompleteDir().Readdirnames()
+	if err != nil {
+		return
+	}
+	for _, n := range names {
+		offset, err := strconv.ParseInt(n, 10, 64)
+		if err != nil {
+			panic(err)
+		}
+		i, err := s.rp.NewInstance(path.Join(s.incompleteDirPath(), n))
+		if err != nil {
+			panic(err)
+		}
+		chunks = append(chunks, chunk{offset, i})
+	}
+	sort.Slice(chunks, func(i, j int) bool {
+		return chunks[i].offset < chunks[j].offset
+	})
+	return
+}
+
+func (s piecePerResourcePiece) completedInstancePath() string {
+	return path.Join("completed", s.mp.Hash().HexString())
+}
+
+func (s piecePerResourcePiece) completed() resource.Instance {
+	i, err := s.rp.NewInstance(s.completedInstancePath())
+	if err != nil {
+		panic(err)
+	}
+	return i
+}
+
+func (s piecePerResourcePiece) incompleteDirPath() string {
+	return path.Join("incompleted", s.mp.Hash().HexString())
+}
+
+func (s piecePerResourcePiece) incompleteDir() resource.DirInstance {
+	i, err := s.rp.NewInstance(s.incompleteDirPath())
+	if err != nil {
+		panic(err)
+	}
+	return i.(resource.DirInstance)
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/safe-path.go b/deps/github.com/anacrolix/torrent/storage/safe-path.go
new file mode 100644
index 0000000..9e50b7e
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/safe-path.go
@@ -0,0 +1,29 @@
+package storage
+
+import (
+	"errors"
+	"path/filepath"
+	"strings"
+)
+
+// Get the first file path component. We can't use filepath.Split because that breaks off the last
+// one. We could optimize this to avoid allocating a slice down the track.
+func firstComponent(filePath string) string {
+	return strings.SplitN(filePath, string(filepath.Separator), 2)[0]
+}
+
+// Combines file info path components, ensuring the result won't escape into parent directories.
+func ToSafeFilePath(fileInfoComponents ...string) (string, error) {
+	safeComps := make([]string, 0, len(fileInfoComponents))
+	for _, comp := range fileInfoComponents {
+		safeComps = append(safeComps, filepath.Clean(comp))
+	}
+	safeFilePath := filepath.Join(safeComps...)
+	fc := firstComponent(safeFilePath)
+	switch fc {
+	case "..":
+		return "", errors.New("escapes root dir")
+	default:
+		return safeFilePath, nil
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/safe-path_test.go b/deps/github.com/anacrolix/torrent/storage/safe-path_test.go
new file mode 100644
index 0000000..452ab28
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/safe-path_test.go
@@ -0,0 +1,71 @@
+package storage
+
+import (
+	"fmt"
+	"log"
+	"path/filepath"
+	"testing"
+
+	qt "github.com/frankban/quicktest"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+func init() {
+	log.SetFlags(log.Flags() | log.Lshortfile)
+}
+
+// I think these are mainly tests for bad metainfos that try to escape the client base directory.
+var safeFilePathTests = []struct {
+	input     []string
+	expectErr bool
+}{
+	// We might want a test for invalid chars inside components, or file maker opt funcs returning
+	// absolute paths (and thus presumably clobbering earlier "makers").
+	{input: []string{"a", filepath.FromSlash(`b/..`)}, expectErr: false},
+	{input: []string{"a", filepath.FromSlash(`b/../../..`)}, expectErr: true},
+	{input: []string{"a", filepath.FromSlash(`b/../.././..`)}, expectErr: true},
+	{
+		input: []string{
+			filepath.FromSlash(`NewSuperHeroMovie-2019-English-720p.avi /../../../../../Roaming/Microsoft/Windows/Start Menu/Programs/Startup/test3.exe`),
+		},
+		expectErr: true,
+	},
+}
+
+// Tests the ToSafeFilePath func.
+func TestToSafeFilePath(t *testing.T) {
+	for _, _case := range safeFilePathTests {
+		actual, err := ToSafeFilePath(_case.input...)
+		if _case.expectErr {
+			if err != nil {
+				continue
+			}
+			t.Errorf("%q: expected error, got output %q", _case.input, actual)
+		}
+	}
+}
+
+// Check that safe file path handling still exists for the newer file-opt-maker variants.
+func TestFileOptsSafeFilePathHandling(t *testing.T) {
+	c := qt.New(t)
+	for i, _case := range safeFilePathTests {
+		c.Run(fmt.Sprintf("Case%v", i), func(c *qt.C) {
+			info := metainfo.Info{
+				Files: []metainfo.FileInfo{
+					{Path: _case.input},
+				},
+			}
+			client := NewFileOpts(NewFileClientOpts{
+				ClientBaseDir: t.TempDir(),
+			})
+			defer func() { c.Check(client.Close(), qt.IsNil) }()
+			torImpl, err := client.OpenTorrent(&info, metainfo.Hash{})
+			if _case.expectErr {
+				c.Check(err, qt.Not(qt.IsNil))
+			} else {
+				c.Check(torImpl.Close(), qt.IsNil)
+			}
+		})
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/sqlite-piece-completion.go b/deps/github.com/anacrolix/torrent/storage/sqlite-piece-completion.go
new file mode 100644
index 0000000..73407f3
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/sqlite-piece-completion.go
@@ -0,0 +1,85 @@
+// modernc.org/sqlite depends on modernc.org/libc which doesn't work for JS (and probably wasm but I
+// think JS is the stronger signal).
+
+//go:build cgo && !nosqlite
+// +build cgo,!nosqlite
+
+package storage
+
+import (
+	"errors"
+	"path/filepath"
+	"sync"
+
+	"github.com/go-llsqlite/adapter"
+	"github.com/go-llsqlite/adapter/sqlitex"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+// sqlite is always the default when available.
+func NewDefaultPieceCompletionForDir(dir string) (PieceCompletion, error) {
+	return NewSqlitePieceCompletion(dir)
+}
+
+type sqlitePieceCompletion struct {
+	mu     sync.Mutex
+	closed bool
+	db     *sqlite.Conn
+}
+
+var _ PieceCompletion = (*sqlitePieceCompletion)(nil)
+
+func NewSqlitePieceCompletion(dir string) (ret *sqlitePieceCompletion, err error) {
+	p := filepath.Join(dir, ".torrent.db")
+	db, err := sqlite.OpenConn(p, 0)
+	if err != nil {
+		return
+	}
+	err = sqlitex.ExecScript(db, `create table if not exists piece_completion(infohash, "index", complete, unique(infohash, "index"))`)
+	if err != nil {
+		db.Close()
+		return
+	}
+	ret = &sqlitePieceCompletion{db: db}
+	return
+}
+
+func (me *sqlitePieceCompletion) Get(pk metainfo.PieceKey) (c Completion, err error) {
+	me.mu.Lock()
+	defer me.mu.Unlock()
+	err = sqlitex.Exec(
+		me.db, `select complete from piece_completion where infohash=? and "index"=?`,
+		func(stmt *sqlite.Stmt) error {
+			c.Complete = stmt.ColumnInt(0) != 0
+			c.Ok = true
+			return nil
+		},
+		pk.InfoHash.HexString(), pk.Index)
+	return
+}
+
+func (me *sqlitePieceCompletion) Set(pk metainfo.PieceKey, b bool) error {
+	me.mu.Lock()
+	defer me.mu.Unlock()
+	if me.closed {
+		return errors.New("closed")
+	}
+	return sqlitex.Exec(
+		me.db,
+		`insert or replace into piece_completion(infohash, "index", complete) values(?, ?, ?)`,
+		nil,
+		pk.InfoHash.HexString(), pk.Index, b)
+}
+
+func (me *sqlitePieceCompletion) Close() (err error) {
+	me.mu.Lock()
+	defer me.mu.Unlock()
+	if me.closed {
+		return
+	}
+	err = me.db.Close()
+	me.db = nil
+	me.closed = true
+	return
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/sqlite/deprecated.go b/deps/github.com/anacrolix/torrent/storage/sqlite/deprecated.go
new file mode 100644
index 0000000..47698ef
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/sqlite/deprecated.go
@@ -0,0 +1,10 @@
+//go:build cgo
+// +build cgo
+
+package sqliteStorage
+
+import (
+	"github.com/anacrolix/squirrel"
+)
+
+type NewDirectStorageOpts = squirrel.NewCacheOpts
diff --git a/deps/github.com/anacrolix/torrent/storage/sqlite/direct.go b/deps/github.com/anacrolix/torrent/storage/sqlite/direct.go
new file mode 100644
index 0000000..8e0a4a8
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/sqlite/direct.go
@@ -0,0 +1,83 @@
+//go:build cgo
+// +build cgo
+
+package sqliteStorage
+
+import (
+	"io"
+
+	"github.com/anacrolix/squirrel"
+
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/anacrolix/torrent/storage"
+)
+
+// NewDirectStorage is a convenience function that creates a squirrel cache and returns a pieces
+// storage ClientImplCloser backed by it.
+func NewDirectStorage(opts NewDirectStorageOpts) (_ storage.ClientImplCloser, err error) {
+	cache, err := squirrel.NewCache(opts)
+	if err != nil {
+		return
+	}
+	return &client{
+		cache,
+		cache.GetCapacity,
+	}, nil
+}
+
+func NewWrappingClient(cache *squirrel.Cache) storage.ClientImpl {
+	return &client{
+		cache,
+		cache.GetCapacity,
+	}
+}
+
+type client struct {
+	*squirrel.Cache
+	capacity func() (int64, bool)
+}
+
+func (c *client) OpenTorrent(*metainfo.Info, metainfo.Hash) (storage.TorrentImpl, error) {
+	t := torrent{c.Cache}
+	return storage.TorrentImpl{Piece: t.Piece, Close: t.Close, Capacity: &c.capacity}, nil
+}
+
+type torrent struct {
+	c *squirrel.Cache
+}
+
+func (t torrent) Piece(p metainfo.Piece) storage.PieceImpl {
+	ret := piece{
+		sb: t.c.OpenWithLength(p.Hash().HexString(), p.Length()),
+	}
+	ret.ReaderAt = &ret.sb
+	ret.WriterAt = &ret.sb
+	return ret
+}
+
+func (t torrent) Close() error {
+	return nil
+}
+
+type piece struct {
+	sb squirrel.Blob
+	io.ReaderAt
+	io.WriterAt
+}
+
+func (p piece) MarkComplete() error {
+	return p.sb.SetTag("verified", true)
+}
+
+func (p piece) MarkNotComplete() error {
+	return p.sb.SetTag("verified", false)
+}
+
+func (p piece) Completion() (ret storage.Completion) {
+	err := p.sb.GetTag("verified", func(stmt squirrel.SqliteStmt) {
+		ret.Complete = stmt.ColumnInt(0) != 0
+	})
+	ret.Ok = err == nil
+	ret.Err = err
+	return
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/sqlite/dummy.go b/deps/github.com/anacrolix/torrent/storage/sqlite/dummy.go
new file mode 100644
index 0000000..ae48a77
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/sqlite/dummy.go
@@ -0,0 +1 @@
+package sqliteStorage
diff --git a/deps/github.com/anacrolix/torrent/storage/sqlite/sqlite-storage_test.go b/deps/github.com/anacrolix/torrent/storage/sqlite/sqlite-storage_test.go
new file mode 100644
index 0000000..a566322
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/sqlite/sqlite-storage_test.go
@@ -0,0 +1,106 @@
+//go:build cgo
+// +build cgo
+
+package sqliteStorage
+
+import (
+	"errors"
+	"fmt"
+	"path/filepath"
+	"testing"
+
+	_ "github.com/anacrolix/envpprof"
+	"github.com/anacrolix/squirrel"
+	"github.com/dustin/go-humanize"
+	qt "github.com/frankban/quicktest"
+
+	"github.com/anacrolix/torrent/storage"
+	test_storage "github.com/anacrolix/torrent/storage/test"
+	"github.com/anacrolix/torrent/test"
+)
+
+func TestLeecherStorage(t *testing.T) {
+	test.TestLeecherStorage(t, test.LeecherStorageTestCase{
+		"SqliteDirect",
+		func(s string) storage.ClientImplCloser {
+			path := filepath.Join(s, "sqlite3.db")
+			var opts NewDirectStorageOpts
+			opts.Path = path
+			cl, err := NewDirectStorage(opts)
+			if err != nil {
+				panic(err)
+			}
+			return cl
+		},
+		0,
+	})
+}
+
+func BenchmarkMarkComplete(b *testing.B) {
+	const pieceSize = test_storage.DefaultPieceSize
+	const noTriggers = false
+	var capacity int64 = test_storage.DefaultNumPieces * pieceSize / 2
+	if noTriggers {
+		// Since we won't push out old pieces, we have to mark them incomplete manually.
+		capacity = 0
+	}
+	runBench := func(b *testing.B, ci storage.ClientImpl) {
+		test_storage.BenchmarkPieceMarkComplete(b, ci, pieceSize, test_storage.DefaultNumPieces, capacity)
+	}
+	c := qt.New(b)
+	b.Run("CustomDirect", func(b *testing.B) {
+		var opts squirrel.NewCacheOpts
+		opts.Capacity = capacity
+		opts.NoTriggers = noTriggers
+		benchOpts := func(b *testing.B) {
+			opts.Path = filepath.Join(b.TempDir(), "storage.db")
+			ci, err := NewDirectStorage(opts)
+			c.Assert(err, qt.IsNil)
+			defer ci.Close()
+			runBench(b, ci)
+		}
+		b.Run("Default", benchOpts)
+	})
+	for _, memory := range []bool{false, true} {
+		b.Run(fmt.Sprintf("Memory=%v", memory), func(b *testing.B) {
+			b.Run("Direct", func(b *testing.B) {
+				var opts NewDirectStorageOpts
+				opts.Memory = memory
+				opts.Capacity = capacity
+				opts.NoTriggers = noTriggers
+				directBench := func(b *testing.B) {
+					opts.Path = filepath.Join(b.TempDir(), "storage.db")
+					ci, err := NewDirectStorage(opts)
+					var ujm squirrel.ErrUnexpectedJournalMode
+					if errors.As(err, &ujm) {
+						b.Skipf("setting journal mode %q: %v", opts.SetJournalMode, err)
+					}
+					c.Assert(err, qt.IsNil)
+					defer ci.Close()
+					runBench(b, ci)
+				}
+				for _, journalMode := range []string{"", "wal", "off", "truncate", "delete", "persist", "memory"} {
+					opts.SetJournalMode = journalMode
+					b.Run("JournalMode="+journalMode, func(b *testing.B) {
+						for _, mmapSize := range []int64{-1} {
+							if memory && mmapSize >= 0 {
+								continue
+							}
+							b.Run(fmt.Sprintf("MmapSize=%s", func() string {
+								if mmapSize < 0 {
+									return "default"
+								} else {
+									return humanize.IBytes(uint64(mmapSize))
+								}
+							}()), func(b *testing.B) {
+								opts.MmapSize = mmapSize
+								opts.MmapSizeOk = true
+								directBench(b)
+							})
+						}
+					})
+				}
+			})
+		})
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/storage/storage_test.go b/deps/github.com/anacrolix/torrent/storage/storage_test.go
new file mode 100644
index 0000000..8eee160
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/storage_test.go
@@ -0,0 +1,5 @@
+package storage
+
+import (
+	_ "github.com/anacrolix/envpprof"
+)
diff --git a/deps/github.com/anacrolix/torrent/storage/test/bench-piece-mark-complete.go b/deps/github.com/anacrolix/torrent/storage/test/bench-piece-mark-complete.go
new file mode 100644
index 0000000..43390e3
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/test/bench-piece-mark-complete.go
@@ -0,0 +1,91 @@
+package test_storage
+
+import (
+	"bytes"
+	"math/rand"
+	"sync"
+	"testing"
+
+	qt "github.com/frankban/quicktest"
+
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/anacrolix/torrent/storage"
+)
+
+const (
+	ChunkSize        = 1 << 14
+	DefaultPieceSize = 2 << 20
+	DefaultNumPieces = 16
+)
+
+// This writes chunks to the storage concurrently, and waits for them all to complete. This matches
+// the behaviour from the peer connection read loop.
+func BenchmarkPieceMarkComplete(
+	b *testing.B, ci storage.ClientImpl,
+	pieceSize int64, numPieces int,
+	// This drives any special handling around capacity that may be configured into the storage
+	// implementation.
+	capacity int64,
+) {
+	c := qt.New(b)
+	info := &metainfo.Info{
+		Pieces:      make([]byte, numPieces*metainfo.HashSize),
+		PieceLength: pieceSize,
+		Length:      pieceSize * int64(numPieces),
+		Name:        "TorrentName",
+	}
+	ti, err := ci.OpenTorrent(info, metainfo.Hash{})
+	c.Assert(err, qt.IsNil)
+	tw := storage.Torrent{ti}
+	defer tw.Close()
+	rand.Read(info.Pieces)
+	data := make([]byte, pieceSize)
+	readData := make([]byte, pieceSize)
+	b.SetBytes(int64(numPieces) * pieceSize)
+	oneIter := func() {
+		for pieceIndex := 0; pieceIndex < numPieces; pieceIndex += 1 {
+			pi := tw.Piece(info.Piece(pieceIndex))
+			rand.Read(data)
+			b.StartTimer()
+			var wg sync.WaitGroup
+			for off := int64(0); off < int64(len(data)); off += ChunkSize {
+				wg.Add(1)
+				go func(off int64) {
+					defer wg.Done()
+					n, err := pi.WriteAt(data[off:off+ChunkSize], off)
+					if err != nil {
+						panic(err)
+					}
+					if n != ChunkSize {
+						panic(n)
+					}
+				}(off)
+			}
+			wg.Wait()
+			if capacity == 0 {
+				pi.MarkNotComplete()
+			}
+			// This might not apply if users of this benchmark don't cache with the expected capacity.
+			c.Assert(pi.Completion(), qt.Equals, storage.Completion{Complete: false, Ok: true})
+			c.Assert(pi.MarkComplete(), qt.IsNil)
+			c.Assert(pi.Completion(), qt.Equals, storage.Completion{Complete: true, Ok: true})
+			n, err := pi.WriteTo(bytes.NewBuffer(readData[:0]))
+			b.StopTimer()
+			c.Assert(err, qt.IsNil)
+			c.Assert(n, qt.Equals, int64(len(data)))
+			c.Assert(bytes.Equal(readData[:n], data), qt.IsTrue)
+		}
+	}
+	// Fill the cache
+	if capacity > 0 {
+		iterN := int((capacity + info.TotalLength() - 1) / info.TotalLength())
+		for i := 0; i < iterN; i += 1 {
+			oneIter()
+		}
+	}
+	b.StopTimer()
+	b.ResetTimer()
+	for i := 0; i < b.N; i += 1 {
+		oneIter()
+	}
+}
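+
+// A minimal sketch of how a storage implementation might wire this helper
+// into its own benchmark (the benchmark name is illustrative; storage.NewFile
+// is the file-backed implementation, and capacity 0 means no
+// capacity-specific handling):
+//
+//	func BenchmarkMarkCompleteFile(b *testing.B) {
+//		ci := storage.NewFile(b.TempDir())
+//		defer ci.Close()
+//		test_storage.BenchmarkPieceMarkComplete(
+//			b, ci, test_storage.DefaultPieceSize, test_storage.DefaultNumPieces, 0)
+//	}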
diff --git a/deps/github.com/anacrolix/torrent/storage/wrappers.go b/deps/github.com/anacrolix/torrent/storage/wrappers.go
new file mode 100644
index 0000000..a3907e1
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/storage/wrappers.go
@@ -0,0 +1,103 @@
+package storage
+
+import (
+	"io"
+	"os"
+
+	"github.com/anacrolix/missinggo/v2"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+type Client struct {
+	ci ClientImpl
+}
+
+func NewClient(cl ClientImpl) *Client {
+	return &Client{cl}
+}
+
+func (cl Client) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (*Torrent, error) {
+	t, err := cl.ci.OpenTorrent(info, infoHash)
+	if err != nil {
+		return nil, err
+	}
+	return &Torrent{t}, nil
+}
+
+type Torrent struct {
+	TorrentImpl
+}
+
+func (t Torrent) Piece(p metainfo.Piece) Piece {
+	return Piece{t.TorrentImpl.Piece(p), p}
+}
+
+type Piece struct {
+	PieceImpl
+	mip metainfo.Piece
+}
+
+var _ io.WriterTo = Piece{}
+
+// Why do we have this wrapper? PieceImpl doesn't implement io.Reader, so we can't let io.Copy
+// and friends check for io.WriterTo and fall back for us, since they expect an io.Reader.
+func (p Piece) WriteTo(w io.Writer) (int64, error) {
+	if i, ok := p.PieceImpl.(io.WriterTo); ok {
+		return i.WriteTo(w)
+	}
+	n := p.mip.Length()
+	r := io.NewSectionReader(p, 0, n)
+	return io.CopyN(w, r, n)
+}
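+
+// Sketch of the intended use: copy a piece out through the wrapper so that an
+// underlying io.WriterTo is picked up when the implementation provides one (t
+// and info stand in for a storage.Torrent and its metainfo.Info):
+//
+//	var buf bytes.Buffer
+//	n, err := t.Piece(info.Piece(0)).WriteTo(&buf)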
+
+func (p Piece) WriteAt(b []byte, off int64) (n int, err error) {
+	// Callers should not be writing to completed pieces, but it's too
+	// expensive to be checking this on every single write using uncached
+	// completions.
+
+	// c := p.Completion()
+	// if c.Ok && c.Complete {
+	// 	err = errors.New("piece already completed")
+	// 	return
+	// }
+	if off+int64(len(b)) > p.mip.Length() {
+		panic("write overflows piece")
+	}
+	b = missinggo.LimitLen(b, p.mip.Length()-off)
+	return p.PieceImpl.WriteAt(b, off)
+}
+
+func (p Piece) ReadAt(b []byte, off int64) (n int, err error) {
+	if off < 0 {
+		err = os.ErrInvalid
+		return
+	}
+	if off >= p.mip.Length() {
+		err = io.EOF
+		return
+	}
+	b = missinggo.LimitLen(b, p.mip.Length()-off)
+	if len(b) == 0 {
+		return
+	}
+	n, err = p.PieceImpl.ReadAt(b, off)
+	if n > len(b) {
+		panic(n)
+	}
+	if n == 0 && err == nil {
+		panic("io.Copy will get stuck")
+	}
+	off += int64(n)
+
+	// Doing this here may be inaccurate. There's legitimate reasons we may fail to read while the
+	// data is still there, such as too many open files. There should probably be a specific error
+	// to return if the data has been lost.
+	if off < p.mip.Length() {
+		if err == io.EOF {
+			p.MarkNotComplete()
+		}
+	}
+
+	return
+}
diff --git a/deps/github.com/anacrolix/torrent/string-addr.go b/deps/github.com/anacrolix/torrent/string-addr.go
new file mode 100644
index 0000000..c124541
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/string-addr.go
@@ -0,0 +1,11 @@
+package torrent
+
+import "net"
+
+// This adds a net.Addr interface to a string address that has no presumed Network.
+type StringAddr string
+
+var _ net.Addr = StringAddr("")
+
+func (StringAddr) Network() string   { return "" }
+func (me StringAddr) String() string { return string(me) }
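+
+// Sketch: handing the client a peer known only by a "host:port" string (the
+// address here is illustrative; PeerInfo and Torrent.AddPeers are defined
+// elsewhere in this package):
+//
+//	t.AddPeers([]PeerInfo{{Addr: StringAddr("203.0.113.1:6881")}})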
diff --git a/deps/github.com/anacrolix/torrent/struct_test.go b/deps/github.com/anacrolix/torrent/struct_test.go
new file mode 100644
index 0000000..cee91e1
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/struct_test.go
@@ -0,0 +1,12 @@
+package torrent
+
+import (
+	"testing"
+	"unsafe"
+)
+
+func TestStructSizes(t *testing.T) {
+	t.Log("[]*File", unsafe.Sizeof([]*File(nil)))
+	t.Log("Piece", unsafe.Sizeof(Piece{}))
+	t.Log("map[*peer]struct{}", unsafe.Sizeof(map[*Peer]struct{}(nil)))
+}
diff --git a/deps/github.com/anacrolix/torrent/t.go b/deps/github.com/anacrolix/torrent/t.go
new file mode 100644
index 0000000..6a46070
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/t.go
@@ -0,0 +1,286 @@
+package torrent
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/anacrolix/chansync/events"
+	"github.com/anacrolix/missinggo/v2/pubsub"
+	"github.com/anacrolix/sync"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+// The Torrent's infohash. This is fixed and cannot change. It uniquely identifies a torrent.
+func (t *Torrent) InfoHash() metainfo.Hash {
+	return t.infoHash
+}
+
+// Returns a channel that is closed when the info (.Info()) for the torrent has become available.
+func (t *Torrent) GotInfo() events.Done {
+	return t.gotMetainfoC
+}
+
+// Returns the metainfo info dictionary, or nil if it's not yet available.
+func (t *Torrent) Info() (info *metainfo.Info) {
+	t.nameMu.RLock()
+	info = t.info
+	t.nameMu.RUnlock()
+	return
+}
+
+// Returns a Reader bound to the torrent's data. All read calls block until the data requested is
+// actually available. Note that you probably want to ensure the Torrent Info is available first.
+func (t *Torrent) NewReader() Reader {
+	return t.newReader(0, t.length())
+}
+
+func (t *Torrent) newReader(offset, length int64) Reader {
+	r := reader{
+		mu:     t.cl.locker(),
+		t:      t,
+		offset: offset,
+		length: length,
+	}
+	r.readaheadFunc = defaultReadaheadFunc
+	t.addReader(&r)
+	return &r
+}
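+
+// A minimal read sketch, assuming t is a Torrent obtained from a Client:
+//
+//	<-t.GotInfo()
+//	r := t.NewReader()
+//	defer r.Close()
+//	_, err := io.Copy(io.Discard, r)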
+
+type PieceStateRuns []PieceStateRun
+
+func (me PieceStateRuns) String() (s string) {
+	if len(me) > 0 {
+		var sb strings.Builder
+		sb.WriteString(me[0].String())
+		for i := 1; i < len(me); i += 1 {
+			sb.WriteByte(' ')
+			sb.WriteString(me[i].String())
+		}
+		return sb.String()
+	}
+	return
+}
+
+// Returns the state of pieces of the torrent. They are grouped into runs of the same state. The sum of
+// the state run-lengths is the number of pieces in the torrent.
+func (t *Torrent) PieceStateRuns() (runs PieceStateRuns) {
+	t.cl.rLock()
+	runs = t.pieceStateRuns()
+	t.cl.rUnlock()
+	return
+}
+
+func (t *Torrent) PieceState(piece pieceIndex) (ps PieceState) {
+	t.cl.rLock()
+	ps = t.pieceState(piece)
+	t.cl.rUnlock()
+	return
+}
+
+// The number of pieces in the torrent. This requires that the info has been
+// obtained first.
+func (t *Torrent) NumPieces() pieceIndex {
+	return t.numPieces()
+}
+
+// Get missing bytes count for specific piece.
+func (t *Torrent) PieceBytesMissing(piece int) int64 {
+	t.cl.rLock()
+	defer t.cl.rUnlock()
+
+	return int64(t.pieces[piece].bytesLeft())
+}
+
+// Drop the torrent from the client, and close it. It's always safe to do
+// this. No data corruption can, or should occur to either the torrent's data,
+// or connected peers.
+func (t *Torrent) Drop() {
+	var wg sync.WaitGroup
+	defer wg.Wait()
+	t.cl.lock()
+	defer t.cl.unlock()
+	err := t.cl.dropTorrent(t.infoHash, &wg)
+	if err != nil {
+		panic(err)
+	}
+}
+
+// Number of bytes of the entire torrent we have completed. This is the sum of
+// completed pieces, and dirtied chunks of incomplete pieces. Do not use this
+// for download rate, as it can go down when pieces are lost or fail checks.
+// Sample Torrent.Stats.DataBytesRead for actual file data download rate.
+func (t *Torrent) BytesCompleted() int64 {
+	t.cl.rLock()
+	defer t.cl.rUnlock()
+	return t.bytesCompleted()
+}
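+
+// Sketch: a crude progress percentage, assuming the info is available:
+//
+//	pct := 100 * float64(t.BytesCompleted()) / float64(t.Length())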
+
+// The subscription emits PieceStateChange values as piece states change. A
+// state change is when the PieceState for a piece alters in value.
+func (t *Torrent) SubscribePieceStateChanges() *pubsub.Subscription[PieceStateChange] {
+	return t.pieceStateChanges.Subscribe()
+}
+
+// Returns true if the torrent is currently being seeded. This occurs when the
+// client is willing to upload without wanting anything in return.
+func (t *Torrent) Seeding() (ret bool) {
+	t.cl.rLock()
+	ret = t.seeding()
+	t.cl.rUnlock()
+	return
+}
+
+// Clobbers the torrent display name if metainfo is unavailable.
+// The display name is used as the torrent name while the metainfo is unavailable.
+func (t *Torrent) SetDisplayName(dn string) {
+	t.nameMu.Lock()
+	if !t.haveInfo() {
+		t.displayName = dn
+	}
+	t.nameMu.Unlock()
+}
+
+// The current working name for the torrent. Either the name in the info dict,
+// or a display name, such as one given by the dn value in a magnet link, or "".
+func (t *Torrent) Name() string {
+	return t.name()
+}
+
+// The completed length of all the torrent data, in all its files. This is
+// derived from the torrent info, when it is available.
+func (t *Torrent) Length() int64 {
+	return t._length.Value
+}
+
+// Returns a run-time generated metainfo for the torrent that includes the
+// info bytes and announce-list as currently known to the client.
+func (t *Torrent) Metainfo() metainfo.MetaInfo {
+	t.cl.rLock()
+	defer t.cl.rUnlock()
+	return t.newMetaInfo()
+}
+
+func (t *Torrent) addReader(r *reader) {
+	t.cl.lock()
+	defer t.cl.unlock()
+	if t.readers == nil {
+		t.readers = make(map[*reader]struct{})
+	}
+	t.readers[r] = struct{}{}
+	r.posChanged()
+}
+
+func (t *Torrent) deleteReader(r *reader) {
+	delete(t.readers, r)
+	t.readersChanged()
+}
+
+// Raise the priorities of pieces in the range [begin, end) to at least Normal
+// priority. Piece indexes are not the same as bytes. Requires that the info
+// has been obtained, see Torrent.Info and Torrent.GotInfo.
+func (t *Torrent) DownloadPieces(begin, end pieceIndex) {
+	t.cl.lock()
+	t.downloadPiecesLocked(begin, end)
+	t.cl.unlock()
+}
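+
+// Sketch: raise the priority of the first half of the torrent's pieces:
+//
+//	t.DownloadPieces(0, t.NumPieces()/2)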
+
+func (t *Torrent) downloadPiecesLocked(begin, end pieceIndex) {
+	for i := begin; i < end; i++ {
+		if t.pieces[i].priority.Raise(PiecePriorityNormal) {
+			t.updatePiecePriority(i, "Torrent.DownloadPieces")
+		}
+	}
+}
+
+func (t *Torrent) CancelPieces(begin, end pieceIndex) {
+	t.cl.lock()
+	t.cancelPiecesLocked(begin, end, "Torrent.CancelPieces")
+	t.cl.unlock()
+}
+
+func (t *Torrent) cancelPiecesLocked(begin, end pieceIndex, reason string) {
+	for i := begin; i < end; i++ {
+		p := &t.pieces[i]
+		if p.priority == PiecePriorityNone {
+			continue
+		}
+		p.priority = PiecePriorityNone
+		t.updatePiecePriority(i, reason)
+	}
+}
+
+func (t *Torrent) initFiles() {
+	var offset int64
+	t.files = new([]*File)
+	for _, fi := range t.info.UpvertedFiles() {
+		*t.files = append(*t.files, &File{
+			t,
+			strings.Join(append([]string{t.info.BestName()}, fi.BestPath()...), "/"),
+			offset,
+			fi.Length,
+			fi,
+			fi.DisplayPath(t.info),
+			PiecePriorityNone,
+		})
+		offset += fi.Length
+	}
+}
+
+// Returns handles to the files in the torrent. This requires that the Info is
+// available first.
+func (t *Torrent) Files() []*File {
+	return *t.files
+}
+
+func (t *Torrent) AddPeers(pp []PeerInfo) (n int) {
+	t.cl.lock()
+	defer t.cl.unlock()
+	n = t.addPeers(pp)
+	return
+}
+
+// Marks the entire torrent for download. Requires the info first, see
+// GotInfo. Sets piece priorities for historical reasons.
+func (t *Torrent) DownloadAll() {
+	t.DownloadPieces(0, t.numPieces())
+}
+
+func (t *Torrent) String() string {
+	s := t.name()
+	if s == "" {
+		return t.infoHash.HexString()
+	} else {
+		return strconv.Quote(s)
+	}
+}
+
+func (t *Torrent) AddTrackers(announceList [][]string) {
+	t.cl.lock()
+	defer t.cl.unlock()
+	t.addTrackers(announceList)
+}
+
+func (t *Torrent) Piece(i pieceIndex) *Piece {
+	return t.piece(i)
+}
+
+func (t *Torrent) PeerConns() []*PeerConn {
+	t.cl.rLock()
+	defer t.cl.rUnlock()
+	ret := make([]*PeerConn, 0, len(t.conns))
+	for c := range t.conns {
+		ret = append(ret, c)
+	}
+	return ret
+}
+
+func (t *Torrent) WebseedPeerConns() []*Peer {
+	t.cl.rLock()
+	defer t.cl.rUnlock()
+	ret := make([]*Peer, 0, len(t.conns))
+	for _, c := range t.webSeeds {
+		ret = append(ret, c)
+	}
+	return ret
+}
diff --git a/deps/github.com/anacrolix/torrent/test/init_test.go b/deps/github.com/anacrolix/torrent/test/init_test.go
new file mode 100644
index 0000000..b862d4b
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/test/init_test.go
@@ -0,0 +1,11 @@
+package test
+
+import (
+	"log"
+
+	_ "github.com/anacrolix/envpprof"
+)
+
+func init() {
+	log.SetFlags(log.Flags() | log.Lshortfile)
+}
diff --git a/deps/github.com/anacrolix/torrent/test/issue377_test.go b/deps/github.com/anacrolix/torrent/test/issue377_test.go
new file mode 100644
index 0000000..5b0e659
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/test/issue377_test.go
@@ -0,0 +1,183 @@
+package test
+
+import (
+	"errors"
+	"io"
+	"os"
+	"sync"
+	"testing"
+	"testing/iotest"
+
+	"github.com/anacrolix/log"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/anacrolix/torrent"
+	"github.com/anacrolix/torrent/internal/testutil"
+	"github.com/anacrolix/torrent/metainfo"
+	pp "github.com/anacrolix/torrent/peer_protocol"
+	"github.com/anacrolix/torrent/storage"
+)
+
+func justOneNetwork(cc *torrent.ClientConfig) {
+	cc.DisableTCP = true
+	cc.DisableIPv4 = true
+}
+
+func TestReceiveChunkStorageFailureSeederFastExtensionDisabled(t *testing.T) {
+	testReceiveChunkStorageFailure(t, false)
+}
+
+func TestReceiveChunkStorageFailure(t *testing.T) {
+	testReceiveChunkStorageFailure(t, true)
+}
+
+func testReceiveChunkStorageFailure(t *testing.T, seederFast bool) {
+	seederDataDir, metainfo := testutil.GreetingTestTorrent()
+	defer os.RemoveAll(seederDataDir)
+	seederClientConfig := torrent.TestingConfig(t)
+	seederClientConfig.Debug = true
+	justOneNetwork(seederClientConfig)
+	seederClientStorage := storage.NewMMap(seederDataDir)
+	defer seederClientStorage.Close()
+	seederClientConfig.DefaultStorage = seederClientStorage
+	seederClientConfig.Seed = true
+	seederClientConfig.Debug = true
+	seederClientConfig.Extensions.SetBit(pp.ExtensionBitFast, seederFast)
+	seederClient, err := torrent.NewClient(seederClientConfig)
+	require.NoError(t, err)
+	defer seederClient.Close()
+	defer testutil.ExportStatusWriter(seederClient, "s", t)()
+	leecherClientConfig := torrent.TestingConfig(t)
+	leecherClientConfig.Debug = true
+	// Don't require the fast extension, whether or not the seeder provides it (so we can test
+	// mixed cases).
+	leecherClientConfig.MinPeerExtensions.SetBit(pp.ExtensionBitFast, false)
+	justOneNetwork(leecherClientConfig)
+	leecherClient, err := torrent.NewClient(leecherClientConfig)
+	require.NoError(t, err)
+	defer leecherClient.Close()
+	defer testutil.ExportStatusWriter(leecherClient, "l", t)()
+	info, err := metainfo.UnmarshalInfo()
+	require.NoError(t, err)
+	leecherStorage := diskFullStorage{
+		pieces: make([]pieceState, info.NumPieces()),
+		data:   make([]byte, info.TotalLength()),
+	}
+	defer leecherStorage.Close()
+	leecherTorrent, new, err := leecherClient.AddTorrentSpec(&torrent.TorrentSpec{
+		InfoHash: metainfo.HashInfoBytes(),
+		Storage:  &leecherStorage,
+	})
+	leecherStorage.t = leecherTorrent
+	require.NoError(t, err)
+	assert.True(t, new)
+	seederTorrent, err := seederClient.AddTorrent(metainfo)
+	require.NoError(t, err)
+	// Tell the seeder to find the leecher. Is it guaranteed seeders will always try to do this?
+	seederTorrent.AddClientPeer(leecherClient)
+	<-leecherTorrent.GotInfo()
+	r := leecherTorrent.Files()[0].NewReader()
+	defer r.Close()
+	// We can't use assertReadAllGreeting here, because the default storage write error handler
+	// disables data downloads, which now causes Readers to error when they're blocked.
+	if false {
+		assertReadAllGreeting(t, leecherTorrent.NewReader())
+	} else {
+		for func() bool {
+			// We don't seem to need to seek, but that's probably just because the storage failure is
+			// happening on the first read.
+			r.Seek(0, io.SeekStart)
+			if err := iotest.TestReader(r, []byte(testutil.GreetingFileContents)); err != nil {
+				t.Logf("got error while reading: %v", err)
+				return true
+			}
+			return false
+		}() {
+		}
+	}
+	// TODO: Check that PeerConns fastEnabled matches seederFast?
+	// select {}
+}
+
+type pieceState struct {
+	complete bool
+}
+
+type diskFullStorage struct {
+	pieces                        []pieceState
+	t                             *torrent.Torrent
+	defaultHandledWriteChunkError bool
+	data                          []byte
+
+	mu          sync.Mutex
+	diskNotFull bool
+}
+
+func (me *diskFullStorage) Piece(p metainfo.Piece) storage.PieceImpl {
+	return pieceImpl{
+		mip:             p,
+		diskFullStorage: me,
+	}
+}
+
+func (me *diskFullStorage) Close() error {
+	return nil
+}
+
+func (d *diskFullStorage) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (storage.TorrentImpl, error) {
+	return storage.TorrentImpl{Piece: d.Piece, Close: d.Close}, nil
+}
+
+type pieceImpl struct {
+	mip metainfo.Piece
+	*diskFullStorage
+}
+
+func (me pieceImpl) state() *pieceState {
+	return &me.diskFullStorage.pieces[me.mip.Index()]
+}
+
+func (me pieceImpl) ReadAt(p []byte, off int64) (n int, err error) {
+	off += me.mip.Offset()
+	return copy(p, me.data[off:]), nil
+}
+
+func (me pieceImpl) WriteAt(p []byte, off int64) (int, error) {
+	off += me.mip.Offset()
+	if !me.defaultHandledWriteChunkError {
+		go func() {
+			me.t.SetOnWriteChunkError(func(err error) {
+				log.Printf("got write chunk error to custom handler: %v", err)
+				me.mu.Lock()
+				me.diskNotFull = true
+				me.mu.Unlock()
+				me.t.AllowDataDownload()
+			})
+			me.t.AllowDataDownload()
+		}()
+		me.defaultHandledWriteChunkError = true
+	}
+	me.mu.Lock()
+	defer me.mu.Unlock()
+	if me.diskNotFull {
+		return copy(me.data[off:], p), nil
+	}
+	return copy(me.data[off:], p[:1]), errors.New("disk full")
+}
+
+func (me pieceImpl) MarkComplete() error {
+	me.state().complete = true
+	return nil
+}
+
+func (me pieceImpl) MarkNotComplete() error {
+	panic("implement me")
+}
+
+func (me pieceImpl) Completion() storage.Completion {
+	return storage.Completion{
+		Complete: me.state().complete,
+		Ok:       true,
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/test/leecher-storage.go b/deps/github.com/anacrolix/torrent/test/leecher-storage.go
new file mode 100644
index 0000000..a60f077
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/test/leecher-storage.go
@@ -0,0 +1,255 @@
+package test
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"testing"
+	"testing/iotest"
+
+	"github.com/anacrolix/missinggo/v2/bitmap"
+	"github.com/frankban/quicktest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/time/rate"
+
+	"github.com/anacrolix/torrent"
+	"github.com/anacrolix/torrent/internal/testutil"
+	"github.com/anacrolix/torrent/storage"
+)
+
+type LeecherStorageTestCase struct {
+	Name       string
+	Factory    StorageFactory
+	GoMaxProcs int
+}
+
+type StorageFactory func(string) storage.ClientImplCloser
+
+func TestLeecherStorage(t *testing.T, ls LeecherStorageTestCase) {
+	// Seeder storage
+	for _, ss := range []struct {
+		name string
+		f    StorageFactory
+	}{
+		{"File", storage.NewFile},
+		{"Mmap", storage.NewMMap},
+	} {
+		t.Run(fmt.Sprintf("%sSeederStorage", ss.name), func(t *testing.T) {
+			for _, responsive := range []bool{false, true} {
+				t.Run(fmt.Sprintf("Responsive=%v", responsive), func(t *testing.T) {
+					t.Run("NoReadahead", func(t *testing.T) {
+						testClientTransfer(t, testClientTransferParams{
+							Responsive:     responsive,
+							SeederStorage:  ss.f,
+							LeecherStorage: ls.Factory,
+							GOMAXPROCS:     ls.GoMaxProcs,
+						})
+					})
+					for _, readahead := range []int64{-1, 0, 1, 2, 9, 20} {
+						t.Run(fmt.Sprintf("readahead=%v", readahead), func(t *testing.T) {
+							testClientTransfer(t, testClientTransferParams{
+								SeederStorage:  ss.f,
+								Responsive:     responsive,
+								SetReadahead:   true,
+								Readahead:      readahead,
+								LeecherStorage: ls.Factory,
+								GOMAXPROCS:     ls.GoMaxProcs,
+							})
+						})
+					}
+				})
+			}
+		})
+	}
+}
+
+type ConfigureClient struct {
+	Config func(cfg *torrent.ClientConfig)
+	Client func(cl *torrent.Client)
+}
+
+type testClientTransferParams struct {
+	Responsive     bool
+	Readahead      int64
+	SetReadahead   bool
+	LeecherStorage func(string) storage.ClientImplCloser
+	// TODO: Use a generic option type. This is the capacity of the leecher storage for determining
+	// whether it's possible for the leecher to be Complete. 0 currently means no limit.
+	LeecherStorageCapacity     int64
+	SeederStorage              func(string) storage.ClientImplCloser
+	SeederUploadRateLimiter    *rate.Limiter
+	LeecherDownloadRateLimiter *rate.Limiter
+	ConfigureSeeder            ConfigureClient
+	ConfigureLeecher           ConfigureClient
+	GOMAXPROCS                 int
+
+	LeecherStartsWithoutMetadata bool
+}
+
+// Creates a seeder and a leecher, and ensures the data transfers when a read
+// is attempted on the leecher.
+func testClientTransfer(t *testing.T, ps testClientTransferParams) {
+	prevGOMAXPROCS := runtime.GOMAXPROCS(ps.GOMAXPROCS)
+	newGOMAXPROCS := prevGOMAXPROCS
+	if ps.GOMAXPROCS > 0 {
+		newGOMAXPROCS = ps.GOMAXPROCS
+	}
+	defer func() {
+		quicktest.Check(t, runtime.GOMAXPROCS(prevGOMAXPROCS), quicktest.ContentEquals, newGOMAXPROCS)
+	}()
+
+	greetingTempDir, mi := testutil.GreetingTestTorrent()
+	defer os.RemoveAll(greetingTempDir)
+	// Create seeder and a Torrent.
+	cfg := torrent.TestingConfig(t)
+	// cfg.Debug = true
+	cfg.Seed = true
+	// Less than a piece, more than a single request.
+	cfg.MaxAllocPeerRequestDataPerConn = 4
+	// Some test instances don't like this being on, even when there's no cache involved.
+	cfg.DropMutuallyCompletePeers = false
+	if ps.SeederUploadRateLimiter != nil {
+		cfg.UploadRateLimiter = ps.SeederUploadRateLimiter
+	}
+	// cfg.ListenAddr = "localhost:4000"
+	if ps.SeederStorage != nil {
+		storage := ps.SeederStorage(greetingTempDir)
+		defer storage.Close()
+		cfg.DefaultStorage = storage
+	} else {
+		cfg.DataDir = greetingTempDir
+	}
+	if ps.ConfigureSeeder.Config != nil {
+		ps.ConfigureSeeder.Config(cfg)
+	}
+	seeder, err := torrent.NewClient(cfg)
+	require.NoError(t, err)
+	if ps.ConfigureSeeder.Client != nil {
+		ps.ConfigureSeeder.Client(seeder)
+	}
+	defer testutil.ExportStatusWriter(seeder, "s", t)()
+	seederTorrent, _, _ := seeder.AddTorrentSpec(torrent.TorrentSpecFromMetaInfo(mi))
+	// Run a Stats right after Closing the Client. This will trigger the Stats
+	// panic in #214 caused by RemoteAddr on Closed uTP sockets.
+	defer seederTorrent.Stats()
+	defer seeder.Close()
+	// Adding a torrent and setting the info should trigger piece checks for everything
+	// automatically. Wait until the seed Torrent agrees that everything is available.
+	<-seederTorrent.Complete.On()
+	// Create leecher and a Torrent.
+	leecherDataDir := t.TempDir()
+	cfg = torrent.TestingConfig(t)
+	// See the seeder client config comment.
+	cfg.DropMutuallyCompletePeers = false
+	if ps.LeecherStorage == nil {
+		cfg.DataDir = leecherDataDir
+	} else {
+		storage := ps.LeecherStorage(leecherDataDir)
+		defer storage.Close()
+		cfg.DefaultStorage = storage
+	}
+	if ps.LeecherDownloadRateLimiter != nil {
+		cfg.DownloadRateLimiter = ps.LeecherDownloadRateLimiter
+	}
+	cfg.Seed = false
+	// cfg.Debug = true
+	if ps.ConfigureLeecher.Config != nil {
+		ps.ConfigureLeecher.Config(cfg)
+	}
+	leecher, err := torrent.NewClient(cfg)
+	require.NoError(t, err)
+	defer leecher.Close()
+	if ps.ConfigureLeecher.Client != nil {
+		ps.ConfigureLeecher.Client(leecher)
+	}
+	defer testutil.ExportStatusWriter(leecher, "l", t)()
+	leecherTorrent, new, err := leecher.AddTorrentSpec(func() (ret *torrent.TorrentSpec) {
+		ret = torrent.TorrentSpecFromMetaInfo(mi)
+		ret.ChunkSize = 2
+		if ps.LeecherStartsWithoutMetadata {
+			ret.InfoBytes = nil
+		}
+		return
+	}())
+	require.NoError(t, err)
+	assert.False(t, leecherTorrent.Complete.Bool())
+	assert.True(t, new)
+
+	//// This was used when observing coalescing of piece state changes.
+	//logPieceStateChanges(leecherTorrent)
+
+	// Now do some things with leecher and seeder.
+	added := leecherTorrent.AddClientPeer(seeder)
+	assert.False(t, leecherTorrent.Seeding())
+	// The leecher will use peers immediately if it doesn't have the metadata. Otherwise, they
+	// should be sitting idle until we demand data.
+	if !ps.LeecherStartsWithoutMetadata {
+		assert.EqualValues(t, added, leecherTorrent.Stats().PendingPeers)
+	}
+	if ps.LeecherStartsWithoutMetadata {
+		<-leecherTorrent.GotInfo()
+	}
+	r := leecherTorrent.NewReader()
+	defer r.Close()
+	go leecherTorrent.SetInfoBytes(mi.InfoBytes)
+	if ps.Responsive {
+		r.SetResponsive()
+	}
+	if ps.SetReadahead {
+		r.SetReadahead(ps.Readahead)
+	}
+	assertReadAllGreeting(t, r)
+	info, err := mi.UnmarshalInfo()
+	require.NoError(t, err)
+	canComplete := ps.LeecherStorageCapacity == 0 || ps.LeecherStorageCapacity >= info.TotalLength()
+	if !canComplete {
+		// Reading from a cache doesn't refresh older pieces until we fail to read those, so we need
+		// to force a refresh since we just read the contents from start to finish.
+		go leecherTorrent.VerifyData()
+	}
+	if canComplete {
+		<-leecherTorrent.Complete.On()
+	} else {
+		<-leecherTorrent.Complete.Off()
+	}
+	assert.NotEmpty(t, seederTorrent.PeerConns())
+	leecherPeerConns := leecherTorrent.PeerConns()
+	if cfg.DropMutuallyCompletePeers {
+		// I don't think we can assume it will be empty already, due to timing.
+		// assert.Empty(t, leecherPeerConns)
+	} else {
+		assert.NotEmpty(t, leecherPeerConns)
+	}
+	foundSeeder := false
+	for _, pc := range leecherPeerConns {
+		completed := pc.PeerPieces().GetCardinality()
+		t.Logf("peer conn %v has %v completed pieces", pc, completed)
+		if completed == bitmap.BitRange(leecherTorrent.Info().NumPieces()) {
+			foundSeeder = true
+		}
+	}
+	if !foundSeeder {
+		t.Errorf("didn't find seeder amongst leecher peer conns")
+	}
+
+	seederStats := seederTorrent.Stats()
+	assert.True(t, 13 <= seederStats.BytesWrittenData.Int64())
+	assert.True(t, 8 <= seederStats.ChunksWritten.Int64())
+
+	leecherStats := leecherTorrent.Stats()
+	assert.True(t, 13 <= leecherStats.BytesReadData.Int64())
+	assert.True(t, 8 <= leecherStats.ChunksRead.Int64())
+
+	// Try reading through again for the cases where the torrent data size
+	// exceeds the size of the cache.
+	assertReadAllGreeting(t, r)
+}
+
+func assertReadAllGreeting(t *testing.T, r io.ReadSeeker) {
+	pos, err := r.Seek(0, io.SeekStart)
+	assert.NoError(t, err)
+	assert.EqualValues(t, 0, pos)
+	quicktest.Check(t, iotest.TestReader(r, []byte(testutil.GreetingFileContents)), quicktest.IsNil)
+}
diff --git a/deps/github.com/anacrolix/torrent/test/sqlite_test.go b/deps/github.com/anacrolix/torrent/test/sqlite_test.go
new file mode 100644
index 0000000..437499d
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/test/sqlite_test.go
@@ -0,0 +1,68 @@
+// This infernal language makes me copy conditional compilation expressions around. This test should
+// run if sqlite storage is enabled, period.
+
+//go:build cgo
+// +build cgo
+
+package test
+
+import (
+	"net"
+	"net/http"
+	"testing"
+
+	qt "github.com/frankban/quicktest"
+
+	"github.com/anacrolix/torrent"
+	"github.com/anacrolix/torrent/bencode"
+	"github.com/anacrolix/torrent/metainfo"
+	sqliteStorage "github.com/anacrolix/torrent/storage/sqlite"
+)
+
+func TestSqliteStorageClosed(t *testing.T) {
+	c := qt.New(t)
+	cfg := torrent.TestingConfig(t)
+	storage, err := sqliteStorage.NewDirectStorage(sqliteStorage.NewDirectStorageOpts{})
+	defer storage.Close()
+	cfg.DefaultStorage = storage
+	cfg.Debug = true
+	c.Assert(err, qt.IsNil)
+	cl, err := torrent.NewClient(cfg)
+	c.Assert(err, qt.IsNil)
+	defer cl.Close()
+	l, err := net.Listen("tcp", "localhost:0")
+	c.Assert(err, qt.IsNil)
+	defer l.Close()
+	// We need at least one piece to trigger a call to storage to determine completion state. We
+	// need non-zero content length to trigger piece hashing.
+	i := metainfo.Info{
+		Pieces:      make([]byte, metainfo.HashSize),
+		PieceLength: 1,
+		Files: []metainfo.FileInfo{
+			{Length: 1},
+		},
+	}
+	mi := metainfo.MetaInfo{}
+	mi.InfoBytes, err = bencode.Marshal(i)
+	c.Assert(err, qt.IsNil)
+	s := http.Server{
+		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			mi.Write(w)
+		}),
+	}
+	defer s.Close()
+	go func() {
+		err := s.Serve(l)
+		if err != http.ErrServerClosed {
+			panic(err)
+		}
+	}()
+	// Close storage prematurely.
+	storage.Close()
+	tor, _, err := cl.AddTorrentSpec(&torrent.TorrentSpec{
+		InfoHash: mi.HashInfoBytes(),
+		Sources:  []string{"http://" + l.Addr().String()},
+	})
+	c.Assert(err, qt.IsNil)
+	<-tor.GotInfo()
+}
diff --git a/deps/github.com/anacrolix/torrent/test/transfer_test.go b/deps/github.com/anacrolix/torrent/test/transfer_test.go
new file mode 100644
index 0000000..501872f
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/test/transfer_test.go
@@ -0,0 +1,226 @@
+package test
+
+import (
+	"io"
+	"os"
+	"sync"
+	"testing"
+	"testing/iotest"
+	"time"
+
+	"github.com/anacrolix/log"
+	"github.com/anacrolix/missinggo/v2/filecache"
+	qt "github.com/frankban/quicktest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/time/rate"
+
+	"github.com/anacrolix/torrent"
+	"github.com/anacrolix/torrent/internal/testutil"
+	"github.com/anacrolix/torrent/storage"
+)
+
+type fileCacheClientStorageFactoryParams struct {
+	Capacity    int64
+	SetCapacity bool
+}
+
+func newFileCacheClientStorageFactory(ps fileCacheClientStorageFactoryParams) StorageFactory {
+	return func(dataDir string) storage.ClientImplCloser {
+		fc, err := filecache.NewCache(dataDir)
+		if err != nil {
+			panic(err)
+		}
+		var sharedCapacity *int64
+		if ps.SetCapacity {
+			sharedCapacity = &ps.Capacity
+			fc.SetCapacity(ps.Capacity)
+		}
+		return struct {
+			storage.ClientImpl
+			io.Closer
+		}{
+			storage.NewResourcePiecesOpts(
+				fc.AsResourceProvider(),
+				storage.ResourcePiecesOpts{
+					Capacity: sharedCapacity,
+				}),
+			io.NopCloser(nil),
+		}
+	}
+}
+
+func TestClientTransferDefault(t *testing.T) {
+	testClientTransfer(t, testClientTransferParams{
+		LeecherStorage: newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{}),
+	})
+}
+
+func TestClientTransferDefaultNoMetadata(t *testing.T) {
+	testClientTransfer(t, testClientTransferParams{
+		LeecherStorage:               newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{}),
+		LeecherStartsWithoutMetadata: true,
+	})
+}
+
+func TestClientTransferRateLimitedUpload(t *testing.T) {
+	started := time.Now()
+	testClientTransfer(t, testClientTransferParams{
+		// We are uploading 13 bytes (the length of the greeting torrent). The
+		// chunks are 2 bytes in length. Then the smallest burst we can run
+		// with is 2. Time taken is (13-burst)/rate: (13-2)/11 = 1s, hence the
+		// assertion below that the transfer took longer than a second.
+		SeederUploadRateLimiter: rate.NewLimiter(11, 2),
+	})
+	require.True(t, time.Since(started) > time.Second)
+}
+
+func TestClientTransferRateLimitedDownload(t *testing.T) {
+	testClientTransfer(t, testClientTransferParams{
+		LeecherDownloadRateLimiter: rate.NewLimiter(512, 512),
+		ConfigureSeeder: ConfigureClient{
+			Config: func(cfg *torrent.ClientConfig) {
+				// If we send too many keep-alives, we consume all the leecher's available download
+				// rate. The default isn't exposed, but a minute is pretty reasonable.
+				cfg.KeepAliveTimeout = time.Minute
+			},
+		},
+	})
+}
+
+func testClientTransferSmallCache(t *testing.T, setReadahead bool, readahead int64) {
+	testClientTransfer(t, testClientTransferParams{
+		LeecherStorage: newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
+			SetCapacity: true,
+			// Going below the piece length means the cache can never hold a
+			// complete piece, so no piece could be hashed.
+			Capacity: 5,
+		}),
+		LeecherStorageCapacity: 5,
+		SetReadahead:           setReadahead,
+		// Can't readahead too far or the cache will thrash and drop data we
+		// thought we had.
+		Readahead: readahead,
+
+		// These tests don't work well with more than 1 connection to the seeder.
+		ConfigureLeecher: ConfigureClient{
+			Config: func(cfg *torrent.ClientConfig) {
+				cfg.DropDuplicatePeerIds = true
+				// cfg.DisableIPv6 = true
+				// cfg.DisableUTP = true
+			},
+		},
+	})
+}
+
+func TestClientTransferSmallCachePieceSizedReadahead(t *testing.T) {
+	testClientTransferSmallCache(t, true, 5)
+}
+
+func TestClientTransferSmallCacheLargeReadahead(t *testing.T) {
+	testClientTransferSmallCache(t, true, 15)
+}
+
+func TestClientTransferSmallCacheDefaultReadahead(t *testing.T) {
+	testClientTransferSmallCache(t, false, -1)
+}
+
+func TestFilecacheClientTransferVarious(t *testing.T) {
+	TestLeecherStorage(t, LeecherStorageTestCase{
+		"Filecache", newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{}), 0,
+	})
+}
+
+// Check that after completing leeching, a leecher transitions to seeding
+// correctly. Connected in a chain like so: Seeder <-> Leecher <-> LeecherLeecher.
+func testSeedAfterDownloading(t *testing.T, disableUtp bool) {
+	greetingTempDir, mi := testutil.GreetingTestTorrent()
+	defer os.RemoveAll(greetingTempDir)
+
+	cfg := torrent.TestingConfig(t)
+	cfg.Seed = true
+	cfg.MaxAllocPeerRequestDataPerConn = 4
+	cfg.DataDir = greetingTempDir
+	cfg.DisableUTP = disableUtp
+	seeder, err := torrent.NewClient(cfg)
+	require.NoError(t, err)
+	defer seeder.Close()
+	defer testutil.ExportStatusWriter(seeder, "s", t)()
+	seederTorrent, ok, err := seeder.AddTorrentSpec(torrent.TorrentSpecFromMetaInfo(mi))
+	require.NoError(t, err)
+	assert.True(t, ok)
+	seederTorrent.VerifyData()
+
+	cfg = torrent.TestingConfig(t)
+	cfg.Seed = true
+	cfg.DataDir = t.TempDir()
+	cfg.DisableUTP = disableUtp
+	// Make sure the leecher-leecher doesn't connect directly to the seeder. This is because I
+	// wanted to see if having the higher chunk-sized leecher-leecher would cause the leecher to
+	// fail decoding. However, it shouldn't, because a client should only be receiving pieces sized
+	// to the chunk size it expects.
+	cfg.DisablePEX = true
+	//cfg.Debug = true
+	cfg.Logger = log.Default.WithContextText("leecher")
+	leecher, err := torrent.NewClient(cfg)
+	require.NoError(t, err)
+	defer leecher.Close()
+	defer testutil.ExportStatusWriter(leecher, "l", t)()
+
+	cfg = torrent.TestingConfig(t)
+	cfg.DisableUTP = disableUtp
+	cfg.Seed = false
+	cfg.DataDir = t.TempDir()
+	cfg.MaxAllocPeerRequestDataPerConn = 4
+	cfg.Logger = log.Default.WithContextText("leecher-leecher")
+	cfg.Debug = true
+	leecherLeecher, err := torrent.NewClient(cfg)
+	require.NoError(t, err)
+	defer leecherLeecher.Close()
+	defer testutil.ExportStatusWriter(leecherLeecher, "ll", t)()
+	leecherGreeting, ok, err := leecher.AddTorrentSpec(func() (ret *torrent.TorrentSpec) {
+		ret = torrent.TorrentSpecFromMetaInfo(mi)
+		ret.ChunkSize = 2
+		return
+	}())
+	require.NoError(t, err)
+	assert.True(t, ok)
+	llg, ok, err := leecherLeecher.AddTorrentSpec(func() (ret *torrent.TorrentSpec) {
+		ret = torrent.TorrentSpecFromMetaInfo(mi)
+		ret.ChunkSize = 3
+		return
+	}())
+	require.NoError(t, err)
+	assert.True(t, ok)
+	// Simultaneously DownloadAll in Leecher, and read the contents
+	// consecutively in LeecherLeecher. This non-deterministically triggered a
+	// case where the leecher wouldn't unchoke the LeecherLeecher.
+	var wg sync.WaitGroup
+	{
+		// Prioritize a region, and ensure it's been hashed, so we want connections.
+		r := llg.NewReader()
+		llg.VerifyData()
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			defer r.Close()
+			qt.Check(t, iotest.TestReader(r, []byte(testutil.GreetingFileContents)), qt.IsNil)
+		}()
+	}
+	go leecherGreeting.AddClientPeer(seeder)
+	go leecherGreeting.AddClientPeer(leecherLeecher)
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		leecherGreeting.DownloadAll()
+		leecher.WaitAll()
+	}()
+	wg.Wait()
+}
+
+func TestSeedAfterDownloadingDisableUtp(t *testing.T) {
+	testSeedAfterDownloading(t, true)
+}
+
+func TestSeedAfterDownloadingAllowUtp(t *testing.T) {
+	testSeedAfterDownloading(t, false)
+}
diff --git a/deps/github.com/anacrolix/torrent/test/unix_test.go b/deps/github.com/anacrolix/torrent/test/unix_test.go
new file mode 100644
index 0000000..e4ffa7e
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/test/unix_test.go
@@ -0,0 +1,42 @@
+package test
+
+import (
+	"io"
+	"log"
+	"net"
+	"path/filepath"
+	"testing"
+
+	"github.com/anacrolix/torrent"
+	"github.com/anacrolix/torrent/dialer"
+)
+
+func TestUnixConns(t *testing.T) {
+	var closers []io.Closer
+	defer func() {
+		for _, c := range closers {
+			c.Close()
+		}
+	}()
+	configure := ConfigureClient{
+		Config: func(cfg *torrent.ClientConfig) {
+			cfg.DisableUTP = true
+			cfg.DisableTCP = true
+			cfg.Debug = true
+		},
+		Client: func(cl *torrent.Client) {
+			cl.AddDialer(torrent.NetworkDialer{Network: "unix", Dialer: dialer.Default})
+			l, err := net.Listen("unix", filepath.Join(t.TempDir(), "socket"))
+			if err != nil {
+				panic(err)
+			}
+			log.Printf("created listener %q", l)
+			closers = append(closers, l)
+			cl.AddListener(l)
+		},
+	}
+	testClientTransfer(t, testClientTransferParams{
+		ConfigureSeeder:  configure,
+		ConfigureLeecher: configure,
+	})
+}
diff --git a/deps/github.com/anacrolix/torrent/test_test.go b/deps/github.com/anacrolix/torrent/test_test.go
new file mode 100644
index 0000000..6babc91
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/test_test.go
@@ -0,0 +1,23 @@
+package torrent
+
+// Helpers for testing
+
+import (
+	"testing"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+func newTestingClient(t testing.TB) *Client {
+	cl := new(Client)
+	cl.init(TestingConfig(t))
+	t.Cleanup(func() {
+		cl.Close()
+	})
+	cl.initLogger()
+	return cl
+}
+
+func (cl *Client) newTorrentForTesting() *Torrent {
+	return cl.newTorrent(metainfo.Hash{}, nil)
+}
diff --git a/deps/github.com/anacrolix/torrent/testdata/The WIRED CD - Rip. Sample. Mash. Share.torrent b/deps/github.com/anacrolix/torrent/testdata/The WIRED CD - Rip. Sample. Mash. Share.torrent
new file mode 100644
index 0000000..e5acba5
Binary files /dev/null and b/deps/github.com/anacrolix/torrent/testdata/The WIRED CD - Rip. Sample. Mash. Share.torrent differ
diff --git a/deps/github.com/anacrolix/torrent/testdata/The-Fanimatrix-(DivX-5.1-HQ).avi.torrent b/deps/github.com/anacrolix/torrent/testdata/The-Fanimatrix-(DivX-5.1-HQ).avi.torrent
new file mode 100644
index 0000000..06a1054
Binary files /dev/null and b/deps/github.com/anacrolix/torrent/testdata/The-Fanimatrix-(DivX-5.1-HQ).avi.torrent differ
diff --git a/deps/github.com/anacrolix/torrent/testdata/bootstrap.dat.torrent b/deps/github.com/anacrolix/torrent/testdata/bootstrap.dat.torrent
new file mode 100644
index 0000000..e5cdeb7
Binary files /dev/null and b/deps/github.com/anacrolix/torrent/testdata/bootstrap.dat.torrent differ
diff --git a/deps/github.com/anacrolix/torrent/testdata/debian-10.8.0-amd64-netinst.iso.torrent b/deps/github.com/anacrolix/torrent/testdata/debian-10.8.0-amd64-netinst.iso.torrent
new file mode 100644
index 0000000..24e6728
Binary files /dev/null and b/deps/github.com/anacrolix/torrent/testdata/debian-10.8.0-amd64-netinst.iso.torrent differ
diff --git a/deps/github.com/anacrolix/torrent/testdata/debian-9.1.0-amd64-netinst.iso.torrent b/deps/github.com/anacrolix/torrent/testdata/debian-9.1.0-amd64-netinst.iso.torrent
new file mode 100644
index 0000000..bb4b102
Binary files /dev/null and b/deps/github.com/anacrolix/torrent/testdata/debian-9.1.0-amd64-netinst.iso.torrent differ
diff --git a/deps/github.com/anacrolix/torrent/testdata/sintel.torrent b/deps/github.com/anacrolix/torrent/testdata/sintel.torrent
new file mode 100644
index 0000000..c3775de
Binary files /dev/null and b/deps/github.com/anacrolix/torrent/testdata/sintel.torrent differ
diff --git a/deps/github.com/anacrolix/torrent/testing.go b/deps/github.com/anacrolix/torrent/testing.go
new file mode 100644
index 0000000..6fb5411
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/testing.go
@@ -0,0 +1,37 @@
+package torrent
+
+import (
+	"testing"
+	"time"
+
+	"github.com/anacrolix/log"
+
+	pp "github.com/anacrolix/torrent/peer_protocol"
+)
+
+func TestingConfig(t testing.TB) *ClientConfig {
+	cfg := NewDefaultClientConfig()
+	cfg.ListenHost = LoopbackListenHost
+	cfg.NoDHT = true
+	cfg.DataDir = t.TempDir()
+	cfg.DisableTrackers = true
+	cfg.NoDefaultPortForwarding = true
+	cfg.DisableAcceptRateLimiting = true
+	cfg.ListenPort = 0
+	cfg.KeepAliveTimeout = time.Millisecond
+	cfg.MinPeerExtensions.SetBit(pp.ExtensionBitFast, true)
+	cfg.Logger = log.Default.WithContextText(t.Name())
+	// 2 would suffice for the greeting test, but 5 is needed for a few other tests. This should be
+	// something slightly higher than the usual chunk size, so it gets tickled in some tests.
+	cfg.MaxAllocPeerRequestDataPerConn = 5
+	//cfg.Debug = true
+	//cfg.Logger = cfg.Logger.WithText(func(m log.Msg) string {
+	//	t := m.Text()
+	//	m.Values(func(i interface{}) bool {
+	//		t += fmt.Sprintf("\n%[1]T: %[1]v", i)
+	//		return true
+	//	})
+	//	return t
+	//})
+	return cfg
+}
diff --git a/deps/github.com/anacrolix/torrent/tests/issue-798/main.go b/deps/github.com/anacrolix/torrent/tests/issue-798/main.go
new file mode 100644
index 0000000..23f6be3
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tests/issue-798/main.go
@@ -0,0 +1,19 @@
+package main
+
+import (
+	"fmt"
+
+	"github.com/anacrolix/torrent"
+)
+
+func main() {
+	config := torrent.NewDefaultClientConfig()
+	config.DataDir = "./output"
+	c, _ := torrent.NewClient(config)
+	defer c.Close()
+	t, _ := c.AddMagnet("magnet:?xt=urn:btih:99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1&tr=https%3A%2F%2Ftorrent.ubuntu.com%2Fannounce&tr=https%3A%2F%2Fipv6.torrent.ubuntu.com%2Fannounce")
+	<-t.GotInfo()
+	fmt.Println("start downloading")
+	t.DownloadAll()
+	c.WaitAll()
+}
diff --git a/deps/github.com/anacrolix/torrent/torrent-piece-request-order.go b/deps/github.com/anacrolix/torrent/torrent-piece-request-order.go
new file mode 100644
index 0000000..10623da
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/torrent-piece-request-order.go
@@ -0,0 +1,65 @@
+package torrent
+
+import (
+	request_strategy "github.com/anacrolix/torrent/request-strategy"
+)
+
+func (t *Torrent) updatePieceRequestOrder(pieceIndex int) {
+	if t.storage == nil {
+		return
+	}
+	if ro, ok := t.cl.pieceRequestOrder[t.clientPieceRequestOrderKey()]; ok {
+		ro.Update(
+			t.pieceRequestOrderKey(pieceIndex),
+			t.requestStrategyPieceOrderState(pieceIndex))
+	}
+}
+
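+// Torrents that share a storage capacity share a single piece request order,
+// keyed by the shared capacity pointer; otherwise each torrent keys its own
+// order by its Torrent pointer.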
+func (t *Torrent) clientPieceRequestOrderKey() interface{} {
+	if t.storage.Capacity == nil {
+		return t
+	}
+	return t.storage.Capacity
+}
+
+func (t *Torrent) deletePieceRequestOrder() {
+	if t.storage == nil {
+		return
+	}
+	cpro := t.cl.pieceRequestOrder
+	key := t.clientPieceRequestOrderKey()
+	pro := cpro[key]
+	for i := 0; i < t.numPieces(); i++ {
+		pro.Delete(t.pieceRequestOrderKey(i))
+	}
+	if pro.Len() == 0 {
+		delete(cpro, key)
+	}
+}
+
+func (t *Torrent) initPieceRequestOrder() {
+	if t.storage == nil {
+		return
+	}
+	if t.cl.pieceRequestOrder == nil {
+		t.cl.pieceRequestOrder = make(map[interface{}]*request_strategy.PieceRequestOrder)
+	}
+	key := t.clientPieceRequestOrderKey()
+	cpro := t.cl.pieceRequestOrder
+	if cpro[key] == nil {
+		cpro[key] = request_strategy.NewPieceOrder(request_strategy.NewAjwernerBtree(), t.numPieces())
+	}
+}
+
+func (t *Torrent) addRequestOrderPiece(i int) {
+	if t.storage == nil {
+		return
+	}
+	t.cl.pieceRequestOrder[t.clientPieceRequestOrderKey()].Add(
+		t.pieceRequestOrderKey(i),
+		t.requestStrategyPieceOrderState(i))
+}
+
+func (t *Torrent) getPieceRequestOrder() *request_strategy.PieceRequestOrder {
+	return t.cl.pieceRequestOrder[t.clientPieceRequestOrderKey()]
+}
diff --git a/deps/github.com/anacrolix/torrent/torrent-stats.go b/deps/github.com/anacrolix/torrent/torrent-stats.go
new file mode 100644
index 0000000..0dd58ad
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/torrent-stats.go
@@ -0,0 +1,17 @@
+package torrent
+
+// Due to ConnStats, may require special alignment on some platforms. See
+// https://github.com/anacrolix/torrent/issues/383.
+type TorrentStats struct {
+	// Aggregates stats over all connections past and present. Some values may not have much meaning
+	// in the aggregate context.
+	ConnStats
+
+	// Ordered by expected descending quantities (if all is well).
+	TotalPeers       int
+	PendingPeers     int
+	ActivePeers      int
+	ConnectedSeeders int
+	HalfOpenPeers    int
+	PiecesComplete   int
+}
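+
+// Sketch: sampling transfer progress from the stats (BytesReadData is the
+// counter for actual file data, per the note on Torrent.BytesCompleted):
+//
+//	stats := t.Stats()
+//	log.Printf("peers=%d data-read=%d", stats.ActivePeers, stats.BytesReadData.Int64())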
diff --git a/deps/github.com/anacrolix/torrent/torrent.go b/deps/github.com/anacrolix/torrent/torrent.go
new file mode 100644
index 0000000..0b1baba
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/torrent.go
@@ -0,0 +1,2919 @@
+package torrent
+
+import (
+	"bytes"
+	"container/heap"
+	"context"
+	"crypto/sha1"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"net/netip"
+	"net/url"
+	"sort"
+	"strings"
+	"text/tabwriter"
+	"time"
+	"unsafe"
+
+	"github.com/RoaringBitmap/roaring"
+	"github.com/anacrolix/chansync"
+	"github.com/anacrolix/chansync/events"
+	"github.com/anacrolix/dht/v2"
+	. "github.com/anacrolix/generics"
+	g "github.com/anacrolix/generics"
+	"github.com/anacrolix/log"
+	"github.com/anacrolix/missinggo/perf"
+	"github.com/anacrolix/missinggo/slices"
+	"github.com/anacrolix/missinggo/v2"
+	"github.com/anacrolix/missinggo/v2/bitmap"
+	"github.com/anacrolix/missinggo/v2/pubsub"
+	"github.com/anacrolix/multiless"
+	"github.com/anacrolix/sync"
+	"github.com/pion/datachannel"
+	"golang.org/x/exp/maps"
+
+	"github.com/anacrolix/torrent/bencode"
+	"github.com/anacrolix/torrent/common"
+	"github.com/anacrolix/torrent/internal/check"
+	"github.com/anacrolix/torrent/internal/nestedmaps"
+	"github.com/anacrolix/torrent/metainfo"
+	pp "github.com/anacrolix/torrent/peer_protocol"
+	utHolepunch "github.com/anacrolix/torrent/peer_protocol/ut-holepunch"
+	request_strategy "github.com/anacrolix/torrent/request-strategy"
+	"github.com/anacrolix/torrent/segments"
+	"github.com/anacrolix/torrent/storage"
+	"github.com/anacrolix/torrent/tracker"
+	typedRoaring "github.com/anacrolix/torrent/typed-roaring"
+	"github.com/anacrolix/torrent/webseed"
+	"github.com/anacrolix/torrent/webtorrent"
+)
+
+// Maintains the state of a torrent within a Client. Many methods should not be called before the info is
+// available, see .Info and .GotInfo.
+type Torrent struct {
+	// Torrent-level aggregate statistics. First in struct to ensure 64-bit
+	// alignment. See #262.
+	stats  ConnStats
+	cl     *Client
+	logger log.Logger
+
+	networkingEnabled      chansync.Flag
+	dataDownloadDisallowed chansync.Flag
+	dataUploadDisallowed   bool
+	userOnWriteChunkErr    func(error)
+
+	closed   chansync.SetOnce
+	onClose  []func()
+	infoHash metainfo.Hash
+	pieces   []Piece
+
+	// The order pieces are requested if there's no stronger reason like availability or priority.
+	pieceRequestOrder []int
+	// Values are the piece indices that changed.
+	pieceStateChanges pubsub.PubSub[PieceStateChange]
+	// The size of chunks to request from peers over the wire. This is
+	// normally 16KiB by convention these days.
+	chunkSize pp.Integer
+	chunkPool sync.Pool
+	// Total length of the torrent in bytes. Stored because it's not O(1) to
+	// get this from the info dict.
+	_length Option[int64]
+
+	// The storage to open when the info dict becomes available.
+	storageOpener *storage.Client
+	// Storage for torrent data.
+	storage *storage.Torrent
+	// Read-locked for using storage, and write-locked for Closing.
+	storageLock sync.RWMutex
+
+	// TODO: Only announce stuff is used?
+	metainfo metainfo.MetaInfo
+
+	// The info dict. nil if we don't have it (yet).
+	info      *metainfo.Info
+	fileIndex segments.Index
+	files     *[]*File
+
+	_chunksPerRegularPiece chunkIndexType
+
+	webSeeds map[string]*Peer
+	// Active peer connections, running message stream loops. TODO: Make this
+	// open (not-closed) connections only.
+	conns               map[*PeerConn]struct{}
+	maxEstablishedConns int
+	// Set of addrs to which we're attempting to connect. Connections are
+	// half-open until all handshakes are completed.
+	halfOpen map[string]map[outgoingConnAttemptKey]*PeerInfo
+
+	// Reserve of peers to connect to. A peer can be both here and in the
+	// active connections if were told about the peer after connecting with
+	// them. That encourages us to reconnect to peers that are well known in
+	// the swarm.
+	peers prioritizedPeers
+	// Whether we want to know more peers.
+	wantPeersEvent missinggo.Event
+	// An announcer for each tracker URL.
+	trackerAnnouncers map[string]torrentTrackerAnnouncer
+	// How many times we've initiated a DHT announce. TODO: Move into stats.
+	numDHTAnnounces int
+
+	// Name used if the info name isn't available. Should be cleared when the
+	// Info does become available.
+	nameMu      sync.RWMutex
+	displayName string
+
+	// The bencoded bytes of the info dict. This is actively manipulated if
+	// the info bytes aren't initially available, and we try to fetch them
+	// from peers.
+	metadataBytes []byte
+	// Each element corresponds to the 16KiB metadata pieces. If true, we have
+	// received that piece.
+	metadataCompletedChunks []bool
+	metadataChanged         sync.Cond
+
+	// Closed when .Info is obtained.
+	gotMetainfoC chan struct{}
+
+	readers                map[*reader]struct{}
+	_readerNowPieces       bitmap.Bitmap
+	_readerReadaheadPieces bitmap.Bitmap
+
+	// A cache of pieces we need to get. Calculated from various piece and
+	// file priorities and completion states elsewhere.
+	_pendingPieces roaring.Bitmap
+	// A cache of completed piece indices.
+	_completedPieces roaring.Bitmap
+	// Pieces that need to be hashed.
+	piecesQueuedForHash       bitmap.Bitmap
+	activePieceHashes         int
+	initialPieceCheckDisabled bool
+
+	connsWithAllPieces map[*Peer]struct{}
+
+	requestState map[RequestIndex]requestState
+	// Chunks we've written to since the corresponding piece was last checked.
+	dirtyChunks typedRoaring.Bitmap[RequestIndex]
+
+	pex pexState
+
+	// Is On when all pieces are complete.
+	Complete chansync.Flag
+
+	// Torrent sources in use keyed by the source string.
+	activeSources sync.Map
+	sourcesLogger log.Logger
+
+	smartBanCache smartBanCache
+
+	// Large allocations reused between request state updates.
+	requestPieceStates []request_strategy.PieceRequestOrderState
+	requestIndexes     []RequestIndex
+}
+
+type outgoingConnAttemptKey = *PeerInfo
+
+func (t *Torrent) length() int64 {
+	return t._length.Value
+}
+
+func (t *Torrent) selectivePieceAvailabilityFromPeers(i pieceIndex) (count int) {
+	// This could be done with roaring.BitSliceIndexing.
+	t.iterPeers(func(peer *Peer) {
+		if _, ok := t.connsWithAllPieces[peer]; ok {
+			return
+		}
+		if peer.peerHasPiece(i) {
+			count++
+		}
+	})
+	return
+}
+
+func (t *Torrent) decPieceAvailability(i pieceIndex) {
+	if !t.haveInfo() {
+		return
+	}
+	p := t.piece(i)
+	if p.relativeAvailability <= 0 {
+		panic(p.relativeAvailability)
+	}
+	p.relativeAvailability--
+	t.updatePieceRequestOrder(i)
+}
+
+func (t *Torrent) incPieceAvailability(i pieceIndex) {
+	// If we don't have the info, this should be reconciled when we do.
+	if t.haveInfo() {
+		p := t.piece(i)
+		p.relativeAvailability++
+		t.updatePieceRequestOrder(i)
+	}
+}
+
+func (t *Torrent) readerNowPieces() bitmap.Bitmap {
+	return t._readerNowPieces
+}
+
+func (t *Torrent) readerReadaheadPieces() bitmap.Bitmap {
+	return t._readerReadaheadPieces
+}
+
+func (t *Torrent) ignorePieceForRequests(i pieceIndex) bool {
+	return !t.wantPieceIndex(i)
+}
+
+// Returns a channel that is closed when the Torrent is closed.
+func (t *Torrent) Closed() events.Done {
+	return t.closed.Done()
+}
+
+// KnownSwarm returns the known subset of the peers in the Torrent's swarm, including active,
+// pending, and half-open peers.
+func (t *Torrent) KnownSwarm() (ks []PeerInfo) {
+	// Add pending peers to the list
+	t.peers.Each(func(peer PeerInfo) {
+		ks = append(ks, peer)
+	})
+
+	// Add half-open peers to the list
+	for _, attempts := range t.halfOpen {
+		for _, peer := range attempts {
+			ks = append(ks, *peer)
+		}
+	}
+
+	// Add active peers to the list
+	t.cl.rLock()
+	defer t.cl.rUnlock()
+	for conn := range t.conns {
+		ks = append(ks, PeerInfo{
+			Id:     conn.PeerID,
+			Addr:   conn.RemoteAddr,
+			Source: conn.Discovery,
+			// > If the connection is encrypted, that's certainly enough to set SupportsEncryption.
+			// > But if we're not connected to them with an encrypted connection, I couldn't say
+			// > what's appropriate. We can carry forward the SupportsEncryption value as we
+			// > received it from trackers/DHT/PEX, or just use the encryption state for the
+			// > connection. It's probably easiest to do the latter for now.
+			// https://github.com/anacrolix/torrent/pull/188
+			SupportsEncryption: conn.headerEncrypted,
+		})
+	}
+
+	return
+}
+
+func (t *Torrent) setChunkSize(size pp.Integer) {
+	t.chunkSize = size
+	t.chunkPool = sync.Pool{
+		New: func() interface{} {
+			b := make([]byte, size)
+			return &b
+		},
+	}
+}
+
+func (t *Torrent) pieceComplete(piece pieceIndex) bool {
+	return t._completedPieces.Contains(bitmap.BitIndex(piece))
+}
+
+func (t *Torrent) pieceCompleteUncached(piece pieceIndex) storage.Completion {
+	if t.storage == nil {
+		return storage.Completion{Complete: false, Ok: true}
+	}
+	return t.pieces[piece].Storage().Completion()
+}
+
+// There's a connection to that address already.
+func (t *Torrent) addrActive(addr string) bool {
+	if _, ok := t.halfOpen[addr]; ok {
+		return true
+	}
+	for c := range t.conns {
+		ra := c.RemoteAddr
+		if ra.String() == addr {
+			return true
+		}
+	}
+	return false
+}
+
+func (t *Torrent) appendUnclosedConns(ret []*PeerConn) []*PeerConn {
+	return t.appendConns(ret, func(conn *PeerConn) bool {
+		return !conn.closed.IsSet()
+	})
+}
+
+func (t *Torrent) appendConns(ret []*PeerConn, f func(*PeerConn) bool) []*PeerConn {
+	for c := range t.conns {
+		if f(c) {
+			ret = append(ret, c)
+		}
+	}
+	return ret
+}
+
+func (t *Torrent) addPeer(p PeerInfo) (added bool) {
+	cl := t.cl
+	torrent.Add(fmt.Sprintf("peers added by source %q", p.Source), 1)
+	if t.closed.IsSet() {
+		return false
+	}
+	if ipAddr, ok := tryIpPortFromNetAddr(p.Addr); ok {
+		if cl.badPeerIPPort(ipAddr.IP, ipAddr.Port) {
+			torrent.Add("peers not added because of bad addr", 1)
+			// cl.logger.Printf("peers not added because of bad addr: %v", p)
+			return false
+		}
+	}
+	if replaced, ok := t.peers.AddReturningReplacedPeer(p); ok {
+		torrent.Add("peers replaced", 1)
+		if !replaced.equal(p) {
+			t.logger.WithDefaultLevel(log.Debug).Printf("added %v replacing %v", p, replaced)
+			added = true
+		}
+	} else {
+		added = true
+	}
+	t.openNewConns()
+	for t.peers.Len() > cl.config.TorrentPeersHighWater {
+		_, ok := t.peers.DeleteMin()
+		if ok {
+			torrent.Add("excess reserve peers discarded", 1)
+		}
+	}
+	return
+}
+
+func (t *Torrent) invalidateMetadata() {
+	for i := 0; i < len(t.metadataCompletedChunks); i++ {
+		t.metadataCompletedChunks[i] = false
+	}
+	t.nameMu.Lock()
+	t.gotMetainfoC = make(chan struct{})
+	t.info = nil
+	t.nameMu.Unlock()
+}
+
+func (t *Torrent) saveMetadataPiece(index int, data []byte) {
+	if t.haveInfo() {
+		return
+	}
+	if index >= len(t.metadataCompletedChunks) {
+		t.logger.Printf("%s: ignoring metadata piece %d", t, index)
+		return
+	}
+	copy(t.metadataBytes[(1<<14)*index:], data)
+	t.metadataCompletedChunks[index] = true
+}
+
+func (t *Torrent) metadataPieceCount() int {
+	return (len(t.metadataBytes) + (1 << 14) - 1) / (1 << 14)
+}
+
+func (t *Torrent) haveMetadataPiece(piece int) bool {
+	if t.haveInfo() {
+		return (1<<14)*piece < len(t.metadataBytes)
+	} else {
+		return piece < len(t.metadataCompletedChunks) && t.metadataCompletedChunks[piece]
+	}
+}
+
+func (t *Torrent) metadataSize() int {
+	return len(t.metadataBytes)
+}
+
+func infoPieceHashes(info *metainfo.Info) (ret [][]byte) {
+	for i := 0; i < len(info.Pieces); i += sha1.Size {
+		ret = append(ret, info.Pieces[i:i+sha1.Size])
+	}
+	return
+}
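+
+// Editor's note: info.Pieces is the concatenation of 20-byte SHA-1 digests
+// (sha1.Size == 20), so a 60-byte Pieces value yields three hashes. The
+// returned slices alias the original buffer rather than copying it.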
+
+func (t *Torrent) makePieces() {
+	hashes := infoPieceHashes(t.info)
+	t.pieces = make([]Piece, len(hashes))
+	for i, hash := range hashes {
+		piece := &t.pieces[i]
+		piece.t = t
+		piece.index = pieceIndex(i)
+		piece.noPendingWrites.L = &piece.pendingWritesMutex
+		piece.hash = (*metainfo.Hash)(unsafe.Pointer(&hash[0]))
+		files := *t.files
+		beginFile := pieceFirstFileIndex(piece.torrentBeginOffset(), files)
+		endFile := pieceEndFileIndex(piece.torrentEndOffset(), files)
+		piece.files = files[beginFile:endFile]
+	}
+}
+
+// Returns the index of the first file containing the piece. files must be
+// ordered by offset.
+func pieceFirstFileIndex(pieceOffset int64, files []*File) int {
+	for i, f := range files {
+		if f.offset+f.length > pieceOffset {
+			return i
+		}
+	}
+	return 0
+}
+
+// Returns the index after the last file containing the piece. files must be
+// ordered by offset.
+func pieceEndFileIndex(pieceEndOffset int64, files []*File) int {
+	for i, f := range files {
+		if f.offset+f.length >= pieceEndOffset {
+			return i + 1
+		}
+	}
+	return 0
+}
+
+func (t *Torrent) cacheLength() {
+	var l int64
+	for _, f := range t.info.UpvertedFiles() {
+		l += f.Length
+	}
+	t._length = Some(l)
+}
+
+// TODO: This shouldn't fail for storage reasons. Instead we should handle storage failure
+// separately.
+func (t *Torrent) setInfo(info *metainfo.Info) error {
+	if err := validateInfo(info); err != nil {
+		return fmt.Errorf("bad info: %s", err)
+	}
+	if t.storageOpener != nil {
+		var err error
+		t.storage, err = t.storageOpener.OpenTorrent(info, t.infoHash)
+		if err != nil {
+			return fmt.Errorf("error opening torrent storage: %s", err)
+		}
+	}
+	t.nameMu.Lock()
+	t.info = info
+	t.nameMu.Unlock()
+	t._chunksPerRegularPiece = chunkIndexType((pp.Integer(t.usualPieceSize()) + t.chunkSize - 1) / t.chunkSize)
+	t.updateComplete()
+	t.fileIndex = segments.NewIndex(common.LengthIterFromUpvertedFiles(info.UpvertedFiles()))
+	t.displayName = "" // Save a few bytes lol.
+	t.initFiles()
+	t.cacheLength()
+	t.makePieces()
+	return nil
+}
+
+func (t *Torrent) pieceRequestOrderKey(i int) request_strategy.PieceRequestOrderKey {
+	return request_strategy.PieceRequestOrderKey{
+		InfoHash: t.infoHash,
+		Index:    i,
+	}
+}
+
+// This seems to be all the follow-up tasks after info is set that can't fail.
+func (t *Torrent) onSetInfo() {
+	t.pieceRequestOrder = rand.Perm(t.numPieces())
+	t.initPieceRequestOrder()
+	MakeSliceWithLength(&t.requestPieceStates, t.numPieces())
+	for i := range t.pieces {
+		p := &t.pieces[i]
+		// Need to add relativeAvailability before updating piece completion, as that may result in conns
+		// being dropped.
+		if p.relativeAvailability != 0 {
+			panic(p.relativeAvailability)
+		}
+		p.relativeAvailability = t.selectivePieceAvailabilityFromPeers(i)
+		t.addRequestOrderPiece(i)
+		t.updatePieceCompletion(i)
+		if !t.initialPieceCheckDisabled && !p.storageCompletionOk {
+			// t.logger.Printf("piece %s completion unknown, queueing check", p)
+			t.queuePieceCheck(i)
+		}
+	}
+	t.cl.event.Broadcast()
+	close(t.gotMetainfoC)
+	t.updateWantPeersEvent()
+	t.requestState = make(map[RequestIndex]requestState)
+	t.tryCreateMorePieceHashers()
+	t.iterPeers(func(p *Peer) {
+		p.onGotInfo(t.info)
+		p.updateRequests("onSetInfo")
+	})
+}
+
+// Called when metadata for a torrent becomes available.
+func (t *Torrent) setInfoBytesLocked(b []byte) error {
+	if metainfo.HashBytes(b) != t.infoHash {
+		return errors.New("info bytes have wrong hash")
+	}
+	var info metainfo.Info
+	if err := bencode.Unmarshal(b, &info); err != nil {
+		return fmt.Errorf("error unmarshalling info bytes: %s", err)
+	}
+	t.metadataBytes = b
+	t.metadataCompletedChunks = nil
+	if t.info != nil {
+		return nil
+	}
+	if err := t.setInfo(&info); err != nil {
+		return err
+	}
+	t.onSetInfo()
+	return nil
+}
+
+func (t *Torrent) haveAllMetadataPieces() bool {
+	if t.haveInfo() {
+		return true
+	}
+	if t.metadataCompletedChunks == nil {
+		return false
+	}
+	for _, have := range t.metadataCompletedChunks {
+		if !have {
+			return false
+		}
+	}
+	return true
+}
+
+// TODO: Propagate errors to disconnect peer.
+func (t *Torrent) setMetadataSize(size int) (err error) {
+	if t.haveInfo() {
+		// We already know the correct metadata size.
+		return
+	}
+	if uint32(size) > maxMetadataSize {
+		return log.WithLevel(log.Warning, errors.New("bad size"))
+	}
+	if len(t.metadataBytes) == size {
+		return
+	}
+	t.metadataBytes = make([]byte, size)
+	t.metadataCompletedChunks = make([]bool, (size+(1<<14)-1)/(1<<14))
+	t.metadataChanged.Broadcast()
+	for c := range t.conns {
+		c.requestPendingMetadata()
+	}
+	return
+}
+
+// The current working name for the torrent: the name in the info dict, a
+// display name such as the dn value from a magnet link, or "".
+func (t *Torrent) name() string {
+	t.nameMu.RLock()
+	defer t.nameMu.RUnlock()
+	if t.haveInfo() {
+		return t.info.BestName()
+	}
+	if t.displayName != "" {
+		return t.displayName
+	}
+	return "infohash:" + t.infoHash.HexString()
+}
+
+func (t *Torrent) pieceState(index pieceIndex) (ret PieceState) {
+	p := &t.pieces[index]
+	ret.Priority = t.piecePriority(index)
+	ret.Completion = p.completion()
+	ret.QueuedForHash = p.queuedForHash()
+	ret.Hashing = p.hashing
+	ret.Checking = ret.QueuedForHash || ret.Hashing
+	ret.Marking = p.marking
+	if !ret.Complete && t.piecePartiallyDownloaded(index) {
+		ret.Partial = true
+	}
+	return
+}
+
+func (t *Torrent) metadataPieceSize(piece int) int {
+	return metadataPieceSize(len(t.metadataBytes), piece)
+}
+
+func (t *Torrent) newMetadataExtensionMessage(c *PeerConn, msgType pp.ExtendedMetadataRequestMsgType, piece int, data []byte) pp.Message {
+	return pp.Message{
+		Type:       pp.Extended,
+		ExtendedID: c.PeerExtensionIDs[pp.ExtensionNameMetadata],
+		ExtendedPayload: append(bencode.MustMarshal(pp.ExtendedMetadataRequestMsg{
+			Piece:     piece,
+			TotalSize: len(t.metadataBytes),
+			Type:      msgType,
+		}), data...),
+	}
+}
+
+type pieceAvailabilityRun struct {
+	Count        pieceIndex
+	Availability int
+}
+
+func (me pieceAvailabilityRun) String() string {
+	return fmt.Sprintf("%v(%v)", me.Count, me.Availability)
+}
+
+func (t *Torrent) pieceAvailabilityRuns() (ret []pieceAvailabilityRun) {
+	rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
+		ret = append(ret, pieceAvailabilityRun{Availability: el.(int), Count: int(count)})
+	})
+	for i := range t.pieces {
+		rle.Append(t.pieces[i].availability(), 1)
+	}
+	rle.Flush()
+	return
+}
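+
+// For illustration (editor's note, hypothetical values): if pieces 0-9 are
+// held by 2 peers and pieces 10-11 by none, pieceAvailabilityRuns yields
+// [10(2) 2(0)]: runs of consecutive pieces sharing an availability, each
+// formatted by pieceAvailabilityRun.String as "count(availability)".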
+
+func (t *Torrent) pieceAvailabilityFrequencies() (freqs []int) {
+	freqs = make([]int, t.numActivePeers()+1)
+	for i := range t.pieces {
+		freqs[t.piece(i).availability()]++
+	}
+	return
+}
+
+func (t *Torrent) pieceStateRuns() (ret PieceStateRuns) {
+	rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
+		ret = append(ret, PieceStateRun{
+			PieceState: el.(PieceState),
+			Length:     int(count),
+		})
+	})
+	for index := range t.pieces {
+		rle.Append(t.pieceState(pieceIndex(index)), 1)
+	}
+	rle.Flush()
+	return
+}
+
+// Produces a small string representing a PieceStateRun.
+func (psr PieceStateRun) String() (ret string) {
+	ret = fmt.Sprintf("%d", psr.Length)
+	ret += func() string {
+		switch psr.Priority {
+		case PiecePriorityNext:
+			return "N"
+		case PiecePriorityNormal:
+			return "."
+		case PiecePriorityReadahead:
+			return "R"
+		case PiecePriorityNow:
+			return "!"
+		case PiecePriorityHigh:
+			return "H"
+		default:
+			return ""
+		}
+	}()
+	if psr.Hashing {
+		ret += "H"
+	}
+	if psr.QueuedForHash {
+		ret += "Q"
+	}
+	if psr.Marking {
+		ret += "M"
+	}
+	if psr.Partial {
+		ret += "P"
+	}
+	if psr.Complete {
+		ret += "C"
+	}
+	if !psr.Ok {
+		ret += "?"
+	}
+	return
+}
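+
+// Decoding the compact form above (editor's note, hypothetical values):
+// "12R" is a run of 12 pieces at readahead priority; "3!HP" is 3 pieces at
+// now-priority that are hashing and partially downloaded. The leading number
+// is always the run length, followed by the priority character (if any) and
+// then the state flags in the order checked above. Note that 'H' is used
+// both for high priority and for hashing, so it can be ambiguous on its own.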
+
+func (t *Torrent) writeStatus(w io.Writer) {
+	fmt.Fprintf(w, "Infohash: %s\n", t.infoHash.HexString())
+	fmt.Fprintf(w, "Metadata length: %d\n", t.metadataSize())
+	if !t.haveInfo() {
+		fmt.Fprintf(w, "Metadata have: ")
+		for _, h := range t.metadataCompletedChunks {
+			fmt.Fprintf(w, "%c", func() rune {
+				if h {
+					return 'H'
+				} else {
+					return '.'
+				}
+			}())
+		}
+		fmt.Fprintln(w)
+	}
+	fmt.Fprintf(w, "Piece length: %s\n",
+		func() string {
+			if t.haveInfo() {
+				return fmt.Sprintf("%v (%v chunks)",
+					t.usualPieceSize(),
+					float64(t.usualPieceSize())/float64(t.chunkSize))
+			} else {
+				return "no info"
+			}
+		}(),
+	)
+	if t.info != nil {
+		fmt.Fprintf(w, "Num Pieces: %d (%d completed)\n", t.numPieces(), t.numPiecesCompleted())
+		fmt.Fprintf(w, "Piece States: %s\n", t.pieceStateRuns())
+		// Generates a huge, unhelpful listing when piece availability is very scattered. Prefer
+		// availability frequencies instead.
+		if false {
+			fmt.Fprintf(w, "Piece availability: %v\n", strings.Join(func() (ret []string) {
+				for _, run := range t.pieceAvailabilityRuns() {
+					ret = append(ret, run.String())
+				}
+				return
+			}(), " "))
+		}
+		fmt.Fprintf(w, "Piece availability frequency: %v\n", strings.Join(
+			func() (ret []string) {
+				for avail, freq := range t.pieceAvailabilityFrequencies() {
+					if freq == 0 {
+						continue
+					}
+					ret = append(ret, fmt.Sprintf("%v: %v", avail, freq))
+				}
+				return
+			}(),
+			", "))
+	}
+	fmt.Fprintf(w, "Reader Pieces:")
+	t.forReaderOffsetPieces(func(begin, end pieceIndex) (again bool) {
+		fmt.Fprintf(w, " %d:%d", begin, end)
+		return true
+	})
+	fmt.Fprintln(w)
+
+	fmt.Fprintf(w, "Enabled trackers:\n")
+	func() {
+		tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
+		fmt.Fprintf(tw, "    URL\tExtra\n")
+		for _, ta := range slices.Sort(slices.FromMapElems(t.trackerAnnouncers), func(l, r torrentTrackerAnnouncer) bool {
+			lu := l.URL()
+			ru := r.URL()
+			var luns, runs url.URL = *lu, *ru
+			luns.Scheme = ""
+			runs.Scheme = ""
+			var ml missinggo.MultiLess
+			ml.StrictNext(luns.String() == runs.String(), luns.String() < runs.String())
+			ml.StrictNext(lu.String() == ru.String(), lu.String() < ru.String())
+			return ml.Less()
+		}).([]torrentTrackerAnnouncer) {
+			fmt.Fprintf(tw, "    %q\t%v\n", ta.URL(), ta.statusLine())
+		}
+		tw.Flush()
+	}()
+
+	fmt.Fprintf(w, "DHT Announces: %d\n", t.numDHTAnnounces)
+
+	dumpStats(w, t.statsLocked())
+
+	fmt.Fprintf(w, "webseeds:\n")
+	t.writePeerStatuses(w, maps.Values(t.webSeeds))
+
+	peerConns := maps.Keys(t.conns)
+	// Peers without priorities first, then those with. I'm undecided about how to order peers
+	// without priorities.
+	sort.Slice(peerConns, func(li, ri int) bool {
+		l := peerConns[li]
+		r := peerConns[ri]
+		ml := multiless.New()
+		lpp := g.ResultFromTuple(l.peerPriority()).ToOption()
+		rpp := g.ResultFromTuple(r.peerPriority()).ToOption()
+		ml = ml.Bool(lpp.Ok, rpp.Ok)
+		ml = ml.Uint32(rpp.Value, lpp.Value)
+		return ml.Less()
+	})
+
+	fmt.Fprintf(w, "%v peer conns:\n", len(peerConns))
+	t.writePeerStatuses(w, g.SliceMap(peerConns, func(pc *PeerConn) *Peer {
+		return &pc.Peer
+	}))
+}
+
+func (t *Torrent) writePeerStatuses(w io.Writer, peers []*Peer) {
+	var buf bytes.Buffer
+	for _, c := range peers {
+		fmt.Fprintf(w, "- ")
+		buf.Reset()
+		c.writeStatus(&buf)
+		w.Write(bytes.TrimRight(
+			bytes.ReplaceAll(buf.Bytes(), []byte("\n"), []byte("\n  ")),
+			" "))
+	}
+}
+
+func (t *Torrent) haveInfo() bool {
+	return t.info != nil
+}
+
+// Returns a run-time generated MetaInfo that includes the info bytes and
+// announce-list as currently known to the client.
+func (t *Torrent) newMetaInfo() metainfo.MetaInfo {
+	return metainfo.MetaInfo{
+		CreationDate: time.Now().Unix(),
+		Comment:      "dynamic metainfo from client",
+		CreatedBy:    "go.torrent",
+		AnnounceList: t.metainfo.UpvertedAnnounceList().Clone(),
+		InfoBytes: func() []byte {
+			if t.haveInfo() {
+				return t.metadataBytes
+			} else {
+				return nil
+			}
+		}(),
+		UrlList: func() []string {
+			ret := make([]string, 0, len(t.webSeeds))
+			for url := range t.webSeeds {
+				ret = append(ret, url)
+			}
+			return ret
+		}(),
+	}
+}
+
+// Returns a count of bytes that are not complete in storage, and not pending being written to
+// storage. This value is from the perspective of the download manager, and may not agree with the
+// actual state in storage. If you want to read data synchronously, use a Reader. See
+// https://github.com/anacrolix/torrent/issues/828.
+func (t *Torrent) BytesMissing() (n int64) {
+	t.cl.rLock()
+	n = t.bytesMissingLocked()
+	t.cl.rUnlock()
+	return
+}
+
+func (t *Torrent) bytesMissingLocked() int64 {
+	return t.bytesLeft()
+}
+
+func iterFlipped(b *roaring.Bitmap, end uint64, cb func(uint32) bool) {
+	roaring.Flip(b, 0, end).Iterate(cb)
+}
+
+func (t *Torrent) bytesLeft() (left int64) {
+	iterFlipped(&t._completedPieces, uint64(t.numPieces()), func(x uint32) bool {
+		p := t.piece(pieceIndex(x))
+		left += int64(p.length() - p.numDirtyBytes())
+		return true
+	})
+	return
+}
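+
+// Editor's note: roaring.Flip materialises the complement of the bitmap over
+// [0, end), so the callback above visits exactly the pieces absent from
+// _completedPieces. bytesLeft therefore sums length minus dirty bytes over
+// incomplete pieces only, without mutating the completion bitmap.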
+
+// Bytes left to give in tracker announces.
+func (t *Torrent) bytesLeftAnnounce() int64 {
+	if t.haveInfo() {
+		return t.bytesLeft()
+	} else {
+		return -1
+	}
+}
+
+func (t *Torrent) piecePartiallyDownloaded(piece pieceIndex) bool {
+	if t.pieceComplete(piece) {
+		return false
+	}
+	if t.pieceAllDirty(piece) {
+		return false
+	}
+	return t.pieces[piece].hasDirtyChunks()
+}
+
+func (t *Torrent) usualPieceSize() int {
+	return int(t.info.PieceLength)
+}
+
+func (t *Torrent) numPieces() pieceIndex {
+	return t.info.NumPieces()
+}
+
+func (t *Torrent) numPiecesCompleted() (num pieceIndex) {
+	return pieceIndex(t._completedPieces.GetCardinality())
+}
+
+func (t *Torrent) close(wg *sync.WaitGroup) (err error) {
+	if !t.closed.Set() {
+		err = errors.New("already closed")
+		return
+	}
+	for _, f := range t.onClose {
+		f()
+	}
+	if t.storage != nil {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			t.storageLock.Lock()
+			defer t.storageLock.Unlock()
+			if f := t.storage.Close; f != nil {
+				err1 := f()
+				if err1 != nil {
+					t.logger.WithDefaultLevel(log.Warning).Printf("error closing storage: %v", err1)
+				}
+			}
+		}()
+	}
+	t.iterPeers(func(p *Peer) {
+		p.close()
+	})
+	if t.storage != nil {
+		t.deletePieceRequestOrder()
+	}
+	t.assertAllPiecesRelativeAvailabilityZero()
+	t.pex.Reset()
+	t.cl.event.Broadcast()
+	t.pieceStateChanges.Close()
+	t.updateWantPeersEvent()
+	return
+}
+
+func (t *Torrent) assertAllPiecesRelativeAvailabilityZero() {
+	for i := range t.pieces {
+		p := t.piece(i)
+		if p.relativeAvailability != 0 {
+			panic(fmt.Sprintf("piece %v has relative availability %v", i, p.relativeAvailability))
+		}
+	}
+}
+
+func (t *Torrent) requestOffset(r Request) int64 {
+	return torrentRequestOffset(t.length(), int64(t.usualPieceSize()), r)
+}
+
+// Return the request that would include the given offset into the torrent data. Returns !ok if
+// there is no such request.
+func (t *Torrent) offsetRequest(off int64) (req Request, ok bool) {
+	return torrentOffsetRequest(t.length(), t.info.PieceLength, int64(t.chunkSize), off)
+}
+
+func (t *Torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
+	defer perf.ScopeTimerErr(&err)()
+	n, err := t.pieces[piece].Storage().WriteAt(data, begin)
+	if err == nil && n != len(data) {
+		err = io.ErrShortWrite
+	}
+	return err
+}
+
+func (t *Torrent) bitfield() (bf []bool) {
+	bf = make([]bool, t.numPieces())
+	t._completedPieces.Iterate(func(piece uint32) (again bool) {
+		bf[piece] = true
+		return true
+	})
+	return
+}
+
+func (t *Torrent) pieceNumChunks(piece pieceIndex) chunkIndexType {
+	return chunkIndexType((t.pieceLength(piece) + t.chunkSize - 1) / t.chunkSize)
+}
+
+func (t *Torrent) chunksPerRegularPiece() chunkIndexType {
+	return t._chunksPerRegularPiece
+}
+
+func (t *Torrent) numChunks() RequestIndex {
+	if t.numPieces() == 0 {
+		return 0
+	}
+	return RequestIndex(t.numPieces()-1)*t.chunksPerRegularPiece() + t.pieceNumChunks(t.numPieces()-1)
+}
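+
+// A worked example of the count above (editor's sketch with hypothetical
+// sizes, mirroring numChunks rather than calling it): 10 pieces of 256 KiB
+// with 16 KiB chunks is 16 chunks per regular piece; a 100 KiB final piece
+// contributes ceil(100/16) = 7, giving 9*16 + 7 = 151 chunks in total.
+func exampleNumChunks() int {
+	const (
+		numPieces       = 10
+		chunksPerPiece  = 16 // 256 KiB piece / 16 KiB chunk
+		lastPieceChunks = 7  // ceil(100 KiB / 16 KiB)
+	)
+	return (numPieces-1)*chunksPerPiece + lastPieceChunks // == 151
+}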
+
+func (t *Torrent) pendAllChunkSpecs(pieceIndex pieceIndex) {
+	t.dirtyChunks.RemoveRange(
+		uint64(t.pieceRequestIndexOffset(pieceIndex)),
+		uint64(t.pieceRequestIndexOffset(pieceIndex+1)))
+}
+
+func (t *Torrent) pieceLength(piece pieceIndex) pp.Integer {
+	if t.info.PieceLength == 0 {
+		// There will be no variance amongst pieces. Only pain.
+		return 0
+	}
+	if piece == t.numPieces()-1 {
+		ret := pp.Integer(t.length() % t.info.PieceLength)
+		if ret != 0 {
+			return ret
+		}
+	}
+	return pp.Integer(t.info.PieceLength)
+}
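+
+// Editor's note with hypothetical numbers: for a 1000-byte torrent with
+// 256-byte pieces, pieces 0-2 are 256 bytes and the final piece is
+// 1000 % 256 = 232 bytes. When the length divides evenly the modulo is 0,
+// and the fall-through correctly returns the full PieceLength; that is what
+// the ret != 0 check above is for.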
+
+func (t *Torrent) smartBanBlockCheckingWriter(piece pieceIndex) *blockCheckingWriter {
+	return &blockCheckingWriter{
+		cache:        &t.smartBanCache,
+		requestIndex: t.pieceRequestIndexOffset(piece),
+		chunkSize:    t.chunkSize.Int(),
+	}
+}
+
+func (t *Torrent) hashPiece(piece pieceIndex) (
+	ret metainfo.Hash,
+	// These are peers that sent us blocks that differ from what we hash here.
+	differingPeers map[bannableAddr]struct{},
+	err error,
+) {
+	p := t.piece(piece)
+	p.waitNoPendingWrites()
+	storagePiece := t.pieces[piece].Storage()
+
+	// Does the backend want to do its own hashing?
+	if i, ok := storagePiece.PieceImpl.(storage.SelfHashing); ok {
+		var sum metainfo.Hash
+		// log.Printf("A piece decided to self-hash: %d", piece)
+		sum, err = i.SelfHash()
+		missinggo.CopyExact(&ret, sum)
+		return
+	}
+
+	hash := pieceHash.New()
+	const logPieceContents = false
+	smartBanWriter := t.smartBanBlockCheckingWriter(piece)
+	writers := []io.Writer{hash, smartBanWriter}
+	var examineBuf bytes.Buffer
+	if logPieceContents {
+		writers = append(writers, &examineBuf)
+	}
+	_, err = storagePiece.WriteTo(io.MultiWriter(writers...))
+	if logPieceContents {
+		t.logger.WithDefaultLevel(log.Debug).Printf("hashed %q with copy err %v", examineBuf.Bytes(), err)
+	}
+	smartBanWriter.Flush()
+	differingPeers = smartBanWriter.badPeers
+	missinggo.CopyExact(&ret, hash.Sum(nil))
+	return
+}
+
+func (t *Torrent) haveAnyPieces() bool {
+	return !t._completedPieces.IsEmpty()
+}
+
+func (t *Torrent) haveAllPieces() bool {
+	if !t.haveInfo() {
+		return false
+	}
+	return t._completedPieces.GetCardinality() == bitmap.BitRange(t.numPieces())
+}
+
+func (t *Torrent) havePiece(index pieceIndex) bool {
+	return t.haveInfo() && t.pieceComplete(index)
+}
+
+func (t *Torrent) maybeDropMutuallyCompletePeer(
+	// I'm not sure about taking peer here, not all peer implementations actually drop. Maybe that's
+	// okay?
+	p *PeerConn,
+) {
+	if !t.cl.config.DropMutuallyCompletePeers {
+		return
+	}
+	if !t.haveAllPieces() {
+		return
+	}
+	if all, known := p.peerHasAllPieces(); !(known && all) {
+		return
+	}
+	if p.useful() {
+		return
+	}
+	p.logger.Levelf(log.Debug, "is mutually complete; dropping")
+	p.drop()
+}
+
+func (t *Torrent) haveChunk(r Request) (ret bool) {
+	// defer func() {
+	// 	log.Println("have chunk", r, ret)
+	// }()
+	if !t.haveInfo() {
+		return false
+	}
+	if t.pieceComplete(pieceIndex(r.Index)) {
+		return true
+	}
+	p := &t.pieces[r.Index]
+	return !p.pendingChunk(r.ChunkSpec, t.chunkSize)
+}
+
+func chunkIndexFromChunkSpec(cs ChunkSpec, chunkSize pp.Integer) chunkIndexType {
+	return chunkIndexType(cs.Begin / chunkSize)
+}
+
+func (t *Torrent) wantPieceIndex(index pieceIndex) bool {
+	return t._pendingPieces.Contains(uint32(index))
+}
+
+// A pool of []*PeerConn, to reduce allocations in functions that need to index or sort Torrent
+// conns (which is a map).
+var peerConnSlices sync.Pool
+
+func getPeerConnSlice(cap int) []*PeerConn {
+	getInterface := peerConnSlices.Get()
+	if getInterface == nil {
+		return make([]*PeerConn, 0, cap)
+	} else {
+		return getInterface.([]*PeerConn)[:0]
+	}
+}
+
+// Calls the given function with a slice of unclosed conns. It uses a pool to reduce allocations as
+// this is a frequent occurrence.
+func (t *Torrent) withUnclosedConns(f func([]*PeerConn)) {
+	sl := t.appendUnclosedConns(getPeerConnSlice(len(t.conns)))
+	f(sl)
+	peerConnSlices.Put(sl)
+}
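+
+// Editor's note: Put receives the slice header by value, so boxing it into
+// the pool's interface{} argument allocates on every call (staticcheck
+// SA6002); pooling *[]*PeerConn would avoid that at the cost of an
+// indirection. The pool still pays off by reusing the much larger backing
+// arrays.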
+
+func (t *Torrent) worstBadConnFromSlice(opts worseConnLensOpts, sl []*PeerConn) *PeerConn {
+	wcs := worseConnSlice{conns: sl}
+	wcs.initKeys(opts)
+	heap.Init(&wcs)
+	for wcs.Len() != 0 {
+		c := heap.Pop(&wcs).(*PeerConn)
+		if opts.incomingIsBad && !c.outgoing {
+			return c
+		}
+		if opts.outgoingIsBad && c.outgoing {
+			return c
+		}
+		if c._stats.ChunksReadWasted.Int64() >= 6 && c._stats.ChunksReadWasted.Int64() > c._stats.ChunksReadUseful.Int64() {
+			return c
+		}
+		// If the connection is in the worst half of the established
+		// connection quota and is older than a minute.
+		if wcs.Len() >= (t.maxEstablishedConns+1)/2 {
+			// Give connections 1 minute to prove themselves.
+			if time.Since(c.completedHandshake) > time.Minute {
+				return c
+			}
+		}
+	}
+	return nil
+}
+
+// The worst connection is the one that has gone the longest without sending
+// or receiving anything useful. A bad connection is one that mostly sends us
+// unwanted pieces, or has been in the worse half of the established
+// connections for more than a minute. This is O(n log n). If there were a way
+// to not consider the position of a conn relative to the total number, it
+// could be reduced to O(n).
+func (t *Torrent) worstBadConn(opts worseConnLensOpts) (ret *PeerConn) {
+	t.withUnclosedConns(func(ucs []*PeerConn) {
+		ret = t.worstBadConnFromSlice(opts, ucs)
+	})
+	return
+}
+
+type PieceStateChange struct {
+	Index int
+	PieceState
+}
+
+func (t *Torrent) publishPieceChange(piece pieceIndex) {
+	t.cl._mu.Defer(func() {
+		cur := t.pieceState(piece)
+		p := &t.pieces[piece]
+		if cur != p.publicPieceState {
+			p.publicPieceState = cur
+			t.pieceStateChanges.Publish(PieceStateChange{
+				int(piece),
+				cur,
+			})
+		}
+	})
+}
+
+func (t *Torrent) pieceNumPendingChunks(piece pieceIndex) pp.Integer {
+	if t.pieceComplete(piece) {
+		return 0
+	}
+	return pp.Integer(t.pieceNumChunks(piece) - t.pieces[piece].numDirtyChunks())
+}
+
+func (t *Torrent) pieceAllDirty(piece pieceIndex) bool {
+	return t.pieces[piece].allChunksDirty()
+}
+
+func (t *Torrent) readersChanged() {
+	t.updateReaderPieces()
+	t.updateAllPiecePriorities("Torrent.readersChanged")
+}
+
+func (t *Torrent) updateReaderPieces() {
+	t._readerNowPieces, t._readerReadaheadPieces = t.readerPiecePriorities()
+}
+
+func (t *Torrent) readerPosChanged(from, to pieceRange) {
+	if from == to {
+		return
+	}
+	t.updateReaderPieces()
+	// Order the ranges, high and low.
+	l, h := from, to
+	if l.begin > h.begin {
+		l, h = h, l
+	}
+	if l.end < h.begin {
+		// Two distinct ranges.
+		t.updatePiecePriorities(l.begin, l.end, "Torrent.readerPosChanged")
+		t.updatePiecePriorities(h.begin, h.end, "Torrent.readerPosChanged")
+	} else {
+		// Ranges overlap.
+		end := l.end
+		if h.end > end {
+			end = h.end
+		}
+		t.updatePiecePriorities(l.begin, end, "Torrent.readerPosChanged")
+	}
+}
+
+func (t *Torrent) maybeNewConns() {
+	// Tickle the accept routine.
+	t.cl.event.Broadcast()
+	t.openNewConns()
+}
+
+func (t *Torrent) piecePriorityChanged(piece pieceIndex, reason string) {
+	if t._pendingPieces.Contains(uint32(piece)) {
+		t.iterPeers(func(c *Peer) {
+			// if c.requestState.Interested {
+			// 	return
+			// }
+			if !c.isLowOnRequests() {
+				return
+			}
+			if !c.peerHasPiece(piece) {
+				return
+			}
+			if c.requestState.Interested && c.peerChoking && !c.peerAllowedFast.Contains(piece) {
+				return
+			}
+			c.updateRequests(reason)
+		})
+	}
+	t.maybeNewConns()
+	t.publishPieceChange(piece)
+}
+
+func (t *Torrent) updatePiecePriority(piece pieceIndex, reason string) {
+	if !t.closed.IsSet() {
+		// It would be possible to filter on pure-priority changes here to avoid churning the piece
+		// request order.
+		t.updatePieceRequestOrder(piece)
+	}
+	p := &t.pieces[piece]
+	newPrio := p.uncachedPriority()
+	// t.logger.Printf("torrent %p: piece %d: uncached priority: %v", t, piece, newPrio)
+	if newPrio == PiecePriorityNone {
+		if !t._pendingPieces.CheckedRemove(uint32(piece)) {
+			return
+		}
+	} else {
+		if !t._pendingPieces.CheckedAdd(uint32(piece)) {
+			return
+		}
+	}
+	t.piecePriorityChanged(piece, reason)
+}
+
+func (t *Torrent) updateAllPiecePriorities(reason string) {
+	t.updatePiecePriorities(0, t.numPieces(), reason)
+}
+
+// Update all piece priorities in one hit. This function should have the same
+// output as updatePiecePriority, but across all pieces.
+func (t *Torrent) updatePiecePriorities(begin, end pieceIndex, reason string) {
+	for i := begin; i < end; i++ {
+		t.updatePiecePriority(i, reason)
+	}
+}
+
+// Returns the range of pieces [begin, end) that contains the extent of bytes.
+func (t *Torrent) byteRegionPieces(off, size int64) (begin, end pieceIndex) {
+	if off >= t.length() {
+		return
+	}
+	if off < 0 {
+		size += off
+		off = 0
+	}
+	if size <= 0 {
+		return
+	}
+	begin = pieceIndex(off / t.info.PieceLength)
+	end = pieceIndex((off + size + t.info.PieceLength - 1) / t.info.PieceLength)
+	if end > pieceIndex(t.info.NumPieces()) {
+		end = pieceIndex(t.info.NumPieces())
+	}
+	return
+}
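+
+// A worked example of the mapping above (editor's sketch with hypothetical
+// sizes, mirroring byteRegionPieces rather than calling it): with 64-byte
+// pieces, the extent off=100, size=50 covers bytes [100, 150), so
+// begin = 100/64 = 1 and end = (100+50+63)/64 = 3, i.e. pieces [1, 3).
+func exampleByteRegionPieces() (begin, end int64) {
+	const pieceLength, off, size = 64, 100, 50
+	begin = off / pieceLength
+	end = (off + size + pieceLength - 1) / pieceLength
+	return // begin == 1, end == 3
+}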
+
+// Calls f with the piece range [begin, end) read by each reader, returning
+// true if every invocation returned true (i.e. nothing broke out early). The
+// regions are deliberately not merged, as some callers depend on this method
+// to enumerate readers individually.
+func (t *Torrent) forReaderOffsetPieces(f func(begin, end pieceIndex) (more bool)) (all bool) {
+	for r := range t.readers {
+		p := r.pieces
+		if p.begin >= p.end {
+			continue
+		}
+		if !f(p.begin, p.end) {
+			return false
+		}
+	}
+	return true
+}
+
+func (t *Torrent) piecePriority(piece pieceIndex) piecePriority {
+	return t.piece(piece).uncachedPriority()
+}
+
+func (t *Torrent) pendRequest(req RequestIndex) {
+	t.piece(t.pieceIndexOfRequestIndex(req)).pendChunkIndex(req % t.chunksPerRegularPiece())
+}
+
+func (t *Torrent) pieceCompletionChanged(piece pieceIndex, reason string) {
+	t.cl.event.Broadcast()
+	if t.pieceComplete(piece) {
+		t.onPieceCompleted(piece)
+	} else {
+		t.onIncompletePiece(piece)
+	}
+	t.updatePiecePriority(piece, reason)
+}
+
+func (t *Torrent) numReceivedConns() (ret int) {
+	for c := range t.conns {
+		if c.Discovery == PeerSourceIncoming {
+			ret++
+		}
+	}
+	return
+}
+
+func (t *Torrent) numOutgoingConns() (ret int) {
+	for c := range t.conns {
+		if c.outgoing {
+			ret++
+		}
+	}
+	return
+}
+
+func (t *Torrent) maxHalfOpen() int {
+	// Note that if we somehow exceed the maximum established conns, we want
+	// the negative value to have an effect.
+	establishedHeadroom := int64(t.maxEstablishedConns - len(t.conns))
+	extraIncoming := int64(t.numReceivedConns() - t.maxEstablishedConns/2)
+	// We want to allow some experimentation with new peers, and to try to
+	// upset an oversupply of received connections.
+	return int(min(
+		max(5, extraIncoming)+establishedHeadroom,
+		int64(t.cl.config.HalfOpenConnsPerTorrent),
+	))
+}
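+
+// Editor's note with hypothetical numbers: with maxEstablishedConns = 50 and
+// 30 established conns of which 20 were received, establishedHeadroom is
+// 50-30 = 20 and extraIncoming is 20-25 = -5, so the cap is
+// min(max(5, -5)+20, HalfOpenConnsPerTorrent) = min(25, limit). Once
+// received conns exceed half the maximum by more than 5, extraIncoming lifts
+// the first term, which is the "upset an oversupply" behaviour noted above.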
+
+func (t *Torrent) openNewConns() (initiated int) {
+	defer t.updateWantPeersEvent()
+	for t.peers.Len() != 0 {
+		if !t.wantOutgoingConns() {
+			return
+		}
+		if len(t.halfOpen) >= t.maxHalfOpen() {
+			return
+		}
+		if len(t.cl.dialers) == 0 {
+			return
+		}
+		if t.cl.numHalfOpen >= t.cl.config.TotalHalfOpenConns {
+			return
+		}
+		p := t.peers.PopMax()
+		opts := outgoingConnOpts{
+			peerInfo:                 p,
+			t:                        t,
+			requireRendezvous:        false,
+			skipHolepunchRendezvous:  false,
+			receivedHolepunchConnect: false,
+			HeaderObfuscationPolicy:  t.cl.config.HeaderObfuscationPolicy,
+		}
+		initiateConn(opts, false)
+		initiated++
+	}
+	return
+}
+
+func (t *Torrent) updatePieceCompletion(piece pieceIndex) bool {
+	p := t.piece(piece)
+	uncached := t.pieceCompleteUncached(piece)
+	cached := p.completion()
+	changed := cached != uncached
+	complete := uncached.Complete
+	p.storageCompletionOk = uncached.Ok
+	x := uint32(piece)
+	if complete {
+		t._completedPieces.Add(x)
+		t.openNewConns()
+	} else {
+		t._completedPieces.Remove(x)
+	}
+	p.t.updatePieceRequestOrder(piece)
+	t.updateComplete()
+	if complete && len(p.dirtiers) != 0 {
+		t.logger.Printf("marked piece %v complete but still has dirtiers", piece)
+	}
+	if changed {
+		log.Fstr("piece %d completion changed: %+v -> %+v", piece, cached, uncached).LogLevel(log.Debug, t.logger)
+		t.pieceCompletionChanged(piece, "Torrent.updatePieceCompletion")
+	}
+	return changed
+}
+
+// Non-blocking read. Client lock is not required.
+func (t *Torrent) readAt(b []byte, off int64) (n int, err error) {
+	for len(b) != 0 {
+		p := &t.pieces[off/t.info.PieceLength]
+		p.waitNoPendingWrites()
+		var n1 int
+		n1, err = p.Storage().ReadAt(b, off-p.Info().Offset())
+		if n1 == 0 {
+			break
+		}
+		off += int64(n1)
+		n += n1
+		b = b[n1:]
+	}
+	return
+}
+
+// Returns an error if the metadata was completed, but couldn't be set for some reason. Blame it on
+// the last peer to contribute. TODO: Actually we shouldn't blame peers for failure to open storage
+// etc. Also we should probably cache metadata pieces per-Peer, to isolate failure appropriately.
+func (t *Torrent) maybeCompleteMetadata() error {
+	if t.haveInfo() {
+		// Nothing to do.
+		return nil
+	}
+	if !t.haveAllMetadataPieces() {
+		// Don't have enough metadata pieces.
+		return nil
+	}
+	err := t.setInfoBytesLocked(t.metadataBytes)
+	if err != nil {
+		t.invalidateMetadata()
+		return fmt.Errorf("error setting info bytes: %s", err)
+	}
+	if t.cl.config.Debug {
+		t.logger.Printf("%s: got metadata from peers", t)
+	}
+	return nil
+}
+
+func (t *Torrent) readerPiecePriorities() (now, readahead bitmap.Bitmap) {
+	t.forReaderOffsetPieces(func(begin, end pieceIndex) bool {
+		if end > begin {
+			now.Add(bitmap.BitIndex(begin))
+			readahead.AddRange(bitmap.BitRange(begin)+1, bitmap.BitRange(end))
+		}
+		return true
+	})
+	return
+}
+
+func (t *Torrent) needData() bool {
+	if t.closed.IsSet() {
+		return false
+	}
+	if !t.haveInfo() {
+		return true
+	}
+	return !t._pendingPieces.IsEmpty()
+}
+
+func appendMissingStrings(old, new []string) (ret []string) {
+	ret = old
+new:
+	for _, n := range new {
+		for _, o := range old {
+			if o == n {
+				continue new
+			}
+		}
+		ret = append(ret, n)
+	}
+	return
+}
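+
+// For illustration (editor's sketch): merging tracker URL lists preserves
+// order and skips entries already present.
+func exampleAppendMissingStrings() []string {
+	return appendMissingStrings(
+		[]string{"udp://a", "udp://b"},
+		[]string{"udp://b", "udp://c"},
+	) // == ["udp://a", "udp://b", "udp://c"]
+}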
+
+func appendMissingTrackerTiers(existing [][]string, minNumTiers int) (ret [][]string) {
+	ret = existing
+	for minNumTiers > len(ret) {
+		ret = append(ret, nil)
+	}
+	return
+}
+
+func (t *Torrent) addTrackers(announceList [][]string) {
+	fullAnnounceList := &t.metainfo.AnnounceList
+	t.metainfo.AnnounceList = appendMissingTrackerTiers(*fullAnnounceList, len(announceList))
+	for tierIndex, trackerURLs := range announceList {
+		(*fullAnnounceList)[tierIndex] = appendMissingStrings((*fullAnnounceList)[tierIndex], trackerURLs)
+	}
+	t.startMissingTrackerScrapers()
+	t.updateWantPeersEvent()
+}
+
+// Don't call this before the info is available.
+func (t *Torrent) bytesCompleted() int64 {
+	if !t.haveInfo() {
+		return 0
+	}
+	return t.length() - t.bytesLeft()
+}
+
+func (t *Torrent) SetInfoBytes(b []byte) (err error) {
+	t.cl.lock()
+	defer t.cl.unlock()
+	return t.setInfoBytesLocked(b)
+}
+
+// Returns true if the connection was removed from t.conns.
+func (t *Torrent) deletePeerConn(c *PeerConn) (ret bool) {
+	if !c.closed.IsSet() {
+		panic("connection is not closed")
+		// There are behaviours prevented by the closed state that will fail
+		// if the connection has been deleted.
+	}
+	_, ret = t.conns[c]
+	delete(t.conns, c)
+	// Avoid adding a drop event more than once. Probably we should track whether we've generated
+	// the drop event against the PexConnState instead.
+	if ret {
+		if !t.cl.config.DisablePEX {
+			t.pex.Drop(c)
+		}
+	}
+	torrent.Add("deleted connections", 1)
+	c.deleteAllRequests("Torrent.deletePeerConn")
+	t.assertPendingRequests()
+	if t.numActivePeers() == 0 && len(t.connsWithAllPieces) != 0 {
+		panic(t.connsWithAllPieces)
+	}
+	return
+}
+
+func (t *Torrent) decPeerPieceAvailability(p *Peer) {
+	if t.deleteConnWithAllPieces(p) {
+		return
+	}
+	if !t.haveInfo() {
+		return
+	}
+	p.peerPieces().Iterate(func(i uint32) bool {
+		p.t.decPieceAvailability(pieceIndex(i))
+		return true
+	})
+}
+
+func (t *Torrent) assertPendingRequests() {
+	if !check.Enabled {
+		return
+	}
+	// var actual pendingRequests
+	// if t.haveInfo() {
+	// 	actual.m = make([]int, t.numChunks())
+	// }
+	// t.iterPeers(func(p *Peer) {
+	// 	p.requestState.Requests.Iterate(func(x uint32) bool {
+	// 		actual.Inc(x)
+	// 		return true
+	// 	})
+	// })
+	// diff := cmp.Diff(actual.m, t.pendingRequests.m)
+	// if diff != "" {
+	// 	panic(diff)
+	// }
+}
+
+func (t *Torrent) dropConnection(c *PeerConn) {
+	t.cl.event.Broadcast()
+	c.close()
+	if t.deletePeerConn(c) {
+		t.openNewConns()
+	}
+}
+
+// Whether we want more peers, in the sense of contact information for
+// dialing out.
+func (t *Torrent) wantPeers() bool {
+	if t.closed.IsSet() {
+		return false
+	}
+	if t.peers.Len() > t.cl.config.TorrentPeersLowWater {
+		return false
+	}
+	return t.wantOutgoingConns()
+}
+
+func (t *Torrent) updateWantPeersEvent() {
+	if t.wantPeers() {
+		t.wantPeersEvent.Set()
+	} else {
+		t.wantPeersEvent.Clear()
+	}
+}
+
+// Returns whether the client should make an effort to seed the torrent.
+func (t *Torrent) seeding() bool {
+	cl := t.cl
+	if t.closed.IsSet() {
+		return false
+	}
+	if t.dataUploadDisallowed {
+		return false
+	}
+	if cl.config.NoUpload {
+		return false
+	}
+	if !cl.config.Seed {
+		return false
+	}
+	if cl.config.DisableAggressiveUpload && t.needData() {
+		return false
+	}
+	return true
+}
+
+func (t *Torrent) onWebRtcConn(
+	c datachannel.ReadWriteCloser,
+	dcc webtorrent.DataChannelContext,
+) {
+	defer c.Close()
+	netConn := webrtcNetConn{
+		ReadWriteCloser:    c,
+		DataChannelContext: dcc,
+	}
+	peerRemoteAddr := netConn.RemoteAddr()
+	//t.logger.Levelf(log.Critical, "onWebRtcConn remote addr: %v", peerRemoteAddr)
+	if t.cl.badPeerAddr(peerRemoteAddr) {
+		return
+	}
+	localAddrIpPort := missinggo.IpPortFromNetAddr(netConn.LocalAddr())
+	pc, err := t.cl.initiateProtocolHandshakes(
+		context.Background(),
+		netConn,
+		t,
+		false,
+		newConnectionOpts{
+			outgoing:        dcc.LocalOffered,
+			remoteAddr:      peerRemoteAddr,
+			localPublicAddr: localAddrIpPort,
+			network:         webrtcNetwork,
+			connString:      fmt.Sprintf("webrtc offer_id %x: %v", dcc.OfferId, regularNetConnPeerConnConnString(netConn)),
+		},
+	)
+	if err != nil {
+		t.logger.WithDefaultLevel(log.Error).Printf("error in handshaking webrtc connection: %v", err)
+		return
+	}
+	if dcc.LocalOffered {
+		pc.Discovery = PeerSourceTracker
+	} else {
+		pc.Discovery = PeerSourceIncoming
+	}
+	pc.conn.SetWriteDeadline(time.Time{})
+	t.cl.lock()
+	defer t.cl.unlock()
+	err = t.runHandshookConn(pc)
+	if err != nil {
+		t.logger.WithDefaultLevel(log.Debug).Printf("error running handshook webrtc conn: %v", err)
+	}
+}
+
+func (t *Torrent) logRunHandshookConn(pc *PeerConn, logAll bool, level log.Level) {
+	err := t.runHandshookConn(pc)
+	if err != nil || logAll {
+		t.logger.WithDefaultLevel(level).Levelf(log.ErrorLevel(err), "error running handshook conn: %v", err)
+	}
+}
+
+func (t *Torrent) runHandshookConnLoggingErr(pc *PeerConn) {
+	t.logRunHandshookConn(pc, false, log.Debug)
+}
+
+func (t *Torrent) startWebsocketAnnouncer(u url.URL) torrentTrackerAnnouncer {
+	wtc, release := t.cl.websocketTrackers.Get(u.String(), t.infoHash)
+	// This needs to run before the Torrent is dropped from the Client, to prevent a new
+	// webtorrent.TrackerClient being created for the same info hash before the old one is cleaned up.
+	t.onClose = append(t.onClose, release)
+	wst := websocketTrackerStatus{u, wtc}
+	go func() {
+		err := wtc.Announce(tracker.Started, t.infoHash)
+		if err != nil {
+			t.logger.WithDefaultLevel(log.Warning).Printf(
+				"error in initial announce to %q: %v",
+				u.String(), err,
+			)
+		}
+	}()
+	return wst
+}
+
+func (t *Torrent) startScrapingTracker(_url string) {
+	if _url == "" {
+		return
+	}
+	u, err := url.Parse(_url)
+	if err != nil {
+		// URLs with a leading '*' appear to be a uTorrent convention to disable trackers.
+		if _url[0] != '*' {
+			t.logger.Levelf(log.Warning, "error parsing tracker url: %v", err)
+		}
+		return
+	}
+	if u.Scheme == "udp" {
+		u.Scheme = "udp4"
+		t.startScrapingTracker(u.String())
+		u.Scheme = "udp6"
+		t.startScrapingTracker(u.String())
+		return
+	}
+	if _, ok := t.trackerAnnouncers[_url]; ok {
+		return
+	}
+	sl := func() torrentTrackerAnnouncer {
+		switch u.Scheme {
+		case "ws", "wss":
+			if t.cl.config.DisableWebtorrent {
+				return nil
+			}
+			return t.startWebsocketAnnouncer(*u)
+		case "udp4":
+			if t.cl.config.DisableIPv4Peers || t.cl.config.DisableIPv4 {
+				return nil
+			}
+		case "udp6":
+			if t.cl.config.DisableIPv6 {
+				return nil
+			}
+		}
+		newAnnouncer := &trackerScraper{
+			u:               *u,
+			t:               t,
+			lookupTrackerIp: t.cl.config.LookupTrackerIp,
+		}
+		go newAnnouncer.Run()
+		return newAnnouncer
+	}()
+	if sl == nil {
+		return
+	}
+	if t.trackerAnnouncers == nil {
+		t.trackerAnnouncers = make(map[string]torrentTrackerAnnouncer)
+	}
+	t.trackerAnnouncers[_url] = sl
+}
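+
+// Editor's note: a plain "udp" tracker URL therefore registers two
+// announcers, one rewritten to udp4 and one to udp6, each passing through
+// this function once more. The trackerAnnouncers map is keyed on the
+// rewritten URL, so repeated calls with the same udp URL are no-ops.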
+
+// Adds and starts tracker scrapers for tracker URLs that aren't already
+// running.
+func (t *Torrent) startMissingTrackerScrapers() {
+	if t.cl.config.DisableTrackers {
+		return
+	}
+	t.startScrapingTracker(t.metainfo.Announce)
+	for _, tier := range t.metainfo.AnnounceList {
+		for _, url := range tier {
+			t.startScrapingTracker(url)
+		}
+	}
+}
+
+// Returns an AnnounceRequest with fields filled out to defaults and current
+// values.
+func (t *Torrent) announceRequest(event tracker.AnnounceEvent) tracker.AnnounceRequest {
+	// Note that IPAddress is not set. It's set for UDP inside the tracker code, since it's
+	// dependent on the network in use.
+	return tracker.AnnounceRequest{
+		Event: event,
+		NumWant: func() int32 {
+			if t.wantPeers() && len(t.cl.dialers) > 0 {
+				return 200 // Windows has a UDP packet size limit. See: https://github.com/anacrolix/torrent/issues/764
+			} else {
+				return 0
+			}
+		}(),
+		Port:     uint16(t.cl.incomingPeerPort()),
+		PeerId:   t.cl.peerID,
+		InfoHash: t.infoHash,
+		Key:      t.cl.announceKey(),
+
+		// The following are vaguely described in BEP 3.
+
+		Left:     t.bytesLeftAnnounce(),
+		Uploaded: t.stats.BytesWrittenData.Int64(),
+		// There's no mention of wasted or unwanted download in the BEP.
+		Downloaded: t.stats.BytesReadUsefulData.Int64(),
+	}
+}
+
+// Adds peers revealed in an announce until the announce ends, or we have
+// enough peers.
+func (t *Torrent) consumeDhtAnnouncePeers(pvs <-chan dht.PeersValues) {
+	cl := t.cl
+	for v := range pvs {
+		cl.lock()
+		added := 0
+		for _, cp := range v.Peers {
+			if cp.Port == 0 {
+				// Can't do anything with this.
+				continue
+			}
+			if t.addPeer(PeerInfo{
+				Addr:   ipPortAddr{cp.IP, cp.Port},
+				Source: PeerSourceDhtGetPeers,
+			}) {
+				added++
+			}
+		}
+		cl.unlock()
+		// if added != 0 {
+		// 	log.Printf("added %v peers from dht for %v", added, t.InfoHash().HexString())
+		// }
+	}
+}
+
+// Announce using the provided DHT server. Peers are consumed automatically. done is closed when the
+// announce ends. stop will force the announce to end.
+func (t *Torrent) AnnounceToDht(s DhtServer) (done <-chan struct{}, stop func(), err error) {
+	ps, err := s.Announce(t.infoHash, t.cl.incomingPeerPort(), true)
+	if err != nil {
+		return
+	}
+	_done := make(chan struct{})
+	done = _done
+	stop = ps.Close
+	go func() {
+		t.consumeDhtAnnouncePeers(ps.Peers())
+		close(_done)
+	}()
+	return
+}
+
+func (t *Torrent) timeboxedAnnounceToDht(s DhtServer) error {
+	_, stop, err := t.AnnounceToDht(s)
+	if err != nil {
+		return err
+	}
+	select {
+	case <-t.closed.Done():
+	case <-time.After(5 * time.Minute):
+	}
+	stop()
+	return nil
+}
+
+func (t *Torrent) dhtAnnouncer(s DhtServer) {
+	cl := t.cl
+	cl.lock()
+	defer cl.unlock()
+	for {
+		for {
+			if t.closed.IsSet() {
+				return
+			}
+			// We're also announcing ourselves as a listener, so we don't just want peer addresses.
+			// TODO: We can include the announce_peer step depending on whether we can receive
+			// inbound connections. We should probably only announce once every 15 mins too.
+			if !t.wantAnyConns() {
+				goto wait
+			}
+			// TODO: Determine if there's a listener on the port we're announcing.
+			if len(cl.dialers) == 0 && len(cl.listeners) == 0 {
+				goto wait
+			}
+			break
+		wait:
+			cl.event.Wait()
+		}
+		func() {
+			t.numDHTAnnounces++
+			cl.unlock()
+			defer cl.lock()
+			err := t.timeboxedAnnounceToDht(s)
+			if err != nil {
+				t.logger.WithDefaultLevel(log.Warning).Printf("error announcing %q to DHT: %s", t, err)
+			}
+		}()
+	}
+}
+
+func (t *Torrent) addPeers(peers []PeerInfo) (added int) {
+	for _, p := range peers {
+		if t.addPeer(p) {
+			added++
+		}
+	}
+	return
+}
+
+// The returned TorrentStats may require alignment in memory. See
+// https://github.com/anacrolix/torrent/issues/383.
+func (t *Torrent) Stats() TorrentStats {
+	t.cl.rLock()
+	defer t.cl.rUnlock()
+	return t.statsLocked()
+}
+
+func (t *Torrent) statsLocked() (ret TorrentStats) {
+	ret.ActivePeers = len(t.conns)
+	ret.HalfOpenPeers = len(t.halfOpen)
+	ret.PendingPeers = t.peers.Len()
+	ret.TotalPeers = t.numTotalPeers()
+	ret.ConnectedSeeders = 0
+	for c := range t.conns {
+		if all, ok := c.peerHasAllPieces(); all && ok {
+			ret.ConnectedSeeders++
+		}
+	}
+	ret.ConnStats = t.stats.Copy()
+	ret.PiecesComplete = t.numPiecesCompleted()
+	return
+}
+
+// The total number of peers in the torrent.
+func (t *Torrent) numTotalPeers() int {
+	peers := make(map[string]struct{})
+	for conn := range t.conns {
+		ra := conn.conn.RemoteAddr()
+		if ra == nil {
+			// It's been closed and doesn't support RemoteAddr.
+			continue
+		}
+		peers[ra.String()] = struct{}{}
+	}
+	for addr := range t.halfOpen {
+		peers[addr] = struct{}{}
+	}
+	t.peers.Each(func(peer PeerInfo) {
+		peers[peer.Addr.String()] = struct{}{}
+	})
+	return len(peers)
+}
+
+// Reconcile bytes transferred before connection was associated with a
+// torrent.
+func (t *Torrent) reconcileHandshakeStats(c *PeerConn) {
+	if c._stats != (ConnStats{
+		// Handshakes should only increment these fields:
+		BytesWritten: c._stats.BytesWritten,
+		BytesRead:    c._stats.BytesRead,
+	}) {
+		panic("bad stats")
+	}
+	c.postHandshakeStats(func(cs *ConnStats) {
+		cs.BytesRead.Add(c._stats.BytesRead.Int64())
+		cs.BytesWritten.Add(c._stats.BytesWritten.Int64())
+	})
+	c.reconciledHandshakeStats = true
+}
+
+// Adds the connection to the torrent. Returns nil if the connection was
+// added, or an error explaining why it was rejected.
+func (t *Torrent) addPeerConn(c *PeerConn) (err error) {
+	defer func() {
+		if err == nil {
+			torrent.Add("added connections", 1)
+		}
+	}()
+	if t.closed.IsSet() {
+		return errors.New("torrent closed")
+	}
+	for c0 := range t.conns {
+		if c.PeerID != c0.PeerID {
+			continue
+		}
+		if !t.cl.config.DropDuplicatePeerIds {
+			continue
+		}
+		if c.hasPreferredNetworkOver(c0) {
+			c0.close()
+			t.deletePeerConn(c0)
+		} else {
+			return errors.New("existing connection preferred")
+		}
+	}
+	if len(t.conns) >= t.maxEstablishedConns {
+		numOutgoing := t.numOutgoingConns()
+		numIncoming := len(t.conns) - numOutgoing
+		c := t.worstBadConn(worseConnLensOpts{
+			// We've already established that we have too many connections at this point, so we just
+			// need to match what kind we have too many of vs. what we're trying to add now.
+			incomingIsBad: (numIncoming-numOutgoing > 1) && c.outgoing,
+			outgoingIsBad: (numOutgoing-numIncoming > 1) && !c.outgoing,
+		})
+		if c == nil {
+			return errors.New("don't want conn")
+		}
+		c.close()
+		t.deletePeerConn(c)
+	}
+	if len(t.conns) >= t.maxEstablishedConns {
+		panic(len(t.conns))
+	}
+	t.conns[c] = struct{}{}
+	t.cl.event.Broadcast()
+	// We'll never receive the "p" extended handshake parameter.
+	if !t.cl.config.DisablePEX && !c.PeerExtensionBytes.SupportsExtended() {
+		t.pex.Add(c)
+	}
+	return nil
+}
+
+func (t *Torrent) newConnsAllowed() bool {
+	if !t.networkingEnabled.Bool() {
+		return false
+	}
+	if t.closed.IsSet() {
+		return false
+	}
+	if !t.needData() && (!t.seeding() || !t.haveAnyPieces()) {
+		return false
+	}
+	return true
+}
+
+func (t *Torrent) wantAnyConns() bool {
+	if !t.networkingEnabled.Bool() {
+		return false
+	}
+	if t.closed.IsSet() {
+		return false
+	}
+	if !t.needData() && (!t.seeding() || !t.haveAnyPieces()) {
+		return false
+	}
+	return len(t.conns) < t.maxEstablishedConns
+}
+
+func (t *Torrent) wantOutgoingConns() bool {
+	if !t.newConnsAllowed() {
+		return false
+	}
+	if len(t.conns) < t.maxEstablishedConns {
+		return true
+	}
+	numIncomingConns := len(t.conns) - t.numOutgoingConns()
+	return t.worstBadConn(worseConnLensOpts{
+		incomingIsBad: numIncomingConns-t.numOutgoingConns() > 1,
+		outgoingIsBad: false,
+	}) != nil
+}
+
+func (t *Torrent) wantIncomingConns() bool {
+	if !t.newConnsAllowed() {
+		return false
+	}
+	if len(t.conns) < t.maxEstablishedConns {
+		return true
+	}
+	numIncomingConns := len(t.conns) - t.numOutgoingConns()
+	return t.worstBadConn(worseConnLensOpts{
+		incomingIsBad: false,
+		outgoingIsBad: t.numOutgoingConns()-numIncomingConns > 1,
+	}) != nil
+}
+
+func (t *Torrent) SetMaxEstablishedConns(max int) (oldMax int) {
+	t.cl.lock()
+	defer t.cl.unlock()
+	oldMax = t.maxEstablishedConns
+	t.maxEstablishedConns = max
+	wcs := worseConnSlice{
+		conns: t.appendConns(nil, func(*PeerConn) bool {
+			return true
+		}),
+	}
+	wcs.initKeys(worseConnLensOpts{})
+	heap.Init(&wcs)
+	for len(t.conns) > t.maxEstablishedConns && wcs.Len() > 0 {
+		t.dropConnection(heap.Pop(&wcs).(*PeerConn))
+	}
+	t.openNewConns()
+	return oldMax
+}
+
+func (t *Torrent) pieceHashed(piece pieceIndex, passed bool, hashIoErr error) {
+	t.logger.LazyLog(log.Debug, func() log.Msg {
+		return log.Fstr("hashed piece %d (passed=%t)", piece, passed)
+	})
+	p := t.piece(piece)
+	p.numVerifies++
+	t.cl.event.Broadcast()
+	if t.closed.IsSet() {
+		return
+	}
+
+	// Don't score the first time a piece is hashed, it could be an initial check.
+	if p.storageCompletionOk {
+		if passed {
+			pieceHashedCorrect.Add(1)
+		} else {
+			log.Fmsg(
+				"piece %d failed hash: %d connections contributed", piece, len(p.dirtiers),
+			).AddValues(t, p).LogLevel(log.Debug, t.logger)
+
+			pieceHashedNotCorrect.Add(1)
+		}
+	}
+
+	p.marking = true
+	t.publishPieceChange(piece)
+	defer func() {
+		p.marking = false
+		t.publishPieceChange(piece)
+	}()
+
+	if passed {
+		if len(p.dirtiers) != 0 {
+			// Don't increment stats above connection-level for every involved connection.
+			t.allStats((*ConnStats).incrementPiecesDirtiedGood)
+		}
+		for c := range p.dirtiers {
+			c._stats.incrementPiecesDirtiedGood()
+		}
+		t.clearPieceTouchers(piece)
+		hasDirty := p.hasDirtyChunks()
+		t.cl.unlock()
+		if hasDirty {
+			p.Flush() // You can be synchronous here!
+		}
+		err := p.Storage().MarkComplete()
+		if err != nil {
+			t.logger.Levelf(log.Warning, "%T: error marking piece complete %d: %s", t.storage, piece, err)
+		}
+		t.cl.lock()
+
+		if t.closed.IsSet() {
+			return
+		}
+		t.pendAllChunkSpecs(piece)
+	} else {
+		if len(p.dirtiers) != 0 && p.allChunksDirty() && hashIoErr == nil {
+			// Peers contributed to all the data for this piece hash failure, and the failure was
+			// not due to errors in the storage (such as data being dropped in a cache).
+
+			// Increment Torrent and above stats, and then specific connections.
+			t.allStats((*ConnStats).incrementPiecesDirtiedBad)
+			for c := range p.dirtiers {
+				// Y u do dis peer?!
+				c.stats().incrementPiecesDirtiedBad()
+			}
+
+			bannableTouchers := make([]*Peer, 0, len(p.dirtiers))
+			for c := range p.dirtiers {
+				if !c.trusted {
+					bannableTouchers = append(bannableTouchers, c)
+				}
+			}
+			t.clearPieceTouchers(piece)
+			slices.Sort(bannableTouchers, connLessTrusted)
+
+			if t.cl.config.Debug {
+				t.logger.Printf(
+					"bannable conns by trust for piece %d: %v",
+					piece,
+					func() (ret []connectionTrust) {
+						for _, c := range bannableTouchers {
+							ret = append(ret, c.trust())
+						}
+						return
+					}(),
+				)
+			}
+
+			if len(bannableTouchers) >= 1 {
+				c := bannableTouchers[0]
+				if len(bannableTouchers) != 1 {
+					t.logger.Levelf(log.Debug, "would have banned %v for touching piece %v after failed piece check", c.remoteIp(), piece)
+				} else {
+					// Turns out it's still useful to ban peers like this because if there's only a
+					// single peer for a piece, and we never progress that piece to completion, we
+					// will never smart-ban them. Discovered in
+					// https://github.com/anacrolix/torrent/issues/715.
+					t.logger.Levelf(log.Warning, "banning %v for being sole dirtier of piece %v after failed piece check", c, piece)
+					c.ban()
+				}
+			}
+		}
+		t.onIncompletePiece(piece)
+		p.Storage().MarkNotComplete()
+	}
+	t.updatePieceCompletion(piece)
+}
+
+func (t *Torrent) cancelRequestsForPiece(piece pieceIndex) {
+	start := t.pieceRequestIndexOffset(piece)
+	end := start + t.pieceNumChunks(piece)
+	for ri := start; ri < end; ri++ {
+		t.cancelRequest(ri)
+	}
+}
+
+func (t *Torrent) onPieceCompleted(piece pieceIndex) {
+	t.pendAllChunkSpecs(piece)
+	t.cancelRequestsForPiece(piece)
+	t.piece(piece).readerCond.Broadcast()
+	for conn := range t.conns {
+		conn.have(piece)
+		t.maybeDropMutuallyCompletePeer(conn)
+	}
+}
+
+// Called when a piece is found to be not complete.
+func (t *Torrent) onIncompletePiece(piece pieceIndex) {
+	if t.pieceAllDirty(piece) {
+		t.pendAllChunkSpecs(piece)
+	}
+	if !t.wantPieceIndex(piece) {
+		// t.logger.Printf("piece %d incomplete and unwanted", piece)
+		return
+	}
+	// We could drop any connections here that we've told we have a piece that
+	// we don't actually have. But there's a test failure, and it seems clients
+	// don't care if you request pieces you already claim to have. Pruning bad
+	// connections might just remove any connections that aren't treating us
+	// favourably anyway.
+
+	// for c := range t.conns {
+	// 	if c.sentHave(piece) {
+	// 		c.drop()
+	// 	}
+	// }
+	t.iterPeers(func(conn *Peer) {
+		if conn.peerHasPiece(piece) {
+			conn.updateRequests("piece incomplete")
+		}
+	})
+}
+
+func (t *Torrent) tryCreateMorePieceHashers() {
+	for !t.closed.IsSet() && t.activePieceHashes < t.cl.config.PieceHashersPerTorrent && t.tryCreatePieceHasher() {
+	}
+}
+
+func (t *Torrent) tryCreatePieceHasher() bool {
+	if t.storage == nil {
+		return false
+	}
+	pi, ok := t.getPieceToHash()
+	if !ok {
+		return false
+	}
+	p := t.piece(pi)
+	t.piecesQueuedForHash.Remove(bitmap.BitIndex(pi))
+	p.hashing = true
+	t.publishPieceChange(pi)
+	t.updatePiecePriority(pi, "Torrent.tryCreatePieceHasher")
+	t.storageLock.RLock()
+	t.activePieceHashes++
+	go t.pieceHasher(pi)
+	return true
+}
+
+func (t *Torrent) getPieceToHash() (ret pieceIndex, ok bool) {
+	t.piecesQueuedForHash.IterTyped(func(i pieceIndex) bool {
+		if t.piece(i).hashing {
+			return true
+		}
+		ret = i
+		ok = true
+		return false
+	})
+	return
+}
+
+func (t *Torrent) dropBannedPeers() {
+	t.iterPeers(func(p *Peer) {
+		remoteIp := p.remoteIp()
+		if remoteIp == nil {
+			if p.bannableAddr.Ok {
+				t.logger.WithDefaultLevel(log.Debug).Printf("can't get remote ip for peer %v", p)
+			}
+			return
+		}
+		netipAddr := netip.MustParseAddr(remoteIp.String())
+		if Some(netipAddr) != p.bannableAddr {
+			t.logger.WithDefaultLevel(log.Debug).Printf(
+				"peer remote ip does not match its bannable addr [peer=%v, remote ip=%v, bannable addr=%v]",
+				p, remoteIp, p.bannableAddr)
+		}
+		if _, ok := t.cl.badPeerIPs[netipAddr]; ok {
+			// Should this be a close?
+			p.drop()
+			t.logger.WithDefaultLevel(log.Debug).Printf("dropped %v for banned remote IP %v", p, netipAddr)
+		}
+	})
+}
+
+func (t *Torrent) pieceHasher(index pieceIndex) {
+	p := t.piece(index)
+	sum, failedPeers, copyErr := t.hashPiece(index)
+	correct := sum == *p.hash
+	switch copyErr {
+	case nil, io.EOF:
+	default:
+		log.Fmsg("piece %v (%s) hash failure copy error: %v", p, p.hash.HexString(), copyErr).Log(t.logger)
+	}
+	t.storageLock.RUnlock()
+	t.cl.lock()
+	defer t.cl.unlock()
+	if correct {
+		for peer := range failedPeers {
+			t.cl.banPeerIP(peer.AsSlice())
+			t.logger.WithDefaultLevel(log.Debug).Printf("smart banned %v for piece %v", peer, index)
+		}
+		t.dropBannedPeers()
+		for ri := t.pieceRequestIndexOffset(index); ri < t.pieceRequestIndexOffset(index+1); ri++ {
+			t.smartBanCache.ForgetBlock(ri)
+		}
+	}
+	p.hashing = false
+	t.pieceHashed(index, correct, copyErr)
+	t.updatePiecePriority(index, "Torrent.pieceHasher")
+	t.activePieceHashes--
+	t.tryCreateMorePieceHashers()
+}
+
+// Clears the record of which connections touched a piece, removing the
+// entries from both the piece and the connections.
+func (t *Torrent) clearPieceTouchers(pi pieceIndex) {
+	p := t.piece(pi)
+	for c := range p.dirtiers {
+		delete(c.peerTouchedPieces, pi)
+		delete(p.dirtiers, c)
+	}
+}
+
+func (t *Torrent) peersAsSlice() (ret []*Peer) {
+	t.iterPeers(func(p *Peer) {
+		ret = append(ret, p)
+	})
+	return
+}
+
+func (t *Torrent) queuePieceCheck(pieceIndex pieceIndex) {
+	piece := t.piece(pieceIndex)
+	if piece.queuedForHash() {
+		return
+	}
+	t.piecesQueuedForHash.Add(bitmap.BitIndex(pieceIndex))
+	t.publishPieceChange(pieceIndex)
+	t.updatePiecePriority(pieceIndex, "Torrent.queuePieceCheck")
+	t.tryCreateMorePieceHashers()
+}
+
+// Forces all the pieces to be re-hashed. See also Piece.VerifyData. This should not be called
+// before the Info is available.
+func (t *Torrent) VerifyData() {
+	for i := pieceIndex(0); i < t.NumPieces(); i++ {
+		t.Piece(i).VerifyData()
+	}
+}
+
+func (t *Torrent) connectingToPeerAddr(addrStr string) bool {
+	return len(t.halfOpen[addrStr]) != 0
+}
+
+func (t *Torrent) hasPeerConnForAddr(x PeerRemoteAddr) bool {
+	addrStr := x.String()
+	for c := range t.conns {
+		ra := c.RemoteAddr
+		if ra.String() == addrStr {
+			return true
+		}
+	}
+	return false
+}
+
+func (t *Torrent) getHalfOpenPath(
+	addrStr string,
+	attemptKey outgoingConnAttemptKey,
+) nestedmaps.Path[*PeerInfo] {
+	return nestedmaps.Next(nestedmaps.Next(nestedmaps.Begin(&t.halfOpen), addrStr), attemptKey)
+}
+
+func (t *Torrent) addHalfOpen(addrStr string, attemptKey *PeerInfo) {
+	path := t.getHalfOpenPath(addrStr, attemptKey)
+	if path.Exists() {
+		panic("should be unique")
+	}
+	path.Set(attemptKey)
+	t.cl.numHalfOpen++
+}
+
+// Start the process of connecting to the given peer for the given torrent if appropriate. I'm not
+// sure all the PeerInfo fields are being used.
+func initiateConn(
+	opts outgoingConnOpts,
+	ignoreLimits bool,
+) {
+	t := opts.t
+	peer := opts.peerInfo
+	if peer.Id == t.cl.peerID {
+		return
+	}
+	if t.cl.badPeerAddr(peer.Addr) && !peer.Trusted {
+		return
+	}
+	addr := peer.Addr
+	addrStr := addr.String()
+	if !ignoreLimits {
+		if t.connectingToPeerAddr(addrStr) {
+			return
+		}
+	}
+	if t.hasPeerConnForAddr(addr) {
+		return
+	}
+	attemptKey := &peer
+	t.addHalfOpen(addrStr, attemptKey)
+	go t.cl.outgoingConnection(
+		opts,
+		attemptKey,
+	)
+}
+
+// Adds a trusted, pending peer for each of the given Client's addresses. Typically used in tests to
+// quickly make one Client visible to the Torrent of another Client.
+func (t *Torrent) AddClientPeer(cl *Client) int {
+	return t.AddPeers(func() (ps []PeerInfo) {
+		for _, la := range cl.ListenAddrs() {
+			ps = append(ps, PeerInfo{
+				Addr:    la,
+				Trusted: true,
+			})
+		}
+		return
+	}())
+}
+
+// All stats that include this Torrent. Useful when we want to increment ConnStats but not for every
+// connection.
+func (t *Torrent) allStats(f func(*ConnStats)) {
+	f(&t.stats)
+	f(&t.cl.connStats)
+}
+
+func (t *Torrent) hashingPiece(i pieceIndex) bool {
+	return t.pieces[i].hashing
+}
+
+func (t *Torrent) pieceQueuedForHash(i pieceIndex) bool {
+	return t.piecesQueuedForHash.Get(bitmap.BitIndex(i))
+}
+
+func (t *Torrent) dialTimeout() time.Duration {
+	return reducedDialTimeout(t.cl.config.MinDialTimeout, t.cl.config.NominalDialTimeout, t.cl.config.HalfOpenConnsPerTorrent, t.peers.Len())
+}
+
+func (t *Torrent) piece(i int) *Piece {
+	return &t.pieces[i]
+}
+
+func (t *Torrent) onWriteChunkErr(err error) {
+	if t.userOnWriteChunkErr != nil {
+		go t.userOnWriteChunkErr(err)
+		return
+	}
+	t.logger.WithDefaultLevel(log.Critical).Printf("default chunk write error handler: disabling data download")
+	t.disallowDataDownloadLocked()
+}
+
+func (t *Torrent) DisallowDataDownload() {
+	t.cl.lock()
+	defer t.cl.unlock()
+	t.disallowDataDownloadLocked()
+}
+
+func (t *Torrent) disallowDataDownloadLocked() {
+	t.dataDownloadDisallowed.Set()
+	t.iterPeers(func(p *Peer) {
+		// Could check if peer request state is empty/not interested?
+		p.updateRequests("disallow data download")
+		p.cancelAllRequests()
+	})
+}
+
+func (t *Torrent) AllowDataDownload() {
+	t.cl.lock()
+	defer t.cl.unlock()
+	t.dataDownloadDisallowed.Clear()
+	t.iterPeers(func(p *Peer) {
+		p.updateRequests("allow data download")
+	})
+}
+
+// Enables uploading data, if it was disabled.
+func (t *Torrent) AllowDataUpload() {
+	t.cl.lock()
+	defer t.cl.unlock()
+	t.dataUploadDisallowed = false
+	t.iterPeers(func(p *Peer) {
+		p.updateRequests("allow data upload")
+	})
+}
+
+// Disables uploading data, if it was enabled.
+func (t *Torrent) DisallowDataUpload() {
+	t.cl.lock()
+	defer t.cl.unlock()
+	t.dataUploadDisallowed = true
+	for c := range t.conns {
+		// TODO: This doesn't look right. Shouldn't we tickle writers to choke peers or something instead?
+		c.updateRequests("disallow data upload")
+	}
+}
+
+// Sets a handler that is called if there's an error writing a chunk to local storage. By default,
+// or if nil, a critical message is logged, and data download is disabled.
+func (t *Torrent) SetOnWriteChunkError(f func(error)) {
+	t.cl.lock()
+	defer t.cl.unlock()
+	t.userOnWriteChunkErr = f
+}
+
+func (t *Torrent) iterPeers(f func(p *Peer)) {
+	for pc := range t.conns {
+		f(&pc.Peer)
+	}
+	for _, ws := range t.webSeeds {
+		f(ws)
+	}
+}
+
+func (t *Torrent) callbacks() *Callbacks {
+	return &t.cl.config.Callbacks
+}
+
+type AddWebSeedsOpt func(*webseed.Client)
+
+// Sets the WebSeed trailing path escaper for a webseed.Client.
+func WebSeedPathEscaper(custom webseed.PathEscaper) AddWebSeedsOpt {
+	return func(c *webseed.Client) {
+		c.PathEscaper = custom
+	}
+}
+
+func (t *Torrent) AddWebSeeds(urls []string, opts ...AddWebSeedsOpt) {
+	t.cl.lock()
+	defer t.cl.unlock()
+	for _, u := range urls {
+		t.addWebSeed(u, opts...)
+	}
+}
+
+func (t *Torrent) addWebSeed(url string, opts ...AddWebSeedsOpt) {
+	if t.cl.config.DisableWebseeds {
+		return
+	}
+	if _, ok := t.webSeeds[url]; ok {
+		return
+	}
+	// I don't think Go http supports pipelining requests. However, we can have more ready to go
+	// right away. This value should be some multiple of the number of connections to a host. I
+	// would expect that double maxRequests plus a bit would be appropriate. This value is based on
+	// downloading Sintel (08ada5a7a6183aae1e09d831df6748d566095a10) from
+	// "https://webtorrent.io/torrents/".
+	const maxRequests = 16
+	ws := webseedPeer{
+		peer: Peer{
+			t:                        t,
+			outgoing:                 true,
+			Network:                  "http",
+			reconciledHandshakeStats: true,
+			// This should affect how often we have to recompute requests for this peer. Note that
+			// because we can request more than 1 thing at a time over HTTP, we will hit the low
+			// requests mark more often, so recomputation is probably sooner than with regular peer
+			// conns. ~4x maxRequests would be about right.
+			PeerMaxRequests: 128,
+			// TODO: Set ban prefix?
+			RemoteAddr: remoteAddrFromUrl(url),
+			callbacks:  t.callbacks(),
+		},
+		client: webseed.Client{
+			HttpClient: t.cl.httpClient,
+			Url:        url,
+			ResponseBodyWrapper: func(r io.Reader) io.Reader {
+				return &rateLimitedReader{
+					l: t.cl.config.DownloadRateLimiter,
+					r: r,
+				}
+			},
+		},
+		activeRequests: make(map[Request]webseed.Request, maxRequests),
+	}
+	ws.peer.initRequestState()
+	for _, opt := range opts {
+		opt(&ws.client)
+	}
+	ws.peer.initUpdateRequestsTimer()
+	ws.requesterCond.L = t.cl.locker()
+	for i := 0; i < maxRequests; i += 1 {
+		go ws.requester(i)
+	}
+	for _, f := range t.callbacks().NewPeer {
+		f(&ws.peer)
+	}
+	ws.peer.logger = t.logger.WithContextValue(&ws)
+	ws.peer.peerImpl = &ws
+	if t.haveInfo() {
+		ws.onGotInfo(t.info)
+	}
+	t.webSeeds[url] = &ws.peer
+}
+
+func (t *Torrent) peerIsActive(p *Peer) (active bool) {
+	t.iterPeers(func(p1 *Peer) {
+		if p1 == p {
+			active = true
+		}
+	})
+	return
+}
+
+func (t *Torrent) requestIndexToRequest(ri RequestIndex) Request {
+	index := t.pieceIndexOfRequestIndex(ri)
+	return Request{
+		pp.Integer(index),
+		t.piece(index).chunkIndexSpec(ri % t.chunksPerRegularPiece()),
+	}
+}
+
+func (t *Torrent) requestIndexFromRequest(r Request) RequestIndex {
+	return t.pieceRequestIndexOffset(pieceIndex(r.Index)) + RequestIndex(r.Begin/t.chunkSize)
+}
+
+func (t *Torrent) pieceRequestIndexOffset(piece pieceIndex) RequestIndex {
+	return RequestIndex(piece) * t.chunksPerRegularPiece()
+}
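+
+// Worked example with hypothetical sizes: for 256 KiB pieces and 16 KiB chunks,
+// chunksPerRegularPiece() is 16, so RequestIndex 35 maps to piece 35/16 = 2 with chunk index
+// 35 - pieceRequestIndexOffset(2) = 3, i.e. a Request with Begin = 3*16384.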
+
+func (t *Torrent) updateComplete() {
+	t.Complete.SetBool(t.haveAllPieces())
+}
+
+func (t *Torrent) cancelRequest(r RequestIndex) *Peer {
+	p := t.requestingPeer(r)
+	if p != nil {
+		p.cancel(r)
+	}
+	// TODO: This is a check that an old invariant holds. It can be removed after some testing.
+	//delete(t.pendingRequests, r)
+	if _, ok := t.requestState[r]; ok {
+		panic("expected request state to be gone")
+	}
+	return p
+}
+
+func (t *Torrent) requestingPeer(r RequestIndex) *Peer {
+	return t.requestState[r].peer
+}
+
+func (t *Torrent) addConnWithAllPieces(p *Peer) {
+	if t.connsWithAllPieces == nil {
+		t.connsWithAllPieces = make(map[*Peer]struct{}, t.maxEstablishedConns)
+	}
+	t.connsWithAllPieces[p] = struct{}{}
+}
+
+func (t *Torrent) deleteConnWithAllPieces(p *Peer) bool {
+	_, ok := t.connsWithAllPieces[p]
+	delete(t.connsWithAllPieces, p)
+	return ok
+}
+
+func (t *Torrent) numActivePeers() int {
+	return len(t.conns) + len(t.webSeeds)
+}
+
+func (t *Torrent) hasStorageCap() bool {
+	f := t.storage.Capacity
+	if f == nil {
+		return false
+	}
+	_, ok := (*f)()
+	return ok
+}
+
+func (t *Torrent) pieceIndexOfRequestIndex(ri RequestIndex) pieceIndex {
+	return pieceIndex(ri / t.chunksPerRegularPiece())
+}
+
+func (t *Torrent) iterUndirtiedRequestIndexesInPiece(
+	reuseIter *typedRoaring.Iterator[RequestIndex],
+	piece pieceIndex,
+	f func(RequestIndex),
+) {
+	reuseIter.Initialize(&t.dirtyChunks)
+	pieceRequestIndexOffset := t.pieceRequestIndexOffset(piece)
+	iterBitmapUnsetInRange(
+		reuseIter,
+		pieceRequestIndexOffset, pieceRequestIndexOffset+t.pieceNumChunks(piece),
+		f,
+	)
+}
+
+type requestState struct {
+	peer *Peer
+	when time.Time
+}
+
+// Returns an error if a received chunk is out of bounds in some way.
+func (t *Torrent) checkValidReceiveChunk(r Request) error {
+	if !t.haveInfo() {
+		return errors.New("torrent missing info")
+	}
+	if int(r.Index) >= t.numPieces() {
+		return fmt.Errorf("chunk index %v, torrent num pieces %v", r.Index, t.numPieces())
+	}
+	pieceLength := t.pieceLength(pieceIndex(r.Index))
+	if r.Begin >= pieceLength {
+		return fmt.Errorf("chunk begins beyond end of piece (%v >= %v)", r.Begin, pieceLength)
+	}
+	// We could check chunk lengths here, but chunk request size is not changed often, and tricky
+	// for peers to manipulate as they need to send potentially large buffers to begin with. There
+	// should be considerable checks elsewhere for this case due to the network overhead. We should
+	// catch most of the overflow manipulation stuff by checking index and begin above.
+	return nil
+}
+
+func (t *Torrent) peerConnsWithDialAddrPort(target netip.AddrPort) (ret []*PeerConn) {
+	for pc := range t.conns {
+		dialAddr, err := pc.remoteDialAddrPort()
+		if err != nil {
+			continue
+		}
+		if dialAddr != target {
+			continue
+		}
+		ret = append(ret, pc)
+	}
+	return
+}
+
+func wrapUtHolepunchMsgForPeerConn(
+	recipient *PeerConn,
+	msg utHolepunch.Msg,
+) pp.Message {
+	extendedPayload, err := msg.MarshalBinary()
+	if err != nil {
+		panic(err)
+	}
+	return pp.Message{
+		Type:            pp.Extended,
+		ExtendedID:      MapMustGet(recipient.PeerExtensionIDs, utHolepunch.ExtensionName),
+		ExtendedPayload: extendedPayload,
+	}
+}
+
+func sendUtHolepunchMsg(
+	pc *PeerConn,
+	msgType utHolepunch.MsgType,
+	addrPort netip.AddrPort,
+	errCode utHolepunch.ErrCode,
+) {
+	holepunchMsg := utHolepunch.Msg{
+		MsgType:  msgType,
+		AddrPort: addrPort,
+		ErrCode:  errCode,
+	}
+	incHolepunchMessagesSent(holepunchMsg)
+	ppMsg := wrapUtHolepunchMsgForPeerConn(pc, holepunchMsg)
+	pc.write(ppMsg)
+}
+
+func incHolepunchMessages(msg utHolepunch.Msg, verb string) {
+	torrent.Add(
+		fmt.Sprintf(
+			"holepunch %v %v messages %v",
+			msg.MsgType,
+			addrPortProtocolStr(msg.AddrPort),
+			verb,
+		),
+		1,
+	)
+}
+
+func incHolepunchMessagesReceived(msg utHolepunch.Msg) {
+	incHolepunchMessages(msg, "received")
+}
+
+func incHolepunchMessagesSent(msg utHolepunch.Msg) {
+	incHolepunchMessages(msg, "sent")
+}
+
+func (t *Torrent) handleReceivedUtHolepunchMsg(msg utHolepunch.Msg, sender *PeerConn) error {
+	incHolepunchMessagesReceived(msg)
+	switch msg.MsgType {
+	case utHolepunch.Rendezvous:
+		t.logger.Printf("got holepunch rendezvous request for %v from %p", msg.AddrPort, sender)
+		sendMsg := sendUtHolepunchMsg
+		senderAddrPort, err := sender.remoteDialAddrPort()
+		if err != nil {
+			sender.logger.Levelf(
+				log.Warning,
+				"error getting ut_holepunch rendezvous sender's dial address: %v",
+				err,
+			)
+			// There's no better error code. The sender's address itself is invalid. I don't see
+			// this error message being appropriate anywhere else anyway.
+			sendMsg(sender, utHolepunch.Error, msg.AddrPort, utHolepunch.NoSuchPeer)
+		}
+		targets := t.peerConnsWithDialAddrPort(msg.AddrPort)
+		if len(targets) == 0 {
+			sendMsg(sender, utHolepunch.Error, msg.AddrPort, utHolepunch.NotConnected)
+			return nil
+		}
+		for _, pc := range targets {
+			if !pc.supportsExtension(utHolepunch.ExtensionName) {
+				sendMsg(sender, utHolepunch.Error, msg.AddrPort, utHolepunch.NoSupport)
+				continue
+			}
+			sendMsg(sender, utHolepunch.Connect, msg.AddrPort, 0)
+			sendMsg(pc, utHolepunch.Connect, senderAddrPort, 0)
+		}
+		return nil
+	case utHolepunch.Connect:
+		holepunchAddr := msg.AddrPort
+		t.logger.Printf("got holepunch connect request for %v from %p", holepunchAddr, sender)
+		if g.MapContains(t.cl.undialableWithoutHolepunch, holepunchAddr) {
+			setAdd(&t.cl.undialableWithoutHolepunchDialedAfterHolepunchConnect, holepunchAddr)
+			if g.MapContains(t.cl.accepted, holepunchAddr) {
+				setAdd(&t.cl.probablyOnlyConnectedDueToHolepunch, holepunchAddr)
+			}
+		}
+		opts := outgoingConnOpts{
+			peerInfo: PeerInfo{
+				Addr:         msg.AddrPort,
+				Source:       PeerSourceUtHolepunch,
+				PexPeerFlags: sender.pex.remoteLiveConns[msg.AddrPort].UnwrapOrZeroValue(),
+			},
+			t: t,
+			// Don't attempt to start our own rendezvous if we fail to connect.
+			skipHolepunchRendezvous:  true,
+			receivedHolepunchConnect: true,
+			// Assume that the other end initiated the rendezvous, and will use our preferred
+			// encryption. So we will act normally.
+			HeaderObfuscationPolicy: t.cl.config.HeaderObfuscationPolicy,
+		}
+		initiateConn(opts, true)
+		return nil
+	case utHolepunch.Error:
+		torrent.Add("holepunch error messages received", 1)
+		t.logger.Levelf(log.Debug, "received ut_holepunch error message from %v: %v", sender, msg.ErrCode)
+		return nil
+	default:
+		return fmt.Errorf("unhandled msg type %v", msg.MsgType)
+	}
+}
+
+func addrPortProtocolStr(addrPort netip.AddrPort) string {
+	addr := addrPort.Addr()
+	switch {
+	case addr.Is4():
+		return "ipv4"
+	case addr.Is6():
+		return "ipv6"
+	default:
+		panic(addrPort)
+	}
+}
+
+func (t *Torrent) trySendHolepunchRendezvous(addrPort netip.AddrPort) error {
+	rzsSent := 0
+	for pc := range t.conns {
+		if !pc.supportsExtension(utHolepunch.ExtensionName) {
+			continue
+		}
+		if pc.supportsExtension(pp.ExtensionNamePex) {
+			if !g.MapContains(pc.pex.remoteLiveConns, addrPort) {
+				continue
+			}
+		}
+		t.logger.Levelf(log.Debug, "sent ut_holepunch rendezvous message to %v for %v", pc, addrPort)
+		sendUtHolepunchMsg(pc, utHolepunch.Rendezvous, addrPort, 0)
+		rzsSent++
+	}
+	if rzsSent == 0 {
+		return errors.New("no eligible relays")
+	}
+	return nil
+}
+
+func (t *Torrent) numHalfOpenAttempts() (num int) {
+	for _, attempts := range t.halfOpen {
+		num += len(attempts)
+	}
+	return
+}
+
+func (t *Torrent) getDialTimeoutUnlocked() time.Duration {
+	cl := t.cl
+	cl.rLock()
+	defer cl.rUnlock()
+	return t.dialTimeout()
+}
diff --git a/deps/github.com/anacrolix/torrent/torrent_mmap_test.go b/deps/github.com/anacrolix/torrent/torrent_mmap_test.go
new file mode 100644
index 0000000..8114309
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/torrent_mmap_test.go
@@ -0,0 +1,18 @@
+//go:build !wasm
+// +build !wasm
+
+package torrent
+
+import (
+	"testing"
+
+	"github.com/anacrolix/torrent/storage"
+)
+
+func TestEmptyFilesAndZeroPieceLengthWithMMapStorage(t *testing.T) {
+	cfg := TestingConfig(t)
+	ci := storage.NewMMap(cfg.DataDir)
+	defer ci.Close()
+	cfg.DefaultStorage = ci
+	testEmptyFilesAndZeroPieceLength(t, cfg)
+}
diff --git a/deps/github.com/anacrolix/torrent/torrent_test.go b/deps/github.com/anacrolix/torrent/torrent_test.go
new file mode 100644
index 0000000..808947e
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/torrent_test.go
@@ -0,0 +1,253 @@
+package torrent
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"path/filepath"
+	"sync"
+	"testing"
+
+	g "github.com/anacrolix/generics"
+	"github.com/anacrolix/log"
+	"github.com/anacrolix/missinggo/v2"
+	"github.com/anacrolix/missinggo/v2/bitmap"
+	qt "github.com/frankban/quicktest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/anacrolix/torrent/bencode"
+	"github.com/anacrolix/torrent/internal/testutil"
+	"github.com/anacrolix/torrent/metainfo"
+	pp "github.com/anacrolix/torrent/peer_protocol"
+	"github.com/anacrolix/torrent/storage"
+)
+
+func r(i, b, l pp.Integer) Request {
+	return Request{i, ChunkSpec{b, l}}
+}
+
+// Check the given request is correct for various torrent offsets.
+func TestTorrentRequest(t *testing.T) {
+	const s = 472183431 // Length of torrent.
+	for _, _case := range []struct {
+		off int64   // An offset into the torrent.
+		req Request // The expected request. The zero value means !ok.
+	}{
+		// Invalid offset.
+		{-1, Request{}},
+		{0, r(0, 0, 16384)},
+		// One before the end of a piece.
+		{1<<18 - 1, r(0, 1<<18-16384, 16384)},
+		// Offset beyond torrent length.
+		{472 * 1 << 20, Request{}},
+		// One before the end of the torrent. Complicates the chunk length.
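+		// With s = 472183431 and 1<<18-byte pieces, the last byte falls in piece 1801 at piece
+		// offset 62086, so the chunk begins at 62086/16384*16384 = 49152 and the final short
+		// chunk is 62087-49152 = 12935 bytes.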
+		{s - 1, r((s-1)/(1<<18), (s-1)%(1<<18)/(16384)*(16384), 12935)},
+		{1, r(0, 0, 16384)},
+		// One before end of chunk.
+		{16383, r(0, 0, 16384)},
+		// Second chunk.
+		{16384, r(0, 16384, 16384)},
+	} {
+		req, ok := torrentOffsetRequest(472183431, 1<<18, 16384, _case.off)
+		if (_case.req == Request{}) == ok {
+			t.Fatalf("expected %v, got %v", _case.req, req)
+		}
+		if req != _case.req {
+			t.Fatalf("expected %v, got %v", _case.req, req)
+		}
+	}
+}
+
+func TestAppendToCopySlice(t *testing.T) {
+	orig := []int{1, 2, 3}
+	dupe := append([]int{}, orig...)
+	dupe[0] = 4
+	if orig[0] != 1 {
+		t.FailNow()
+	}
+}
+
+func TestTorrentString(t *testing.T) {
+	tor := &Torrent{}
+	s := tor.InfoHash().HexString()
+	if s != "0000000000000000000000000000000000000000" {
+		t.FailNow()
+	}
+}
+
+// This benchmark is from the observation that a lot of overlapping Readers on
+// a large torrent with small pieces incurred a lot of overhead recalculating
+// piece priorities every time a reader (possibly in another Torrent) changed.
+func BenchmarkUpdatePiecePriorities(b *testing.B) {
+	const (
+		numPieces   = 13410
+		pieceLength = 256 << 10
+	)
+	cl := &Client{config: TestingConfig(b)}
+	cl.initLogger()
+	t := cl.newTorrent(metainfo.Hash{}, nil)
+	require.NoError(b, t.setInfo(&metainfo.Info{
+		Pieces:      make([]byte, metainfo.HashSize*numPieces),
+		PieceLength: pieceLength,
+		Length:      pieceLength * numPieces,
+	}))
+	t.onSetInfo()
+	assert.EqualValues(b, 13410, t.numPieces())
+	for i := 0; i < 7; i += 1 {
+		r := t.NewReader()
+		r.SetReadahead(32 << 20)
+		r.Seek(3500000, io.SeekStart)
+	}
+	assert.Len(b, t.readers, 7)
+	for i := 0; i < t.numPieces(); i += 3 {
+		t._completedPieces.Add(bitmap.BitIndex(i))
+	}
+	t.DownloadPieces(0, t.numPieces())
+	for i := 0; i < b.N; i += 1 {
+		t.updateAllPiecePriorities("")
+	}
+}
+
+// Check that a torrent containing zero-length file(s) will start, and that
+// they're created in the filesystem. The client storage is assumed to be
+// file-based on the native filesystem.
+func testEmptyFilesAndZeroPieceLength(t *testing.T, cfg *ClientConfig) {
+	cl, err := NewClient(cfg)
+	require.NoError(t, err)
+	defer cl.Close()
+	ib, err := bencode.Marshal(metainfo.Info{
+		Name:        "empty",
+		Length:      0,
+		PieceLength: 0,
+	})
+	require.NoError(t, err)
+	fp := filepath.Join(cfg.DataDir, "empty")
+	os.Remove(fp)
+	assert.False(t, missinggo.FilePathExists(fp))
+	tt, err := cl.AddTorrent(&metainfo.MetaInfo{
+		InfoBytes: ib,
+	})
+	require.NoError(t, err)
+	defer tt.Drop()
+	tt.DownloadAll()
+	require.True(t, cl.WaitAll())
+	assert.True(t, tt.Complete.Bool())
+	assert.True(t, missinggo.FilePathExists(fp))
+}
+
+func TestEmptyFilesAndZeroPieceLengthWithFileStorage(t *testing.T) {
+	cfg := TestingConfig(t)
+	ci := storage.NewFile(cfg.DataDir)
+	defer ci.Close()
+	cfg.DefaultStorage = ci
+	testEmptyFilesAndZeroPieceLength(t, cfg)
+}
+
+func TestPieceHashFailed(t *testing.T) {
+	mi := testutil.GreetingMetaInfo()
+	cl := newTestingClient(t)
+	tt := cl.newTorrent(mi.HashInfoBytes(), badStorage{})
+	tt.setChunkSize(2)
+	require.NoError(t, tt.setInfoBytesLocked(mi.InfoBytes))
+	tt.cl.lock()
+	tt.dirtyChunks.AddRange(
+		uint64(tt.pieceRequestIndexOffset(1)),
+		uint64(tt.pieceRequestIndexOffset(1)+3))
+	require.True(t, tt.pieceAllDirty(1))
+	tt.pieceHashed(1, false, nil)
+	// Dirty chunks should be cleared so we can try again.
+	require.False(t, tt.pieceAllDirty(1))
+	tt.cl.unlock()
+}
+
+// Check the behaviour of Torrent.Metainfo when metadata is not completed.
+func TestTorrentMetainfoIncompleteMetadata(t *testing.T) {
+	cfg := TestingConfig(t)
+	cfg.Debug = true
+	// Disable this just because we manually initiate a connection without it.
+	cfg.MinPeerExtensions.SetBit(pp.ExtensionBitFast, false)
+	cl, err := NewClient(cfg)
+	require.NoError(t, err)
+	defer cl.Close()
+
+	mi := testutil.GreetingMetaInfo()
+	ih := mi.HashInfoBytes()
+
+	tt, _ := cl.AddTorrentInfoHash(ih)
+	assert.Nil(t, tt.Metainfo().InfoBytes)
+	assert.False(t, tt.haveAllMetadataPieces())
+
+	nc, err := net.Dial("tcp", fmt.Sprintf(":%d", cl.LocalPort()))
+	require.NoError(t, err)
+	defer nc.Close()
+
+	var pex PeerExtensionBits
+	pex.SetBit(pp.ExtensionBitLtep, true)
+	hr, err := pp.Handshake(nc, &ih, [20]byte{}, pex)
+	require.NoError(t, err)
+	assert.True(t, hr.PeerExtensionBits.GetBit(pp.ExtensionBitLtep))
+	assert.EqualValues(t, cl.PeerID(), hr.PeerID)
+	assert.EqualValues(t, ih, hr.Hash)
+
+	assert.EqualValues(t, 0, tt.metadataSize())
+
+	func() {
+		cl.lock()
+		defer cl.unlock()
+		go func() {
+			_, err = nc.Write(pp.Message{
+				Type:       pp.Extended,
+				ExtendedID: pp.HandshakeExtendedID,
+				ExtendedPayload: func() []byte {
+					d := map[string]interface{}{
+						"metadata_size": len(mi.InfoBytes),
+					}
+					b, err := bencode.Marshal(d)
+					if err != nil {
+						panic(err)
+					}
+					return b
+				}(),
+			}.MustMarshalBinary())
+			require.NoError(t, err)
+		}()
+		tt.metadataChanged.Wait()
+	}()
+	assert.Equal(t, make([]byte, len(mi.InfoBytes)), tt.metadataBytes)
+	assert.False(t, tt.haveAllMetadataPieces())
+	assert.Nil(t, tt.Metainfo().InfoBytes)
+}
+
+func TestRelativeAvailabilityHaveNone(t *testing.T) {
+	c := qt.New(t)
+	var err error
+	cl := Client{
+		config: TestingConfig(t),
+	}
+	tt := Torrent{
+		cl:           &cl,
+		logger:       log.Default,
+		gotMetainfoC: make(chan struct{}),
+	}
+	tt.setChunkSize(2)
+	g.MakeMapIfNil(&tt.conns)
+	pc := PeerConn{}
+	pc.t = &tt
+	pc.peerImpl = &pc
+	pc.initRequestState()
+	g.InitNew(&pc.callbacks)
+	tt.conns[&pc] = struct{}{}
+	err = pc.peerSentHave(0)
+	c.Assert(err, qt.IsNil)
+	info := testutil.Greeting.Info(5)
+	err = tt.setInfo(&info)
+	c.Assert(err, qt.IsNil)
+	tt.onSetInfo()
+	err = pc.peerSentHaveNone()
+	c.Assert(err, qt.IsNil)
+	var wg sync.WaitGroup
+	tt.close(&wg)
+	tt.assertAllPiecesRelativeAvailabilityZero()
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/client.go b/deps/github.com/anacrolix/torrent/tracker/client.go
new file mode 100644
index 0000000..3b7e2ab
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/client.go
@@ -0,0 +1,60 @@
+package tracker
+
+import (
+	"context"
+	"net"
+	"net/url"
+
+	"github.com/anacrolix/log"
+
+	trHttp "github.com/anacrolix/torrent/tracker/http"
+	"github.com/anacrolix/torrent/tracker/udp"
+	"github.com/anacrolix/torrent/types/infohash"
+)
+
+type Client interface {
+	Announce(context.Context, AnnounceRequest, AnnounceOpt) (AnnounceResponse, error)
+	Scrape(ctx context.Context, ihs []infohash.T) (out udp.ScrapeResponse, err error)
+	Close() error
+}
+
+type AnnounceOpt = trHttp.AnnounceOpt
+
+type NewClientOpts struct {
+	Http trHttp.NewClientOpts
+	// Overrides the network in the scheme. Probably a legacy thing.
+	UdpNetwork   string
+	Logger       log.Logger
+	ListenPacket func(network, addr string) (net.PacketConn, error)
+}
+
+func NewClient(urlStr string, opts NewClientOpts) (Client, error) {
+	_url, err := url.Parse(urlStr)
+	if err != nil {
+		return nil, err
+	}
+	switch _url.Scheme {
+	case "http", "https":
+		return trHttp.NewClient(_url, opts.Http), nil
+	case "udp", "udp4", "udp6":
+		network := _url.Scheme
+		if opts.UdpNetwork != "" {
+			network = opts.UdpNetwork
+		}
+		cc, err := udp.NewConnClient(udp.NewConnClientOpts{
+			Network:      network,
+			Host:         _url.Host,
+			Logger:       opts.Logger,
+			ListenPacket: opts.ListenPacket,
+		})
+		if err != nil {
+			return nil, err
+		}
+		return &udpClient{
+			cl:         cc,
+			requestUri: _url.RequestURI(),
+		}, nil
+	default:
+		return nil, ErrBadScheme
+	}
+}
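+
+// A minimal usage sketch (the tracker URL is illustrative; error handling elided):
+//
+//	cl, _ := NewClient("udp://tracker.example.com:80/announce", NewClientOpts{})
+//	defer cl.Close()
+//	resp, _ := cl.Announce(context.Background(), AnnounceRequest{NumWant: -1}, AnnounceOpt{})
+//	_ = resp.Peers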
diff --git a/deps/github.com/anacrolix/torrent/tracker/http/client.go b/deps/github.com/anacrolix/torrent/tracker/http/client.go
new file mode 100644
index 0000000..c6b06fc
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/http/client.go
@@ -0,0 +1,49 @@
+package httpTracker
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+	"net/http"
+	"net/url"
+)
+
+type Client struct {
+	hc   *http.Client
+	url_ *url.URL
+}
+
+type (
+	ProxyFunc       func(*http.Request) (*url.URL, error)
+	DialContextFunc func(ctx context.Context, network, addr string) (net.Conn, error)
+)
+
+type NewClientOpts struct {
+	Proxy          ProxyFunc
+	DialContext    DialContextFunc
+	ServerName     string
+	AllowKeepAlive bool
+}
+
+func NewClient(url_ *url.URL, opts NewClientOpts) Client {
+	return Client{
+		url_: url_,
+		hc: &http.Client{
+			Transport: &http.Transport{
+				DialContext: opts.DialContext,
+				Proxy:       opts.Proxy,
+				TLSClientConfig: &tls.Config{
+					InsecureSkipVerify: true,
+					ServerName:         opts.ServerName,
+				},
+				// This is for S3 trackers that hold connections open.
+				DisableKeepAlives: !opts.AllowKeepAlive,
+			},
+		},
+	}
+}
+
+func (cl Client) Close() error {
+	cl.hc.CloseIdleConnections()
+	return nil
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/http/http.go b/deps/github.com/anacrolix/torrent/tracker/http/http.go
new file mode 100644
index 0000000..a7022a5
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/http/http.go
@@ -0,0 +1,157 @@
+package httpTracker
+
+import (
+	"bytes"
+	"context"
+	"expvar"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/anacrolix/missinggo/httptoo"
+
+	"github.com/anacrolix/torrent/bencode"
+	"github.com/anacrolix/torrent/tracker/shared"
+	"github.com/anacrolix/torrent/tracker/udp"
+	"github.com/anacrolix/torrent/version"
+)
+
+var vars = expvar.NewMap("tracker/http")
+
+func setAnnounceParams(_url *url.URL, ar *AnnounceRequest, opts AnnounceOpt) {
+	q := url.Values{}
+
+	q.Set("key", strconv.FormatInt(int64(ar.Key), 10))
+	q.Set("info_hash", string(ar.InfoHash[:]))
+	q.Set("peer_id", string(ar.PeerId[:]))
+	// AFAICT, port is mandatory, and there's no implied port key.
+	q.Set("port", fmt.Sprintf("%d", ar.Port))
+	q.Set("uploaded", strconv.FormatInt(ar.Uploaded, 10))
+	q.Set("downloaded", strconv.FormatInt(ar.Downloaded, 10))
+
+	// The AWS S3 tracker returns "400 Bad Request: left(-1) was not in the valid range 0 -
+	// 9223372036854775807" if left is out of range, or "500 Internal Server Error: Internal Server
+	// Error" if omitted entirely.
+	left := ar.Left
+	if left < 0 {
+		left = math.MaxInt64
+	}
+	q.Set("left", strconv.FormatInt(left, 10))
+
+	if ar.Event != shared.None {
+		q.Set("event", ar.Event.String())
+	}
+	// http://stackoverflow.com/questions/17418004/why-does-tracker-server-not-understand-my-request-bittorrent-protocol
+	q.Set("compact", "1")
+	// According to https://wiki.vuze.com/w/Message_Stream_Encryption. TODO:
+	// Take EncryptionPolicy or something like it as a parameter.
+	q.Set("supportcrypto", "1")
+	doIp := func(versionKey string, ip net.IP) {
+		if ip == nil {
+			return
+		}
+		ipString := ip.String()
+		q.Set(versionKey, ipString)
+		// Let's try listing them. BEP 3 mentions having an "ip" param, and BEP 7 says we can list
+		// addresses for other address-families, although it's not encouraged.
+		q.Add("ip", ipString)
+	}
+	doIp("ipv4", opts.ClientIp4)
+	doIp("ipv6", opts.ClientIp6)
+	// We're operating purely on query-escaped strings, where + would have already been encoded to
+	// %2B, and + has no other special meaning. See https://github.com/anacrolix/torrent/issues/534.
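+	// E.g. a space byte (0x20) in an info hash is query-escaped to "+" by Encode; some trackers
+	// don't decode "+" back to a space, so the unambiguous "%20" is substituted below.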
+	qstr := strings.ReplaceAll(q.Encode(), "+", "%20")
+
+	// Some private trackers require the original query param to be in the first position.
+	if _url.RawQuery != "" {
+		_url.RawQuery += "&" + qstr
+	} else {
+		_url.RawQuery = qstr
+	}
+}
+
+type AnnounceOpt struct {
+	UserAgent           string
+	HostHeader          string
+	ClientIp4           net.IP
+	ClientIp6           net.IP
+	HttpRequestDirector func(*http.Request) error
+}
+
+type AnnounceRequest = udp.AnnounceRequest
+
+func (cl Client) Announce(ctx context.Context, ar AnnounceRequest, opt AnnounceOpt) (ret AnnounceResponse, err error) {
+	_url := httptoo.CopyURL(cl.url_)
+	setAnnounceParams(_url, &ar, opt)
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, _url.String(), nil)
+	if err != nil {
+		return
+	}
+	userAgent := opt.UserAgent
+	if userAgent == "" {
+		userAgent = version.DefaultHttpUserAgent
+	}
+	if userAgent != "" {
+		req.Header.Set("User-Agent", userAgent)
+	}
+
+	if opt.HttpRequestDirector != nil {
+		err = opt.HttpRequestDirector(req)
+		if err != nil {
+			err = fmt.Errorf("error modifying HTTP request: %w", err)
+			return
+		}
+	}
+
+	req.Host = opt.HostHeader
+	resp, err := cl.hc.Do(req)
+	if err != nil {
+		return
+	}
+	defer resp.Body.Close()
+	var buf bytes.Buffer
+	io.Copy(&buf, resp.Body)
+	if resp.StatusCode != 200 {
+		err = fmt.Errorf("response from tracker: %s: %q", resp.Status, buf.Bytes())
+		return
+	}
+	var trackerResponse HttpResponse
+	err = bencode.Unmarshal(buf.Bytes(), &trackerResponse)
+	if _, ok := err.(bencode.ErrUnusedTrailingBytes); ok {
+		err = nil
+	} else if err != nil {
+		err = fmt.Errorf("error decoding %q: %s", buf.Bytes(), err)
+		return
+	}
+	if trackerResponse.FailureReason != "" {
+		err = fmt.Errorf("tracker gave failure reason: %q", trackerResponse.FailureReason)
+		return
+	}
+	vars.Add("successful http announces", 1)
+	ret.Interval = trackerResponse.Interval
+	ret.Leechers = trackerResponse.Incomplete
+	ret.Seeders = trackerResponse.Complete
+	if len(trackerResponse.Peers.List) != 0 {
+		vars.Add("http responses with nonempty peers key", 1)
+	}
+	ret.Peers = trackerResponse.Peers.List
+	if len(trackerResponse.Peers6) != 0 {
+		vars.Add("http responses with nonempty peers6 key", 1)
+	}
+	for _, na := range trackerResponse.Peers6 {
+		ret.Peers = append(ret.Peers, Peer{
+			IP:   na.IP,
+			Port: na.Port,
+		})
+	}
+	return
+}
+
+type AnnounceResponse struct {
+	Interval int32 // Minimum seconds the local peer should wait before next announce.
+	Leechers int32
+	Seeders  int32
+	Peers    []Peer
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/http/http_test.go b/deps/github.com/anacrolix/torrent/tracker/http/http_test.go
new file mode 100644
index 0000000..4e5efaf
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/http/http_test.go
@@ -0,0 +1,76 @@
+package httpTracker
+
+import (
+	"net/url"
+	"testing"
+
+	qt "github.com/frankban/quicktest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/anacrolix/torrent/bencode"
+	"github.com/anacrolix/torrent/tracker/udp"
+)
+
+func TestUnmarshalHTTPResponsePeerDicts(t *testing.T) {
+	var hr HttpResponse
+	require.NoError(t, bencode.Unmarshal(
+		[]byte("d5:peersl"+
+			"d2:ip7:1.2.3.47:peer id20:thisisthe20bytepeeri4:porti9999ee"+
+			"d2:ip39:2001:0db8:85a3:0000:0000:8a2e:0370:73347:peer id20:thisisthe20bytepeeri4:porti9998ee"+
+			"e"+
+			"6:peers618:123412341234123456"+
+			"e"),
+		&hr))
+
+	require.Len(t, hr.Peers.List, 2)
+	assert.Equal(t, []byte("thisisthe20bytepeeri"), hr.Peers.List[0].ID)
+	assert.EqualValues(t, 9999, hr.Peers.List[0].Port)
+	assert.EqualValues(t, 9998, hr.Peers.List[1].Port)
+	assert.NotNil(t, hr.Peers.List[0].IP)
+	assert.NotNil(t, hr.Peers.List[1].IP)
+
+	assert.Len(t, hr.Peers6, 1)
+	assert.EqualValues(t, "1234123412341234", hr.Peers6[0].IP)
+	assert.EqualValues(t, 0x3536, hr.Peers6[0].Port)
+}
+
+func TestUnmarshalHttpResponseNoPeers(t *testing.T) {
+	var hr HttpResponse
+	require.NoError(t, bencode.Unmarshal(
+		[]byte("d6:peers618:123412341234123456e"),
+		&hr,
+	))
+	require.Len(t, hr.Peers.List, 0)
+	assert.Len(t, hr.Peers6, 1)
+}
+
+func TestUnmarshalHttpResponsePeers6NotCompact(t *testing.T) {
+	var hr HttpResponse
+	require.Error(t, bencode.Unmarshal(
+		[]byte("d6:peers6lee"),
+		&hr,
+	))
+}
+
+// Checks that infohash bytes that correspond to spaces are escaped with %20 instead of +. See
+// https://github.com/anacrolix/torrent/issues/534
+func TestSetAnnounceInfohashParamWithSpaces(t *testing.T) {
+	someUrl := &url.URL{}
+	ihBytes := [20]uint8{
+		0x2b, 0x76, 0xa, 0xa1, 0x78, 0x93, 0x20, 0x30, 0xc8, 0x47,
+		0xdc, 0xdf, 0x8e, 0xae, 0xbf, 0x56, 0xa, 0x1b, 0xd1, 0x6c,
+	}
+	setAnnounceParams(
+		someUrl,
+		&udp.AnnounceRequest{
+			InfoHash: ihBytes,
+		},
+		AnnounceOpt{})
+	t.Logf("%q", someUrl)
+	qt.Assert(t, someUrl.Query().Get("info_hash"), qt.Equals, string(ihBytes[:]))
+	qt.Check(t,
+		someUrl.String(),
+		qt.Contains,
+		"info_hash=%2Bv%0A%A1x%93%200%C8G%DC%DF%8E%AE%BFV%0A%1B%D1l")
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/http/peer.go b/deps/github.com/anacrolix/torrent/tracker/http/peer.go
new file mode 100644
index 0000000..b0deee0
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/http/peer.go
@@ -0,0 +1,46 @@
+package httpTracker
+
+import (
+	"fmt"
+	"net"
+	"net/netip"
+
+	"github.com/anacrolix/dht/v2/krpc"
+)
+
+// TODO: Use netip.Addr and Option[[20]byte].
+type Peer struct {
+	IP   net.IP `bencode:"ip"`
+	Port int    `bencode:"port"`
+	ID   []byte `bencode:"peer id"`
+}
+
+func (p Peer) ToNetipAddrPort() (addrPort netip.AddrPort, ok bool) {
+	addr, ok := netip.AddrFromSlice(p.IP)
+	addrPort = netip.AddrPortFrom(addr, uint16(p.Port))
+	return
+}
+
+func (p Peer) String() string {
+	loc := net.JoinHostPort(p.IP.String(), fmt.Sprintf("%d", p.Port))
+	if len(p.ID) != 0 {
+		return fmt.Sprintf("%x at %s", p.ID, loc)
+	} else {
+		return loc
+	}
+}
+
+// Set from the non-compact form in BEP 3.
+func (p *Peer) FromDictInterface(d map[string]interface{}) {
+	p.IP = net.ParseIP(d["ip"].(string))
+	if _, ok := d["peer id"]; ok {
+		p.ID = []byte(d["peer id"].(string))
+	}
+	p.Port = int(d["port"].(int64))
+}
+
+func (p Peer) FromNodeAddr(na krpc.NodeAddr) Peer {
+	p.IP = na.IP
+	p.Port = na.Port
+	return p
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/http/protocol.go b/deps/github.com/anacrolix/torrent/tracker/http/protocol.go
new file mode 100644
index 0000000..11a90b2
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/http/protocol.go
@@ -0,0 +1,83 @@
+package httpTracker
+
+import (
+	"fmt"
+
+	"github.com/anacrolix/dht/v2/krpc"
+
+	"github.com/anacrolix/torrent/bencode"
+)
+
+type HttpResponse struct {
+	FailureReason string `bencode:"failure reason"`
+	Interval      int32  `bencode:"interval"`
+	TrackerId     string `bencode:"tracker id"`
+	Complete      int32  `bencode:"complete"`
+	Incomplete    int32  `bencode:"incomplete"`
+	Peers         Peers  `bencode:"peers"`
+	// BEP 7
+	Peers6 krpc.CompactIPv6NodeAddrs `bencode:"peers6"`
+}
+
+type Peers struct {
+	List    []Peer
+	Compact bool
+}
+
+func (me Peers) MarshalBencode() ([]byte, error) {
+	if me.Compact {
+		cnas := make([]krpc.NodeAddr, 0, len(me.List))
+		for _, peer := range me.List {
+			cnas = append(cnas, krpc.NodeAddr{
+				IP:   peer.IP,
+				Port: peer.Port,
+			})
+		}
+		return krpc.CompactIPv4NodeAddrs(cnas).MarshalBencode()
+	} else {
+		return bencode.Marshal(me.List)
+	}
+}
+
+var (
+	_ bencode.Unmarshaler = (*Peers)(nil)
+	_ bencode.Marshaler   = Peers{}
+)
+
+func (me *Peers) UnmarshalBencode(b []byte) (err error) {
+	var _v interface{}
+	err = bencode.Unmarshal(b, &_v)
+	if err != nil {
+		return
+	}
+	switch v := _v.(type) {
+	case string:
+		vars.Add("http responses with string peers", 1)
+		var cnas krpc.CompactIPv4NodeAddrs
+		err = cnas.UnmarshalBinary([]byte(v))
+		if err != nil {
+			return
+		}
+		me.Compact = true
+		for _, cp := range cnas {
+			me.List = append(me.List, Peer{
+				IP:   cp.IP[:],
+				Port: cp.Port,
+			})
+		}
+		return
+	case []interface{}:
+		vars.Add("http responses with list peers", 1)
+		me.Compact = false
+		for _, i := range v {
+			var p Peer
+			p.FromDictInterface(i.(map[string]interface{}))
+			me.List = append(me.List, p)
+		}
+		return
+	default:
+		vars.Add("http responses with unhandled peers type", 1)
+		err = fmt.Errorf("unsupported type: %T", _v)
+		return
+	}
+}
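+
+// For reference, the compact form arrives as a single bencoded string of 6-byte entries (4-byte
+// IPv4 address followed by a 2-byte big-endian port), while the non-compact BEP 3 form is a list
+// of dicts like d2:ip7:1.2.3.44:porti9999ee (see the cases in http_test.go).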
diff --git a/deps/github.com/anacrolix/torrent/tracker/http/scrape.go b/deps/github.com/anacrolix/torrent/tracker/http/scrape.go
new file mode 100644
index 0000000..6940370
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/http/scrape.go
@@ -0,0 +1,47 @@
+package httpTracker
+
+import (
+	"context"
+	"log"
+	"net/http"
+	"net/url"
+
+	"github.com/anacrolix/torrent/bencode"
+	"github.com/anacrolix/torrent/tracker/udp"
+	"github.com/anacrolix/torrent/types/infohash"
+)
+
+type scrapeResponse struct {
+	Files files `bencode:"files"`
+}
+
+// Bencode should support bencode.Unmarshalers from a string in the dict key position.
+type files = map[string]udp.ScrapeInfohashResult
+
+func (cl Client) Scrape(ctx context.Context, ihs []infohash.T) (out udp.ScrapeResponse, err error) {
+	_url := cl.url_.JoinPath("..", "scrape")
+	query, err := url.ParseQuery(_url.RawQuery)
+	if err != nil {
+		return
+	}
+	for _, ih := range ihs {
+		query.Add("info_hash", ih.AsString())
+	}
+	_url.RawQuery = query.Encode()
+	log.Printf("%q", _url.String())
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, _url.String(), nil)
+	if err != nil {
+		return
+	}
+	resp, err := cl.hc.Do(req)
+	if err != nil {
+		return
+	}
+	defer resp.Body.Close()
+	var decodedResp scrapeResponse
+	err = bencode.NewDecoder(resp.Body).Decode(&decodedResp)
+	for _, ih := range ihs {
+		out = append(out, decodedResp.Files[ih.AsString()])
+	}
+	return
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/http/server/server.go b/deps/github.com/anacrolix/torrent/tracker/http/server/server.go
new file mode 100644
index 0000000..541ef3f
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/http/server/server.go
@@ -0,0 +1,125 @@
+package httpTrackerServer
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"net/netip"
+	"net/url"
+	"strconv"
+
+	"github.com/anacrolix/dht/v2/krpc"
+	"github.com/anacrolix/generics"
+	"github.com/anacrolix/log"
+
+	"github.com/anacrolix/torrent/bencode"
+	"github.com/anacrolix/torrent/tracker"
+	httpTracker "github.com/anacrolix/torrent/tracker/http"
+	trackerServer "github.com/anacrolix/torrent/tracker/server"
+)
+
+type Handler struct {
+	Announce *trackerServer.AnnounceHandler
+	// Called to derive an announcer's IP if non-nil. If not specified, the Request.RemoteAddr is
+	// used. Necessary for instances running behind reverse proxies for example.
+	RequestHost func(r *http.Request) (netip.Addr, error)
+}
+
+func unmarshalQueryKeyToArray(w http.ResponseWriter, key string, query url.Values) (ret [20]byte, ok bool) {
+	str := query.Get(key)
+	if len(str) != len(ret) {
+		http.Error(w, fmt.Sprintf("%v has wrong length", key), http.StatusBadRequest)
+		return
+	}
+	copy(ret[:], str)
+	ok = true
+	return
+}
+
+// Returns false if there was an error and it was served.
+func (me Handler) requestHostAddr(r *http.Request) (_ netip.Addr, err error) {
+	if me.RequestHost != nil {
+		return me.RequestHost(r)
+	}
+	host, _, err := net.SplitHostPort(r.RemoteAddr)
+	if err != nil {
+		return
+	}
+	return netip.ParseAddr(host)
+}
+
+var requestHeadersLogger = log.Default.WithNames("request", "headers")
+
+func (me Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	vs := r.URL.Query()
+	var event tracker.AnnounceEvent
+	err := event.UnmarshalText([]byte(vs.Get("event")))
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+	infoHash, ok := unmarshalQueryKeyToArray(w, "info_hash", vs)
+	if !ok {
+		return
+	}
+	peerId, ok := unmarshalQueryKeyToArray(w, "peer_id", vs)
+	if !ok {
+		return
+	}
+	requestHeadersLogger.Levelf(log.Debug, "request RemoteAddr=%q, header=%q", r.RemoteAddr, r.Header)
+	addr, err := me.requestHostAddr(r)
+	if err != nil {
+		log.Printf("error getting requester IP: %v", err)
+		http.Error(w, "error determining your IP", http.StatusBadGateway)
+		return
+	}
+	portU64, _ := strconv.ParseUint(vs.Get("port"), 0, 16)
+	addrPort := netip.AddrPortFrom(addr, uint16(portU64))
+	left, err := strconv.ParseInt(vs.Get("left"), 0, 64)
+	if err != nil {
+		left = -1
+	}
+	res := me.Announce.Serve(
+		r.Context(),
+		tracker.AnnounceRequest{
+			InfoHash: infoHash,
+			PeerId:   peerId,
+			Event:    event,
+			Port:     addrPort.Port(),
+			NumWant:  -1,
+			Left:     left,
+		},
+		addrPort,
+		trackerServer.GetPeersOpts{
+			MaxCount: generics.Some[uint](200),
+		},
+	)
+	err = res.Err
+	if err != nil {
+		log.Printf("error serving announce: %v", err)
+		http.Error(w, "error handling announce", http.StatusInternalServerError)
+		return
+	}
+	var resp httpTracker.HttpResponse
+	resp.Incomplete = res.Leechers.Value
+	resp.Complete = res.Seeders.Value
+	resp.Interval = res.Interval.UnwrapOr(5 * 60)
+	resp.Peers.Compact = true
+	for _, peer := range res.Peers {
+		if peer.Addr().Is4() {
+			resp.Peers.List = append(resp.Peers.List, tracker.Peer{
+				IP:   peer.Addr().AsSlice(),
+				Port: int(peer.Port()),
+			})
+		} else if peer.Addr().Is6() {
+			resp.Peers6 = append(resp.Peers6, krpc.NodeAddr{
+				IP:   peer.Addr().AsSlice(),
+				Port: int(peer.Port()),
+			})
+		}
+	}
+	err = bencode.NewEncoder(w).Encode(resp)
+	if err != nil {
+		log.Printf("error encoding and writing response body: %v", err)
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/server/server.go b/deps/github.com/anacrolix/torrent/tracker/server/server.go
new file mode 100644
index 0000000..bedc027
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/server/server.go
@@ -0,0 +1,324 @@
+package trackerServer
+
+import (
+	"context"
+	"encoding/hex"
+	"fmt"
+	"net/netip"
+	"sync"
+	"time"
+
+	"github.com/anacrolix/generics"
+	"github.com/anacrolix/log"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+
+	"github.com/anacrolix/torrent/tracker"
+	"github.com/anacrolix/torrent/tracker/udp"
+)
+
+// This is reserved for stuff like filtering by IP version, avoiding an announcer's IP or key,
+// limiting return count, etc.
+type GetPeersOpts struct {
+	// Negative numbers are not allowed.
+	MaxCount generics.Option[uint]
+}
+
+type InfoHash = [20]byte
+
+type PeerInfo struct {
+	AnnounceAddr
+}
+
+type AnnounceAddr = netip.AddrPort
+
+type AnnounceTracker interface {
+	TrackAnnounce(ctx context.Context, req udp.AnnounceRequest, addr AnnounceAddr) error
+	Scrape(ctx context.Context, infoHashes []InfoHash) ([]udp.ScrapeInfohashResult, error)
+	GetPeers(
+		ctx context.Context,
+		infoHash InfoHash,
+		opts GetPeersOpts,
+		remote AnnounceAddr,
+	) ServerAnnounceResult
+}
+
+type ServerAnnounceResult struct {
+	Err      error
+	Peers    []PeerInfo
+	Interval generics.Option[int32]
+	Leechers generics.Option[int32]
+	Seeders  generics.Option[int32]
+}
+
+type AnnounceHandler struct {
+	AnnounceTracker AnnounceTracker
+
+	UpstreamTrackers       []Client
+	UpstreamTrackerUrls    []string
+	UpstreamAnnouncePeerId [20]byte
+	UpstreamAnnounceGate   UpstreamAnnounceGater
+
+	mu sync.Mutex
+	// Operations are only removed when all the upstream peers have been tracked.
+	ongoingUpstreamAugmentations map[InfoHash]augmentationOperation
+}
+
+type peerSet = map[PeerInfo]struct{}
+
+type augmentationOperation struct {
+	// Closed when no more announce responses are pending. finalPeers will contain all the peers
+	// seen.
+	doneAnnouncing chan struct{}
+	// This receives the latest peerSet until doneAnnouncing is closed.
+	curPeers chan peerSet
+	// This contains the final peerSet after doneAnnouncing is closed.
+	finalPeers peerSet
+}
+
+func (me augmentationOperation) getCurPeers() (ret peerSet) {
+	ret, _ = me.getCurPeersAndDone()
+	return
+}
+
+func (me augmentationOperation) getCurPeersAndDone() (ret peerSet, done bool) {
+	select {
+	case ret = <-me.curPeers:
+	case <-me.doneAnnouncing:
+		ret = copyPeerSet(me.finalPeers)
+		done = true
+	}
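+	// Once doneAnnouncing is closed the feeding goroutine has exited, so curPeers will never be
+	// sent on again and only the done branch can fire.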
+	return
+}
+
+// Returns orig extended with any peers from new that weren't already present. Modifies both
+// arguments: peers already in orig are deleted from new.
+func addMissing(orig []PeerInfo, new peerSet) []PeerInfo {
+	for _, peer := range orig {
+		delete(new, peer)
+	}
+	for peer := range new {
+		orig = append(orig, peer)
+	}
+	return orig
+}
+
+var tracer = otel.Tracer("torrent.tracker.udp")
+
+func (me *AnnounceHandler) Serve(
+	ctx context.Context, req AnnounceRequest, addr AnnounceAddr, opts GetPeersOpts,
+) (ret ServerAnnounceResult) {
+	ctx, span := tracer.Start(
+		ctx,
+		"AnnounceHandler.Serve",
+		trace.WithAttributes(
+			attribute.Int64("announce.request.num_want", int64(req.NumWant)),
+			attribute.Int("announce.request.port", int(req.Port)),
+			attribute.String("announce.request.info_hash", hex.EncodeToString(req.InfoHash[:])),
+			attribute.String("announce.request.event", req.Event.String()),
+			attribute.Int64("announce.get_peers.opts.max_count_value", int64(opts.MaxCount.Value)),
+			attribute.Bool("announce.get_peers.opts.max_count_ok", opts.MaxCount.Ok),
+			attribute.String("announce.source.addr.ip", addr.Addr().String()),
+			attribute.Int("announce.source.addr.port", int(addr.Port())),
+		),
+	)
+	defer span.End()
+	defer func() {
+		span.SetAttributes(attribute.Int("announce.get_peers.len", len(ret.Peers)))
+		if ret.Err != nil {
+			span.SetStatus(codes.Error, ret.Err.Error())
+		}
+	}()
+
+	if req.Port != 0 {
+		addr = netip.AddrPortFrom(addr.Addr(), req.Port)
+	}
+	ret.Err = me.AnnounceTracker.TrackAnnounce(ctx, req, addr)
+	if ret.Err != nil {
+		ret.Err = fmt.Errorf("tracking announce: %w", ret.Err)
+		return
+	}
+	infoHash := req.InfoHash
+	var op generics.Option[augmentationOperation]
+	// Grab a handle to any augmentations that are already running.
+	me.mu.Lock()
+	op.Value, op.Ok = me.ongoingUpstreamAugmentations[infoHash]
+	me.mu.Unlock()
+	// Apply num_want limit to max count. I really can't tell if this is the right place to do it,
+	// but it seems the most flexible.
+	if req.NumWant != -1 {
+		newCount := uint(req.NumWant)
+		if opts.MaxCount.Ok {
+			if newCount < opts.MaxCount.Value {
+				opts.MaxCount.Value = newCount
+			}
+		} else {
+			opts.MaxCount = generics.Some(newCount)
+		}
+	}
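+	// E.g. num_want=5 against a server-side cap of Some(200) yields MaxCount Some(5), while
+	// num_want=-1 leaves any existing cap untouched.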
+	ret = me.AnnounceTracker.GetPeers(ctx, infoHash, opts, addr)
+	if ret.Err != nil {
+		return
+	}
+	// Take whatever peers it has ready. If it's finished, it doesn't matter if we do this inside
+	// the mutex or not.
+	if op.Ok {
+		curPeers, done := op.Value.getCurPeersAndDone()
+		ret.Peers = addMissing(ret.Peers, curPeers)
+		if done {
+			// It doesn't get any better with this operation. Forget it.
+			op.Ok = false
+		}
+	}
+	me.mu.Lock()
+	// If we didn't have an operation, and don't have enough peers, start one. Allowing 1 is
+	// assuming the announcing peer might be that one. Really we should record a value to prevent
+	// duplicate announces. Also don't announce upstream if we got no peers because the caller asked
+	// for none.
+	if !op.Ok && len(ret.Peers) <= 1 && opts.MaxCount.UnwrapOr(1) > 0 {
+		op.Value, op.Ok = me.ongoingUpstreamAugmentations[infoHash]
+		if !op.Ok {
+			op.Set(me.augmentPeersFromUpstream(req.InfoHash))
+			generics.MakeMapIfNilAndSet(&me.ongoingUpstreamAugmentations, infoHash, op.Value)
+		}
+	}
+	me.mu.Unlock()
+	// Wait a while for the current operation.
+	if op.Ok {
+		// Force the augmentation to return with whatever it has if it hasn't completed in a
+		// reasonable time.
+		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+		select {
+		case <-ctx.Done():
+		case <-op.Value.doneAnnouncing:
+		}
+		cancel()
+		ret.Peers = addMissing(ret.Peers, op.Value.getCurPeers())
+	}
+	return
+}
+
+func (me *AnnounceHandler) augmentPeersFromUpstream(infoHash [20]byte) augmentationOperation {
+	const announceTimeout = time.Minute
+	announceCtx, cancel := context.WithTimeout(context.Background(), announceTimeout)
+	subReq := AnnounceRequest{
+		InfoHash: infoHash,
+		PeerId:   me.UpstreamAnnouncePeerId,
+		Event:    tracker.None,
+		Key:      0,
+		NumWant:  -1,
+		Port:     0,
+	}
+	peersChan := make(chan []Peer)
+	var pendingUpstreams sync.WaitGroup
+	for i := range me.UpstreamTrackers {
+		client := me.UpstreamTrackers[i]
+		url := me.UpstreamTrackerUrls[i]
+		pendingUpstreams.Add(1)
+		go func() {
+			started, err := me.UpstreamAnnounceGate.Start(announceCtx, url, infoHash, announceTimeout)
+			if err != nil {
+				log.Printf("error reserving announce for %x to %v: %v", infoHash, url, err)
+			}
+			if err != nil || !started {
+				peersChan <- nil
+				return
+			}
+			log.Printf("announcing %x upstream to %v", infoHash, url)
+			resp, err := client.Announce(announceCtx, subReq, tracker.AnnounceOpt{
+				UserAgent: "aragorn",
+			})
+			interval := resp.Interval
+			go func() {
+				if interval < 5*60 {
+					// This is as much to reduce load on upstream trackers in the event of errors,
+					// as it is to reduce load on our peer store.
+					interval = 5 * 60
+				}
+				err := me.UpstreamAnnounceGate.Completed(context.Background(), url, infoHash, interval)
+				if err != nil {
+					log.Printf("error recording completed announce for %x to %v: %v", infoHash, url, err)
+				}
+			}()
+			peersChan <- resp.Peers
+			if err != nil {
+				log.Levelf(log.Warning, "error announcing to upstream %q: %v", url, err)
+			}
+		}()
+	}
+	peersToTrack := make(map[string]Peer)
+	go func() {
+		pendingUpstreams.Wait()
+		cancel()
+		close(peersChan)
+		log.Levelf(log.Debug, "adding %v distinct peers from upstream trackers", len(peersToTrack))
+		for _, peer := range peersToTrack {
+			addrPort, ok := peer.ToNetipAddrPort()
+			if !ok {
+				continue
+			}
+			trackReq := AnnounceRequest{
+				InfoHash: infoHash,
+				Event:    tracker.Started,
+				Port:     uint16(peer.Port),
+				// Let's assume upstream peers are leechers without knowing better.
+				Left: -1,
+			}
+			copy(trackReq.PeerId[:], peer.ID)
+			// TODO: How do we know if these peers are leechers or seeders?
+			err := me.AnnounceTracker.TrackAnnounce(context.TODO(), trackReq, addrPort)
+			if err != nil {
+				log.Levelf(log.Error, "error tracking upstream peer: %v", err)
+			}
+		}
+		me.mu.Lock()
+		delete(me.ongoingUpstreamAugmentations, infoHash)
+		me.mu.Unlock()
+	}()
+	curPeersChan := make(chan map[PeerInfo]struct{})
+	doneChan := make(chan struct{})
+	retPeers := make(map[PeerInfo]struct{})
+	go func() {
+		defer close(doneChan)
+		for {
+			select {
+			case peers, ok := <-peersChan:
+				if !ok {
+					return
+				}
+				voldemort(peers, peersToTrack, retPeers)
+				pendingUpstreams.Done()
+			case curPeersChan <- copyPeerSet(retPeers):
+			}
+		}
+	}()
+	// Take return references.
+	return augmentationOperation{
+		curPeers:       curPeersChan,
+		finalPeers:     retPeers,
+		doneAnnouncing: doneChan,
+	}
+}
+
+func copyPeerSet(orig peerSet) (ret peerSet) {
+	ret = make(peerSet, len(orig))
+	for k, v := range orig {
+		ret[k] = v
+	}
+	return
+}
+
+// Adds peers to trailing containers.
+func voldemort(peers []Peer, toTrack map[string]Peer, sets ...map[PeerInfo]struct{}) {
+	for _, protoPeer := range peers {
+		toTrack[protoPeer.String()] = protoPeer
+		addr, ok := netip.AddrFromSlice(protoPeer.IP)
+		if !ok {
+			continue
+		}
+		handlerPeer := PeerInfo{netip.AddrPortFrom(addr, uint16(protoPeer.Port))}
+		for _, set := range sets {
+			set[handlerPeer] = struct{}{}
+		}
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/server/upstream-announcing.go b/deps/github.com/anacrolix/torrent/tracker/server/upstream-announcing.go
new file mode 100644
index 0000000..cfbf61c
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/server/upstream-announcing.go
@@ -0,0 +1,18 @@
+package trackerServer
+
+import (
+	"context"
+	"time"
+)
+
+type UpstreamAnnounceGater interface {
+	Start(ctx context.Context, tracker string, infoHash InfoHash,
+		// How long the announce block remains before discarding it.
+		timeout time.Duration,
+	) (bool, error)
+	Completed(
+		ctx context.Context, tracker string, infoHash InfoHash,
+		// Num of seconds reported by tracker, or some suitable value the caller has chosen.
+		interval int32,
+	) error
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/server/use.go b/deps/github.com/anacrolix/torrent/tracker/server/use.go
new file mode 100644
index 0000000..942321c
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/server/use.go
@@ -0,0 +1,9 @@
+package trackerServer
+
+import "github.com/anacrolix/torrent/tracker"
+
+type (
+	AnnounceRequest = tracker.AnnounceRequest
+	Client          = tracker.Client
+	Peer            = tracker.Peer
+)
diff --git a/deps/github.com/anacrolix/torrent/tracker/shared/shared.go b/deps/github.com/anacrolix/torrent/tracker/shared/shared.go
new file mode 100644
index 0000000..7859ea9
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/shared/shared.go
@@ -0,0 +1,10 @@
+package shared
+
+import "github.com/anacrolix/torrent/tracker/udp"
+
+const (
+	None      udp.AnnounceEvent = iota
+	Completed                   // The local peer just completed the torrent.
+	Started                     // The local peer has just resumed this torrent.
+	Stopped                     // The local peer is leaving the swarm.
+)
diff --git a/deps/github.com/anacrolix/torrent/tracker/tracker.go b/deps/github.com/anacrolix/torrent/tracker/tracker.go
new file mode 100644
index 0000000..f3bc9c5
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/tracker.go
@@ -0,0 +1,89 @@
+package tracker
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/anacrolix/dht/v2/krpc"
+	"github.com/anacrolix/log"
+
+	trHttp "github.com/anacrolix/torrent/tracker/http"
+	"github.com/anacrolix/torrent/tracker/shared"
+	"github.com/anacrolix/torrent/tracker/udp"
+)
+
+const (
+	None      = shared.None
+	Started   = shared.Started
+	Stopped   = shared.Stopped
+	Completed = shared.Completed
+)
+
+type AnnounceRequest = udp.AnnounceRequest
+
+type AnnounceResponse = trHttp.AnnounceResponse
+
+type Peer = trHttp.Peer
+
+type AnnounceEvent = udp.AnnounceEvent
+
+var ErrBadScheme = errors.New("unknown scheme")
+
+type Announce struct {
+	TrackerUrl          string
+	Request             AnnounceRequest
+	HostHeader          string
+	HttpProxy           func(*http.Request) (*url.URL, error)
+	HttpRequestDirector func(*http.Request) error
+	DialContext         func(ctx context.Context, network, addr string) (net.Conn, error)
+	ListenPacket        func(network, addr string) (net.PacketConn, error)
+	ServerName          string
+	UserAgent           string
+	UdpNetwork          string
+	// If the port is zero, it's assumed to be the same as the Request.Port.
+	ClientIp4 krpc.NodeAddr
+	// If the port is zero, it's assumed to be the same as the Request.Port.
+	ClientIp6 krpc.NodeAddr
+	Context   context.Context
+	Logger    log.Logger
+}
+
+// The code *is* the documentation.
+const DefaultTrackerAnnounceTimeout = 15 * time.Second
+
+func (me Announce) Do() (res AnnounceResponse, err error) {
+	cl, err := NewClient(me.TrackerUrl, NewClientOpts{
+		Http: trHttp.NewClientOpts{
+			Proxy:       me.HttpProxy,
+			DialContext: me.DialContext,
+			ServerName:  me.ServerName,
+		},
+		UdpNetwork:   me.UdpNetwork,
+		Logger:       me.Logger.WithContextValue(fmt.Sprintf("tracker client for %q", me.TrackerUrl)),
+		ListenPacket: me.ListenPacket,
+	})
+	if err != nil {
+		return
+	}
+	defer cl.Close()
+	if me.Context == nil {
+		// This is just to maintain the old behaviour of a 15s timeout. Users can override it by
+		// providing their own Context. See comments elsewhere about longer timeouts acting as
+		// rate limiting for overloaded trackers.
+		ctx, cancel := context.WithTimeout(context.Background(), DefaultTrackerAnnounceTimeout)
+		defer cancel()
+		me.Context = ctx
+	}
+	return cl.Announce(me.Context, me.Request, trHttp.AnnounceOpt{
+		UserAgent:           me.UserAgent,
+		HostHeader:          me.HostHeader,
+		ClientIp4:           me.ClientIp4.IP,
+		ClientIp6:           me.ClientIp6.IP,
+		HttpRequestDirector: me.HttpRequestDirector,
+	})
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/tracker_test.go b/deps/github.com/anacrolix/torrent/tracker/tracker_test.go
new file mode 100644
index 0000000..998248d
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/tracker_test.go
@@ -0,0 +1,13 @@
+package tracker
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestUnsupportedTrackerScheme(t *testing.T) {
+	t.Parallel()
+	_, err := Announce{TrackerUrl: "lol://tracker.openbittorrent.com:80/announce"}.Do()
+	require.Equal(t, ErrBadScheme, err)
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp-server_test.go b/deps/github.com/anacrolix/torrent/tracker/udp-server_test.go
new file mode 100644
index 0000000..84408bf
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp-server_test.go
@@ -0,0 +1,126 @@
+package tracker
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/binary"
+	"fmt"
+	"math/rand"
+	"net"
+
+	"github.com/anacrolix/dht/v2/krpc"
+	"github.com/anacrolix/missinggo/v2"
+
+	"github.com/anacrolix/torrent/tracker/udp"
+)
+
+type torrent struct {
+	Leechers int32
+	Seeders  int32
+	Peers    []krpc.NodeAddr
+}
+
+type server struct {
+	pc    net.PacketConn
+	conns map[udp.ConnectionId]struct{}
+	t     map[[20]byte]torrent
+}
+
+func marshal(parts ...interface{}) (ret []byte, err error) {
+	var buf bytes.Buffer
+	for _, p := range parts {
+		err = binary.Write(&buf, binary.BigEndian, p)
+		if err != nil {
+			return
+		}
+	}
+	ret = buf.Bytes()
+	return
+}
+
+func (s *server) respond(addr net.Addr, rh udp.ResponseHeader, parts ...interface{}) (err error) {
+	b, err := marshal(append([]interface{}{rh}, parts...)...)
+	if err != nil {
+		return
+	}
+	_, err = s.pc.WriteTo(b, addr)
+	return
+}
+
+func (s *server) newConn() (ret udp.ConnectionId) {
+	ret = rand.Uint64()
+	if s.conns == nil {
+		s.conns = make(map[udp.ConnectionId]struct{})
+	}
+	s.conns[ret] = struct{}{}
+	return
+}
+
+func (s *server) serveOne() (err error) {
+	b := make([]byte, 0x10000)
+	n, addr, err := s.pc.ReadFrom(b)
+	if err != nil {
+		return
+	}
+	r := bytes.NewReader(b[:n])
+	var h udp.RequestHeader
+	err = udp.Read(r, &h)
+	if err != nil {
+		return
+	}
+	switch h.Action {
+	case udp.ActionConnect:
+		if h.ConnectionId != udp.ConnectRequestConnectionId {
+			return
+		}
+		connId := s.newConn()
+		err = s.respond(addr, udp.ResponseHeader{
+			udp.ActionConnect,
+			h.TransactionId,
+		}, udp.ConnectionResponse{
+			connId,
+		})
+		return
+	case udp.ActionAnnounce:
+		if _, ok := s.conns[h.ConnectionId]; !ok {
+			s.respond(addr, udp.ResponseHeader{
+				TransactionId: h.TransactionId,
+				Action:        udp.ActionError,
+			}, []byte("not connected"))
+			return
+		}
+		var ar AnnounceRequest
+		err = udp.Read(r, &ar)
+		if err != nil {
+			return
+		}
+		t := s.t[ar.InfoHash]
+		bm := func() encoding.BinaryMarshaler {
+			ip := missinggo.AddrIP(addr)
+			if ip.To4() != nil {
+				return krpc.CompactIPv4NodeAddrs(t.Peers)
+			}
+			return krpc.CompactIPv6NodeAddrs(t.Peers)
+		}()
+		b, err = bm.MarshalBinary()
+		if err != nil {
+			panic(err)
+		}
+		err = s.respond(addr, udp.ResponseHeader{
+			TransactionId: h.TransactionId,
+			Action:        udp.ActionAnnounce,
+		}, udp.AnnounceResponseHeader{
+			Interval: 900,
+			Leechers: t.Leechers,
+			Seeders:  t.Seeders,
+		}, b)
+		return
+	default:
+		err = fmt.Errorf("unhandled action: %d", h.Action)
+		s.respond(addr, udp.ResponseHeader{
+			TransactionId: h.TransactionId,
+			Action:        udp.ActionError,
+		}, []byte("unhandled action"))
+		return
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp.go b/deps/github.com/anacrolix/torrent/tracker/udp.go
new file mode 100644
index 0000000..cf68188
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp.go
@@ -0,0 +1,51 @@
+package tracker
+
+import (
+	"context"
+	"encoding/binary"
+
+	"github.com/anacrolix/generics"
+
+	trHttp "github.com/anacrolix/torrent/tracker/http"
+	"github.com/anacrolix/torrent/tracker/udp"
+	"github.com/anacrolix/torrent/types/infohash"
+)
+
+type udpClient struct {
+	cl         *udp.ConnClient
+	requestUri string
+}
+
+func (c *udpClient) Scrape(ctx context.Context, ihs []infohash.T) (out udp.ScrapeResponse, err error) {
+	return c.cl.Client.Scrape(
+		ctx,
+		generics.SliceMap(ihs, func(from infohash.T) udp.InfoHash {
+			return from
+		}),
+	)
+}
+
+func (c *udpClient) Close() error {
+	return c.cl.Close()
+}
+
+func (c *udpClient) Announce(ctx context.Context, req AnnounceRequest, opts trHttp.AnnounceOpt) (res AnnounceResponse, err error) {
+	if req.IPAddress == 0 && opts.ClientIp4 != nil {
+		// I think we're taking the IP bytes in big-endian order (as for all IPs) and writing them
+		// to a natively ordered uint32, e.g. 1.2.3.4 becomes 0x01020304. This will be correctly
+		// ordered when written back out by the UDP client later. I'm ignoring the fact that IPv6
+		// announces shouldn't have an IP address; we have a perfectly good IPv4 address.
+		req.IPAddress = binary.BigEndian.Uint32(opts.ClientIp4.To4())
+	}
+	h, nas, err := c.cl.Announce(ctx, req, udp.Options{RequestUri: c.requestUri})
+	if err != nil {
+		return
+	}
+	res.Interval = h.Interval
+	res.Leechers = h.Leechers
+	res.Seeders = h.Seeders
+	for _, cp := range nas.NodeAddrs() {
+		res.Peers = append(res.Peers, trHttp.Peer{}.FromNodeAddr(cp))
+	}
+	return
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp/addr-family.go b/deps/github.com/anacrolix/torrent/tracker/udp/addr-family.go
new file mode 100644
index 0000000..ddecb4c
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp/addr-family.go
@@ -0,0 +1,26 @@
+package udp
+
+import (
+	"encoding"
+
+	"github.com/anacrolix/dht/v2/krpc"
+)
+
+// Discriminates behaviours based on address family in use.
+type AddrFamily int
+
+const (
+	AddrFamilyIpv4 = iota + 1
+	AddrFamilyIpv6
+)
+
+// Returns a marshaler for the given node addrs for the specified family.
+func GetNodeAddrsCompactMarshaler(nas []krpc.NodeAddr, family AddrFamily) encoding.BinaryMarshaler {
+	switch family {
+	case AddrFamilyIpv4:
+		return krpc.CompactIPv4NodeAddrs(nas)
+	case AddrFamilyIpv6:
+		return krpc.CompactIPv6NodeAddrs(nas)
+	}
+	return nil
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp/announce.go b/deps/github.com/anacrolix/torrent/tracker/udp/announce.go
new file mode 100644
index 0000000..b5c9f8f
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp/announce.go
@@ -0,0 +1,53 @@
+package udp
+
+import (
+	"encoding"
+	"fmt"
+
+	"github.com/anacrolix/dht/v2/krpc"
+)
+
+// Marshalled as binary by the UDP client, so be careful making changes.
+type AnnounceRequest struct {
+	InfoHash   [20]byte
+	PeerId     [20]byte
+	Downloaded int64
+	Left       int64 // If less than 0, math.MaxInt64 will be used for HTTP trackers instead.
+	Uploaded   int64
+	// Apparently this is optional. The zero value (none) can be used for
+	// announces done at regular intervals.
+	Event     AnnounceEvent
+	IPAddress uint32
+	Key       int32
+	NumWant   int32 // How many peer addresses are desired. -1 for default.
+	Port      uint16
+} // 82 bytes
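+
+// Size sketch for the comment above: 20 (InfoHash) + 20 (PeerId) + 8+8+8
+// (Downloaded, Left, Uploaded) + 4 (Event) + 4 (IPAddress) + 4 (Key) +
+// 4 (NumWant) + 2 (Port) = 82 bytes when marshalled big-endian.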
+
+type AnnounceEvent int32
+
+func (me *AnnounceEvent) UnmarshalText(text []byte) error {
+	for key, str := range announceEventStrings {
+		if string(text) == str {
+			*me = AnnounceEvent(key)
+			return nil
+		}
+	}
+	return fmt.Errorf("unknown event")
+}
+
+var announceEventStrings = []string{"", "completed", "started", "stopped"}
+
+func (e AnnounceEvent) String() string {
+	// See BEP 3, "event", and
+	// https://github.com/anacrolix/torrent/issues/416#issuecomment-751427001. Return a safe default
+	// in case event values are not sanitized.
+	if e < 0 || int(e) >= len(announceEventStrings) {
+		return ""
+	}
+	return announceEventStrings[e]
+}
+
+type AnnounceResponsePeers interface {
+	encoding.BinaryUnmarshaler
+	NodeAddrs() []krpc.NodeAddr
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp/client.go b/deps/github.com/anacrolix/torrent/tracker/udp/client.go
new file mode 100644
index 0000000..6b97ddc
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp/client.go
@@ -0,0 +1,225 @@
+package udp
+
+import (
+	"bytes"
+	"context"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/anacrolix/dht/v2/krpc"
+)
+
+// Client interacts with UDP trackers via its Writer and Dispatcher. It has no knowledge of
+// connection specifics.
+type Client struct {
+	mu           sync.Mutex
+	connId       ConnectionId
+	connIdIssued time.Time
+
+	shouldReconnectOverride func() bool
+
+	Dispatcher *Dispatcher
+	Writer     io.Writer
+}
+
+func (cl *Client) Announce(
+	ctx context.Context, req AnnounceRequest, opts Options,
+	// Decides whether the response body is IPv6 or IPv4, see BEP 15.
+	ipv6 func(net.Addr) bool,
+) (
+	respHdr AnnounceResponseHeader,
+	// A slice of krpc.NodeAddr, likely wrapped in an appropriate unmarshalling wrapper.
+	peers AnnounceResponsePeers,
+	err error,
+) {
+	respBody, addr, err := cl.request(ctx, ActionAnnounce, append(mustMarshal(req), opts.Encode()...))
+	if err != nil {
+		return
+	}
+	r := bytes.NewBuffer(respBody)
+	err = Read(r, &respHdr)
+	if err != nil {
+		err = fmt.Errorf("reading response header: %w", err)
+		return
+	}
+	if ipv6(addr) {
+		peers = &krpc.CompactIPv6NodeAddrs{}
+	} else {
+		peers = &krpc.CompactIPv4NodeAddrs{}
+	}
+	err = peers.UnmarshalBinary(r.Bytes())
+	if err != nil {
+		err = fmt.Errorf("reading response peers: %w", err)
+	}
+	return
+}
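+
+// For orientation, a sketch of the announce response layout consumed above:
+// an 8-byte ResponseHeader (action, transaction id), a 12-byte
+// AnnounceResponseHeader (interval, leechers, seeders), then compact peers:
+// 6 bytes each for IPv4 (4 IP + 2 port), 18 bytes each for IPv6.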
+
+// There's no way to pass options in a scrape, since we don't know when the request body ends.
+func (cl *Client) Scrape(
+	ctx context.Context, ihs []InfoHash,
+) (
+	out ScrapeResponse, err error,
+) {
+	respBody, _, err := cl.request(ctx, ActionScrape, mustMarshal(ScrapeRequest(ihs)))
+	if err != nil {
+		return
+	}
+	r := bytes.NewBuffer(respBody)
+	for r.Len() != 0 {
+		var item ScrapeInfohashResult
+		err = Read(r, &item)
+		if err != nil {
+			return
+		}
+		out = append(out, item)
+	}
+	if len(out) > len(ihs) {
+		err = fmt.Errorf("got %v results but expected %v", len(out), len(ihs))
+		return
+	}
+	return
+}
+
+func (cl *Client) shouldReconnectDefault() bool {
+	return cl.connIdIssued.IsZero() || time.Since(cl.connIdIssued) >= time.Minute
+}
+
+func (cl *Client) shouldReconnect() bool {
+	if cl.shouldReconnectOverride != nil {
+		return cl.shouldReconnectOverride()
+	}
+	return cl.shouldReconnectDefault()
+}
+
+func (cl *Client) connect(ctx context.Context) (err error) {
+	if !cl.shouldReconnect() {
+		return nil
+	}
+	return cl.doConnectRoundTrip(ctx)
+}
+
+// This just does the connect request and updates local state if it succeeds.
+func (cl *Client) doConnectRoundTrip(ctx context.Context) (err error) {
+	respBody, _, err := cl.request(ctx, ActionConnect, nil)
+	if err != nil {
+		return err
+	}
+	var connResp ConnectionResponse
+	err = binary.Read(bytes.NewReader(respBody), binary.BigEndian, &connResp)
+	if err != nil {
+		return
+	}
+	cl.connId = connResp.ConnectionId
+	cl.connIdIssued = time.Now()
+	//log.Printf("conn id set to %x", cl.connId)
+	return
+}
+
+func (cl *Client) connIdForRequest(ctx context.Context, action Action) (id ConnectionId, err error) {
+	if action == ActionConnect {
+		id = ConnectRequestConnectionId
+		return
+	}
+	err = cl.connect(ctx)
+	if err != nil {
+		return
+	}
+	id = cl.connId
+	return
+}
+
+func (cl *Client) writeRequest(
+	ctx context.Context, action Action, body []byte, tId TransactionId, buf *bytes.Buffer,
+) (
+	err error,
+) {
+	var connId ConnectionId
+	if action == ActionConnect {
+		connId = ConnectRequestConnectionId
+	} else {
+		// We lock here while establishing a connection ID, then ensure the request is written
+		// before allowing the connection ID to change again. This is so the server doesn't assign
+		// us another ID before we've sent this request. Note that this doesn't let us return if
+		// the context is cancelled while we wait to obtain a new ID.
+		cl.mu.Lock()
+		defer cl.mu.Unlock()
+		connId, err = cl.connIdForRequest(ctx, action)
+		if err != nil {
+			return
+		}
+	}
+	buf.Reset()
+	err = Write(buf, RequestHeader{
+		ConnectionId:  connId,
+		Action:        action,
+		TransactionId: tId,
+	})
+	if err != nil {
+		panic(err)
+	}
+	buf.Write(body)
+	_, err = cl.Writer.Write(buf.Bytes())
+	//log.Printf("sent request with conn id %x", connId)
+	return
+}
+
+func (cl *Client) requestWriter(ctx context.Context, action Action, body []byte, tId TransactionId) (err error) {
+	var buf bytes.Buffer
+	for n := 0; ; n++ {
+		err = cl.writeRequest(ctx, action, body, tId, &buf)
+		if err != nil {
+			return
+		}
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-time.After(timeout(n)):
+		}
+	}
+}
+
+const ConnectionIdMissmatchNul = "Connection ID missmatch.\x00"
+
+type ErrorResponse struct {
+	Message string
+}
+
+func (me ErrorResponse) Error() string {
+	return fmt.Sprintf("error response: %#q", me.Message)
+}
+
+func (cl *Client) request(ctx context.Context, action Action, body []byte) (respBody []byte, addr net.Addr, err error) {
+	respChan := make(chan DispatchedResponse, 1)
+	t := cl.Dispatcher.NewTransaction(func(dr DispatchedResponse) {
+		respChan <- dr
+	})
+	defer t.End()
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+	writeErr := make(chan error, 1)
+	go func() {
+		writeErr <- cl.requestWriter(ctx, action, body, t.Id())
+	}()
+	select {
+	case dr := <-respChan:
+		if dr.Header.Action == action {
+			respBody = dr.Body
+			addr = dr.Addr
+		} else if dr.Header.Action == ActionError {
+			// udp://tracker.torrent.eu.org:451/announce frequently returns "Connection ID
+			// missmatch.\x00"
+			err = ErrorResponse{Message: string(dr.Body)}
+		} else {
+			err = fmt.Errorf("unexpected response action %v", dr.Header.Action)
+		}
+	case err = <-writeErr:
+		err = fmt.Errorf("write error: %w", err)
+	case <-ctx.Done():
+		err = ctx.Err()
+	}
+	return
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp/conn-client.go b/deps/github.com/anacrolix/torrent/tracker/udp/conn-client.go
new file mode 100644
index 0000000..da4d7c0
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp/conn-client.go
@@ -0,0 +1,133 @@
+package udp
+
+import (
+	"context"
+	"net"
+
+	"github.com/anacrolix/log"
+	"github.com/anacrolix/missinggo/v2"
+)
+
+type listenPacketFunc func(network, addr string) (net.PacketConn, error)
+
+type NewConnClientOpts struct {
+	// The network to use, such as "udp4", "udp" or "udp6".
+	Network string
+	// Tracker address
+	Host string
+	// If non-nil, forces either IPv4 or IPv6 in the UDP tracker wire protocol.
+	Ipv6 *bool
+	// Logger to use for internal errors.
+	Logger log.Logger
+	// Custom function to use as a substitute for net.ListenPacket
+	ListenPacket listenPacketFunc
+}
+
+// Manages a Client with a specific connection.
+type ConnClient struct {
+	Client  Client
+	conn    net.PacketConn
+	d       Dispatcher
+	readErr error
+	closed  bool
+	newOpts NewConnClientOpts
+}
+
+func (cc *ConnClient) reader() {
+	b := make([]byte, 0x800)
+	for {
+		n, addr, err := cc.conn.ReadFrom(b)
+		if err != nil {
+			// TODO: Do bad things to the dispatcher, and incoming calls to the client if we have a
+			// read error.
+			cc.readErr = err
+			if !cc.closed {
+				// Don't panic; just close the connection. Fixes https://github.com/anacrolix/torrent/issues/845.
+				cc.Close()
+			}
+			break
+		}
+		err = cc.d.Dispatch(b[:n], addr)
+		if err != nil {
+			cc.newOpts.Logger.Levelf(log.Debug, "dispatching packet received on %v: %v", cc.conn.LocalAddr(), err)
+		}
+	}
+}
+
+func ipv6(opt *bool, network string, remoteAddr net.Addr) bool {
+	if opt != nil {
+		return *opt
+	}
+	switch network {
+	case "udp4":
+		return false
+	case "udp6":
+		return true
+	}
+	rip := missinggo.AddrIP(remoteAddr)
+	return rip.To16() != nil && rip.To4() == nil
+}
+
+// Allows a UDP Client to write packets to an endpoint without knowing about the network specifics.
+type clientWriter struct {
+	pc      net.PacketConn
+	network string
+	address string
+}
+
+func (me clientWriter) Write(p []byte) (n int, err error) {
+	addr, err := net.ResolveUDPAddr(me.network, me.address)
+	if err != nil {
+		return
+	}
+	return me.pc.WriteTo(p, addr)
+}
+
+func NewConnClient(opts NewConnClientOpts) (cc *ConnClient, err error) {
+	var conn net.PacketConn
+	if opts.ListenPacket != nil {
+		conn, err = opts.ListenPacket(opts.Network, ":0")
+	} else {
+		conn, err = net.ListenPacket(opts.Network, ":0")
+	}
+
+	if err != nil {
+		return
+	}
+	if opts.Logger.IsZero() {
+		opts.Logger = log.Default
+	}
+	cc = &ConnClient{
+		Client: Client{
+			Writer: clientWriter{
+				pc:      conn,
+				network: opts.Network,
+				address: opts.Host,
+			},
+		},
+		conn:    conn,
+		newOpts: opts,
+	}
+	cc.Client.Dispatcher = &cc.d
+	go cc.reader()
+	return
+}
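+
+// Minimal usage sketch (trackerHost is a placeholder, error handling elided):
+//
+//	cc, err := NewConnClient(NewConnClientOpts{Network: "udp", Host: trackerHost})
+//	if err != nil { ... }
+//	defer cc.Close()
+//	hdr, peers, err := cc.Announce(ctx, req, Options{})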
+
+func (cc *ConnClient) Close() error {
+	cc.closed = true
+	return cc.conn.Close()
+}
+
+func (cc *ConnClient) Announce(
+	ctx context.Context, req AnnounceRequest, opts Options,
+) (
+	h AnnounceResponseHeader, nas AnnounceResponsePeers, err error,
+) {
+	return cc.Client.Announce(ctx, req, opts, func(addr net.Addr) bool {
+		return ipv6(cc.newOpts.Ipv6, cc.newOpts.Network, addr)
+	})
+}
+
+func (cc *ConnClient) LocalAddr() net.Addr {
+	return cc.conn.LocalAddr()
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp/dispatcher.go b/deps/github.com/anacrolix/torrent/tracker/udp/dispatcher.go
new file mode 100644
index 0000000..5709bd5
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp/dispatcher.go
@@ -0,0 +1,71 @@
+package udp
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+	"sync"
+)
+
+// Maintains a mapping of transaction IDs to handlers.
+type Dispatcher struct {
+	mu           sync.RWMutex
+	transactions map[TransactionId]Transaction
+}
+
+// The caller owns b.
+func (me *Dispatcher) Dispatch(b []byte, addr net.Addr) error {
+	buf := bytes.NewBuffer(b)
+	var rh ResponseHeader
+	err := Read(buf, &rh)
+	if err != nil {
+		return err
+	}
+	me.mu.RLock()
+	defer me.mu.RUnlock()
+	if t, ok := me.transactions[rh.TransactionId]; ok {
+		t.h(DispatchedResponse{
+			Header: rh,
+			Body:   append([]byte(nil), buf.Bytes()...),
+			Addr:   addr,
+		})
+		return nil
+	} else {
+		return fmt.Errorf("unknown transaction id %v", rh.TransactionId)
+	}
+}
+
+func (me *Dispatcher) forgetTransaction(id TransactionId) {
+	me.mu.Lock()
+	defer me.mu.Unlock()
+	delete(me.transactions, id)
+}
+
+func (me *Dispatcher) NewTransaction(h TransactionResponseHandler) Transaction {
+	me.mu.Lock()
+	defer me.mu.Unlock()
+	for {
+		id := RandomTransactionId()
+		if _, ok := me.transactions[id]; ok {
+			continue
+		}
+		t := Transaction{
+			d:  me,
+			h:  h,
+			id: id,
+		}
+		if me.transactions == nil {
+			me.transactions = make(map[TransactionId]Transaction)
+		}
+		me.transactions[id] = t
+		return t
+	}
+}
+
+type DispatchedResponse struct {
+	Header ResponseHeader
+	// Response payload, after the header.
+	Body []byte
+	// Response source address
+	Addr net.Addr
+}
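+
+// Typical flow, as a sketch mirroring Client.request: register a handler with
+// NewTransaction, send a request carrying Transaction.Id(), let a read loop
+// feed raw datagrams to Dispatch, and call Transaction.End() when finished so
+// the transaction id is forgotten.
+//
+//	t := d.NewTransaction(func(dr DispatchedResponse) { respChan <- dr })
+//	defer t.End()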
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp/options.go b/deps/github.com/anacrolix/torrent/tracker/udp/options.go
new file mode 100644
index 0000000..a2c223d
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp/options.go
@@ -0,0 +1,24 @@
+package udp
+
+import (
+	"math"
+)
+
+type Options struct {
+	RequestUri string
+}
+
+func (opts Options) Encode() (ret []byte) {
+	for {
+		l := len(opts.RequestUri)
+		if l == 0 {
+			break
+		}
+		if l > math.MaxUint8 {
+			l = math.MaxUint8
+		}
+		ret = append(append(ret, optionTypeURLData, byte(l)), opts.RequestUri[:l]...)
+		opts.RequestUri = opts.RequestUri[l:]
+	}
+	return
+}
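+
+// Worked example (a sketch; the bytes follow BEP 41's URLData option, type 2,
+// and match the expectation in TestURLPathOption):
+//
+//	Options{RequestUri: "/announce"}.Encode()
+//	// => []byte{0x02, 0x09, '/', 'a', 'n', 'n', 'o', 'u', 'n', 'c', 'e'}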
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp/protocol.go b/deps/github.com/anacrolix/torrent/tracker/udp/protocol.go
new file mode 100644
index 0000000..653d013
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp/protocol.go
@@ -0,0 +1,82 @@
+package udp
+
+import (
+	"bytes"
+	"encoding/binary"
+	"io"
+)
+
+type Action int32
+
+const (
+	ActionConnect Action = iota
+	ActionAnnounce
+	ActionScrape
+	ActionError
+)
+
+const ConnectRequestConnectionId = 0x41727101980
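+
+// A connect request is the 16-byte RequestHeader below, marshalled big-endian:
+// 8 bytes of the magic above (0x41727101980), 4 bytes action (0 = connect),
+// and 4 bytes transaction id (BEP 15).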
+
+const (
+	// BEP 41
+	optionTypeEndOfOptions = 0
+	optionTypeNOP          = 1
+	optionTypeURLData      = 2
+)
+
+type TransactionId = int32
+
+type ConnectionId = uint64
+
+type ConnectionRequest struct {
+	ConnectionId  ConnectionId
+	Action        Action
+	TransactionId TransactionId
+}
+
+type ConnectionResponse struct {
+	ConnectionId ConnectionId
+}
+
+type ResponseHeader struct {
+	Action        Action
+	TransactionId TransactionId
+}
+
+type RequestHeader struct {
+	ConnectionId  ConnectionId
+	Action        Action
+	TransactionId TransactionId
+} // 16 bytes
+
+type AnnounceResponseHeader struct {
+	Interval int32
+	Leechers int32
+	Seeders  int32
+}
+
+type InfoHash = [20]byte
+
+func marshal(data interface{}) (b []byte, err error) {
+	var buf bytes.Buffer
+	err = Write(&buf, data)
+	b = buf.Bytes()
+	return
+}
+
+func mustMarshal(data interface{}) []byte {
+	b, err := marshal(data)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
+
+// This is for fixed-size, builtin types only I think.
+func Write(w io.Writer, data interface{}) error {
+	return binary.Write(w, binary.BigEndian, data)
+}
+
+func Read(r io.Reader, data interface{}) error {
+	return binary.Read(r, binary.BigEndian, data)
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp/scrape.go b/deps/github.com/anacrolix/torrent/tracker/udp/scrape.go
new file mode 100644
index 0000000..13a69b9
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp/scrape.go
@@ -0,0 +1,13 @@
+package udp
+
+type ScrapeRequest []InfoHash
+
+type ScrapeResponse []ScrapeInfohashResult
+
+type ScrapeInfohashResult struct {
+	// I'm not sure why the fields are named differently for HTTP scrapes.
+	// https://www.bittorrent.org/beps/bep_0048.html
+	Seeders   int32 `bencode:"complete"`
+	Completed int32 `bencode:"downloaded"`
+	Leechers  int32 `bencode:"incomplete"`
+}
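+
+// On the wire each result is three big-endian int32s (12 bytes); Client.Scrape
+// reads results until the response body is exhausted, and per BEP 15 they are
+// returned in the order the info-hashes were requested.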
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp/server/server.go b/deps/github.com/anacrolix/torrent/tracker/udp/server/server.go
new file mode 100644
index 0000000..3568839
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp/server/server.go
@@ -0,0 +1,241 @@
+package udpTrackerServer
+
+import (
+	"bytes"
+	"context"
+	"crypto/rand"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"net/netip"
+
+	"github.com/anacrolix/dht/v2/krpc"
+	"github.com/anacrolix/generics"
+	"github.com/anacrolix/log"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+
+	trackerServer "github.com/anacrolix/torrent/tracker/server"
+	"github.com/anacrolix/torrent/tracker/udp"
+)
+
+type ConnectionTrackerAddr = string
+
+type ConnectionTracker interface {
+	Add(ctx context.Context, addr ConnectionTrackerAddr, id udp.ConnectionId) error
+	Check(ctx context.Context, addr ConnectionTrackerAddr, id udp.ConnectionId) (bool, error)
+}
+
+type InfoHash = [20]byte
+
+type AnnounceTracker = trackerServer.AnnounceTracker
+
+type Server struct {
+	ConnTracker  ConnectionTracker
+	SendResponse func(ctx context.Context, data []byte, addr net.Addr) (int, error)
+	Announce     *trackerServer.AnnounceHandler
+}
+
+type RequestSourceAddr = net.Addr
+
+var tracer = otel.Tracer("torrent.tracker.udp")
+
+func (me *Server) HandleRequest(
+	ctx context.Context,
+	family udp.AddrFamily,
+	source RequestSourceAddr,
+	body []byte,
+) (err error) {
+	ctx, span := tracer.Start(ctx, "Server.HandleRequest",
+		trace.WithAttributes(attribute.Int("payload.len", len(body))))
+	defer span.End()
+	defer func() {
+		if err != nil {
+			span.SetStatus(codes.Error, err.Error())
+		}
+	}()
+	var h udp.RequestHeader
+	var r bytes.Reader
+	r.Reset(body)
+	err = udp.Read(&r, &h)
+	if err != nil {
+		err = fmt.Errorf("reading request header: %w", err)
+		return err
+	}
+	switch h.Action {
+	case udp.ActionConnect:
+		err = me.handleConnect(ctx, source, h.TransactionId)
+	case udp.ActionAnnounce:
+		err = me.handleAnnounce(ctx, family, source, h.ConnectionId, h.TransactionId, &r)
+	default:
+		err = fmt.Errorf("unimplemented")
+	}
+	if err != nil {
+		err = fmt.Errorf("handling action %v: %w", h.Action, err)
+	}
+	return err
+}
+
+func (me *Server) handleAnnounce(
+	ctx context.Context,
+	addrFamily udp.AddrFamily,
+	source RequestSourceAddr,
+	connId udp.ConnectionId,
+	tid udp.TransactionId,
+	r *bytes.Reader,
+) error {
+	// Should we set a timeout of 10s or something for the entire response, so that we give up if a
+	// retry is imminent?
+
+	ok, err := me.ConnTracker.Check(ctx, source.String(), connId)
+	if err != nil {
+		err = fmt.Errorf("checking conn id: %w", err)
+		return err
+	}
+	if !ok {
+		return fmt.Errorf("incorrect connection id: %x", connId)
+	}
+	var req udp.AnnounceRequest
+	err = udp.Read(r, &req)
+	if err != nil {
+		return err
+	}
+	// TODO: This should be done asynchronously to responding to the announce.
+	announceAddr, err := netip.ParseAddrPort(source.String())
+	if err != nil {
+		err = fmt.Errorf("converting source net.Addr to AnnounceAddr: %w", err)
+		return err
+	}
+	opts := trackerServer.GetPeersOpts{MaxCount: generics.Some[uint](50)}
+	if addrFamily == udp.AddrFamilyIpv4 {
+		opts.MaxCount = generics.Some[uint](150)
+	}
+	res := me.Announce.Serve(ctx, req, announceAddr, opts)
+	if res.Err != nil {
+		return res.Err
+	}
+	nodeAddrs := make([]krpc.NodeAddr, 0, len(res.Peers))
+	for _, p := range res.Peers {
+		var ip net.IP
+		switch addrFamily {
+		default:
+			continue
+		case udp.AddrFamilyIpv4:
+			if !p.Addr().Unmap().Is4() {
+				continue
+			}
+			ipBuf := p.Addr().As4()
+			ip = ipBuf[:]
+		case udp.AddrFamilyIpv6:
+			ipBuf := p.Addr().As16()
+			ip = ipBuf[:]
+		}
+		nodeAddrs = append(nodeAddrs, krpc.NodeAddr{
+			IP:   ip[:],
+			Port: int(p.Port()),
+		})
+	}
+	var buf bytes.Buffer
+	err = udp.Write(&buf, udp.ResponseHeader{
+		Action:        udp.ActionAnnounce,
+		TransactionId: tid,
+	})
+	if err != nil {
+		return err
+	}
+	err = udp.Write(&buf, udp.AnnounceResponseHeader{
+		Interval: res.Interval.UnwrapOr(5 * 60),
+		Seeders:  res.Seeders.Value,
+		Leechers: res.Leechers.Value,
+	})
+	if err != nil {
+		return err
+	}
+	b, err := udp.GetNodeAddrsCompactMarshaler(nodeAddrs, addrFamily).MarshalBinary()
+	if err != nil {
+		err = fmt.Errorf("marshalling compact node addrs: %w", err)
+		return err
+	}
+	buf.Write(b)
+	n, err := me.SendResponse(ctx, buf.Bytes(), source)
+	if err != nil {
+		return err
+	}
+	if n < buf.Len() {
+		err = io.ErrShortWrite
+	}
+	return err
+}
+
+func (me *Server) handleConnect(ctx context.Context, source RequestSourceAddr, tid udp.TransactionId) error {
+	connId := randomConnectionId()
+	err := me.ConnTracker.Add(ctx, source.String(), connId)
+	if err != nil {
+		err = fmt.Errorf("recording conn id: %w", err)
+		return err
+	}
+	var buf bytes.Buffer
+	udp.Write(&buf, udp.ResponseHeader{
+		Action:        udp.ActionConnect,
+		TransactionId: tid,
+	})
+	udp.Write(&buf, udp.ConnectionResponse{connId})
+	n, err := me.SendResponse(ctx, buf.Bytes(), source)
+	if err != nil {
+		return err
+	}
+	if n < buf.Len() {
+		err = io.ErrShortWrite
+	}
+	return err
+}
+
+func randomConnectionId() udp.ConnectionId {
+	var b [8]byte
+	_, err := rand.Read(b[:])
+	if err != nil {
+		panic(err)
+	}
+	return binary.BigEndian.Uint64(b[:])
+}
+
+func RunSimple(ctx context.Context, s *Server, pc net.PacketConn, family udp.AddrFamily) error {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+	var b [1500]byte
+	// Limit concurrent handled requests.
+	sem := make(chan struct{}, 1000)
+	for {
+		n, addr, err := pc.ReadFrom(b[:])
+		ctx, span := tracer.Start(ctx, "handle udp packet")
+		if err != nil {
+			span.SetStatus(codes.Error, err.Error())
+			span.End()
+			return err
+		}
+		select {
+		case <-ctx.Done():
+			// err is nil here; report the context error rather than calling Error on a nil error.
+			span.SetStatus(codes.Error, ctx.Err().Error())
+			span.End()
+			return ctx.Err()
+		default:
+			span.SetStatus(codes.Error, "concurrency limit reached")
+			span.End()
+			log.Levelf(log.Debug, "dropping request from %v: concurrency limit reached", addr)
+			continue
+		case sem <- struct{}{}:
+		}
+		b := append([]byte(nil), b[:n]...)
+		go func() {
+			defer span.End()
+			defer func() { <-sem }()
+			err := s.HandleRequest(ctx, family, addr, b)
+			if err != nil {
+				log.Printf("error handling %v byte request from %v: %v", n, addr, err)
+			}
+		}()
+	}
+}
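+
+// Usage sketch (the listen address is a placeholder):
+//
+//	pc, err := net.ListenPacket("udp4", ":6969")
+//	if err != nil { ... }
+//	err = RunSimple(context.Background(), srv, pc, udp.AddrFamilyIpv4)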
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp/timeout.go b/deps/github.com/anacrolix/torrent/tracker/udp/timeout.go
new file mode 100644
index 0000000..b5e1832
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp/timeout.go
@@ -0,0 +1,18 @@
+package udp
+
+import (
+	"time"
+)
+
+const maxTimeout = 3840 * time.Second
+
+func timeout(contiguousTimeouts int) (d time.Duration) {
+	if contiguousTimeouts > 8 {
+		contiguousTimeouts = 8
+	}
+	d = 15 * time.Second
+	for ; contiguousTimeouts > 0; contiguousTimeouts-- {
+		d *= 2
+	}
+	return
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp/timeout_test.go b/deps/github.com/anacrolix/torrent/tracker/udp/timeout_test.go
new file mode 100644
index 0000000..4bb0dc8
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp/timeout_test.go
@@ -0,0 +1,15 @@
+package udp
+
+import (
+	"math"
+	"testing"
+
+	qt "github.com/frankban/quicktest"
+)
+
+func TestTimeoutMax(t *testing.T) {
+	c := qt.New(t)
+	c.Check(timeout(8), qt.Equals, maxTimeout)
+	c.Check(timeout(9), qt.Equals, maxTimeout)
+	c.Check(timeout(math.MaxInt32), qt.Equals, maxTimeout)
+}
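+
+// A sketch of the doubling schedule below the cap: timeout(n) is 15s << n for
+// n <= 8, written relative to maxTimeout (15s == maxTimeout/256) so no new
+// imports are needed.
+func TestTimeoutBackoffDoubles(t *testing.T) {
+	c := qt.New(t)
+	c.Check(timeout(0), qt.Equals, maxTimeout/256)
+	c.Check(timeout(1), qt.Equals, maxTimeout/128)
+	c.Check(timeout(7), qt.Equals, maxTimeout/2)
+}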
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp/transaction.go b/deps/github.com/anacrolix/torrent/tracker/udp/transaction.go
new file mode 100644
index 0000000..2018b35
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp/transaction.go
@@ -0,0 +1,23 @@
+package udp
+
+import "math/rand"
+
+func RandomTransactionId() TransactionId {
+	return TransactionId(rand.Uint32())
+}
+
+type TransactionResponseHandler func(dr DispatchedResponse)
+
+type Transaction struct {
+	id int32
+	d  *Dispatcher
+	h  TransactionResponseHandler
+}
+
+func (t *Transaction) Id() TransactionId {
+	return t.id
+}
+
+func (t *Transaction) End() {
+	t.d.forgetTransaction(t.id)
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp/udp_test.go b/deps/github.com/anacrolix/torrent/tracker/udp/udp_test.go
new file mode 100644
index 0000000..64aeb80
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp/udp_test.go
@@ -0,0 +1,139 @@
+package udp
+
+import (
+	"bytes"
+	"context"
+	"crypto/rand"
+	"encoding/binary"
+	"io"
+	"net"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/anacrolix/dht/v2/krpc"
+	_ "github.com/anacrolix/envpprof"
+	"github.com/anacrolix/missinggo/v2/iter"
+	qt "github.com/frankban/quicktest"
+	"github.com/stretchr/testify/require"
+)
+
+// Ensure net.IPs are stored big-endian, to match the way they're read from
+// the wire.
+func TestNetIPv4Bytes(t *testing.T) {
+	ip := net.IP([]byte{127, 0, 0, 1})
+	if ip.String() != "127.0.0.1" {
+		t.FailNow()
+	}
+	if string(ip) != "\x7f\x00\x00\x01" {
+		t.Fatal([]byte(ip))
+	}
+}
+
+func TestMarshalAnnounceResponse(t *testing.T) {
+	peers := krpc.CompactIPv4NodeAddrs{
+		{[]byte{127, 0, 0, 1}, 2},
+		{[]byte{255, 0, 0, 3}, 4},
+	}
+	b, err := peers.MarshalBinary()
+	require.NoError(t, err)
+	require.EqualValues(t,
+		"\x7f\x00\x00\x01\x00\x02\xff\x00\x00\x03\x00\x04",
+		b)
+	require.EqualValues(t, 12, binary.Size(AnnounceResponseHeader{}))
+}
+
+// Failure to write an entire packet to UDP is expected to give an error.
+func TestLongWriteUDP(t *testing.T) {
+	t.Parallel()
+	l, err := net.ListenUDP("udp4", nil)
+	require.NoError(t, err)
+	defer l.Close()
+	c, err := net.DialUDP("udp", nil, l.LocalAddr().(*net.UDPAddr))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer c.Close()
+	for msgLen := 1; ; msgLen *= 2 {
+		n, err := c.Write(make([]byte, msgLen))
+		if err != nil {
+			if isErrMessageTooLong(err) {
+				return
+			}
+			t.Fatalf("expected message too long error: %v", err)
+		}
+		if n < msgLen {
+			t.FailNow()
+		}
+	}
+}
+
+func TestShortBinaryRead(t *testing.T) {
+	var data ResponseHeader
+	err := binary.Read(bytes.NewBufferString("\x00\x00\x00\x01"), binary.BigEndian, &data)
+	if err != io.ErrUnexpectedEOF {
+		t.FailNow()
+	}
+}
+
+func TestConvertInt16ToInt(t *testing.T) {
+	i := 50000
+	if int(uint16(int16(i))) != 50000 {
+		t.FailNow()
+	}
+}
+
+func TestConnClientLogDispatchUnknownTransactionId(t *testing.T) {
+	const network = "udp"
+	cc, err := NewConnClient(NewConnClientOpts{
+		Network: network,
+	})
+	c := qt.New(t)
+	c.Assert(err, qt.IsNil)
+	defer cc.Close()
+	pc, err := net.ListenPacket(network, ":0")
+	c.Assert(err, qt.IsNil)
+	defer pc.Close()
+	ccAddr := *cc.LocalAddr().(*net.UDPAddr)
+	ccAddr.IP = net.IPv6loopback
+	_, err = pc.WriteTo(make([]byte, 30), &ccAddr)
+	c.Assert(err, qt.IsNil)
+}
+
+func TestConnectionIdMismatch(t *testing.T) {
+	t.Skip("Server host returns consistent connection ID in limited tests and so isn't effective.")
+	cl, err := NewConnClient(NewConnClientOpts{
+		// This host seems to return `Connection ID missmatch.\x00` every 2 minutes or so under
+		// heavy use.
+		Host: "tracker.torrent.eu.org:451",
+		//Host:    "tracker.opentrackr.org:1337",
+		Network: "udp",
+	})
+	c := qt.New(t)
+	c.Assert(err, qt.IsNil)
+	defer cl.Close()
+	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+	defer cancel()
+	// Force every request to use a different connection ID. It's racy, but we want to get a
+	// different ID issued before a request can be sent with an old ID.
+	cl.Client.shouldReconnectOverride = func() bool { return true }
+	started := time.Now()
+	var wg sync.WaitGroup
+	for range iter.N(2) {
+		ar := AnnounceRequest{
+			NumWant: -1,
+			Event:   2,
+		}
+		rand.Read(ar.InfoHash[:])
+		rand.Read(ar.PeerId[:])
+		//spew.Dump(ar)
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			_, _, err := cl.Announce(ctx, ar, Options{})
+			// I'm looking for `error response: "Connection ID missmatch.\x00"`.
+			t.Logf("announce error after %v: %v", time.Since(started), err)
+		}()
+	}
+	wg.Wait()
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp/udp_unix.go b/deps/github.com/anacrolix/torrent/tracker/udp/udp_unix.go
new file mode 100644
index 0000000..6fcf9ed
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp/udp_unix.go
@@ -0,0 +1,14 @@
+//go:build !windows
+
+package udp
+
+import (
+	"strings"
+)
+
+func isErrMessageTooLong(err error) bool {
+	if err == nil {
+		return false
+	}
+	return strings.Contains(err.Error(), "message too long")
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp/udp_windows.go b/deps/github.com/anacrolix/torrent/tracker/udp/udp_windows.go
new file mode 100644
index 0000000..a289e9e
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp/udp_windows.go
@@ -0,0 +1,11 @@
+package udp
+
+import (
+	"errors"
+
+	"golang.org/x/sys/windows"
+)
+
+func isErrMessageTooLong(err error) bool {
+	return errors.Is(err, windows.WSAEMSGSIZE)
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker/udp_test.go b/deps/github.com/anacrolix/torrent/tracker/udp_test.go
new file mode 100644
index 0000000..232aeb1
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker/udp_test.go
@@ -0,0 +1,194 @@
+package tracker
+
+import (
+	"bytes"
+	"context"
+	"crypto/rand"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/url"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/anacrolix/dht/v2/krpc"
+	_ "github.com/anacrolix/envpprof"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/anacrolix/torrent/tracker/udp"
+)
+
+var trackers = []string{
+	"udp://tracker.opentrackr.org:1337/announce",
+	"udp://tracker.openbittorrent.com:6969/announce",
+	"udp://localhost:42069",
+}
+
+func TestAnnounceLocalhost(t *testing.T) {
+	t.Parallel()
+	srv := server{
+		t: map[[20]byte]torrent{
+			{0xa3, 0x56, 0x41, 0x43, 0x74, 0x23, 0xe6, 0x26, 0xd9, 0x38, 0x25, 0x4a, 0x6b, 0x80, 0x49, 0x10, 0xa6, 0x67, 0xa, 0xc1}: {
+				Seeders:  1,
+				Leechers: 2,
+				Peers: krpc.CompactIPv4NodeAddrs{
+					{[]byte{1, 2, 3, 4}, 5},
+					{[]byte{6, 7, 8, 9}, 10},
+				},
+			},
+		},
+	}
+	var err error
+	srv.pc, err = net.ListenPacket("udp", "localhost:0")
+	require.NoError(t, err)
+	defer srv.pc.Close()
+	go func() {
+		require.NoError(t, srv.serveOne())
+	}()
+	req := AnnounceRequest{
+		NumWant: -1,
+		Event:   Started,
+	}
+	rand.Read(req.PeerId[:])
+	copy(req.InfoHash[:], []uint8{0xa3, 0x56, 0x41, 0x43, 0x74, 0x23, 0xe6, 0x26, 0xd9, 0x38, 0x25, 0x4a, 0x6b, 0x80, 0x49, 0x10, 0xa6, 0x67, 0xa, 0xc1})
+	go func() {
+		require.NoError(t, srv.serveOne())
+	}()
+	ar, err := Announce{
+		TrackerUrl: fmt.Sprintf("udp://%s/announce", srv.pc.LocalAddr().String()),
+		Request:    req,
+	}.Do()
+	require.NoError(t, err)
+	assert.EqualValues(t, 1, ar.Seeders)
+	assert.EqualValues(t, 2, len(ar.Peers))
+}
+
+func TestUDPTracker(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		t.SkipNow()
+	}
+	req := AnnounceRequest{
+		NumWant: -1,
+	}
+	rand.Read(req.PeerId[:])
+	copy(req.InfoHash[:], []uint8{0xa3, 0x56, 0x41, 0x43, 0x74, 0x23, 0xe6, 0x26, 0xd9, 0x38, 0x25, 0x4a, 0x6b, 0x80, 0x49, 0x10, 0xa6, 0x67, 0xa, 0xc1})
+	ctx, cancel := context.WithTimeout(context.Background(), DefaultTrackerAnnounceTimeout)
+	defer cancel()
+	if dl, ok := t.Deadline(); ok {
+		var cancel func()
+		ctx, cancel = context.WithDeadline(context.Background(), dl.Add(-time.Second))
+		defer cancel()
+	}
+	ar, err := Announce{
+		TrackerUrl: trackers[0],
+		Request:    req,
+		Context:    ctx,
+	}.Do()
+	// Skip any net errors as we don't control the server.
+	var ne net.Error
+	if errors.As(err, &ne) {
+		t.Skip(err)
+	}
+	require.NoError(t, err)
+	t.Logf("%+v", ar)
+}
+
+func TestAnnounceRandomInfoHashThirdParty(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		// This test involves contacting third party servers that may have
+		// unpredictable results.
+		t.SkipNow()
+	}
+	req := AnnounceRequest{
+		Event: Stopped,
+	}
+	rand.Read(req.PeerId[:])
+	rand.Read(req.InfoHash[:])
+	wg := sync.WaitGroup{}
+	ctx, cancel := context.WithTimeout(context.Background(), DefaultTrackerAnnounceTimeout)
+	defer cancel()
+	if dl, ok := t.Deadline(); ok {
+		var cancel func()
+		ctx, cancel = context.WithDeadline(ctx, dl.Add(-time.Second))
+		defer cancel()
+	}
+	for _, url := range trackers {
+		wg.Add(1)
+		go func(url string) {
+			defer wg.Done()
+			resp, err := Announce{
+				TrackerUrl: url,
+				Request:    req,
+				Context:    ctx,
+			}.Do()
+			if err != nil {
+				t.Logf("error announcing to %s: %s", url, err)
+				return
+			}
+			if resp.Leechers != 0 || resp.Seeders != 0 || len(resp.Peers) != 0 {
+				// The info hash we generated was random in 2^160 space. If we
+				// get a hit, something is weird.
+				t.Fatal(resp)
+			}
+			t.Logf("announced to %s", url)
+			cancel()
+		}(url)
+	}
+	wg.Wait()
+	cancel()
+}
+
+// Check that URLPath option is done correctly.
+func TestURLPathOption(t *testing.T) {
+	conn, err := net.ListenPacket("udp", "localhost:0")
+	if err != nil {
+		panic(err)
+	}
+	defer conn.Close()
+	announceErr := make(chan error)
+	go func() {
+		_, err := Announce{
+			TrackerUrl: (&url.URL{
+				Scheme: "udp",
+				Host:   conn.LocalAddr().String(),
+				Path:   "/announce",
+			}).String(),
+		}.Do()
+		defer conn.Close()
+		announceErr <- err
+	}()
+	var b [512]byte
+	// conn.SetReadDeadline(time.Now().Add(time.Second))
+	_, addr, _ := conn.ReadFrom(b[:])
+	r := bytes.NewReader(b[:])
+	var h udp.RequestHeader
+	udp.Read(r, &h)
+	w := &bytes.Buffer{}
+	udp.Write(w, udp.ResponseHeader{
+		Action:        udp.ActionConnect,
+		TransactionId: h.TransactionId,
+	})
+	udp.Write(w, udp.ConnectionResponse{42})
+	conn.WriteTo(w.Bytes(), addr)
+	n, _, _ := conn.ReadFrom(b[:])
+	r = bytes.NewReader(b[:n])
+	udp.Read(r, &h)
+	udp.Read(r, &AnnounceRequest{})
+	all, _ := io.ReadAll(r)
+	if string(all) != "\x02\x09/announce" {
+		t.FailNow()
+	}
+	w = &bytes.Buffer{}
+	udp.Write(w, udp.ResponseHeader{
+		Action:        udp.ActionAnnounce,
+		TransactionId: h.TransactionId,
+	})
+	udp.Write(w, udp.AnnounceResponseHeader{})
+	conn.WriteTo(w.Bytes(), addr)
+	require.NoError(t, <-announceErr)
+}
diff --git a/deps/github.com/anacrolix/torrent/tracker_scraper.go b/deps/github.com/anacrolix/torrent/tracker_scraper.go
new file mode 100644
index 0000000..863838a
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/tracker_scraper.go
@@ -0,0 +1,265 @@
+package torrent
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"time"
+
+	"github.com/anacrolix/dht/v2/krpc"
+	"github.com/anacrolix/log"
+
+	"github.com/anacrolix/torrent/tracker"
+)
+
+// Announces a torrent to a tracker at regular intervals, when peers are
+// required.
+type trackerScraper struct {
+	u               url.URL
+	t               *Torrent
+	lastAnnounce    trackerAnnounceResult
+	lookupTrackerIp func(*url.URL) ([]net.IP, error)
+}
+
+type torrentTrackerAnnouncer interface {
+	statusLine() string
+	URL() *url.URL
+}
+
+func (me trackerScraper) URL() *url.URL {
+	return &me.u
+}
+
+func (ts *trackerScraper) statusLine() string {
+	var w bytes.Buffer
+	fmt.Fprintf(&w, "next ann: %v, last ann: %v",
+		func() string {
+			na := time.Until(ts.lastAnnounce.Completed.Add(ts.lastAnnounce.Interval))
+			if na > 0 {
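+				// Truncate the remaining time to whole seconds for display.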
+				na /= time.Second
+				na *= time.Second
+				return na.String()
+			} else {
+				return "anytime"
+			}
+		}(),
+		func() string {
+			if ts.lastAnnounce.Err != nil {
+				return ts.lastAnnounce.Err.Error()
+			}
+			if ts.lastAnnounce.Completed.IsZero() {
+				return "never"
+			}
+			return fmt.Sprintf("%d peers", ts.lastAnnounce.NumPeers)
+		}(),
+	)
+	return w.String()
+}
+
+type trackerAnnounceResult struct {
+	Err       error
+	NumPeers  int
+	Interval  time.Duration
+	Completed time.Time
+}
+
+func (me *trackerScraper) getIp() (ip net.IP, err error) {
+	var ips []net.IP
+	if me.lookupTrackerIp != nil {
+		ips, err = me.lookupTrackerIp(&me.u)
+	} else {
+		// Do a regular dns lookup
+		ips, err = net.LookupIP(me.u.Hostname())
+	}
+	if err != nil {
+		return
+	}
+	if len(ips) == 0 {
+		err = errors.New("no ips")
+		return
+	}
+	me.t.cl.rLock()
+	defer me.t.cl.rUnlock()
+	if me.t.cl.closed.IsSet() {
+		err = errors.New("client is closed")
+		return
+	}
+	for _, ip = range ips {
+		if me.t.cl.ipIsBlocked(ip) {
+			continue
+		}
+		switch me.u.Scheme {
+		case "udp4":
+			if ip.To4() == nil {
+				continue
+			}
+		case "udp6":
+			if ip.To4() != nil {
+				continue
+			}
+		}
+		return
+	}
+	err = errors.New("no acceptable ips")
+	return
+}
+
+func (me *trackerScraper) trackerUrl(ip net.IP) string {
+	u := me.u
+	if u.Port() != "" {
+		u.Host = net.JoinHostPort(ip.String(), u.Port())
+	}
+	return u.String()
+}
+
+// Returns how long to wait before trying again. For most errors we return a
+// minute, a relatively quick turnaround for DNS changes.
+func (me *trackerScraper) announce(ctx context.Context, event tracker.AnnounceEvent) (ret trackerAnnounceResult) {
+	defer func() {
+		ret.Completed = time.Now()
+	}()
+	ret.Interval = time.Minute
+
+	// Limit concurrent use of the same tracker URL by the Client.
+	ref := me.t.cl.activeAnnounceLimiter.GetRef(me.u.String())
+	defer ref.Drop()
+	select {
+	case <-ctx.Done():
+		ret.Err = ctx.Err()
+		return
+	case ref.C() <- struct{}{}:
+	}
+	defer func() {
+		select {
+		case <-ref.C():
+		default:
+			panic("should return immediately")
+		}
+	}()
+
+	ip, err := me.getIp()
+	if err != nil {
+		ret.Err = fmt.Errorf("error getting ip: %s", err)
+		return
+	}
+	me.t.cl.rLock()
+	req := me.t.announceRequest(event)
+	me.t.cl.rUnlock()
+	// The default timeout works well as backpressure on concurrent access to the tracker. Since
+	// we're passing our own Context now, we will include that timeout ourselves to maintain similar
+	// behavior to previously, albeit with this context now being cancelled when the Torrent is
+	// closed.
+	ctx, cancel := context.WithTimeout(ctx, tracker.DefaultTrackerAnnounceTimeout)
+	defer cancel()
+	me.t.logger.WithDefaultLevel(log.Debug).Printf("announcing to %q: %#v", me.u.String(), req)
+	res, err := tracker.Announce{
+		Context:             ctx,
+		HttpProxy:           me.t.cl.config.HTTPProxy,
+		HttpRequestDirector: me.t.cl.config.HttpRequestDirector,
+		DialContext:         me.t.cl.config.TrackerDialContext,
+		ListenPacket:        me.t.cl.config.TrackerListenPacket,
+		UserAgent:           me.t.cl.config.HTTPUserAgent,
+		TrackerUrl:          me.trackerUrl(ip),
+		Request:             req,
+		HostHeader:          me.u.Host,
+		ServerName:          me.u.Hostname(),
+		UdpNetwork:          me.u.Scheme,
+		ClientIp4:           krpc.NodeAddr{IP: me.t.cl.config.PublicIp4},
+		ClientIp6:           krpc.NodeAddr{IP: me.t.cl.config.PublicIp6},
+		Logger:              me.t.logger,
+	}.Do()
+	me.t.logger.WithDefaultLevel(log.Debug).Printf("announce to %q returned %#v: %v", me.u.String(), res, err)
+	if err != nil {
+		ret.Err = fmt.Errorf("announcing: %w", err)
+		return
+	}
+	me.t.AddPeers(peerInfos(nil).AppendFromTracker(res.Peers))
+	ret.NumPeers = len(res.Peers)
+	ret.Interval = time.Duration(res.Interval) * time.Second
+	return
+}
+
+// Returns whether we can shorten the interval, and sets notify to a channel that receives when we
+// might change our mind; otherwise notify is left alone.
+func (me *trackerScraper) canIgnoreInterval(notify *<-chan struct{}) bool {
+	gotInfo := me.t.GotInfo()
+	select {
+	case <-gotInfo:
+		// Private trackers really don't like us announcing more than they specify. They're also
+		// tracking us very carefully, so it's best to comply.
+		private := me.t.info.Private
+		return private == nil || !*private
+	default:
+		*notify = gotInfo
+		return false
+	}
+}
+
+func (me *trackerScraper) Run() {
+	defer me.announceStopped()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	go func() {
+		defer cancel()
+		select {
+		case <-ctx.Done():
+		case <-me.t.Closed():
+		}
+	}()
+
+	// make sure first announce is a "started"
+	e := tracker.Started
+
+	for {
+		ar := me.announce(ctx, e)
+		// after first announce, get back to regular "none"
+		e = tracker.None
+		me.t.cl.lock()
+		me.lastAnnounce = ar
+		me.t.cl.unlock()
+
+	recalculate:
+		// Make sure we don't announce for at least a minute since the last one.
+		interval := ar.Interval
+		if interval < time.Minute {
+			interval = time.Minute
+		}
+
+		me.t.cl.lock()
+		wantPeers := me.t.wantPeersEvent.C()
+		me.t.cl.unlock()
+
+		// If we want peers, reduce the interval to the minimum if it's appropriate.
+
+		// A channel that receives when we should reconsider our interval. Starts as nil since that
+		// never receives.
+		var reconsider <-chan struct{}
+		select {
+		case <-wantPeers:
+			if interval > time.Minute && me.canIgnoreInterval(&reconsider) {
+				interval = time.Minute
+			}
+		default:
+			reconsider = wantPeers
+		}
+
+		select {
+		case <-me.t.closed.Done():
+			return
+		case <-reconsider:
+			// Recalculate the interval.
+			goto recalculate
+		case <-time.After(time.Until(ar.Completed.Add(interval))):
+		}
+	}
+}
+
+func (me *trackerScraper) announceStopped() {
+	ctx, cancel := context.WithTimeout(context.Background(), tracker.DefaultTrackerAnnounceTimeout)
+	defer cancel()
+	me.announce(ctx, tracker.Stopped)
+}
diff --git a/deps/github.com/anacrolix/torrent/typed-roaring/bitmap.go b/deps/github.com/anacrolix/torrent/typed-roaring/bitmap.go
new file mode 100644
index 0000000..7f7b1a7
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/typed-roaring/bitmap.go
@@ -0,0 +1,48 @@
+package typedRoaring
+
+import (
+	"github.com/RoaringBitmap/roaring"
+)
+
+type Bitmap[T BitConstraint] struct {
+	roaring.Bitmap
+}
+
+func (me *Bitmap[T]) Contains(x T) bool {
+	return me.Bitmap.Contains(uint32(x))
+}
+
+func (me Bitmap[T]) Iterate(f func(x T) bool) {
+	me.Bitmap.Iterate(func(x uint32) bool {
+		return f(T(x))
+	})
+}
+
+func (me *Bitmap[T]) Add(x T) {
+	me.Bitmap.Add(uint32(x))
+}
+
+func (me *Bitmap[T]) Rank(x T) uint64 {
+	return me.Bitmap.Rank(uint32(x))
+}
+
+func (me *Bitmap[T]) CheckedRemove(x T) bool {
+	return me.Bitmap.CheckedRemove(uint32(x))
+}
+
+func (me *Bitmap[T]) Clone() Bitmap[T] {
+	return Bitmap[T]{*me.Bitmap.Clone()}
+}
+
+func (me *Bitmap[T]) CheckedAdd(x T) bool {
+	return me.Bitmap.CheckedAdd(uint32(x))
+}
+
+func (me *Bitmap[T]) Remove(x T) {
+	me.Bitmap.Remove(uint32(x))
+}
+
+// Returns an uninitialized iterator for the type of the receiver.
+func (Bitmap[T]) IteratorType() Iterator[T] {
+	return Iterator[T]{}
+}
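+
+// Usage sketch (pieceIndex is a hypothetical ~int type satisfying
+// BitConstraint); call sites avoid the uint32 casts of the raw bitmap:
+//
+//	var dirty Bitmap[pieceIndex]
+//	dirty.Add(3)
+//	_ = dirty.Contains(3) // true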
diff --git a/deps/github.com/anacrolix/torrent/typed-roaring/constraints.go b/deps/github.com/anacrolix/torrent/typed-roaring/constraints.go
new file mode 100644
index 0000000..d6e191f
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/typed-roaring/constraints.go
@@ -0,0 +1,5 @@
+package typedRoaring
+
+type BitConstraint interface {
+	~int | ~uint32
+}
diff --git a/deps/github.com/anacrolix/torrent/typed-roaring/iterator.go b/deps/github.com/anacrolix/torrent/typed-roaring/iterator.go
new file mode 100644
index 0000000..8766db1
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/typed-roaring/iterator.go
@@ -0,0 +1,21 @@
+package typedRoaring
+
+import (
+	"github.com/RoaringBitmap/roaring"
+)
+
+type Iterator[T BitConstraint] struct {
+	roaring.IntIterator
+}
+
+func (t *Iterator[T]) Next() T {
+	return T(t.IntIterator.Next())
+}
+
+func (t *Iterator[T]) AdvanceIfNeeded(minVal T) {
+	t.IntIterator.AdvanceIfNeeded(uint32(minVal))
+}
+
+func (t *Iterator[T]) Initialize(a *Bitmap[T]) {
+	t.IntIterator.Initialize(&a.Bitmap)
+}
diff --git a/deps/github.com/anacrolix/torrent/types/infohash/infohash.go b/deps/github.com/anacrolix/torrent/types/infohash/infohash.go
new file mode 100644
index 0000000..0763b01
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/types/infohash/infohash.go
@@ -0,0 +1,80 @@
+package infohash
+
+import (
+	"crypto/sha1"
+	"encoding"
+	"encoding/hex"
+	"fmt"
+)
+
+const Size = 20
+
+// 20-byte SHA1 hash used for info and pieces.
+type T [Size]byte
+
+var _ fmt.Formatter = (*T)(nil)
+
+func (t T) Format(f fmt.State, c rune) {
+	// TODO: I can't figure out a nice way to just override the 'x' rune, since it's meaningless
+	// with the "default" 'v', or .String() already returning the hex.
+	f.Write([]byte(t.HexString()))
+}
+
+func (t T) Bytes() []byte {
+	return t[:]
+}
+
+func (t T) AsString() string {
+	return string(t[:])
+}
+
+func (t T) String() string {
+	return t.HexString()
+}
+
+func (t T) HexString() string {
+	return fmt.Sprintf("%x", t[:])
+}
+
+func (t *T) FromHexString(s string) (err error) {
+	if len(s) != 2*Size {
+		err = fmt.Errorf("hash hex string has bad length: %d", len(s))
+		return
+	}
+	n, err := hex.Decode(t[:], []byte(s))
+	if err != nil {
+		return
+	}
+	if n != Size {
+		panic(n)
+	}
+	return
+}
+
+var (
+	_ encoding.TextUnmarshaler = (*T)(nil)
+	_ encoding.TextMarshaler   = T{}
+)
+
+func (t *T) UnmarshalText(b []byte) error {
+	return t.FromHexString(string(b))
+}
+
+func (t T) MarshalText() (text []byte, err error) {
+	return []byte(t.HexString()), nil
+}
+
+func FromHexString(s string) (h T) {
+	err := h.FromHexString(s)
+	if err != nil {
+		panic(err)
+	}
+	return
+}
+
+func HashBytes(b []byte) (ret T) {
+	hasher := sha1.New()
+	hasher.Write(b)
+	copy(ret[:], hasher.Sum(nil))
+	return
+}
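+
+// Worked example: HashBytes(nil) is the SHA-1 of no input, so
+// HashBytes(nil).HexString() == "da39a3ee5e6b4b0d3255bfef95601890afd80709".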
diff --git a/deps/github.com/anacrolix/torrent/types/peerid.go b/deps/github.com/anacrolix/torrent/types/peerid.go
new file mode 100644
index 0000000..0e13473
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/types/peerid.go
@@ -0,0 +1,14 @@
+package types
+
+// Peer client ID.
+type PeerID [20]byte
+
+// // Pretty-prints the ID as hex, except parts that adhere to the PeerInfo ID
+// // conventions of BEP 20.
+// func (me PeerID) String() string {
+// 	// if me[0] == '-' && me[7] == '-' {
+// 	// 	return string(me[:8]) + hex.EncodeToString(me[8:])
+// 	// }
+// 	// return hex.EncodeToString(me[:])
+// 	return fmt.Sprintf("%+q", me[:])
+// }
diff --git a/deps/github.com/anacrolix/torrent/types/types.go b/deps/github.com/anacrolix/torrent/types/types.go
new file mode 100644
index 0000000..a06f7e6
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/types/types.go
@@ -0,0 +1,52 @@
+package types
+
+import (
+	pp "github.com/anacrolix/torrent/peer_protocol"
+)
+
+type PieceIndex = int
+
+type ChunkSpec struct {
+	Begin, Length pp.Integer
+}
+
+type Request struct {
+	Index pp.Integer
+	ChunkSpec
+}
+
+func (r Request) ToMsg(mt pp.MessageType) pp.Message {
+	return pp.Message{
+		Type:   mt,
+		Index:  r.Index,
+		Begin:  r.Begin,
+		Length: r.Length,
+	}
+}
+
+// Describes the importance of obtaining a particular piece.
+type PiecePriority byte
+
+func (pp *PiecePriority) Raise(maybe PiecePriority) bool {
+	if maybe > *pp {
+		*pp = maybe
+		return true
+	}
+	return false
+}
+
+// Priority for use in PriorityBitmap
+func (me PiecePriority) BitmapPriority() int {
+	return -int(me)
+}
+
+const (
+	PiecePriorityNone      PiecePriority = iota // Not wanted. Must be the zero value.
+	PiecePriorityNormal                         // Wanted.
+	PiecePriorityHigh                           // Wanted a lot.
+	PiecePriorityReadahead                      // May be required soon.
+	// Succeeds a piece where a read occurred. Currently the same as Now,
+	// apparently due to issues with caching.
+	PiecePriorityNext
+	PiecePriorityNow // A Reader is reading in this piece. Highest urgency.
+)
diff --git a/deps/github.com/anacrolix/torrent/undirtied-chunks-iter.go b/deps/github.com/anacrolix/torrent/undirtied-chunks-iter.go
new file mode 100644
index 0000000..de0cce0
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/undirtied-chunks-iter.go
@@ -0,0 +1,23 @@
+package torrent
+
+import (
+	"github.com/anacrolix/torrent/typed-roaring"
+)
+
+func iterBitmapUnsetInRange[T typedRoaring.BitConstraint](it *typedRoaring.Iterator[T], start, end T, f func(T)) {
+	it.AdvanceIfNeeded(start)
+	lastDirty := start - 1
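+	// For unsigned T, start == 0 makes the subtraction above wrap around; lastDirty+1 then
+	// wraps back to 0 below, so iteration still begins at start.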
+	for it.HasNext() {
+		next := it.Next()
+		if next >= end {
+			break
+		}
+		for index := lastDirty + 1; index < next; index++ {
+			f(index)
+		}
+		lastDirty = next
+	}
+	for index := lastDirty + 1; index < end; index++ {
+		f(index)
+	}
+}
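+
+// Worked example (sketch): with set bits {2, 4}, a range of [0, 6) calls f
+// with 0, 1, 3 and 5: every index in the range whose bit is unset.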
diff --git a/deps/github.com/anacrolix/torrent/undirtied-chunks-iter_test.go b/deps/github.com/anacrolix/torrent/undirtied-chunks-iter_test.go
new file mode 100644
index 0000000..9ee6ecf
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/undirtied-chunks-iter_test.go
@@ -0,0 +1,19 @@
+package torrent
+
+import (
+	"testing"
+
+	typedRoaring "github.com/anacrolix/torrent/typed-roaring"
+)
+
+func BenchmarkIterUndirtiedRequestIndexesInPiece(b *testing.B) {
+	var bitmap typedRoaring.Bitmap[RequestIndex]
+	it := bitmap.IteratorType()
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		// This is the worst case, when Torrent.iterUndirtiedRequestIndexesInPiece can't find a
+		// usable cached iterator. This should be the only allocation.
+		it.Initialize(&bitmap)
+		iterBitmapUnsetInRange(&it, 69, 420, func(RequestIndex) {})
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/url-net-addr.go b/deps/github.com/anacrolix/torrent/url-net-addr.go
new file mode 100644
index 0000000..6558e89
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/url-net-addr.go
@@ -0,0 +1,26 @@
+package torrent
+
+import (
+	"net"
+	"net/url"
+)
+
+type urlNetAddr struct {
+	u *url.URL
+}
+
+func (me urlNetAddr) Network() string {
+	return me.u.Scheme
+}
+
+func (me urlNetAddr) String() string {
+	return me.u.Host
+}
+
+func remoteAddrFromUrl(urlStr string) net.Addr {
+	u, err := url.Parse(urlStr)
+	if err != nil {
+		return nil
+	}
+	return urlNetAddr{u}
+}
diff --git a/deps/github.com/anacrolix/torrent/ut-holepunching.go b/deps/github.com/anacrolix/torrent/ut-holepunching.go
new file mode 100644
index 0000000..10cbafc
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/ut-holepunching.go
@@ -0,0 +1 @@
+package torrent
diff --git a/deps/github.com/anacrolix/torrent/ut-holepunching_test.go b/deps/github.com/anacrolix/torrent/ut-holepunching_test.go
new file mode 100644
index 0000000..ef7cda6
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/ut-holepunching_test.go
@@ -0,0 +1,407 @@
+package torrent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"net"
+	"os"
+	"sync"
+	"testing"
+	"testing/iotest"
+	"time"
+
+	"github.com/anacrolix/log"
+	"github.com/anacrolix/missinggo/v2/iter"
+	qt "github.com/frankban/quicktest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/time/rate"
+
+	"github.com/anacrolix/torrent/internal/testutil"
+)
+
+// Check that after completing leeching, a leecher transitions to seeding
+// correctly. Connected in a chain like so: Seeder <-> Leecher <-> LeecherLeecher.
+func TestHolepunchConnect(t *testing.T) {
+	c := qt.New(t)
+	greetingTempDir, mi := testutil.GreetingTestTorrent()
+	defer os.RemoveAll(greetingTempDir)
+
+	cfg := TestingConfig(t)
+	cfg.Seed = true
+	cfg.MaxAllocPeerRequestDataPerConn = 4
+	cfg.DataDir = greetingTempDir
+	cfg.DisablePEX = true
+	cfg.Debug = true
+	cfg.AcceptPeerConnections = false
+	// Listening, even without accepting, still means the leecher-leecher completes the dial to the
+	// seeder, and so it won't attempt to holepunch.
+	cfg.DisableTCP = true
+	// Ensure that responding to holepunch connects don't wait around for the dial limit. We also
+	// have to allow the initial connection to the leecher though, so it can rendezvous for us.
+	cfg.DialRateLimiter = rate.NewLimiter(0, 1)
+	cfg.Logger = cfg.Logger.WithContextText("seeder")
+	seeder, err := NewClient(cfg)
+	require.NoError(t, err)
+	defer seeder.Close()
+	defer testutil.ExportStatusWriter(seeder, "s", t)()
+	seederTorrent, ok, err := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
+	require.NoError(t, err)
+	assert.True(t, ok)
+	seederTorrent.VerifyData()
+
+	cfg = TestingConfig(t)
+	cfg.Seed = true
+	cfg.DataDir = t.TempDir()
+	cfg.AlwaysWantConns = true
+	cfg.Logger = cfg.Logger.WithContextText("leecher")
+	// This way the leecher leecher will still try to use this peer as a relay, but won't be told
+	// about the seeder via PEX.
+	//cfg.DisablePEX = true
+	cfg.Debug = true
+	leecher, err := NewClient(cfg)
+	require.NoError(t, err)
+	defer leecher.Close()
+	defer testutil.ExportStatusWriter(leecher, "l", t)()
+
+	cfg = TestingConfig(t)
+	cfg.Seed = false
+	cfg.DataDir = t.TempDir()
+	cfg.MaxAllocPeerRequestDataPerConn = 4
+	cfg.Debug = true
+	cfg.NominalDialTimeout = time.Second
+	cfg.Logger = cfg.Logger.WithContextText("leecher-leecher")
+	//cfg.DisableUTP = true
+	leecherLeecher, _ := NewClient(cfg)
+	require.NoError(t, err)
+	defer leecherLeecher.Close()
+	defer testutil.ExportStatusWriter(leecherLeecher, "ll", t)()
+	leecherGreeting, ok, err := leecher.AddTorrentSpec(func() (ret *TorrentSpec) {
+		ret = TorrentSpecFromMetaInfo(mi)
+		ret.ChunkSize = 2
+		return
+	}())
+	_ = leecherGreeting
+	require.NoError(t, err)
+	assert.True(t, ok)
+	llg, ok, err := leecherLeecher.AddTorrentSpec(func() (ret *TorrentSpec) {
+		ret = TorrentSpecFromMetaInfo(mi)
+		ret.ChunkSize = 3
+		return
+	}())
+	require.NoError(t, err)
+	assert.True(t, ok)
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		r := llg.NewReader()
+		defer r.Close()
+		qt.Check(t, iotest.TestReader(r, []byte(testutil.GreetingFileContents)), qt.IsNil)
+	}()
+	go seederTorrent.AddClientPeer(leecher)
+	waitForConns(seederTorrent)
+	go llg.AddClientPeer(leecher)
+	waitForConns(llg)
+	time.Sleep(time.Second)
+	llg.cl.lock()
+	targetAddr := seeder.ListenAddrs()[0]
+	log.Printf("trying to initiate to %v", targetAddr)
+	initiateConn(outgoingConnOpts{
+		peerInfo: PeerInfo{
+			Addr: targetAddr,
+		},
+		t:                       llg,
+		requireRendezvous:       true,
+		skipHolepunchRendezvous: false,
+		HeaderObfuscationPolicy: llg.cl.config.HeaderObfuscationPolicy,
+	}, true)
+	llg.cl.unlock()
+	wg.Wait()
+
+	c.Check(seeder.dialedSuccessfullyAfterHolepunchConnect, qt.Not(qt.HasLen), 0)
+	c.Check(leecherLeecher.probablyOnlyConnectedDueToHolepunch, qt.Not(qt.HasLen), 0)
+
+	llClientStats := leecherLeecher.Stats()
+	c.Check(llClientStats.NumPeersUndialableWithoutHolepunch, qt.Not(qt.Equals), 0)
+	c.Check(llClientStats.NumPeersUndialableWithoutHolepunchDialedAfterHolepunchConnect, qt.Not(qt.Equals), 0)
+	c.Check(llClientStats.NumPeersProbablyOnlyConnectedDueToHolepunch, qt.Not(qt.Equals), 0)
+}
+
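+// waitForConns blocks until the torrent has at least one peer connection.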
+func waitForConns(t *Torrent) {
+	t.cl.lock()
+	defer t.cl.unlock()
+	for {
+		for range t.conns {
+			return
+		}
+		t.cl.event.Wait()
+	}
+}
+
+// Show that dialling TCP will complete before the other side accepts.
+func TestDialTcpNotAccepting(t *testing.T) {
+	l, err := net.Listen("tcp", "localhost:0")
+	c := qt.New(t)
+	c.Assert(err, qt.IsNil)
+	defer l.Close()
+	dialedConn, err := net.Dial("tcp", l.Addr().String())
+	c.Assert(err, qt.IsNil)
+	dialedConn.Close()
+}
+
+func TestTcpSimultaneousOpen(t *testing.T) {
+	const network = "tcp"
+	ctx := context.Background()
+	makeDialer := func(localPort int, remoteAddr string) func() (net.Conn, error) {
+		dialer := net.Dialer{
+			LocalAddr: &net.TCPAddr{
+				//IP:   net.IPv6loopback,
+				Port: localPort,
+			},
+		}
+		return func() (net.Conn, error) {
+			return dialer.DialContext(ctx, network, remoteAddr)
+		}
+	}
+	c := qt.New(t)
+	// I really hate doing this in unit tests, but we would need to pick apart Dialer to get
+	// perfectly synchronized simultaneous dials.
+	for range iter.N(10) {
+		first, second := randPortPair()
+		t.Logf("ports are %v and %v", first, second)
+		err := testSimultaneousOpen(
+			c.Cleanup,
+			makeDialer(first, fmt.Sprintf("localhost:%d", second)),
+			makeDialer(second, fmt.Sprintf("localhost:%d", first)),
+		)
+		if err == nil {
+			return
+		}
+		// This proves that the connections are not the same.
+		if errors.Is(err, errMsgNotReceived) {
+			t.Fatal(err)
+		}
+		// Could be a timing issue, so try again.
+		t.Log(err)
+	}
+	// If we weren't able to get a simultaneous dial to occur, then we can't call it a failure.
+	t.Skip("couldn't synchronize dials")
+}
+
+func randIntInRange(low, high int) int {
+	return rand.Intn(high-low+1) + low
+}
+
+func randDynamicPort() int {
+	return randIntInRange(49152, 65535)
+}
+
+func randPortPair() (first int, second int) {
+	first = randDynamicPort()
+	for {
+		second = randDynamicPort()
+		if second != first {
+			return
+		}
+	}
+}
+
+func writeMsg(conn net.Conn) {
+	conn.Write([]byte(defaultMsg))
+	// Writing must be closed so the reader will get EOF and stop reading.
+	conn.Close()
+}
+
+func readMsg(conn net.Conn) error {
+	msgBytes, err := io.ReadAll(conn)
+	if err != nil {
+		return err
+	}
+	msgStr := string(msgBytes)
+	if msgStr != defaultMsg {
+		return fmt.Errorf("read %q", msgStr)
+	}
+	return nil
+}
+
+var errMsgNotReceived = errors.New("msg not received in time")
+
+// Runs two dialers simultaneously, then sends a message on one connection and checks that it reads
+// from the other, thereby showing that both dials obtained endpoints of the same connection.
+func testSimultaneousOpen(
+	cleanup func(func()),
+	firstDialer, secondDialer func() (net.Conn, error),
+) error {
+	errs := make(chan error)
+	var dialsDone sync.WaitGroup
+	const numDials = 2
+	dialsDone.Add(numDials)
+	signal := make(chan struct{})
+	var dialersDone sync.WaitGroup
+	dialersDone.Add(numDials)
+	doDial := func(
+		dialer func() (net.Conn, error),
+		onSignal func(net.Conn),
+	) {
+		defer dialersDone.Done()
+		conn, err := dialer()
+		dialsDone.Done()
+		errs <- err
+		if err != nil {
+			return
+		}
+		cleanup(func() {
+			conn.Close()
+		})
+		<-signal
+		onSignal(conn)
+		//if err == nil {
+		//	conn.Close()
+		//}
+	}
+	go doDial(
+		firstDialer,
+		func(conn net.Conn) {
+			writeMsg(conn)
+			errs <- nil
+		},
+	)
+	go doDial(
+		secondDialer,
+		func(conn net.Conn) {
+			gotMsg := make(chan error, 1)
+			go func() {
+				gotMsg <- readMsg(conn)
+			}()
+			select {
+			case err := <-gotMsg:
+				errs <- err
+			case <-time.After(time.Second):
+				errs <- errMsgNotReceived
+			}
+		},
+	)
+	dialsDone.Wait()
+	for range iter.N(numDials) {
+		err := <-errs
+		if err != nil {
+			return err
+		}
+	}
+	close(signal)
+	for range iter.N(numDials) {
+		err := <-errs
+		if err != nil {
+			return err
+		}
+	}
+	dialersDone.Wait()
+	return nil
+}
+
+const defaultMsg = "hello"
+
+// Show that uTP doesn't implement simultaneous open. When two sockets dial each other, they both
+// get separate connections. This means that holepunch connect may result in an accept (and dial)
+// for one or both peers involved.
+func TestUtpSimultaneousOpen(t *testing.T) {
+	t.Parallel()
+	c := qt.New(t)
+	const network = "udp"
+	ctx := context.Background()
+	newUtpSocket := func(addr string) utpSocket {
+		socket, err := NewUtpSocket(
+			network,
+			addr,
+			func(net.Addr) bool {
+				return false
+			},
+			log.Default,
+		)
+		c.Assert(err, qt.IsNil)
+		return socket
+	}
+	first := newUtpSocket("localhost:0")
+	defer first.Close()
+	second := newUtpSocket("localhost:0")
+	defer second.Close()
+	getDial := func(sock utpSocket, addr string) func() (net.Conn, error) {
+		return func() (net.Conn, error) {
+			return sock.DialContext(ctx, network, addr)
+		}
+	}
+	t.Logf("first addr is %v. second addr is %v", first.Addr().String(), second.Addr().String())
+	for range iter.N(10) {
+		err := testSimultaneousOpen(
+			c.Cleanup,
+			getDial(first, second.Addr().String()),
+			getDial(second, first.Addr().String()),
+		)
+		if err == nil {
+			t.Fatal("expected utp to fail simultaneous open")
+		}
+		if errors.Is(err, errMsgNotReceived) {
+			return
+		}
+		skipGoUtpDialIssue(t, err)
+		t.Log(err)
+		time.Sleep(time.Second)
+	}
+	t.FailNow()
+}
+
+func writeAndReadMsg(r, w net.Conn) error {
+	go writeMsg(w)
+	return readMsg(r)
+}
+
+func skipGoUtpDialIssue(t *testing.T, err error) {
+	if err.Error() == "timed out waiting for ack" {
+		t.Skip("anacrolix go utp implementation has issues. Use anacrolix/go-libutp by enabling CGO.")
+	}
+}
+
+// Show that dialling one socket and accepting from the other results in them having ends of the
+// same connection.
+func TestUtpDirectDialMsg(t *testing.T) {
+	t.Parallel()
+	c := qt.New(t)
+	const network = "udp4"
+	ctx := context.Background()
+	newUtpSocket := func(addr string) utpSocket {
+		socket, err := NewUtpSocket(network, addr, func(net.Addr) bool {
+			return false
+		}, log.Default)
+		c.Assert(err, qt.IsNil)
+		return socket
+	}
+	for range iter.N(10) {
+		err := func() error {
+			first := newUtpSocket("localhost:0")
+			defer first.Close()
+			second := newUtpSocket("localhost:0")
+			defer second.Close()
+			writer, err := first.DialContext(ctx, network, second.Addr().String())
+			if err != nil {
+				return err
+			}
+			defer writer.Close()
+			reader, err := second.Accept()
+			defer reader.Close()
+			c.Assert(err, qt.IsNil)
+			return writeAndReadMsg(reader, writer)
+		}()
+		if err == nil {
+			return
+		}
+		skipGoUtpDialIssue(t, err)
+		t.Log(err)
+		time.Sleep(time.Second)
+	}
+	t.FailNow()
+}
diff --git a/deps/github.com/anacrolix/torrent/util/dirwatch/dirwatch.go b/deps/github.com/anacrolix/torrent/util/dirwatch/dirwatch.go
new file mode 100644
index 0000000..f617aee
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/util/dirwatch/dirwatch.go
@@ -0,0 +1,215 @@
+// Package dirwatch provides filesystem-notification based tracking of torrent
+// info files and magnet URIs in a directory.
+package dirwatch
+
+import (
+	"bufio"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/anacrolix/log"
+	"github.com/anacrolix/missinggo/v2"
+	"github.com/fsnotify/fsnotify"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
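+
+// A minimal usage sketch (illustrative):
+//
+//	instance, err := dirwatch.New("/watched/dir")
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer instance.Close()
+//	for ev := range instance.Events {
+//		log.Printf("%v: %v", ev.Change, ev.InfoHash)
+//	}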
+
+type Change uint
+
+const (
+	Added Change = iota
+	Removed
+)
+
+type Event struct {
+	MagnetURI string
+	Change
+	TorrentFilePath string
+	InfoHash        metainfo.Hash
+}
+
+type entity struct {
+	metainfo.Hash
+	MagnetURI       string
+	TorrentFilePath string
+}
+
+type Instance struct {
+	w        *fsnotify.Watcher
+	dirName  string
+	Events   chan Event
+	dirState map[metainfo.Hash]entity
+	Logger   log.Logger
+}
+
+func (i *Instance) Close() {
+	i.w.Close()
+}
+
+func (i *Instance) handleEvents() {
+	defer close(i.Events)
+	for e := range i.w.Events {
+		i.Logger.WithDefaultLevel(log.Debug).Printf("event: %v", e)
+		if e.Op == fsnotify.Write {
+			// TODO: Special treatment as an existing torrent may have changed.
+		} else {
+			i.refresh()
+		}
+	}
+}
+
+func (i *Instance) handleErrors() {
+	for err := range i.w.Errors {
+		log.Printf("error in torrent directory watcher: %s", err)
+	}
+}
+
+func torrentFileInfoHash(fileName string) (ih metainfo.Hash, ok bool) {
+	mi, _ := metainfo.LoadFromFile(fileName)
+	if mi == nil {
+		return
+	}
+	ih = mi.HashInfoBytes()
+	ok = true
+	return
+}
+
+func scanDir(dirName string) (ee map[metainfo.Hash]entity) {
+	d, err := os.Open(dirName)
+	if err != nil {
+		log.Print(err)
+		return
+	}
+	defer d.Close()
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		log.Print(err)
+		return
+	}
+	ee = make(map[metainfo.Hash]entity, len(names))
+	addEntity := func(e entity) {
+		e0, ok := ee[e.Hash]
+		if ok {
+			if e0.MagnetURI == "" || len(e.MagnetURI) < len(e0.MagnetURI) {
+				return
+			}
+		}
+		ee[e.Hash] = e
+	}
+	for _, n := range names {
+		fullName := filepath.Join(dirName, n)
+		switch filepath.Ext(n) {
+		case ".torrent":
+			ih, ok := torrentFileInfoHash(fullName)
+			if !ok {
+				break
+			}
+			e := entity{
+				TorrentFilePath: fullName,
+			}
+			missinggo.CopyExact(&e.Hash, ih)
+			addEntity(e)
+		case ".magnet":
+			uris, err := magnetFileURIs(fullName)
+			if err != nil {
+				log.Print(err)
+				break
+			}
+			for _, uri := range uris {
+				m, err := metainfo.ParseMagnetUri(uri)
+				if err != nil {
+					log.Printf("error parsing %q in file %q: %s", uri, fullName, err)
+					continue
+				}
+				addEntity(entity{
+					Hash:      m.InfoHash,
+					MagnetURI: uri,
+				})
+			}
+		}
+	}
+	return
+}
+
+func magnetFileURIs(name string) (uris []string, err error) {
+	f, err := os.Open(name)
+	if err != nil {
+		return
+	}
+	defer f.Close()
+	scanner := bufio.NewScanner(f)
+	scanner.Split(bufio.ScanWords)
+	for scanner.Scan() {
+		// Allow magnet URIs to be "commented" out.
+		if strings.HasPrefix(scanner.Text(), "#") {
+			continue
+		}
+		uris = append(uris, scanner.Text())
+	}
+	err = scanner.Err()
+	return
+}
+
+func (i *Instance) torrentRemoved(ih metainfo.Hash) {
+	i.Events <- Event{
+		InfoHash: ih,
+		Change:   Removed,
+	}
+}
+
+func (i *Instance) torrentAdded(e entity) {
+	i.Events <- Event{
+		InfoHash:        e.Hash,
+		Change:          Added,
+		MagnetURI:       e.MagnetURI,
+		TorrentFilePath: e.TorrentFilePath,
+	}
+}
+
+func (i *Instance) refresh() {
+	_new := scanDir(i.dirName)
+	old := i.dirState
+	for ih := range old {
+		_, ok := _new[ih]
+		if !ok {
+			i.torrentRemoved(ih)
+		}
+	}
+	for ih, newE := range _new {
+		oldE, ok := old[ih]
+		if ok {
+			if newE == oldE {
+				continue
+			}
+			i.torrentRemoved(ih)
+		}
+		i.torrentAdded(newE)
+	}
+	i.dirState = _new
+}
+
+func New(dirName string) (i *Instance, err error) {
+	w, err := fsnotify.NewWatcher()
+	if err != nil {
+		return
+	}
+	err = w.Add(dirName)
+	if err != nil {
+		w.Close()
+		return
+	}
+	i = &Instance{
+		w:        w,
+		dirName:  dirName,
+		Events:   make(chan Event),
+		dirState: make(map[metainfo.Hash]entity),
+		Logger:   log.Default,
+	}
+	go func() {
+		i.refresh()
+		go i.handleEvents()
+		go i.handleErrors()
+	}()
+	return
+}
diff --git a/deps/github.com/anacrolix/torrent/util/dirwatch/dirwatch_test.go b/deps/github.com/anacrolix/torrent/util/dirwatch/dirwatch_test.go
new file mode 100644
index 0000000..0447599
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/util/dirwatch/dirwatch_test.go
@@ -0,0 +1,15 @@
+package dirwatch
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestDirwatch(t *testing.T) {
+	tempDirName := t.TempDir()
+	t.Logf("tempdir: %q", tempDirName)
+	dw, err := New(tempDirName)
+	require.NoError(t, err)
+	defer dw.Close()
+}
diff --git a/deps/github.com/anacrolix/torrent/utp.go b/deps/github.com/anacrolix/torrent/utp.go
new file mode 100644
index 0000000..3066ca0
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/utp.go
@@ -0,0 +1,18 @@
+package torrent
+
+import (
+	"context"
+	"net"
+)
+
+// Abstracts the utp Socket, so the implementation can be selected from
+// different packages.
+type utpSocket interface {
+	net.PacketConn
+	// net.Listener, but we can't have duplicate Close.
+	Accept() (net.Conn, error)
+	Addr() net.Addr
+	// net.Dialer but there's no interface.
+	DialContext(ctx context.Context, network, addr string) (net.Conn, error)
+	// Dial(addr string) (net.Conn, error)
+}
diff --git a/deps/github.com/anacrolix/torrent/utp_go.go b/deps/github.com/anacrolix/torrent/utp_go.go
new file mode 100644
index 0000000..1e60f82
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/utp_go.go
@@ -0,0 +1,18 @@
+//go:build !cgo || disable_libutp
+// +build !cgo disable_libutp
+
+package torrent
+
+import (
+	"github.com/anacrolix/log"
+	"github.com/anacrolix/utp"
+)
+
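+// NewUtpSocket returns the pure-Go uTP implementation. The explicit nil check
+// below avoids returning a non-nil utpSocket interface value that wraps a
+// typed-nil *utp.Socket.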
+func NewUtpSocket(network, addr string, _ firewallCallback, _ log.Logger) (utpSocket, error) {
+	s, err := utp.NewSocket(network, addr)
+	if s == nil {
+		return nil, err
+	} else {
+		return s, err
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/utp_libutp.go b/deps/github.com/anacrolix/torrent/utp_libutp.go
new file mode 100644
index 0000000..6da9402
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/utp_libutp.go
@@ -0,0 +1,23 @@
+//go:build cgo && !disable_libutp
+// +build cgo,!disable_libutp
+
+package torrent
+
+import (
+	utp "github.com/anacrolix/go-libutp"
+	"github.com/anacrolix/log"
+)
+
+func NewUtpSocket(network, addr string, fc firewallCallback, logger log.Logger) (utpSocket, error) {
+	s, err := utp.NewSocket(network, addr, utp.WithLogger(logger))
+	if s == nil {
+		return nil, err
+	}
+	if err != nil {
+		return s, err
+	}
+	if fc != nil {
+		s.SetSyncFirewallCallback(utp.FirewallCallback(fc))
+	}
+	return s, err
+}
diff --git a/deps/github.com/anacrolix/torrent/utp_test.go b/deps/github.com/anacrolix/torrent/utp_test.go
new file mode 100644
index 0000000..18d62ca
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/utp_test.go
@@ -0,0 +1,16 @@
+package torrent
+
+import (
+	"testing"
+
+	"github.com/anacrolix/log"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestNewUtpSocketErrorNilInterface(t *testing.T) {
+	s, err := NewUtpSocket("fix", "your:language", nil, log.Default)
+	assert.Error(t, err)
+	if s != nil {
+		t.Fatalf("expected nil, got %#v", s)
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/version/version.go b/deps/github.com/anacrolix/torrent/version/version.go
new file mode 100644
index 0000000..66483b6
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/version/version.go
@@ -0,0 +1,61 @@
+// Package version provides default versions, user-agents etc. for client identification.
+package version
+
+import (
+	"fmt"
+	"reflect"
+	"runtime/debug"
+	"strings"
+)
+
+var (
+	DefaultExtendedHandshakeClientVersion string
+	// This should be updated when client behaviour changes in a way that other peers could care
+	// about.
+	DefaultBep20Prefix   = "-GT0003-"
+	DefaultHttpUserAgent string
+	DefaultUpnpId        string
+)
+
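+// init derives the identification strings above from build info. For example
+// (illustrative module path and versions), a main module "example.com/app" at
+// v1.2.3 depending on this package at v1.50.0 yields roughly:
+//
+//	DefaultExtendedHandshakeClientVersion = "example.com/app v1.2.3 (anacrolix/torrent v1.50.0)"
+//	DefaultUpnpId                         = "example.com/app v1.2.3"
+//	DefaultHttpUserAgent                  = "anacrolix-torrent/v1.50.0"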
+func init() {
+	const (
+		longNamespace   = "anacrolix"
+		longPackageName = "torrent"
+	)
+	type Newtype struct{}
+	var newtype Newtype
+	thisPkg := reflect.TypeOf(newtype).PkgPath()
+	var (
+		mainPath       = "unknown"
+		mainVersion    = "unknown"
+		torrentVersion = "unknown"
+	)
+	if buildInfo, ok := debug.ReadBuildInfo(); ok {
+		mainPath = buildInfo.Main.Path
+		mainVersion = buildInfo.Main.Version
+		thisModule := ""
+		// Note that if the main module is the same as this module, we get a version of "(devel)".
+		for _, dep := range append(buildInfo.Deps, &buildInfo.Main) {
+			if strings.HasPrefix(thisPkg, dep.Path) && len(dep.Path) >= len(thisModule) {
+				thisModule = dep.Path
+				torrentVersion = dep.Version
+			}
+		}
+	}
+	DefaultExtendedHandshakeClientVersion = fmt.Sprintf(
+		"%v %v (%v/%v %v)",
+		mainPath,
+		mainVersion,
+		longNamespace,
+		longPackageName,
+		torrentVersion,
+	)
+	DefaultUpnpId = fmt.Sprintf("%v %v", mainPath, mainVersion)
+	// Per https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/User-Agent#library_and_net_tool_ua_strings
+	DefaultHttpUserAgent = fmt.Sprintf(
+		"%v-%v/%v",
+		longNamespace,
+		longPackageName,
+		torrentVersion,
+	)
+}
diff --git a/deps/github.com/anacrolix/torrent/webrtc.go b/deps/github.com/anacrolix/torrent/webrtc.go
new file mode 100644
index 0000000..ca4f80f
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webrtc.go
@@ -0,0 +1,85 @@
+package torrent
+
+import (
+	"net"
+	"strconv"
+	"time"
+
+	"github.com/pion/datachannel"
+	"github.com/pion/webrtc/v3"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+
+	"github.com/anacrolix/torrent/webtorrent"
+)
+
+const webrtcNetwork = "webrtc"
+
+type webrtcNetConn struct {
+	datachannel.ReadWriteCloser
+	webtorrent.DataChannelContext
+}
+
+type webrtcNetAddr struct {
+	*webrtc.ICECandidate
+}
+
+var _ net.Addr = webrtcNetAddr{}
+
+func (webrtcNetAddr) Network() string {
+	// Now that we have the ICE candidate, we can tell if it's over udp or tcp. But should we use
+	// that for the network?
+	return webrtcNetwork
+}
+
+func (me webrtcNetAddr) String() string {
+	return net.JoinHostPort(me.Address, strconv.FormatUint(uint64(me.Port), 10))
+}
+
+func (me webrtcNetConn) LocalAddr() net.Addr {
+	// I'm not sure if this evolves over time. It might also be unavailable if the PeerConnection is
+	// closed or closes itself. The same concern applies to RemoteAddr.
+	pair, err := me.DataChannelContext.GetSelectedIceCandidatePair()
+	if err != nil {
+		panic(err)
+	}
+	return webrtcNetAddr{pair.Local}
+}
+
+func (me webrtcNetConn) RemoteAddr() net.Addr {
+	// See comments on LocalAddr.
+	pair, err := me.DataChannelContext.GetSelectedIceCandidatePair()
+	if err != nil {
+		panic(err)
+	}
+	return webrtcNetAddr{pair.Remote}
+}
+
+// Do we need these for WebRTC connections exposed as net.Conns? Can we set them somewhere inside
+// PeerConnection or on the channel or some transport?
+
+func (w webrtcNetConn) SetDeadline(t time.Time) error {
+	w.Span.AddEvent("SetDeadline", trace.WithAttributes(attribute.String("time", t.String())))
+	return nil
+}
+
+func (w webrtcNetConn) SetReadDeadline(t time.Time) error {
+	w.Span.AddEvent("SetReadDeadline", trace.WithAttributes(attribute.String("time", t.String())))
+	return nil
+}
+
+func (w webrtcNetConn) SetWriteDeadline(t time.Time) error {
+	w.Span.AddEvent("SetWriteDeadline", trace.WithAttributes(attribute.String("time", t.String())))
+	return nil
+}
+
+func (w webrtcNetConn) Read(b []byte) (n int, err error) {
+	_, span := otel.Tracer(tracerName).Start(w.Context, "Read")
+	defer span.End()
+	span.SetAttributes(attribute.Int("buf_len", len(b)))
+	n, err = w.ReadWriteCloser.Read(b)
+	span.RecordError(err)
+	span.SetAttributes(attribute.Int("bytes_read", n))
+	return
+}
diff --git a/deps/github.com/anacrolix/torrent/webseed-peer.go b/deps/github.com/anacrolix/torrent/webseed-peer.go
new file mode 100644
index 0000000..5b6632b
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webseed-peer.go
@@ -0,0 +1,222 @@
+package torrent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math/rand"
+	"sync"
+	"time"
+
+	"github.com/RoaringBitmap/roaring"
+	"github.com/anacrolix/log"
+
+	"github.com/anacrolix/torrent/metainfo"
+	pp "github.com/anacrolix/torrent/peer_protocol"
+	"github.com/anacrolix/torrent/webseed"
+)
+
+const (
+	webseedPeerUnhandledErrorSleep   = 5 * time.Second
+	webseedPeerCloseOnUnhandledError = false
+)
+
+type webseedPeer struct {
+	// First field for stats alignment.
+	peer             Peer
+	client           webseed.Client
+	activeRequests   map[Request]webseed.Request
+	requesterCond    sync.Cond
+	lastUnhandledErr time.Time
+}
+
+var _ peerImpl = (*webseedPeer)(nil)
+
+func (me *webseedPeer) peerImplStatusLines() []string {
+	return []string{
+		me.client.Url,
+		fmt.Sprintf("last unhandled error: %v", eventAgeString(me.lastUnhandledErr)),
+	}
+}
+
+func (ws *webseedPeer) String() string {
+	return fmt.Sprintf("webseed peer for %q", ws.client.Url)
+}
+
+func (ws *webseedPeer) onGotInfo(info *metainfo.Info) {
+	ws.client.SetInfo(info)
+	// There should probably be a callback in Client instead, so it can remove pieces at its whim
+	// too.
+	ws.client.Pieces.Iterate(func(x uint32) bool {
+		ws.peer.t.incPieceAvailability(pieceIndex(x))
+		return true
+	})
+}
+
+func (ws *webseedPeer) writeInterested(interested bool) bool {
+	return true
+}
+
+func (ws *webseedPeer) _cancel(r RequestIndex) bool {
+	if active, ok := ws.activeRequests[ws.peer.t.requestIndexToRequest(r)]; ok {
+		active.Cancel()
+		// The requester is running and will handle the result.
+		return true
+	}
+	// There should be no requester handling this, so no further events will occur.
+	return false
+}
+
+func (ws *webseedPeer) intoSpec(r Request) webseed.RequestSpec {
+	return webseed.RequestSpec{ws.peer.t.requestOffset(r), int64(r.Length)}
+}
+
+func (ws *webseedPeer) _request(r Request) bool {
+	ws.requesterCond.Signal()
+	return true
+}
+
+func (ws *webseedPeer) doRequest(r Request) error {
+	webseedRequest := ws.client.NewRequest(ws.intoSpec(r))
+	ws.activeRequests[r] = webseedRequest
+	err := func() error {
+		ws.requesterCond.L.Unlock()
+		defer ws.requesterCond.L.Lock()
+		return ws.requestResultHandler(r, webseedRequest)
+	}()
+	delete(ws.activeRequests, r)
+	return err
+}
+
+func (ws *webseedPeer) requester(i int) {
+	ws.requesterCond.L.Lock()
+	defer ws.requesterCond.L.Unlock()
+start:
+	for !ws.peer.closed.IsSet() {
+		// Restart is set if we don't need to wait on requesterCond before trying again.
+		restart := false
+		ws.peer.requestState.Requests.Iterate(func(x RequestIndex) bool {
+			r := ws.peer.t.requestIndexToRequest(x)
+			if _, ok := ws.activeRequests[r]; ok {
+				return true
+			}
+			err := ws.doRequest(r)
+			ws.requesterCond.L.Unlock()
+			if err != nil && !errors.Is(err, context.Canceled) {
+				log.Printf("requester %v: error doing webseed request %v: %v", i, r, err)
+			}
+			restart = true
+			if errors.Is(err, webseed.ErrTooFast) {
+				time.Sleep(time.Duration(rand.Int63n(int64(10 * time.Second))))
+			}
+			// Demeter is throwing a tantrum on Mount Olympus for this
+			ws.peer.t.cl.locker().RLock()
+			duration := time.Until(ws.lastUnhandledErr.Add(webseedPeerUnhandledErrorSleep))
+			ws.peer.t.cl.locker().RUnlock()
+			time.Sleep(duration)
+			ws.requesterCond.L.Lock()
+			return false
+		})
+		if restart {
+			goto start
+		}
+		ws.requesterCond.Wait()
+	}
+}
+
+func (ws *webseedPeer) connectionFlags() string {
+	return "WS"
+}
+
+// Maybe this should drop all existing connections, or something like that.
+func (ws *webseedPeer) drop() {}
+
+func (cn *webseedPeer) ban() {
+	cn.peer.close()
+}
+
+func (ws *webseedPeer) handleUpdateRequests() {
+	// Because this is synchronous, webseed peers seem to get first dibs on newly prioritized
+	// pieces.
+	go func() {
+		ws.peer.t.cl.lock()
+		defer ws.peer.t.cl.unlock()
+		ws.peer.maybeUpdateActualRequestState()
+	}()
+}
+
+func (ws *webseedPeer) onClose() {
+	ws.peer.logger.Levelf(log.Debug, "closing")
+	// Just deleting them means we would have to manually cancel active requests.
+	ws.peer.cancelAllRequests()
+	ws.peer.t.iterPeers(func(p *Peer) {
+		if p.isLowOnRequests() {
+			p.updateRequests("webseedPeer.onClose")
+		}
+	})
+	ws.requesterCond.Broadcast()
+}
+
+func (ws *webseedPeer) requestResultHandler(r Request, webseedRequest webseed.Request) error {
+	result := <-webseedRequest.Result
+	close(webseedRequest.Result) // one-shot
+	// We do this here rather than inside receiveChunk, since we want to count errors too. I'm not
+	// sure if we can divine which errors indicate cancellation on our end without hitting the
+	// network though.
+	if len(result.Bytes) != 0 || result.Err == nil {
+		// Increment ChunksRead and friends
+		ws.peer.doChunkReadStats(int64(len(result.Bytes)))
+	}
+	ws.peer.readBytes(int64(len(result.Bytes)))
+	ws.peer.t.cl.lock()
+	defer ws.peer.t.cl.unlock()
+	if ws.peer.t.closed.IsSet() {
+		return nil
+	}
+	err := result.Err
+	if err != nil {
+		switch {
+		case errors.Is(err, context.Canceled):
+		case errors.Is(err, webseed.ErrTooFast):
+		case ws.peer.closed.IsSet():
+		default:
+			ws.peer.logger.Printf("Request %v rejected: %v", r, result.Err)
+			// // Here lies my attempt to extract something concrete from Go's error system. RIP.
+			// cfg := spew.NewDefaultConfig()
+			// cfg.DisableMethods = true
+			// cfg.Dump(result.Err)
+
+			if webseedPeerCloseOnUnhandledError {
+				log.Printf("closing %v", ws)
+				ws.peer.close()
+			} else {
+				ws.lastUnhandledErr = time.Now()
+			}
+		}
+		if !ws.peer.remoteRejectedRequest(ws.peer.t.requestIndexFromRequest(r)) {
+			panic("invalid reject")
+		}
+		return err
+	}
+	err = ws.peer.receiveChunk(&pp.Message{
+		Type:  pp.Piece,
+		Index: r.Index,
+		Begin: r.Begin,
+		Piece: result.Bytes,
+	})
+	if err != nil {
+		panic(err)
+	}
+	return err
+}
+
+func (me *webseedPeer) peerPieces() *roaring.Bitmap {
+	return &me.client.Pieces
+}
+
+func (cn *webseedPeer) peerHasAllPieces() (all, known bool) {
+	if !cn.peer.t.haveInfo() {
+		return true, false
+	}
+	return cn.client.Pieces.GetCardinality() == uint64(cn.peer.t.numPieces()), true
+}
diff --git a/deps/github.com/anacrolix/torrent/webseed/client.go b/deps/github.com/anacrolix/torrent/webseed/client.go
new file mode 100644
index 0000000..ac42b8a
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webseed/client.go
@@ -0,0 +1,206 @@
+package webseed
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"strings"
+
+	"github.com/RoaringBitmap/roaring"
+
+	"github.com/anacrolix/torrent/common"
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/anacrolix/torrent/segments"
+)
+
+type RequestSpec = segments.Extent
+
+type requestPartResult struct {
+	resp *http.Response
+	err  error
+}
+
+type requestPart struct {
+	req    *http.Request
+	e      segments.Extent
+	result chan requestPartResult
+	start  func()
+	// Wrap http response bodies for such things as download rate limiting.
+	responseBodyWrapper ResponseBodyWrapper
+}
+
+type Request struct {
+	cancel func()
+	Result chan RequestResult
+}
+
+func (r Request) Cancel() {
+	r.cancel()
+}
+
+type Client struct {
+	HttpClient *http.Client
+	Url        string
+	fileIndex  segments.Index
+	info       *metainfo.Info
+	// The pieces we can request with the Url. We're more likely to ban/block at the file-level
+	// given that's how requests are mapped to webseeds, but the torrent.Client works at the piece
+	// level. We can map our file-level adjustments to the pieces here. This probably needs to be
+	// private in the future, if Client ever starts removing pieces.
+	Pieces              roaring.Bitmap
+	ResponseBodyWrapper ResponseBodyWrapper
+	PathEscaper         PathEscaper
+}
+
+type ResponseBodyWrapper func(io.Reader) io.Reader
+
+func (me *Client) SetInfo(info *metainfo.Info) {
+	if !strings.HasSuffix(me.Url, "/") && info.IsDir() {
+		// In my experience, this is a non-conforming webseed. For example the
+		// http://ia600500.us.archive.org/1/items URLs in archive.org torrents.
+		return
+	}
+	me.fileIndex = segments.NewIndex(common.LengthIterFromUpvertedFiles(info.UpvertedFiles()))
+	me.info = info
+	me.Pieces.AddRange(0, uint64(info.NumPieces()))
+}
+
+type RequestResult struct {
+	Bytes []byte
+	Err   error
+}
+
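+// NewRequest maps the requested extent onto per-file HTTP requests using the
+// file index. The parts are issued one at a time in a background goroutine and
+// their bodies are concatenated into a single RequestResult.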
+func (ws *Client) NewRequest(r RequestSpec) Request {
+	ctx, cancel := context.WithCancel(context.Background())
+	var requestParts []requestPart
+	if !ws.fileIndex.Locate(r, func(i int, e segments.Extent) bool {
+		req, err := newRequest(
+			ws.Url, i, ws.info, e.Start, e.Length,
+			ws.PathEscaper,
+		)
+		if err != nil {
+			panic(err)
+		}
+		req = req.WithContext(ctx)
+		part := requestPart{
+			req:                 req,
+			result:              make(chan requestPartResult, 1),
+			e:                   e,
+			responseBodyWrapper: ws.ResponseBodyWrapper,
+		}
+		part.start = func() {
+			go func() {
+				resp, err := ws.HttpClient.Do(req)
+				part.result <- requestPartResult{
+					resp: resp,
+					err:  err,
+				}
+			}()
+		}
+		requestParts = append(requestParts, part)
+		return true
+	}) {
+		panic("request out of file bounds")
+	}
+	req := Request{
+		cancel: cancel,
+		Result: make(chan RequestResult, 1),
+	}
+	go func() {
+		b, err := readRequestPartResponses(ctx, requestParts)
+		req.Result <- RequestResult{
+			Bytes: b,
+			Err:   err,
+		}
+	}()
+	return req
+}
+
+type ErrBadResponse struct {
+	Msg      string
+	Response *http.Response
+}
+
+func (me ErrBadResponse) Error() string {
+	return me.Msg
+}
+
+func recvPartResult(ctx context.Context, buf io.Writer, part requestPart) error {
+	result := <-part.result
+	// Make sure there are no further results coming; it should be a one-shot channel.
+	close(part.result)
+	if result.err != nil {
+		return result.err
+	}
+	defer result.resp.Body.Close()
+	var body io.Reader = result.resp.Body
+	if part.responseBodyWrapper != nil {
+		body = part.responseBodyWrapper(body)
+	}
+	// Prevent further accidental use
+	result.resp.Body = nil
+	if ctx.Err() != nil {
+		return ctx.Err()
+	}
+	switch result.resp.StatusCode {
+	case http.StatusPartialContent:
+		copied, err := io.Copy(buf, body)
+		if err != nil {
+			return err
+		}
+		if copied != part.e.Length {
+			return fmt.Errorf("got %v bytes, expected %v", copied, part.e.Length)
+		}
+		return nil
+	case http.StatusOK:
+		// This number is based on
+		// https://archive.org/download/BloodyPitOfHorror/BloodyPitOfHorror.asr.srt. It seems that
+		// archive.org might be using a webserver implementation that refuses to do partial
+		// responses to small files.
+		if part.e.Start < 48<<10 {
+			if part.e.Start != 0 {
+				log.Printf("resp status ok but requested range [url=%q, range=%q]",
+					part.req.URL,
+					part.req.Header.Get("Range"))
+			}
+			// Instead of discarding, we could try receiving all the chunks present in the response
+			// body. I don't know how one would handle multiple chunk requests resulting in an OK
+			// response for the same file. The request algorithm might need to be smarter for
+			// that.
+			discarded, _ := io.CopyN(io.Discard, body, part.e.Start)
+			if discarded != 0 {
+				log.Printf("discarded %v bytes in webseed request response part", discarded)
+			}
+			_, err := io.CopyN(buf, body, part.e.Length)
+			return err
+		} else {
+			return ErrBadResponse{"resp status ok but requested range", result.resp}
+		}
+	case http.StatusServiceUnavailable:
+		return ErrTooFast
+	default:
+		return ErrBadResponse{
+			fmt.Sprintf("unhandled response status code (%v)", result.resp.StatusCode),
+			result.resp,
+		}
+	}
+}
+
+var ErrTooFast = errors.New("making requests too fast")
+
+func readRequestPartResponses(ctx context.Context, parts []requestPart) (_ []byte, err error) {
+	var buf bytes.Buffer
+	for _, part := range parts {
+		part.start()
+		err = recvPartResult(ctx, &buf, part)
+		if err != nil {
+			err = fmt.Errorf("reading %q at %q: %w", part.req.URL, part.req.Header.Get("Range"), err)
+			break
+		}
+	}
+	return buf.Bytes(), err
+}
diff --git a/deps/github.com/anacrolix/torrent/webseed/request.go b/deps/github.com/anacrolix/torrent/webseed/request.go
new file mode 100644
index 0000000..53fe6db
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webseed/request.go
@@ -0,0 +1,68 @@
+package webseed
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/anacrolix/torrent/metainfo"
+)
+
+type PathEscaper func(pathComps []string) string
+
+// Escapes path name components so they are suitable for appending to a webseed URL. This works for
+// converting S3 object keys to URLs too.
+//
+// Each component is path-escaped, with '+' additionally encoded so the result survives both path
+// and query unescaping. This works better with most S3 providers.
+func EscapePath(pathComps []string) string {
+	return defaultPathEscaper(pathComps)
+}
+
+func defaultPathEscaper(pathComps []string) string {
+	var ret []string
+	for _, comp := range pathComps {
+		esc := url.PathEscape(comp)
+		// S3 incorrectly escapes + in paths to spaces, so we add an extra encoding for that. This
+		// seems to be handled correctly regardless of whether an endpoint uses query or path
+		// escaping.
+		esc = strings.ReplaceAll(esc, "+", "%2B")
+		ret = append(ret, esc)
+	}
+	return strings.Join(ret, "/")
+}
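+
+// For example (illustrative input):
+//
+//	defaultPathEscaper([]string{"a b", "c+d"}) == "a%20b/c%2Bd"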
+
+func trailingPath(
+	infoName string,
+	fileComps []string,
+	pathEscaper PathEscaper,
+) string {
+	if pathEscaper == nil {
+		pathEscaper = defaultPathEscaper
+	}
+	return pathEscaper(append([]string{infoName}, fileComps...))
+}
+
+// Creates a request per BEP 19.
+func newRequest(
+	url_ string, fileIndex int,
+	info *metainfo.Info,
+	offset, length int64,
+	pathEscaper PathEscaper,
+) (*http.Request, error) {
+	fileInfo := info.UpvertedFiles()[fileIndex]
+	if strings.HasSuffix(url_, "/") {
+		// BEP specifies that we append the file path. We need to escape each component of the path
+		// for things like spaces and '#'.
+		url_ += trailingPath(info.Name, fileInfo.Path, pathEscaper)
+	}
+	req, err := http.NewRequest(http.MethodGet, url_, nil)
+	if err != nil {
+		return nil, err
+	}
+	if offset != 0 || length != fileInfo.Length {
+		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
+	}
+	return req, nil
+}
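+
+// For instance (hypothetical values), reading 50 bytes at offset 100 of a
+// larger file sets the header "Range: bytes=100-149".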
diff --git a/deps/github.com/anacrolix/torrent/webseed/request_test.go b/deps/github.com/anacrolix/torrent/webseed/request_test.go
new file mode 100644
index 0000000..af3071f
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webseed/request_test.go
@@ -0,0 +1,60 @@
+package webseed
+
+import (
+	"net/url"
+	"testing"
+
+	qt "github.com/frankban/quicktest"
+)
+
+func TestDefaultPathEscaper(t *testing.T) {
+	c := qt.New(t)
+	test := func(unescaped string, parts ...string) {
+		assertPartsUnescape(c, unescaped, parts...)
+	}
+	for _, tc := range defaultPathEscapeTestCases {
+		test(tc.escaped, tc.parts...)
+	}
+}
+
+// So we can manually check, and use these to seed fuzzing.
+var defaultPathEscapeTestCases = []struct {
+	escaped string
+	parts   []string
+}{
+	{"/", []string{"", ""}},
+	{"a_b-c/d + e.f", []string{"a_b-c", "d + e.f"}},
+	{"a_1-b_c2/d 3. (e, f).g", []string{"a_1-b_c2", "d 3. (e, f).g"}},
+	{"a_b-c/d + e.f", []string{"a_b-c", "d + e.f"}},
+	{"a_1-b_c2/d 3. (e, f).g", []string{"a_1-b_c2", "d 3. (e, f).g"}},
+	{"war/and/peace", []string{"war", "and", "peace"}},
+	{"he//o#world/world", []string{"he//o#world", "world"}},
+	{`ノ┬─┬ノ ︵ ( \o°o)\`, []string{`ノ┬─┬ノ ︵ ( \o°o)\`}},
+	{
+		`%aa + %bb/Parsi Tv - سرقت و باز کردن در ماشین در کم‌تر از ۳ ثانیه + فیلم.webm`,
+		[]string{`%aa + %bb`, `Parsi Tv - سرقت و باز کردن در ماشین در کم‌تر از ۳ ثانیه + فیلم.webm`},
+	},
+}
+
+func assertPartsUnescape(c *qt.C, unescaped string, parts ...string) {
+	escaped := defaultPathEscaper(parts)
+	pathUnescaped, err := url.PathUnescape(escaped)
+	c.Assert(err, qt.IsNil)
+	c.Assert(pathUnescaped, qt.Equals, unescaped)
+	queryUnescaped, err := url.QueryUnescape(escaped)
+	c.Assert(err, qt.IsNil)
+	c.Assert(queryUnescaped, qt.Equals, unescaped)
+}
+
+func FuzzDefaultPathEscaper(f *testing.F) {
+	for _, tc := range defaultPathEscapeTestCases {
+		if len(tc.parts) == 2 {
+			f.Add(tc.parts[0], tc.parts[1])
+		}
+	}
+	// I think a single separator is enough to test special handling around /. Also fuzzing doesn't
+	// let us take []string as an input.
+	f.Fuzz(func(t *testing.T, first, second string) {
+		assertPartsUnescape(qt.New(t), first+"/"+second, first, second)
+	})
+}
diff --git a/deps/github.com/anacrolix/torrent/webtorrent/LICENSE b/deps/github.com/anacrolix/torrent/webtorrent/LICENSE
new file mode 100644
index 0000000..99d4f26
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webtorrent/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 Michiel De Backker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/deps/github.com/anacrolix/torrent/webtorrent/fuzz_test.go b/deps/github.com/anacrolix/torrent/webtorrent/fuzz_test.go
new file mode 100644
index 0000000..14638fa
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webtorrent/fuzz_test.go
@@ -0,0 +1,31 @@
+//go:build go1.18
+// +build go1.18
+
+package webtorrent
+
+import (
+	"encoding/json"
+	"testing"
+
+	qt "github.com/frankban/quicktest"
+)
+
+func FuzzJsonBinaryStrings(f *testing.F) {
+	f.Fuzz(func(t *testing.T, in []byte) {
+		jsonBytes, err := json.Marshal(binaryToJsonString(in))
+		if err != nil {
+			t.Fatal(err)
+		}
+		// t.Logf("%q", jsonBytes)
+		var jsonStr string
+		err = json.Unmarshal(jsonBytes, &jsonStr)
+		if err != nil {
+			t.Fatal(err)
+		}
+		// t.Logf("%q", jsonStr)
+		c := qt.New(t)
+		out, err := decodeJsonByteString(jsonStr, []byte{})
+		c.Assert(err, qt.IsNil)
+		c.Assert(out, qt.DeepEquals, in)
+	})
+}
diff --git a/deps/github.com/anacrolix/torrent/webtorrent/otel.go b/deps/github.com/anacrolix/torrent/webtorrent/otel.go
new file mode 100644
index 0000000..2c09964
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webtorrent/otel.go
@@ -0,0 +1,6 @@
+package webtorrent
+
+const (
+	tracerName        = "anacrolix.torrent.webtorrent"
+	webrtcConnTypeKey = "webtorrent.webrtc.conn.type"
+)
diff --git a/deps/github.com/anacrolix/torrent/webtorrent/setting-engine.go b/deps/github.com/anacrolix/torrent/webtorrent/setting-engine.go
new file mode 100644
index 0000000..a84ee02
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webtorrent/setting-engine.go
@@ -0,0 +1,24 @@
+// These build constraints are copied from webrtc's settingengine.go.
+//go:build !js
+// +build !js
+
+package webtorrent
+
+import (
+	"io"
+
+	"github.com/pion/logging"
+	"github.com/pion/webrtc/v3"
+)
+
+var s = webrtc.SettingEngine{
+	// This could probably be done with better integration into anacrolix/log, but I'm not sure if
+	// it's worth the effort.
+	LoggerFactory: discardLoggerFactory{},
+}
+
+type discardLoggerFactory struct{}
+
+func (discardLoggerFactory) NewLogger(scope string) logging.LeveledLogger {
+	return logging.NewDefaultLeveledLoggerForScope(scope, logging.LogLevelInfo, io.Discard)
+}
diff --git a/deps/github.com/anacrolix/torrent/webtorrent/setting-engine_js.go b/deps/github.com/anacrolix/torrent/webtorrent/setting-engine_js.go
new file mode 100644
index 0000000..ea42d11
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webtorrent/setting-engine_js.go
@@ -0,0 +1,13 @@
+// These build constraints are copied from webrtc's settingengine_js.go.
+//go:build js && wasm
+// +build js,wasm
+
+package webtorrent
+
+import (
+	"github.com/pion/webrtc/v3"
+)
+
+// I'm not sure what to do for logging for JS. See
+// https://gophers.slack.com/archives/CAK2124AG/p1649651943947579.
+var s = webrtc.SettingEngine{}
diff --git a/deps/github.com/anacrolix/torrent/webtorrent/testdata/fuzz/FuzzJsonBinaryStrings/195b11403204772a785dfc25a6f37ba920daf479f86bcfbbb880cd06cbb2ecf8 b/deps/github.com/anacrolix/torrent/webtorrent/testdata/fuzz/FuzzJsonBinaryStrings/195b11403204772a785dfc25a6f37ba920daf479f86bcfbbb880cd06cbb2ecf8
new file mode 100644
index 0000000..9afa08b
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webtorrent/testdata/fuzz/FuzzJsonBinaryStrings/195b11403204772a785dfc25a6f37ba920daf479f86bcfbbb880cd06cbb2ecf8
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("\x93")
diff --git a/deps/github.com/anacrolix/torrent/webtorrent/testdata/fuzz/FuzzJsonBinaryStrings/582528ddfad69eb57775199a43e0f9fd5c94bba343ce7bb6724d4ebafe311ed4 b/deps/github.com/anacrolix/torrent/webtorrent/testdata/fuzz/FuzzJsonBinaryStrings/582528ddfad69eb57775199a43e0f9fd5c94bba343ce7bb6724d4ebafe311ed4
new file mode 100644
index 0000000..a96f559
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webtorrent/testdata/fuzz/FuzzJsonBinaryStrings/582528ddfad69eb57775199a43e0f9fd5c94bba343ce7bb6724d4ebafe311ed4
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("0")
diff --git a/deps/github.com/anacrolix/torrent/webtorrent/testdata/fuzz/FuzzJsonBinaryStrings/caf81e9797b19c76c1fc4dbf537d4d81f389524539f402d13aa01f93a65ac7e9 b/deps/github.com/anacrolix/torrent/webtorrent/testdata/fuzz/FuzzJsonBinaryStrings/caf81e9797b19c76c1fc4dbf537d4d81f389524539f402d13aa01f93a65ac7e9
new file mode 100644
index 0000000..67322c7
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webtorrent/testdata/fuzz/FuzzJsonBinaryStrings/caf81e9797b19c76c1fc4dbf537d4d81f389524539f402d13aa01f93a65ac7e9
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("")
diff --git a/deps/github.com/anacrolix/torrent/webtorrent/tracker-client.go b/deps/github.com/anacrolix/torrent/webtorrent/tracker-client.go
new file mode 100644
index 0000000..bc9dab3
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webtorrent/tracker-client.go
@@ -0,0 +1,396 @@
+package webtorrent
+
+import (
+	"context"
+	"crypto/rand"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"sync"
+	"time"
+
+	g "github.com/anacrolix/generics"
+	"github.com/anacrolix/log"
+	"github.com/gorilla/websocket"
+	"github.com/pion/datachannel"
+	"github.com/pion/webrtc/v3"
+	"go.opentelemetry.io/otel/trace"
+
+	"github.com/anacrolix/torrent/tracker"
+)
+
+type TrackerClientStats struct {
+	Dials                  int64
+	ConvertedInboundConns  int64
+	ConvertedOutboundConns int64
+}
+
+// TrackerClient represents a WebTorrent tracker client.
+type TrackerClient struct {
+	Url                string
+	GetAnnounceRequest func(_ tracker.AnnounceEvent, infoHash [20]byte) (tracker.AnnounceRequest, error)
+	PeerId             [20]byte
+	OnConn             onDataChannelOpen
+	Logger             log.Logger
+	Dialer             *websocket.Dialer
+
+	mu             sync.Mutex
+	cond           sync.Cond
+	outboundOffers map[string]outboundOfferValue // OfferID to outboundOfferValue
+	wsConn         *websocket.Conn
+	closed         bool
+	stats          TrackerClientStats
+	pingTicker     *time.Ticker
+
+	WebsocketTrackerHttpHeader func() http.Header
+	ICEServers                 []string
+}
+
+func (me *TrackerClient) Stats() TrackerClientStats {
+	me.mu.Lock()
+	defer me.mu.Unlock()
+	return me.stats
+}
+
+func (me *TrackerClient) peerIdBinary() string {
+	return binaryToJsonString(me.PeerId[:])
+}
+
+type outboundOffer struct {
+	offerId string
+	outboundOfferValue
+}
+
+// outboundOfferValue represents an outstanding offer.
+type outboundOfferValue struct {
+	originalOffer  webrtc.SessionDescription
+	peerConnection *wrappedPeerConnection
+	infoHash       [20]byte
+	dataChannel    *webrtc.DataChannel
+}
+
+type DataChannelContext struct {
+	OfferId      string
+	LocalOffered bool
+	InfoHash     [20]byte
+	// This is private as some methods might not be appropriate with data channel context.
+	peerConnection *wrappedPeerConnection
+	Span           trace.Span
+	Context        context.Context
+}
+
+func (me *DataChannelContext) GetSelectedIceCandidatePair() (*webrtc.ICECandidatePair, error) {
+	return me.peerConnection.SCTP().Transport().ICETransport().GetSelectedCandidatePair()
+}
+
+type onDataChannelOpen func(_ datachannel.ReadWriteCloser, dcc DataChannelContext)
+
+func (tc *TrackerClient) doWebsocket() error {
+	metrics.Add("websocket dials", 1)
+	tc.mu.Lock()
+	tc.stats.Dials++
+	tc.mu.Unlock()
+
+	var header http.Header
+	if tc.WebsocketTrackerHttpHeader != nil {
+		header = tc.WebsocketTrackerHttpHeader()
+	}
+
+	c, _, err := tc.Dialer.Dial(tc.Url, header)
+	if err != nil {
+		return fmt.Errorf("dialing tracker: %w", err)
+	}
+	defer c.Close()
+	tc.Logger.WithDefaultLevel(log.Info).Printf("connected")
+	tc.mu.Lock()
+	tc.wsConn = c
+	tc.cond.Broadcast()
+	tc.mu.Unlock()
+	tc.announceOffers()
+	closeChan := make(chan struct{})
+	go func() {
+		for {
+			select {
+			case <-tc.pingTicker.C:
+				tc.mu.Lock()
+				err := c.WriteMessage(websocket.PingMessage, []byte{})
+				tc.mu.Unlock()
+				if err != nil {
+					return
+				}
+			case <-closeChan:
+				return
+
+			}
+		}
+	}()
+	err = tc.trackerReadLoop(tc.wsConn)
+	close(closeChan)
+	tc.mu.Lock()
+	c.Close()
+	tc.mu.Unlock()
+	return err
+}
+
+// Finishes initialization and spawns the run routine, calling onStop when it completes with the
+// result. We don't let the caller spawn the runner directly, since it could then race against
+// .Close before initialization finishes.
+func (tc *TrackerClient) Start(onStop func(error)) {
+	tc.pingTicker = time.NewTicker(60 * time.Second)
+	tc.cond.L = &tc.mu
+	go func() {
+		onStop(tc.run())
+	}()
+}
+
+func (tc *TrackerClient) run() error {
+	tc.mu.Lock()
+	for !tc.closed {
+		tc.mu.Unlock()
+		err := tc.doWebsocket()
+		level := log.Info
+		tc.mu.Lock()
+		if tc.closed {
+			level = log.Debug
+		}
+		tc.mu.Unlock()
+		tc.Logger.WithDefaultLevel(level).Printf("websocket instance ended: %v", err)
+		time.Sleep(time.Minute)
+		tc.mu.Lock()
+	}
+	tc.mu.Unlock()
+	return nil
+}
+
+func (tc *TrackerClient) Close() error {
+	tc.mu.Lock()
+	tc.closed = true
+	if tc.wsConn != nil {
+		tc.wsConn.Close()
+	}
+	tc.closeUnusedOffers()
+	tc.pingTicker.Stop()
+	tc.mu.Unlock()
+	tc.cond.Broadcast()
+	return nil
+}
+
+func (tc *TrackerClient) announceOffers() {
+	// tc.Announce grabs a lock on tc.outboundOffers. It also handles the case where outboundOffers
+	// is nil. Take ownership of outboundOffers here.
+	tc.mu.Lock()
+	offers := tc.outboundOffers
+	tc.outboundOffers = nil
+	tc.mu.Unlock()
+
+	if offers == nil {
+		return
+	}
+
+	// Iterate over our locally-owned offers, closing any now-stale ones from before the socket
+	// reconnected, and re-announce each infohash, adding it back into tc.outboundOffers.
+	tc.Logger.WithDefaultLevel(log.Info).Printf("reannouncing %d infohashes after restart", len(offers))
+	for _, offer := range offers {
+		// TODO: Capture the errors? Are we even in a position to do anything with them?
+		offer.peerConnection.Close()
+		// Use goroutine here to allow read loop to start and ensure the buffer drains.
+		go tc.Announce(tracker.Started, offer.infoHash)
+	}
+}
+
+func (tc *TrackerClient) closeUnusedOffers() {
+	for _, offer := range tc.outboundOffers {
+		offer.peerConnection.Close()
+		offer.dataChannel.Close()
+	}
+	tc.outboundOffers = nil
+}
+
+func (tc *TrackerClient) CloseOffersForInfohash(infoHash [20]byte) {
+	tc.mu.Lock()
+	defer tc.mu.Unlock()
+	for key, offer := range tc.outboundOffers {
+		if offer.infoHash == infoHash {
+			offer.peerConnection.Close()
+			delete(tc.outboundOffers, key)
+		}
+	}
+}
+
+func (tc *TrackerClient) Announce(event tracker.AnnounceEvent, infoHash [20]byte) error {
+	metrics.Add("outbound announces", 1)
+	if event == tracker.Stopped {
+		return tc.announce(event, infoHash, nil)
+	}
+	var randOfferId [20]byte
+	_, err := rand.Read(randOfferId[:])
+	if err != nil {
+		return fmt.Errorf("generating offer_id bytes: %w", err)
+	}
+	offerIDBinary := binaryToJsonString(randOfferId[:])
+
+	pc, dc, offer, err := tc.newOffer(tc.Logger, offerIDBinary, infoHash)
+	if err != nil {
+		return fmt.Errorf("creating offer: %w", err)
+	}
+
+	err = tc.announce(event, infoHash, []outboundOffer{
+		{
+			offerId: offerIDBinary,
+			outboundOfferValue: outboundOfferValue{
+				originalOffer:  offer,
+				peerConnection: pc,
+				infoHash:       infoHash,
+				dataChannel:    dc,
+			},
+		},
+	})
+	if err != nil {
+		dc.Close()
+		pc.Close()
+	}
+	return err
+}
+
+func (tc *TrackerClient) announce(event tracker.AnnounceEvent, infoHash [20]byte, offers []outboundOffer) error {
+	request, err := tc.GetAnnounceRequest(event, infoHash)
+	if err != nil {
+		return fmt.Errorf("getting announce parameters: %w", err)
+	}
+
+	req := AnnounceRequest{
+		Numwant:    len(offers),
+		Uploaded:   request.Uploaded,
+		Downloaded: request.Downloaded,
+		Left:       request.Left,
+		Event:      request.Event.String(),
+		Action:     "announce",
+		InfoHash:   binaryToJsonString(infoHash[:]),
+		PeerID:     tc.peerIdBinary(),
+	}
+	for _, offer := range offers {
+		req.Offers = append(req.Offers, Offer{
+			OfferID: offer.offerId,
+			Offer:   offer.originalOffer,
+		})
+	}
+
+	data, err := json.Marshal(req)
+	if err != nil {
+		return fmt.Errorf("marshalling request: %w", err)
+	}
+
+	tc.mu.Lock()
+	defer tc.mu.Unlock()
+	err = tc.writeMessage(data)
+	if err != nil {
+		return fmt.Errorf("write AnnounceRequest: %w", err)
+	}
+	for _, offer := range offers {
+		g.MakeMapIfNilAndSet(&tc.outboundOffers, offer.offerId, offer.outboundOfferValue)
+	}
+	return nil
+}
+
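+// writeMessage requires tc.mu to be held. It blocks until a websocket
+// connection is available or the client is closed.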
+func (tc *TrackerClient) writeMessage(data []byte) error {
+	for tc.wsConn == nil {
+		if tc.closed {
+			return fmt.Errorf("%T closed", tc)
+		}
+		tc.cond.Wait()
+	}
+	return tc.wsConn.WriteMessage(websocket.TextMessage, data)
+}
+
+func (tc *TrackerClient) trackerReadLoop(tracker *websocket.Conn) error {
+	for {
+		_, message, err := tracker.ReadMessage()
+		if err != nil {
+			return fmt.Errorf("read message error: %w", err)
+		}
+		// tc.Logger.WithDefaultLevel(log.Debug).Printf("received message from tracker: %q", message)
+
+		var ar AnnounceResponse
+		if err := json.Unmarshal(message, &ar); err != nil {
+			tc.Logger.WithDefaultLevel(log.Warning).Printf("error unmarshalling announce response: %v", err)
+			continue
+		}
+		switch {
+		case ar.Offer != nil:
+			ih, err := jsonStringToInfoHash(ar.InfoHash)
+			if err != nil {
+				tc.Logger.WithDefaultLevel(log.Warning).Printf("error decoding info_hash in offer: %v", err)
+				break
+			}
+			err = tc.handleOffer(offerContext{
+				SessDesc: *ar.Offer,
+				Id:       ar.OfferID,
+				InfoHash: ih,
+			}, ar.PeerID)
+			if err != nil {
+				tc.Logger.Levelf(log.Error, "handling offer for infohash %x: %v", ih, err)
+			}
+		case ar.Answer != nil:
+			tc.handleAnswer(ar.OfferID, *ar.Answer)
+		default:
+			tc.Logger.Levelf(log.Warning, "unhandled announce response %q", message)
+		}
+	}
+}
+
+type offerContext struct {
+	SessDesc webrtc.SessionDescription
+	Id       string
+	InfoHash [20]byte
+}
+
+func (tc *TrackerClient) handleOffer(
+	offerContext offerContext,
+	peerId string,
+) error {
+	peerConnection, answer, err := tc.newAnsweringPeerConnection(offerContext)
+	if err != nil {
+		return fmt.Errorf("creating answering peer connection: %w", err)
+	}
+	response := AnnounceResponse{
+		Action:   "announce",
+		InfoHash: binaryToJsonString(offerContext.InfoHash[:]),
+		PeerID:   tc.peerIdBinary(),
+		ToPeerID: peerId,
+		Answer:   &answer,
+		OfferID:  offerContext.Id,
+	}
+	data, err := json.Marshal(response)
+	if err != nil {
+		peerConnection.Close()
+		return fmt.Errorf("marshalling response: %w", err)
+	}
+	tc.mu.Lock()
+	defer tc.mu.Unlock()
+	if err := tc.writeMessage(data); err != nil {
+		peerConnection.Close()
+		return fmt.Errorf("writing response: %w", err)
+	}
+	return nil
+}
+
+func (tc *TrackerClient) handleAnswer(offerId string, answer webrtc.SessionDescription) {
+	tc.mu.Lock()
+	defer tc.mu.Unlock()
+	offer, ok := tc.outboundOffers[offerId]
+	if !ok {
+		tc.Logger.WithDefaultLevel(log.Warning).Printf("could not find offer for id %+q", offerId)
+		return
+	}
+	// tc.Logger.WithDefaultLevel(log.Debug).Printf("offer %q got answer %v", offerId, answer)
+	metrics.Add("outbound offers answered", 1)
+	err := offer.peerConnection.SetRemoteDescription(answer)
+	if err != nil {
+		err = fmt.Errorf("using outbound offer answer: %w", err)
+		offer.peerConnection.span.RecordError(err)
+		tc.Logger.LevelPrint(log.Error, err)
+		return
+	}
+	delete(tc.outboundOffers, offerId)
+	go tc.Announce(tracker.None, offer.infoHash)
+}
diff --git a/deps/github.com/anacrolix/torrent/webtorrent/tracker-protocol.go b/deps/github.com/anacrolix/torrent/webtorrent/tracker-protocol.go
new file mode 100644
index 0000000..14be67e
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webtorrent/tracker-protocol.go
@@ -0,0 +1,76 @@
+package webtorrent
+
+import (
+	"fmt"
+	"math"
+
+	"github.com/pion/webrtc/v3"
+)
+
+type AnnounceRequest struct {
+	Numwant    int     `json:"numwant"`
+	Uploaded   int64   `json:"uploaded"`
+	Downloaded int64   `json:"downloaded"`
+	Left       int64   `json:"left"`
+	Event      string  `json:"event,omitempty"`
+	Action     string  `json:"action"`
+	InfoHash   string  `json:"info_hash"`
+	PeerID     string  `json:"peer_id"`
+	Offers     []Offer `json:"offers"`
+}
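+
+// An offer announce serializes to JSON along these lines (illustrative, with
+// binary-string fields elided):
+//
+//	{"numwant":1,"uploaded":0,"downloaded":0,"left":1024,"event":"started",
+//	 "action":"announce","info_hash":"...","peer_id":"...",
+//	 "offers":[{"offer_id":"...","offer":{...}}]}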
+
+type Offer struct {
+	OfferID string                    `json:"offer_id"`
+	Offer   webrtc.SessionDescription `json:"offer"`
+}
+
+type AnnounceResponse struct {
+	InfoHash   string                     `json:"info_hash"`
+	Action     string                     `json:"action"`
+	Interval   *int                       `json:"interval,omitempty"`
+	Complete   *int                       `json:"complete,omitempty"`
+	Incomplete *int                       `json:"incomplete,omitempty"`
+	PeerID     string                     `json:"peer_id,omitempty"`
+	ToPeerID   string                     `json:"to_peer_id,omitempty"`
+	Answer     *webrtc.SessionDescription `json:"answer,omitempty"`
+	Offer      *webrtc.SessionDescription `json:"offer,omitempty"`
+	OfferID    string                     `json:"offer_id,omitempty"`
+}
+
+// I wonder if this is a de facto standard way to decode bytes to JSON for webtorrent. I don't
+// really care.
+func binaryToJsonString(b []byte) string {
+	var seq []rune
+	for _, v := range b {
+		seq = append(seq, rune(v))
+	}
+	return string(seq)
+}
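+
+// For example, binaryToJsonString([]byte{0x00, 0x93}) yields the two-rune
+// string "\u0000\u0093"; decodeJsonByteString below reverses the mapping
+// rune by rune.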
+
+func jsonStringToInfoHash(s string) (ih [20]byte, err error) {
+	b, err := decodeJsonByteString(s, ih[:0])
+	if err != nil {
+		return
+	}
+	if len(b) != len(ih) {
+		err = fmt.Errorf("string decoded to %v bytes", len(b))
+	}
+	return
+}
+
+func decodeJsonByteString(s string, b []byte) ([]byte, error) {
+	defer func() {
+		r := recover()
+		if r == nil {
+			return
+		}
+		panic(fmt.Sprintf("%q", s))
+	}()
+	for _, c := range []rune(s) {
+		if c < 0 || c > math.MaxUint8 {
+			return b, fmt.Errorf("rune out of bounds: %v", c)
+		}
+		b = append(b, byte(c))
+	}
+	return b, nil
+}
diff --git a/deps/github.com/anacrolix/torrent/webtorrent/transport.go b/deps/github.com/anacrolix/torrent/webtorrent/transport.go
new file mode 100644
index 0000000..8566258
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webtorrent/transport.go
@@ -0,0 +1,265 @@
+package webtorrent
+
+import (
+	"context"
+	"expvar"
+	"fmt"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/anacrolix/log"
+	"github.com/anacrolix/missinggo/v2/pproffd"
+	"github.com/pion/datachannel"
+	"github.com/pion/webrtc/v3"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	dataChannelLabel = "webrtc-datachannel"
+)
+
+var (
+	metrics = expvar.NewMap("webtorrent")
+	api     = func() *webrtc.API {
+		// Enable the detach API (since it's non-standard but more idiomatic).
+		s.DetachDataChannels()
+		return webrtc.NewAPI(webrtc.WithSettingEngine(s))
+	}()
+	newPeerConnectionMu sync.Mutex
+)
+
+type wrappedPeerConnection struct {
+	*webrtc.PeerConnection
+	closeMu sync.Mutex
+	pproffd.CloseWrapper
+	span trace.Span
+	ctx  context.Context
+}
+
+func (me *wrappedPeerConnection) Close() error {
+	me.closeMu.Lock()
+	defer me.closeMu.Unlock()
+	err := me.CloseWrapper.Close()
+	me.span.End()
+	return err
+}
+
+func newPeerConnection(logger log.Logger, iceServers []string) (*wrappedPeerConnection, error) {
+	newPeerConnectionMu.Lock()
+	defer newPeerConnectionMu.Unlock()
+	ctx, span := otel.Tracer(tracerName).Start(context.Background(), "PeerConnection")
+
+	pcConfig := webrtc.Configuration{ICEServers: []webrtc.ICEServer{{URLs: iceServers}}}
+
+	pc, err := api.NewPeerConnection(pcConfig)
+	if err != nil {
+		span.SetStatus(codes.Error, err.Error())
+		span.RecordError(err)
+		span.End()
+		return nil, err
+	}
+	wpc := &wrappedPeerConnection{
+		PeerConnection: pc,
+		CloseWrapper:   pproffd.NewCloseWrapper(pc),
+		ctx:            ctx,
+		span:           span,
+	}
+	// If the state change handler intends to call Close, it should call it on the wrapper.
+	wpc.OnConnectionStateChange(func(state webrtc.PeerConnectionState) {
+		logger.Levelf(log.Warning, "webrtc PeerConnection state changed to %v", state)
+		span.AddEvent("connection state changed", trace.WithAttributes(attribute.String("state", state.String())))
+	})
+	return wpc, nil
+}
+
+func setAndGatherLocalDescription(peerConnection *wrappedPeerConnection, sdp webrtc.SessionDescription) (_ webrtc.SessionDescription, err error) {
+	gatherComplete := webrtc.GatheringCompletePromise(peerConnection.PeerConnection)
+	peerConnection.span.AddEvent("setting local description")
+	err = peerConnection.SetLocalDescription(sdp)
+	if err != nil {
+		err = fmt.Errorf("setting local description: %w", err)
+		return
+	}
+	<-gatherComplete
+	peerConnection.span.AddEvent("gathering complete")
+	return *peerConnection.LocalDescription(), nil
+}
+
+// newOffer creates a transport and returns a WebRTC offer to be announced. See
+// https://github.com/pion/webrtc/blob/master/examples/data-channels/jsfiddle/main.go for what this is modelled on.
+func (tc *TrackerClient) newOffer(
+	logger log.Logger,
+	offerId string,
+	infoHash [20]byte,
+) (
+	peerConnection *wrappedPeerConnection,
+	dataChannel *webrtc.DataChannel,
+	offer webrtc.SessionDescription,
+	err error,
+) {
+	peerConnection, err = newPeerConnection(logger, tc.ICEServers)
+	if err != nil {
+		return
+	}
+
+	peerConnection.span.SetAttributes(attribute.String(webrtcConnTypeKey, "offer"))
+
+	dataChannel, err = peerConnection.CreateDataChannel(dataChannelLabel, nil)
+	if err != nil {
+		err = fmt.Errorf("creating data channel: %w", err)
+		peerConnection.Close()
+		// Bail out here: initDataChannel below must not run with a nil channel.
+		return
+	}
+	initDataChannel(dataChannel, peerConnection, func(dc datachannel.ReadWriteCloser, dcCtx context.Context, dcSpan trace.Span) {
+		metrics.Add("outbound offers answered with datachannel", 1)
+		tc.mu.Lock()
+		tc.stats.ConvertedOutboundConns++
+		tc.mu.Unlock()
+		tc.OnConn(dc, DataChannelContext{
+			OfferId:        offerId,
+			LocalOffered:   true,
+			InfoHash:       infoHash,
+			peerConnection: peerConnection,
+			Context:        dcCtx,
+			Span:           dcSpan,
+		})
+	})
+
+	offer, err = peerConnection.CreateOffer(nil)
+	if err != nil {
+		dataChannel.Close()
+		peerConnection.Close()
+		return
+	}
+
+	offer, err = setAndGatherLocalDescription(peerConnection, offer)
+	if err != nil {
+		dataChannel.Close()
+		peerConnection.Close()
+	}
+	return
+}
+
+type onDetachedDataChannelFunc func(detached datachannel.ReadWriteCloser, ctx context.Context, span trace.Span)
+
+func (tc *TrackerClient) initAnsweringPeerConnection(
+	peerConn *wrappedPeerConnection,
+	offerContext offerContext,
+) (answer webrtc.SessionDescription, err error) {
+	peerConn.span.SetAttributes(attribute.String(webrtcConnTypeKey, "answer"))
+
+	timer := time.AfterFunc(30*time.Second, func() {
+		peerConn.span.SetStatus(codes.Error, "answer timeout")
+		metrics.Add("answering peer connections timed out", 1)
+		peerConn.Close()
+	})
+	peerConn.OnDataChannel(func(d *webrtc.DataChannel) {
+		initDataChannel(d, peerConn, func(detached datachannel.ReadWriteCloser, ctx context.Context, span trace.Span) {
+			timer.Stop()
+			metrics.Add("answering peer connection conversions", 1)
+			tc.mu.Lock()
+			tc.stats.ConvertedInboundConns++
+			tc.mu.Unlock()
+			tc.OnConn(detached, DataChannelContext{
+				OfferId:        offerContext.Id,
+				LocalOffered:   false,
+				InfoHash:       offerContext.InfoHash,
+				peerConnection: peerConn,
+				Context:        ctx,
+				Span:           span,
+			})
+		})
+	})
+
+	err = peerConn.SetRemoteDescription(offerContext.SessDesc)
+	if err != nil {
+		return
+	}
+	answer, err = peerConn.CreateAnswer(nil)
+	if err != nil {
+		return
+	}
+
+	answer, err = setAndGatherLocalDescription(peerConn, answer)
+	return
+}
+
+// newAnsweringPeerConnection creates a transport from a WebRTC offer and returns a WebRTC answer to be announced.
+func (tc *TrackerClient) newAnsweringPeerConnection(
+	offerContext offerContext,
+) (
+	peerConn *wrappedPeerConnection, answer webrtc.SessionDescription, err error,
+) {
+	peerConn, err = newPeerConnection(tc.Logger, tc.ICEServers)
+	if err != nil {
+		err = fmt.Errorf("failed to create new connection: %w", err)
+		return
+	}
+	answer, err = tc.initAnsweringPeerConnection(peerConn, offerContext)
+	if err != nil {
+		peerConn.span.RecordError(err)
+		peerConn.Close()
+	}
+	return
+}
+
+type datachannelReadWriter interface {
+	datachannel.Reader
+	datachannel.Writer
+	io.Reader
+	io.Writer
+}
+
+type ioCloserFunc func() error
+
+func (me ioCloserFunc) Close() error {
+	return me()
+}
+
+func initDataChannel(
+	dc *webrtc.DataChannel,
+	pc *wrappedPeerConnection,
+	onOpen onDetachedDataChannelFunc,
+) {
+	var span trace.Span
+	dc.OnClose(func() {
+		span.End()
+	})
+	dc.OnOpen(func() {
+		pc.span.AddEvent("data channel opened")
+		var ctx context.Context
+		ctx, span = otel.Tracer(tracerName).Start(pc.ctx, "DataChannel")
+		raw, err := dc.Detach()
+		if err != nil {
+			// This shouldn't happen if the API is configured correctly, and we call from OnOpen.
+			panic(err)
+		}
+		onOpen(hookDataChannelCloser(raw, pc, span, dc), ctx, span)
+	})
+}
+
+// Hooks the datachannel's Close to Close the owning PeerConnection. The datachannel takes ownership
+// and responsibility for the PeerConnection.
+func hookDataChannelCloser(
+	dcrwc datachannel.ReadWriteCloser,
+	pc *wrappedPeerConnection,
+	dataChannelSpan trace.Span,
+	originalDataChannel *webrtc.DataChannel,
+) datachannel.ReadWriteCloser {
+	return struct {
+		datachannelReadWriter
+		io.Closer
+	}{
+		dcrwc,
+		ioCloserFunc(func() error {
+			dcrwc.Close()
+			pc.Close()
+			originalDataChannel.Close()
+			dataChannelSpan.End()
+			return nil
+		}),
+	}
+}
diff --git a/deps/github.com/anacrolix/torrent/webtorrent/transport_test.go b/deps/github.com/anacrolix/torrent/webtorrent/transport_test.go
new file mode 100644
index 0000000..c17328e
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/webtorrent/transport_test.go
@@ -0,0 +1,35 @@
+package webtorrent
+
+import (
+	"testing"
+
+	"github.com/anacrolix/log"
+	qt "github.com/frankban/quicktest"
+	"github.com/pion/webrtc/v3"
+)
+
+func TestClosingPeerConnectionDoesNotCloseUnopenedDataChannel(t *testing.T) {
+	c := qt.New(t)
+	var tc TrackerClient
+	pc, dc, _, err := tc.newOffer(log.Default, "", [20]byte{})
+	c.Assert(err, qt.IsNil)
+	defer pc.Close()
+	defer dc.Close()
+	peerConnClosed := make(chan struct{})
+	pc.OnConnectionStateChange(func(state webrtc.PeerConnectionState) {
+		if state == webrtc.PeerConnectionStateClosed {
+			close(peerConnClosed)
+		}
+	})
+	dc.OnClose(func() {
+		// This should not be called because the DataChannel is never opened.
+		t.Fatal("DataChannel.OnClose handler called")
+	})
+	t.Logf("data channel ready state before close: %v", dc.ReadyState())
+	dc.OnError(func(err error) {
+		t.Logf("data channel error: %v", err)
+	})
+	pc.Close()
+	c.Check(dc.ReadyState(), qt.Equals, webrtc.DataChannelStateClosed)
+	<-peerConnClosed
+}
diff --git a/deps/github.com/anacrolix/torrent/worse-conns.go b/deps/github.com/anacrolix/torrent/worse-conns.go
new file mode 100644
index 0000000..ef33b97
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/worse-conns.go
@@ -0,0 +1,118 @@
+package torrent
+
+import (
+	"container/heap"
+	"fmt"
+	"time"
+	"unsafe"
+
+	"github.com/anacrolix/multiless"
+	"github.com/anacrolix/sync"
+)
+
+type worseConnInput struct {
+	BadDirection        bool
+	Useful              bool
+	LastHelpful         time.Time
+	CompletedHandshake  time.Time
+	GetPeerPriority     func() (peerPriority, error)
+	getPeerPriorityOnce sync.Once
+	peerPriority        peerPriority
+	peerPriorityErr     error
+	Pointer             uintptr
+}
+
+func (me *worseConnInput) doGetPeerPriority() {
+	me.peerPriority, me.peerPriorityErr = me.GetPeerPriority()
+}
+
+func (me *worseConnInput) doGetPeerPriorityOnce() {
+	me.getPeerPriorityOnce.Do(me.doGetPeerPriority)
+}
+
+type worseConnLensOpts struct {
+	incomingIsBad, outgoingIsBad bool
+}
+
+func worseConnInputFromPeer(p *PeerConn, opts worseConnLensOpts) worseConnInput {
+	ret := worseConnInput{
+		Useful:             p.useful(),
+		LastHelpful:        p.lastHelpful(),
+		CompletedHandshake: p.completedHandshake,
+		Pointer:            uintptr(unsafe.Pointer(p)),
+		GetPeerPriority:    p.peerPriority,
+	}
+	if opts.incomingIsBad && !p.outgoing {
+		ret.BadDirection = true
+	} else if opts.outgoingIsBad && p.outgoing {
+		ret.BadDirection = true
+	}
+	return ret
+}
+
+func (l *worseConnInput) Less(r *worseConnInput) bool {
+	less, ok := multiless.New().Bool(
+		r.BadDirection, l.BadDirection).Bool(
+		l.Useful, r.Useful).CmpInt64(
+		l.LastHelpful.Sub(r.LastHelpful).Nanoseconds()).CmpInt64(
+		l.CompletedHandshake.Sub(r.CompletedHandshake).Nanoseconds()).LazySameLess(
+		func() (same, less bool) {
+			l.doGetPeerPriorityOnce()
+			if l.peerPriorityErr != nil {
+				same = true
+				return
+			}
+			r.doGetPeerPriorityOnce()
+			if r.peerPriorityErr != nil {
+				same = true
+				return
+			}
+			same = l.peerPriority == r.peerPriority
+			less = l.peerPriority < r.peerPriority
+			return
+		}).Uintptr(
+		l.Pointer, r.Pointer,
+	).LessOk()
+	if !ok {
+		panic(fmt.Sprintf("cannot differentiate %#v and %#v", l, r))
+	}
+	return less
+}
+
+type worseConnSlice struct {
+	conns []*PeerConn
+	keys  []worseConnInput
+}
+
+func (me *worseConnSlice) initKeys(opts worseConnLensOpts) {
+	me.keys = make([]worseConnInput, len(me.conns))
+	for i, c := range me.conns {
+		me.keys[i] = worseConnInputFromPeer(c, opts)
+	}
+}
+
+var _ heap.Interface = &worseConnSlice{}
+
+func (me worseConnSlice) Len() int {
+	return len(me.conns)
+}
+
+func (me worseConnSlice) Less(i, j int) bool {
+	return me.keys[i].Less(&me.keys[j])
+}
+
+func (me *worseConnSlice) Pop() interface{} {
+	i := len(me.conns) - 1
+	ret := me.conns[i]
+	me.conns = me.conns[:i]
+	return ret
+}
+
+func (me *worseConnSlice) Push(x interface{}) {
+	panic("not implemented")
+}
+
+func (me worseConnSlice) Swap(i, j int) {
+	me.conns[i], me.conns[j] = me.conns[j], me.conns[i]
+	me.keys[i], me.keys[j] = me.keys[j], me.keys[i]
+}
diff --git a/deps/github.com/anacrolix/torrent/worse-conns_test.go b/deps/github.com/anacrolix/torrent/worse-conns_test.go
new file mode 100644
index 0000000..3865b64
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/worse-conns_test.go
@@ -0,0 +1,44 @@
+package torrent
+
+import (
+	"testing"
+	"time"
+
+	qt "github.com/frankban/quicktest"
+)
+
+func TestWorseConnLastHelpful(t *testing.T) {
+	c := qt.New(t)
+	c.Check((&worseConnInput{}).Less(&worseConnInput{LastHelpful: time.Now()}), qt.IsTrue)
+	c.Check((&worseConnInput{}).Less(&worseConnInput{CompletedHandshake: time.Now()}), qt.IsTrue)
+	c.Check((&worseConnInput{LastHelpful: time.Now()}).Less(&worseConnInput{CompletedHandshake: time.Now()}), qt.IsFalse)
+	c.Check((&worseConnInput{
+		LastHelpful: time.Now(),
+	}).Less(&worseConnInput{
+		LastHelpful:        time.Now(),
+		CompletedHandshake: time.Now(),
+	}), qt.IsTrue)
+	now := time.Now()
+	c.Check((&worseConnInput{
+		LastHelpful: now,
+	}).Less(&worseConnInput{
+		LastHelpful:        now.Add(-time.Nanosecond),
+		CompletedHandshake: now,
+	}), qt.IsFalse)
+	readyPeerPriority := func() (peerPriority, error) {
+		return 42, nil
+	}
+	c.Check((&worseConnInput{
+		GetPeerPriority: readyPeerPriority,
+	}).Less(&worseConnInput{
+		GetPeerPriority: readyPeerPriority,
+		Pointer:         1,
+	}), qt.IsTrue)
+	c.Check((&worseConnInput{
+		GetPeerPriority: readyPeerPriority,
+		Pointer:         2,
+	}).Less(&worseConnInput{
+		GetPeerPriority: readyPeerPriority,
+		Pointer:         1,
+	}), qt.IsFalse)
+}
diff --git a/deps/github.com/anacrolix/torrent/wstracker.go b/deps/github.com/anacrolix/torrent/wstracker.go
new file mode 100644
index 0000000..84af9cb
--- /dev/null
+++ b/deps/github.com/anacrolix/torrent/wstracker.go
@@ -0,0 +1,92 @@
+package torrent
+
+import (
+	"context"
+	"fmt"
+	"net"
+	netHttp "net/http"
+	"net/url"
+	"sync"
+
+	"github.com/anacrolix/log"
+	"github.com/gorilla/websocket"
+	"github.com/pion/datachannel"
+
+	"github.com/anacrolix/torrent/tracker"
+	httpTracker "github.com/anacrolix/torrent/tracker/http"
+	"github.com/anacrolix/torrent/webtorrent"
+)
+
+type websocketTrackerStatus struct {
+	url url.URL
+	tc  *webtorrent.TrackerClient
+}
+
+func (me websocketTrackerStatus) statusLine() string {
+	return fmt.Sprintf("%+v", me.tc.Stats())
+}
+
+func (me websocketTrackerStatus) URL() *url.URL {
+	return &me.url
+}
+
+type refCountedWebtorrentTrackerClient struct {
+	webtorrent.TrackerClient
+	refCount int
+}
+
+type websocketTrackers struct {
+	PeerId                     [20]byte
+	Logger                     log.Logger
+	GetAnnounceRequest         func(event tracker.AnnounceEvent, infoHash [20]byte) (tracker.AnnounceRequest, error)
+	OnConn                     func(datachannel.ReadWriteCloser, webtorrent.DataChannelContext)
+	mu                         sync.Mutex
+	clients                    map[string]*refCountedWebtorrentTrackerClient
+	Proxy                      httpTracker.ProxyFunc
+	DialContext                func(ctx context.Context, network, addr string) (net.Conn, error)
+	WebsocketTrackerHttpHeader func() netHttp.Header
+	ICEServers                 []string
+}
+
+func (me *websocketTrackers) Get(url string, infoHash [20]byte) (*webtorrent.TrackerClient, func()) {
+	me.mu.Lock()
+	defer me.mu.Unlock()
+	value, ok := me.clients[url]
+	if !ok {
+		dialer := &websocket.Dialer{Proxy: me.Proxy, NetDialContext: me.DialContext, HandshakeTimeout: websocket.DefaultDialer.HandshakeTimeout}
+		value = &refCountedWebtorrentTrackerClient{
+			TrackerClient: webtorrent.TrackerClient{
+				Dialer:             dialer,
+				Url:                url,
+				GetAnnounceRequest: me.GetAnnounceRequest,
+				PeerId:             me.PeerId,
+				OnConn:             me.OnConn,
+				Logger: me.Logger.WithText(func(m log.Msg) string {
+					return fmt.Sprintf("tracker client for %q: %v", url, m)
+				}),
+				WebsocketTrackerHttpHeader: me.WebsocketTrackerHttpHeader,
+				ICEServers:                 me.ICEServers,
+			},
+		}
+		value.TrackerClient.Start(func(err error) {
+			if err != nil {
+				me.Logger.Printf("error running tracker client for %q: %v", url, err)
+			}
+		})
+		if me.clients == nil {
+			me.clients = make(map[string]*refCountedWebtorrentTrackerClient)
+		}
+		me.clients[url] = value
+	}
+	value.refCount++
+	return &value.TrackerClient, func() {
+		me.mu.Lock()
+		defer me.mu.Unlock()
+		value.TrackerClient.CloseOffersForInfohash(infoHash)
+		value.refCount--
+		if value.refCount == 0 {
+			value.TrackerClient.Close()
+			delete(me.clients, url)
+		}
+	}
+}
diff --git a/deps/github.com/ledgerwatch/interfaces/.github/workflows/rust.yml b/deps/github.com/ledgerwatch/interfaces/.github/workflows/rust.yml
new file mode 100644
index 0000000..2ec3432
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/.github/workflows/rust.yml
@@ -0,0 +1,54 @@
+on:
+  pull_request:
+  push:
+    branches:
+      - master
+
+name: Rust
+
+jobs:
+  ci:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: stable
+          components: rustfmt, clippy
+
+      - uses: actions-rs/cargo@v1
+        with:
+          command: fmt
+          args: --all -- --check
+
+      - uses: actions-rs/install@v0.1
+        with:
+          crate: cargo-hack
+          version: latest
+          use-tool-cache: true
+
+      - uses: actions-rs/cargo@v1
+        with:
+          command: hack
+          args: check --workspace --ignore-private --each-feature --no-dev-deps
+
+      - uses: actions-rs/cargo@v1
+        with:
+          command: check
+          args: --workspace --all-targets --all-features
+
+      - uses: actions-rs/cargo@v1
+        with:
+          command: test
+
+      - uses: actions-rs/clippy-check@v1
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          args: --all-features
+
+      - uses: actions-rs/audit-check@v1
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/deps/github.com/ledgerwatch/interfaces/.gitignore b/deps/github.com/ledgerwatch/interfaces/.gitignore
new file mode 100644
index 0000000..0d36de1
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/.gitignore
@@ -0,0 +1,5 @@
+.idea/
+/target
+Cargo.lock
+
+go.work
\ No newline at end of file
diff --git a/deps/github.com/ledgerwatch/interfaces/Cargo.toml b/deps/github.com/ledgerwatch/interfaces/Cargo.toml
new file mode 100644
index 0000000..3a9dc30
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "ethereum-interfaces"
+version = "0.1.0"
+authors = ["Artem Vorotnikov "]
+edition = "2021"
+license = "Apache-2.0"
+
+[features]
+sentry = []
+sentinel = []
+remotekv = []
+snapshotsync = []
+txpool = []
+execution = []
+web3 = []
+
+[dependencies]
+arrayref = "0.3"
+ethereum-types = { version = "0.14", default-features = false }
+ethnum = { version = "1", default-features = false }
+prost = "0.11"
+tonic = "0.8"
+
+[build-dependencies]
+protobuf-src = "1.1.0"
+prost-build = "0.11"
+tonic-build = "0.8"
diff --git a/deps/github.com/ledgerwatch/interfaces/LICENSE b/deps/github.com/ledgerwatch/interfaces/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/deps/github.com/ledgerwatch/interfaces/README.md b/deps/github.com/ledgerwatch/interfaces/README.md
new file mode 100644
index 0000000..8e156f6
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/README.md
@@ -0,0 +1,28 @@
+# Interfaces
+Interfaces for Erigon components, compatible with Silkworm and Akula. Currently this is a collection of `.proto` files describing the gRPC interfaces between components; documentation about each interface, its components, and the required gRPC version will be added later.
+
+See more info on the components and their descriptions in [Components](./_docs/README.md)
+
+
+# What's in this repo
+- Protobuf definitions
+- Wrappers:
+  - Rust crate with autogenerated client and server based on [Tonic](https://github.com/hyperium/tonic)
+
+NOTE: You are free to ignore the provided wrappers and use the `.proto` files directly
+
+# Suggested integration into other repositories
+
+Using a go module is the most effective way to include these definitions in consuming repos.
+
+``` 
+go get github.com/ledgerwatch/interfaces
+```
+
+This makes local development easier, as a go.mod redirect can be used (see the example below), and it saves on submodule/tree updates (which were the previous method of consumption).
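+
+For instance, during local development a consuming repo can temporarily redirect the module to a local checkout (the path below is hypothetical):
+
+```
+// in the consuming repository's go.mod
+replace github.com/ledgerwatch/interfaces => ../interfaces
+```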
+
+# Style guide 
+
+[https://developers.google.com/protocol-buffers/docs/style](https://developers.google.com/protocol-buffers/docs/style)
diff --git a/deps/github.com/ledgerwatch/interfaces/_docs/README.md b/deps/github.com/ledgerwatch/interfaces/_docs/README.md
new file mode 100644
index 0000000..62eba7e
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/_docs/README.md
@@ -0,0 +1,138 @@
+# Erigon Architecture
+
+The architectural diagram
+
+![](../turbo-geth-architecture.png)
+
+# Loosely Coupled Architecture
+
+The node consists of loosely coupled components with well defined "edges" -- protocols that are used between these components.
+
+It's reminiscent of [microservices architecture](https://en.wikipedia.org/wiki/Microservices), where each component has clearly defined responsibilities and interfaces. Implementations might vary. In the case of Erigon, we use gRPC/protobuf definitions, which allows the components to be written in different languages.
+
+In our experience, every p2p blockchain node has more or less these components, even when they aren't explicitly set up. In that case, we get a highly coupled system of the same components, but one with more resistance to change.
+
+## Advantages of loosely coupled architecture
+
+* Fewer dependencies between components -- fewer side effects of changing one component on another.
+
+* Team scalability -- with well-specified components, it's easy to form sub-teams that work on each component with less coordination overhead. Most cross-team communication is around the interface definition and interpretation.
+
+* Learning curve reduction -- it is not that easy to find a full-fledged blockchain node developer, but narrowing down the area of responsibilities makes it easier both to find candidates and to coach/mentor the right skillset.
+
+* Innovation and improvements of each layer independently -- with specialized teams for each sub-component, it is easier to find improvements, optimizations, or innovative approaches than in a team that has to keep everything about the node in its head.
+
+## Designing for upgradeability
+
+One important part of the design of a node is to make sure that we leave ourselves room to upgrade it in a simple way.
+
+That means a couple of things:
+- protocols for each component should be versioned, to make sure that we can't run inconsistent versions together. [semver](https://semver.org) is a good approach here because it allows parsing even future versions and figuring out how compatible they are, based on a simple convention;
+
+- keeping compatibility as much as possible: unless there is a very good reason to break it, we will try to keep it. In practice that means:
+    - adding new APIs is safe;
+    - adding new parameters is safe, taking into account that we can always support them being missing and revert to the old behaviour;
+    - renaming parameters and methods is considered harmful;
+    - removing parameters and methods is considered harmful;
+    - radically changing the behaviour of a method without any changes to the protocol is considered harmful.
+
+Tools for automated compatibility checks are available for Protobuf: https://github.com/bufbuild/buf
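+
+As an illustration of that convention (a sketch, not part of this repo), compatibility can be gated on the major version using `golang.org/x/mod/semver`:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"golang.org/x/mod/semver"
+)
+
+// compatible follows the convention above: only a major-version
+// mismatch breaks compatibility; additive minor/patch changes are safe.
+func compatible(mine, theirs string) bool {
+	if !semver.IsValid(mine) || !semver.IsValid(theirs) {
+		return false
+	}
+	return semver.Major(mine) == semver.Major(theirs)
+}
+
+func main() {
+	fmt.Println(compatible("v2.3.0", "v2.9.1")) // true: additions are safe
+	fmt.Println(compatible("v2.3.0", "v3.0.0")) // false: breaking change
+}
+```
+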
+## Implementation variants
+
+### Microservices
+
+Erigon uses the gRPC-powered variant; each component implements a gRPC interface, defined in the protobuf files. There is no language dependency across components.
+
+**Advantages**
+- it is possible to run a single node spread on multiple machines (and specialize each machine to its job, like GPU/CPU for hash/proof calculations, memory-heavy TX pool, etc)
+- it is possible to plug & play multiple variants of each component
+- it is possible to write each component in its own language and use the power of each language to the most (perf-critical in Rust or C++, Go for networking, some parts in Python and JS for fast prototyping, etc)
+- it is possible to replace components as better version in another language is written
+
+**Challenges**
+- deployment process for average users could be clumsy
+- managing multiple sub-projects
+- testing interfaces, extensive integration testing is needed
+
+### Single binary
+
+That's when each module is in the same language and compiles to the same binary either as a static library or a dynamic library or just a subfolder in the code.
+
+**Advantages**
+- simpler deployment process
+- simpler component compatibility
+
+**Challenges**
+- have to settle on a single language/framework for the whole project
+- less flexibility with upgrades
+
+# Components
+## 1. API Service (RPCDaemon, SilkRPC, etc)
+
+Each node exposes an API to plug it into other components. For Ethereum nodes, the example is JSON-RPC APIs or GraphQL APIs. It is an interface between DApps and the nodes.
+
+The API Service's responsibilities are to expose these APIs.
+
+The API design is not limited to JSON-RPC/http with `eth_call`s, it could be something completely else: gRPC, GraphQL or even some REST to power some webUIs.
+
+The API Service connects to the [Core].
+
+In Erigon, there are two interfaces (a client sketch follows the list):
+- [ETH Backend, proto](../remote/ethbackend.proto) -- blockchain events and core technical information (versions, etc)
+- [KV, proto](../remote/kv.proto) -- database access
+
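+As an illustrative sketch only (the address, the generated-stub import path, and the `Version` call are assumptions, not shipped code), an API service could talk to Core's KV interface like this:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/protobuf/types/known/emptypb"
+
+	// Hypothetical import path for Go stubs generated from remote/kv.proto.
+	"github.com/ledgerwatch/interfaces/remote"
+)
+
+func main() {
+	// Core's gRPC listen address is an assumption for this sketch.
+	conn, err := grpc.Dial("127.0.0.1:9090",
+		grpc.WithTransportCredentials(insecure.NewCredentials()))
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer conn.Close()
+
+	kv := remote.NewKVClient(conn)
+	// Checking the interface version first follows the compatibility
+	// convention described earlier (assuming kv.proto defines Version).
+	ver, err := kv.Version(context.Background(), &emptypb.Empty{})
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("KV interface version: %v", ver)
+}
+```
+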
+## 2. Sentry
+
+Sentry is the component connecting the node to the p2p network of the blockchain. In the case of Erigon and Ethereum, it implements the [`eth/65`, `eth/66`, etc.](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#change-log) protocols via [devp2p](https://github.com/ethereum/devp2p).
+
+Sentry accepts connections from [Core] and [Transaction Pool] components.
+
+Erigon has the following interface for sentry:
+- [P2Psentry, proto](../p2psentry/sentry.proto) -- sending/receiving messages and the peer penalization mechanism.
+
+Both the [transaction pool] and the [core] use the same interface.
+
+## 3. Transaction Pool
+
+The transaction pool contains valid transactions that are gossiped around the network but aren't mined yet. The transaction pool validates transactions that it gets from [Sentry] and, in case the transaction is valid, adds it to its own in-memory storage. Please note that at the time of writing, the Transaction Pool component
+has not been split out yet, but this should happen relatively soon.
+
+Miners use this component to get candidate transactions for the block.
+
+Separating the tx pool into a separate component makes forks like [mev-geth](https://github.com/flashbots/mev-geth) unnecessary, because it could be just a separate tx pool implementation.
+
+The Transaction Pool connects to both Sentry and Core. Sentry provides new transactions to the tx pool, and Core sends events to remove txs when a block containing them is discovered, whether from peers or through mining. Also, Core can re-add txs into the transaction pool in case of chain splits.
+
+Erigon has the following interfaces for the transaction pool:
+- [txpool, proto](../txpool/txpool.proto)
+- [txpool_control, proto](../txpool/txpool_control.proto)
+- [mining, proto](../txpool/mining.proto)
+
+See more about the architecture: https://github.com/ledgerwatch/erigon/wiki/Transaction-Pool-Design
+
+## 4. Core
+
+Core is the passive part of the replicating state machine that is a blockchain. Core maintains its state and reacts to the protocol messages from the
+outside, with the goal of synchronizing its state with other nodes in the network. This synchronization is achieved by applying or reverting state
+transitions.
+
+Currently, Core is the largest and most complex component, and it has its own internal structure. State transitions are split into stages,
+and that gives rise to "[Staged Sync](./staged-sync.md)". In staged sync, we consider two kinds of state transitions: forward state transitions
+and reverts of previous state transitions (also called "Unwind"). Forward state transitions are split into the invocation of functions in a certain
+order. At the time of writing, there are 18 such functions, representing "stages". Reverts of previous state transitions are performed by invoking
+another array of functions, also in a specific order. See [Staged Sync Architecture](./staged-sync.md) for more information on Staged Sync.
+
+Core connects to [Sentry] and [Consensus Engine], and accepts connections from [Transaction Pool] and [API Service].
+
+## 5. Consensus Engine
+
+Consensus Engine is the component that abstracts away consensus mechanisms like Ethash Proof Of Work, ProgPoW Proof of Work, and Clique Proof Of Authority,
+and in the future also the AuRa Proof Of Authority and Proof Of Stake mechanisms. Note that at the time of writing, the Consensus Engine split has not been
+done yet, but some [work has been done on the interface](https://github.com/ledgerwatch/erigon/wiki/Consensus-Engine-separation).
+
+Erigon has the following interface for the consensus engine:
+- [consensus_engine, proto](../consensus_engine/consensus.proto)
+
+## 6. Downloader
+
+The Downloader component abstracts away the functionality of delivering some parts of the database using "out of band" protocols like BitTorrent,
+IPFS, Swarm and others.
diff --git a/deps/github.com/ledgerwatch/interfaces/_docs/staged-sync.drawio b/deps/github.com/ledgerwatch/interfaces/_docs/staged-sync.drawio
new file mode 100644
index 0000000..d2a1824
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/_docs/staged-sync.drawio
@@ -0,0 +1 @@
+7V1bk9q4Ev41PA5l+c7j3LKb2lymMqnd5OmUATN4x2COMZnh/Poj4wtYLWEbLLtxSCoVkI0MX7f6pu7WQLtfvP8ROqv552Dq+gNVmb4PtIeBquqWatD/4pFtMkJMRUlGXkJvmo7tB569/7npYHbbxpu668KNURD4kbcqDk6C5dKdRIUxJwyDt+Jts8AvPnXlvLhg4Hni+HD0H28azZNR1Vat/YU/Xe9lnj5aJ+YoubJwsrvTn7KeO9Pg7WBIexxo92EQRMmrxfu968fwZcAkn/sguJp/s9BdRlU+8PHWeRt5b38v7/8l/yjrLVE3wY2WzPLL8TfpL06/bLTNIHgJg80qvc0NI/edB7wzzm5X4Pci+a+ljOIGCzcKt/SWdKIbNeOJjEmyOd4OENfTsfkB2MROB52Uyi/55Hsc6IsUihqwkHJYKCrLqRtPQgba3dvci9znlTOJr77RxUDH5tHCTy8L4TuESUwgiF0KlVYVKUMWUipA6nm7nNCR5yheWqpCAHIUg6gIzzoKg1f3PvCDkI4sgyW9827m+T4z5Pjey5K+nVDMXDp+FyPq0dV6m15YeNNp/BguPfYUU2SQJJsFkmTEoYgqiyA6IMgsCN+ccBqLv81yEnnBEhkzvxeZOeVtEwKZL/hWkDQAkpvlm7e8OCBHRsdAmgDIVbhZuu3iWKKEiFUETTW6Zj+re9Vso1PMNhrFbCFXzKMyxQyRu3TFLCAJEsWcPR+lZj4OHTbNTKB9jkY110Kyc9VMoP2OTjcTgk01E57bbPqx8JwF9EcdwmX+dxNkF27WuwDHLb1Bt1fv+4v01cvu/3t1cEc5yHQWMY7L8XpVPuonM3xIHp1OdMGy/Tg3aCrDDRy5TsxWuQG6XAB+2Zaajs9UI9B/6komJxRCbKwR6CIx1tqXS17RtYiCxVyD/hcec60EO3T2GnTb0Nhr9aDs3mCDfh06g81AZ7Cp0GHgLWZOsLp17IrQ6XbXDJdp7a7lYD2rEAFuFazC9dxZxS9nvvt+G+9WUjDc5TR9+TDxnfXamxTBOtTnA1WbzWbqZAKUP70yNcemYebwulOw01kK7gF4Bge7bCx0fSfyfhWn5wGaPuEp8HZO0ntRrWekU4ziDOtgE07c9EN7qpTOYzPzRE744kZgHoq2sz24bRXfsK7+ddPH7HklmXDPOTmgZzATtKXHm8mrG3FlVkzqT87Y9YucU93sC13qqKbeSGzkpZDQyY27gfFwbMlWcWWOrBfhAleGSgbCtvDAU5kvuyWYzdZuNGCXexNEg0b9VQJwlxTRRsMicU8WApypJMkB8ZPkigLol+SiAPLXZYoCs3eigLdvVS9gqOrXgOFJpiG7s6tr0DS0WrUMoQsnQy9MDdee6jy9YKtjzbwIvcBkhhknawV2Ilk6QfAcqRohC1HwNILWE40w6ptG0CqkGF6FwO4DOmtnnS4G4FSyBIHwSXJFAQzW5KJA74co0I5r+0sUBefvJl+Nw4aMQ6Nr41C7hg0r6gVdaSxowJlKkl4QP0muXjgSP+yLXuhd/FBrJ37YAxNRsxvyE8FEksSA6DlyhcCRyKHREyHQu8ihViHj/WoP7NYUu3V+shAQ7cE3LQQEz5ErBGDwsW87icmS6ZMQ0GGE7+ohtuQh6uwq7dpD1HmRwysztMMMKrJwgQ4DfkLzYLL1PQpHqAnQOiDPOAHu0zgfcCavLzs4v24iOo2b0THp2ECMhsIxDL4mL3VVVzMXupCcPZKGMYzPrdOs7FUYvFAVtwaYt14jk+0npMBZvKoItVXOhIGsDLVgE602ET7MeGmqPMxMaZhZXAvsOX0bhNE8eAmWjv+4H2WE3P6eT0GwSsH6142ibdpcxdlEQRFK992Lfhy8/plOFb9+eD98s83eLOmv/XH4ZveZoZG93X9s9y773KCiC0ER2Nnqx6BKRUBii5euXsgEla298yg6wkJR0h+KCiJ9LVEUhlKRppLfsNrU0jkCrt2WGtCR+fZEp1IeHHexgy0zKcdhZk3ePn2k4386S2rnhZ2DylqAeYurzpSGUcEELIsGZQJDOVQBQ9WoJjNIZzIj46eTtUA7oSlVZNbWDU2pzHalSZiJBKGppgIRRoV+YafxmnWc1xrnmc52LZmCMoutKa3MCkxcwmILeWWzAgxD/PX3eeIZxJ7N3Z80fnEwnvyBUewPRvz39rYZUa8xZWwmx9nncUpeO9y8qEdQaX7DVpojKDU38JSaG9hLzY3SUvP+dewTEQVJqbkhv9T81D1/CWhjK043EBen14Oy8+J04wKK028Yu4l0X52eLQFUal3jLMyW1bqJpw2vKXAa0Kh1E0YBet/vT0QUJGrdhM7yJav1ErSxqXUT+kpo1Ho9KDtX66MKUZ/JJvyVA1c1BFTYcTo13Gg1EG88qpZLI0qZniqNQpqdJsgZbJ/9U6OQBmtCVYxC1k6WZ2ySNPgp/FpMlJWkG/dS8+lGUMw8P355ePz2TAeV4XBIlKGi7P6BNXOR2XWj473Hb+hPNe3ixvqZ+XXpLDfMJ+Sl25kwnITfjdFHnWtcXr3CtStqK4lpgB0Mi8MOrbZFNRE0sAderclxCtr2avH0sDexN7E3YUCp931RRURB4tVmcqUvXu1ldb63YEgMj1d7Wa3vLRgxw2/ldZ/CZUPj+El9AkjRmbzVOpaVefq5H2ymtRd/w1v6JlP5Qzh48nWctPwtW1KZ76A87FE1ZcaumpqZbMR3llsDAgBsTUDlhgAWOxOb0yE5u8aGxvPdp6/3f/U1qmCXFe4SLWuTwoQDzuSZ9qIKI2g45RkXw6e8kETZXVKI8lkoBWQX8kgVwTcaYwgQlavTLG6RjyVLCo+gYVVGHTZROd9o+62oCWUuz7Brm5rQtmuamuz9VCiLJ/nyW7EEu8AtrtvUMksQpfe7WN226TMZqpu2PjyxAwOYyzJG7FyNNWQCX9s6bhrqAu6Wus1ElN9tnyldr73eaCIqvz9rFwWfw4pyq2aBIBvLc1x7xu0zY05sdzxr1m8lSlXHlSiCDrPtFBXmX/RgeU/mTuh77s5giPfG2J2u3a3/ye9iTQ+6uif7Dyc3p2PsrbPgnVIjOrwzG+KbOedExU5liLNsEhsULXBMktxuKRgkmjSDhNOstQtpwBDEJVPDtXgEGZmW5tTpBFVphZpVVyjptJA7/6KH+x6ez6+XOGdBNGuME42NZCkG9M/ySqdWClYJp2H5b8j41qUwPqc6JWF8SDXMjE8QMH6FhPb+M759KYwPEzVSxodWO2bGV7tn/KxJwtXPkeTnjKquKUWw0VLPZ65/VhaoulILzS4rfEJpoz1mjuRh2yrKepQNgSs03sxm53Yk6cQbImzvAK5RyOtjp0krbc9zyK/Zox2cgqDrTF5EnijRWfooIdBYcnYfi5VwvI9Cf95OOCarUxnHP+jV3cJmfNm2yjKIRJAe7qgINlCmTvj6lX7Ki3Yyf6hwdlIqt1E+k1xsLhUnU5PwsgLz86kk2LZVWsP03bYlWmXbVpAp05JtSyC15q6zalSZVcb+zBNc2O1GBMpMhTEhrPYuffPkhh
794bEiumAbmOiVbeCO1x6nM2ry60SK66LzASx2hzXf7DjUVrzYu7xGSqn4LdBg6q4jb+lw8357QQgyIhgpUSP9dbIJ/e1d6OzOKyilQFF3iYRTGEQJzbWHOEVPtuuljIC24mQgc0hgSetvTUiFSi0JJMjtg3ISNGswmNaQTQO38p7iJWQwdEMaGWC88fHH92+3998BNco8z0OuFTE+Nh9UYXPzdY4P2mpzfUJgMOjT19uHU8iRc7BoEWAjB2eV8AjCq5WQRxC1kwbpFxPbFRRgnZvZB3J8s+Bt/cQ+9uAzKvQkHcDbQMSZtBJxVu0uePoyfbwaa0FwCNTZawFUL2jNrYUR3rWgtrIWOOeUdrM48szvNGBPRz548Y9BsAhEJ52eydq2iEmaZkfbMI5yoyzegmGXt3gDYRJs/Gn89f11EIuW0FmuZ0G4GGS7CdS2CcWxmb5sKlhMzv1I58QG2t5TqBAbkN1ChLCnaFSuGJbWQYRwTnIFuKDtuEBEh7Si6TpCOIeu9r5HtpAsSPqOEM7JpU03HpGHHrY+IoRzpCiaRiI1wey8kwjhHMnZQSuRJrAkvEyClsFUESh9tpckAqXPOYayM6ZKaIRZhXOOn/zshq+79N1vQVzZlOnyvulxEW2w6PGKB+gh42y0ehy6SHj0eD0wEehx6Feh0eP1sMSgx3l9Lq8Jve3s3mlsBwUeP7Sbz6tD/w0QQLpdx6bAILDrOGeinidkWg3m6IK6eTyWIOegwt73kBWSBYsRyDm0FpMReBw9dEYg51hWREZgLTC7NwI5h2yi6wtLRqALUfdMyNu7vdp7bWVrobP3OKdaejFYC3fqOVG8nObOeu6Ky7UuO9maPW2HpyZU3gqVaGl2k2mUnfedV6L8PLhSVoW9T0j6eXhNkJ20ewfLWY5ncySgNJOboVcuADPOTdY7kxcweGOE8cYMTj1C2+5YlcM48bpjBnp3jHOoZ//31kVkweKOcU4PReSOlaCHzh3jHPyJxx2rB2b37hjnHE80Mfl6WCKIyXNOtmtf6xsYtX6FTMO2uMoUlCcj0uGcfbLA3y6Dhef4OyZZLLxo4aZpwP3S5SLyoNHlnLZleHR5CXr4dDn0k/Do8npgdq/LOWfHodHl9bBEoMs5R8Fd462t7a8zxTIGb3G1G2/lnGcHCCB/f13BZ9tZTSdOthrRSciK2Rq0OEdT9H6DXUQWLFaghTrLsgQ9dFaghTnLsh6YCKxAFFmWZYqM5UGte9yuCZUdbrAjNPiga1qIwkydyAEU6cfmus500bJUjpnZ7uZ6NvN1c13a5rpZ+awYSxCYb2lz3ea55oKFWOdI51MbA2A+NTAr6838GXaFVj8zkJlJZ4+SlHxkM7GhA/754x/fbr9//FrFAWryOL5YUGTSWKR0m5DDJpN2pnP8noaMJPo2jOv2DmhGNdX8czCNzzV8/D8=
\ No newline at end of file
diff --git a/deps/github.com/ledgerwatch/interfaces/_docs/staged-sync.md b/deps/github.com/ledgerwatch/interfaces/_docs/staged-sync.md
new file mode 100644
index 0000000..88a9874
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/_docs/staged-sync.md
@@ -0,0 +1,103 @@
+Staged Sync (Erigon)
+---
+
+Staged Sync is a version of [Go-Ethereum](https://github.com/ethereum/go-ethereum)'s [[Full Sync]] that was rearchitected for better performance.
+
+## How The Sync Works
+
+Staged sync consists of independent stages that are launched in a specific order, one after another. This architecture allows for batch processing of data.
+
+After the last stage is finished, the process starts from the beginning, by looking for the new headers to download.
+
+If the app is restarted in between stages, it restarts from the first stage.
+
+![](./stages-overview.png)
+
+### Stage
+
+A Stage consists of: 
+* stage ID;
+* progress function;
+* unwind function;
+* prune function;
+
+Only ID and progress functions are required.
+
+Both progress and unwind functions can have side effects. In practice, usually only progress functions do (downloader interaction).
+
+Each function (progress, unwind, prune) has **input** DB buckets and **output** DB buckets. That allows building a dependency graph and running the functions in the right order (a Go sketch of this shape closes this subsection).
+
+![](./stages-ordering.png)
+
+That is important because unwinds do not always follow the reverse order of progress. A good example of that is the tx pool update, which is always the final stage.
+
+Each stage saves its own progress. In Ethereum, at least a couple of stages' progress is "special", so we account for that. For example, the progress of the _execution stage_ is the basis of many index-building stages.
+
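+A minimal Go sketch of that shape (the names are illustrative, not Erigon's actual types):
+
+```go
+package stagedsync
+
+import "context"
+
+// Bucket names a DB table that a stage function reads or writes.
+type Bucket string
+
+// Stage sketches the structure described above: an ID plus progress,
+// unwind and prune functions. Only the ID and Progress are required;
+// Unwind and Prune may be nil.
+type Stage struct {
+	ID       string
+	Progress func(ctx context.Context, from, to uint64) error
+	Unwind   func(ctx context.Context, to uint64) error
+	Prune    func(ctx context.Context) error
+
+	// Input/output buckets let us build a dependency graph and
+	// derive the order in which the functions must run.
+	Inputs  []Bucket
+	Outputs []Bucket
+}
+```
+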
+### Batch Processing
+
+![](./stages-batch-process.png)
+
+Each stage can work on a range of blocks. That is a huge performance improvement over sequential processing. 
+
+In Erigon genesis sync: 
+- first stage downloads all headers
+- then we download all bodies
+- then we execute all blocks
+- then we add a Merkle commitment (either from scratch or incremental)
+- then we build all indices
+- then we update the tx pool
+
+That allows grouping similar operations together and optimizing each stage for throughput. Also, some stages, like the commitment stage, require far fewer hash computations on genesis sync.
+
+That also allows DB insert optimisations; see the next part. The stage loop itself is sketched below.
+
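+Reusing the `Stage` type from the sketch above (same package, plus `fmt`), a forward pass is just the stages invoked in dependency order:
+
+```go
+// runForward is a sketch: invoke each stage's Progress function over the
+// same block range, in dependency order. After the last stage, the caller
+// starts over by looking for new headers to download.
+func runForward(ctx context.Context, stages []Stage, from, to uint64) error {
+	for _, s := range stages {
+		if err := s.Progress(ctx, from, to); err != nil {
+			return fmt.Errorf("stage %s: %w", s.ID, err)
+		}
+	}
+	return nil
+}
+```
+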
+### ETL and optimal DB inserts
+
+![](./stages-etl.png)
+
+B-tree based DBs (LMDB, MDBX, etc.) usually store data in pages. During random inserts, those pages get fragmented (remember Windows 98?) and data often needs to be moved between them to free up space in a given page.
+
+All of that is called **write amplification**: the more random the data you insert into a DB, the more expensive insertion gets.
+
+Luckily, if we insert keys in sorted order, this effect is not there: we fill pages one by one.
+
+That is where our ETL framework comes to the rescue. When batch processing data, instead of writing it directly to the database, we first extract it to a temp folder (which could be in RAM, if it fits). During extraction, we generate the keys for insertion. Then we load the data from these files in sorted order, using a heap. That way, keys are always inserted sorted.
+
+This approach also allows us to avoid overwrites in certain scenarios, because we can specify the right strategy when loading the data: keep only the latest value, convert values into a list, or anything else.
+
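+A toy sketch of the idea (not the real ETL framework): extract key/value pairs first, then insert them in key order so B-tree pages fill sequentially:
+
+```go
+package main
+
+import (
+	"fmt"
+	"sort"
+)
+
+type kv struct{ k, v string }
+
+// loadSorted mimics the load phase: inserting keys in sorted order avoids
+// the write amplification caused by random B-tree inserts.
+func loadSorted(extracted []kv, insert func(k, v string)) {
+	sort.Slice(extracted, func(i, j int) bool { return extracted[i].k < extracted[j].k })
+	for _, e := range extracted {
+		insert(e.k, e.v)
+	}
+}
+
+func main() {
+	// Stand-in for data extracted to temp files, keys in arrival order.
+	extracted := []kv{{"b", "2"}, {"a", "1"}, {"c", "3"}}
+	loadSorted(extracted, func(k, v string) { fmt.Println(k, v) })
+}
+```
+
+The real framework merges several pre-sorted temp files with a heap instead of sorting everything in memory.
+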
+### RPC calls and indices
+
+![](./stages-rpc-methods.png)
+
+Some stages build indices that serve the RPC API calls later on. That is why we can often introduce a new sync stage together with an API call that uses it. The API module can always request the state of any stage it needs and plan its execution accordingly.
+
+### Commitment As A Stage
+
+![](./stages-commitment.png)
+
+One more benefit of this approach is that the Merkle commitment (hex tries) in Erigon is its own stage with its own couple of buckets. Other stages are independent enough to either not change at all when/if the commitment mechanism changes, or to change minimally.
+
+### What About Parallel Execution?
+
+Some parallel execution could be possible, in cases where stages aren't dependent on each other in any way.
+However, in practice, most stages are bound by their I/O performance, so making those parallel won't bring any performance benefits.
+
+There could be benefits in having parallelism **inside** stages. For Erigon, there is the **senders recovery** stage, which is very CPU intensive and could benefit from multicore execution, so it launches as many worker threads as there are CPU cores (a sketch follows).
+
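+A minimal sketch of that intra-stage parallelism (illustrative only):
+
+```go
+package main
+
+import (
+	"fmt"
+	"runtime"
+	"sync"
+)
+
+func main() {
+	jobs := make(chan int)
+	var wg sync.WaitGroup
+	// One worker per CPU core, as the senders recovery stage does.
+	for w := 0; w < runtime.NumCPU(); w++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for tx := range jobs {
+				_ = tx // recover the sender from the tx signature here
+			}
+		}()
+	}
+	for tx := 0; tx < 1000; tx++ {
+		jobs <- tx
+	}
+	close(jobs)
+	wg.Wait()
+	fmt.Println("all senders recovered")
+}
+```
+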
+### Adding/Removing stages
+
+Adding stages is usually a simple task. On its first launch, a new stage runs as if from genesis, even though the node might already be in a synced state.
+
+Removing or altering a sync stage can be trickier, because the dependent stages then have to be considered.
+
+### Offline Capabilities
+
+Not every stage needs the network to work. Therefore, it is possible to run some stages, especially during genesis sync, regardless of whether the node has a connection. An example of that is indices building.
+
+### Risks & Tradeoffs
+
+* Malicious unwinds on genesis sync. Since we check commitments once per batch, this could be used to slow down genesis sync significantly if we sync/execute everything but get a wrong root hash in the end. After genesis sync is done, this is not an issue: even though we do batch processing, in practice at the tip this architecture becomes a block-by-block processor and is no worse than anything else.
+
+* Batch processing doesn't allow most of the API calls on partial data during genesis sync. Basically, regular geth halfway through sync will respond to RPC requests, but Erigon requires the commitment stage to complete before it allows these requests.
+
+Those tradeoffs are related to genesis sync, so in Erigon we focus on reducing the need for genesis sync (e.g., via off-chain snapshot distribution) to minimize these issues.
diff --git a/deps/github.com/ledgerwatch/interfaces/_docs/stages-batch-process.png b/deps/github.com/ledgerwatch/interfaces/_docs/stages-batch-process.png
new file mode 100644
index 0000000..7501442
Binary files /dev/null and b/deps/github.com/ledgerwatch/interfaces/_docs/stages-batch-process.png differ
diff --git a/deps/github.com/ledgerwatch/interfaces/_docs/stages-commitment.png b/deps/github.com/ledgerwatch/interfaces/_docs/stages-commitment.png
new file mode 100644
index 0000000..d847088
Binary files /dev/null and b/deps/github.com/ledgerwatch/interfaces/_docs/stages-commitment.png differ
diff --git a/deps/github.com/ledgerwatch/interfaces/_docs/stages-etl.png b/deps/github.com/ledgerwatch/interfaces/_docs/stages-etl.png
new file mode 100644
index 0000000..13d5e9d
Binary files /dev/null and b/deps/github.com/ledgerwatch/interfaces/_docs/stages-etl.png differ
diff --git a/deps/github.com/ledgerwatch/interfaces/_docs/stages-ordering.png b/deps/github.com/ledgerwatch/interfaces/_docs/stages-ordering.png
new file mode 100644
index 0000000..c4fd8e8
Binary files /dev/null and b/deps/github.com/ledgerwatch/interfaces/_docs/stages-ordering.png differ
diff --git a/deps/github.com/ledgerwatch/interfaces/_docs/stages-overview.png b/deps/github.com/ledgerwatch/interfaces/_docs/stages-overview.png
new file mode 100644
index 0000000..629820b
Binary files /dev/null and b/deps/github.com/ledgerwatch/interfaces/_docs/stages-overview.png differ
diff --git a/deps/github.com/ledgerwatch/interfaces/_docs/stages-rpc-methods.png b/deps/github.com/ledgerwatch/interfaces/_docs/stages-rpc-methods.png
new file mode 100644
index 0000000..e8f1e79
Binary files /dev/null and b/deps/github.com/ledgerwatch/interfaces/_docs/stages-rpc-methods.png differ
diff --git a/deps/github.com/ledgerwatch/interfaces/build.rs b/deps/github.com/ledgerwatch/interfaces/build.rs
new file mode 100644
index 0000000..1ab5ff1
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/build.rs
@@ -0,0 +1,56 @@
+use std::{env, path::PathBuf};
+
+fn config() -> prost_build::Config {
+    let mut config = prost_build::Config::new();
+    config.bytes(&["."]);
+    config
+}
+
+fn make_protos(protos: &[&str]) {
+    let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
+    tonic_build::configure()
+        .file_descriptor_set_path(out_dir.join("descriptor.bin"))
+        .compile_with_config(config(), protos, &["."])
+        .unwrap();
+}
+
+fn main() {
+    std::env::set_var("PROTOC", protobuf_src::protoc());
+
+    let mut protos = vec!["types/types.proto"];
+
+    if cfg!(feature = "sentry") {
+        protos.push("p2psentry/sentry.proto");
+    }
+
+    if cfg!(feature = "sentinel") {
+        protos.push("p2psentinel/sentinel.proto");
+    }
+
+    if cfg!(feature = "remotekv") {
+        protos.push("remote/ethbackend.proto");
+        protos.push("remote/kv.proto");
+    }
+
+    if cfg!(feature = "snapshotsync") {
+        protos.push("downloader/downloader.proto");
+    }
+
+    if cfg!(feature = "txpool") {
+        protos.push("txpool/mining.proto");
+        protos.push("txpool/txpool.proto");
+    }
+
+    if cfg!(feature = "execution") {
+        protos.push("execution/execution.proto");
+    }
+
+    if cfg!(feature = "web3") {
+        protos.push("web3/common.proto");
+        protos.push("web3/debug.proto");
+        protos.push("web3/eth.proto");
+        protos.push("web3/trace.proto");
+    }
+
+    make_protos(&protos);
+}
diff --git a/deps/github.com/ledgerwatch/interfaces/downloader/downloader.proto b/deps/github.com/ledgerwatch/interfaces/downloader/downloader.proto
new file mode 100644
index 0000000..e2f0be0
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/downloader/downloader.proto
@@ -0,0 +1,53 @@
+syntax = "proto3";
+
+import "google/protobuf/empty.proto";
+import "types/types.proto";
+
+option go_package = "./downloader;downloader";
+
+package downloader;
+
+service Downloader {
+  rpc Download (DownloadRequest) returns (google.protobuf.Empty) {}
+  rpc Verify (VerifyRequest) returns (google.protobuf.Empty) {}
+  rpc Stats (StatsRequest) returns (StatsReply) {}
+}
+
+// DownloadItem:
+// - if Erigon created a new snapshot and wants to seed it
+// - if Erigon wants to download files - it fills only the "torrent_hash" field
+message DownloadItem {
+  string path = 1;
+  types.H160 torrent_hash = 2; // will be resolved as magnet link
+}
+message DownloadRequest {
+  repeated DownloadItem items = 1; // single hash will be resolved as magnet link
+}
+
+message VerifyRequest {
+}
+
+
+message StatsRequest {
+}
+
+message StatsReply {
+  // First step on startup - "resolve metadata":
+  //   - understand the total amount of data to download
+  //   - ensure all piece hashes are available
+  //   - validate files after a crash
+  //   - when all metadata is ready - download/upload can start
+  int32  metadataReady = 1;
+  int32 filesTotal = 2;
+
+  int32 peersUnique = 4;
+  uint64 connectionsTotal = 5;
+
+  bool completed = 6;
+  float progress = 7;
+
+  uint64 bytesCompleted = 8;
+  uint64 bytesTotal = 9;
+  uint64 uploadRate = 10; // bytes/sec
+  uint64 downloadRate = 11; // bytes/sec
+}
diff --git a/deps/github.com/ledgerwatch/interfaces/downloader/keep.go b/deps/github.com/ledgerwatch/interfaces/downloader/keep.go
new file mode 100644
index 0000000..e518c75
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/downloader/keep.go
@@ -0,0 +1 @@
+package downloader
diff --git a/deps/github.com/ledgerwatch/interfaces/execution/execution.proto b/deps/github.com/ledgerwatch/interfaces/execution/execution.proto
new file mode 100644
index 0000000..4184485
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/execution/execution.proto
@@ -0,0 +1,105 @@
+syntax = "proto3";
+
+package execution;
+
+import "types/types.proto";
+
+option go_package = "./execution;execution";
+
+enum ValidationStatus {
+    Success = 0;      // State transition simulation is successful.
+    InvalidChain = 1; // State transition simulation is unsuccessful.
+    TooFarAway = 2;   // Chain hash is too far away from the current chain head and infeasible to validate.
+    MissingSegment = 3; // Chain segments are missing.
+}
+
+message ForkChoiceReceipt {
+    bool success = 1; // Forkchoice is either successful or unsuccessful.
+    types.H256 latestValidHash = 2; // Returns the latest valid hash in case execution halts.
+}
+
+// Result we receive after validation
+message ValidationReceipt {
+    ValidationStatus validationStatus = 1;
+    types.H256 latestValidHash = 2;
+    optional types.H256 missingHash = 3; // The missing hash, in case we receive MissingSegment so that we can reverse download it.
+};
+
+message IsCanonicalResponse {
+    bool canonical = 1; // Whether hash is canonical or not.
+}
+
+// Header is a block header for execution
+message Header {
+  types.H256 parentHash = 1;
+  types.H160 coinbase = 2;
+  types.H256 stateRoot = 3;
+  types.H256 receiptRoot = 4;
+  types.H2048 logsBloom = 5;
+  types.H256 mixDigest = 6;
+  uint64 blockNumber = 7;
+  uint64 gasLimit = 8;
+  uint64 gasUsed = 9;
+  uint64 timestamp = 10;
+  uint64 nonce = 11;
+  bytes extraData = 12;
+  types.H256 difficulty = 13;
+  types.H256 blockHash = 14; // We keep this so that we can validate it
+  types.H256 ommerHash = 15;
+  types.H256 transactionHash = 16;
+  optional types.H256 baseFeePerGas = 17;
+  optional types.H256 withdrawalHash = 18;
+}
+
+// Body is a block body for execution
+message BlockBody {
+  types.H256 blockHash = 1;
+  uint64 blockNumber = 2;
+  // Raw transactions in byte format.
+  repeated bytes transactions = 3;
+  repeated Header uncles = 4;
+  repeated types.Withdrawal withdrawals = 5;
+}
+
+message GetHeaderResponse {
+    optional Header header = 1;
+}
+
+message GetBodyResponse {
+    optional BlockBody body = 1;
+}
+
+message GetHeaderHashNumberResponse {
+    optional uint64 blockNumber = 1; // null if not found.
+}
+
+message GetSegmentRequest {
+    // Get headers/body by number or hash, invalid if none set.
+    optional uint64 blockNumber = 1;
+    optional types.H256 blockHash = 2;
+}
+
+message InsertHeadersRequest {
+    repeated Header headers = 1;
+}
+
+message InsertBodiesRequest {
+    repeated BlockBody bodies = 1;
+}
+
+message EmptyMessage {}
+
+service Execution {
+    // Chain Putters.
+    rpc InsertHeaders(InsertHeadersRequest) returns(EmptyMessage);
+    rpc InsertBodies(InsertBodiesRequest) returns(EmptyMessage);
+    // Chain Validation and ForkChoice.
+    rpc ValidateChain(types.H256) returns(ValidationReceipt);
+    rpc UpdateForkChoice(types.H256) returns(ForkChoiceReceipt);
+    rpc AssembleBlock(EmptyMessage) returns(types.ExecutionPayload); // Builds on top of current head.
+    // Chain Getters.
+    rpc GetHeader(GetSegmentRequest) returns(GetHeaderResponse);
+    rpc GetBody(GetSegmentRequest) returns(GetBodyResponse);
+    rpc IsCanonicalHash(types.H256) returns(IsCanonicalResponse);
+    rpc GetHeaderHashNumber(types.H256) returns(GetHeaderHashNumberResponse);
+}
\ No newline at end of file
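
A matching hedged sketch for the Execution getters above ("execution" stubs assumed, imports as in the earlier Downloader sketch): GetSegmentRequest carries two optional fields and is invalid with neither set, so a lookup helper sets exactly one.

    func headerByNumber(ctx context.Context, client execution.ExecutionClient, n uint64) (*execution.Header, error) {
        // Set exactly one of blockNumber/blockHash; both unset is invalid.
        resp, err := client.GetHeader(ctx, &execution.GetSegmentRequest{BlockNumber: &n})
        if err != nil {
            return nil, err
        }
        return resp.Header, nil // nil when no header matches
    }
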
diff --git a/deps/github.com/ledgerwatch/interfaces/execution/keep.go b/deps/github.com/ledgerwatch/interfaces/execution/keep.go
new file mode 100644
index 0000000..956d739
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/execution/keep.go
@@ -0,0 +1 @@
+package execution
diff --git a/deps/github.com/ledgerwatch/interfaces/go.mod b/deps/github.com/ledgerwatch/interfaces/go.mod
new file mode 100644
index 0000000..542a921
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/go.mod
@@ -0,0 +1,3 @@
+module github.com/ledgerwatch/interfaces
+
+go 1.18
diff --git a/deps/github.com/ledgerwatch/interfaces/keep.go b/deps/github.com/ledgerwatch/interfaces/keep.go
new file mode 100644
index 0000000..08badf2
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/keep.go
@@ -0,0 +1 @@
+package interfaces
diff --git a/deps/github.com/ledgerwatch/interfaces/p2psentinel/keep.go b/deps/github.com/ledgerwatch/interfaces/p2psentinel/keep.go
new file mode 100644
index 0000000..40aaa2f
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/p2psentinel/keep.go
@@ -0,0 +1 @@
+package p2psentinel
diff --git a/deps/github.com/ledgerwatch/interfaces/p2psentinel/sentinel.proto b/deps/github.com/ledgerwatch/interfaces/p2psentinel/sentinel.proto
new file mode 100644
index 0000000..3775e8e
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/p2psentinel/sentinel.proto
@@ -0,0 +1,57 @@
+syntax = "proto3";
+
+package sentinel;
+
+option go_package = "./sentinel;sentinel";
+
+import "types/types.proto";
+
+message EmptyMessage {}
+
+enum GossipType {
+    // Lightclient gossip
+    LightClientFinalityUpdateGossipType = 0;
+    LightClientOptimisticUpdateGossipType = 1;
+    // Legacy gossip
+    BeaconBlockGossipType = 2;
+    
+    // Global gossip topics.
+    AggregateAndProofGossipType = 3;
+    VoluntaryExitGossipType = 4;
+    ProposerSlashingGossipType = 5;
+    AttesterSlashingGossipType = 6;
+}
+
+message GossipData {
+    bytes data = 1; // SSZ encoded data
+    GossipType type = 2;
+}
+
+message Status {
+    uint32 fork_digest = 1; // 4 bytes can be represented in uint32.
+    types.H256 finalized_root = 2;
+    uint64 finalized_epoch = 3;
+    types.H256 head_root = 4;
+    uint64 head_slot = 5;
+}
+
+message PeerCount {
+    uint64 amount = 1;
+}
+
+message RequestData {
+    bytes data = 1; // SSZ encoded data
+    string topic = 2;
+}
+
+message ResponseData {
+    bytes data = 1; // prefix-stripped SSZ encoded data
+    bool error = 2; // did the peer encounter an error
+}
+
+service Sentinel {
+    rpc SubscribeGossip(EmptyMessage) returns (stream GossipData);
+    rpc SendRequest(RequestData) returns (ResponseData);
+    rpc SetStatus(Status) returns(EmptyMessage); // Set status for peer filtering.
+    rpc GetPeers(EmptyMessage) returns (PeerCount);
+}
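
A hedged Go sketch of a gossip consumer for the Sentinel service above ("sentinel" stubs assumed): SubscribeGossip opens a server stream, and each GossipData carries SSZ-encoded bytes tagged with a topic.

    func consumeGossip(ctx context.Context, client sentinel.SentinelClient) error {
        stream, err := client.SubscribeGossip(ctx, &sentinel.EmptyMessage{})
        if err != nil {
            return err
        }
        for {
            msg, err := stream.Recv() // blocks until the next gossip message
            if err != nil {
                return err // io.EOF on clean shutdown
            }
            if msg.Type == sentinel.GossipType_BeaconBlockGossipType {
                _ = msg.Data // SSZ-encoded beacon block; decoding left out
            }
        }
    }
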
diff --git a/deps/github.com/ledgerwatch/interfaces/p2psentry/keep.go b/deps/github.com/ledgerwatch/interfaces/p2psentry/keep.go
new file mode 100644
index 0000000..52deada
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/p2psentry/keep.go
@@ -0,0 +1 @@
+package p2psentry
diff --git a/deps/github.com/ledgerwatch/interfaces/p2psentry/sentry.proto b/deps/github.com/ledgerwatch/interfaces/p2psentry/sentry.proto
new file mode 100644
index 0000000..5a5527d
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/p2psentry/sentry.proto
@@ -0,0 +1,200 @@
+syntax = "proto3";
+
+import "google/protobuf/empty.proto";
+import "types/types.proto";
+
+package sentry;
+
+option go_package = "./sentry;sentry";
+
+enum MessageId {
+  // ======= eth 65 protocol ===========
+
+  STATUS_65 = 0;
+  GET_BLOCK_HEADERS_65 = 1;
+  BLOCK_HEADERS_65 = 2;
+  BLOCK_HASHES_65 = 3;
+  GET_BLOCK_BODIES_65 = 4;
+  BLOCK_BODIES_65 = 5;
+  GET_NODE_DATA_65 = 6;
+  NODE_DATA_65 = 7;
+  GET_RECEIPTS_65 = 8;
+  RECEIPTS_65 = 9;
+  NEW_BLOCK_HASHES_65 = 10;
+  NEW_BLOCK_65 = 11;
+  TRANSACTIONS_65 = 12;
+  NEW_POOLED_TRANSACTION_HASHES_65 = 13;
+  GET_POOLED_TRANSACTIONS_65 = 14;
+  POOLED_TRANSACTIONS_65 = 15;
+
+
+  // ======= eth 66 protocol ===========
+
+  // eth64 announcement messages (no id)
+  STATUS_66 = 17;
+  NEW_BLOCK_HASHES_66 = 18;
+  NEW_BLOCK_66 = 19;
+  TRANSACTIONS_66 = 20;
+
+  // eth65 announcement messages (no id)
+  NEW_POOLED_TRANSACTION_HASHES_66 = 21;
+
+  // eth66 messages with request-id
+  GET_BLOCK_HEADERS_66 = 22;
+  GET_BLOCK_BODIES_66 = 23;
+  GET_NODE_DATA_66 = 24;
+  GET_RECEIPTS_66 = 25;
+  GET_POOLED_TRANSACTIONS_66 = 26;
+  BLOCK_HEADERS_66 = 27;
+  BLOCK_BODIES_66 = 28;
+  NODE_DATA_66 = 29;
+  RECEIPTS_66 = 30;
+  POOLED_TRANSACTIONS_66 = 31;
+
+  // ======= eth 67 protocol ===========
+  // Version 67 removed the GetNodeData and NodeData messages.
+
+  // ======= eth 68 protocol ===========
+  NEW_POOLED_TRANSACTION_HASHES_68 = 32;
+}
+
+message OutboundMessageData {
+  MessageId id = 1;
+  bytes data = 2;
+}
+
+message SendMessageByMinBlockRequest {
+  OutboundMessageData data = 1;
+  uint64 min_block = 2;
+  uint64 max_peers = 3;
+}
+
+message SendMessageByIdRequest {
+  OutboundMessageData data = 1;
+  types.H512 peer_id = 2;
+}
+
+message SendMessageToRandomPeersRequest {
+  OutboundMessageData data = 1;
+  uint64 max_peers = 2;
+}
+
+message SentPeers {repeated types.H512 peers = 1;}
+
+enum PenaltyKind {Kick = 0;}
+
+message PenalizePeerRequest {
+  types.H512 peer_id = 1;
+  PenaltyKind penalty = 2;
+}
+
+message PeerMinBlockRequest {
+  types.H512 peer_id = 1;
+  uint64 min_block = 2;
+}
+
+message PeerUselessRequest {
+  types.H512 peer_id = 1;
+}
+
+message InboundMessage {
+  MessageId id = 1;
+  bytes data = 2;
+  types.H512 peer_id = 3;
+}
+
+message Forks {
+  types.H256 genesis = 1;
+  repeated uint64 height_forks = 2;
+  repeated uint64 time_forks = 3;
+}
+
+message StatusData {
+  uint64 network_id = 1;
+  types.H256 total_difficulty = 2;
+  types.H256 best_hash = 3;
+  Forks fork_data = 4;
+  uint64 max_block_height = 5;
+  uint64 max_block_time = 6;
+}
+
+enum Protocol {
+  ETH65 = 0;
+  ETH66 = 1;
+  ETH67 = 2;
+  ETH68 = 3;
+}
+
+message SetStatusReply {}
+
+message HandShakeReply {
+  Protocol protocol = 1;
+}
+
+message MessagesRequest {
+  repeated MessageId ids = 1;
+}
+
+message PeersReply {
+  repeated types.PeerInfo peers = 1;
+}
+
+message PeerCountRequest {}
+
+message PeerCountPerProtocol {
+  Protocol protocol = 1;
+  uint64 count = 2;
+} 
+
+message PeerCountReply {
+  uint64 count = 1;
+  repeated PeerCountPerProtocol countsPerProtocol = 2;
+}
+
+message PeerByIdRequest {types.H512 peer_id = 1;}
+
+message PeerByIdReply {optional types.PeerInfo peer = 1;}
+
+message PeerEventsRequest {}
+
+message PeerEvent {
+  enum PeerEventId {
+    // Happens after a successful sub-protocol handshake.
+    Connect = 0;
+    Disconnect = 1;
+  }
+  types.H512 peer_id = 1;
+  PeerEventId event_id = 2;
+}
+
+service Sentry {
+  // SetStatus - forces a new ETH client state on the sentry - network_id, max_block, etc.
+  rpc SetStatus(StatusData) returns (SetStatusReply);
+
+  rpc PenalizePeer(PenalizePeerRequest) returns (google.protobuf.Empty);
+  rpc PeerMinBlock(PeerMinBlockRequest) returns (google.protobuf.Empty);
+  rpc PeerUseless(PeerUselessRequest) returns (google.protobuf.Empty);
+
+  // HandShake - pre-requirement for all Send* methods - returns the list of supported ETH protocol versions;
+  // without knowing the protocol version it is impossible to encode a correct P2P message
+  rpc HandShake(google.protobuf.Empty) returns (HandShakeReply);
+  rpc SendMessageByMinBlock(SendMessageByMinBlockRequest) returns (SentPeers);
+  rpc SendMessageById(SendMessageByIdRequest) returns (SentPeers);
+  rpc SendMessageToRandomPeers(SendMessageToRandomPeersRequest)
+      returns (SentPeers);
+  rpc SendMessageToAll(OutboundMessageData) returns (SentPeers);
+
+  // Subscribe to receive messages.
+  // Calling multiple times with a different set of ids starts separate streams.
+  // It is possible to subscribe to the same set of ids more than once.
+  rpc Messages(MessagesRequest) returns (stream InboundMessage);
+
+  rpc Peers(google.protobuf.Empty) returns (PeersReply);
+  rpc PeerCount(PeerCountRequest) returns (PeerCountReply);
+  rpc PeerById(PeerByIdRequest) returns (PeerByIdReply);
+  // Subscribe to notifications about connected or lost peers.
+  rpc PeerEvents(PeerEventsRequest) returns (stream PeerEvent);
+
+  // NodeInfo returns a collection of metadata known about the host.
+  rpc NodeInfo(google.protobuf.Empty) returns(types.NodeInfoReply);
+}
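
The two comments above (HandShake before any Send*, one Messages stream per subscribed id set) suggest a client shape like this hedged Go sketch ("sentry" stubs assumed; emptypb is google.golang.org/protobuf/types/known/emptypb):

    func consumeBlockHeaders(ctx context.Context, client sentry.SentryClient) error {
        // HandShake first: without the negotiated protocol version a correct
        // P2P message cannot be encoded.
        if _, err := client.HandShake(ctx, &emptypb.Empty{}); err != nil {
            return err
        }
        stream, err := client.Messages(ctx, &sentry.MessagesRequest{
            Ids: []sentry.MessageId{sentry.MessageId_BLOCK_HEADERS_66},
        })
        if err != nil {
            return err
        }
        for {
            msg, err := stream.Recv()
            if err != nil {
                return err
            }
            _ = msg.Data // devp2p payload from peer msg.PeerId
        }
    }
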
diff --git a/deps/github.com/ledgerwatch/interfaces/remote/ethbackend.proto b/deps/github.com/ledgerwatch/interfaces/remote/ethbackend.proto
new file mode 100644
index 0000000..1f40d86
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/remote/ethbackend.proto
@@ -0,0 +1,227 @@
+syntax = "proto3";
+
+import "google/protobuf/empty.proto";
+import "types/types.proto";
+
+package remote;
+
+option go_package = "./remote;remote";
+
+service ETHBACKEND {
+  rpc Etherbase(EtherbaseRequest) returns (EtherbaseReply);
+
+  rpc NetVersion(NetVersionRequest) returns (NetVersionReply);
+
+  rpc NetPeerCount(NetPeerCountRequest) returns (NetPeerCountReply);
+
+  // ------------------------------------------------------------------------
+  // Engine API RPC requests natively implemented in the Erigon node backend
+  // See https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md
+
+  // Validate and possibly execute the payload.
+  rpc EngineNewPayload(types.ExecutionPayload) returns (EnginePayloadStatus);
+
+  // Update fork choice
+  rpc EngineForkChoiceUpdated(EngineForkChoiceUpdatedRequest) returns (EngineForkChoiceUpdatedResponse);
+
+  // Fetch Execution Payload using its ID.
+  rpc EngineGetPayload(EngineGetPayloadRequest) returns (EngineGetPayloadResponse);
+
+  rpc EngineGetPayloadBodiesByHashV1(EngineGetPayloadBodiesByHashV1Request) returns (EngineGetPayloadBodiesV1Response);
+
+  rpc EngineGetPayloadBodiesByRangeV1(EngineGetPayloadBodiesByRangeV1Request) returns (EngineGetPayloadBodiesV1Response);
+
+  // Fetch the blobs bundle using its ID.
+  rpc EngineGetBlobsBundleV1(EngineGetBlobsBundleRequest) returns (types.BlobsBundleV1);
+
+  // End of Engine API requests
+  // ------------------------------------------------------------------------
+
+  // Version returns the service version number
+  rpc Version(google.protobuf.Empty) returns (types.VersionReply);
+
+  // ProtocolVersion returns the Ethereum protocol version number (e.g. 66 for ETH66).
+  rpc ProtocolVersion(ProtocolVersionRequest) returns (ProtocolVersionReply);
+
+  // ClientVersion returns the Ethereum client version string using node name convention (e.g. TurboGeth/v2021.03.2-alpha/Linux).
+  rpc ClientVersion(ClientVersionRequest) returns (ClientVersionReply);
+
+  rpc Subscribe(SubscribeRequest) returns (stream SubscribeReply);
+
+  // Only one subscription is needed to serve all the users; LogsFilterRequest allows dynamically modifying the subscription
+  rpc SubscribeLogs(stream LogsFilterRequest) returns (stream SubscribeLogsReply);
+
+  // High-level method - can read a block from db or snapshots, or apply any other logic;
+  // it doesn't provide consistency
+  // Request fields are optional - it's ok to request a block only by hash or only by number
+  rpc Block(BlockRequest) returns (BlockReply);
+
+  // High-level method - can find a block number by txn hash;
+  // it doesn't provide consistency
+  rpc TxnLookup(TxnLookupRequest) returns (TxnLookupReply);
+
+  // NodeInfo collects and returns NodeInfo from all running sentry instances.
+  rpc NodeInfo(NodesInfoRequest) returns (NodesInfoReply);
+
+  // Peers collects and returns peers information from all running sentry instances.
+  rpc Peers(google.protobuf.Empty) returns (PeersReply);
+
+  rpc PendingBlock(google.protobuf.Empty) returns (PendingBlockReply);
+}
+
+enum Event {
+  HEADER = 0;
+  PENDING_LOGS = 1;
+  PENDING_BLOCK = 2;
+  // NEW_SNAPSHOT - one or many new snapshots (of snapshot sync) were created;
+  // the client needs to close old file descriptors and open new ones (on new segments),
+  // then the server can remove the old files
+  NEW_SNAPSHOT = 3;
+}
+
+message EtherbaseRequest {}
+
+message EtherbaseReply { types.H160 address = 1; }
+
+message NetVersionRequest {}
+
+message NetVersionReply { uint64 id = 1; }
+
+message NetPeerCountRequest {}
+
+message NetPeerCountReply { uint64 count = 1; }
+
+
+message EngineGetPayloadRequest {
+  uint64 payloadId = 1;
+}
+
+message EngineGetBlobsBundleRequest {
+  uint64 payloadId = 1;
+}
+
+enum EngineStatus {
+  VALID = 0;
+  INVALID = 1;
+  SYNCING = 2;
+  ACCEPTED = 3;
+  INVALID_BLOCK_HASH = 4;
+}
+
+message EnginePayloadStatus {
+  EngineStatus status = 1;
+  types.H256 latestValidHash = 2;
+  string validationError = 3;
+}
+
+message EnginePayloadAttributes {
+  uint32 version = 1; // v1 - no withdrawals, v2 - with withdrawals
+  uint64 timestamp = 2;
+  types.H256 prevRandao = 3;
+  types.H160 suggestedFeeRecipient = 4;
+  repeated types.Withdrawal withdrawals = 5;
+}
+
+message EngineForkChoiceState {
+  types.H256 headBlockHash = 1;
+  types.H256 safeBlockHash = 2;
+  types.H256 finalizedBlockHash = 3;
+}
+
+message EngineForkChoiceUpdatedRequest {
+  EngineForkChoiceState forkchoiceState = 1;
+  EnginePayloadAttributes payloadAttributes = 2;
+}
+
+message EngineForkChoiceUpdatedResponse {
+  EnginePayloadStatus payloadStatus = 1;
+  uint64 payloadId = 2;
+}
+
+message EngineGetPayloadResponse {
+  types.ExecutionPayload executionPayload = 1;
+  types.H256 blockValue = 2;
+}
+
+message ProtocolVersionRequest {}
+
+message ProtocolVersionReply { uint64 id = 1; }
+
+message ClientVersionRequest {}
+
+message ClientVersionReply { string nodeName = 1; }
+
+message SubscribeRequest {
+  Event type = 1;
+}
+
+message SubscribeReply {
+  Event type = 1;
+  bytes data = 2;  //  serialized data
+}
+
+message LogsFilterRequest {
+  bool allAddresses = 1;
+  repeated types.H160 addresses = 2;
+  bool allTopics = 3;
+  repeated types.H256 topics = 4;
+}
+
+message SubscribeLogsReply {
+  types.H160 address = 1;
+  types.H256 blockHash = 2;
+  uint64 blockNumber = 3;
+  bytes data = 4;
+  uint64 logIndex = 5;
+  repeated types.H256 topics = 6;
+  types.H256 transactionHash = 7;
+  uint64 transactionIndex = 8;
+  bool removed = 9;
+}
+
+message BlockRequest {
+  uint64 blockHeight = 2;
+  types.H256 blockHash = 3;
+}
+
+message BlockReply {
+  bytes blockRlp = 1;
+  bytes senders = 2;
+}
+
+message TxnLookupRequest {
+  types.H256 txnHash = 1;
+}
+
+message TxnLookupReply {
+  uint64 blockNumber = 1;
+}
+
+message NodesInfoRequest {
+  uint32 limit = 1;
+}
+
+message NodesInfoReply {
+  repeated types.NodeInfoReply nodesInfo = 1;
+}
+
+message PeersReply {
+  repeated types.PeerInfo peers = 1;
+}
+
+message PendingBlockReply {
+  bytes blockRlp = 1;
+}
+
+message EngineGetPayloadBodiesByHashV1Request {
+  repeated types.H256 hashes = 1;
+}
+
+message EngineGetPayloadBodiesByRangeV1Request {
+  uint64 start = 1;
+  uint64 count = 2;
+}
+
+message EngineGetPayloadBodiesV1Response {
+  repeated types.ExecutionPayloadBodyV1 bodies = 1;
+}
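
As a hedged sketch of the Subscribe stream above ("remote" stubs assumed): the client picks an Event type and receives serialized payloads whose encoding the proto leaves opaque.

    func watchNewHeaders(ctx context.Context, client remote.ETHBACKENDClient) error {
        stream, err := client.Subscribe(ctx, &remote.SubscribeRequest{Type: remote.Event_HEADER})
        if err != nil {
            return err
        }
        for {
            ev, err := stream.Recv()
            if err != nil {
                return err
            }
            _ = ev.Data // serialized header; format not specified by the proto
        }
    }
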
diff --git a/deps/github.com/ledgerwatch/interfaces/remote/keep.go b/deps/github.com/ledgerwatch/interfaces/remote/keep.go
new file mode 100644
index 0000000..fbe5b64
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/remote/keep.go
@@ -0,0 +1 @@
+package remote
diff --git a/deps/github.com/ledgerwatch/interfaces/remote/kv.proto b/deps/github.com/ledgerwatch/interfaces/remote/kv.proto
new file mode 100644
index 0000000..ca7ff18
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/remote/kv.proto
@@ -0,0 +1,251 @@
+syntax = "proto3";
+
+import "google/protobuf/empty.proto";
+import "types/types.proto";
+
+package remote;
+
+option go_package = "./remote;remote";
+
+
+//Variables Naming:
+//  ts - TimeStamp
+//  tx - Database Transaction
+//  txn - Ethereum Transaction (and TxNum is also the number of an Ethereum Transaction)
+//  RoTx - Read-Only Database Transaction
+//  RwTx - Read-Write Database Transaction
+//  k - key
+//  v - value
+
+//Methods Naming:
+// Get: exact match of criteria
+// Range: [from, to)
+// Each: [from, INF)
+// Prefix: Has(k, prefix)
+// Amount: [from, INF) AND maximum N records
+
+//Entity Naming:
+// State: simple table in db
+// InvertedIndex: supports range-scans
+// History: can return the value of key K as of a given TimeStamp. Doesn't know about the latest/current value of key K. Returns NIL if K hasn't changed after the TimeStamp.
+// Domain: like History, but also aware of the latest/current value of key K.
+
+// Provides methods to access key-value data
+service KV {
+  // Version returns the service version number
+  rpc Version(google.protobuf.Empty) returns (types.VersionReply);
+
+  // Tx exposes read-only transactions for the key-value store
+  //
+  // When a tx is opened, the client must first receive 1 message from the server carrying the txID
+  // When a cursor is opened, the client must first receive 1 message from the server carrying the cursorID
+  // After that, only the client initiates messages (see the Go sketch after this service definition)
+  rpc Tx(stream Cursor) returns (stream Pair);
+
+  rpc StateChanges(StateChangeRequest) returns (stream StateChangeBatch);
+
+  // Snapshots returns list of current snapshot files. Then client can just open all of them.
+  rpc Snapshots(SnapshotsRequest) returns (SnapshotsReply);
+
+
+  //Temporal methods
+  rpc DomainGet(DomainGetReq) returns (DomainGetReply);
+  rpc HistoryGet(HistoryGetReq) returns (HistoryGetReply);
+
+  rpc IndexRange(IndexRangeReq) returns (IndexRangeReply);
+  //  rpc IndexStream(IndexRangeReq) returns (stream IndexRangeReply);
+
+
+  // Range [from, to)
+  // Range(from, nil) means [from, EndOfTable)
+  // Range(nil, to)   means [StartOfTable, to)
+  // If orderAscend=false the server expects `from` > `to`. Example: Range("B", "A")
+  rpc Range(RangeReq) returns (Pairs);
+  //  rpc Stream(RangeReq) returns (stream Pairs);
+}
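
A hedged Go sketch of the Tx handshake described in the service comments above ("remote" stubs assumed): exactly one server message arrives after the tx opens and one after a cursor opens; every exchange after that is client-initiated.

    func firstKV(ctx context.Context, client remote.KVClient, table string) (k, v []byte, err error) {
        tx, err := client.Tx(ctx) // bidirectional stream
        if err != nil {
            return nil, nil, err
        }
        if _, err = tx.Recv(); err != nil { // 1st server message: carries txID/viewID
            return nil, nil, err
        }
        if err = tx.Send(&remote.Cursor{Op: remote.Op_OPEN, BucketName: table}); err != nil {
            return nil, nil, err
        }
        opened, err := tx.Recv() // 2nd server message: carries cursorID
        if err != nil {
            return nil, nil, err
        }
        // From here on, only the client initiates.
        if err = tx.Send(&remote.Cursor{Op: remote.Op_FIRST, Cursor: opened.CursorID}); err != nil {
            return nil, nil, err
        }
        pair, err := tx.Recv()
        if err != nil {
            return nil, nil, err
        }
        return pair.K, pair.V, nil
    }
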
+
+enum Op {
+  FIRST = 0;
+  FIRST_DUP = 1;
+  SEEK = 2;
+  SEEK_BOTH = 3;
+  CURRENT = 4;
+  LAST = 6;
+  LAST_DUP = 7;
+  NEXT = 8;
+  NEXT_DUP = 9;
+  NEXT_NO_DUP = 11;
+  PREV = 12;
+  PREV_DUP = 13;
+  PREV_NO_DUP = 14;
+  SEEK_EXACT = 15;
+  SEEK_BOTH_EXACT = 16;
+
+  OPEN = 30;
+  CLOSE = 31;
+  OPEN_DUP_SORT = 32;
+
+  COUNT = 33;
+}
+
+message Cursor {
+  Op op = 1;
+  string bucketName = 2;
+  uint32 cursor = 3;
+  bytes k = 4;
+  bytes v = 5;
+}
+
+message Pair {
+  bytes k = 1;
+  bytes v = 2;
+  uint32 cursorID = 3; // send once after new cursor open
+  uint64 viewID = 4;   // return once after tx open. mdbx's tx.ViewID() - id of write transaction in db
+  uint64 txID = 5;     // return once after tx open. internal identifier - use it in other methods - to achieve a consistent DB view (to read data from the same DB tx on the server).
+}
+
+enum Action {
+  STORAGE = 0;     // Change only in the storage
+  UPSERT = 1;      // Change of balance or nonce (and optionally storage)
+  CODE = 2;        // Change of code (and optionally storage)
+  UPSERT_CODE = 3; // Change in (balance or nonce) and code (and optionally storage)
+  REMOVE = 4;      // Account is deleted
+}
+
+message StorageChange {
+  types.H256 location = 1;
+  bytes data = 2;
+}
+
+message AccountChange {
+  types.H160 address = 1;
+  uint64 incarnation = 2;
+  Action action = 3;
+  bytes data = 4; // nil if there is no UPSERT in action
+  bytes code = 5; // nil if there is no CODE in action
+  repeated StorageChange storageChanges = 6;
+}
+
+enum Direction {
+  FORWARD = 0;
+  UNWIND = 1;
+}
+
+// StateChangeBatch - list of StateDiff done in one DB transaction
+message StateChangeBatch {
+  uint64 stateVersionID = 1; // mdbx's tx.ID() - id of write transaction in db - where this changes happened
+  repeated StateChange changeBatch = 2;
+  uint64 pendingBlockBaseFee = 3; // BaseFee of the next block to be produced
+  uint64 blockGasLimit = 4; // GasLimit of the latest block - proxy for the gas limit of the next block to be produced
+}
+
+// StateChange - changes done by 1 block or by 1 unwind
+message StateChange {
+  Direction direction = 1;
+  uint64 blockHeight = 2;
+  types.H256 blockHash = 3;
+  repeated AccountChange changes = 4;
+  repeated bytes txs = 5;     // enable by withTransactions=true
+}
+
+message StateChangeRequest {
+  bool withStorage = 1;
+  bool withTransactions = 2;
+}
+
+message SnapshotsRequest {
+}
+
+message SnapshotsReply {
+  repeated string blocks_files = 1;
+  repeated string history_files = 2;
+}
+
+message RangeReq  {
+  uint64 tx_id = 1; // returned by .Tx()
+
+  // It's ok to query a wide/unlimited range of data; the server will use the `pagination params`,
+  // reply in limited batches/pages, and the client can decide whether to request the next page
+
+  // query params
+  string table = 2;
+  bytes from_prefix = 3;
+  bytes to_prefix = 4;
+  bool order_ascend = 5;
+  sint64 limit = 6;   // <= 0 means no limit
+
+  // pagination params
+  int32 page_size = 7; // <= 0 means server will choose
+  string page_token = 8;
+}
+
+
+//Temporal methods
+message DomainGetReq {
+  uint64 tx_id = 1; // returned by .Tx()
+
+  // query params
+  string table = 2;
+  bytes k = 3;
+  uint64 ts = 4;
+  bytes k2 = 5;
+}
+
+message DomainGetReply{
+  bytes v = 1;
+  bool ok = 2;
+}
+
+message HistoryGetReq {
+  uint64 tx_id = 1; // returned by .Tx()
+  string table = 2;
+  bytes k = 3;
+  uint64 ts = 4;
+}
+
+message  HistoryGetReply{
+  bytes v = 1;
+  bool ok = 2;
+}
+message IndexRangeReq {
+  uint64 tx_id = 1; // returned by .Tx()
+
+  // query params
+  string table = 2;
+  bytes k = 3;
+  sint64 from_ts = 4;    // -1 means Inf
+  sint64 to_ts = 5;      // -1 means Inf
+  bool order_ascend = 6;
+  sint64 limit = 7;       // <= 0 means no limit
+
+  // pagination params
+  int32 page_size = 8;    // <= 0 means server will choose
+  string page_token = 9;
+}
+
+message IndexRangeReply  {
+  repeated uint64 timestamps = 1; //TODO: it can be a bitmap
+
+  string next_page_token = 2;
+}
+
+message Pairs {
+  repeated bytes keys = 1; // TODO: replace by lengths+arena? Anyway, on the server we need a copy (serialization happens outside the tx)
+  repeated bytes values = 2;
+
+  string next_page_token = 3;
+  //  uint32 estimateTotal = 3; // send once after stream creation
+
+  // repeated sint64 lengths = 1; //A length of -1 means that the field is NULL
+  // bytes keys = 2;
+  // bytes values = 3;
+}
+
+message ParisPagination {
+  bytes next_key = 1;
+  sint64 limit = 2;
+}
+message IndexPagination {
+  sint64 next_time_stamp = 1;
+  sint64 limit = 2;
+}
\ No newline at end of file
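
The pagination comments in RangeReq above reduce to a simple client loop; a hedged Go sketch ("remote" stubs assumed, txID obtained from the Tx handshake shown earlier):

    func scanTable(ctx context.Context, client remote.KVClient, txID uint64, table string) error {
        req := &remote.RangeReq{TxId: txID, Table: table, OrderAscend: true, PageSize: 1024}
        for {
            page, err := client.Range(ctx, req)
            if err != nil {
                return err
            }
            for i := range page.Keys {
                _, _ = page.Keys[i], page.Values[i] // consume one key/value pair
            }
            if page.NextPageToken == "" {
                return nil // the server sent the last page
            }
            req.PageToken = page.NextPageToken // ask for the next page
        }
    }
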
diff --git a/deps/github.com/ledgerwatch/interfaces/src/lib.rs b/deps/github.com/ledgerwatch/interfaces/src/lib.rs
new file mode 100644
index 0000000..d5fbcd4
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/src/lib.rs
@@ -0,0 +1,153 @@
+pub mod types {
+    use arrayref::array_ref;
+
+    tonic::include_proto!("types");
+
+    macro_rules! U {
+        ($proto:ty, $h:ty, $u:ty) => {
+            impl From<$u> for $proto {
+                fn from(value: $u) -> Self {
+                    Self::from(<$h>::from(<[u8; <$h>::len_bytes()]>::from(value)))
+                }
+            }
+
+            impl From<$proto> for $u {
+                fn from(value: $proto) -> Self {
+                    Self::from(<$h>::from(value).0)
+                }
+            }
+        };
+    }
+
+    // to PB
+    impl From<ethereum_types::H128> for H128 {
+        fn from(value: ethereum_types::H128) -> Self {
+            Self {
+                hi: u64::from_be_bytes(*array_ref!(value, 0, 8)),
+                lo: u64::from_be_bytes(*array_ref!(value, 8, 8)),
+            }
+        }
+    }
+
+    impl From<ethereum_types::H160> for H160 {
+        fn from(value: ethereum_types::H160) -> Self {
+            Self {
+                hi: Some(ethereum_types::H128::from_slice(&value[..16]).into()),
+                lo: u32::from_be_bytes(*array_ref!(value, 16, 4)),
+            }
+        }
+    }
+
+    impl From<ethereum_types::H256> for H256 {
+        fn from(value: ethereum_types::H256) -> Self {
+            Self {
+                hi: Some(ethereum_types::H128::from_slice(&value[..16]).into()),
+                lo: Some(ethereum_types::H128::from_slice(&value[16..]).into()),
+            }
+        }
+    }
+
+    impl From<ethereum_types::H512> for H512 {
+        fn from(value: ethereum_types::H512) -> Self {
+            Self {
+                hi: Some(ethereum_types::H256::from_slice(&value[..32]).into()),
+                lo: Some(ethereum_types::H256::from_slice(&value[32..]).into()),
+            }
+        }
+    }
+
+    // from PB
+    impl From<H128> for ethereum_types::H128 {
+        fn from(value: H128) -> Self {
+            let mut v = [0; Self::len_bytes()];
+            v[..8].copy_from_slice(&value.hi.to_be_bytes());
+            v[8..].copy_from_slice(&value.lo.to_be_bytes());
+
+            v.into()
+        }
+    }
+
+    impl From<H160> for ethereum_types::H160 {
+        fn from(value: H160) -> Self {
+            type H = ethereum_types::H128;
+
+            let mut v = [0; Self::len_bytes()];
+            v[..H::len_bytes()]
+                .copy_from_slice(H::from(value.hi.unwrap_or_default()).as_fixed_bytes());
+            v[H::len_bytes()..].copy_from_slice(&value.lo.to_be_bytes());
+
+            v.into()
+        }
+    }
+
+    impl From<H256> for ethereum_types::H256 {
+        fn from(value: H256) -> Self {
+            type H = ethereum_types::H128;
+
+            let mut v = [0; Self::len_bytes()];
+            v[..H::len_bytes()]
+                .copy_from_slice(H::from(value.hi.unwrap_or_default()).as_fixed_bytes());
+            v[H::len_bytes()..]
+                .copy_from_slice(H::from(value.lo.unwrap_or_default()).as_fixed_bytes());
+
+            v.into()
+        }
+    }
+
+    impl From<H512> for ethereum_types::H512 {
+        fn from(value: H512) -> Self {
+            type H = ethereum_types::H256;
+
+            let mut v = [0; Self::len_bytes()];
+            v[..H::len_bytes()]
+                .copy_from_slice(H::from(value.hi.unwrap_or_default()).as_fixed_bytes());
+            v[H::len_bytes()..]
+                .copy_from_slice(H::from(value.lo.unwrap_or_default()).as_fixed_bytes());
+
+            v.into()
+        }
+    }
+
+    U!(H128, ethereum_types::H128, ethereum_types::U128);
+    U!(H256, ethereum_types::H256, ethereum_types::U256);
+    U!(H512, ethereum_types::H512, ethereum_types::U512);
+
+    impl From<ethnum::U256> for H256 {
+        fn from(v: ethnum::U256) -> Self {
+            ethereum_types::H256(v.to_be_bytes()).into()
+        }
+    }
+
+    impl From<H256> for ethnum::U256 {
+        fn from(v: H256) -> Self {
+            ethnum::U256::from_be_bytes(ethereum_types::H256::from(v).0)
+        }
+    }
+}
+
+#[cfg(feature = "sentry")]
+pub mod sentry {
+    tonic::include_proto!("sentry");
+}
+
+#[cfg(feature = "remotekv")]
+pub mod remotekv {
+    tonic::include_proto!("remote");
+}
+
+#[cfg(feature = "snapshotsync")]
+pub mod snapshotsync {
+    tonic::include_proto!("downloader");
+}
+
+#[cfg(feature = "txpool")]
+pub mod txpool {
+    tonic::include_proto!("txpool");
+}
+
+#[cfg(feature = "web3")]
+pub mod web3 {
+    tonic::include_proto!("web3");
+}
+
+pub const FILE_DESCRIPTOR_SET: &[u8] = tonic::include_file_descriptor_set!("descriptor");
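
The From impls above fix the wire layout: an H128 packs bytes 0..8 into hi and bytes 8..16 into lo as big-endian u64s, and the wider types nest H128s. A Go counterpart (hypothetical helper against the generated "types" package, using encoding/binary) mirrors that split:

    // h256FromBytes mirrors the Rust impls: hi = first 16 bytes, lo = last 16,
    // each half packed big-endian into an H128.
    func h256FromBytes(b [32]byte) *types.H256 {
        h128 := func(p []byte) *types.H128 {
            return &types.H128{
                Hi: binary.BigEndian.Uint64(p[:8]),
                Lo: binary.BigEndian.Uint64(p[8:16]),
            }
        }
        return &types.H256{Hi: h128(b[:16]), Lo: h128(b[16:])}
    }
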
diff --git a/deps/github.com/ledgerwatch/interfaces/turbo-geth-architecture.png b/deps/github.com/ledgerwatch/interfaces/turbo-geth-architecture.png
new file mode 100644
index 0000000..701d907
Binary files /dev/null and b/deps/github.com/ledgerwatch/interfaces/turbo-geth-architecture.png differ
diff --git a/deps/github.com/ledgerwatch/interfaces/turbo-geth.drawio b/deps/github.com/ledgerwatch/interfaces/turbo-geth.drawio
new file mode 100644
index 0000000..25efa8b
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/turbo-geth.drawio
@@ -0,0 +1 @@
+[compressed draw.io diagram payload omitted: turbo-geth architecture drawing]
3m3Fktd9/PntxJ1pfFptG2j7CiIEFzyuH8okE/OE0goYmjzV1ZPVBLnB8LwwCZSsAdSvRS26TJkJf04paf3/zYua6gs8r0510+4P6LrHvO72vRTLG6W5lIV4FGBqPoPEVplr2XW3vEuY3SUSSutp12mUk8kKkWJhHdbOxLuWZ3KJzMe1QYAkXLj08YF0t5Mja+ApxdyzQgiDhSbD8L3BVpzTb3BBD23HUQ83N6ATEfprQKps7YqDxL6O8IEUTkPE+xbVAOcZ4taZc0HVPyIETiD7qOZPMjH4dB78oloD2FLcEVGbBM7ZsXnX/4VW3uDtIWh1fGB2Vqv0X+/8z+FEdSmNgc0OigUkgJgvjvK/hRJ3+LtLSQuZ6NyTmgs2dIyYr1dADViz9yEAzzzevE8cPQ+fYqRRx/FzIRIdOHJuZLfNa/jKdZsVCfnt0Fc6KLPHHzUDzB2Nm35gSWXF7oN8FTP4tb6SH5Gbb0+iUQlZH6BV8/icxaXOSTppXNmZz8Ri5Eh4GMKtgAOueVlni24dkvgp3ykkHJFOxbPKA1U+d72YhNZVGAk9kh/+ncUAQQcTI8qYgdE8BCFUP/sTXg1QtYS/qVsi35iHBSZ8vxhwMGJDf+zvi5PzlkdcalwwVE2R5wDxCTiZ85Amx8lHIMMHfmb92vLsUlVfCIoPnyT4TriOWvG7va+g1f5MLSYYcWNQJZSbsV0VCAKEOR4X6u8ksBvWUVOW3tO0SA0tyZ6Qlh466he4dc3LUGrcUtaAOPxnYUh/iV5G/k1rESMi4MOWPKHtcbHYeN5zdei2NTD9LhW6zLmfHkPOCnluD0b7fmdE99ZnZe+vOTMpxbcm8AWb371j9vqGYsG7MLhr7od93Wlx7zNNpwiyjddvVFz6THB46LbwpSU+Og88YHwNqGwIrUX6aUZyFDDJse3+H8GgjAqWLvQyz9YtAg2HpRSw43Fghrbw5wNOTNq9xTjiG178nqZnQbjPPctVLY75XBGhdgKGprnowW9xx/mK8cmdYY43uhcr+XCc1E76RsNkbkc/XUX3R6/a+3CRj666P9GEoDgggxa5xAux8EWE5hD+HJVY/jd7bPIbq6AX3CDx3964rdy9iC179KvJnAvjuyUhXjUueZnmmqbd3iak10tXo8G9sM8KX4DokMZEYI8N94267fi3wFZ8imsnY0RnLzYRIv8s4j5diEm4MGiftGHv1hsHXdaWtASqgSSsDNYVXm8exhrxUEX+kx6J9QNwj0MaT2CGeKK2Dk3eifjLpvT3WiNH8za/dIKZxdbSR9FUvKaiiCD2DedVhZ4/eKvbPSM5REsPKrcWSRtpHnDc6zFbTp4MPsGXGNryVnAd+nkKJu+Mt6dVi/J6gowgPopDl9Y7uM0K8VSDwt+obsnw1kTMpWrlfblcABQGjUhH/MoWm5v+92767QqSeDOgChBL4Xt90zrE2WGZpf6BcsMUKeEcx3fXT5YJ3+ysri64ymA4+hkIAlmTlffnv9BRR+Hz/yX4y5NuYRBkNdD02YOe19gCjzMxxKDBAsDiadZJhxAVgQ/384ZA7oFIzOg1fvDcqzd1Ps1Vjr4s6y+eIeQu/xsOeKLNmzChmn85+DudhvThFcyd9DPAzbAgM+4UxzKukfem8wywJep6UwCS+9WTZ79oN3xbG36sMBIvTuobwbUqX1uzuXyOtLe1sTGfg+6O1Jmc+WUx1cIJLz1LLQi9YehTVXh1wntn8d8y+UUpkJWaYjKogzue3mlFlZxwidMvC3vv+CeaR+5CxbwwyYNh4kfRLbMa4+elcxSchaBvQyNVPRiO9LgwWnhnOE8ViVpqzF8dwtxv4Zo9oO4MezL2ZUAsFr9k84OwZSEkZrsv27BIZOvqIBQ2hhe60xzGFJ7jNIGB0joQiU1MlM8R5DlYzHNu7g1RXnvnBiRhrM1HlUKxOr9dszGdi+/V2xKvzb1fmjANA8ne+M/VVdeGKZDJ782Efq78W1H/E0oJEHpT2gyz5VBIkfS9s7lj4sMLovT8bZltqrRCLD24QWqVKcKiK/ZbB7vObjQ7a2uvQcpjvmB19zCyf97eg777VkFmrsu8YeQgSnhOHOVHvICiSs837bv4sFOh/8o/djn6q57SqRJ1z6Z/oQKaDU0F0PkBOBHKG1CVgnc4zwI/29ljy9h9mAL6S6vSfsbmf33dpbHLKm1nbjlpSGdz4OQuqVRs1q82mGR2ciE9TxJvOUPmebMGbk+eTaBykyk1H2HggoM2eU8ovYpoxvH2+EEw7DTlI5r5LKoSe48h26w1mMyh/EjWy4wDdnvgJolljmZof86acehS8S5kqSIQbB8tilMNydMX+dbuXtoRSiT2ZjNPmJqX2Ybwe3CJ9739lzH1OYLY9V5N3D1O1uzCzf3ol7aubXKzTTACZT8GVD3hJArUDnyZXBX9IxKtibEiprI1OX2Tnnadhwx+AjD21FQxWGEeK2pdRsYOnzQ0tATVmfHpLDt4WTdizlcgmgCZKFjeCiYe9Te0LNCJrcAtTYXROXf3fO67tDsY6TzaPiue37g+UIdezK6Gj1iRBGDrG4xxoaUod3xDGuMSK+rPcqMGMQmqfftfiaujiF7+oQBTgD6+D5g3V8SfKunWdLaNHrt7B2xnk8QAS3+2fnhnGW5atDQob1ha0vd/tGx413n+z6Tw3UOYT9wiyzIvRhvSW1lz25oycJi6EnpC7kGb0DmddnBg2ISOLTpOE0nfpXxt+LzWSx68E0rzdXKa76rsZqMupt+ECrOI9jSuXF5gSle2BTwF8kLicJ6tzFGofq/6t18qgVSc4POnOmW9E6n6NNqDvCAbuTxziMH3F+7yjJOrNUiLXXHheskqvL3/7sE+kMgPJ8m8MOF2YAlMCd/quRu3AFCsIahzuVBVNlovMT6ZxMmII2HNJQYZVD0lbU/MCeISQrFaaFgcfNXzUXc58+QPM8el8V106QE9oUOlAxqtzvYVGLHY9KOJCYkvjRF5B4e56F3wgsnE8/gHZmvLM5KhHEUFSxNqeBeBVwk3QsEY47SvOJUCbVPm9B+7x2G3s9U2USC/8X3f0WojYZ56W3MEwZ1l0xa9aEYZtrbnByQPnw27R4FIK4tnPkSVAYlXZoCH1JxwptEVXiHeqx8jh4QDg+FbOGwnssv7LDCsfD0ajYdWiFf3CynUielb4vzuzf5oQ9xogJ79zxA3KOBD2hUvKT0IekERe6v6wunYMADdH27IHa51+a+9Tv/B38hV7s4MEk/9kanGTDvMuPO+N/e/BnoRlYoFtKt4Nh5b8uvw48u6D/1h4vR+RdlIuLNQFk0DmOleyvyoIswAhCKB/kcB49BG9zjdhQMfx8dblFc2H4yE8y9Ab9POsv73GHHAwkiuK0VbvlRYNhbWn7jp+Xi8W/zR08jm9fUhx9Id+buH2LWtdIpuLtrfJbxvP9sc7lrjO0W7MUiHdwKqTv/I24IaBjpo6h5Vy6CbnCFApmHZEj9sFx1pHP6NEuY/vl5YqLF+XU9ocmL3/x/Dcpkwy4ZS17GSD4F6B01fwtayXBA2ywxF6i8
/FOmU/m4+6S0ttDu7f3DX7rkdU1E5gJAkAlANf+qfbrLa9LXBnk3c27BFmIqYT7kgkI+VCBkvl49bjyYrNBG7fFYm2A+4lj9/S936lbYcc1/375BB8/ekrnLegJ6iE/tfHOzzC7LJl+UPh1N88xYad1yVCWfwUX23up+eYLNrzoFId7W7Do7cMdJ5UpGVk1ccDGRZczq8tr2A+7nvAYY77w3A3fUoO4AHOm9Dgd+104JeA4YCzLZRc/x52SiQSHLD7Ck69MTc9gpo37VP5W5+MK59RU33LFH+v/YvlG0Y7mrr35uP5FPM9/kECswJxQ5j8DF48uQ4RK3xqh7r3gTFBW/qeb/oCJYHDUCoOFR5S90FC4DRbgi97HzaM+ODdPGTAzzINjuMeTH//LX+H9pa9wVxvUwyouZvYel6HkcQtEVwLdabycctT5+44gn/Fz2JAvxdke3Gc9fViPU9BcArIKSFQw8TC6w/ZPXJYeEXql92I+6kd5oG3bbdD2jq7T0pmkLtvvMuigIi5E5Ndci02OZqyBu+cg2o6unWsYLj8tyB+402w7kwtjzX5ts+4LV0lAcFcTV/t5KmbSrsvRONAi08Qw+NyRqROD/7LIuZzt3XatIkRluqRriQFcn7Es3Vedr6uh7bk2Mw0pG3Sgt5XqUSIeJwPwgX6i4iiTazX4+NkyH0tHRffv1IhYGxcNEW1YCdiNVxPN/C4DX3d1GeZm/1nw5wdp0PZI/p+79VaiB3cuUxaiEEsuJF1m7htRQfgQERjrTMKFhP69O5MBfwaHn1YepXIhPRVVvbm3EkDH8bBhZhnXz8WvFakkEnIt6ztvYvnWDs/dsxVAxmuGeiUOVpHMWTI17A11UCA+Bnsb6M8xl1ga0N6JFMpLnoD4elbdXY3hHvV5M7NYg+d98Kzwxg3K0N3AWgFJ9a0TPhbmte6VdudUE+V5aOpolbEbkQN5dCymaP55kdubgkXdCm4Otutd9S0ZF+xxPmLuZWnPV4RawsYhAG3Gc7vfSD5d9nlBowmLZ3hPpLUjjRJ/nXX7qC9qu98HWs1TjO+9EJD0PkGIYjwU4jgIUg8MRiB7pRUX33pOqtSeeyacMDwoCBxZaidZ5o50/FZWWZV45vlncNR2fZ1l2OV83+28Vdr24XEP+H9Enk2Wx/mKDoR7vUfYcVU3Vhb9eL7l9KwET3cCvWFFgKGE2PtwZUgaSTHsdroFrG64JCQ0yG3rffneJUbNqdN8eGALaHrx7N3HxXu0LPxgH98LfTNjxZ8aBqw9FoRwZB/ci5fsPzeUesfpI9vHaUiZB76fygAfqM+ILVeCunwgsaWvy8DyifgJyzu2ItPBh8phwvk1EO2LuAnLdALv5QtNh9vOeOTJq20sNHoLc0KEH18kGQIb/3g48oNYoeYRne95YUPLdO6Eofn1e07l28/D+BDzMiv0AMkLP4V73igu2QovTrUq3n3/xdFH7b8C2ONh5zUrspfu0B8tQ5TtbwEA2vwF1vz0mEX93mM3N1CRH1IrPZ8bbTLn5N9nf3JgRfsnmgeByjinw/4GZqFnwGHUD+wVm3DyV5p/8R99I2W6QsblcAWeIkx1lQoXwLo7pAN6IMXdecLTWKXBoLE+vRsxkrswtkwe+2jl99C+xDfP1zVmu1gAzznJDqOFrBa6ncnOb9sJmmvVvonIbnbWxh//LcEVp06PRUhFmOWV3cetVihY87WECHmZe/irgT8kHGogRsWlXJEDWBWeQ6zFP/ZGaNl7/KLLGRrDzNWTK8qwEsGI2VgULTQbaBEFdx9xFLXj3UROYHtgXdfvsabDXlcNwfz4iONRnRIf0pI11/hBsKptuuGjf7co6JulewK7BR9tiu/BrohKrjQ72dkzhjKFUQIG9coQdvQyL/bhXJfZu0BtPHWG8iB5hRkxONV7wD0+DKtFb55Vv1r1kA54RpNb0ZKDnh09H796Z8zQcd5ul0HVQwQSX45U/I5W2awhXED9wnr7jimSePE/hEKi4ZV/jkqLqwenwCXDYpnpf7kFHIGFhhXfO3prp7lUEIqAdXMpb3M9RHBGCmBGDOoR+EP86ikrBqac3bA3eZwwj91jVJw2i2xOrwsAOLaXjwJawPxJYC+p48ky77DI/KTpoZtho2astHtDmI82b8cDgXFa9QGKiXd8f6i/MY6B+mDd7V2vfz7bY3lpbmhesKm4y/L7YnmkvA43BTTA/pYw+6oSQszd+2jZX8bkW8e/zArjWv35ab/UYJEy/pEPhRG+fx2rXpPCXWKygzoLjdE97fUazGOlzfCQz/SfJ0VQsdJ5k8z03TAdQXPdMN85mkyZgA/lzlYUcfDksY8PvHjYoTM6iqDBHde8a/8gTTmOnuPi6j4xAjlqM2eBh/FK2lEPpGSQ5K6NvFT4iRXucxDvHRo7tt69cg+4UY/H7g06mGLzm3PoDuSfJPG197ICGKlppfYcK8U8OMAsCP8lIHCiidcW1hPMY568eW8UwsQGpxguQVd6b3h1rnH6P4QjPwVOFZDvstDXEzaL6TvvwiX+IXDiJ7U1F2Xd+2AFT4x5/2Ny8U8S7vr47dknNt65CLN8M88wCRVqpO8tBGwvWsgFXn4tflh246EDG0ryPz4RjszOdjDVgYJYw2XGe2+EiXVZfYZBQxO6aOeqqn72XLmtf7yvZeBqPcsIBC8Ejd6AL+0X9nijlkthim++zpQpIatEnruzJ9rNwpgAxg/+yUNO5Hhqs3nQX7RQL4gJ5t+dj5Q88Ub1ntBRvvhZ0o0C9wXKjQT0HvLet7CHRJn4WyEwqXwoGMO0p9+tDNNEgae3SZMGtZ/SFdBHtxZv9/PbTS8ml9NTmAiONZ2fVj509XLivCK5tk3MSy7P6JxIKoWqnNYO9UxZRxH3ZFywdJ12sjrT1H/yDnEBHftok/ZTss2o0Y9OnIZKhalV0QeqU6Sq5N5BLiRBTVG4ksl4v79LZJ0TqLlAYD7ps1SaOcnDAu7YhPPJwz3Q6nEiEcEIWeVXWgWi5nhPm2T96neQbL+ATyGm8ZJGh0RWzMqQSO5+C2+Y2ayR4sqX13wzbM1fU2wuh8rGvgfcuXTC3LgCRn9VJ58BJm0YQvRuJ+xsuVc2W9BS6vA3GKkZ/Qy4F5QfEas7n+ta4ok5AOXx71lhpvtpCNIT7YC0pkGMJU1ms/nhAA9d85rbEsGFlIIYprmp0VKJDfX4hYZrscxtfkYJFzBd09UzQpb264zy4A6EKnu0HM8vpw+XjvmgmvjzSMY7pIKbgHYEBAaeXcJMX1R525UJf4pKrZG40ih1fv9YpOTG15lZcw+/CqNsf4XkI/j8UAWsoNb9HhH5+47Q4Zcixq0jI4u0Gzdvx0tJ+1xfyrmJAO5IQBSM0Ee9eYH9s7yUYxocrH+5Tn8n4NcyhmkdbzMaO9Z/Woyq2e+MvpUM7LxsA777x3weAsyCNDfavTC2YZcTmN6VVF+UeGkjWNkvf6NBiHsscEzTnsp3a26/Lt+iidjk9csy8QvAlpPhNlXTk/55XqTHWrvP+L1iN/SIOlKT3hvS5we2Qiv8vckv
L1mczL1FQ3blPAcU+wODy4LNyPAb0aOlEguJB3W+9wNtVKjCFyKsdHGq3cnn1J69fnlj2BupX2NLzAjlzee62VVEw7C7fYu9jOSNuGgR4ZFT5DW/i8OuJhIbbtTL8Mj1bWW0L8TprB+D60FPFjPxspuYJjFqe955ndcjs0vq6YSHQZLINsPdVofntj/1Q2tClHLmx81/KKMPmIarLZhe8Jo/b4isgGg84IvK7QRI4bw+oje/2uWHhMpo6cLuonaXQWYo6FCrYfM59gTJ4jJB133hNwIeuj5srCqoeQ/Q3jUk9GVLUySpdDjF9sA+lLuRK+B92qbAd8bN5/2Gs0pwvXQsAY47qUiFG6z2lWsE9cAxPn9PiV6JQnxD/5MFuKlEfv+d7nBG7Lg48PwAF78QJTUDmDvlSAuX2eNYKBshvwyVrsnwn8JHj3H4RJGLCI/EqfUJN44Mdp4iTQQxoYlbN9X8og3P18R8Uei/iUjdmdG3lT2y6tlXPAeUdFHknfDKWoxz1E6JM3+yySVTWMVbjBsXf7GbNF+ZgzvjEqg3/lDHpP7tsRu3AscI6vOn7j0jt5azVT3YWiChdn/la8zCHhhF7Ek7HQn20XTs/htOJCVMqhv0TRA5BoVJPhvltUr9VcKbewgHiZVDSZnZ76l3tLDcTT1yJSNPgag0wO4HSAyONCEZZvrb40Yku2VD7juKgJeFDP2ZNVjRI7vPoP+Ku1MUo0MskgQo3+lf4P3X+1fXi7klDT3vtPRFcqh51c/3C5tZcJ6avqMk3OuPr/i+ADbDwR00pvK6sJArYt1+RcZLNju4S1fCaUe+K3n3mV08oa1wH/mVTvwYCSNq0eH3PnilCOiVjmIlM6xL3Weqqr5WE+noweAFtstbtAKPBJor+IbXATsLeHWYKAIGNVmANRk9foS/3InuDOlLIRcngSRtYTFcYpofmEX+YiPu/fb3zD8cBdEiNZIrcSDersgg6CG30qHHNPui8dsO1ylpzsUm4j4YvZlYFbE6tLaCJ1BxAMkfkp9mJVe4B4nr802FEIDoEMuH+d7D+rJlqjDcL8yEpoEhtCCUXLmSQtQku9/CJ/4bsKJa9oVPVjqrVV7rbZOLQ7f0D2LVC+6yNVuqdt8lUbLfI48Y4Sp/Q98NMLqZv1m3QqmeCuRtqyy7nTvKjG+cw8uoZD4QXNOpzcByNMRFlS2U4NhxlHIeImFgI7BpbU+TZrd/pzFC7i+oaf2F6oBspifgwyfRzVPK3Z2RBinRCCd+3eSknnRnBtSwKlyN/drAS/3ykNI7zJDsMwBEA8Xl9SCwG7V/6F6I1gvSSVIwmCZgYukFbIjqKSNbdFLvfoG9j1UP25kBAwI/ZwAOlM/2M+UkA7cALbI4Xlgrm2ibQbpZ5JEsE/IYwYr9ZIAySxm4YgfP8uGjO1VQ0KiHMygjfffG+WW4O8aF5Ze9X8dfqkwN2UvrjFiZrhi/HOIqYvoozz0m+sPsPMdsYX+J9ji1YXYUjy0w+Pfy0UmcuGQiE7FteQ0yYEeEcqXWEOS9Teh7ak0EXk4QDShFZPYI5d0gNCkeN65yQXV4yYoEd+Y90sDjXwBNQ1PIy7E06t4bqV2fWWsOcZrzVBODvXxMmtdP/7l0Z7mfPhGr9jj1tk646wfn0tAI8jRLvj8Qi1w+86kkSMwZ8dflLGVnp2voBPx7yJF/XcVNj5fvDkBCs1NLBvutSTZe/cKhdzrLtRD9ZYPl23P8jvM8m/xlUxKcUcseU38P7Rvxqvpjim8OXzAzP1jH8QdqSWQkDlCMPScgBDLYgyI8gVmXg/iBhAEmoBAlH37ZvSVde8mjEf/eOMHN3PY+Lhx/PwCDgEnvMkT5DXjdvho5SlmyYq21fcCDCiSJnfANlf6Xt3T664wpGJ004FcPZhkSa4rGqQYIxceP5zcF6uhL8bzVi42df0+OEL8cIbFRV9LwVGRpJn0culy42KXw/zL1ZduO6si2X3PeaQ08ih7TY0z3hsGA6U1r+PorsXKfc2vsqsrcmQuQFDFjzlAoZFKP8Hc45kY5HQH5mUyj3bK8+1PQClLQs8qgzDv/xcz62n/DqmsHYF4Di8mXc1fT5+LUQqdpJ2LLZ1XAKFIvqYfKjWR0psRBVuaklAic27M/F2xIPQQXm+wXDtto0AcTUhWGolWosbSD1e883zvFb8Z9X3ecPZiFjBw3HV4PfR/w3J8Y1To/hy2HzitfP38q5DrxyYehEFyl2JEcQhv7fjcVYe3JnPLT/x1N31IaJtFQfzv7VZ3xx1mZ+XcFjrdnuAtRgkJKBWjvMcoAI7V2q/3FuT3w5cElZyLkEqdAJt/r2IOEEYV5/mlRFNinxqH3AK+CqO2mdBKaL1Q88Rp19PIxK/GlfUJvIIfcI2pTKAXUnRL9WItCzXQqg9iwewyCnbdD8oUmvtJ91HfKPmvOPzDScXoSVYIec9cOb1O5Cifya7txY15SrhHBZc+51cRy8nwSw00QjZbihIqLnwcvbugzf1H6dG1wAzbYIAMQ5Wa6FTUSyI/hdkPBwPNvz7aRfyYYiTC/faEIOEMEnt/cLqJj4h5XEd8kYufvCRkKM6yvYdmuNiwZeT+hGX9W8Wok6YF5DLInQVGujg5KOnRVa/07KnrNL0g2fdE5P5fSrrT2Q772RSnT6p1bf46q6OFBwTFGIPKO/qj9cd1kiC0icxUJd36+sGahFw291thn1fNnX6XW4sWyvEYf2gd8r9yBWKqsrUTRcY58wbCBddwjbD6eraP1awwscWRRPE2OV3Jq9xIVMKFSodyKuWVxpqk+vloKp/M86xTvj/3nFeRfvkzOUMMp+SFrV97gjhC2zvLBB7GnQX71zhjnt/n3YU9w1EuoaEaNfGKAvEm8JnTZQzjw3lX5n31ZGcrp4Ih6oxQPz3FILVxVReh3b7SpZ7EMszkFm58jOO9mxHietjwXX75/1Efbnffp8ZviV5sQVhMKkX6+aVGmSl8eG+kVq4rOiDEn/PyXU+D17RfzQvWjytPn3ShljqeS3RvC9/PXL33c6YB5BZ973H4I737D0jvkbh9WTFk4jSfhc40dxfNZBHSBaEfPnecm4+CZZipdhLKC10eB9PnGFFNPIrZwLhldNL+/hM7+7Y5uYTzjm9f5t6DVmnO+NXI4A1InVGfKUivja/ADAZzJoq5PrmBqHkQ4JWoAuE2AEgOIWyIX+Nrisp1Ol4zOcTScGigL1VyM4YbywIg16Qd7KRSZ8fXv3OM9+rkFJf0eNk5ahuj9JP32uJwtwqwqfZW8q5831kRFZxSwhSm6smmu2yE0nqLJdUSIkSxNklBG3MarIWhmM/eUH6IbzlLOykxo6/Ao1In4Bs9Jt6fIpQrn8RGvc2IHhWKssNRPHFq3UkSuVQEBkJmGN2/FcYpIjkJknB3qdHyDTKwAtjfNyFzcqYSWY115QxpN3DtQcC65qDczPb5PEqplNI/6JiX2/q690PIUf6A001b5Lnj9nRa5Oh+yoO1zciE+T0n+SCA23jyJCvz5aE2dn+RLAeN8wI8FmEM5RgyD5xKvezRN6dH
TzKUWhyj+tzlE42eGrUeE4AvBKlfTLzQL17kqtv7tnH4nczWf2l3UzCr3ZAGI8Ze6T8CgXnxVD0ATiYIo6UJ9kIsa7cuXS9EcJ7cvGi6FMk78R0ReOEbm+aDRRj9/bGOKQzW2c4eI1jZQfi+ITZJGqItZ6pYEhP1JRREwUvo5HA8XZGBxOPG4jry99zf2YMT3zyotOyqIZplo57XeaPMqNuYp3Ndo6/A59oqSC0pXod7W2pGrkLVUWNK2H27CKUjp5MjbXA+UPPFdQepKHmsBqM6gaTEIL9z9b9XIEw4JBdc+wKIpCXoU5xWJ2Freqhvi/sgLLCepp9ahK7VkfOSo3A+XJV2kFT5LA9Z3KpVB0Goq5vk6BoODFeFIo9lp1EX6nuRA5/aB/z7hm2ThH9D7hGaGf1hbLW/kdUjS8t5RST0UVBzCWj+CHuDzN/DeoUQ0IFECJLBYE4DffK2+GM0Fc9ff0XozA8Jarc/f6RB+o6d0zRzVKq+5uUq/VDS8p4ETt+kwe76PjK+JtJzsL61gqSI2TjbBfanXT6sk8f74987s/3vnx/acVH4DylbXqxwCC//e9+qqBZlcWpQ0okTOMYek0a3R83Xf78KuisjD0DGuQeI2J1TT/RNuWuYCg1eLCDdWVwEf6cNR1HMQFBew9ycCDIInH5KuLvYCZuOBW2z9rpgvdjBiilpQ8Zz2c2qMmHoUqgxnu/+9fPUp7o0fR5V4+JtRkQXynZadGrovwQWMJ+j47/hSfAUC6b1EM3APoFXNXCIMK0l0XuU6EvvOrw5JAhOe/M7M7Ui/QXX40cb8blyM/hbaC8tZkWPTu4BstxGB5Gog/9aXNYjMAEDVAFrZGoEbcE2uPtwrQEXVGkqTQvybx7qnpoNO0Zl05Jfuu3+O08lSXN7emjFb6cdjMVwcVahKxY5mrRTfMoUvNv7i658ECO2FfBRFy3pBU4BkqEIqv0e0dSO5+WSNvu2j6Oj/+BFnk+yRIihlEfvrX/rwcMTNeaosr0BjFkg8jxR9bcBH+2h4wOrgl4FKNc0rA0X7542LmFn6XkDwP3/9IqbJlo8KzRpLHHR2FcEUfw25+L0g0nsm7KYJ559HVXGU029bMvM+1+VeDii4MI13tdIn1Gf5zG+jwUC5z7t45sfiK4JsAQFoR0YHTbyXt1Ntabh0Gyp2AU0lCcBiAtL6JQ/vDfD3DO31CQrn6UTEcH/EqBfsC20WtDBeZyjo+SeNNxb6eJc4Em7imNeBjuPIu6h7ZQnespW/IqwFT8EKrV2W9BK4kl5J3NDCVUQuFbxz7Mi2YyDGMAn/OoDKBX6YvLuxU+KGzmX2iBXq1xYC/BvkF3/Ah3mKN2ppm0M+Bj+dp/yjfKZbP88kSwSm4oTLLEsGjDFoxl/Fr5ZvKIumT2gHscVR4y4exwIoIVo04xSxU2+0QZEFq6iKw3Her3aQ8hNGaYG9WmHy5TtDod9Q4o/2yKY5lFzwRIHbE9G4T9AHLTtBO4drlVdTgMiP/dUQSUWQ7qFaMv45jonSvmzxp6WoWp66rXndYon8IaxCLXiyNm7zN83LbpAECVAC6RELJD3FbWgDDJRuA7bt26o/yvy8vC9OqaWt/jCLda7hIR9AtRzIt4snKfBq/+XPncAIx0FziRm5X5WshXo/940LYjEXgAmYvQ49KmY5LgtyuTiGNmRVUJmAlqyE3BhR117Xgt2u7eCbTFx97kaaoyM7yjS1X3C1ddA3HKpGSIliTc6n5iilVK5iAIiFNhvfVNRaWQHLQ3vk0xAu+7MD9nUsGzvRyhBCtKhBrLhfKG0X61hbfzjGK5acQ7H2YxJ+6u54qePqf0IaYhTz2TLBnfc9U5Nn7ixkPO4q3i61fizTd43cGLk7eEsmtqx4fPGabypICi9o4FpyjjwgQFYgq5D/r/iH4cPxi4CdorKxiAo/seonaRxyQkxqGmq/7/2g7YpPQ3zDHpSsK9XYuBgoGk0D9mf6UniAlVKpLUb535sOLZ/ezuzEhIEvkUF9ImYxvztmRol4jgQ3qgbRpKZ658c6GazX5jA7yXwWN6YyyLB+78+G5MiDL+kujlDM3FF8UYl/LRW8Iqg1EzPGCWiSBiSDJkG/KHBUm0NspMa97S9+vJo8+pznoYjmixQ1Em0r+mP1F4MYFPuohCvz1yk8NeAcmgREF89TduAgn3FCW171w0XdyNEOgfhwEdp/2VMiAJEody60i+0+MmVF31OaeET/ONu3JZUiJAsiCxtI1dug/4Xz2M151e7CQwKDjtjZspLJo7+C4vOe0dsMSgflNE2trwo68t/bVmxIzql7xK6PNCNodApRfgcYgeFbOuanD6D8TSIh3gFNrF5Y4A+KNz6mBNSHlB6GWO87SwYnX2MvdD75xyJ/FikQQ67h6wa3nfoN2TnrfMfzEi8wkmwnpeK3oA2Dzf+M/elxc2uN2iO6/8LXbCK8B3ZNMZldfKG5DWIojupeAAPZtXhDwlvWuSO8Nr1Uksa284FjhlpVS15sD/EUQs7WKzYm0MZs6uDVeBtG+wmGHPHnd0ziZ/I0QfX4pfwKpt0E6CSjzN9thGIoQrywI16OmB3Zx3E1YOV+J7vmfRIZ7NrHCcf6okMc+HIXnsSZ+UQXAROaya6f6cPwH24shi/g7LkrIjxDWfHYcy5eFzFYkRAoit9ODkrEF33QNLsija+C/rwa/05RcmPHFhNadMjdfj1xzWPQaLe533H0gbbledTy+6SsCKwr2524Yn3RRzQ7rCARJpkINDLFmkIgjYw6y97kgSK6cfv+3hjaMfhoyNvL82R3QjsRL7GPZZ2stzfsJVRXppIzOEEX0Y5GA0rt88YEDOwi1ACQEvOP9osQAO8ZmjkC+v7NCiWQVupwVmfbSa7PhwDTn3f16TXfbuoNUqXgY4Qe5x5EIJY6RlVf8NptMJEnzUNKI0MZNJXIIZAavBRSV1n/JEKCZ1HqUO/2SW/H3BRIdBXqLnKr1aOg4r6EFKqomC0rADKZbZ7tNqoAeMKVHefFGAh2cdwZb3JFqM8m9gi7v8lRirN39EtXo7BZooZ3fNj9PpxnIzbnDJoLVLFQz5NcQhS5ZEuJByqkdiPjwdWoIM6+vk/a0SZferLInGlnOEREQfFSs8mZt2fB7ZHmiuU0px3XfBlglwC7a06VQ05gvfHFzzUfxbgpr/qQeqMSaYd99BJ9tcabC2h5P1I/qakh7GjJVvD7wZBt3+54nkR2mQPP9P30lrCKy7PclTRx+PJBpY059duRkzUT3agX4t2bQ24kV4gEyarTw49oOyw2nc5/TbEjS5RfbCE4GXIb+zZ83Z4s1mNECdvblzUzrnBJ0O4KeNiplJdPjV+RYbd/HCtAANua1ynP4xub12nSrxdQ16n8AjhoZOpeYwPijyvhbECjgze4AXE3PFZEh5xkyrhxIpncQ+xdQ3wLu+RfFGNfjup/9pAeUZyTMZp+31/tiZEcUs1qwZXzTIceVpi+4xx1jF7imOmNXouvw6vOC7Oxe8aXUj1UaiyzKBsleiQUnoSQByQcH6LcxfpnHPYave
jIa2jzBmdnyGdo2fKOGNbc5r1+OJ33TFrHUD4RmGeo7kT70/jf6jtRWcYRoCjX/+LyTrsXEoSQzT1744ZLUY1mxiDIs5kO/4kUZDzaH/XqeErmxPEG+3xW5XgL5fG+o7/8SJH9rSB2SxHYbyU9VBBkO0RO87x8Hnk7wlvBU7/txtpHkWLsjbuhW6xoNKhyvv87vZJTTICWDWEAniZT/DMhu3sIF896+KkPUV91GsiUxapHaj4vR8ic+gqTbgaKKtfei6ZeJ58RvBUW+jimZl7c6S1UzHEPJP3ZZBFfSHLFGkcoTzF5zUFPZ+d1I19EnLFH6AX4oREpMWSUydMG391r4o+/+LXcYEFwNelDQ5Pf89bGtsMpWxQnwbWVzeqqfU/eTs9ur0Ip1LHSr+Xz8zWleRHSsfa5buLGGuWD7pba3Fu587/H9Gie5+Hu8cFoT55Jrxyf9zVS2uXDb7moX7oRJd72gjT/ZvF322qco7P5Tw7q9AtNtogXP8jZWdIhYED84jrU53a65L9tE5oXMNXf1FAeFz4UoqHlmNVexWc+T8rdnxWQhfyhPwispICEeLwNCjGQY343ek5Y9xelXmz0qE2yox93Nuxr+mTgeAuxL0SI5XmT5PAt7ITeldm1Ns/R5ETfWUzUoPXDdP0REUoTaD6LpF57Zhv2vSpcK5Qjh3oT5cL/nf+4mu+pbh7LnuQ2TEiVXrE9tTsdmf3dftAF+9qboOImd88LSYBkdhlIiCRE/mz7HMMAHk3EGpD/RUuGQ3M1S2/CytWgLPR5+v7uyWerCRhTzapNoqCLRshmJv02m7nrQCbdw1mhlzXOf5jf1C7KoW4SLTn38A9dHgPkgEUjaf9CG+/8q0ILafS2ccOdD00irjaSYfH3M+gPrI18Il+fb3YfGqX5nidvTw4oGVbjeP89jDkRZtkvJYi/e5GBQwLPextHod2qLvhc12gACQgUlf57qawbD/vj5hW5Wxyy/uNm/TiG6wO6wDg1Thwu3yL0VLzscpSk6CB/e4RMvlkQAz6hvN7YEC0B/I6/hTjfGa/eSlNzoXrrAfWcsdJFkGCBAp04B8ZxyIXoP8CdQ6I5oumo6C4CVhigzEkFsckHUi6qL5i3694gZzbaaRFZPInwzwNv3ndeJQGP3tG3YMAd/TcyN+r2e29Fb3eooynvfKQq9sTf6bA7CQXaoJ5TtXmSwd7Ni3MAZa52Pz7f5nVuckq7m1umaFoGorj5PvKGv4ml1WgxG0+6819jXhmHxrWXnnQvSNqRmdKFb1eRS7kKDLWu2sOQIGoz4jvaJvtVjeXyT7TvS/cvwMSB5Hd1BNR/iNdzdC4jq+Y8tNQYwSnn8GPGLSzy6G+hGg6itU5Sk36sk9lm8aDw7xh83FIoc0ONl+67Ag+AVZP4UgVXlSxnjKfKooyIvl9pYrToQxdRfo8x2u+VQXzJKq2/1E+t/q5NtHXZnpvxiNs1/BD6XoE4hmNS+u/eqsUJ1N1zxb39Ny+xmg0t4fQy/jupu/T+IgvZ8TuX+xw68cEz3Guo0fd7dHjt8V3ZZmhX2my4znlpK7TbjqYlewh53QDhg9KAX8NTvk5QqLUVGf2ar1z/8lrWjLJNBWuB6Qy/la8r89hi3nVYm/3U4g/VXWEYu259kATjC185M13OSKNExMge+JZ/0BSjPEtytaxICh99qH/Dw+ZKf8oGCTAe6sY30cbK2xx7ZP7YGZFXRY6O2sb7zj92TH1QM3gZ6xrzb522BH8ElvF6GAExtOsjrhM2xQ+a3+/CKVY7/DUGZLp79UlozGZf3EC8eYILnvjC/OBf0mo5oij6liBCLRhAEJ4tZs1A41/5Sn+H4jp6xk9oLbG3rdboo0YUR7LCDNgfm3IMfwUrRr11cKZ1S83LiDotgtbtOXi5LHAEgNQ4f1sOSedRT0xWcnntg7oRonFcPR/I9B7ULamzq+IT2/fV8u8edXtmxjuXrdux/ZSCxFNuIfO5s/sO6thWEK58OAOFgvR+m2iWeFABld8ClxI4c8+APUPyjBI4tzhTvaIUwFbMRSKoB67nTt96+FJYW9mynYEipvyaEVuR/gFcxGLyGHGAXDE4j16XEmZ0v7H3Kd9uZa4A6ycLgGeDeygCKVpzoAz8DoXYrl7n7h8oRiKlES3Gk3xI7CYciT76EafSITIDK2mJ529Lk6zaqUy5B/ep8JglJLPrqBbLcI+radoggdgHJSvLUAsYmHefupLgMhEI0r88Mf+zrhZvM2ndu+ADucXQpWgoX/oX8Vc7IHKZJ4IeYsquZDSS0zqoR4rMU1JQOyJQBOuzdCWOTV8IYuGiAc06JQ4F1Ufvb0y9SSifil2Q6Rs4DUn7HfJA44F/zn9ZsoMF3CkfzsH5QeoxLEQJOEWcG/zWiR5W435FhZBnUY7n+FxF6qsWXUrvyRTh9Lypl3XflqwQh+qJF59vHUoou+PMte7cuCRsav1nVe24XSeFeR7ybsEOZbKWLtnn6ZgLZH9FPTKQl+xv2jXmTH69PgKHtoLaK8uD8n+p1ZvQuLKWRkbosdmlhrli5+6v1G+g4n+eQhYu0sxTMtLc6GrBY0zKFBx3fAL8YuOyNn6pDHi1ZgKoQCuU0OUHmRCsgKG/RXtn+42A6reWRqtGMzuiAHB+b8Q3PFfMxvntTPF7MP7DB7KmeBw8y13ycl12v3ZQ+yT+C37PYihoAnsP8DPYB5SHVPWHB+O/U6n/GvFO2DsNqzIL9n75i7xy9ZBr4rqsY8IHwDyrebQhydGX99ZV5FvI+BVKQW1dyQUGARPEH+0m3faZuVjriC7nGJ3TEU8q+N64FmAClBEHY0SQS8I5nLfvZqKPgMLHnRB3Wr9WAU0mCeLieeDVVzQgT2hu6xfb1K0e+3gEwilDrSyFwXbWnflhmzhQcanXuuHeLRjUHJ+LRav7/Wqz07IMxl6iKtXJmUBJGLxWdaRxL47GvYhxbKFeW19dlUKu5H9yGfL49KvsLj/zbAU0/Wu19+j0nkBKFOhV/PP+casJcmn5dXIGTY4cgl8EK23RUe0aJb7NjcEkUhZCPrywRaZN2ODzUtE+CYDkQwCS691lLXpBFJDDYk8xCzi33iGnUX9jKSVzGLh7CjIwGOivGPK4oTwSQRrfPpzvMPCe4T9FgXcH9I3gBwRNqQUrUSu95v5u/9aXpwlc3PAnHLuqZdowOmZuC4/I0KIR9YTgO7p8jYydkM+D+W/tvVD2bxz+PI39HYTtqB1j76MZM3aR5QMRJBKmngSBsmbnUm6AQJWN27XP4Rmlz/ybme3CVzTtFwCYogH/BDHOAJL6R5C++p0eq8bE++VZIuIngfL7aNd0S22X/952YRFdXkC5kfS6pAsKyabum7Fz6O26yO3aRYQI37TTm9qQYn0qTyKbtJ9dtvg286ZGQYuRPZLjM8YPQdhk+jcW8L+EeKtnjqLTJVD48RwxBMF4Sg+3Re7aIa0TerT/ewsio1p37SPsiF35KAtpfr7k60ixxy65T8itBE2/Ee0HDy7FOMk89LghJTg3H25vs5x/79aXKeKNcvb/GO7r7
1IX+F/o3ureY7IhYE1iRzYvRyzAeUnYbe7rrmqno4guAB1kiKWQD3HnvvhVcU9MnfFDd2iZWFe/riIsBIibY43IHX89m3Avk/Ov3SFXpoTPdWW5sD++Ptnt5ns4gbKbpajhoUc9BnNKHXar33117cZACoOCG+Oz/4qikqyf9i7gbgn2oUQg3uDKofXgJqXf2nrRJCHwocqD468K+yPlJbmPP+oGpPFg2rwjj/N67B+xOgppXF7W5Y0eOk9Oot3YSyznNy4LbAbGLcrAtOty+ojgJvpLkukQARt+s+A2jvDk28IFgYgyOe2o3+eX5Vdv5Jkvlm0Iy58nfc73K0FyXL1QOCIksmFn0DOB5ZY8Jb2XOsFDlDszS6DR8RMyVTEjyxxFXlRCLN+uuV0Qb4jQABzI9PLIvgoBI/VFXoVFdKrGNr/zT5TnKypT924TtzcmeOoj+wEAnKuzFETT/To2dq4mj1v5NvvujUBL9QS6su5L4ZB/6Nt7mJT//inIwnV2KZUgQ/Kv6LXTbO1AM5NjBQhqJZRCtpBm9Dpww/TlzirFAfkb4mTG/bIFptgYxVh6ankoxz9GGdfXlypfIQgZoeWl2IOs3ZWAvouV/KWArBY10RH17e8z0q5am5le2QmH2O2HyVrdIxvlT/418Lyh7XAZmdhvRqEZyGzJa+gV82eH3wxhD5jpDip+Swn4LHSxn5zmgW+g+f1vU9YRrP63wAevV9vv8/5SvIZzDnW8Dq49QVGqmiuyu5kEpRXL8HPhE1fEoXA2xXYTHdj7vY82sKfniJM1w5xXW3a3KBMIC+gM7ncX6rryHvw6o/0lyyYLiH7adY3eVcny1fHrShIZ7LxhRc/WGLfRSMLzXWybWKElA66LlN5Mz+Dr8oo3A7t4+sYEBsE90O7GimbCcV3gJSSD+wtCQ6irC7LB/vY05B+3cLTj7KLA3wVg1XoZhEAb0g0EZQQAasExgsyzanpBb/xQ9UxdlT7HO+lazvvbnpdVgvwRtXygK8V5kLk6xKRZlvNXdtzFgU/581YnDpwpQUJ1aPPVaTD4bVBBiQN49G/vHyrBfy7/S3SDnhJ0f2ckAQN+WFjNvD0B0ABmfU9otcqHT4y7aiNo+aXENr59BdePHwv5Svh9jGOuxu/Xy1c2Rr2QtPfj37tZZlLixhbiZOqWYnzn9dF5v8DvBCKl0kfsd03kOmy1K+DnqxzJzN3v9npWFzO92W1o5+8NZbre0ArgeCTXFuEwhNgDOcgel6MOq8ZYvco53CN10atT7gw1nVws5UknIU1E/0VlV3i7bglC1DuSR3W/Ml0Bs4iJ3R3kJ+8KZWF/5Ryt46GCWJSVf2fkSHbbRv7yApxCTJHJa7vBHt2kE/OxX3IFilIJBYefVeJAR3bGj683ZhGfgOrX/q+Nmoy2/WVEhBJ8uUrdBM+8FKpotnPSeR/ubrVmMM1P4IAGAVYEeZD4KTrAfxi+4wPAgwf0eBDmKNoaBP057fPaJqLW4vNuMUQj+e22Oo0ZjzSWK2GRqUB6uuBztUgHEjne3nkJX3efibWicyA8k44mYKwOc49CmU/Z273MvLfHe0O5ZvgXPjy0DlRHUFUn1d2B2OPS47vroNDKBahieLaYaT627rd4S0wUgReu7Uwv7lg9fAo3vzSZYMvLuZ27fF5Z9YMZiBfRPUEpPzXw5mUrlfd5NwuJjEoTGz8BXGlpVe8Qpb8pUUSFKF39N+IbuBIWATOMY4FMBe0qqs11kE/njwqIezd+0669qc48aZrEIy/dCqdRVyaYSofb+RTsKNwhAxJcXh8uAaBSuDy+bAaJsw9doOzALgzqRyZJ1FoAQDUktZILP2zTH9292QGlpWlkJAXKM6LgcfmHh+7ItML/TQmu24hd+fvpjlvByGyYWAITjhm0aA+QyVy81Eo4o8rTgnhMif5VBTCfa0E7noHGGkFOfkU0m+nTACEVISBYZwEl/ASg1jKrqUUWau5wl3soV4RSvpqVSVeOwrhyFHLn7eC4FCHWfZAKOh12Or/J5F61GtDF+BhV60g3icpjaQfAXzbmishZlZ1QQz7XOTxnB9KTf09AK0ls5+DH9JvmdhYYH0kqjVf4ayATuO9hc52tQdygxStuiuJ7370O5sVq4CmM1pXt3mllhwDiAdn7vvrkTVemcVmGznqg4X2d//3i6RM3ceanuwaaAOVRMqgBFEgZoZuikNVzqoyZyYe76mX+Jn9Ox1ybEoW2DFq8nSzoJDjvAoZTcem7Oy97kmYXGc2FnguACeEA43/3OgOWCJW56HW09fXxt3YKSXBeV+ts6lV1A1UwnGuA4VpY++3OS6KKmBDKLDrHQ0XRoOCKJ/vik3oubXCt3SO1kM/WbPrBlj6Pi1/m3ERc3XkM4W7cKaTIoJvNP3dMZ63rwg3uAwrbXV7Rc0LpotdKbWd8dUDpy6jB6OXo908Kn0RudMRsRtOq8R3wfAx12/5N025c3lV9aOjuAzhCUfD/NTwad7u0dRIF3R/GkZCCEB3zl88nz5EHvIWeMKZx5wOxHP83W7KEK0CD8WdqwE/t8bo2mlYtV18IVo9KQcaLVnX5Jgry1yI1ptBrxuZF91wBNMHRygdIIBJ7kndYXvcWV2FYBkphPI2tL48mv44HMvG7sQPic4AFmwYRTEhYRm82pjfK+FcKgvGpptgmuyEylddvOje09FSbPtJz+ttGQb7DpG2Vuo2MKjT+bp8HiQCManmTayFxGYjU61wynKWv2f1gmLRjNyizR8yA+zcVQltfr+THRbixv/m7W1FChkYpQbGHckPA4KWfiTnoKYeWCxE6DYiXfGh9rJR5W/1S3PlB+KSIV5L7jaQM5e9JcJw1dOGybT/NkHdVTRq4khM6uhVe+sxQEDtIkCDZK8gx/0hRzcObZKN5wPM4vLq+8yWAzHnVUWeUIerjIeaOHPI71ZXBQ61kga8+C6pQl90UyqZa07tkyZuZxNpNPkNuzYRJEakgYdsp+LNvD0UfBXI43WRNtHvFUBYG+Ov20YjbsKhmrpqQ0MDNt9MTw/HvS1QMwbkHGY0Qow0ZTQfCDD4352p0uSCTgDjBfTSplK1PHuOO7XieCCSpN12zNO9mtMsgdnfAiczFXW1Awo+RhEfIA5EuVkwvBbBvrgLJoCCjvdccVR/I2C675sWdwKUkkVe5t/taTGWOcu+4zuVgO/j/RWb5qsBC0fMJQZouCu+Q0r+sgeLQUTv2U/bGHhD5bc31gPXUcO2Bthg8q/Hgv93jzpirSTkRg/zNtwMPfp+j+CRTSCCCizLUAdbcH3j7ipZsBkCEwwZCICy51MuDqNvG516isOzrMIKyJO99spIvk1/xcuLFUyDp5DHx/poj60Q/f69E+JcdQaH+t6pSIBG6IksflHymIrPOJwM+A+rdH7vHYvf6Y8uI0+NSor8WFlM5dAfDDvk6X7x9Ctp9ewl7GB2vrqm3rmEM1mv4HYgArIZPOhcPWtZf1FM8Oussa/sDaEvxDnt0SgKudoYbtrFsFtQ2hvGbme/NvlvZoWZ/dp4RxMbCF5YaCAT7ijg3
A4VFd/pKMYytQOtQHpAPAXb0oem2KaRbR7GmxD39dW0oj9q8DQ4AuiTB/4nUxb7vPBhsiJJGcHKsqbsK560F5/AYf0cVTsCU4XyJotbR3GxnYnkXn2jbE843Xz+6U3XBcDrQ2ZVy1kf5arp7W1lIG3/anRBgULgiInAcOOYHQm9UXI2sJ1XWfnt1LucHZ0H3OAOHXhwS5IhR4its4kTWS4Lx6HFKpI1CYOLLKdK4QGo0IPO6YYkl/vQ7CkokfmQ8nNV4/9CwkRi/XNwFNAntiAdCG1/nuK9aVyf3SQ6wjMWrfJHpSGbiS/6AlvcBNwEhpXbSFfsLg1IIE6VHKQgFkmtNJJEvBCXKXhyEiNSeqPem6tocndiva8fCxOPCim7dK0S2BLUti2r+IvulWy6yUFcJdqB32Dwr52qIgyhYYgx+9lj/UHYUO3NI3y5OAnhOgT8Q8b4Jf7CFFqSQHSqplD1S6axdwDCHLppvIGeHlEBF9q6AsH/emQjusruRGg8jNQGso7hBH1cDcu4QZ5ZEV3xDszrua/NEfA2CuZOjN3lNPmrkg1mU4fuDsQ70phQAlNbRfusXQ24ooG80fBrEgvdRVjHpCZHrWQj2CjSmOsKd+KrnUtZCkdANJaV1S1Y40slox4kRjmdVYpdKgfHaFRqEcRi0ej6JTodsca8PbutvJo5mdnVXdAU5ecVHcx0ihCyy9CBdYEUNebpCDu/YQQC1YXjDHwAieptE+OQZyN4qGHjFsu5kdCttbdK8IYxdqJhVUr1BDlVhzqnm8/xboyeq1QkecMw2sBtiBh09JL+1bJtgUAD/QumFCI5SRxwb4PCDtUour8Yxeclz5gnk6RWabSxeNThwOgINPEBLolM1cF7uAsS/GOVX35F7vHXC48j8Vzx4PHdUUVCXv+8AVAnxUI7EVkxfJIALM8EvB3BoeRi2V98J83xQiUObQak9EXaCXtI0Cm2+eg8b+10HTqDiy3GEBKxWW9w9IYSitB470vMy2rZB1dLWgBY4/nkp4qWcFYPXZ/mkcH5S71FIkggx1oAYmyKTFdS5uC3aCt7cmK4Kz5K0Oe3gGbsLMGGEgf51ffk9MljmA8GEhzGXMElU09h05fxtCWG8PYlTJ1U4d5J4SMCCv4BaWe0MSpD0uQdC2SyZFJkwKK0QUmN+QHZRh0COOzMOIPMqpKs7Keq5JgC5lCBrAAr88ne5TCEmmUMAbQoOCgwVsBmRDWXrWikjIe4zK+qAfaEZu1eSQPFUVuBUDyRQLupzf6MMAa8AY1ALJakg+0NWj3aEo0WCMwmekPEaGRwjDz0Yn4ba7hpl7744JgAB/gOZxx1CMjBfGGNPD8hoCklrqOJLmtbFn8RdREcuwPJD4wbqKFtdsAsfU46hlwiY5N6BZ97t4gBo3BSK9tsj9I/wE6GvhoaPVgycXbUk+2VY6bFOyooYAvjAJdjlRgLXf8QS7fRmAM3gvaIP5p1bjAM5d79L+TVHlBiiHj8CRENheN0qf5qlk/6+7vDPisX5HuN44XYB3ApASIfPvKaabNnb6cEvBryrQlz7/r3yHauDAd01tKLhjRiX+JGA3jzRn/EyWl0U5pVUQreXHkTYqwWjhQMyyHpHgxOLVv5l+IfiIbP9CEBh70lh2B4GbOfG+uXfuMA9/kCOSDtoh0tADCuaZuX6Ewd83VJ4Hpt5ZSDCRV/h2AC6rbOGszAW8Ef+/UdHeW7z6f79TgD+D84Bf4l6NHe7BJ2cBCz/7/eUCMfJDM9/P82zcK7BRjX/93uIHSrm/vd45IrlB6znjs5CyX7XR//+hNB2b4/OlLRSPHdP5S2isYsA4u3ybeGvSxtaWX7gbd5lWHqrlGvJEdFyb8cbPYKSLpZsyteo1UX08NDANtv/zIt6otHkcDQ8n12+g62thdSA/vsu2EGhEUmCIAOTl64Mhm15PaOftGcZcfgEuDYqWl7CkRkfWzJ5dDgmPBUW5wjHQ3uKDv3kBnxXmd/tgfyCjxN58QcSaiXodq5bl70e4qsPldsw/QpE+AQFn9MgR2gEbP5eAZF3r6lSAqKpRkSX+KR5VvG6u6irDJzcUvnDLuFNfpg6uaGbDHmklnjIQNEqaVqjXekl89/EChoalfVimPrf/kM1m0UXORTLwFVwTgunURVF8ILWYtJxGDA+9NDhES7M5/p6YdBcfxfNTO1QRwwe+ZAxWhOQ35A9EXn1vofBXjVouTZPQkkK3rz81DPZdSZNYI7djBXWjHFGOMkQFYDwhSilt4jB9g2ct1aDq6nJOxfchWu08Fn24IIxazuTcFIr8YVPCUpUGtSfRA2Vu4VyLMKO16/uwgCE9UIO+btQFUp1X/cXmH5DOswvCEfb7jGdW39up0gfiW++hvcrvePJxsw7hDkLotf2EWUXrr1TYEzernDtrcW2RV/FWuWLne+W7j4wyjby55raEkBjN5BkOsMj1XfePuLi2MRYAtp7mV56MHiQZ7oaYNH+rfMODyINl3AeKu/Jl7YnnhDMgrfhJ1K9O4v1gpQtawXqn5+UlQg6iegdTrDRGjy7r4hD5uubFP9Bln42mWxAERUTRtp5K9IQ4n6XdCBySxz9INH+Sk9k1uO/21lDZpY+TlOVgmGCx+fyktKZa9754IGS5mgMwjuUdehthcCm5jlDd3ftUrmnBI3OoUCcslBW6ffxXMH9EnDK9e85v1WK89rnRw5B5T9x61dyBu3clZp0rN79h4cKOOyaMrjmyiWdSdfkZfe26vqJXGmfg1IK8cWbuUHxEfPTpqfHlxI0EwEaYAZeaEfnnduQgZvMKDLuB5h1UKGNTvhdaEK+n+X5cg7iEYMAmDjCD6GyYOwD8om2vLwMKl6bnwySIdY+tsB993pCMlxByaMUV05sXsNa2bw2mK70R1UjYBK+mvYEHRLm4S0viXb+QFu9725/ZUuQ6xHxBnSwNz8SaNBGUhDb5fV2R4JvD/HSB7bbngf8MCHE9xw/DbxK6JmmtTRmgAS/GOfNSWQUFS2KOTxH6Ooty6UYfh99F7jHkFOFu1KjrvKk6VjHpt/mlzXjltxuC5HB11Pr3zuLJ7SBMyWe4W8G6nWBdCJhQLvrBQZZed6ZAeWQOJN3a+Qab4kiAwVRSD+IRdl0MxRxdF54SpkEcZcS2EpPsZQS8xBEkAHII1yVEJvsa7e9oh/UxhKPTzfflvzyXKgyuR9fZkUTIvpD6OG24SvDG6fKCTyYSvCUb531ANuF/d97Xbg7/wp58/9QVKFeN3znddNvVE2gecHkhFriBFF4HS/tPgAPdY8ETx7lG6BBkzJBm2jNhJ/tXXe30NCP9DJEegKR7p3I2Zw2WVypGi3m7crqc+wKz4G5OtC8tmgjuxpALscOUdLNQnn/zjME7kxW+j3ObbiiekqMq6UyHfbVcCg1n0iZYNNncV8zxARhpm+Zq6G9q2s7LgyxZACUdl1jyt2+BSPu/QxVVw9CbOEHphyAClk8NL7X5mqlW+bgG70wU5hFRF7RbuFTOpT4fH0xdJkPDpWF9rG9hI9r3Xjc9oK5uVtPn99dHIFQ25w
II/EgPjJ0pSkQrydI0mGH52sKvLXWyyOlQAn9tFQWUyyVBtwAhXHw738brNx5Y0dPgDo7iWZtNrog+85iCX1Xg348iGNEEuq/lCyJMb4aQSL/MevckDFQjXXGf7RyNiLD/1Zehjj6LndEtaOL7gdwWDAqGB/dKMXtZvVBUpwm7oXrkTz++IVdJEoJX7twNzINnXOC+B2g/TiZ7du38gVfOC/bdDsLMjbNrDQ1LCjVB3Sq9KHg0rL+yzrYPLkMMcoTw3eGu1CJQQyxJHYJywytfnrs/q9yH3Ue3J+MkyAXTu5QoIf1hb5iFgezxlr/JZ8hV1cgauNP3pWwdf8sYqAEdRHYDC+xJ+FKO5pofslOKRd6ExAgcbvlRchDr5wykUBLLR/KDsXxT4BMy7iu6zN5CFyx8X6Z5w+Tl/s420QGce9j2fDLBXJPbh7KBxbLbXnOEBME8gVK3hwbVF3Bo1zUB7LlTSiceykCIyHze2mCqTOfA2PeS2UGYz552EFWD/nDIIMqzFWBcERw5RO8wB3IKf2OS7zUYkssIHLGv6CMfYITKgxGX20+rm4S6r/IYqGjdirQe8t6XXXbRH7tksMwu0MrAoCElF0+xBBQDaQ3bNXN7S7qpr1uI355KyXqW1WK9MtMFwMj1mv/2wj5EtB2A9x79MGYH27AsAmF8Wfeb266C+1n8RTiSt0APoIm2i6v7jd5k37PUfXFcB4jNID3xaTK7zvpgbm3h2Dqz6SWbDFjjPNnEmS4Me/IuLjyuYdltb8vKb5Ic8Hk09dc0X7Gz+o/mRII120zqFxTFjrJL40cUmB+Qq2l1gcRGctt7DdPGWps6YOpxWAkFXhKf7oUj+pyLWWlkiOEDoA0KRn1JBdANhLX7g3GiN0GK5TvquqFd/JhXcWWV4V97DCC+u8Lf275cc8wUKLipHp5Ll7QwTRBf1wd6ALjMxKchxHA8z5zaQFPQGU6JxpSraDqMLApJ+Mm88WhecPefe3aVl/FBv8ILFARzmeoNvuuuACKKHOonjHYUUOvGp0jBWA+PRHweQSMST9QD/5YGH5LYGyqtCYbxHFGORH76KUYNNKtTaJQzvQ4a/KvxtmIF2cwmIlD5YIVYX4MGXDFSLpkRWmoFstJu1tn70IMaJAF4wkURk4ookhfc+7k2KN+hlcuXZMB5pbgBfWgTvADqBz92gonQ8ZmLT55/NuIWV+byXrNzWwmaKaAQeszI8csIwahMxB+pKuZItq7fDAZ5BVZ37ZzAiy/0TbXRHe1sYyLyomjjSOHd2SjfMEdjnb+28nmqaM4/BAcvUfkWGWEjEUoKH+n3UYoHYrzKIq+Pkd8Bg1+xuAjlykwzr3OuAzTKT5vlyy0snXPLTMvIZKb+8nfhKFfqPUbb6GCSEoEbT5QvDOd8Isn84mg/cUAdG+AnOAk+xJhfOAVDyqbhIdG8UtzmhKOZvClTnTJjoH0SkwBhupaPNT9mr8jRQxZrbvCJQauqjG8WXfLN0Gl8m+RdBZGPpmYBKBu3fPkh1A3BMbFZcI+BIq3gB4lM9oBQ7mUe5uBW4XkKUe/+VITgC5KJ3RanTFplAb+oBBRDfxukI33pEcYyBnTfeyrqVROZ/Xn4/1omtuvSyXoExSQUooPhPGBdhakL7K9z7LrwEujF0OuN5Bf2QcVOHEQw8j3NcFnhxQIKrqq/XTtNw1gRGQhkJlffJnyXUaUUwrWCiBecKPebgi0WCSleQqgFgFB0GUJZCiQe5HAB+oguuLVb9Tq6dYew2DNF4ZaI4YP0JNUl1Zdg6WWYFv7RtZs55Nw8wsYEEKt8PYeGvVstNuKr83MbpmOmgNZxhC5Tsnh5jihzIcuOl8PlA3XHohHkgV+FRZoVawC4Q7thcdXcHd7s1mhWOThmM4VD1F9jNmfaxHeRbP9rvyic79rdCpuQ2G9vnYxzqCre5l/AImXxsk1SoOgQCtkh98AVBjZLF0yt7xQyRurEchuVvdxAv37KwjBsRQK0jIKZZuK7xlLs9qIpVYv/w56A0gs5zgo7bv5KjWX8kbw9f0OslzNl/HfVZVgV7Na8cczE0JUAcGDQX76YPmw4ClKLNRsLY+87SFDy5Yll6caLsZL1f0az9l4Kevwm5B+6QsCMkgYDEAZ5KNgiwf8tVCahgAZYbArdAaOaoXKl65+TzjD9zFybUpOuMDOIWCs5Nq8oYnHMQ0gWkXHTEIb71msi6wdRh/eF+4l//wQM/glTcabZVZaGseuH1Ffi/c2tqUCpRM/ZEXCXxrs/ZQorTHx7tfua3VArHtHLdr7nOWc7YbCistUPY14wvbA7b5pCMcwpK1EdwcFEtS3rnOvvITa8l/BEBIfIhuwPVT80O8F1KmPntNX4CRppADFFVUTFBArQyjDz3xFJ3ItVNsOdpvJsxWxqYfSVK4an/T7OvjAjqREv67TkNCrIT7vM5bw4xBgHIhNZoBRVShzdX3SpdA8ie4G+POBoAtt+zJf/KBEhF4s9N00JO0T+iL+77YwpGvLEiksCT7DbouCipWzMyWRngZgQbUqoYKwLsd1HZ3wr0r1BiTI71xxrxut2tWrAguhRJE58oD/tyEm/3uy83mhS7/Qv/n26IbQha3qBvygP+7Giq2l8Piw68yrPioBHiQOW67cAYq17pvKABH8OtwO6ihngWbrIrL/u8DRa8m7czJLQAoTEril1hvLMS0wHjrJfp1XxaVgCjK6kwf+RCpT8SkKIixaKpaqc7GUfh0Smxg0QBeM7vDjdz4YgF2uJrPKaz+bW05k9c1rVe+LcRW9VDM1y1tLOr1Ufl3i+bla0ONKY310cd853eJfgQF5FzA/qksJy2mn5sGiRnF8YUyf/W/9+AEKc0Q5Y2pnk0tEfB/LObz9l0+UMXTMO/7LcIgKyomchkMfrdYaCbGl2LbH7aDeDan+0FkCWZJRZmPnPcONeXnOMrRbIX8Z5Rb0LXZDjMFbSex0+bewv54EjEy1Zgp/OcsGrVxPO2djcAQLOgGVqaKkonT/76kQtWUxoxKF8qGT867RYVOpaCbYgJivqQLpYCqy1eUBo+jQabpF3pLkm6TFZF4UngQ+kGZKVlBJCv8TY+yn+L9yKpcyidqc7l70lnh69qvLqfjMiZGaeUrJXpeTyID3gtCtP7xI3n5vwZiomleXGYYpeITzpYunkICIj25FNAMH7+lxAy8hMWSKN/INkmNWdIsVO1D6aFF+1xYQBsfSgORhQigL1vPjtTFNjCz7ZLntI3ozV6shT6t9/2Sua19LIll/diNkZp9Lmjih8d7kKnu8ShrIvJSYW4t2Ivmi2DXwSc7HVPJE/FtAFuHM0ixiyTRQC9n3Ud8bnDryhFm7toH2Mgjjl9ilzFztvqM7RIIHaPzIhDPS3sGhjXeSLRAK/8SM38HV7s/cZcYby+W2RgpUbJ8Zxc+DdCNgQQj9KrQ08gmwmS9jUyXKD+0GzzrXVWDc6t2tPQPKMiOfqpFPec79eEyzRGhLUEJS7ZgwKCA3mm5v4lF/qbB92WeX/rJ+OrqsT1zUIAAofGiNyz+3R9QBizZtg4
qXbTRQmsNPMACmrDPaiO1M2hOz3wswyXx/hESDm5RTzswqok4h9/IvfXzGJM5xiM645Q0Ik4zwXGPC372QoZPyH3taXv/lulBa9JPkQHkmhDBE9DdVON/ezp0eo4R4KjDmjG7tIc9UkvVeZpAxKNh2p/d1d6CC44GUsDK4G07PaSuIklps+PFUWNS/bMBZW49c8fy4QRJ2wjAuy4fzJTpXPE2ZirHyof52qtRBGIkKP9L2qpGP5UGrHTAAVHi9MvRo++eTgwRJZlBGzrq5fHlrvlkKZY5Sf/bwL6sVTv38U0EtzhVcKyw3oOiAtlQi+2u8q4rnf66zQ5csdJqCSmeotoh+ASvYQurgb6ApopWA4B/NnHSubuL23Wf2+2foqvdf+E46motskb5rr+dDEXdSR70loPLkgclHZjuP0VVbcd2cXqEaJPyyZJUI21/csgHNzbeImlRVPe83dYGWthU72vNdoPYBq4/dfvIiIB7qWkyi8MNGxgnaQfitd53qw6g+r2uZkeiWql2GBCLBoN0phAb1jP6ki86IbyrknsenyCsYSMcw7LyZxDPKGSqMIyODukkSSk8DSuMREZU5Li+44qdB37CuC4vtHw5jYamUOzQ/EhLkoyTd4LrxYcDSXbII7vXUpbejEjGF9heCDHKIm7X5NJ3lWxPbpZA1iBr9xEoIht0uc+1PPXc/tAX/2lzW4cQVVHxlpIKC8hJ1/ft9DLQnwj9TYT1vxRW9qUOjUakPlBUWq7iClMGv8uxnKHNx68zYvst6TJax22O99hi1RHxHD6bUY//Yd0kW/3XHMntvL4UZVUc89QR/56i6id/Yb3mXWQUIuvd//Km24qu5cyO6mKcR78OHeq/vWoES+FIcbuu5NTdHFbHl3nHzYKP054Og5jW6ujORNbNRlQMoEG2bkgiQ3ker8omRzrfweiMiYo/uR2FOUD3dKtINXf8btOjsDarER1m/Q5fqnbkzrxtH6NCv3zJt+VF9g9jzP9cdpW2MP65LM+YDGGVdYF8GmoJxwfiLUsw7px3LjhRZcKM3C8isfbM6Rsca+bFo7xo9R470PbNEaSASJt+hBYOuo5fxr7zRg2KyN58ZkN16eamyqoFHGuziANbSLK/zo2jjJe36Z78j/HpvenJzGi8l8K05LEWc+2UzGLTDIVAVS1r5Cypd/YuqtqiNsgGcylL4WgVF5CxuJNAbgTUDvVIsPOu8/8fVd629qhzBvhJBpEuCyFFEcUeWyBnB058Z/mUfn+PPn+29vZckhpnqqu6e6uHI/QT9pFFcjBo4+QiXjLUP4XdvHwBB3Hf+hlWIU8KyF7peC8uzfMYsCiAXJkfvzUh5YzVTT53Cpuit4LX6e2DFSnRFA3ubE6PfmohFbIZsEcAky2+zjsqUJwDy36iZTS8VvIB9KEnSAfQBIDy9gy2urdBxkbtNHyKaZ9UiQ74PlpkHoiFcM4/D2crfsWxN3vUlryedxlzLE7cjkh3RKf+en5gVBeHOwuiMJ7hryjWEppDyNgreY2OfApBeqUeVsNnsmdx3705oByEcwjnEo4fVEZtuRgTvuKChZduYh+xaM2dBq86fhkFOhizmrYmJcqtQ4gR/v5t3uz+R5bu+nrEsE47weRTvzgfagT+QrQ2e0D8mekNtfcEcZPK8q4769hMSDvkhtHG30XuhwwcQUFPfKRCXMAbKpzTSGeraTPBVj/Th1FlDfv2qEGYlZHaLfutvO4yKKvUecmZv/k91weoN1leQn1VxGjrn/W1zgOa/hWkwhG2LfUnHkGOpDT0J4UX/LBcLFP0IRDbGC1m2sV1YdhaRSR7W1BUQJWAzhYSrCL1t+GkA+V1GIE4rX53wrY8M1y2aDkGHGZ0R8IaAiEIiH9tPHfsks744mF+Y1gSeR4Ajn5+FTyTv3DW3irQhWMoOt/Gb/+aUelpY8si1BGffddYB9Xw2rzovdQUcqMgyKR3G6S7ecqggXtFx8BKHKqzA1kwaii2NZco1tbJc3pTEsogS6hwQQ+VQGKXPWNNrXccfelDpYjeO8+CborDgFC3uCRmTpC/GtHBAHNYjulm2DIRy9bAM/nBYi02HgIcdpvb39khLV2HgjS/QVfZkVDhclf49Hp+7fM26/osLeJU4I8NkwVa0Ysv/RU1hPLfWcK6DSrMNVj9ED4iAzE64nIXfnHsx7kpyXV16+oOI4snpTrm+Abtn0nC5wAbE793L8Q7/4AZuMpzzyOH6M2hju4gEwm8tpKw2ewizt9+3xN7bNR3OOUCk8+PO9T1y6+/Uv95w3zYbQjHeCruuhgAahIlJqDPyAWA8bpUOAG9gRZNFgjdtZNeOl09S7lLSxWJZXc5iSstCelaAYyhMsnvL7Doo/xDEcFxTQuYeTllSx6C8aD1RHeiZoMwmHZs5yy2S960HoJngVZ2XHkn4g9J/80i+pcysgSJVsFKq4bEdVUZ899AM7ynfXgGx7fEcIIW6XOCQfAkP8AwmLC7SOkEn9gyxGnaJWZv807zxFbHK8FRZ7nGdgKB0PJ4BJdFq57UhvgSbiZzEjq2gMv1Usmb89xRYiC6wAzDrV2NjHfxxCfK3Ss/ygpy4H3U1rUIoLAPmqQq3MRGIYtdjexLmXU6iZO/3ZSY5zLfS6j64tVJS0Q7Sp5xMqNQa3J6/ZCzr9IaFP2t9k+vlomqauM1CrcLqPB4aqqRNmVrm2gPhgFDFPDVrgsbZ3NGErBBnmJzYBguu6pRG+A++PQYm0Et/Tj90Lc8V/1Qr9qH7+H4xxP0b7yv3GiWs1ZxFq9j3P3JnvJDbEtsLIpLpzijAH6TSghc4U/58nvtJ8xz+AvH21VDITXMXb1ZjQBF6RioLSsK2Lwlxd/osCTb7Ohz7aL6kUZN+TjriuTFtuIfDPMAmODjqt3mkka0f7gqBgmdmFRBvlGAihD91DtPk8FHehynP2TdPDn5id7+BtNarrx+iueHH2Q7qPeHr7akGpzKDyPTGG2Qcsy30MHJ5gei0YxT8JJZUYYBFGDtVYT+RdiwztkXKbqlc2K4JEcObu5Sofxe9HforQU3Yy64AtZTPeKnkgoPx7FcZW+PT3m0Uj0tMSeK9eX/XxkSVCM0z+2aszxUHawn4+5ReHgZQP11v1LfgDhkaESojs9ORy26fDYh2YGGRNZsfv2RP4vdXOcXf14lGjIr5JFMFCd247YNw8I7oQkLfZ87+eQMlTRHUz/dKEnLyVSoFBTjEclF6dhTdk5b3t5A00T4Y9fuoevXzk1d2ObRggZ4aH8qRpkzOUkdGBIEoWtL6bQCl+s8WIVMg1QRFMeg3IOjF+dyuHWGOT7+DKBbWw37h2SVnJv+6wEEKCb9vWjiXAAiX4U7gGxN81+ERvB2e3pg/CqmrfLVz1WtIeHbAk8PAND49OsAOYIUhYQv1uXva7VHyWx7D6YDfCO1SOeaesLJ1a6O8HkYjiP2+7/VSf+DqT+hJCRL04YUc6s+diTMY6SJPHV3Gv78+IHALAcIKe/3fvFBQxgL5QnkQEJ8Oua1/7S88vsEuBcE6sewa/vaj+MiJxy8TX
7ytA9LLv94JvGfBu36DMTN68FSdPq2FP8H2xl2gGVfGuGCvX1Za24l4gBltVmnxMTTEBBjmNVd9st9AurrTh5lQ8BW6W/bRpR7e80HTO7s/4bJwBnxE5Etxa71Stc+2FhnjFwqRxzhxnBoa8gXzNyM2oKvnaQ1C5yj4OWWa7yo06Lv7c4E+Ye0eWqBw7+Rueka50j1Yn43wHXCErO1vD6m5MjGJzEA4v+QeJ97WvMG7xKr44C5uSEkgycoaqCiKWLLt/P3deUpi585mtAvRvyj86pPLkOpd/2dKwr1gD+DNG5Rq+8vKCq2FU993YfZIFVW9VpSWq5qXgydx+tvf23lC358khn1T7LLGYlLStuM7Dos1GkHrBQq7A36wLGndRrW0AM9XW1gpfom0gD1/+1l4j7y45zG1TyT4lnbRVcVh4iZs3gOhunoCqIi8e/70RdfKSCXBa1fA6RbBp3QsUJQ+j5GsAeIp9/UeZwHiEUPRLib96q4FKB3IfY8A7nbiY2nG0p+f0qtZ6+MK4GkUq6qlUUjx6Nu6DGqMH4vkxPzx0YaV2QIhRhHsHsMpqlJlSGqufwZahFAERGv2cFiv7s8fK9qXYyojA5RO5/p3vkx9LXYadO/9W36ftYYITctjD7aRMovVxosJNB5qenL7bX7R8JLQXNf3AHjTrvIiJGQ93HkyTe0+TE+/gMqkH49gt+3cLuJnCxn/elvfEQZ6bt4rZGfldYAl7o6sAkj3lHTkGAKYHRUOS7m9ltDl29T54bXg+w4eZuzPIh0AJoYbG7t7SjOY5F0OjFO/zKyUx0RZVPD+Y0G/IWtEIm/f48sRQIzF4bl1oDhgRPbzYGGHARqMWagWM3W60GHZGokHbh8bkG1Cb5f6uwLn563nmPRi4LbAkr/conlGx60A2Irfi+IJL/WIiIcdnDN7pe1JXfRWcW7wSBQyWVU20l72lukNWFUefqMske8Mk8JRRg7xoycMSjtYRmWA9FI+it04EasNV/59brCmtCHtWMKyn9LJkMNwRJeTbuPZd1c5WCcMhNCa2paoAdTk7j2rhocO8wVfBnawfiK7Tm9tMnWmHr65wAIs52B0ILcvOjJA1IZhD+ZRYS4cxvyqr7suol8Snp5xw9jHEzvA0yu8gtiP3YGdbPhVG7/i5o571Hdb/1aDfb7nUcQ99MHY7XTi4gAuTwKoErIV5ZLSnyH/wttrRi1iHexyBvHaYOA6wBZqnWMiBue8B/u0ngnYezjsqBO3nJjQY4P5z8ldf1+fvnZTHKFK7D+Lpj2Vu3+2hhN/xZEOhb8b2uKmVCz0qmelyOGCZzIDrhuzFizzcWsvj2EetQCB5EuCNaoXjx6wgamabKq9Yrs0ZdeP/6u6X5ZnmW+eJgyI0hK1l70E6ynBqk/sUNuq9DpTY35svnxQfFHQ5q+D9SrC0s6eo2MVNkfyXLWsvUHKsNhNU37s0EUrxKj3dDSWebxOusNXCbDK3ztApXQ/5v4dg29Y2Obb2/Iqnuj8CoGQAj8qu+p+XQwFjk+fhw3eZFXT6ENGLKzbPBKhIVJsfkRI/FyG1Jqqijvk5Ww87mJFHjWhMIA9NO2qD2wwptHtcfgAVPlwc6CAzzOjmTHAJTtHVDaLm/wdEjKsGBsSgXhGd5uFUxJY34KisEdX7C5KWEmecdvjdl8Ms/dbCZGtEopyW9zkEHPIhcilNwFJ+5LBK7kgMTaesMpocdfn9ilGDyz3rydfwQ4E5YPoJTo9OKYz3m8JyGqYHz0EjhrMiNriftLXN7tJn3pqMdOWc2d7EOR/GcWWtwQV3OdNJ7Z8L5fd8b2OyI6IRQ5YyOH9K3qarBjUkiS09h7S3idjk0POmexTC/h42omg9C+uogytgR49CZFBZZvRqhXtWAGv1EEqprG+cRtAiMex0kxPvOBbWvd7QQTWWnCLc3pkiCqgcM6gtKbxSMUn/ccqqE/vHOyrcffdM/r2DPe/WMgWLfJZ7qEjaVITuSmH5dBr4KDbZ1f+RUHz/eYDmIsbeCBgi5Z44r82soRh6nschUdyL9FEzX/RH18pNr6riRPeYAVHGbo2GWqhlwb+8r3GXUIeCdAvxSYoQd0ZJe8HaARz+OonmEC0lJjnnEeELbzwQC99Lc+/vU9u6Nroo+RLqHXd910K97zuLF9Oqq/5nvGSwDlmaVzOpA7A+f47Dq7gqACCT0h42NUmKvpMkBOiDQfoKG6r8H333j8Kqg6FgX0AA4HnJxByVqqKCZYkC7+jOGkdU+dVKZpADfMTqpfC6/tRs+ZPuvobNVPjzI7vsMm/DQhXHA8vt22z/mvoa5jk8gf2nLxrgPzQc/Q2F7hWKcvPEVAbXwhk53bt0wNi0BYlseQQ2/J9Pl2tdI1+/FQb1I0cUF9a+z3fAstDLxYR8QEPoMbkHgPLzHNDxC9MTrk06Sy9dm5LYNO7PGQUHNRrKdXa62dFxtl99ZREEfN4DUlHOfoGM+rLCO3/OdrgOgfSOpbJ1n6BJn13rn6m4XvMQIA4Pws5wdNZFi9hCCFqCVTvUar55jTT8yRC7Uu4axr+5jhZFXOVRuqvaW5um6DMv6ZhjpLx5SHwLjp8RqdVUrT3cNZhncToG1zITLkrA/Fw+MpelDi6L/APy3JvewbL3GwOcqbaI7K17YjJIA8MMvIv62/GbRfvbXSIG/w7v6314SpRAOw+h6D+Qv/2gNngSRDrCre+cXsgRYz6LLyiDlN19Xp3DoGdft3jdpFPk9jJCr4chrI1zXZeuTqCsrHP8DU6YrmmXNTz/cpgxvCDD+1zE8s2JKZE+anwbuQjz/YuT7K2y5i5xe/x1C1RpubPcL9vt93SQN1+BwubOqbSk+57neg9O51myMRqriVltR/HISwHzv3zSadlveyStZeqhkc0+c5bWuGsSoAbZxbXk5x7DgHQ6PBf5ZMVYa794aIBkAj8y+6a/blj5WFp++1rtkOXF0fC8tKtKXi9RbZKvmmHD/5eF3Fd06DG7NVto3eCwroSYbujH4fX4zJUEh6XSDkw04ijq7rrqCotIEywnV1OufpUVDk/rHTcpshKxWofvawNPWFCCucgBWbOHEqg0i0BA3O3ffvmLOu/Xhk79SUJVmpgp5p/iHyJtSN0jPJm4hdY1zrcvyVvbdm2kXzgKegrvst9VzW/vMvfG7y/8jXmw8Y2aBPKALYmss/jED4f05Pa4Ve+VeYB9m7ELo++qfWTaxfO01tKznjp+eSehD0HyaslHtQJfUBHWGlyNIH5j2cP5E+ZIXc5RGwK3rHnXrAr/8ppunXsna4hW93/3TXQYJnZQcPPUBv9PdOH/a2AF1zDeYkZX/3jS/+LMs3032h8e2tApwqwdElo3lett30v0zr2bZnI3PMB2M7T4X2gzSA9fgSYkArlY9HI3b7ZRRzU+y1abRf2NnBrXhMRm52n7nhKPCtn3onua2FsA973YQVB/d6zu8aUe5A/eB5gWYd/Gry7yzI2Ft7vHjFdNqqAJEzGb6SPrZt33AMnkzj4rMRh5EllcCaTek1D7O59zSFDRPPre/xyH5rodk4r
mwVDVxAO41fzBiD+qC0c5mmiz5BQaQ17HdgrTTCz/aNjf5FOPFKURgrL1b6SY/Zb9oZ55haeZXGf0PwuT928b55JD+EpIRlMoYx0+qcLELBfZbl9r18SkbLaBDCzq14XAr8Ddps+lxFzCooNAE7Nj/tWBz9s+org8OTAo6x+OXRXC5IDEeBZ3gL25vDgcRgbohbMtT/1QZ7uJhBdHnVrhlNsGghRWRu52uw8uldoovQu2vJsTBzHvjqfXk7jG/OXevWuVMH6igKFgq3CeulJ7znQqN8Gze5bKD4b040S/YfjPlbDaMhNYhqBbYsALUo84DEAC0wv23hryD2BwJpjDFBp6fW2dxvCvlTNhD2SLJGGKMOchXDYv+drYu5COgsA5pn/bWSGAVLQux+ThPm6vaF7DTFgEnQ7sOUhW93rSympofp/gz1g6KMi6MGUhOPWGNQWjH9ocW2Ckj5qEY2MXBtX/Uc9yLgxPu2Ydp3b1Wxsg1+ElrdZ33zRr1+275088ZgovcS32Re+xg7siXMNPDvQ7EX8wKk30sbBrkhAqg4kR10c5fvoPQfcUJKEGpZ18s1lAshWRZIZZGiOmdG8Yh+pWOuIKom3Ti1huEW1rA26lEbVWaC0+xKbNjnuPWMI/N/i34rc/tW3m6ugmC8JZhw/jwFHZz0H9IUgYv65T3z+wY4IG+vvSbJa7SM/mDGWVDrcIjoV3v1CZWEs8QPkVRE9WLAyAv936aF/HjbjGcYXtoW+Gu+qu+nYDp1hn63wpnD/5YCX7bADSkXzE6UhH89nO66Oonu5r92QL/mZsb6WRi8I8mIfh0C5qjlrPN6wkpVh2e5GqkpccwhvVHzNH6Ec+AVrFP/O6dDbqvaL4en+cHC5lzTr7c5cIYNlB/9KUqnE/8iV2EPUuwrhRcbP2MSrIqGqDnmLIdsoI8W+Doj+tzPe33/N20zAzHroYMOkda3vUcSWFA10jtsIV9ujvR7HP0fxNIW5bs2ERhKige6SR7kiYdKkww+HtNQ6ijzKzSV0pBp+trMHir+xUoGsEuYxaYhEetfABWJ9WE8+FDm/WNN5Dt0zLhlhTzVk41X9e+4JEThNT+/6Yz9dXJltd3IwZd0SaQ50thLYBtdjyoaPTAsQUumGMeRv893bun45OfsJKpXXSthXZHj3JfzR9tCcBOKB0mj9m5fIa1V4VRxmINzNNzSp0GJUgy/ce8d37DkW3XVRyGIP2QwDIi+N/mo+m3xFhjVP1fR7fjhrcHZWSgP0mfeEoxSZXYdTsNjuY5j9V7jCDCL9sNbXeMQIVs2ZwQEOM2fICvueFKRT3fcsDfAWB/b1AZqxWw+CGIxS9n8dkRHG8tQBAplqBZb4EFDw4sHXj6yldCZrUAMlEa/9GI6yxDDGuqZmeofYnKGY98hoBSyf97rvS1LQogHuktvbcv/ntaVeFZJ3L8kLHnK3PaqRNUvttTPg7ZXUQYz9vNTf3aCjIr0o2B2h2i6Wh0u42DrANcCruzmgPfJY8U9l41Qxv/TMf51Z0Gy70HT/E0dhShreKTC2VKWggSL3/JSl3/UsOJwzhRN7vi2c8vKgrc6rQ/MX+7YZo4caBFeL+R0v5hecTEirVYS3KVSaWTkWldAG/9xzzFcx6Wa4yUJ9yV5QnT7nsr+/vZt50zl0eMP5t6TSrajAf5Ddf8FSxu+04QgNz00m51gusPb0kaHGh49QDlYYILeMDb960MjXNPXUw4Npa9N4QcwnEN57Oyd1lKXevpMF6k4H3Flrj/YfCJEd218OuiUh9FUlxJgOABZ1gIu2/O2llQ/YvxHvo1nW5cUTFtXmpQxvO8PsEcWz/MTYs3JXdS64/9z8VzSZaTPSFj2cnZL7kYXTbVSjafeEhBnR54+35jEZNTQHmMtTc0WJgAwXOZ4EbxlvFrSJI/ilbhhvPym4MmRJpG1p/c/+0iugT575mobY3xrd/W2P/G/NErsGp0VWJcC4Asdhhb4+6Zwp+aGl3XmxMIn5vQMc8NiqTV8Ywgjz5+GbFFG645fS0LEsd6ro55UMcAbstERzr+PYYD0pthl2VnpACSHnUiEAaWfTnkEO333WgiNpZn2btYd99/Cs4UjfDULcHJbXq6KzZoWQadvSQ4D3rIhHWrgtjzvcwXf67YWDsN737lnXE9h7SC1q6ifLDuHtJhqFXAYMDnU7ZzD6cFR1jrC+47LVIZ6GYH7yuuZUQTYXpP29e67eGjfAI4t3l83+Pr6pRV2UlG7TntsBhlgh5s3d1Td8mnvh/rIG4joQglJMNKNk9lePAv97VZ/TURGHtHVXs4jeg/gmsLXx8hUojkScKXdY2piLoJWu3VjQt9vdfYBQSZ2l+tOhGo3MfOvQbUx/Rbl06Pcxa0Yb2XMr6F1kJMIQGy1hHtVD5PIoQCYKaPe3xDlFYJ0i4LDTKkGPIFHpnjVOobCPQmS+Pxg24IgGbhEcfjvm8gV4X7611FmYSTJXtvX65ey7jcxeu7qxT/K0WLBgMYGi/xY8jkXpSHwEiqQjA02Np8HcPk0Z476qUayQcYAN/pVBYn1b2tC1hqPx5iLo/uxOoAPez4O0mP+Hi74ZHyxcTZ/a0pXHwuiSo7H3r9qJt25YF+baL+KxQku/FfGQ8j2o9PA96YxMCwyEe865NnCgS4NrtAT2Ff50RyagUfkaJY+4Eao1c3GO+BEzEn0ysX4lqgM4G1cVjKiMVnTPGZrXGNXp5W2eWAk/xAJgzpktlTzzA00Vc3uRdjBuG5dREf5bfdp1S+07EkGsKHlbmZrY7uB16uDwrATFRNvZrDqjrtm5AyopRhHc92wutCqsBpT7Tr00g2PfgRxbZkODHc5jqCnDg1It475j0chu7vueqXupTsU+vxX2jeaageYMDoExUw3On+GL7/AWTyYnAOwd3eWp8ADHE78htrXwouBcKBWBLaQJtXFDpQvv8h5BArUQlqcX2V27XztJ4Mcd14EfYrChYMKjyGEhrAPgRut93uqLy1gAd4Ohucv8vEygiPVC042+Xs3fb0rQvH8l5Cm5vDvld/tzYsh86BziqvWqj+zu9FkqvXYyZAg3eYS6hbnSAkmkapiU0xS2l6FM4M3EuwdiDmC1F4PLT+r3o8aNyRmltFdjkdfQNhP/+V5XM0BkXJPW1S/0Fg2mCrYLptvzFWd7Bt7TXH9NGXAJUQl4tL/mZ3wY6Rd3BLA7HmgfDlfkU9orU15M2VN5SSw/MjDZTTxeekdY2axcApfLFeAs2evrl7eC3XAQRl1EhujUPrXimKHJQFl3XShQAuMRCVGsM5HKGVmO4Qb4Q8T5zsP4oO5ZxPPgo3StMd2M4ey8YhHuJxwmVAkrsYiVFK/bWSwf4yTqXsQXew8pVVTSgnP5zyhVkw2QAg9HgB40fU9KmSnuU315llOmGWDxv2i42vi8oTazxHq2/pgHpCaq/ul/oSmD2MwO429lUiM0DADLx7oaUGVXE20n+mJ2rLzrJ8x9cSvfVRNpygciRiWkV4+p4BzaVJzDs/c1OBJV/b4aV9pRZJPhmcI17eUasCwCc9N1iH5
uX+Fmdb0HfD2wxuArrKqxesU2GNgJlZpssxDnn2l/v18k+5PerPpocfY4pM+XFE2qol1boDGHRE1A3zMMX8mNThGTq3xTHFqu4VmFhSUUrh8xoB/vIbhQ931uvHuyW1Qi+LMS2WaFp8clEQ6t9JB1MelZVLwD9sLoMcWhmZXxqMwineIO9wIbpdgJ7YUuYkCwQX+K+ro8R2Z4cf6Quc2sv8nfYEdqdPbFPDOwUbDilpOx4zLb5WknnvBJn/BJpW7hnic9Y+Qm5jTlzy2dGQGm4TX+OkDUvwlMS4Tzq6nbcfXUw37qguKEMoAvg6s0zikj8l82wRRdWIQVkHir151cISl84E/X1VT4dpX5GRpCo33PYlq1n/OBPc+ZKY5yKdjsZTYAE9qWhFkclUiGytdjsu8dqN8ToWoyGLLri4X0SsS/G1/FD70c0Tm5LV/fGDcYzwpecdnExwoxvGQyhkR2CaATAwQ5TMczEj1W22FhBYZeeBQ1dE1s0H+Wo364Auv6P19gVbu2TJyZSsFiVTX0xWLCZaqznwbPYS1QMXYRvYjKbliF2Lxe62O2dt1h0ucLeVCNl41u2uH2SJ1lR9BdSvgoUTo8GwKatVPcSqJQY8boptnXe5lcW+wxaIDMu3a6tziWPwbm2SF4xNphamq56V3Mr9+qX1TsjxlHg8un9tGoKk62iIdDGNa+dsO+kgjJO0+WZ3/vGqZ9yx/ES+YX9t6PMZi318lNJ2glYbzxim8kz2Ab/hsVlZVYM+KMga15XP7dtiWUQ0p/v3kAAbxUHUZSxCvyuMIqWDz8l1ZMmEwLacGwetflL/4tC1WHQ2BmYC6GgoT8/PlJxMGJndwLYIFbiQ3r6m6IMIgBtt3tGQ1Q6k9vc7++UhAz+gRQDewsD6gC3jFdMg26afGf1PC/vueXE5Xmz4ZlzU+kW+nf3ERuKUtK6scTD/4Zz1Kqg8EgJVjmY3cddl9s1G4jPWccQpU3I0B0XJEAJ2i9UPi9Pn5A/IaMd9u30qiYN2at5yJnVLV7A5916iftP7jHKjCnxj8+wz0T86ZPclKn8Os020v3tHyAnSEFUwsblkSDSD9Ycb4kSTI+0memckkDuPHlT1adutgKOraaRKPHwPcN4/sizkAV4/Bp+2QIoyVEvDhIVpKXXoP1+VQ+D7D9lYrDagWA616o/usf5FME/AFOrkDPjyV8u5NheOuhja2EK4w8IVjk7CCmnNY0M7grhHryEu3PEOBXrn/Ys2ypX3NE7zkJ6+32COM8iPZkoBxe3RFYPBLY9Qo8nvEM7vtVQERs7BiefWotLTmeoofF1dSj0sK0yh7IF3CDz/9wAxI+x9grEuvOwQfdGAruk98ishMUtPUegy8sE3iBVdXpg0JVuRKivXojHMSU52OVQhQe9fO925JdD5WpDvTOKNDqB7KAZoDvs/PaAr+NGINg/VSaJjYfx4gQAzwUMTYb53TufzgIUOQSY/sSvH/LPquUypHVo64014TgeBY9cU/Be7G8k/UOp5iSxDWG9jm1JLhsMee7LRArGvGSlHW9sbCMuI2dYOorN3ox27Onk8zcM7dp3esB715X8FaPtorDJt7Tmq57/BvEt+OZyXHksyPv8xXn1vwhqIip+7MT6tr4Kl2N9AU5TQSei6vjOcfI+dIff38Y8M9OvfI0yANvlYSPbvdn1t+ZJyGHrjZs601kSGXJtuO4e5feJswAtJflCnhHABoFSNG67KENg6n9MVzaHeduhMmCnE70dDTac062GCYoS/UM5Dt2Wq0rKu2TKUhVhM58MASQxhUA8O63sCYgUjv+rQGhT5bAms4Fz+ueweMuXqnCskdsRucLX2iknr08Y6Ew6TfhHDFnTKaYUn52u7bXCOjgAYv3gP2n3gheyajKLj2pOnsYbQ/vuajXjmmd0GMPiiHWgoWZuxy9PbiflcQaEUCAGfU27BKSvvXTFAT5Z3mR4WON2mTkU2JBwF+go3YXTsa3QxmkhVx03/utMwiIsauEY0tNaUeo0NUO91ngrd73F2VhTcbWZ8h8Tll24Cm0tnu65wkOq2ZvUXoHSde2H2Q6V/L2PFApT8tgJrNxgt+Lwe8lYb/DKYGNdwAlhUDTowQolTNoP8UYr+Ky3X5jDxe2ZnV2f1H0K7yH6LlP1pDq8nihffBFy5LM5X7vuuQhUpH1f/H97Y/00r3aggL/VIEnTPZ+hdghKexLgeFa2ljpLUKnjLfAPmgDhctnFnh0lQcCfbO5CHHYt1kx2o+gjNMIMWwihbnWv1l6hbv0CbNZe8An/56xFlBpFBQLp6jafOdGct9I7wHe74AYl41jSzFmnX3fGogOu3gAMmTQTxBTsOd7kTo8/g++wy2Dj4ln/7rfn5l6/rPqe7ba4wmHgHDYyeTRWVgP5Ea63/9FupFotl/uA+aRT8x6aFTeMqjsjO3vt4OgrjRW+pUO7+sdHgxKou5npGgftOSQObzz1S3rTd4+Xga59rp+4O4CFLCkIfpgvr7CBtGCeH+64nqRKi4e7bWu4R5GthaIF+94fOwuix4EYzgZhuyNw0bEel2Hue+3yfBoGppFWtxK+7irT/SlbOElyoDpjW1R35FvgB12o+g00kpEzD1evqBl54fblH0oXMXii5069SbN8ac9beH1yqGeVFhOfT1F/0WuJnqoeFEwzw+RFsbD6tlnG5qRdVK+xvpnnrQQNysfa4mmaN33Pe56F39BAA8xMdQ22xLFvK+AZB0Oxz77AMkxksmxjiURyR7vq/wkWOCZV3cnh29JVzGsnGWilI/0hajG5bz9uvKR9Ey0BMgAD6E+FWeOzKCwEWcC+cx2aW4iKK4T5NbMIlekJJBip1TwcHj5d/D5maIPApIE5oUKd9747gsD/F26GnwDEYdiw6fShwDNTf/B+lM8EGGM5GXt42u8bMRLBdz8J9GJw1/pb+0+77VLLDFnsO7R9EQzvRT1qdyVmjz8S27KFDgBT1ZIThiDeMugIt2czvDFhOCUSRTNOVUep5mdF6HyZpsgcEMpeTy9TUBjINg1ji9ljyPI70sFAY2t9Odvs2P7cU8c6urKxIwDnCoqvYKRVc3ZeT/v0RrihBRi15KB7lYfVIAO4cJ+I1CXhqd0mPWD7rWdZ72v7OhAQxLEHvcTeWBv/fNk309LgmtFQDzEaRgQ4EgmkWFhiBCDpUK/Cg9icmHS/WOAJCZ4vVx3fWhr9eOseqI5g3+bF/h0NUADA5nJODnR94boWsCTFwpYQ+ifJuRE7JEGd2hwVsAChgXDzTc1sKXJXR7jB1pjyoQt+h0JYGlUKMX/N0/ByMr4RbD8beXWwjRZolWuMyrzGEfz0Jxb0BDrun3cRySu+ocZk9mjm6gcLtYQ8mGVFSRiSCK9Z2NYdV1R4JnEQ5AdFlvtEhwLfiKgpTJWaC907DZue3iCH6MrE+7j2c3emLimmKwbSjZeWsyLIcgUsc5kUZmTFZnWSmNra5Z6ephKv1+8LQZ+27WFV4ZtOkgaGYaAogmkP+ds2TwL2THYh6CveIKWpu7dw4B0SCce6cDq/I+40h3jw4
L9Armxzz+unIffr3jrX3Pl2ERi/vrUOUJkFvRoHYgSpkNvMWKwqWatRDZaVQYoeIXIzh1qBZY/dlqCIzM6FIVA1V0p9JHm7O+d6YUSfKsOgf2NFHV8N719PYn3PFibaf9ro8GAwDIVJd0k1myIsJxXOGTvUXMKW+d6XILtB1Yk23LnHgdorVUmfnL5FtPUWOQNGbnGTuDCwaumcNC4rWv/rsOXHEKWdvedmFTMPRsGc8VRWKAZtoxBcaJ80in8LTCD8VNhNhDug82yk0Gz75my43vLnLeBCeevsmCZ6reFjE//q9tR1v0jvuMaMcx7h8zKK+UHTPnEgIlfpZztR2rHypbEc5Vuh1QL6DJrTDjLU776peXexv4PYcy+pPrSFDJP1OA/HMaq7hzUKjmP2zF7b7YbLt9FXQ+ESgoyhg0/cLjMq3CujV/9PRLovg7DjQTC3IYBA9InL0aLyVZGDqCf+1oxwXk2U9ECErqAEySwetl/7o4g8V83LxclMSxf4ktruL9OZCtWloR9eKE76lLpIg5FEG8bVxDXsEs0NCLgZoJvWX4JwMetK0fwzVzB9bbqlczNhbaA2ATnZsjlAE33/UQIGkXLoRQHuGteX1zI2ee7j7jutxk/rJ0c7mkJyn1bEm7h1WgJESvljij2ae4ZnebKN5UIRZ6s1l+xQfR17l1IXnx3xC36R1NX+7Ip7EedIavu9lKy1wCeBL6nyoNseOr8NKuxhfMdl7EzKU/E2YE6pUStMfktMwHQ1c/7v3mTYoil8QbfbpC9PdZBtcbTk+kdfeNS/prKbbKZ+/hu28+1g282W2bnd+StcLfpa9yTtYy6qC5YzgjTFYJiYZ+FWrsfwP3uUdjRLLSt9qVUA0W1cIBBnsM+WzLnOS+wmWD1+fNEJm0hItr17rbVil9MoYqxe8Lj0YSAPhvPHd1Jk/dpVuZTAOcWiBgcVZb/hC/ufk4UQAe0G7vDWitBRzKJrFn6LQy3Q+DfSEemnDmgEmHFNOWdsC7/Gii43Yz6K387QF5Tv0j+sg/2w7R4ZhRxkC24KHdNgVwRu2fqhzRJCnNlau+d1LTaUfPbnGaC+JGXFwiPY7MTlfawdoCxFuowG6IWOsWg0J+CqgfygruSuGfAPLYckVyqgorvoH+YmuJPK/jjQvH2Ewymq2lPiIB8og/BWHoD6Pss9Y0tzoiBHt8uAeuHoy4eluwhIHbo/SW+4dBdbrmNQN7KAfgLM1LJszjyrZMghOuDkVuQyljrDiit0fXfZgY77XsQFgljd127mKRB/KsOn3fuHjHNJcpo/UtD5i1ceauwrePHlOJreVFrC+KitZYwUljoeSVL25QBjBPWBZXU1iC62PrDTvBp9LEBvrrj/+awcI1DnK8fIySSjBATQaQ0jfYA50zUholcL+xfmwGEALjVjPbDz6TrHBkDLwuwQEX3v9Ci7FR+P65DdHSwWHgw0rmqxWH0cx++GnhGj13vZ6N5GPhjBsy9KAr2PEiAZdcPQYkMfqwvOkIl7pJ5ZTImk/WFnkRuA/ERkzjM/d7XQVGOKTMLZlhFdtCmuKECCE+v+zgs/gshg8xHmd3RcmvVvsj8rAkATID2Cezl3J1tzQYf5LXgYNPLGL4OP3ShOZ89+xBHNTzn22ns2rHa1BAJIPiJfrMFF2OmBX5W/gJPyF5kNtcoD7xvRcwb3x2MkxBXVcBdN1Uu4RfZLnXhv9ObBNVxYJJuEH9gxY4qn9r+alyadorH2bDg058PvDX7gBzob2OUntUu8vcbTCcZZVkC09DRLnlcftUh2gu93cAuGfEYnkOHlJa5Q0p0fenCMsdvuv/uGz1LGe+9/ZWISqUECqX7KWNd/e3VRYbm9NP/VEpCGLuOZgoVMw3JWZx618Vbu6SNh4dRKXO6naCWrr5GNHgEoXrW8A1rIlCBErtOrH4ykYam+TzbO4x16xJS5ZhND3i16Ptim+8wnccvgm45v8XeKbwfl4ym6Sctucf3/tyDyh8i9pxZVVv3qxjq3gSx7VyQHkhpF5kxcgkmjIj+e9IBEZ5nwCbnEM0ncIbgfEjOlokYdkMk4QWz719GFWmv2h/Oh5XZ11oplejNFoM1hwEkoFncQgjDVTwyAomtJ9RoMLh3Arh3oIAoElF8ZSmpqZcoQORSmT9Zl9X/knfrjh1oaGiOKsNURIE7A9hR0cNtAHWX2inYxQY/p4k/zsifkxCvI10NQ7GYjyq2OpfqNljdgQoGVnsGeoibX+NAh24Wf6F5SMGA7eY3cGWS+6i1bUd7WqR52sWNT1Rplfh6UJgjw7LQ0M1dLG47/77mLRdkGPjhZpNZsHAF/9koICfZuwh/+hz7ZnXKYxzino4IF0O85K7LyWCuok08AovOkhVm/NCvprGmJYnmIL8IR2IPS7tU/0xfggN9dzGG3ma9Z6C2L9FiP14KhDs1a0rbPKcUkUmGiNfoYvoXQRCw6lc8AqHJ0WF/Zt0hLkfVpOymJxcezkv5YLyYcJ367hTFkDUKtgLXvTuI55nZGna1u2UtUZ2Guky6t08Y/uzP4CXuZ0FY5qOXT0nAH2/eNV97BfNbTOlbk6SSUg6dONl3PNpheUAFo77kcm1/C0U+8yMCIgzSvm9T5D+Eilg8r4y3wFgS+3ijTPSJL2jxw0u/RIEb6LjnXdtK/bm7VKTyjKC9pD6X+0N5XBN47MYoavSPuf2kuxWvF5TM1wdaenDbbaQifQDVt/7NqLVjnGH2/76nGKcE9zBJwcJJItl7MgdruKmE6Nd/+I5dxOiOm/JKI309zl5gN7nCHZnkPDr0+AZE3YNj4RRYwbro353NAJ8s20LswBoF5+DRQx6erNR/+09oU9mp/NK7er3Bn8RZd5cVl2ZJ1JILKjBUC90zmtU3M2t8TtdAFa1DvC89v69eic/m2fUxU8N9S699/futJk79ysb2n0B7P1dyQJYS6LiUwchyKArvB1NofP5Yd679kNdxYCzzNSCsg1co6vbHe6AoVAK9K3yn7zKckFr48IaFxjiAKuniisHkFwgjP6knr0+JVOc+Yn2TYnI+/4L/J3tm7s5bqdFKeAf9sTp3uo/hHK2DxScy/SLuN+n21RfXGYoSRac/lctDq/s+gKcSch+zk8nqjJ6ve6Q9WHZ4Bd5AfOpmEck/FoHAipk5ahPENUhAH/+tW0ZaeBSaOgWwAuVjZUHlabJiuEMMjB4BGfX71YbGDRlaZEPTQ68qgbPMtidUtPs30DUpgu34loqpGfhiU04d6it6eNyDAd8AnhPymWCmt5xFHHb9OMEc32hNjV+CleWVoOYghK6fhVEblxm107doTwgaqnrHSNVaVtgiR3lni8EbPUAR4ohC1WBn75qtKgzxhQweX9fILho2vTHOKBZ+OPUXeDqgQIsRviyYN+b0Lzs+4EyE4zWVJQznsnh3C0zjDA72TwpQKXx85pfAqnzHfKiM9qaZjZ9TnTKbcrjHUn6nwwAsD7MK7lm7b54fHg+0R82MGkztVLaoiUGAgPMAIOO3qQKlSJS3ZvAnSKG9Vw6TvVfipOzAIjm8eaD98
Jh8z7IMDeICud5jpZjiMRk2xUvDej+JwifppmNVGdZ4uHuaPDPvBhxH8uDf38avdbjYQCXA1HPytqKy0aACPa6wUdNHO3ribLrCxapsAivYQnFX9AuS17JhV2Xr1E7Yygs+TL+OErKzSA3ChsThkwSUrZ41rE+JB6xPfC6gdlltiAQ/Ra9J8Z3Det9Zwutt4lY36g6g8M828ibwAHD8TgaUwmyfXmIj+BfEsqnF4L5b0PdnfZ9A9Y5PluaC/je08934x0fKvKkp28w630eP49l7xPsXlxJS3Z74psreoqtdX0zKDPpoNd684/eNDToV0TdfFh5QzR4u2SzfOHWVoe9eytCJXlUF/N4EUDcipIoJiPH0rcOKnE1TKfKEbd+mmlsczJtzNVQbnGnsWCHkNgmDgLmweIp8XeZD3Pf5zztDwhV3Kt4seMYUSul0nhn7KaOJCm35GylH9vwtoVAZpCU3WGEMnQnzPivH67/9oiCOUydMXcAs2T1hK07AGULKcefxa/59A0pgEua4DnbklE+lG8subSEBpBoZfITlUDiEyJSVTHuoA8QJx4moTBHhhGFRae4CxfjN2Ere0HWkCKw1ywiKyDTnsC+T05BoG4kJPRsOZrNsNMj2aMaMAhrz//+8SFIil26g8RQAU1l4LiCuNbNPLn8LWUaoMAHuq3yhVz1LHrAdnvsnugh48OBmY4qOJDj0382VZcqU7VXxrEDBzgbjj9EDAeRoI9LDP+N9gPz7D37VTzVc20yniKmdtvHsvTkm1s/GGo3GbnNZtkUuezh87kiLYWY8Y/10L0GEvactw4ufSJjXT56VL1iPrG6D53VqpQelDnRuoA8dydI08I66MkSrb5MPxvg6URypj5uvbRt/P7jcOdZKLJwPZQhqaqdbiXDuITuvDZb6zAK564n3cwwvtv7YEAWheTJZkI9xY632fMoCidr7GXfJtOqXctdplVOtc4r5nBHa8Q8+OBX2kKPVTuuaycdWQlN9wGZvdTnmySqPhthI1n2NBsf5CNlgcsgsfkNY7a7opa3CRXYRj5W4vez/pAGk7JmF4F6e/57J3852cSn/OVSsozP9Pm11i7kH5wPBOMGEDvnUxcjUBTHnl5RN4TuvnUzQ+jFQl4RkGNi0pvyyiBDY48maHxeeomKnhe9fwGBzDokYv0lMEXnaAwAC4W993hbu3sP8fJVK45xHFf3Mkosx1IfIeSMRDceb3gOMrUjrY7KuKSpgBWDSFTJfU+CPSfiW4htOHQRc5A4qsNhGDYRfYvfxChJqCF1XhrDYMSmcwzOB6BzV0LvlxdTXWayAJznRQNwV1+g/PWHQvwi9x1iAfyejG8ERYlwNkRvFJ0oXBIaQn6QOjgWdksxJmXzBEXceRaRLu9Tjf4m2cmdew1YRqh80MHGDaR1PSyj9RsSB0kFcI/Fk/J9uo5ErJPiu8l9vsNy5LP1UJ6aztG6GmpQzibht+Ibw6Id/py5AmLPpVWJfIba00rpDiCxpCqe2FX1cUZaLZdML47CzTNQa4t2AzhVDRDIUBme1TPdll4ze4BRXlPSUQxjAKy3xfxEf0+o6xMB+bJMS7CtxfKC/ywwvXgidJ1tpTsr/p1XafMpUf0nwjXOhI7I6+7Mq9lDnQEd3egaK4eNsIJDilfWQuRgLKIAmHawIekEAMNmhlOFTBVkqDKmfbQldfw56nqmC9VsXViAOFKgI8xigvohK6/VJmXkbSP2YNXjXabE7oGMOtQJf9OyerEU4IUAr7t91jEgnaVlFSKQAkeB41nkUPfIC8MnHAzenbgebG/5TJ3lvZT6UrvtSgovyEXx3HrPUYPu+qRRHjL0Pzhs8jAaGgJxnFFt5c9SlPijPXUM8obpxtzbpoz7WQqxcC++3NYAqhSAt84rVe2YpCLnQZI7bujcG2XfThthdP8P2MYNRc8W8/UhHXGIHxf+BWAIt8rhdvCL7WN5aSTwHm1pzJgqxr6imcHINQJHrDQmj4PeTgSJPIPM+FVdFIISAoEvTS/tFZaKtt+poEJXIYaoM7hqyJx8WO6GRPa+Cgb3CqW9RLoaMQx8XQGF3bWrz2f5dovbIDP5hcO+Xqv5m2yXU7nJh2qxxxxBoQg0l6CfRQhfP3cWrpEbgSq4vZGpQWfgQ4OcCzbBIDmSB3QvO1Kmf8T1dV9XatKkXuL3T6g1Hp6newzesRLV9cNg5u7CY7nNk/EGsgWEEiPb8OkCx74QZhFOUok6V8Q5Y/MSLC9/vMQZwxMyCTOZWItn2cWQDzshjuER9wSmiXHLfmWYyqBz2FfnqkYoBlOeeb+tQ2NrcxeEt9Pc7QKdk68NPgrDolyxivfLVO6MRayDAjnfW2of9GyJ4wM/rYZfP435DJ3O3Q3Nd/IXdVYbBIGBDswDUxHtTMAUO1Quz/aVsmbhYx24jNP4z0xydfbmGddW631sB/9DzURomIvuf0v3b2vFQhmWEkaTAe2rf4mixmT3HWBv0VuNIdxk0M7IxU4QdyNxoBcuPNfgVn/4FaMZKTZj2LlNqsr54jVM4+4QlROJKozLICniErI5EBFKpXsdOw+BsTffVLwvy0LEL5MOOGggDn7tyrYaB2NUdVVUPjpV5tR2Tbn4tkIVi35z0RolUFQRENAUXBp7lvSwjVpxSVXlC8KgqXfQtSqINTx4RrWarGG0x51ttp/vMO9qmpHd0ORtjUj/F63vf4WqWB4TPZo0w+eJywG6gG4HKMD8vf4c/z0I72+WMrg772yqIOprRpIXn4yrg3TbOarg9mHDY342G3IMQ84YMtSmJptRdvnwmsIDdPCJhIJVyg+k1lEPw0HhyAtxALbSe5NT322f7b2L/9NyQBBMtJqnA+qFUfDbh2cMgS373tuHfGI1vybFSHQgS49mNaH8XR+O3L5y0k/JKJnxwZsJwG54MPUmufUE2JQTPR14UBGgRaCfHVGjmEUTid6mz5QX3xf5hhVF9PZ67PtYCQgu/b0WpyNbGvj9p7oaww5PXfvww8ccVgb01KszbtO6ppHcjA/GG+pbETxqWuybtZbkv2fu47A+8KieAP5x5wHfD9E3xK69XzjFqCuOnvRdigQYv1ze17csqrvZ7aCLnBpMHllDlWYdTqrZCiHuIRcxkMKsgtrt3he+5C59p285/m5J1OZZTrPUtHBVxwlazKcFxoAmbwcMosmr4ZmOF3tG9E32Bz/9mkT6uQKGHmHzTjwfblLuJSI7Cjqw3s+FT3l7WudrUdaLuVajz6HcCu7xjGD0K2C8porbpFVgHWBGEd1YvOcTieumav44scIdcdd0Pel9A/qhtOyafPwC5kq+wIRvdbm68C+ioacb3TfJfgadi9J/cN6qF0/ROa6TchGHY7Fct8Mazke5i7EhGaE8smLN15K/66Nae2wh879FdXu9RFIbZcM9Q0jFrTv0R8WxO9gW4GVddU2qXcx3TBWo9BRWvivfTjHKW9MUA/73RkraugwBrU7H3OMQW5pzIQ8XtTiPqj/ZdviXzQR5+ZTkV24h6WDTCqlYuK7snEz/SsMRJgvTUKpR/4ERDqW7R
\ No newline at end of file
diff --git a/deps/github.com/ledgerwatch/interfaces/txpool/README.md b/deps/github.com/ledgerwatch/interfaces/txpool/README.md
new file mode 100644
index 0000000..0ce8842
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/txpool/README.md
@@ -0,0 +1,45 @@
+# txpool interface
+The transaction pool is supposed to import and track pending transactions. As such, it should perform at least two checks:
+- Transactions must have correct nonce
+- Gas fees must be covered
+
+## State streaming
+For these checks to work, the pool must also track the balance and nonce of each sending account.
+
+When importing a transaction from an unknown sender, the transaction pool can request that sender's balance and nonce at a particular block.
+
+To track known accounts, the transaction pool connects to the Ethereum client and receives a stream of BlockDiffs. Each BlockDiff represents one block, applied or reverted, and contains all the information the pool needs to keep its accounts up to date.
+
+For applied blocks:
+- Block's hash
+- Parent block's hash
+- New balances and nonces for all accounts changed in this block
+
+For reverted blocks:
+- Reverted block's hash
+- New head hash (the reverted block's parent)
+- New parent hash (the reverted block's grandparent)
+- List of reverted transactions
+- Balances and nonces for all accounts changed in the reverted block, at the new (parent) state.
+
+BlockDiffs must be streamed in chain order, without gaps. If a BlockDiff's parent does not match the current block hash, the transaction pool must make sure it is not left in an inconsistent state. One option is to reset the transaction pool, reimport its transactions, and re-request state for their senders.
+
+## Reorg handling
+Simple example:
+
+```
+A - D -- E -- F
+ \
+  - B -- C
+```
+
+The transaction pool is at block C when the canonical chain reorganizes to F.
+
+We backtrack to the common ancestor and apply the new chain, block by block.
+
+The client must send the following BlockDiffs to the txpool, in order:
+- revert C to B
+- revert B to A
+- apply D on A
+- apply E on D
+- apply F on E
\ No newline at end of file
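The revert/apply sequence above, combined with the gap check from the State streaming section, suggests a small consumer loop. A minimal Go sketch follows; the `BlockDiff` struct, its fields, and the `Pool` type are assumptions for illustration, since the actual message definition is not among the files added in this commit.

```go
package main

import "errors"

type Hash [32]byte

// BlockDiff is an assumed shape for the streamed diff described above;
// account balance/nonce updates are omitted for brevity.
type BlockDiff struct {
	Hash       Hash // hash of the applied (or reverted) block
	ParentHash Hash // hash of its parent
	Reverted   bool
}

// Pool tracks only the current head here; a real pool also tracks
// per-sender balances and nonces.
type Pool struct {
	head Hash
}

// Apply enforces gap-free, in-order streaming. An applied diff must extend
// the current head; a revert must undo exactly the current head. Anything
// else means the stream has a gap and the pool must reset.
func (p *Pool) Apply(d BlockDiff) error {
	switch {
	case d.Reverted && d.Hash == p.head:
		p.head = d.ParentHash // new head is the reverted block's parent
	case !d.Reverted && d.ParentHash == p.head:
		p.head = d.Hash
	default:
		return p.reset()
	}
	// ...apply the diff's balance/nonce updates here...
	return nil
}

// reset drops tracked state; the caller should reimport transactions and
// re-request state for their senders, as the README suggests.
func (p *Pool) reset() error {
	p.head = Hash{}
	return errors.New("gap in BlockDiff stream: pool reset")
}
```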
diff --git a/deps/github.com/ledgerwatch/interfaces/txpool/keep.go b/deps/github.com/ledgerwatch/interfaces/txpool/keep.go
new file mode 100644
index 0000000..b3de5fe
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/txpool/keep.go
@@ -0,0 +1 @@
+package txpool
diff --git a/deps/github.com/ledgerwatch/interfaces/txpool/mining.proto b/deps/github.com/ledgerwatch/interfaces/txpool/mining.proto
new file mode 100644
index 0000000..cdb3a3f
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/txpool/mining.proto
@@ -0,0 +1,103 @@
+syntax = "proto3";
+
+import "google/protobuf/empty.proto";
+import "types/types.proto";
+
+package txpool;
+
+option go_package = "./txpool;txpool";
+
+message OnPendingBlockRequest {}
+message OnPendingBlockReply {
+  bytes rplBlock = 1;
+}
+
+message OnMinedBlockRequest {}
+message OnMinedBlockReply {
+  bytes rplBlock = 1;
+}
+
+message OnPendingLogsRequest {}
+message OnPendingLogsReply {
+  bytes rplLogs = 1;
+}
+
+
+message GetWorkRequest {}
+
+message GetWorkReply {
+  string headerHash = 1; // 32 bytes hex encoded current block header pow-hash
+  string seedHash = 2; // 32 bytes hex encoded seed hash used for DAG
+  string target = 3;  // 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
+  string blockNumber = 4; // hex encoded block number
+}
+
+message SubmitWorkRequest {
+  bytes blockNonce = 1;
+  bytes powHash   = 2;
+  bytes digest = 3;
+}
+
+message SubmitWorkReply {
+  bool ok = 1;
+}
+
+message SubmitHashRateRequest {
+  uint64 rate = 1;
+  bytes id = 2;
+}
+message SubmitHashRateReply {
+  bool ok = 1;
+}
+
+message HashRateRequest {}
+message HashRateReply {
+  uint64 hashRate = 1;
+}
+
+message MiningRequest {}
+message MiningReply {
+  bool enabled = 1;
+  bool running = 2;
+}
+
+service Mining {
+  // Version returns the service version number
+  rpc Version(google.protobuf.Empty) returns (types.VersionReply);
+
+  // subscribe to pending blocks event
+  rpc OnPendingBlock(OnPendingBlockRequest) returns (stream OnPendingBlockReply);
+  // subscribe to mined blocks event
+  rpc OnMinedBlock(OnMinedBlockRequest) returns (stream OnMinedBlockReply);
+  // subscribe to pending logs event
+  rpc OnPendingLogs(OnPendingLogsRequest) returns (stream OnPendingLogsReply);
+
+
+  // GetWork returns a work package for external miner.
+  //
+  // The work package consists of 3 strings:
+  //   result[0] - 32 bytes hex encoded current block header pow-hash
+  //   result[1] - 32 bytes hex encoded seed hash used for DAG
+  //   result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
+  //   result[3] - hex encoded block number
+  rpc GetWork(GetWorkRequest) returns (GetWorkReply);
+
+  // SubmitWork can be used by external miner to submit their POW solution.
+  // It returns an indication if the work was accepted.
+  // Note that an invalid solution, stale work, or non-existent work will return false.
+  rpc SubmitWork(SubmitWorkRequest) returns (SubmitWorkReply);
+
+  // SubmitHashRate can be used for remote miners to submit their hash rate.
+  // This enables the node to report the combined hash rate of all miners
+  // which submit work through this node.
+  //
+  // It accepts the miner hash rate and an identifier which must be unique
+  // between nodes.
+  rpc SubmitHashRate(SubmitHashRateRequest) returns (SubmitHashRateReply);
+
+  // HashRate returns the current hashrate for the local CPU miner and remote miners.
+  rpc HashRate(HashRateRequest) returns (HashRateReply);
+
+  // Mining returns an indication of whether this node is currently mining, and its mining configuration.
+  rpc Mining(MiningRequest) returns (MiningReply);
+}
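For reference, here is a hypothetical client call against the Mining service, assuming stubs are generated from this proto with protoc-gen-go and protoc-gen-go-grpc into a `txpool` Go package; the import path and listen address are illustrative.

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Illustrative import path for the generated stubs.
	"intent-system/deps/github.com/ledgerwatch/interfaces/txpool"
)

func main() {
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	miner := txpool.NewMiningClient(conn)

	// Fetch a work package: pow-hash, seed hash, target and block number,
	// the four strings documented on GetWork above.
	work, err := miner.GetWork(context.Background(), &txpool.GetWorkRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("mining target %s at block %s", work.Target, work.BlockNumber)
}
```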
diff --git a/deps/github.com/ledgerwatch/interfaces/txpool/txpool.proto b/deps/github.com/ledgerwatch/interfaces/txpool/txpool.proto
new file mode 100644
index 0000000..b43a49c
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/txpool/txpool.proto
@@ -0,0 +1,92 @@
+syntax = "proto3";
+
+import "google/protobuf/empty.proto";
+import "types/types.proto";
+
+package txpool;
+
+option go_package = "./txpool;txpool";
+
+message TxHashes { repeated types.H256 hashes = 1; }
+
+message AddRequest { repeated bytes rlpTxs = 1; }
+
+enum ImportResult {
+  SUCCESS = 0;
+  ALREADY_EXISTS = 1;
+  FEE_TOO_LOW = 2;
+  STALE = 3;
+  INVALID = 4;
+  INTERNAL_ERROR = 5;
+}
+
+message AddReply { repeated ImportResult imported = 1; repeated string errors = 2; }
+
+message TransactionsRequest { repeated types.H256 hashes = 1; }
+message TransactionsReply { repeated bytes rlpTxs = 1; }
+
+message OnAddRequest {}
+message OnAddReply {
+  repeated bytes rplTxs = 1;
+}
+
+message AllRequest {}
+message AllReply {
+  enum TxnType {
+    PENDING = 0; // All currently processable transactions
+    QUEUED = 1;  // Queued but non-processable transactions
+    BASE_FEE = 2;  // Non-processable transactions whose fee cap is below the current base fee
+  }
+  message Tx {
+    TxnType txnType = 1;
+    types.H160 sender = 2;
+    bytes rlpTx = 3;
+  }
+  repeated Tx txs = 1;
+}
+
+message PendingReply {
+  message Tx {
+    types.H160 sender = 1;
+    bytes rlpTx = 2;
+    bool isLocal = 3;
+  }
+  repeated Tx txs = 1;
+}
+
+message StatusRequest {}
+message StatusReply {
+  uint32 pendingCount = 1;
+  uint32 queuedCount = 2;
+  uint32 baseFeeCount = 3;
+}
+
+message NonceRequest {
+  types.H160 address = 1;
+}
+message NonceReply {
+  bool found = 1;
+  uint64 nonce = 2;
+}
+
+service Txpool {
+  // Version returns the service version number
+  rpc Version(google.protobuf.Empty) returns (types.VersionReply);
+  // FindUnknown preserves incoming order but may change the amount: unknown hashes are omitted
+  rpc FindUnknown(TxHashes) returns (TxHashes);
+  // Add expects signed transactions and preserves incoming order and amount.
+  // Transactions are added as local (use P2P to add remote txs).
+  rpc Add(AddRequest) returns (AddReply);
+  // Transactions preserves incoming order and amount; if a transaction doesn't exist in the pool, nil is returned in its slot
+  rpc Transactions(TransactionsRequest) returns (TransactionsReply);
+  // All returns all transactions from the tx pool
+  rpc All(AllRequest) returns (AllReply);
+  // Pending returns all pending (processable) transactions, in ready-for-mining order
+  rpc Pending(google.protobuf.Empty) returns (PendingReply);
+  // OnAdd subscribes to new-transaction events
+  rpc OnAdd(OnAddRequest) returns (stream OnAddReply);
+  // Status returns high-level pool status
+  rpc Status(StatusRequest) returns (StatusReply);
+  // Nonce returns the pool's nonce for the given account
+  rpc Nonce(NonceRequest) returns (NonceReply);
+}
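As a usage sketch, adding a locally signed transaction might look like the following; it assumes stubs generated into a `txpool` Go package (illustrative import path) and an established gRPC connection, and relies on Add preserving order and amount so result slots line up with the input.

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	// Illustrative import path for the generated stubs.
	"intent-system/deps/github.com/ledgerwatch/interfaces/txpool"
)

// addLocalTx submits one RLP-encoded, signed transaction and maps the
// ImportResult in slot 0 back to it (order and amount are preserved).
func addLocalTx(ctx context.Context, conn *grpc.ClientConn, signedRlpTx []byte) error {
	pool := txpool.NewTxpoolClient(conn)
	reply, err := pool.Add(ctx, &txpool.AddRequest{RlpTxs: [][]byte{signedRlpTx}})
	if err != nil {
		return err
	}
	if len(reply.Imported) != 1 {
		return fmt.Errorf("expected 1 import result, got %d", len(reply.Imported))
	}
	if res := reply.Imported[0]; res != txpool.ImportResult_SUCCESS {
		return fmt.Errorf("transaction rejected: %s", res)
	}
	return nil
}
```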
diff --git a/deps/github.com/ledgerwatch/interfaces/types/keep.go b/deps/github.com/ledgerwatch/interfaces/types/keep.go
new file mode 100644
index 0000000..ab1254f
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/types/keep.go
@@ -0,0 +1 @@
+package types
diff --git a/deps/github.com/ledgerwatch/interfaces/types/types.proto b/deps/github.com/ledgerwatch/interfaces/types/types.proto
new file mode 100644
index 0000000..2c8b80c
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/types/types.proto
@@ -0,0 +1,130 @@
+syntax = "proto3";
+
+import "google/protobuf/descriptor.proto";
+
+package types;
+
+option go_package = "./types;types";
+
+/* Service-level versioning shall use a 3-part version number (M.m.p) following semver rules */
+/* 1. MAJOR version (M): increment when you make incompatible changes                         */
+/* 2. MINOR version (m): increment when you add functionality in a backward-compatible manner */
+/* 3. PATCH version (p): increment when you make backward-compatible bug fixes                */
+
+// Extensions of file-level options for service versioning: should *not* be modified
+extend google.protobuf.FileOptions {
+  uint32 service_major_version = 50001;
+  uint32 service_minor_version = 50002;
+  uint32 service_patch_version = 50003;
+}
+
+message H128 {
+  uint64 hi = 1;
+  uint64 lo = 2;
+}
+
+message H160 {
+  H128 hi = 1;
+  uint32 lo = 2;
+}
+
+message H256 {
+  H128 hi = 1;
+  H128 lo = 2;
+}
+
+message H512 {
+  H256 hi = 1;
+  H256 lo = 2;
+}
+
+message H1024 {
+  H512 hi = 1;
+  H512 lo = 2;
+}
+
+message H2048 {
+  H1024 hi = 1;
+  H1024 lo = 2;
+}
+
+// Reply message containing the current service version on the service side
+message VersionReply {
+  uint32 major = 1;
+  uint32 minor = 2;
+  uint32 patch = 3;
+}
+
+// ------------------------------------------------------------------------
+// Engine API types
+// See https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md
+message ExecutionPayload {
+  uint32 version = 1; // v1 - no withdrawals, v2 - with withdrawals, v3 - with excess data gas
+  H256 parentHash = 2;
+  H160 coinbase = 3;
+  H256 stateRoot = 4;
+  H256 receiptRoot = 5;
+  H2048 logsBloom = 6;
+  H256 prevRandao = 7;
+  uint64 blockNumber = 8;
+  uint64 gasLimit = 9;
+  uint64 gasUsed = 10;
+  uint64 timestamp = 11;
+  bytes extraData = 12;
+  H256 baseFeePerGas = 13;
+  H256 blockHash = 14;
+  repeated bytes transactions = 15;
+  repeated Withdrawal withdrawals = 16;
+  H256 excessDataGas = 17;
+}
+
+message Withdrawal {
+  uint64 index = 1;
+  uint64 validatorIndex = 2;
+  H160 address = 3;
+  uint64 amount = 4;
+}
+
+message BlobsBundleV1 {
+  H256 blockHash = 1;
+  // TODO(eip-4844): define a protobuf message for type KZGCommitment
+  repeated bytes kzgs = 2;
+  // TODO(eip-4844): define a protobuf message for type Blob
+  repeated bytes blobs = 3;
+}
+
+// End of Engine API types
+// ------------------------------------------------------------------------
+
+message NodeInfoPorts {
+  uint32 discovery = 1;
+  uint32 listener = 2;
+}
+
+message NodeInfoReply {
+  string id = 1;
+  string name = 2;
+  string enode = 3;
+  string enr = 4;
+  NodeInfoPorts ports = 5;
+  string listenerAddr = 6;
+  bytes protocols = 7;
+}
+
+message PeerInfo {
+  string id = 1;
+  string name = 2;
+  string enode = 3;
+  string enr = 4;
+  repeated string caps = 5;
+  string connLocalAddr = 6;
+  string connRemoteAddr = 7;
+  bool connIsInbound = 8;
+  bool connIsTrusted = 9;
+  bool connIsStatic = 10;
+}
+
+message ExecutionPayloadBodyV1 {
+  repeated bytes transactions = 1;
+  repeated Withdrawal withdrawals = 2;
+}
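The fixed-size hash messages above (H128 through H2048) split values into hi/lo halves. A hypothetical conversion for H256 is sketched below; the big-endian, hi-first byte order is an assumption, as the proto itself does not specify one, and the import path is illustrative.

```go
package main

import (
	"encoding/binary"

	// Illustrative import path for the generated stubs.
	"intent-system/deps/github.com/ledgerwatch/interfaces/types"
)

// H256FromBytes packs a 32-byte hash into nested H128 hi/lo pairs
// (big-endian, hi-first by assumption).
func H256FromBytes(b [32]byte) *types.H256 {
	return &types.H256{
		Hi: &types.H128{Hi: binary.BigEndian.Uint64(b[0:8]), Lo: binary.BigEndian.Uint64(b[8:16])},
		Lo: &types.H128{Hi: binary.BigEndian.Uint64(b[16:24]), Lo: binary.BigEndian.Uint64(b[24:32])},
	}
}

// BytesFromH256 is the inverse of H256FromBytes.
func BytesFromH256(h *types.H256) (b [32]byte) {
	binary.BigEndian.PutUint64(b[0:8], h.Hi.Hi)
	binary.BigEndian.PutUint64(b[8:16], h.Hi.Lo)
	binary.BigEndian.PutUint64(b[16:24], h.Lo.Hi)
	binary.BigEndian.PutUint64(b[24:32], h.Lo.Lo)
	return b
}
```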
diff --git a/deps/github.com/ledgerwatch/interfaces/web3/common.proto b/deps/github.com/ledgerwatch/interfaces/web3/common.proto
new file mode 100644
index 0000000..e28a6fb
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/web3/common.proto
@@ -0,0 +1,80 @@
+syntax = "proto3";
+
+import "google/protobuf/empty.proto";
+import "types/types.proto";
+
+package web3;
+
+message BlockNumber {
+  oneof block_number {
+    google.protobuf.Empty latest = 1;
+    google.protobuf.Empty pending = 2;
+    uint64 number = 3;
+  }
+}
+
+message BlockId {
+  oneof id {
+    types.H256 hash = 1;
+    BlockNumber number = 2;
+  }
+}
+
+message CanonicalTransactionData {
+  types.H256 block_hash = 1;
+  uint64 block_number = 2;
+  uint64 index = 3;
+}
+
+message AccessListItem {
+  types.H160 address = 1;
+  repeated types.H256 slots = 2;
+}
+
+message Transaction {
+  optional types.H160 to = 1;
+  uint64 gas = 2;
+  uint64 gas_price = 3;
+  types.H256 hash = 4;
+  bytes input = 5;
+  uint64 nonce = 6;
+  types.H256 value = 7;
+  types.H160 from = 8;
+  uint32 v = 9;
+  types.H256 r = 10;
+  types.H256 s = 11;
+}
+
+message StoredTransaction {
+  optional CanonicalTransactionData canonical_data = 1;
+  Transaction transaction = 2;
+}
+
+message BlockBase {
+  uint64 number = 1;
+  types.H256 hash = 2;
+  types.H256 parent_hash = 3;
+  uint64 nonce = 4;
+  types.H256 ommer_root = 5;
+  types.H256 state_root = 6;
+  types.H256 receipt_root = 7;
+  types.H160 coinbase = 8;
+  uint64 difficulty = 9;
+  uint64 total_difficulty = 10;
+  bytes extra_data = 11;
+  uint64 size = 12;
+  uint64 gas_limit = 13;
+  uint64 gas_used = 14;
+  uint64 timestamp = 15;
+  repeated types.H256 ommers = 16;
+}
+
+message LightBlock {
+  BlockBase base = 1;
+  repeated types.H256 transaction_hashes = 2;
+}
+
+message FullBlock {
+  BlockBase base = 1;
+  repeated Transaction transactions = 2;
+}
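BlockId nests two oneofs (hash vs. number, then latest/pending vs. a concrete height). Constructing it from Go is mostly boilerplate; the sketch below assumes stubs generated into a `web3` package, with wrapper type names following standard protoc-gen-go conventions, and an illustrative import path.

```go
package main

import (
	"google.golang.org/protobuf/types/known/emptypb"

	// Illustrative import path for the generated stubs.
	"intent-system/deps/github.com/ledgerwatch/interfaces/web3"
)

// latestBlock selects "latest" through both levels of the oneof.
func latestBlock() *web3.BlockId {
	return &web3.BlockId{
		Id: &web3.BlockId_Number{
			Number: &web3.BlockNumber{
				BlockNumber: &web3.BlockNumber_Latest{Latest: &emptypb.Empty{}},
			},
		},
	}
}

// blockByNumber selects a concrete block height.
func blockByNumber(n uint64) *web3.BlockId {
	return &web3.BlockId{
		Id: &web3.BlockId_Number{
			Number: &web3.BlockNumber{
				BlockNumber: &web3.BlockNumber_Number{Number: n},
			},
		},
	}
}
```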
diff --git a/deps/github.com/ledgerwatch/interfaces/web3/debug.proto b/deps/github.com/ledgerwatch/interfaces/web3/debug.proto
new file mode 100644
index 0000000..7471919
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/web3/debug.proto
@@ -0,0 +1,32 @@
+syntax = "proto3";
+
+import "types/types.proto";
+import "web3/common.proto";
+
+package web3;
+
+message AccountStreamRequest {
+  BlockId block_id = 1;
+  optional types.H160 offset = 2;
+}
+message Account {
+  types.H160 address = 1;
+  types.H256 balance = 2;
+  uint64 nonce = 3;
+  bytes code = 4;
+}
+
+message StorageStreamRequest {
+  BlockId block_id = 1;
+  types.H160 address = 2;
+  optional types.H256 offset = 3;
+}
+message StorageSlot {
+  types.H256 key = 1;
+  types.H256 value = 2;
+}
+
+service DebugApi {
+  rpc AccountStream(AccountStreamRequest) returns (stream Account);
+  rpc StorageStream(StorageStreamRequest) returns (stream StorageSlot);
+}
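Both DebugApi methods are server-streaming; a hypothetical consumer of AccountStream, under the same generated-stub assumptions as the other examples, might look like this:

```go
package main

import (
	"context"
	"errors"
	"io"
	"log"

	"google.golang.org/grpc"

	// Illustrative import path for the generated stubs.
	"intent-system/deps/github.com/ledgerwatch/interfaces/web3"
)

// dumpAccounts walks every account at the given block until the stream ends.
func dumpAccounts(ctx context.Context, conn *grpc.ClientConn, at *web3.BlockId) error {
	stream, err := web3.NewDebugApiClient(conn).AccountStream(ctx, &web3.AccountStreamRequest{BlockId: at})
	if err != nil {
		return err
	}
	for {
		acct, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil // stream finished
		}
		if err != nil {
			return err
		}
		log.Printf("account nonce=%d code=%d bytes", acct.Nonce, len(acct.Code))
	}
}
```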
diff --git a/deps/github.com/ledgerwatch/interfaces/web3/eth.proto b/deps/github.com/ledgerwatch/interfaces/web3/eth.proto
new file mode 100644
index 0000000..3768d1c
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/web3/eth.proto
@@ -0,0 +1,29 @@
+syntax = "proto3";
+
+import "google/protobuf/empty.proto";
+import "web3/common.proto";
+import "types/types.proto";
+
+package web3;
+
+message BlockNumberResponse { uint64 block_number = 1; }
+
+message ResolveBlockHashRequest { uint64 block_number = 1; }
+message ResolveBlockHashResponse { optional types.H256 block_hash = 1; }
+
+message BlockRequest { optional BlockId search_location = 1; }
+message LightBlockResponse { optional LightBlock block = 1; }
+message FullBlockResponse { optional FullBlock block = 1; }
+
+message TransactionResponse { optional StoredTransaction transaction = 1; }
+
+service EthApi {
+  rpc BlockNumber(google.protobuf.Empty) returns (BlockNumberResponse);
+  rpc ResolveBlockHash(ResolveBlockHashRequest)
+      returns (ResolveBlockHashResponse);
+
+  rpc LightBlock(BlockRequest) returns (LightBlockResponse);
+  rpc FullBlock(BlockRequest) returns (FullBlockResponse);
+  rpc TransactionByHash(types.H256) returns (TransactionResponse);
+  rpc SendTransaction(Transaction) returns (google.protobuf.Empty);
+}
diff --git a/deps/github.com/ledgerwatch/interfaces/web3/keep.go b/deps/github.com/ledgerwatch/interfaces/web3/keep.go
new file mode 100644
index 0000000..12018f7
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/web3/keep.go
@@ -0,0 +1 @@
+package web3
diff --git a/deps/github.com/ledgerwatch/interfaces/web3/trace.proto b/deps/github.com/ledgerwatch/interfaces/web3/trace.proto
new file mode 100644
index 0000000..2e02fe6
--- /dev/null
+++ b/deps/github.com/ledgerwatch/interfaces/web3/trace.proto
@@ -0,0 +1,312 @@
+syntax = "proto3";
+
+import "google/protobuf/empty.proto";
+import "web3/common.proto";
+import "types/types.proto";
+
+package web3;
+
+// Call params
+
+message LegacyCall {
+  optional types.H160 from = 1;
+  optional types.H160 to = 2;
+  optional uint64 gas_limit = 3;
+  optional uint64 gas_price = 4;
+  optional types.H256 value = 5;
+  optional bytes input = 6;
+}
+
+message AccessList { repeated AccessListItem access_list = 1; }
+
+message EIP2930Call {
+  optional types.H160 from = 1;
+  optional types.H160 to = 2;
+  optional uint64 gas_limit = 3;
+  optional uint64 gas_price = 4;
+  optional types.H256 value = 5;
+  optional bytes input = 6;
+  optional AccessList access_list = 7;
+}
+
+message EIP1559Call {
+  optional types.H160 from = 1;
+  optional types.H160 to = 2;
+  optional uint64 gas_limit = 3;
+  optional uint64 max_priority_fee_per_gas = 4;
+  optional uint64 max_fee_per_gas = 5;
+  optional types.H256 value = 6;
+  optional bytes input = 7;
+  optional AccessList access_list = 8;
+}
+
+message Call {
+  oneof call {
+    LegacyCall legacy = 1;
+    EIP2930Call eip2930 = 2;
+    EIP1559Call eip1559 = 3;
+  }
+}
+
+message TraceKinds {
+  bool trace = 1;
+  bool vm_trace = 2;
+  bool state_diff = 3;
+}
+
+message CallRequest {
+  Call call = 1;
+  TraceKinds kinds = 2;
+}
+
+message CallRequests {
+  repeated CallRequest calls = 1;
+  BlockId block_id = 2;
+}
+
+message TraceBlockRequest {
+  BlockId id = 1;
+  TraceKinds kinds = 2;
+}
+
+message TraceTransactionRequest {
+  types.H256 hash = 1;
+  TraceKinds kinds = 2;
+}
+
+message AddressSet { repeated types.H160 addresses = 1; }
+
+enum FilterMode {
+  Union = 0;
+  Intersection = 1;
+}
+
+message FilterRequest {
+  optional BlockId from_block = 1;
+  optional BlockId to_block = 2;
+  optional AddressSet from_addresses = 3;
+  optional AddressSet to_addresses = 4;
+  optional FilterMode mode = 5;
+}
+
+// Trace
+
+enum CallType {
+  CallTypeCall = 0;
+  CallTypeCallCode = 1;
+  CallTypeDelegateCall = 2;
+  CallTypeStaticCall = 3;
+}
+
+message CallAction {
+  types.H160 from = 1;
+  types.H160 to = 2;
+  types.H256 value = 3;
+  uint64 gas = 4;
+  bytes input = 5;
+  optional CallType call_type = 6;
+}
+
+message CreateAction {
+  types.H160 from = 1;
+  types.H256 value = 2;
+  uint64 gas = 3;
+  bytes init = 4;
+}
+
+message SelfdestructAction {
+  types.H160 address = 1;
+  types.H160 refund_address = 2;
+  types.H256 balance = 3;
+}
+
+message RewardAction {
+  types.H160 author = 1;
+  types.H256 value = 2;
+  enum RewardType {
+    Block = 0;
+    Uncle = 1;
+  }
+  RewardType reward_type = 3;
+}
+
+message Action {
+  oneof action {
+    CallAction call = 1;
+    CreateAction create = 2;
+    SelfdestructAction selfdestruct = 3;
+    RewardAction reward = 4;
+  }
+}
+
+message Trace {
+  Action action = 1;
+  optional TraceResult result = 2;
+  uint64 subtraces = 3;
+  repeated uint64 trace_address = 4;
+}
+
+message CallOutput {
+  uint64 gas_used = 1;
+  bytes output = 2;
+}
+
+message CreateOutput {
+  uint64 gas_used = 1;
+  bytes code = 2;
+  types.H160 address = 3;
+}
+
+message TraceOutput {
+  oneof output {
+    CallOutput call = 1;
+    CreateOutput create = 2;
+  }
+}
+
+message TraceResult {
+  oneof result {
+    TraceOutput output = 1;
+    string error = 2;
+  }
+}
+
+message Traces { repeated Trace traces = 1; }
+
+message TraceWithLocation {
+  Trace trace = 1;
+
+  optional uint64 transaction_position = 2;
+  optional types.H256 transaction_hash = 3;
+  uint64 block_number = 4;
+  types.H256 block_hash = 5;
+}
+
+message TracesWithLocation { repeated TraceWithLocation traces = 1; }
+
+message OptionalTracesWithLocation { optional TracesWithLocation traces = 1; }
+
+// VM trace
+
+message MemoryDelta {
+  uint64 off = 1;
+  bytes data = 2;
+}
+
+message StorageDelta {
+  types.H256 key = 1;
+  types.H256 val = 2;
+}
+
+message VmExecutedOperation {
+  uint64 used = 1;
+  optional types.H256 push = 2;
+  optional MemoryDelta mem = 3;
+  optional StorageDelta store = 4;
+}
+
+message VmInstruction {
+  uint32 pc = 1;
+  uint64 cost = 2;
+  optional VmExecutedOperation ex = 3;
+  optional VmTrace sub = 4;
+}
+
+message VmTrace {
+  bytes code = 1;
+  repeated VmInstruction ops = 2;
+}
+
+// State diff
+
+message AlteredH256 {
+  types.H256 from = 1;
+  types.H256 to = 2;
+}
+
+message DeltaH256 {
+  oneof delta {
+    google.protobuf.Empty unchanged = 1;
+    types.H256 added = 2;
+    types.H256 removed = 3;
+    AlteredH256 altered = 4;
+  }
+}
+
+message AlteredU64 {
+  uint64 from = 1;
+  uint64 to = 2;
+}
+
+message DeltaU64 {
+  oneof delta {
+    google.protobuf.Empty unchanged = 1;
+    uint64 added = 2;
+    uint64 removed = 3;
+    AlteredU64 altered = 4;
+  }
+}
+
+message AlteredBytes {
+  bytes from = 1;
+  bytes to = 2;
+}
+
+message DeltaBytes {
+  oneof delta {
+    google.protobuf.Empty unchanged = 1;
+    bytes added = 2;
+    bytes removed = 3;
+    AlteredBytes altered = 4;
+  }
+}
+
+message StorageDiffEntry {
+  types.H256 location = 1;
+  DeltaH256 delta = 2;
+}
+
+message AccountDiff {
+  DeltaH256 balance = 1;
+  DeltaU64 nonce = 2;
+  DeltaBytes code = 3;
+  repeated StorageDiffEntry storage = 4;
+}
+
+message AccountDiffEntry {
+  types.H160 key = 1;
+  AccountDiff value = 2;
+}
+
+message StateDiff { repeated AccountDiffEntry diff = 1; }
+
+message FullTrace {
+  bytes output = 1;
+  optional Traces traces = 2;
+  optional VmTrace vm_trace = 3;
+  optional StateDiff state_diff = 4;
+}
+
+message FullTraceWithTransactionHash {
+  FullTrace full_trace = 1;
+  types.H256 transaction_hash = 2;
+}
+
+message FullTraces { repeated FullTrace traces = 1; }
+
+message FullTracesWithTransactionHashes {
+  repeated FullTraceWithTransactionHash traces = 1;
+}
+
+message OptionalFullTracesWithTransactionHashes {
+  optional FullTracesWithTransactionHashes traces = 1;
+}
+
+service TraceApi {
+  rpc Call(CallRequests) returns (FullTraces);
+  rpc Block(BlockId) returns (OptionalTracesWithLocation);
+  rpc BlockTransactions(TraceBlockRequest)
+      returns (OptionalFullTracesWithTransactionHashes);
+  rpc Transaction(TraceTransactionRequest) returns (FullTrace);
+  rpc Filter(FilterRequest) returns (stream TraceWithLocation);
+}
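Putting the pieces together, a single traced call through TraceApi.Call composes a Call oneof, the TraceKinds flags, and a BlockId. The helper below is a sketch under the same assumptions as the earlier examples (generated `web3`/`types` packages, illustrative import paths).

```go
package main

import (
	"context"

	"google.golang.org/grpc"

	// Illustrative import paths for the generated stubs.
	"intent-system/deps/github.com/ledgerwatch/interfaces/types"
	"intent-system/deps/github.com/ledgerwatch/interfaces/web3"
)

// traceLegacyCall traces one legacy call at the given block, requesting only
// the call trace (no vm trace, no state diff).
func traceLegacyCall(ctx context.Context, conn *grpc.ClientConn,
	from, to *types.H160, input []byte, at *web3.BlockId) (*web3.FullTraces, error) {
	req := &web3.CallRequests{
		Calls: []*web3.CallRequest{{
			Call: &web3.Call{
				Call: &web3.Call_Legacy{Legacy: &web3.LegacyCall{From: from, To: to, Input: input}},
			},
			Kinds: &web3.TraceKinds{Trace: true},
		}},
		BlockId: at,
	}
	return web3.NewTraceApiClient(conn).Call(ctx, req)
}
```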
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..8269f30
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,115 @@
+module intent-system
+
+go 1.23.0
+
+toolchain go1.24.1
+
+require (
+	github.com/allegro/bigcache/v3 v3.0.1
+	github.com/casbin/casbin/v2 v2.39.0
+	github.com/casbin/xorm-adapter/v2 v2.4.0
+	github.com/civet148/gotools v1.4.1
+	github.com/civet148/httpc v1.7.1
+	github.com/civet148/log v1.7.2
+	github.com/civet148/socketx v1.4.2
+	github.com/civet148/sqlca/v2 v2.8.0
+	github.com/dgrijalva/jwt-go v3.2.0+incompatible
+	github.com/docker/docker v20.10.7+incompatible
+	github.com/docker/go-connections v0.5.0
+	github.com/ethereum/go-ethereum v1.11.5
+	github.com/fatih/color v1.18.0
+	github.com/gin-gonic/gin v1.9.1
+	github.com/go-co-op/gocron v1.37.0
+	github.com/go-sql-driver/mysql v1.6.0
+	github.com/google/go-containerregistry v0.6.1
+	github.com/gorilla/websocket v1.5.0
+	github.com/mssola/user_agent v0.6.0
+	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d
+	github.com/urfave/cli/v2 v2.23.7
+	golang.org/x/time v0.0.0-20220922220347-f3bd1da661af
+	gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
+)
+
+require (
+	github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible // indirect
+	github.com/Microsoft/go-winio v0.6.2 // indirect
+	github.com/bwmarrin/snowflake v0.3.0 // indirect
+	github.com/bytedance/sonic v1.9.1 // indirect
+	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
+	github.com/containerd/containerd v1.5.2 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
+	github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
+	github.com/denisenkom/go-mssqldb v0.12.0 // indirect
+	github.com/docker/distribution v2.7.1+incompatible // indirect
+	github.com/docker/go-units v0.5.0 // indirect
+	github.com/elliotchance/sshtunnel v1.2.0 // indirect
+	github.com/gabriel-vasile/mimetype v1.4.2 // indirect
+	github.com/gansidui/geohash v0.0.0-20141019080235-ebe5ba447f34 // indirect
+	github.com/gin-contrib/sse v0.1.0 // indirect
+	github.com/go-playground/locales v0.14.1 // indirect
+	github.com/go-playground/universal-translator v0.18.1 // indirect
+	github.com/go-playground/validator/v10 v10.14.0 // indirect
+	github.com/goccy/go-json v0.10.2 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect
+	github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 // indirect
+	github.com/golang/mock v1.6.0 // indirect
+	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
+	github.com/google/go-cmp v0.7.0 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/jmoiron/sqlx v1.3.5 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/compress v1.17.11 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
+	github.com/leodido/go-urn v1.2.4 // indirect
+	github.com/lib/pq v1.10.7 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/mattn/go-sqlite3 v1.14.16 // indirect
+	github.com/moby/term v0.5.2 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/onsi/gomega v1.28.0 // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/opencontainers/image-spec v1.1.1 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.9 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/robfig/cron/v3 v3.0.1 // indirect
+	github.com/rogpeppe/go-internal v1.13.1 // indirect
+	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/shopspring/decimal v1.2.0 // indirect
+	github.com/sirupsen/logrus v1.9.3 // indirect
+	github.com/stretchr/testify v1.10.0 // indirect
+	github.com/tidwall/pretty v1.2.0 // indirect
+	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
+	github.com/ugorji/go/codec v1.2.11 // indirect
+	github.com/valyala/fastjson v1.6.4 // indirect
+	github.com/vbatts/tar-split v0.11.6 // indirect
+	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
+	go.mongodb.org/mongo-driver v1.11.6 // indirect
+	go.uber.org/atomic v1.9.0 // indirect
+	golang.org/x/arch v0.3.0 // indirect
+	golang.org/x/crypto v0.31.0 // indirect
+	golang.org/x/net v0.33.0 // indirect
+	golang.org/x/sync v0.10.0 // indirect
+	golang.org/x/sys v0.30.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect
+	google.golang.org/grpc v1.69.2 // indirect
+	google.golang.org/protobuf v1.36.5 // indirect
+	gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	gotest.tools/v3 v3.5.2 // indirect
+	xorm.io/builder v0.3.7 // indirect
+	xorm.io/xorm v1.0.3 // indirect
+)
+
+replace (
+	github.com/anacrolix/torrent => ./deps/github.com/anacrolix/torrent
+	github.com/cosmos/cosmos-sdk => github.com/hobbyworld-project/cosmos-sdk v0.47.6-rc1 //github.com/evmos/cosmos-sdk v0.47.5-evmos
+	github.com/evmos/evmos/v15 => github.com/hobbyworld-project/evmos/v15 v15.1.0-beta1
+	github.com/kataras/iris/v12 => github.com/kataras/iris/v12 v12.2.0 // iris@v12.2.0-beta5 was removed
+	github.com/ledgerwatch/interfaces => ./deps/github.com/ledgerwatch/interfaces
+	github.com/tendermint/tendermint => github.com/tendermint/tendermint v0.34.24 // tendermint@v0.34.29 was removed
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..177911c
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,1537 @@
+bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s=
+gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU=
+github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0=
+github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw=
+github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
+github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/Shopify/sarama v1.24.1/go.mod h1:fGP8eQ6PugKEI0iUETYYtnP6d1pH/bdDMTel1X5ajsU=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/allegro/bigcache/v3 v3.0.1 h1:Q4Xl3chywXuJNOw7NV+MeySd3zGQDj4KCpkCg0te8mc=
+github.com/allegro/bigcache/v3 v3.0.1/go.mod h1:aPyh7jEvrog9zAwx5N7+JUQX5dZTSGpxF1LAR4dr35I=
+github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/bsm/sarama-cluster v2.1.15+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM=
+github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/bwmarrin/snowflake v0.3.0 h1:xm67bEhkKh6ij1790JB83OujPR5CzNe8QuQqAgISZN0=
+github.com/bwmarrin/snowflake v0.3.0/go.mod h1:NdZxfVWX+oR6y2K0o6qAYv6gIOP9rjG0/E9WsDpxqwE=
+github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
+github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s=
+github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
+github.com/casbin/casbin/v2 v2.28.3/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg=
+github.com/casbin/casbin/v2 v2.39.0 h1:3lCZ1qhw5Itdr0vf9l2iqF3Z6Va5zq8vXpIFy4xZm9E=
+github.com/casbin/casbin/v2 v2.39.0/go.mod h1:sEL80qBYTbd+BPeL4iyvwYzFT3qwLaESq5aFKVLbLfA=
+github.com/casbin/xorm-adapter/v2 v2.4.0 h1:wxfbV2dowwRzJBWiZ0x2/ECQKfmyAfXJyyEMQo/Epqg=
+github.com/casbin/xorm-adapter/v2 v2.4.0/go.mod h1:AeH4dBKHC9/zYxzdPVHhPDzF8LYLqjDdb767CWJoV54=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/civet148/gotools v1.4.1 h1:9E2rVOUIebQvdG/75iEc1/nqWxqmrUTSWepl0dbaGHo=
+github.com/civet148/gotools v1.4.1/go.mod h1:SfbUhBkKsnCIZ+1Ac94sQ2yTCbwkRKgVjDgGxrwPYmw=
+github.com/civet148/httpc v1.7.1 h1:X+Wfp9+MWgbCMpFrDZTJo0yv6sJM/bL8w5cLf1NUwZU=
+github.com/civet148/httpc v1.7.1/go.mod h1:JqwWFeNuMZD50uwDb2HehsuHmk+dPFTkSyyfSMBWis0=
+github.com/civet148/log v1.1.3/go.mod h1:goUIYRED4uhCzlF5mcwHrkJ+nPXJIPwcaB6IJSuiEW0=
+github.com/civet148/log v1.4.4/go.mod h1:1+dISnXR1i3HoAVd3IAzhs0TduJ7QOC50ZHEfzXGH7o=
+github.com/civet148/log v1.5.1/go.mod h1:1+dISnXR1i3HoAVd3IAzhs0TduJ7QOC50ZHEfzXGH7o=
+github.com/civet148/log v1.7.2 h1:VNQMbFJn6s+DtYCD+oBuZAufrbgtJlUcIdqCmjDiP5A=
+github.com/civet148/log v1.7.2/go.mod h1:1+dISnXR1i3HoAVd3IAzhs0TduJ7QOC50ZHEfzXGH7o=
+github.com/civet148/socketx v1.4.2 h1:Vu15JRjGrtaP8Hb/JeIlay+PquL5s912R5NzBfioVdQ=
+github.com/civet148/socketx v1.4.2/go.mod h1:dcYlimj1k+/wauiyWugZQw4t3aAjyzZ7n9OliMJI4Lo=
+github.com/civet148/sqlca/v2 v2.8.0 h1:lzvN5lfIqSqSavGDCeEQLJX/GXgj7wzkXsZwW4BSlLk=
+github.com/civet148/sqlca/v2 v2.8.0/go.mod h1:lpnAs9Nq0893TzDbLDAKX3NO6nVRDrheb7JPb2Ho5oE=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.2 h1:MG/Bg1pbmMb61j3wHCFWPxESXHieiKr2xG64px/k8zQ=
+github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
+github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/stargz-snapshotter/estargz v0.7.0/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw=
+github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
+github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
+github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
+github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.27+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denisenkom/go-mssqldb v0.0.0-20200206145737-bbfc9a55622e/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/denisenkom/go-mssqldb v0.12.0 h1:VtrkII767ttSPNRfFekePK3sctr+joXgO58stqQbtUA=
+github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
+github.com/docker/cli v20.10.7+incompatible h1:pv/3NqibQKphWZiAskMzdz8w0PRbtTaEB+f6NwdU7Is=
+github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ=
+github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/elliotchance/sshtunnel v1.2.0 h1:w+ZIzpwvLD/O4VrU80ASdWGL0FO1GxillHPoZn9jhAg=
+github.com/elliotchance/sshtunnel v1.2.0/go.mod h1:gRPilFGawrilzilJ+4ySFZxu/qoNZN++GQQ1HVFrVJk=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/ethereum/go-ethereum v1.11.5 h1:3M1uan+LAUvdn+7wCEFrcMM4LJTeuxDrPTg/f31a5QQ=
+github.com/ethereum/go-ethereum v1.11.5/go.mod h1:it7x0DWnTDMfVFdXcU6Ti4KEFQynLHVRarcSlPr0HBo=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
+github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
+github.com/gansidui/geohash v0.0.0-20141019080235-ebe5ba447f34 h1:v8OKndw+abloncjnDIoy/qHsyRWXCnTUw7Tc9emhbEg=
+github.com/gansidui/geohash v0.0.0-20141019080235-ebe5ba447f34/go.mod h1:/Mg9STHVPYcABQdI1MQrFY4ngaOwGeIwOypwEtWNQgY=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
+github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
+github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
+github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
+github.com/go-co-op/gocron v1.37.0 h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0=
+github.com/go-co-op/gocron v1.37.0/go.mod h1:3L/n6BkO7ABj+TrfSVXLRzsP26zmikL4ISkLQ0O8iNY=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
+github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
+github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
+github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js=
+github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
+github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
+github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
+github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 h1:+eHOFJl1BaXrQxKX+T06f78590z4qA2ZzBTqahsKSE4=
+github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
+github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-containerregistry v0.6.1 h1:x51Lu/yHigTg239AlvtBjWUxkFAdXkCNYmnTbPc6smI=
+github.com/google/go-containerregistry v0.6.1/go.mod h1:euCCtNbZ6tKqi1E72vwDj2xZcN5ttKpZLfa/wSo5iLw=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
+github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
+github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
+github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
+github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
+github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
+github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
+github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
+github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
+github.com/mssola/user_agent v0.6.0 h1:uwPR4rtWlCHRFyyP9u2KOV0u8iQXmS7Z7feTrstQwk4=
+github.com/mssola/user_agent v0.6.0/go.mod h1:TTPno8LPY3wAIEKRpAtkdMT0f8SE24pLRGPahjCH4uw=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c=
+github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
+github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
+github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0=
+github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY=
+github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
+github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sideshow/apns2 v0.20.0/go.mod h1:f7dArLPLbiZ3qPdzzrZXdCSlMp8FD0p6z7tHssDOLvk=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
+github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
+github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
+github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
+github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
+github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
+github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli/v2 v2.23.7 h1:YHDQ46s3VghFHFf1DdF+Sh7H4RqhcM+t0TmZRJx4oJY=
+github.com/urfave/cli/v2 v2.23.7/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
+github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
+github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
+github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs=
+github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI=
+github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
+github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
+github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
+github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
+github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+github.com/zheng-ji/goSnowFlake v0.0.0-20180906112711-fc763800eec9/go.mod h1:N/L8JbBvbc3m0Y38VM1tV4fY1ubU09Q3WFwhBEVyPv4=
+github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.etcd.io/etcd v3.3.18+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.mongodb.org/mongo-driver v1.11.6 h1:XM7G6PjiGAO5betLF13BIa5TlLUUE3uJ/2Ox3Lz1K+o=
+go.mongodb.org/mongo-driver v1.11.6/go.mod h1:G9TgswdsWjX4tmDA5zfs2+6AEPpYJwqblyjsfuh8oXY=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
+golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
+golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
+google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
+google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
+google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
+gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
+gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
+gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df h1:n7WqCuqOuCbNr617RXOY0AWRXxgwEyPp2z+p0+hgMuE=
+gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
+gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
+gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
+gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
+gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78=
+gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
+gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
+gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
+gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
+k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
+k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
+k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
+k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
+k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
+k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
+k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
+k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
+k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
+k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
+k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
+k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
+k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
+k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
+k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
+xorm.io/builder v0.3.7 h1:2pETdKRK+2QG4mLX4oODHEhn5Z8j1m8sXa7jfu+/SZI=
+xorm.io/builder v0.3.7/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE=
+xorm.io/xorm v1.0.3 h1:3dALAohvINu2mfEix5a5x5ZmSVGSljinoSGgvGbaZp0=
+xorm.io/xorm v1.0.3/go.mod h1:uF9EtbhODq5kNWxMbnBEj8hRRZnlcNSz2t2N7HW/+A4=
diff --git a/internal/hostid/disk.go b/internal/hostid/disk.go
new file mode 100644
index 0000000..0f8b8ac
--- /dev/null
+++ b/internal/hostid/disk.go
@@ -0,0 +1,72 @@
+package hostid
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+)
+
+// GetRootDiskSerial returns the serial number of the physical disk that
+// backs the root filesystem.
+func GetRootDiskSerial() (string, error) {
+	// 1. Find the major:minor device number of "/" in /proc/self/mountinfo.
+	f, err := os.Open("/proc/self/mountinfo")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	var major, minor int
+	sc := bufio.NewScanner(f)
+	for sc.Scan() {
+		p := strings.Split(sc.Text(), " ")
+		if len(p) > 4 && p[4] == "/" {
+			mm := strings.Split(p[2], ":")
+			major, _ = strconv.Atoi(mm[0])
+			minor, _ = strconv.Atoi(mm[1])
+			break
+		}
+	}
+	if major == 0 && minor == 0 {
+		return "", fmt.Errorf("root mount not found")
+	}
+
+	// 2. Resolve the /sys/dev/block/<major>:<minor> symlink, then walk upward
+	//    until the name exists under /sys/block/, which covers nvme0n1p2,
+	//    mmcblk0p1, dm-0 and similar layouts.
+	link := fmt.Sprintf("/sys/dev/block/%d:%d", major, minor)
+	target, err := os.Readlink(link)
+	if err != nil {
+		return "", err
+	}
+	path := filepath.Clean(filepath.Join(filepath.Dir(link), target))
+
+	var disk string
+	for {
+		name := filepath.Base(path) // current path component
+		if _, err := os.Stat(filepath.Join("/sys/block", name)); err == nil {
+			disk = name // found the top-level physical disk
+			break
+		}
+		parent := filepath.Dir(path)
+		if parent == path { // reached the filesystem root without a match
+			return "", fmt.Errorf("disk name not found via %s", link)
+		}
+		path = parent
+	}
+
+	// 3. Read the serial number from the usual sysfs locations.
+	candidates := []string{
+		"/sys/block/" + disk + "/device/serial",
+		"/sys/block/" + disk + "/serial",
+		"/sys/block/" + disk + "/device/vpd_unit_serial",
+		"/sys/block/" + disk + "/device/wwid",
+	}
+	for _, p := range candidates {
+		if b, err := os.ReadFile(p); err == nil {
+			s := strings.TrimSpace(string(b))
+			if s != "" {
+				return s, nil
+			}
+		}
+	}
+	return "", fmt.Errorf("serial not found for %s", disk)
+}
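+
+// A minimal usage sketch (caller-side; the surrounding log/fmt calls are
+// illustrative, not part of this package):
+//
+//	serial, err := hostid.GetRootDiskSerial()
+//	if err != nil {
+//		log.Fatalf("read root disk serial: %v", err)
+//	}
+//	fmt.Println("machine id:", serial)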
diff --git a/internal/licensecheck/licensecheck.go b/internal/licensecheck/licensecheck.go
new file mode 100644
index 0000000..bc712df
--- /dev/null
+++ b/internal/licensecheck/licensecheck.go
@@ -0,0 +1,109 @@
+package licensecheck
+
+import (
+	"crypto/ecdsa"
+	"crypto/sha256"
+	"crypto/x509"
+	"encoding/asn1"
+	"encoding/base64"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"math/big"
+	"os"
+	"strings"
+	"time"
+
+	"intent-system/internal/hostid" // 你的 GetRootDiskSerial 包
+)
+
+/* ---------- ① Constant: embedded public key ---------- */
+const publicKeyPEM = `-----BEGIN PUBLIC KEY-----
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEKQtVKlXjOro4qPoavqOU8m5qda0k
+olAhHgTayzNuR+nOP1AnjQm10ehhV7Aafo8fUnjxs/rAM+MRdmzcEXrWnw==
+-----END PUBLIC KEY-----`
+
+/* ---------- ② Structure kept in sync with the server side ---------- */
+type licenseRequest struct {
+	MachineID string   `json:"machine_id"`
+	Expiry    string   `json:"expiry"`
+	Features  []string `json:"features"`
+}
+
+/* ---------- ③ Entry point: Validate(path) ---------- */
+func Validate(licPath string) error {
+	raw, err := os.ReadFile(licPath)
+	if err != nil {
+		return err
+	}
+	parts := strings.Split(strings.TrimSpace(string(raw)), ".")
+	if len(parts) != 2 {
+		return errors.New("invalid license format")
+	}
+	payloadB64, sigB64 := parts[0], parts[1]
+
+	payload, err := base64.StdEncoding.DecodeString(payloadB64)
+	if err != nil {
+		return errors.New("payload base64 decode failed")
+	}
+
+	// 1. Verify the signature.
+	pubKey, err := parsePublicKey(publicKeyPEM)
+	if err != nil {
+		return err
+	}
+	if !verifySignature(pubKey, payload, sigB64) {
+		return errors.New("signature mismatch")
+	}
+
+	// 2. Parse the payload fields.
+	var req licenseRequest
+	if err := json.Unmarshal(payload, &req); err != nil {
+		return err
+	}
+
+	// 3. Check the machine ID against the local disk serial.
+	sn, err := hostid.GetRootDiskSerial()
+	if err != nil {
+		return err
+	}
+	if req.MachineID != sn {
+		return errors.New("machine ID mismatch")
+	}
+
+	// 4. Check the expiry date.
+	expiry, err := time.Parse("2006-01-02", req.Expiry)
+	if err != nil {
+		return errors.New("invalid expiry date")
+	}
+	if time.Now().After(expiry) {
+		return errors.New("license expired")
+	}
+
+	return nil // license is valid
+}
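+
+// For reference, the license file format implied by Validate is
+// base64(payload) + "." + base64(DER signature), where payload is JSON like
+// (values are examples only):
+//
+//	{"machine_id":"S4XXNE0M123456","expiry":"2026-01-01","features":["news","qa"]}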
+
+/* ---------- ④ Helpers ---------- */
+func parsePublicKey(pemStr string) (*ecdsa.PublicKey, error) {
+	block, _ := pem.Decode([]byte(pemStr))
+	if block == nil || block.Type != "PUBLIC KEY" {
+		return nil, errors.New("invalid PEM")
+	}
+	pubAny, err := x509.ParsePKIXPublicKey(block.Bytes)
+	if err != nil {
+		return nil, err
+	}
+	pub, ok := pubAny.(*ecdsa.PublicKey)
+	if !ok {
+		return nil, errors.New("not an ECDSA public key")
+	}
+	return pub, nil
+}
+
+type ecdsaSig struct{ R, S *big.Int }
+
+func verifySignature(pub *ecdsa.PublicKey, msg []byte, sigB64 string) bool {
+	sigBytes, err := base64.StdEncoding.DecodeString(sigB64)
+	if err != nil {
+		return false
+	}
+	var sig ecdsaSig
+	if _, err = asn1.Unmarshal(sigBytes, &sig); err != nil {
+		return false
+	}
+	hash := sha256.Sum256(msg)
+	return ecdsa.Verify(pub, hash[:], sig.R, sig.S)
+}
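+
+// The matching signer lives on the license server and is not part of this
+// repo; a sketch under that assumption, using only the standard library
+// (privKey must pair with publicKeyPEM above):
+//
+//	payload, _ := json.Marshal(licenseRequest{MachineID: sn, Expiry: "2026-01-01"})
+//	hash := sha256.Sum256(payload)
+//	der, _ := ecdsa.SignASN1(rand.Reader, privKey, hash[:])
+//	lic := base64.StdEncoding.EncodeToString(payload) + "." +
+//		base64.StdEncoding.EncodeToString(der)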
diff --git a/pkg/api/biz_api.go b/pkg/api/biz_api.go
new file mode 100644
index 0000000..574e0fc
--- /dev/null
+++ b/pkg/api/biz_api.go
@@ -0,0 +1,34 @@
+package api
+
+import "github.com/gin-gonic/gin"
+
+type BizApi interface {
+	NewsList(c *gin.Context)         // news list
+	NewsAdd(c *gin.Context)          // create news
+	NewsEdit(c *gin.Context)         // edit news
+	NewsDelete(c *gin.Context)       // delete news
+	NewsCompare(c *gin.Context)      // compare news revisions
+	NewsPublish(c *gin.Context)      // publish news (to the subscription list)
+	NewsDraftList(c *gin.Context)    // draft list
+	NewsDraftEdit(c *gin.Context)    // edit draft
+	NewsDraftPublish(c *gin.Context) // publish draft
+	NewsDraftDelete(c *gin.Context)  // delete draft
+	NewsTag(c *gin.Context)          // tag news
+	QaList(c *gin.Context)           // Q&A list
+	QaAdd(c *gin.Context)            // create Q&A
+	QaEdit(c *gin.Context)           // edit Q&A
+	QaDelete(c *gin.Context)         // delete Q&A
+	QaDraftList(c *gin.Context)      // Q&A draft list
+	QaDraftEdit(c *gin.Context)      // edit Q&A draft
+	QaDraftPublish(c *gin.Context)   // publish Q&A draft
+	QaDraftDelete(c *gin.Context)    // delete Q&A draft
+	SubListAll(c *gin.Context)       // all subscription news
+	SubListPushed(c *gin.Context)    // pushed subscription news
+	SubListToday(c *gin.Context)     // today's subscription news
+	SubAddNews(c *gin.Context)       // add subscription news
+	SubEditNews(c *gin.Context)      // edit subscription news
+	TagList(c *gin.Context)          // tag list
+	TagAdd(c *gin.Context)           // create tag
+	TagEdit(c *gin.Context)          // edit tag
+	TagDelete(c *gin.Context)        // delete tag
+}
diff --git a/pkg/api/common_api.go b/pkg/api/common_api.go
new file mode 100644
index 0000000..2e8b024
--- /dev/null
+++ b/pkg/api/common_api.go
@@ -0,0 +1,7 @@
+package api
+
+import "github.com/gin-gonic/gin"
+
+type CommonApi interface {
+	SendAuthCode(c *gin.Context) // send a verification code
+}
diff --git a/pkg/api/customer_api.go b/pkg/api/customer_api.go
new file mode 100644
index 0000000..1ff7606
--- /dev/null
+++ b/pkg/api/customer_api.go
@@ -0,0 +1,15 @@
+package api
+
+import "github.com/gin-gonic/gin"
+
+type CustomerApi interface {
+	CustomerRegister(c *gin.Context)    // customer registration
+	CustomerURegister(c *gin.Context)   // username-only registration: only checks whether the username exists, nothing else
+	CustomerLogin(c *gin.Context)       // customer login
+	CustomerEdit(c *gin.Context)        // edit customer profile
+	CustomerLogout(c *gin.Context)      // customer logout
+	CustomerSubInfo(c *gin.Context)     // customer subscription info
+	CustomerSubscribe(c *gin.Context)   // customer subscribes
+	CustomerUnsubscribe(c *gin.Context) // customer unsubscribes
+	CustomerList(c *gin.Context)        // customer list (privilege check required)
+}
diff --git a/pkg/api/deploy_api.go b/pkg/api/deploy_api.go
new file mode 100644
index 0000000..f363fcc
--- /dev/null
+++ b/pkg/api/deploy_api.go
@@ -0,0 +1,11 @@
+package api
+
+import "github.com/gin-gonic/gin"
+
+type DeployApi interface {
+	DeployDeploy(c *gin.Context) // deploy a model
+	DeployStatus(c *gin.Context) // report models present on this machine
+	DeployDelete(c *gin.Context) // delete a deployed model
+	DeployStart(c *gin.Context)  // start a deployed model
+	DeployStop(c *gin.Context)   // stop a deployed model
+}
diff --git a/pkg/api/gateway_api.go b/pkg/api/gateway_api.go
new file mode 100644
index 0000000..c400e7b
--- /dev/null
+++ b/pkg/api/gateway_api.go
@@ -0,0 +1,7 @@
+package api
+
+import "github.com/gin-gonic/gin"
+
+type GatewayApi interface {
+	WebSocketRequest(c *gin.Context)
+}
diff --git a/pkg/api/manager_api.go b/pkg/api/manager_api.go
new file mode 100644
index 0000000..88f76ef
--- /dev/null
+++ b/pkg/api/manager_api.go
@@ -0,0 +1,9 @@
+package api
+
+type ManagerApi interface {
+	// run the service loop
+	Run() (err error)
+
+	// close the service
+	Close()
+}
diff --git a/pkg/api/platform_api.go b/pkg/api/platform_api.go
new file mode 100644
index 0000000..bbbe196
--- /dev/null
+++ b/pkg/api/platform_api.go
@@ -0,0 +1,29 @@
+package api
+
+import "github.com/gin-gonic/gin"
+
+type PlatformApi interface {
+	PlatformLogin(c *gin.Context)            // user login
+	PlatformLogout(c *gin.Context)           // user logout
+	PlatformCheckExist(c *gin.Context)       // check whether a username/email exists
+	PlatformListUser(c *gin.Context)         // list all system users
+	PlatformCreateUser(c *gin.Context)       // create a system user
+	PlatformEditUser(c *gin.Context)         // edit a system user
+	PlatformEnableUser(c *gin.Context)       // enable a system user
+	PlatformDisableUser(c *gin.Context)      // disable a system user
+	PlatformDeleteUser(c *gin.Context)       // delete a system user
+	PlatformDeleteUsers(c *gin.Context)      // delete system users (batch)
+	PlatformListRole(c *gin.Context)         // list all roles
+	PlatformCreateRole(c *gin.Context)       // create a role
+	PlatformEditRole(c *gin.Context)         // edit a role
+	PlatformDeleteRole(c *gin.Context)       // delete a role
+	PlatformAuthRole(c *gin.Context)         // grant privileges to a role
+	PlatformInquireAuth(c *gin.Context)      // query privileges
+	PlatformPrivilegeTree(c *gin.Context)    // privilege tree
+	PlatformResetPassword(c *gin.Context)    // admin resets a system user's password
+	PlatformChangePassword(c *gin.Context)   // system user changes own password
+	PlatformListRoleUser(c *gin.Context)     // list the users of a role
+	PlatformRefreshAuthToken(c *gin.Context) // refresh the access token
+	PlatformListOperLog(c *gin.Context)      // operation log list
+	PlatformUploadFile(c *gin.Context)       // upload a file
+}
diff --git a/pkg/api/ws_api.go b/pkg/api/ws_api.go
new file mode 100644
index 0000000..3efcdd5
--- /dev/null
+++ b/pkg/api/ws_api.go
@@ -0,0 +1,7 @@
+package api
+
+import "github.com/gin-gonic/gin"
+
+type WsApi interface {
+	DeployStatusWS(c *gin.Context) // pushes deployment progress over WebSocket
+}
diff --git a/pkg/cache/bigcache_init.go b/pkg/cache/bigcache_init.go
new file mode 100644
index 0000000..309b0a6
--- /dev/null
+++ b/pkg/cache/bigcache_init.go
@@ -0,0 +1,55 @@
+package bigcache
+
+import (
+	"github.com/allegro/bigcache/v3"
+	"github.com/civet148/log"
+	"time"
+)
+
+var Cache *bigcache.BigCache
+
+func InitBigCache() {
+	var initErr error
+	config := bigcache.Config{
+		// number of shards (must be a power of 2)
+		Shards: 1024,
+
+		// time after which entry can be evicted
+		LifeWindow: 10 * time.Minute,
+
+		// Interval between removing expired entries (clean up).
+		// If set to <= 0 then no action is performed.
+		// Setting to < 1 second is counterproductive — bigcache has a one second resolution.
+		CleanWindow: 60 * time.Minute,
+
+		// rps * lifeWindow, used only in initial memory allocation
+		MaxEntriesInWindow: 1000 * 10 * 60,
+
+		// max entry size in bytes, used only in initial memory allocation
+		MaxEntrySize: 500,
+
+		// prints information about additional memory allocation
+		Verbose: true,
+
+		// cache will not allocate more memory than this limit, value in MB
+		// if value is reached then the oldest entries can be overridden for the new ones
+		// 0 value means no size limit
+		HardMaxCacheSize: 8192,
+
+		// callback fired when the oldest entry is removed because of its expiration time or no space left
+		// for the new entry, or because delete was called. A bitmask representing the reason will be returned.
+		// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
+		OnRemove: nil,
+
+		// OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left
+		// for the new entry, or because delete was called. A constant representing the reason will be passed through.
+		// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
+		// Ignored if OnRemove is specified.
+		OnRemoveWithReason: nil,
+	}
+
+	Cache, initErr = bigcache.NewBigCache(config)
+	if initErr != nil {
+		log.Fatal(initErr)
+	}
+}
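+
+// A minimal caller-side sketch (the import path intent-system/pkg/cache
+// resolves to this package, named bigcache; key and value are illustrative):
+//
+//	bigcache.InitBigCache()
+//	_ = bigcache.Cache.Set("authcode:13800000000", []byte("352786"))
+//	if v, err := bigcache.Cache.Get("authcode:13800000000"); err == nil {
+//		fmt.Println(string(v))
+//	}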
diff --git a/pkg/config/config.go b/pkg/config/config.go
new file mode 100644
index 0000000..c5b4a1d
--- /dev/null
+++ b/pkg/config/config.go
@@ -0,0 +1,64 @@
+package config
+
+import (
+	"encoding/json"
+	"intent-system/pkg/itypes"
+	"io/ioutil"
+	"os"
+	"path"
+
+	"github.com/civet148/log"
+)
+
+const (
+	strConfName   = "config.json"
+	RunConfigName = "system-backend"
+)
+
+type Config struct {
+	Version       string `json:"version"`                                                                       // version number
+	DSN           string `json:"dsn" toml:"dsn" db:"dsn" cli:"dsn"`                                             // database DSN
+	Debug         bool   `json:"debug" toml:"debug" db:"debug" cli:"debug"`                                     // enable debug mode
+	HttpAddr      string `json:"http_addr" toml:"http_addr" db:"http_addr" cli:"http-addr"`                     // listen address
+	Domain        string `json:"domain" toml:"domain" db:"domain" cli:"domain"`                                 // domain URL
+	Static        string `json:"static" toml:"static" db:"static" cli:"static"`                                 // static file path
+	ImagePrefix   string `json:"image_prefix" toml:"image_prefix" db:"image_prefix" cli:"image-prefix"`         // image URL prefix for clients
+	ImagePath     string `json:"image_path" toml:"image_path" db:"image_path" cli:"image-path"`                 // image storage path
+	GatewayUrl    string `json:"gateway_url" toml:"gateway_url" db:"gateway_url" cli:"gateway-url"`             // gateway URL
+	GatewayKey    string `json:"gateway_key" toml:"gateway_key" db:"gateway_key" cli:"gateway-key"`             // gateway access key
+	GatewaySecret string `json:"gateway_secret" toml:"gateway_secret" db:"gateway_secret" cli:"gateway-secret"` // gateway access secret
+	Postgresql    string `json:"postgresql" toml:"postgresql" db:"postgresql" cli:"pg"`                         // PostgreSQL DSN
+	SubCron       string `json:"sub_cron" toml:"sub_cron" db:"sub_cron" cli:"sub-cron"`                         // cron spec for subscription emails
+}
+
+var strFilePath = path.Join(itypes.DefaultConfigHome, strConfName)
+
+func (c *Config) Save() (err error) {
+	var data []byte
+	strConfigPath := itypes.DefaultConfigHome
+	if err = os.MkdirAll(strConfigPath, os.ModePerm); err != nil {
+		return log.Errorf("make dir [%s] error [%s]", strConfigPath, err.Error())
+	}
+	data, err = json.Marshal(c)
+	if err != nil {
+		return log.Errorf("marshal json error: %s", err.Error())
+	}
+	err = os.WriteFile(strFilePath, data, os.ModePerm)
+	if err != nil {
+		return log.Errorf("write to file %s error: %s", strFilePath, err.Error())
+	}
+	return nil
+}
+
+func (c *Config) Load() (err error) {
+	var data []byte
+	data, err = os.ReadFile(strFilePath)
+	if err != nil {
+		return log.Errorf("read file %s error: %s", strFilePath, err.Error())
+	}
+	err = json.Unmarshal(data, c)
+	if err != nil {
+		return log.Errorf("unmarshal error: %s", err.Error())
+	}
+	return nil
+}
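+
+// A config.json written by Save looks like this (all values are placeholders):
+//
+//	{
+//	  "version": "v0.5.11",
+//	  "dsn": "<mysql dsn>",
+//	  "debug": false,
+//	  "http_addr": "0.0.0.0:8080",
+//	  "domain": "https://example.com",
+//	  ...
+//	}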
diff --git a/pkg/controllers/controller.go b/pkg/controllers/controller.go
new file mode 100644
index 0000000..ef05227
--- /dev/null
+++ b/pkg/controllers/controller.go
@@ -0,0 +1,204 @@
+package controllers
+
+import (
+	"encoding/json"
+	"fmt"
+	"intent-system/pkg/config"
+	"intent-system/pkg/dal/core"
+	"intent-system/pkg/dal/ws"
+	"intent-system/pkg/itypes"
+	"intent-system/pkg/middleware"
+	"intent-system/pkg/sessions"
+	"net/http"
+
+	"github.com/civet148/sqlca/v2"
+
+	"github.com/civet148/log"
+	"github.com/gin-gonic/gin"
+)
+
+type Controller struct {
+	cfg            *config.Config
+	BizCore        *core.BizCore
+	CommonCore     *core.CommonCore
+	PlatformCore   *core.PlatformCore
+	CustomerCore   *core.CustomerCore
+	DeployCore     *core.DeployCore
+	GatewayCore    *core.GatewayCore
+	SchedulerCore  *core.SchedulerCore
+	DeployTaskCore *core.DeployTaskCore
+
+	DeployProgressPool *ws.WebSocketPool // WebSocket pool for pushing deploy progress
+}
+
+func NewController(cfg *config.Config) *Controller {
+	var err error
+	var my, pg *sqlca.Engine
+	my, err = sqlca.NewEngine(cfg.DSN)
+	if err != nil {
+		log.Panic("MySQL source [%s] connect error [%s]", cfg.DSN, err.Error())
+		return nil
+	}
+	my.Debug(cfg.Debug)     // SQL debug logging
+	my.SlowQuery(true, 800) // warn on queries slower than 800ms
+	pg, err = sqlca.NewEngine(cfg.Postgresql)
+	if err != nil {
+		log.Panic("PostgreSQL source [%s] connect error [%s]", cfg.Postgresql, err.Error())
+		return nil
+	}
+	pg.Debug(cfg.Debug)     // SQL debug logging
+	pg.SlowQuery(true, 800) // warn on queries slower than 800ms
+	return &Controller{
+		cfg:                cfg,
+		BizCore:            core.NewBizCore(cfg, my),
+		CommonCore:         core.NewCommonCore(cfg, my),
+		PlatformCore:       core.NewPlatformCore(cfg, my),
+		CustomerCore:       core.NewCustomerCore(cfg, my),
+		DeployCore:         core.NewDeployCore(cfg, my),
+		GatewayCore:        core.NewGatewayCore(cfg, my),
+		SchedulerCore:      core.NewSchedulerCore(cfg, my, pg),
+		DeployProgressPool: ws.NewWebSocketPool(),
+	}
+}
+
+func (m *Controller) OK(c *gin.Context, data interface{}, count int, total int64) {
+	var bc = itypes.BizOK
+	if data == nil {
+		data = struct{}{}
+	}
+	var r = &itypes.HttpResponse{
+		Header: itypes.HttpHeader{
+			Code:    bc.Code,
+			Message: bc.Message,
+			Count:   count,
+			Total:   total,
+		},
+		Data: data,
+	}
+	c.JSON(http.StatusOK, r)
+	c.Abort()
+}
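+
+// Every success passes through this envelope; with the header fields above it
+// serializes roughly as below (the exact JSON keys come from the itypes
+// struct tags, which live outside this file):
+//
+//	{"header":{"code":0,"message":"OK","count":1,"total":1},"data":{...}}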
+
+func (m *Controller) Error(c *gin.Context, bc itypes.BizCode) {
+	var r = &itypes.HttpResponse{
+		Header: itypes.HttpHeader{
+			Code:    bc.Code,
+			Message: bc.Message,
+			Count:   0,
+		},
+		Data: struct{}{},
+	}
+	log.Errorf("[Controller] response error code [%d] message [%s]", bc.Code, bc.Message)
+	c.JSON(http.StatusOK, r)
+	c.Abort()
+}
+
+func (m *Controller) ErrorStatus(c *gin.Context, status int, message string) {
+	log.Errorf("[Controller] http status code [%d] message [%s]", status, message)
+	c.String(status, message)
+	c.Abort()
+}
+
+func (m *Controller) RpcResult(c *gin.Context, data interface{}, err error, id interface{}) {
+	var status = http.StatusOK
+	var strResp string
+	if err != nil {
+		status = http.StatusInternalServerError
+		data = &itypes.RpcResponse{
+			Id:      id,
+			JsonRpc: "2.0",
+			Error: itypes.RpcError{
+				Code:    itypes.CODE_INTERNAL_SERVER_ERROR,
+				Message: err.Error(),
+			},
+			Result: nil,
+		}
+	}
+	switch data.(type) {
+	case string:
+		strResp = data.(string)
+	default:
+		{
+			b, _ := json.Marshal(data)
+			strResp = string(b)
+		}
+	}
+
+	c.String(status, strResp)
+	c.Abort()
+}
+
+func (m *Controller) GetClientIP(c *gin.Context) (strIP string) {
+	return c.ClientIP()
+}
+func (m *Controller) GetClientSystemOS(c *gin.Context) (strOS string) {
+	// returns the raw User-Agent header; OS detection is left to the caller
+	return c.GetHeader("User-Agent")
+}
+
+func (m *Controller) ContextFromAuthToken(c *gin.Context) (ctx *itypes.Context, ok bool) {
+	var err error
+	ctx = sessions.GetContext(c)
+	if ctx == nil {
+		err = log.Errorf("user session context is nil, token [%s]", middleware.GetAuthToken(c))
+		m.Error(c, itypes.NewBizCode(itypes.CODE_UNAUTHORIZED, err.Error()))
+		return
+	}
+	return ctx, true
+}
+
+func (m *Controller) ContextPlatformPrivilege(c *gin.Context, privileges ...string) (ctx *itypes.Context, ok bool) {
+	var err error
+	ctx = sessions.GetContext(c)
+	if ctx == nil {
+		err = log.Errorf("user session context is nil, token [%s]", middleware.GetAuthToken(c))
+		m.Error(c, itypes.NewBizCode(itypes.CODE_UNAUTHORIZED, err.Error()))
+		return
+	}
+	for _, auth := range privileges {
+		if m.PlatformCore.CheckPrivilege(c, ctx, auth) {
+			return ctx, true
+		}
+	}
+	err = log.Errorf("user name [%s] id [%v] have no privilege %+v", ctx.UserName(), ctx.UserId(), privileges)
+	m.Error(c, itypes.NewBizCode(itypes.CODE_ACCESS_DENY, err.Error()))
+	return ctx, false
+}
+
+func (m *Controller) CheckPrivilege(c *gin.Context, ctx *itypes.Context, privileges ...string) (ok bool) {
+	for _, auth := range privileges {
+		if m.PlatformCore.CheckPrivilege(c, ctx, auth) {
+			return true
+		}
+	}
+	log.Warnf("operator name [%s] id [%v] have no privilege %+v", ctx.UserName(), ctx.UserId(), privileges)
+	return false
+}
+
+func (m *Controller) bindJSON(c *gin.Context, req interface{}) (err error) {
+	if err = c.ShouldBindJSON(req); err != nil {
+		err = log.Errorf("invalid json or required fields, error [%s]", err)
+		m.Error(c, itypes.NewBizCode(itypes.CODE_INVALID_JSON_OR_REQUIRED_PARAMS, err.Error()))
+		c.Abort()
+		return
+	}
+
+	body, _ := json.MarshalIndent(req, "", "\t")
+	log.Debugf("request from [%s] body [%+v]", c.ClientIP(), string(body))
+	return nil
+}
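+
+// Note: bindJSON already writes the error response and aborts the context,
+// so callers only need to return when it fails; the extra log.Errorf at the
+// call sites is purely for tracing.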
+
+func (m *Controller) isNilString(strIn string) bool {
+	return strIn == ""
+}
+
+func (m *Controller) isZero(n interface{}) bool {
+	return fmt.Sprintf("%v", n) == "0"
+}
diff --git a/pkg/controllers/controller_biz.go b/pkg/controllers/controller_biz.go
new file mode 100644
index 0000000..5eb4111
--- /dev/null
+++ b/pkg/controllers/controller_biz.go
@@ -0,0 +1,536 @@
+package controllers
+
+import (
+	"github.com/civet148/log"
+	"github.com/gin-gonic/gin"
+	"intent-system/pkg/privilege"
+	"intent-system/pkg/proto"
+	"intent-system/pkg/sessions"
+)
+
+func (m *Controller) NewsList(c *gin.Context) {
+	var req proto.NewsListReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx := sessions.GetContext(c)
+	var ok bool
+	if ctx != nil {
+		ok = m.CheckPrivilege(c, ctx, privilege.NewsAccess)
+	}
+	resp, total, code := m.BizCore.NewsList(ctx, &req, ok)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, len(resp.List), total)
+}
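+
+// The handlers below all follow the same shape as NewsList: bind the JSON
+// body, resolve the session context and the required privilege, delegate to
+// BizCore, then answer through m.Error or m.OK. Only the request type, the
+// privilege constant and the core method differ between handlers.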
+
+func (m *Controller) NewsAdd(c *gin.Context) {
+	var req proto.NewsAddReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.NewsAdd)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.NewsAdd(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) NewsEdit(c *gin.Context) {
+	var req proto.NewsEditReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.NewsEdit)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.NewsEdit(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) NewsDelete(c *gin.Context) {
+	var req proto.NewsDeleteReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.NewsDelete)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.NewsDelete(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) NewsCompare(c *gin.Context) {
+	var req proto.NewsCompareReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.NewsAccess)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.NewsCompare(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) NewsPublish(c *gin.Context) {
+	var req proto.NewsPublishReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.NewsEdit)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.NewsPublish(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) NewsDraftList(c *gin.Context) {
+	var req proto.NewsDraftListReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.NewsAccess)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, total, code := m.BizCore.NewsDraftList(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, len(resp.List), total)
+}
+
+func (m *Controller) NewsDraftEdit(c *gin.Context) {
+	var req proto.NewsDraftEditReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.NewsEdit)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.NewsDraftEdit(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) NewsDraftPublish(c *gin.Context) {
+	var req proto.NewsDraftPublishReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.NewsEdit)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.NewsDraftPublish(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) NewsDraftDelete(c *gin.Context) {
+	var req proto.NewsDraftDeleteReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.NewsDelete)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.NewsDraftDelete(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) NewsTag(c *gin.Context) {
+	var req proto.NewsTagReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.NewsEdit)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.NewsTag(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) QaList(c *gin.Context) {
+	var req proto.QaListReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx := sessions.GetContext(c)
+	var ok bool
+	if ctx != nil {
+		ok = m.CheckPrivilege(c, ctx, privilege.QA_Access)
+	}
+	resp, total, code := m.BizCore.QaList(ctx, &req, ok)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, len(resp.List), total)
+}
+
+func (m *Controller) QaAdd(c *gin.Context) {
+	var req proto.QaAddReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.QA_Add)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.QaAdd(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) QaEdit(c *gin.Context) {
+	var req proto.QaEditReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.QA_Edit)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.QaEdit(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) QaDelete(c *gin.Context) {
+	var req proto.QaDeleteReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.QA_Delete)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.QaDelete(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) QaDraftList(c *gin.Context) {
+	var req proto.QaDraftListReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.QA_Access)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, total, code := m.BizCore.QaDraftList(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, len(resp.List), total)
+}
+
+func (m *Controller) QaDraftEdit(c *gin.Context) {
+	var req proto.QaDraftEditReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.QA_Edit)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.QaDraftEdit(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) QaDraftPublish(c *gin.Context) {
+	var req proto.QaDraftPublishReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.QA_Edit)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.QaDraftPublish(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) QaDraftDelete(c *gin.Context) {
+	var req proto.QaDraftDeleteReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.QA_Delete)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.QaDraftDelete(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) SubListAll(c *gin.Context) {
+	var req proto.SubListAllReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.SubAccess)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, total, code := m.BizCore.SubListAll(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, len(resp.List), total)
+}
+
+func (m *Controller) SubListPushed(c *gin.Context) {
+	var req proto.SubListPushedReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.SubAccess)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, total, code := m.BizCore.SubListPushed(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, len(resp.List), total)
+}
+
+func (m *Controller) SubListToday(c *gin.Context) {
+	var req proto.SubListTodayReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.SubAccess)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, total, code := m.BizCore.SubListToday(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, len(resp.List), total)
+}
+
+func (m *Controller) SubAddNews(c *gin.Context) {
+	var req proto.SubAddNewsReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.SubAdd)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.SubAddNews(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) SubEditNews(c *gin.Context) {
+	var req proto.SubEditNewsReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.SubEdit)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.SubEditNews(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) TagList(c *gin.Context) {
+	var req proto.TagListReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	resp, total, code := m.BizCore.TagList(nil, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, len(resp.List), total)
+}
+
+func (m *Controller) TagAdd(c *gin.Context) {
+	var req proto.TagAddReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.TagAdd)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.TagAdd(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) TagEdit(c *gin.Context) {
+	var req proto.TagEditReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.TagEdit)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.TagEdit(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) TagDelete(c *gin.Context) {
+	var req proto.TagDeleteReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.TagDelete)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, code := m.BizCore.TagDelete(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
diff --git a/pkg/controllers/controller_common.go b/pkg/controllers/controller_common.go
new file mode 100644
index 0000000..7bb9c19
--- /dev/null
+++ b/pkg/controllers/controller_common.go
@@ -0,0 +1,21 @@
+package controllers
+
+import (
+	"github.com/civet148/log"
+	"github.com/gin-gonic/gin"
+	"intent-system/pkg/proto"
+)
+
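+// SendAuthCode sends an email verification code; the actual work is done by
+// CommonCore.SendAuthCode (see pkg/dal/core/core_common.go).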
+func (m *Controller) SendAuthCode(c *gin.Context) {
+	var req proto.SendAuthCodeReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	resp, code := m.CommonCore.SendAuthCode(&req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
diff --git a/pkg/controllers/controller_customer.go b/pkg/controllers/controller_customer.go
new file mode 100644
index 0000000..96ecdc2
--- /dev/null
+++ b/pkg/controllers/controller_customer.go
@@ -0,0 +1,189 @@
+package controllers
+
+import (
+	"intent-system/pkg/itypes"
+	"intent-system/pkg/middleware"
+	"intent-system/pkg/privilege"
+	"intent-system/pkg/proto"
+	"intent-system/pkg/sessions"
+
+	"github.com/civet148/log"
+	"github.com/gin-gonic/gin"
+)
+
+// CustomerList returns the customer list (privilege check required).
+func (m *Controller) CustomerList(c *gin.Context) {
+	var req proto.CustomerListReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.CustomerAccess)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	resp, total, code := m.CustomerCore.CustomerList(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, len(resp.List), total)
+}
+
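+// CustomerRegister registers a customer account by email, backed by
+// CustomerCore.UserRegister.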
+func (m *Controller) CustomerRegister(c *gin.Context) {
+	var req proto.CustomerRegisterReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	resp, code := m.CustomerCore.UserRegister(&req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) CustomerURegister(c *gin.Context) {
+	var req proto.CustomerURegisterReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	resp, code := m.CustomerCore.UserNameRegister(&req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
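+// CustomerLogin authenticates a customer, loads the subscription record,
+// builds a session, signs an auth token via middleware.GenerateToken and
+// caches the session context.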
+func (m *Controller) CustomerLogin(c *gin.Context) {
+	var err error
+	var req proto.CustomerLoginReq
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	strIP := m.GetClientIP(c)
+	do, code := m.CustomerCore.CustomerLogin(&req, strIP)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+
+	sub, code := m.CustomerCore.CustomerSubscriber(do.Email)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	s := &itypes.Session{
+		UserId:      do.GetId(),
+		UserName:    do.GetUserName(),
+		Alias:       do.GetUserAlias(),
+		PhoneNumber: do.GetPhoneNumber(),
+		Email:       do.GetEmail(),
+		IsAdmin:     false,
+		IsCustomer:  true,
+		LoginIP:     strIP,
+	}
+
+	if s.AuthToken, err = middleware.GenerateToken(s); err != nil {
+		err = log.Errorf("generate token error [%s]", err.Error())
+		m.Error(c, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error()))
+		return
+	}
+
+	_ = sessions.NewContext(s) // create and save user context
+	log.Debugf("customer [%v] login successful, customer id [%v] token [%s]", s.UserName, s.UserId, s.AuthToken)
+	var resp = proto.CustomerLoginResp{
+		Id:           do.Id,
+		Version:      m.cfg.Version,
+		UserName:     do.UserName,
+		AuthToken:    s.AuthToken,
+		LoginTime:    do.LoginTime,
+		LoginIp:      do.LoginIp,
+		FirstName:    do.FirstName,
+		LastName:     do.LastName,
+		Title:        do.Title,
+		Company:      do.Company,
+		IsSubscribed: do.IsSubscribed,
+		SubTags:      sub.Tags,
+		Privileges:   make([]string, 0),
+	}
+	m.OK(c, &resp, 1, 1)
+}
+
+func (m *Controller) CustomerEdit(c *gin.Context) {
+	var err error
+	var req proto.CustomerEditReq
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx := sessions.GetContext(c)
+	resp, code := m.CustomerCore.CustomerEdit(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) CustomerLogout(c *gin.Context) {
+	resp, code := m.CustomerCore.CustomerLogout(c)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) CustomerSubInfo(c *gin.Context) {
+	var err error
+	var req proto.CustomerSubInfoReq
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx := sessions.GetContext(c)
+	resp, code := m.CustomerCore.CustomerSubInfo(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) CustomerSubscribe(c *gin.Context) {
+	var err error
+	var req proto.CustomerSubscribeReq
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx := sessions.GetContext(c)
+	resp, code := m.CustomerCore.CustomerSubscribe(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) CustomerUnsubscribe(c *gin.Context) {
+	var err error
+	var req proto.CustomerUnsubscribeReq
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+	ctx := sessions.GetContext(c)
+	resp, code := m.CustomerCore.CustomerUnsubscribe(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
diff --git a/pkg/controllers/controller_deploy.go b/pkg/controllers/controller_deploy.go
new file mode 100644
index 0000000..02446a1
--- /dev/null
+++ b/pkg/controllers/controller_deploy.go
@@ -0,0 +1,150 @@
+package controllers
+
+import (
+	"intent-system/pkg/privilege"
+	"intent-system/pkg/proto"
+	"intent-system/pkg/sessions"
+
+	"github.com/civet148/log"
+	"github.com/gin-gonic/gin"
+)
+
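+// The deploy handlers below bind the request, run a best-effort privilege
+// check (privilege.Null) and forward the call to DeployCore; in the current
+// implementation a failed check does not block the request.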
+func (m *Controller) DeployStatus(c *gin.Context) {
+	log.Debugf("deploy status request received")
+
+	var req proto.DeployStatusReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+
+	// NOTE: the privilege check result is intentionally not enforced here;
+	// any authenticated session may query deploy status.
+	if ctx := sessions.GetContext(c); ctx != nil {
+		_ = m.CheckPrivilege(c, ctx, privilege.Null)
+	}
+
+	resp, code := m.DeployCore.Status(&req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+
+	m.OK(c, resp, 0, 1)
+
+}
+
+func (m *Controller) DeployDeploy(c *gin.Context) {
+	log.Debugf("deploy request received")
+
+	var req proto.DeployDeployReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+
+	// NOTE: the privilege check result is intentionally not enforced here.
+	if ctx := sessions.GetContext(c); ctx != nil {
+		_ = m.CheckPrivilege(c, ctx, privilege.Null)
+	}
+
+	resp, code := m.DeployCore.Deploy(&req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+
+	m.OK(c, resp, 0, 1)
+
+}
+
+func (m *Controller) DeployDelete(c *gin.Context) {
+	log.Debugf("deploy delete request received")
+
+	var req proto.DeployDeleteReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+
+	// NOTE: the privilege check result is intentionally not enforced here.
+	if ctx := sessions.GetContext(c); ctx != nil {
+		_ = m.CheckPrivilege(c, ctx, privilege.Null)
+	}
+
+	resp, code := m.DeployCore.Delete(&req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+
+	m.OK(c, resp, 0, 1)
+
+}
+
+func (m *Controller) DeployStart(c *gin.Context) {
+	log.Debugf("deploy start request received")
+
+	var req proto.DeployStartReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+
+	// NOTE: the privilege check result is intentionally not enforced here.
+	if ctx := sessions.GetContext(c); ctx != nil {
+		_ = m.CheckPrivilege(c, ctx, privilege.Null)
+	}
+
+	resp, code := m.DeployCore.Start(&req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+
+	m.OK(c, resp, 0, 1)
+
+}
+
+func (m *Controller) DeployStop(c *gin.Context) {
+	log.Debugf("deploy stop request received")
+
+	var req proto.DeployStopReq
+	if err := m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+
+	// NOTE: the privilege check result is intentionally not enforced here.
+	if ctx := sessions.GetContext(c); ctx != nil {
+		_ = m.CheckPrivilege(c, ctx, privilege.Null)
+	}
+
+	resp, code := m.DeployCore.Stop(&req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+
+	m.OK(c, resp, 0, 1)
+
+}
diff --git a/pkg/controllers/controller_gateway.go b/pkg/controllers/controller_gateway.go
new file mode 100644
index 0000000..d5e253f
--- /dev/null
+++ b/pkg/controllers/controller_gateway.go
@@ -0,0 +1,30 @@
+package controllers
+
+import (
+	"github.com/civet148/log"
+	"github.com/gin-gonic/gin"
+	"github.com/gorilla/websocket"
+	"net/http"
+)
+
+var upgrader = websocket.Upgrader{
+	// allow cross-origin requests: all origins are accepted
+	CheckOrigin: func(r *http.Request) bool {
+		return true
+	},
+}
+
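+// WebSocketRequest upgrades the HTTP request to a WebSocket connection and
+// relays the traffic through GatewayCore.WebSocketRelay.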
+func (m *Controller) WebSocketRequest(c *gin.Context) {
+	log.Infof("websocket request %+v", c.Request)
+	conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
+	if err != nil {
+		log.Errorf("websocket upgrade error [%s]", err)
+		m.ErrorStatus(c, http.StatusBadRequest, err.Error())
+		return
+	}
+	defer conn.Close()
+	err = m.GatewayCore.WebSocketRelay(conn)
+	if err != nil {
+		log.Warnf("websocket from [%s] relay closed with error [%s]", m.GetClientIP(c), err.Error())
+	}
+}
diff --git a/pkg/controllers/controller_platform.go b/pkg/controllers/controller_platform.go
new file mode 100644
index 0000000..00f20e7
--- /dev/null
+++ b/pkg/controllers/controller_platform.go
@@ -0,0 +1,612 @@
+package controllers
+
+import (
+	"intent-system/pkg/dal/models"
+	"intent-system/pkg/itypes"
+	"intent-system/pkg/middleware"
+	"intent-system/pkg/privilege"
+	"intent-system/pkg/proto"
+	"intent-system/pkg/sessions"
+	"intent-system/pkg/utils"
+
+	"github.com/civet148/log"
+	"github.com/gin-gonic/gin"
+)
+
+func (m *Controller) PlatformLogin(c *gin.Context) { //user login
+	var err error
+	var req proto.PlatformLoginReq
+
+	var ctx *itypes.Context
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+
+	var do *models.UserDO
+	var strIP = m.GetClientIP(c)
+	var code itypes.BizCode
+	if do, code = m.PlatformCore.UserLogin(req.UserName, req.Password, strIP); !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	s := &itypes.Session{
+		UserId:      do.GetId(),
+		UserName:    do.GetUserName(),
+		Alias:       do.GetUserAlias(),
+		PhoneNumber: do.GetPhoneNumber(),
+		IsAdmin:     do.GetIsAdmin(),
+		Email:       do.GetEmail(),
+		LoginIP:     strIP,
+	}
+
+	if s.AuthToken, err = middleware.GenerateToken(s); err != nil {
+		err = log.Errorf("generate token error [%s]", err.Error())
+		m.Error(c, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error()))
+		return
+	}
+
+	ctx = sessions.NewContext(s)
+	log.Debugf("user [%v] login successful, user id [%v] is admin [%v] token [%s]", s.UserName, s.UserId, s.IsAdmin, s.AuthToken)
+
+	role := m.PlatformCore.GetUserRole(ctx, do.GetUserName())
+	if role == nil {
+		err = log.Errorf("user [%s] role not found", req.UserName)
+		m.Error(c, itypes.NewBizCode(itypes.CODE_NOT_FOUND, err.Error()))
+		return
+	}
+
+	privileges := m.PlatformCore.GetUserRoleList(do.UserName)
+	var resp = proto.PlatformLoginResp{
+		Id:        do.Id,
+		Version:   m.cfg.Version,
+		UserName:  do.UserName,
+		AuthToken: s.AuthToken,
+		LoginTime: do.LoginTime,
+		LoginIp:   do.LoginIp,
+		Role:      role.RoleName,
+		Privilege: privileges,
+	}
+	m.OK(c, &resp, 1, 1)
+}
+
+func (m *Controller) PlatformLogout(c *gin.Context) { //user logout
+	sessions.RemoveContext(c)
+	m.OK(c, nil, 0, 0)
+}
+
+func (m *Controller) PlatformCheckExist(c *gin.Context) { //check user account or email exist
+	var err error
+	var req proto.PlatformCheckExistReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.Null)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	code := m.PlatformCore.CheckExist(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, &proto.PlatformCheckExistResp{}, 1, 1)
+}
+
+func (m *Controller) PlatformListUser(c *gin.Context) { //list platform users
+
+	var err error
+	var req proto.PlatformListUserReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.UserAccess)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	users, total, code := m.PlatformCore.ListUser(ctx, &req)
+	if !code.Ok() {
+		log.Errorf("list user code [%s]", code.String())
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, proto.PlatformListUserResp{Users: users}, len(users), total)
+}
+
+func (m *Controller) PlatformCreateUser(c *gin.Context) { //create user account
+
+	var err error
+	var req proto.PlatformCreateUserReq
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.UserAdd)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	if code := m.PlatformCore.CheckUserNameExist(ctx, req.UserName); !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+
+	if req.Email != "" {
+		if code := m.PlatformCore.CheckUserEmailExist(ctx, req.Email); !code.Ok() {
+			m.Error(c, code)
+			return
+		}
+		if !utils.VerifyEmailFormat(req.Email) {
+			err = log.Errorf("email [%s] format error", req.Email)
+			m.Error(c, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error()))
+			return
+		}
+	}
+	user, code := m.PlatformCore.CreateUser(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+
+	var resp = proto.PlatformCreateUserResp{
+		UserId: user.GetId(),
+	}
+
+	m.OK(c, &resp, 1, 1)
+}
+
+func (m *Controller) PlatformEditUser(c *gin.Context) { //edit user information
+	var err error
+	var req proto.PlatformEditUserReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.UserEdit)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	var code itypes.BizCode
+	if code = m.PlatformCore.EditUser(ctx, &req); !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, &proto.PlatformEditUserResp{}, 1, 1)
+}
+
+func (m *Controller) PlatformEnableUser(c *gin.Context) {
+	var err error
+	var req proto.PlatformEnableUserReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.UserEdit)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	r, code := m.PlatformCore.EnableUser(ctx, &req)
+	if !code.Ok() {
+		log.Warnf("name [%s] id [%v]  operator user failed", ctx.UserName(), ctx.UserId())
+		m.Error(c, code)
+		return
+	}
+
+	m.OK(c, r, 1, 1)
+}
+
+func (m *Controller) PlatformDisableUser(c *gin.Context) {
+	var err error
+	var req proto.PlatformDisableUserReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.UserEdit)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	r, code := m.PlatformCore.DisableUser(ctx, &req)
+	if !code.Ok() {
+		log.Warnf("name [%s] id [%v]  operator user failed", ctx.UserName(), ctx.UserId())
+		m.Error(c, code)
+		return
+	}
+
+	m.OK(c, r, 1, 1)
+}
+
+func (m *Controller) PlatformDeleteUser(c *gin.Context) { //delete user account
+
+	var err error
+	var req proto.PlatformDeleteUserReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.UserDelete)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+	if req.UserName == ctx.UserName() {
+		err = log.Errorf("can't delete self")
+		m.Error(c, itypes.NewBizCode(itypes.CODE_ACCESS_DENY, err.Error()))
+		return
+	}
+
+	if code := m.PlatformCore.DeleteUser(ctx, &req); !code.Ok() {
+		log.Warnf("operator name [%s] id [%v]  delete user failed", ctx.UserName(), ctx.UserId())
+		m.Error(c, code)
+		return
+	}
+
+	m.OK(c, &proto.PlatformDeleteUserResp{}, 1, 1)
+}
+
+func (m *Controller) PlatformDeleteUsers(c *gin.Context) { //delete user account
+	var err error
+	var req proto.PlatformDeleteUsersReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.UserDelete)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	if code := m.PlatformCore.DeleteUsers(ctx, &req); !code.Ok() {
+		log.Warnf("operator name [%s] id [%v]  delete user failed", ctx.UserName(), ctx.UserId())
+		m.Error(c, code)
+		return
+	}
+
+	m.OK(c, &proto.PlatformDeleteUsersResp{}, 1, 1)
+}
+
+func (m *Controller) PlatformListRole(c *gin.Context) { //list platform roles
+	var err error
+	var req proto.PlatformListRoleReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.RoleAccess)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	roles, total, code := m.PlatformCore.ListRole(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	count := len(roles)
+	m.OK(c, &proto.PlatformListRoleResp{Roles: roles}, count, total)
+}
+
+func (m *Controller) PlatformCreateRole(c *gin.Context) { //create a custom platform role
+	var err error
+	var req proto.PlatformCreateRoleReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.RoleAdd)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	if code := m.PlatformCore.CreateRole(ctx, &req); !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+
+	m.OK(c, &proto.PlatformCreateRoleResp{}, 1, 1)
+}
+
+func (m *Controller) PlatformEditRole(c *gin.Context) { //edit custom platform role
+	var err error
+	var req proto.PlatformEditRoleReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.RoleEdit)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	if code := m.PlatformCore.EditRole(ctx, &req); !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+
+	m.OK(c, &proto.PlatformEditRoleResp{}, 1, 1)
+}
+
+func (m *Controller) PlatformDeleteRole(c *gin.Context) { //delete custom platform role
+	var err error
+	var req proto.PlatformDeleteRoleReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.RoleDelete)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	if code := m.PlatformCore.DeleteRole(ctx, &req); !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+
+	m.OK(c, &proto.PlatformDeleteRoleResp{}, 1, 1)
+}
+
+// PlatformAuthRole grants privileges to a role.
+func (m *Controller) PlatformAuthRole(c *gin.Context) {
+	var err error
+	var req proto.PlatformAuthRoleReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.RoleAuthority)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	if code := m.PlatformCore.AuthRole(ctx, &req); !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+
+	m.OK(c, &proto.PlatformAuthRoleResp{}, 1, 1)
+}
+
+func (m *Controller) PlatformInquireAuth(c *gin.Context) {
+	var err error
+	var req proto.PlatformInquireAuthReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.RoleAccess)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	authority, code := m.PlatformCore.InquireAuth(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+
+	m.OK(c, &proto.PlatformInquireAuthResp{Privilege: authority}, 1, 1)
+}
+
+func (m *Controller) PlatformPrivilegeTree(c *gin.Context) {
+
+	var err error
+	var req proto.PlatformPrivilegeTreeReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.RoleAccess)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	resp, code := m.PlatformCore.PrivilegeTree(ctx, &req)
+	if !code.Ok() {
+		log.Errorf("privilege tree query failed, code [%s]", code)
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
+
+func (m *Controller) PlatformResetPassword(c *gin.Context) { //platform administrator reset other user's password
+	var err error
+	var req proto.PlatformResetPasswordReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.UserEdit)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	if code := m.PlatformCore.ResetUserPassword(ctx, &req); !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+
+	m.OK(c, &proto.PlatformResetPasswordResp{}, 1, 1)
+}
+
+func (m *Controller) PlatformChangePassword(c *gin.Context) { //platform user change password by self
+	var err error
+	var req proto.PlatformResetPasswordReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.Null)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	if m.isNilString(req.OldPassword) {
+		err = log.Errorf("old password is nil or empty")
+		m.Error(c, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error()))
+		return
+	}
+
+	ok, code := m.PlatformCore.CheckUserPassword(ctx, ctx.UserName(), req.OldPassword)
+	if !ok {
+		if !code.Ok() {
+			m.Error(c, code)
+			return
+		}
+		log.Errorf("user [%s] old password mismatch when changing password", ctx.UserName())
+		m.Error(c, itypes.NewBizCode(itypes.CODE_INVALID_PASSWORD))
+		return
+	}
+	req.UserName = ctx.UserName() //user change password by self (so the user name must be self name)
+	if code = m.PlatformCore.ResetUserPassword(ctx, &req); !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, &proto.PlatformResetPasswordResp{}, 1, 1)
+}
+
+func (m *Controller) PlatformListRoleUser(c *gin.Context) { //list role user
+	var err error
+	var req proto.PlatformListRoleUserReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.RoleAccess)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	users, total, code := m.PlatformCore.ListRoleUser(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	userCount := len(users)
+	m.OK(c, &proto.PlatformListRoleUserResp{
+		RoleName:  req.RoleName,
+		UserCount: userCount,
+		Users:     users,
+	}, userCount, total)
+}
+
+func (m *Controller) PlatformRefreshAuthToken(c *gin.Context) {
+	var err error
+	var req proto.PlatformRefreshAuthTokenReq
+
+	if err = c.BindJSON(&req); err != nil {
+		log.Errorf("%s", err)
+		return
+	}
+
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.Null)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	var IP = m.GetClientIP(c)
+
+	s := &itypes.Session{
+		UserId:      ctx.UserId(),
+		UserName:    ctx.UserName(),
+		Alias:       ctx.Alias(),
+		PhoneNumber: ctx.PhoneNumber(),
+		IsAdmin:     ctx.IsAdmin(),
+		Email:       ctx.GetEmail(),
+		LoginIP:     IP,
+	}
+
+	if s.AuthToken, err = middleware.GenerateToken(s); err != nil {
+		err = log.Errorf("generate token error [%s]", err.Error())
+		m.Error(c, itypes.NewBizCode(itypes.CODE_ERROR, err.Error()))
+		return
+	}
+	_ = sessions.NewContext(s)
+	var resp = proto.PlatformRefreshAuthTokenResp{
+		AuthToken: s.AuthToken,
+	}
+	m.OK(c, &resp, 1, 1)
+}
+
+func (m *Controller) PlatformListOperLog(c *gin.Context) {
+	var err error
+	var req proto.PlatformListOperLogReq
+
+	if err = m.bindJSON(c, &req); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+
+	ctx, ok := m.ContextPlatformPrivilege(c, privilege.Null)
+	if !ok {
+		log.Errorf("user authentication context is nil or privilege check failed")
+		return
+	}
+
+	list, total, code := m.PlatformCore.ListOperLog(ctx, &req)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	count := len(list)
+	m.OK(c, &proto.PlatformListOperLogResp{
+		List: list,
+	}, count, total)
+}
+
+// PlatformUploadFile handles image upload.
+func (m *Controller) PlatformUploadFile(c *gin.Context) {
+	resp, code := m.PlatformCore.UploadFile(c)
+	if !code.Ok() {
+		m.Error(c, code)
+		return
+	}
+	m.OK(c, resp, 1, 1)
+}
diff --git a/pkg/controllers/controller_ws.go b/pkg/controllers/controller_ws.go
new file mode 100644
index 0000000..cac6731
--- /dev/null
+++ b/pkg/controllers/controller_ws.go
@@ -0,0 +1,71 @@
+package controllers
+
+import (
+	"intent-system/pkg/dal/core"
+	"net/http"
+	"time"
+
+	"github.com/civet148/log"
+	"github.com/gin-gonic/gin"
+	"github.com/gorilla/websocket"
+)
+
+var upgrader1 = websocket.Upgrader{
+	CheckOrigin: func(r *http.Request) bool {
+		// allow all origins during development; restrict this in production
+		return true
+	},
+}
+
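+// DeployStatusWS streams deploy-task output over WebSocket. Keepalive: the
+// server pings every 30s and expects traffic (e.g. a pong) within the 60s
+// read deadline; the connection detaches from the task when the client
+// disconnects.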
+func (m *Controller) DeployStatusWS(c *gin.Context) {
+	appID := c.Param("appID")
+	username := c.Param("user")
+
+	conn, err := upgrader1.Upgrade(c.Writer, c.Request, nil)
+	if err != nil {
+		log.Warnf("WebSocket 升级失败: %v", err)
+		return
+	}
+	defer conn.Close()
+
+	// set the initial read deadline
+	conn.SetReadDeadline(time.Now().Add(60 * time.Second))
+
+	// refresh the read deadline whenever a pong frame arrives
+	conn.SetPongHandler(func(string) error {
+		conn.SetReadDeadline(time.Now().Add(60 * time.Second))
+		return nil
+	})
+
+	// start the ping heartbeat: a background goroutine sends a ping periodically
+	go func() {
+		ticker := time.NewTicker(30 * time.Second)
+		defer ticker.Stop()
+
+		for range ticker.C {
+			// send a ping frame to keep the connection alive
+			if err := conn.WriteControl(websocket.PingMessage, []byte("ping"), time.Now().Add(10*time.Second)); err != nil {
+				log.Warnf("failed to send ping: %v", err)
+				return
+			}
+			log.Debugf("ping frame sent to client")
+		}
+	}()
+
+	// look up the deploy task and attach this connection
+	task := core.GlobalTaskManager.Get(appID)
+	if task == nil {
+		_ = conn.WriteMessage(websocket.TextMessage, []byte("task not started or already finished"))
+		return
+	}
+	task.Attach(conn, username)
+	defer task.Detach(conn)
+
+	// keep reading so we can detect when the client disconnects
+	for {
+		if _, _, err := conn.ReadMessage(); err != nil {
+			log.Infof("client disconnected (%s/%s): %v", username, appID, err)
+			break
+		}
+	}
+}
diff --git a/pkg/crypto/crypto_aes.go b/pkg/crypto/crypto_aes.go
new file mode 100644
index 0000000..f6ee695
--- /dev/null
+++ b/pkg/crypto/crypto_aes.go
@@ -0,0 +1,44 @@
+package crypto
+
+import (
+	"github.com/civet148/gotools/cryptos/goaes"
+	_ "github.com/civet148/gotools/cryptos/goaes/cbc" //注册CBC加解密对象创建方法
+	_ "github.com/civet148/gotools/cryptos/goaes/cfb" //注册CFB加解密对象创建方法
+	_ "github.com/civet148/gotools/cryptos/goaes/ctr" //注册CTR加解密对象创建方法
+	_ "github.com/civet148/gotools/cryptos/goaes/ecb" //注册ECB加解密对象创建方法
+	_ "github.com/civet148/gotools/cryptos/goaes/ofb" //注册OFB加解密对象创建方法
+	"github.com/civet148/log"
+)
+
+var DefaultKEY = []byte("c6vgru6d9ic6gu563cyoegnzdq0klvx4") // encryption key (must be 16/24/32 bytes)
+var DefaultIV = []byte("1de1c8c41007a070")                  // initialization vector (16 bytes)
+
+type CryptoAES struct {
+	aes goaes.CryptoAES
+}
+
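+// A minimal round-trip sketch using the default key/IV (CBC mode):
+//
+//	aes := NewCryptoAESDefault()
+//	enc, _ := aes.EncryptBase64([]byte("hello"))
+//	dec, _ := aes.DecryptBase64(enc) // dec == []byte("hello")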
+func NewCryptoAES(key, iv []byte) *CryptoAES {
+	return &CryptoAES{
+		aes: goaes.NewCryptoAES(goaes.AES_Mode_CBC, key, iv),
+	}
+}
+
+func NewCryptoAESDefault() *CryptoAES {
+	return NewCryptoAES(DefaultKEY, DefaultIV)
+}
+
+func (c *CryptoAES) EncryptBase64(in []byte) (string, error) {
+	enc, err := c.aes.EncryptBase64(in)
+	if err != nil {
+		return "", log.Errorf("[%v] encrypt to base64 error [%v]", c.aes.GetMode(), err.Error())
+	}
+	return enc, nil
+}
+
+func (c *CryptoAES) DecryptBase64(in string) ([]byte, error) {
+	dec, err := c.aes.DecryptBase64(in)
+	if err != nil {
+		return nil, log.Errorf("[%v] decrypt from base64 [%s] error [%v]", c.aes.GetMode(), in, err.Error())
+	}
+	return dec, nil
+}
diff --git a/pkg/dal/core/core_biz.go b/pkg/dal/core/core_biz.go
new file mode 100644
index 0000000..93b0a7e
--- /dev/null
+++ b/pkg/dal/core/core_biz.go
@@ -0,0 +1,865 @@
+package core
+
+import (
+	"fmt"
+	"intent-system/pkg/config"
+	"intent-system/pkg/dal/dao"
+	"intent-system/pkg/dal/models"
+	"intent-system/pkg/itypes"
+	"intent-system/pkg/proto"
+	"intent-system/pkg/utils"
+
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+)
+
+type BizCore struct {
+	db            *sqlca.Engine
+	cfg           *config.Config
+	tagDAO        *dao.TagDAO
+	userDAO       *dao.UserDAO
+	newsDAO       *dao.NewsDAO
+	newsDraftDAO  *dao.NewsDraftDAO
+	customerDAO   *dao.CustomerDAO
+	dictionaryDAO *dao.DictionaryDAO
+	subDAO        *dao.NewsSubscribeDAO
+	qaDAO         *dao.QuestionAnswerDAO
+	qaDraftDAO    *dao.QuestionDraftDAO
+}
+
+func NewBizCore(cfg *config.Config, db *sqlca.Engine) *BizCore {
+	return &BizCore{
+		db:            db,
+		cfg:           cfg,
+		tagDAO:        dao.NewTagDAO(db),
+		userDAO:       dao.NewUserDAO(db),
+		newsDAO:       dao.NewNewsDAO(db),
+		newsDraftDAO:  dao.NewNewsDraftDAO(db),
+		customerDAO:   dao.NewCustomerDAO(db),
+		dictionaryDAO: dao.NewDictionaryDAO(db),
+		qaDAO:         dao.NewQuestionAnswerDAO(db),
+		subDAO:        dao.NewNewsSubscribeDAO(db),
+		qaDraftDAO:    dao.NewQuestionDraftDAO(db),
+	}
+}
+
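+// NewsList pages through news records; needExtra controls whether the
+// extra_data column is included in the query result.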
+func (m *BizCore) NewsList(ctx *itypes.Context, req *proto.NewsListReq, needExtra bool) (resp *proto.NewsListResp, total int64, code itypes.BizCode) {
+	var err error
+	var dos []*models.NewsDO
+	dos, total, err = m.newsDAO.QueryList(&dao.NewsCondition{
+		PageNo:       req.PageNo,
+		PageSize:     req.PageSize,
+		Id:           req.Id,
+		Tag:          req.Tag,
+		All:          req.All,
+		IsDeleted:    req.IsDeleted,
+		ContainExtra: needExtra,
+		Asc:          req.OrderAsc,
+		Search:       req.Search,
+		Language:     req.Language,
+	})
+	if err != nil {
+		return nil, 0, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.NewsListResp{
+		List: dos,
+	}, total, itypes.BizOK
+}
+
+func (m *BizCore) NewsAdd(ctx *itypes.Context, req *proto.NewsAddReq) (resp *proto.NewsAddResp, code itypes.BizCode) {
+	var id int64
+	var err error
+	logs := models.MakeChangeLog(nil, ctx.UserName(), models.OperType_Create)
+	id, err = m.newsDraftDAO.Insert(&models.NewsDraftDO{
+		NewsId:      0,
+		OrgId:       0,
+		Category:    req.Category,
+		MainTitle:   req.MainTitle,
+		SubTitle:    req.SubTitle,
+		Summary:     req.Summary,
+		Keywords:    utils.JsonMarshal(req.Keywords),
+		SeoKeywords: utils.JsonMarshal(req.SeoKeywords),
+		Tags:        req.Tags,
+		ImageUrl:    req.ImageUrl,
+		Content:     req.Content,
+		IsReplicate: false,
+		Language:    req.Language,
+		ExtraData: models.CommonExtraData{
+			Logs: logs,
+		},
+	})
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.NewsAddResp{
+		DraftId: id,
+	}, itypes.BizOK
+}
+
+func (m *BizCore) NewsEdit(ctx *itypes.Context, req *proto.NewsEditReq) (resp *proto.NewsEditResp, code itypes.BizCode) {
+	var id int64
+	var err error
+	var news *models.NewsDO
+	news, err = m.newsDAO.QueryById(req.Id)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if news.Id == 0 {
+		err = log.Errorf("news id [%v] not found", req.Id)
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+	}
+	logs := models.MakeChangeLog(nil, ctx.UserName(), models.OperType_Create)
+
+	id, err = m.newsDraftDAO.Upsert(&models.NewsDraftDO{
+		NewsId:      news.Id,
+		OrgId:       news.OrgId,
+		Content:     news.Content,
+		Category:    news.Category,
+		MainTitle:   news.MainTitle,
+		SubTitle:    news.SubTitle,
+		Summary:     news.Summary,
+		Keywords:    news.Keywords,
+		SeoKeywords: news.SeoKeywords,
+		Tags:        news.Tags,
+		ImageUrl:    news.ImageUrl,
+		IsReplicate: news.IsReplicate,
+		Language:    news.Language,
+		ExtraData: models.CommonExtraData{
+			Logs: logs,
+		},
+	},
+		models.NEWS_DRAFT_COLUMN_CATEGORY,
+		models.NEWS_DRAFT_COLUMN_MAIN_TITLE,
+		models.NEWS_DRAFT_COLUMN_SUB_TITLE,
+		models.NEWS_DRAFT_COLUMN_SUMMARY,
+		models.NEWS_DRAFT_COLUMN_KEYWORDS,
+		models.NEWS_DRAFT_COLUMN_SEO_KEYWORDS,
+		models.NEWS_DRAFT_COLUMN_TAGS,
+		models.NEWS_DRAFT_COLUMN_IMAGE_URL,
+		models.NEWS_DRAFT_COLUMN_CONTENT,
+		models.NEWS_DRAFT_COLUMN_LANGUAGE,
+		models.NEWS_DRAFT_COLUMN_IS_DELETED,
+		models.NEWS_DRAFT_COLUMN_IS_REPLICATE,
+		models.NEWS_DRAFT_COLUMN_EXTRA_DATA,
+	)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	news.ExtraData.Logs = models.MakeChangeLog(news.ExtraData.Logs, ctx.UserName(), models.OperType_Edit)
+	_, err = m.newsDAO.Update(&models.NewsDO{
+		Id:        news.Id,
+		ExtraData: news.ExtraData,
+	}, models.NEWS_COLUMN_EXTRA_DATA)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.NewsEditResp{
+		DraftId: id,
+	}, itypes.BizOK
+}
+
+func (m *BizCore) NewsDelete(ctx *itypes.Context, req *proto.NewsDeleteReq) (resp *proto.NewsDeleteResp, code itypes.BizCode) {
+	var err error
+	var news *models.NewsDO
+	for _, id := range req.Ids {
+		news, err = m.newsDAO.QueryById(id)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+		if news.Id == 0 {
+			err = log.Errorf("news id [%v] not found", id)
+			return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+		}
+
+		news.ExtraData.Logs = models.MakeChangeLog(news.ExtraData.Logs, ctx.UserName(), models.OperType_Delete)
+
+		_, err = m.newsDAO.Update(&models.NewsDO{
+			Id:        id,
+			IsDeleted: true,
+			ExtraData: news.ExtraData,
+		}, models.NEWS_COLUMN_IS_DELETED, models.NEWS_COLUMN_EXTRA_DATA)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+	}
+
+	return &proto.NewsDeleteResp{}, itypes.BizOK
+}
+
+func (m *BizCore) NewsCompare(ctx *itypes.Context, req *proto.NewsCompareReq) (resp *proto.NewsCompareResp, code itypes.BizCode) {
+	curNews, err := m.newsDAO.QueryById(req.Id)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if !curNews.IsReplicate {
+		return &proto.NewsCompareResp{
+			CurNews: curNews,
+			OrgNews: &models.NewsDO{},
+		}, itypes.BizOK
+	}
+	var orgNews *models.NewsDO
+	orgNews, err = m.newsDAO.QueryOriginalNews(curNews.OrgId, curNews.Language)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.NewsCompareResp{
+		CurNews: curNews,
+		OrgNews: orgNews,
+	}, itypes.BizOK
+}
+
+func (m *BizCore) NewsDraftList(ctx *itypes.Context, req *proto.NewsDraftListReq) (resp *proto.NewsDraftListResp, total int64, code itypes.BizCode) {
+	var err error
+	var dos []*models.NewsDraftDO
+	dos, total, err = m.newsDraftDAO.QueryList(&dao.NewsDraftCondition{
+		PageNo:   req.PageNo,
+		PageSize: req.PageSize,
+		Asc:      req.OrderAsc,
+		Id:       req.Id,
+		Search:   req.Search,
+	})
+	if err != nil {
+		return nil, 0, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.NewsDraftListResp{
+		List: dos,
+	}, total, itypes.BizOK
+}
+
+func (m *BizCore) NewsDraftEdit(ctx *itypes.Context, req *proto.NewsDraftEditReq) (resp *proto.NewsDraftEditResp, code itypes.BizCode) {
+	var columns = []string{
+		models.NEWS_DRAFT_COLUMN_CATEGORY,
+		models.NEWS_DRAFT_COLUMN_MAIN_TITLE,
+		models.NEWS_DRAFT_COLUMN_SUB_TITLE,
+		models.NEWS_DRAFT_COLUMN_SUMMARY,
+		models.NEWS_DRAFT_COLUMN_KEYWORDS,
+		models.NEWS_DRAFT_COLUMN_SEO_KEYWORDS,
+		models.NEWS_DRAFT_COLUMN_TAGS,
+		models.NEWS_DRAFT_COLUMN_IMAGE_URL,
+		models.NEWS_DRAFT_COLUMN_CONTENT,
+	}
+	if req.Language != "" {
+		columns = append(columns, models.NEWS_DRAFT_COLUMN_LANGUAGE)
+	}
+	_, err := m.newsDraftDAO.Update(&models.NewsDraftDO{
+		Id:          req.Id,
+		Category:    req.Category,
+		MainTitle:   req.MainTitle,
+		SubTitle:    req.SubTitle,
+		Summary:     req.Summary,
+		Keywords:    utils.JsonMarshal(req.Keywords),
+		SeoKeywords: utils.JsonMarshal(req.SeoKeywords),
+		Tags:        req.Tags,
+		ImageUrl:    req.ImageUrl,
+		Content:     req.Content,
+		Language:    req.Language,
+		ExtraData:   models.CommonExtraData{},
+	}, columns...)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.NewsDraftEditResp{}, itypes.BizOK
+}
+
+func (m *BizCore) NewsDraftPublish(ctx *itypes.Context, req *proto.NewsDraftPublishReq) (resp *proto.NewsDraftPublishResp, code itypes.BizCode) {
+	draft, err := m.newsDraftDAO.QueryById(req.Id)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if draft.Id == 0 || draft.IsDeleted {
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "news draft id invalid or draft is deleted")
+	}
+	if draft.NewsId == 0 {
+		_, err = m.newsDAO.Insert(&models.NewsDO{
+			OrgId:       draft.OrgId,
+			Category:    draft.Category,
+			MainTitle:   draft.MainTitle,
+			SubTitle:    draft.SubTitle,
+			Summary:     draft.Summary,
+			Keywords:    draft.Keywords,
+			SeoKeywords: draft.SeoKeywords,
+			Tags:        draft.Tags,
+			ImageUrl:    draft.ImageUrl,
+			Content:     draft.Content,
+			Language:    draft.Language,
+			ExtraData:   models.CommonExtraData{},
+		})
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+	} else {
+		var news *models.NewsDO
+		news, err = m.newsDAO.QueryById(draft.NewsId)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+		if news.Id == 0 {
+			err = log.Errorf("news id [%d] not found", draft.NewsId)
+			return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+		}
+		_, err = m.newsDAO.Update(&models.NewsDO{
+			Id:          draft.NewsId,
+			Category:    draft.Category,
+			MainTitle:   draft.MainTitle,
+			SubTitle:    draft.SubTitle,
+			Summary:     draft.Summary,
+			Keywords:    draft.Keywords,
+			SeoKeywords: draft.SeoKeywords,
+			Tags:        draft.Tags,
+			ImageUrl:    draft.ImageUrl,
+			Content:     draft.Content,
+			Language:    draft.Language,
+			ExtraData:   models.CommonExtraData{},
+		}, models.NEWS_COLUMN_CONTENT, models.NEWS_COLUMN_LANGUAGE, models.NEWS_COLUMN_EXTRA_DATA)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+		if news.OrgId != 0 { // this news was replicated from an AI news item, so just set the overwritten flag
+			_, err = m.newsDAO.UpdateByOrgId(&models.NewsDO{
+				OrgId:         draft.OrgId,
+				IsOverwritten: true,
+			}, models.NEWS_COLUMN_IS_OVERWRITTEN)
+			if err != nil {
+				return nil, itypes.NewBizCodeDatabaseError(err.Error())
+			}
+		}
+	}
+	_, err = m.newsDraftDAO.Update(&models.NewsDraftDO{
+		Id:        draft.Id,
+		IsDeleted: true,
+	}, models.NEWS_DRAFT_COLUMN_IS_DELETED)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.NewsDraftPublishResp{}, itypes.BizOK
+}
+
+func (m *BizCore) NewsDraftDelete(ctx *itypes.Context, req *proto.NewsDraftDeleteReq) (resp *proto.NewsDraftDeleteResp, code itypes.BizCode) {
+	for _, id := range req.Ids {
+		_, err := m.newsDraftDAO.Update(&models.NewsDraftDO{
+			Id:        id,
+			IsDeleted: true,
+			ExtraData: models.CommonExtraData{},
+		}, models.NEWS_DRAFT_COLUMN_IS_DELETED)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+	}
+
+	return &proto.NewsDraftDeleteResp{}, itypes.BizOK
+}
+
+func (m *BizCore) NewsTag(ctx *itypes.Context, req *proto.NewsTagReq) (resp *proto.NewsTagResp, code itypes.BizCode) {
+	news, err := m.newsDAO.QueryById(req.Id)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	news.Tags = req.Tags
+	_, err = m.newsDAO.Update(news, models.NEWS_COLUMN_TAGS)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.NewsTagResp{}, itypes.BizOK
+}
+
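+// NewsPublish selects the news to push to subscribers: today's previously
+// selected items are demoted to NotPublish, the subscribe record is inserted
+// or updated (rejected if already pushed), and the news state is set to
+// Published.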
+func (m *BizCore) NewsPublish(ctx *itypes.Context, req *proto.NewsPublishReq) (resp *proto.NewsPublishResp, code itypes.BizCode) {
+	var err error
+	var subNewses []*models.NewsDO
+	subNewses, _, err = m.subDAO.QueryTodayNewsList(0, 0)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	for _, s := range subNewses {
+		s.State = dao.NewsState_NotPublish
+		_, err = m.newsDAO.Update(s, models.NEWS_COLUMN_STATE)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+	}
+	news, err := m.newsDAO.QueryById(req.Id)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if news.Id == 0 {
+		err = log.Errorf("news id [%v] not found", req.Id)
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+	}
+	var subNews *models.NewsSubscribeDO
+	subNews, err = m.subDAO.QueryByNewsId(req.Id)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	subNews.NewsId = news.Id
+	subNews.NewsUrl = m.makePublishUrl(news)
+	subNews.NewsSubject = news.MainTitle
+	subNews.IsDeleted = false
+	if subNews.Id == 0 {
+		err = m.subDAO.DeleteNotPushed()
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+		_, err = m.subDAO.Insert(subNews)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+	} else {
+		if subNews.IsPushed {
+			err = log.Errorf("news id [%v] subject [%s] url [%s] is already pushed", subNews.NewsId, subNews.NewsSubject, subNews.NewsUrl)
+			return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+		}
+		_, err = m.subDAO.Update(subNews,
+			models.NEWS_SUBSCRIBE_COLUMN_NEWS_URL,
+			models.NEWS_SUBSCRIBE_COLUMN_NEWS_SUBJECT,
+			models.NEWS_SUBSCRIBE_COLUMN_IS_DELETED)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+	}
+	news.State = dao.NewsState_Published
+	_, err = m.newsDAO.Update(news, models.NEWS_COLUMN_STATE)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.NewsPublishResp{}, itypes.BizOK
+}
+
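+// makePublishUrl builds the public news URL as "<domain>/<news id>"; for
+// example, a configured domain of https://news.example.com and news id 42
+// would yield https://news.example.com/42 (domain value is hypothetical).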
+func (m *BizCore) makePublishUrl(news *models.NewsDO) string {
+	return fmt.Sprintf("%s/%v", m.cfg.Domain, news.Id)
+}
+
+func (m *BizCore) QaList(ctx *itypes.Context, req *proto.QaListReq, needExtra bool) (resp *proto.QaListResp, total int64, code itypes.BizCode) {
+	var err error
+	var dos []*models.QuestionAnswerDO
+
+	dos, total, err = m.qaDAO.QueryList(&dao.QuestionAnswerCondition{
+		PageNo:       req.PageNo,
+		PageSize:     req.PageSize,
+		Id:           req.Id,
+		IsDeleted:    req.IsDeleted,
+		ContainExtra: needExtra,
+		Asc:          req.OrderAsc,
+		Search:       req.Search,
+		Language:     req.Language,
+	})
+	if err != nil {
+		err = log.Errorf(err.Error())
+		return nil, 0, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.QaListResp{
+		List: dos,
+	}, total, itypes.BizOK
+}
+
+func (m *BizCore) QaAdd(ctx *itypes.Context, req *proto.QaAddReq) (resp *proto.QaAddResp, code itypes.BizCode) {
+	logs := models.MakeChangeLog(nil, ctx.UserName(), models.OperType_Create)
+
+	id, err := m.qaDraftDAO.Insert(&models.QuestionDraftDO{
+		QaId:          0,
+		Question:      req.Question,
+		Answer:        req.Answer,
+		IsOverwritten: false,
+		IsReplicate:   false,
+		IsDeleted:     false,
+		Language:      req.Language,
+		ExtraData: models.CommonExtraData{
+			Logs: logs,
+		},
+	})
+	if err != nil {
+		err = log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.QaAddResp{
+		DraftId: id,
+	}, itypes.BizOK
+}
+
+func (m *BizCore) QaEdit(ctx *itypes.Context, req *proto.QaEditReq) (resp *proto.QaEditResp, code itypes.BizCode) {
+	var id int64
+	var err error
+	var qa *models.QuestionAnswerDO
+	qa, err = m.qaDAO.QueryById(req.Id)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if qa.Id == 0 {
+		err = log.Errorf("Q&A id [%v] not found", req.Id)
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+	}
+	qa.ExtraData.Logs = models.MakeChangeLog(qa.ExtraData.Logs, ctx.UserName(), models.OperType_Edit)
+
+	id, err = m.qaDraftDAO.Upsert(&models.QuestionDraftDO{
+		QaId:        qa.Id,
+		OrgId:       qa.OrgId,
+		Question:    qa.Question,
+		Answer:      qa.Answer,
+		IsReplicate: qa.IsReplicate,
+		ExtraData:   qa.ExtraData,
+		Language:    qa.Language,
+	},
+		models.QUESTION_DRAFT_COLUMN_QUESTION,
+		models.QUESTION_DRAFT_COLUMN_ANSWER,
+		models.QUESTION_DRAFT_COLUMN_LANGUAGE,
+	)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.QaEditResp{
+		DraftId: id,
+	}, itypes.BizOK
+}
+
+func (m *BizCore) QaDelete(ctx *itypes.Context, req *proto.QaDeleteReq) (resp *proto.QaDeleteResp, code itypes.BizCode) {
+	var err error
+	var qa *models.QuestionAnswerDO
+	for _, id := range req.Ids {
+		qa, err = m.qaDAO.QueryById(id)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+		if qa.Id == 0 {
+			err = log.Errorf("Q&A id [%v] not found", id)
+			return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+		}
+		qa.ExtraData.Logs = models.MakeChangeLog(qa.ExtraData.Logs, ctx.UserName(), models.OperType_Delete)
+		_, err = m.qaDAO.Update(&models.QuestionAnswerDO{
+			Id:        id,
+			IsDeleted: true,
+			ExtraData: qa.ExtraData,
+		}, models.QUESTION_ANSWER_COLUMN_IS_DELETED, models.QUESTION_ANSWER_COLUMN_EXTRA_DATA)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+	}
+
+	return &proto.QaDeleteResp{}, itypes.BizOK
+}
+
+func (m *BizCore) QaDraftList(ctx *itypes.Context, req *proto.QaDraftListReq) (resp *proto.QaDraftListResp, total int64, code itypes.BizCode) {
+	var err error
+	var dos []*models.QuestionDraftDO
+	dos, total, err = m.qaDraftDAO.QueryList(&dao.QaDraftCondition{
+		PageNo:   req.PageNo,
+		PageSize: req.PageSize,
+		Asc:      req.OrderAsc,
+		Id:       req.Id,
+		Search:   req.Search,
+	})
+	if err != nil {
+		err = log.Errorf(err.Error())
+		return nil, 0, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.QaDraftListResp{
+		List: dos,
+	}, total, itypes.BizOK
+}
+
+func (m *BizCore) QaDraftEdit(ctx *itypes.Context, req *proto.QaDraftEditReq) (resp *proto.QaDraftEditResp, code itypes.BizCode) {
+	var err error
+	var draft *models.QuestionDraftDO
+	draft, err = m.qaDraftDAO.QueryById(req.Id)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if draft.Id == 0 {
+		err = log.Errorf("Q&A draft id [%v] not found", req.Id)
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+	}
+	var columns = []string{
+		models.QUESTION_DRAFT_COLUMN_QUESTION,
+		models.QUESTION_DRAFT_COLUMN_ANSWER,
+		models.QUESTION_DRAFT_COLUMN_EXTRA_DATA,
+	}
+	if req.Language != "" {
+		columns = append(columns, models.QUESTION_DRAFT_COLUMN_LANGUAGE)
+	}
+	draft.ExtraData.Logs = models.MakeChangeLog(draft.ExtraData.Logs, ctx.UserName(), models.OperType_Edit)
+	_, err = m.qaDraftDAO.Update(&models.QuestionDraftDO{
+		Id:        req.Id,
+		Question:  req.Question,
+		Answer:    req.Answer,
+		ExtraData: draft.ExtraData,
+		Language:  req.Language,
+	}, columns...)
+	if err != nil {
+		err = log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.QaDraftEditResp{}, itypes.BizOK
+}
+
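+// QaDraftPublish publishes a Q&A draft into the question-answer table and
+// then soft-deletes the draft.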
+func (m *BizCore) QaDraftPublish(ctx *itypes.Context, req *proto.QaDraftPublishReq) (resp *proto.QaDraftPublishResp, code itypes.BizCode) {
+	draft, err := m.qaDraftDAO.QueryById(req.Id)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if draft.Id == 0 {
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "Q&A draft id invalid")
+	}
+	if draft.QaId == 0 {
+		_, err = m.qaDAO.Insert(&models.QuestionAnswerDO{
+			OrgId:     draft.OrgId,
+			Question:  draft.Question,
+			Answer:    draft.Answer,
+			ExtraData: models.CommonExtraData{},
+		})
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+	} else {
+		var qa *models.QuestionAnswerDO
+		qa, err = m.qaDAO.QueryById(draft.QaId)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+		if qa.Id == 0 {
+			err = log.Errorf("Q&A id [%d] not found", draft.QaId)
+			return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+		}
+		_, err = m.qaDAO.Update(&models.QuestionAnswerDO{
+			Id:        draft.QaId,
+			Question:  draft.Question,
+			Answer:    draft.Answer,
+			ExtraData: models.CommonExtraData{},
+		}, models.QUESTION_ANSWER_COLUMN_QUESTION,
+			models.QUESTION_ANSWER_COLUMN_ANSWER,
+			models.QUESTION_ANSWER_COLUMN_EXTRA_DATA)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+		if qa.OrgId != 0 { // this Q&A was replicated from an AI Q&A, so just set the overwritten flag
+			_, err = m.qaDAO.Update(&models.QuestionAnswerDO{
+				Id:            draft.QaId,
+				IsOverwritten: true,
+			}, models.QUESTION_ANSWER_COLUMN_IS_OVERWRITTEN)
+			if err != nil {
+				return nil, itypes.NewBizCodeDatabaseError(err.Error())
+			}
+		}
+	}
+
+	_, err = m.qaDraftDAO.Update(&models.QuestionDraftDO{
+		Id:        draft.Id,
+		IsDeleted: true,
+	}, models.QUESTION_DRAFT_COLUMN_IS_DELETED)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.QaDraftPublishResp{}, itypes.BizOK
+}
+
+func (m *BizCore) QaDraftDelete(ctx *itypes.Context, req *proto.QaDraftDeleteReq) (resp *proto.QaDraftDeleteResp, code itypes.BizCode) {
+	var err error
+	var draft *models.QuestionDraftDO
+	for _, id := range req.Ids {
+		draft, err = m.qaDraftDAO.QueryById(id)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+		if draft.Id == 0 {
+			err = log.Errorf("Q&A draft id [%v] not found", id)
+			return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+		}
+		draft.ExtraData.Logs = models.MakeChangeLog(draft.ExtraData.Logs, ctx.UserName(), models.OperType_Delete)
+		_, err = m.qaDraftDAO.Update(&models.QuestionDraftDO{
+			Id:        id,
+			IsDeleted: true,
+			ExtraData: draft.ExtraData,
+		}, models.QUESTION_DRAFT_COLUMN_IS_DELETED)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+	}
+
+	return &proto.QaDraftDeleteResp{}, itypes.BizOK
+}
+
+func (m *BizCore) SubListAll(ctx *itypes.Context, req *proto.SubListAllReq) (resp *proto.SubListAllResp, total int64, code itypes.BizCode) {
+	var err error
+	var dos []*models.NewsDO
+	dos, total, err = m.subDAO.QueryAllNews(&dao.NewsSubscribeCondition{
+		PageNo:   req.PageNo,
+		PageSize: req.PageSize,
+		Asc:      req.OrderAsc,
+		Id:       req.Id,
+		Search:   req.Search,
+	})
+	if err != nil {
+		return nil, 0, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.SubListAllResp{
+		List: dos,
+	}, total, itypes.BizOK
+}
+
+func (m *BizCore) SubListPushed(ctx *itypes.Context, req *proto.SubListPushedReq) (resp *proto.SubListPushedResp, total int64, code itypes.BizCode) {
+	var err error
+	var dos []*models.NewsDO
+	dos, total, err = m.subDAO.QueryPushedNews(&dao.NewsSubscribeCondition{
+		PageNo:   req.PageNo,
+		PageSize: req.PageSize,
+		Asc:      req.OrderAsc,
+		Id:       req.Id,
+		Search:   req.Search,
+	})
+	if err != nil {
+		return nil, 0, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.SubListPushedResp{
+		List: dos,
+	}, total, itypes.BizOK
+}
+
+func (m *BizCore) SubListToday(ctx *itypes.Context, req *proto.SubListTodayReq) (resp *proto.SubListTodayResp, total int64, code itypes.BizCode) {
+	var err error
+	var dos []*models.NewsDO
+	dos, total, err = m.subDAO.QueryTodayNewsList(0, 1)
+	if err != nil {
+		return nil, 0, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.SubListTodayResp{
+		List: dos,
+	}, total, itypes.BizOK
+}
+
+func (m *BizCore) SubAddNews(ctx *itypes.Context, req *proto.SubAddNewsReq) (resp *proto.SubAddNewsResp, code itypes.BizCode) {
+	return &proto.SubAddNewsResp{}, itypes.NewBizCode(itypes.CODE_ACCESS_DENY, "do not call this method")
+}
+
+func (m *BizCore) SubEditNews(ctx *itypes.Context, req *proto.SubEditNewsReq) (resp *proto.SubEditNewsResp, code itypes.BizCode) {
+	ok, err := m.subDAO.IsSubNewsExist(req.Id)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if !ok {
+		err = log.Errorf("news id [%d] is not a subscribing news", req.Id)
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+	}
+	news, err := m.newsDAO.QueryById(req.Id)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if news.Id == 0 {
+		err = log.Errorf("news id [%d] not found", news.Id)
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+	}
+	news.Category = req.Category
+	news.MainTitle = req.MainTitle
+	news.SubTitle = req.SubTitle
+	news.Summary = req.Summary
+	news.Keywords = utils.JsonMarshal(req.Keywords)
+	news.SeoKeywords = utils.JsonMarshal(req.SeoKeywords)
+	news.Tags = req.Tags
+	news.ImageUrl = req.ImageUrl
+	news.Content = req.Content
+	news.ExtraData.Logs = models.MakeChangeLog(news.ExtraData.Logs, ctx.UserName(), models.OperType_Edit)
+	_, err = m.newsDAO.Update(news,
+		models.NEWS_COLUMN_CATEGORY,
+		models.NEWS_COLUMN_MAIN_TITLE,
+		models.NEWS_COLUMN_SUB_TITLE,
+		models.NEWS_COLUMN_SUMMARY,
+		models.NEWS_COLUMN_KEYWORDS,
+		models.NEWS_COLUMN_SEO_KEYWORDS,
+		models.NEWS_COLUMN_TAGS,
+		models.NEWS_COLUMN_IMAGE_URL,
+		models.NEWS_COLUMN_CONTENT,
+		models.NEWS_COLUMN_EXTRA_DATA)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.SubEditNewsResp{}, itypes.BizOK
+}
+
+func (m *BizCore) TagList(ctx *itypes.Context, req *proto.TagListReq) (resp *proto.TagListResp, total int64, code itypes.BizCode) {
+	var err error
+	var dos []*models.TagDO
+	dos, total, err = m.tagDAO.QueryAll(req.PageNo, req.PageSize, req.OrderAsc)
+	if err != nil {
+		return nil, 0, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.TagListResp{
+		List: dos,
+	}, total, itypes.BizOK
+}
+
+func (m *BizCore) TagAdd(ctx *itypes.Context, req *proto.TagAddReq) (resp *proto.TagAddResp, code itypes.BizCode) {
+	dos, err := m.tagDAO.QueryByCondition(map[string]interface{}{
+		models.TAG_COLUMN_NAME:       req.Name,
+		models.TAG_COLUMN_NAME_CN:    req.NameCN,
+		models.TAG_COLUMN_IS_DELETED: 0,
+	})
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if len(dos) != 0 {
+		log.Errorf("tag name %s already exists", req.Name)
+		return nil, itypes.NewBizCode(itypes.CODE_ALREADY_EXIST)
+	}
+	logs := models.MakeChangeLog(nil, ctx.UserName(), models.OperType_Create)
+
+	var id int64
+	id, err = m.tagDAO.Insert(&models.TagDO{
+		Name:       req.Name,
+		IsInherent: false,
+		IsDeleted:  false,
+		NameCn:     req.NameCN,
+		ExtraData: models.CommonExtraData{
+			Logs: logs,
+		},
+	})
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.TagAddResp{
+		Id: id,
+	}, itypes.BizOK
+}
+
+func (m *BizCore) TagEdit(ctx *itypes.Context, req *proto.TagEditReq) (resp *proto.TagEditResp, code itypes.BizCode) {
+	tag, err := m.tagDAO.QueryById(req.Id)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if tag.Id == 0 {
+		err = log.Errorf("tag id %s not exists", req.Id)
+		return nil, itypes.NewBizCode(itypes.CODE_ALREADY_EXIST, err.Error())
+	}
+	if tag.IsInherent {
+		return nil, itypes.NewBizCode(itypes.CODE_ACCESS_DENY, "no privilege to edit the inherent tag")
+	}
+	var columns = []string{
+		models.TAG_COLUMN_NAME,
+		models.TAG_COLUMN_EXTRA_DATA,
+		models.TAG_COLUMN_NAME_CN,
+	}
+	tag.Name = req.Name
+	tag.NameCn = req.NameCN
+	tag.ExtraData.Logs = models.MakeChangeLog(tag.ExtraData.Logs, ctx.UserName(), models.OperType_Edit)
+
+	_, err = m.tagDAO.Update(tag, columns...)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.TagEditResp{}, itypes.BizOK
+}
+
+func (m *BizCore) TagDelete(ctx *itypes.Context, req *proto.TagDeleteReq) (resp *proto.TagDeleteResp, code itypes.BizCode) {
+	for _, id := range req.Ids {
+		tag, err := m.tagDAO.QueryById(id)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+		if tag.Id == 0 {
+			err = log.Errorf("tag id %d does not exist", id)
+			return nil, itypes.NewBizCode(itypes.CODE_NOT_FOUND, err.Error())
+		}
+		if tag.IsInherent {
+			return nil, itypes.NewBizCode(itypes.CODE_ACCESS_DENY, "no privilege to delete the inherent tag")
+		}
+		tag.IsDeleted = true
+		tag.ExtraData.Logs = models.MakeChangeLog(tag.ExtraData.Logs, ctx.UserName(), models.OperType_Edit)
+
+		_, err = m.tagDAO.Update(tag, models.TAG_COLUMN_IS_DELETED, models.TAG_COLUMN_EXTRA_DATA)
+		if err != nil {
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+	}
+
+	return &proto.TagDeleteResp{}, itypes.BizOK
+}
diff --git a/pkg/dal/core/core_common.go b/pkg/dal/core/core_common.go
new file mode 100644
index 0000000..0d8a37f
--- /dev/null
+++ b/pkg/dal/core/core_common.go
@@ -0,0 +1,93 @@
+package core
+
+import (
+	"intent-system/pkg/config"
+	"intent-system/pkg/dal/dao"
+	"intent-system/pkg/dal/models"
+	"intent-system/pkg/email"
+	"intent-system/pkg/itypes"
+	"intent-system/pkg/proto"
+	"intent-system/pkg/utils"
+	"strings"
+
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+)
+
+type CommonCore struct {
+	db  *sqlca.Engine
+	cfg *config.Config
+
+	loginDAO      *dao.LoginDAO
+	dictionaryDAO *dao.DictionaryDAO
+	inviteCodeDAO *dao.InviteCodeDAO
+	customerDAO   *dao.CustomerDAO
+	userDAO       *dao.UserDAO
+}
+
+func NewCommonCore(cfg *config.Config, db *sqlca.Engine) *CommonCore {
+	return &CommonCore{
+		db:            db,
+		cfg:           cfg,
+		loginDAO:      dao.NewLoginDAO(db),
+		dictionaryDAO: dao.NewDictionaryDAO(db),
+		inviteCodeDAO: dao.NewInviteCodeDAO(db),
+		customerDAO:   dao.NewCustomerDAO(db),
+		userDAO:       dao.NewUserDAO(db),
+	}
+}
+
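+// SendAuthCode invalidates any unused codes for the address, generates a fresh
+// auth code, emails it according to the requested action type, and stores the
+// new code as unused.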
+func (m *CommonCore) SendAuthCode(req *proto.SendAuthCodeReq) (resp *proto.SendAuthCodeResp, code itypes.BizCode) {
+	var err error
+
+	// TODO: rate-limit this endpoint to mitigate abuse/DDoS
+
+	_, err = m.inviteCodeDAO.DeleteUnusedCodeByUserAcc(req.Email)
+	if err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+
+	cfg, err := m.dictionaryDAO.SelectEmailConfig()
+	if err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+	}
+	regCode := utils.GenAuthCode()
+	var strLanguage = req.Language
+	if strLanguage == "" {
+		strLanguage = models.Language_EN
+	}
+	ss := strings.Split(req.Email, "@")
+	if len(ss) != 2 {
+		err = log.Errorf("invalid email address [%s]", req.Email)
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+	}
+
+	switch req.ActionType {
+	case proto.ActionType_UserRegister:
+		{
+			body := email.RegisterVerificationCodeMessage(strLanguage, ss[0], regCode)
+			err = email.SendVerificationCode(cfg, email.SubjectVerificationCodeForRegister, req.Email, body)
+			if err != nil {
+				err = log.Errorf("email send error [%s]", err.Error())
+				return nil, itypes.NewBizCode(itypes.CODE_INTERNAL_SERVER_ERROR, err.Error())
+			}
+		}
+	case proto.ActionType_ResetPassword:
+		log.Warnf("TODO: send verification code for reset password")
+	}
+
+	_, err = m.inviteCodeDAO.Insert(&models.InviteCodeDO{
+		RandomCode: regCode,
+		UserAcc:    req.UserName,
+		LinkUrl:    req.Email,
+		State:      dao.InviteCodeStateUnused,
+	})
+	if err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+	return &proto.SendAuthCodeResp{}, itypes.BizOK
+}
diff --git a/pkg/dal/core/core_customer.go b/pkg/dal/core/core_customer.go
new file mode 100644
index 0000000..37ce1e3
--- /dev/null
+++ b/pkg/dal/core/core_customer.go
@@ -0,0 +1,388 @@
+package core
+
+import (
+	"fmt"
+	"intent-system/pkg/config"
+	"intent-system/pkg/dal/dao"
+	"intent-system/pkg/dal/models"
+	"intent-system/pkg/email"
+	"intent-system/pkg/itypes"
+	"intent-system/pkg/proto"
+	"intent-system/pkg/sessions"
+	"intent-system/pkg/utils"
+	"strings"
+	"time"
+
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"github.com/gin-gonic/gin"
+)
+
+type CustomerCore struct {
+	db  *sqlca.Engine
+	cfg *config.Config
+
+	loginDAO      *dao.LoginDAO
+	dictionaryDAO *dao.DictionaryDAO
+	inviteCodeDAO *dao.InviteCodeDAO
+	customerDAO   *dao.CustomerDAO
+	subscriberDAO *dao.SubscriberDAO
+	templateDAO   *dao.EmailTemplateDAO
+}
+
+func NewCustomerCore(cfg *config.Config, db *sqlca.Engine) *CustomerCore {
+	return &CustomerCore{
+		db:            db,
+		cfg:           cfg,
+		loginDAO:      dao.NewLoginDAO(db),
+		dictionaryDAO: dao.NewDictionaryDAO(db),
+		inviteCodeDAO: dao.NewInviteCodeDAO(db),
+		customerDAO:   dao.NewCustomerDAO(db),
+		subscriberDAO: dao.NewSubscriberDAO(db),
+		templateDAO:   dao.NewEmailTemplateDAO(db),
+	}
+}
+
+func (m *CustomerCore) UserRegister(req *proto.CustomerRegisterReq) (resp *proto.CustomerRegisterResp, code itypes.BizCode) {
+	var do *models.CustomerDO
+	do, err := m.customerDAO.SelectCustomerByEmail(req.Email)
+	if err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+	if do.Id != 0 && !do.Deleted {
+		return nil, itypes.NewBizCode(itypes.CODE_ALREADY_EXIST, "email already registered")
+	}
+
+	var skipVerifyRegCode bool
+	if m.cfg.Debug && req.RegCode == "888888" {
+		skipVerifyRegCode = true
+	}
+	if !skipVerifyRegCode {
+		ok, err := m.inviteCodeDAO.CheckUserInviteCode(req.UserName, req.RegCode)
+		if err != nil {
+			log.Errorf(err.Error())
+			return nil, itypes.NewBizCodeDatabaseError()
+		}
+		if !ok {
+			return nil, itypes.NewBizCode(itypes.CODE_INVALID_AUTH_CODE, "invalid auth code")
+		}
+		m.inviteCodeDAO.UpdateByUserAndInviteCode(req.UserName, req.RegCode)
+	}
+	code = m.createRegisterUser(req.UserName, req.Password, req.Email)
+	if !code.Ok() {
+		return nil, code
+	}
+	return &proto.CustomerRegisterResp{}, itypes.BizOK
+}
+
+func (m *CustomerCore) UserNameRegister(req *proto.CustomerURegisterReq) (resp *proto.CustomerRegisterResp, code itypes.BizCode) {
+	// Only check whether the username has already been registered
+	do, err := m.customerDAO.SelectCustomerByName(req.UserName)
+	if err != nil {
+		log.Errorf("Database error: %v", err)
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+	if do.Id != 0 && !do.Deleted {
+		return nil, itypes.NewBizCode(itypes.CODE_ALREADY_EXIST, "Username already registered")
+	}
+
+	// Create the user directly, without email checks or verification-code validation
+	code = m.createURegisterUser(req.UserName, req.Password, req.Referral)
+	if !code.Ok() {
+		return nil, code
+	}
+
+	// Return a success response
+	return &proto.CustomerRegisterResp{}, itypes.BizOK
+}
+
+func (m *CustomerCore) createRegisterUser(strUserName, strPassword, strEmail string) (code itypes.BizCode) {
+	var err error
+	var strSalt = utils.GenerateSalt()
+	do := &models.CustomerDO{
+		UserName:   strUserName,
+		Password:   strPassword,
+		Salt:       strSalt,
+		UserAlias:  strUserName,
+		IsAdmin:    false,
+		Email:      strEmail,
+		CreateUser: SystemName,
+		EditUser:   SystemName,
+		LoginTime:  utils.Now64(),
+		Deleted:    false,
+		State:      dao.CustomerState_Enabled,
+	}
+	if _, err = m.customerDAO.Insert(do); err != nil {
+		err = log.Errorf(err.Error())
+		return itypes.NewBizCode(itypes.CODE_INTERNAL_SERVER_ERROR, err.Error())
+	}
+	return itypes.BizOK
+}
+
+func (m *CustomerCore) createURegisterUser(strUserName, strPassword, strReferrer string) (code itypes.BizCode) {
+	var err error
+	var strSalt = utils.GenerateSalt()
+	do := &models.CustomerDO{
+		UserName:   strUserName,
+		Password:   strPassword,
+		Salt:       strSalt,
+		UserAlias:  strUserName,
+		IsAdmin:    false,
+		Referrer:   strReferrer,
+		CreateUser: SystemName,
+		EditUser:   SystemName,
+		LoginTime:  utils.Now64(),
+		Deleted:    false,
+		State:      dao.CustomerState_Enabled,
+	}
+	if _, err = m.customerDAO.Insert(do); err != nil {
+		err = log.Errorf(err.Error())
+		return itypes.NewBizCode(itypes.CODE_INTERNAL_SERVER_ERROR, err.Error())
+	}
+	return itypes.BizOK
+}
+
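+// CustomerLogin resolves the account by user name first, falling back to email,
+// rejects disabled accounts, verifies the password, records the login IP/time,
+// and appends a login history row.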
+func (m *CustomerCore) CustomerLogin(req *proto.CustomerLoginReq, strIP string) (customer *models.CustomerDO, code itypes.BizCode) {
+
+	var err error
+	var strUserName = req.UserName
+	var strPassword = req.Password
+	if customer, err = m.customerDAO.SelectCustomerByName(req.UserName); err != nil {
+		log.Errorf("query [user name] from table error [%s]", err.Error())
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_USER_OR_PASSWORD)
+	}
+
+	if customer == nil || customer.GetId() == 0 {
+		if customer, err = m.customerDAO.SelectCustomerByEmail(strUserName); err != nil {
+			log.Errorf("query [email] from table error [%s]", err.Error())
+			return nil, itypes.NewBizCode(itypes.CODE_DATABASE_ERROR)
+		}
+		if customer == nil || customer.GetId() == 0 {
+			log.Errorf("user name/email [%s] data not found in db", strUserName)
+			return nil, itypes.NewBizCode(itypes.CODE_INVALID_USER_OR_PASSWORD)
+		}
+	}
+
+	if customer.State == dao.UserState_Disabled {
+		log.Errorf("user name/email [%s] account was disabled", strUserName)
+		return nil, itypes.NewBizCode(itypes.CODE_ACCOUNT_BANNED)
+	}
+
+	if strPassword != customer.Password {
+		err = log.Errorf("user name [%s] password verify failed", strUserName)
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_USER_OR_PASSWORD, err.Error())
+	}
+
+	// Record the login IP and time only after the password has been verified;
+	// the plaintext password is deliberately kept out of the logs.
+	customer.LoginIp = strIP
+	customer.LoginTime = time.Now().Unix()
+	if err = m.customerDAO.UpdateByName(customer, models.USER_COLUMN_LOGIN_IP, models.USER_COLUMN_LOGIN_TIME); err != nil {
+		log.Errorf("update user [%s] login ip error [%s]", strUserName, strIP)
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+	_, _ = m.loginDAO.Insert(&models.LoginDO{
+		UserId:    customer.GetId(),
+		LoginType: dao.LoginType_Customer,
+		LoginIp:   strIP,
+		LoginAddr: "",
+	})
+	return customer, itypes.BizOK
+}
+
+func (m *CustomerCore) CustomerEdit(ctx *itypes.Context, req *proto.CustomerEditReq) (resp *proto.CustomerEditResp, code itypes.BizCode) {
+	var strEmail string
+	if ctx == nil {
+		if req.Email == "" {
+			return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "email must not be empty")
+		}
+		strEmail = req.Email
+	} else {
+		strEmail = ctx.GetEmail()
+	}
+	_, err := m.customerDAO.UpdateByEmail(&models.CustomerDO{
+		Email:     strEmail,
+		FirstName: req.FirstName,
+		LastName:  req.LastName,
+		Title:     req.Title,
+		Company:   req.Company,
+	}, models.CUSTOMER_COLUMN_FIRST_NAME, models.CUSTOMER_COLUMN_LAST_NAME, models.CUSTOMER_COLUMN_TITLE, models.CUSTOMER_COLUMN_COMPANY)
+	if err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.CustomerEditResp{}, itypes.BizOK
+}
+
+func (m *CustomerCore) CustomerSubscriber(strEmail string) (do *models.SubscriberDO, code itypes.BizCode) {
+	var err error
+	do, err = m.subscriberDAO.QueryByEmail(strEmail)
+	if err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if len(do.Tags) == 0 {
+		do.Tags = make([]string, 0)
+	}
+	return do, code
+}
+
+func (m *CustomerCore) CustomerLogout(c *gin.Context) (resp *proto.CustomerLogoutResp, code itypes.BizCode) {
+	sessions.RemoveContext(c)
+	return &proto.CustomerLogoutResp{}, itypes.BizOK
+}
+
+func (m *CustomerCore) CustomerSubInfo(ctx *itypes.Context, req *proto.CustomerSubInfoReq) (resp *proto.CustomerSubInfoResp, code itypes.BizCode) {
+	var strEmail string
+	if ctx == nil {
+		if req.Email == "" {
+			return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "email must not be empty")
+		}
+		strEmail = req.Email
+	} else {
+		strEmail = ctx.GetEmail()
+	}
+
+	customer, err := m.customerDAO.SelectCustomerByEmail(strEmail)
+	if err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	do, err := m.subscriberDAO.QueryByEmail(strEmail)
+	if err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	isSubscribed := do.Id != 0
+
+	return &proto.CustomerSubInfoResp{
+		IsSubscribed: isSubscribed,
+		Tags:         do.Tags,
+		FirstName:    customer.FirstName,
+		LastName:     customer.LastName,
+		Title:        customer.Title,
+		Company:      customer.Company,
+	}, itypes.BizOK
+}
+
+func (m *CustomerCore) CustomerSubscribe(ctx *itypes.Context, req *proto.CustomerSubscribeReq) (resp *proto.CustomerSubscribeResp, code itypes.BizCode) {
+	var userId int32
+	var strEmail string
+	if ctx == nil {
+		if req.Email == "" {
+			return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "email must not be empty")
+		}
+		strEmail = req.Email
+	} else {
+		userId = ctx.UserId()
+		strEmail = ctx.GetEmail()
+	}
+	sub, err := m.subscriberDAO.QueryByEmail(strEmail)
+	if err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if sub.Id == 0 {
+		err = m.sendSubscriptionWelcomeEmail(req.FirstName, strEmail, req.Language)
+		if err != nil {
+			err = log.Errorf("send welcome email error [%s]", err.Error())
+			return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+		}
+	}
+
+	_, err = m.subscriberDAO.Upsert(&models.SubscriberDO{
+		CustomerId: userId,
+		Email:      strEmail,
+		Tags:       req.Tags,
+		IsDeleted:  false,
+	}, models.SUBSCRIBER_COLUMN_IS_DELETED, models.SUBSCRIBER_COLUMN_TAGS, models.SUBSCRIBER_COLUMN_EXTRA_DATA)
+	if err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	_, err = m.customerDAO.UpdateByEmail(&models.CustomerDO{
+		FirstName:    req.FirstName,
+		IsSubscribed: true,
+		Deleted:      false,
+	}, models.CUSTOMER_COLUMN_IS_SUBSCRIBED, models.CUSTOMER_COLUMN_FIRST_NAME, models.CUSTOMER_COLUMN_DELETED)
+	if err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.CustomerSubscribeResp{}, itypes.BizOK
+}
+
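+// sendSubscriptionWelcomeEmail renders the welcome mail from the DB template for
+// the given language (or the built-in default), substituting the literal "[%s]"
+// placeholder with the bracketed user name, then sends it with the configured
+// email settings.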
+func (m *CustomerCore) sendSubscriptionWelcomeEmail(strUser, strEmail, strLang string) (err error) {
+	var strContent string
+	var strSubject = email.SubscriptionWelcomeSubject
+	var strTemplate = email.SubscriptionWelcomeTemplate
+	var do *models.TemplateDO
+	do, err = m.templateDAO.QueryByTypeLang(models.TemplateType_SubscriptionWelcome, strLang)
+	if err != nil {
+		return log.Errorf(err.Error())
+	}
+	if do.Id != 0 {
+		strSubject = do.Subject
+		strTemplate = do.Content
+	} else {
+		log.Warnf("no subscription welcome template found, using default")
+	}
+	var cfg *email.EmailConfig
+	cfg, err = m.dictionaryDAO.SelectEmailConfig()
+	if err != nil {
+		return log.Errorf(err.Error())
+	}
+	strReplace := fmt.Sprintf("[%s]", strUser)
+	strContent = strings.Replace(strTemplate, "[%s]", strReplace, -1)
+	err = email.SendEmail(cfg, strSubject, strContent, strEmail)
+	if err != nil {
+		return log.Errorf(err.Error())
+	}
+	return nil
+}
+
+func (m *CustomerCore) CustomerUnsubscribe(ctx *itypes.Context, req *proto.CustomerUnsubscribeReq) (resp *proto.CustomerUnsubscribeResp, code itypes.BizCode) {
+	var userId int32
+	var strEmail string
+	if ctx == nil {
+		if req.Email == "" {
+			return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "email must not be empty")
+		}
+		strEmail = req.Email
+	} else {
+		userId = ctx.UserId()
+		strEmail = ctx.GetEmail()
+	}
+	_, err := m.subscriberDAO.DeleteByUserEmail(&models.SubscriberDO{
+		CustomerId: userId,
+		Email:      strEmail,
+		IsDeleted:  true,
+		ExtraData: models.SubscriberExtraData{
+			UnsubscribeReason: req.Reason,
+		},
+	}, models.SUBSCRIBER_COLUMN_IS_DELETED, models.SUBSCRIBER_COLUMN_EXTRA_DATA)
+	if err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if userId != 0 {
+		_, err = m.customerDAO.Update(&models.CustomerDO{
+			Id:           userId,
+			IsSubscribed: false,
+		}, models.CUSTOMER_COLUMN_IS_SUBSCRIBED)
+		if err != nil {
+			log.Errorf(err.Error())
+			return nil, itypes.NewBizCodeDatabaseError(err.Error())
+		}
+	}
+	return &proto.CustomerUnsubscribeResp{}, itypes.BizOK
+}
+
+func (m *CustomerCore) CustomerList(ctx *itypes.Context, req *proto.CustomerListReq) (resp *proto.CustomerListResp, total int64, code itypes.BizCode) {
+	var err error
+	var dos []*models.CustomerDO
+	dos, total, err = m.customerDAO.QueryList(req.PageNo, req.PageSize, req.Id, req.Email)
+	if err != nil {
+		log.Errorf(err.Error())
+		return nil, 0, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	return &proto.CustomerListResp{
+		List: dos,
+	}, total, itypes.BizOK
+}
diff --git a/pkg/dal/core/core_deploy.go b/pkg/dal/core/core_deploy.go
new file mode 100644
index 0000000..02f4308
--- /dev/null
+++ b/pkg/dal/core/core_deploy.go
@@ -0,0 +1,1257 @@
+package core
+
+import (
+	"archive/tar"
+	"bufio"
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"intent-system/pkg/config"
+	"intent-system/pkg/dal/dao"
+	"intent-system/pkg/dal/models"
+	"intent-system/pkg/itypes"
+	"intent-system/pkg/proto"
+	"intent-system/pkg/utils"
+	"io"
+	"net"
+	"os"
+	"os/exec"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/client"
+	"github.com/docker/go-connections/nat"
+
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/daemon"
+)
+
+type DeployCore struct {
+	db  *sqlca.Engine
+	cfg *config.Config
+
+	deployDAO     *dao.DeployDAO
+	tagDAO        *dao.TagDAO
+	userDAO       *dao.UserDAO
+	newsDAO       *dao.NewsDAO
+	newsDraftDAO  *dao.NewsDraftDAO
+	customerDAO   *dao.CustomerDAO
+	dictionaryDAO *dao.DictionaryDAO
+	subDAO        *dao.NewsSubscribeDAO
+	qaDAO         *dao.QuestionAnswerDAO
+	qaDraftDAO    *dao.QuestionDraftDAO
+}
+
+type progressReader struct {
+	r         io.Reader
+	readBytes int64
+	total     int64
+	update    func(p int) // p is a percentage from 0 to 100
+	lastPct   int
+}
+
+func GetSupabaseURL() string { // exported wrapper so other packages can call the lowercase helper
+	return getSupabaseURL()
+}
+
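+// getSupabaseURL returns "http://<ip>:8000" built from the local interface
+// address that shares a subnet with the default gateway, falling back to
+// "http://localhost:8000" when no match is found.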
+func getSupabaseURL() string {
+	// Resolve the default gateway IP
+	gatewayIP, err := getDefaultGateway()
+	if err != nil {
+		fmt.Println("⚠️ failed to get default gateway, using fallback:", err)
+		return "http://localhost:8000"
+	}
+
+	// Enumerate all local interface IPs and subnets
+	ifaces, err := net.Interfaces()
+	if err != nil {
+		fmt.Println("⚠️ failed to list local interfaces:", err)
+		return "http://localhost:8000"
+	}
+
+	for _, iface := range ifaces {
+		if (iface.Flags&net.FlagUp == 0) || (iface.Flags&net.FlagLoopback != 0) {
+			continue // skip interfaces that are down or loopback
+		}
+
+		addrs, err := iface.Addrs()
+		if err != nil {
+			continue
+		}
+
+		for _, addr := range addrs {
+			ipNet, ok := addr.(*net.IPNet)
+			if !ok || ipNet.IP.To4() == nil {
+				continue
+			}
+
+			// Check whether the gateway falls within this interface's subnet
+			if ipNet.Contains(gatewayIP) {
+				// Found an IP on the same subnet as the gateway
+				return fmt.Sprintf("http://%s:8000", ipNet.IP.String())
+			}
+		}
+	}
+
+	// No matching subnet found; fall back to localhost
+	return "http://localhost:8000"
+}
+
+func getDefaultGateway() (net.IP, error) {
+	// Read the default route via the Linux ip command
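+	// Typical output line: "default via 192.168.1.1 dev eth0 proto dhcp metric 100"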
+	cmd := exec.Command("ip", "route", "show", "default")
+	var out bytes.Buffer
+	cmd.Stdout = &out
+	err := cmd.Run()
+	if err != nil {
+		return nil, err
+	}
+
+	lines := strings.Split(out.String(), "\n")
+	for _, line := range lines {
+		if strings.HasPrefix(line, "default") {
+			fields := strings.Fields(line)
+			for i, f := range fields {
+				if f == "via" && i+1 < len(fields) {
+					return net.ParseIP(fields[i+1]), nil
+				}
+			}
+		}
+	}
+
+	return nil, fmt.Errorf("default gateway not found")
+}
+
+func (pr *progressReader) Read(p []byte) (int, error) {
+	n, err := pr.r.Read(p)
+	pr.readBytes += int64(n)
+
+	percent := int(float64(pr.readBytes) / float64(pr.total) * 100)
+	if percent > pr.lastPct {
+		pr.lastPct = percent
+		pr.update(percent)
+	}
+
+	return n, err
+}
+
+func NewDeployCore(cfg *config.Config, db *sqlca.Engine) *DeployCore {
+	return &DeployCore{
+		db:            db,
+		cfg:           cfg,
+		tagDAO:        dao.NewTagDAO(db),
+		userDAO:       dao.NewUserDAO(db),
+		newsDAO:       dao.NewNewsDAO(db),
+		newsDraftDAO:  dao.NewNewsDraftDAO(db),
+		deployDAO:     dao.NewDeployDAO(db),
+		customerDAO:   dao.NewCustomerDAO(db),
+		dictionaryDAO: dao.NewDictionaryDAO(db),
+		qaDAO:         dao.NewQuestionAnswerDAO(db),
+		subDAO:        dao.NewNewsSubscribeDAO(db),
+		qaDraftDAO:    dao.NewQuestionDraftDAO(db),
+	}
+}
+
+type manifestItem struct {
+	RepoTags []string `json:"RepoTags"`
+	Config   string   `json:"Config"`
+}
+
+// GetRunningContainerImages returns the image names of all running containers
+func GetRunningContainerImages(cli *client.Client) ([]string, error) {
+	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	var images []string
+	for _, ctr := range containers { // ctr avoids shadowing the container package
+		images = append(images, ctr.Image)
+	}
+
+	return images, nil
+}
+
+// GetOCIDigest returns the OCI digest of the given image
+func GetOCIDigest(imageName string) (string, error) {
+	// Parse the image reference
+	ref, err := name.ParseReference(imageName)
+	if err != nil {
+		return "", err
+	}
+
+	// Fetch the image from the local Docker daemon
+	img, err := daemon.Image(ref)
+	if err != nil {
+		return "", err
+	}
+
+	// Compute and return the OCI digest
+	digest, err := img.Digest()
+	if err != nil {
+		return "", err
+	}
+
+	return digest.String(), nil
+}
+
+// GetAllRunningContainersOCIDigests returns every running container's image and its OCI digest
+func GetAllRunningContainersOCIDigests(cli *client.Client) (map[string]string, error) {
+	images, err := GetRunningContainerImages(cli)
+	if err != nil {
+		return nil, err
+	}
+
+	// Compute each image's OCI digest sequentially
+	digests := make(map[string]string)
+	for _, image := range images {
+		log.Printf("Getting OCI Digest for image: %s", image)
+		digest, err := GetOCIDigest(image)
+		if err != nil {
+			log.Printf("Error calculating OCI Digest for image %s: %v", image, err)
+			continue
+		}
+		digests[image] = digest
+	}
+
+	return digests, nil
+}
+
+// extractImageNameFromTar parses the image name (repo:tag) out of an image tar file
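+// A docker-save tar carries a manifest.json shaped roughly like:
+//   [{"Config":"<id>.json","RepoTags":["myimage:latest"],"Layers":["..."]}]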
+func extractImageNameFromTar(tarPath string) (string, error) {
+	file, err := os.Open(tarPath)
+	if err != nil {
+		return "", fmt.Errorf("open tar failed: %w", err)
+	}
+	defer file.Close()
+
+	tarReader := tar.NewReader(file)
+	for {
+		header, err := tarReader.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return "", fmt.Errorf("read tar error: %w", err)
+		}
+		if header.Name == "manifest.json" {
+			var manifests []manifestItem
+			decoder := json.NewDecoder(tarReader)
+			if err := decoder.Decode(&manifests); err != nil {
+				return "", fmt.Errorf("decode manifest.json error: %w", err)
+			}
+			if len(manifests) == 0 || len(manifests[0].RepoTags) == 0 {
+				return "", fmt.Errorf("no RepoTags found in manifest")
+			}
+			return manifests[0].RepoTags[0], nil
+		}
+	}
+	return "", fmt.Errorf("manifest.json not found")
+}
+
+// DeployFromTar deploys and starts a container from an image tar file
+func DeployFromTar(ctx context.Context, tarPath string) (string, error) {
+	log.Infof("Loading Docker image from tar: %s", tarPath)
+
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		return "", fmt.Errorf("create docker client error: %w", err)
+	}
+
+	// Load the image tar into Docker
+	tarFile, err := os.Open(tarPath)
+	if err != nil {
+		return "", fmt.Errorf("open tar error: %w", err)
+	}
+	defer tarFile.Close()
+
+	loadResp, err := cli.ImageLoad(ctx, tarFile, true)
+	if err != nil {
+		return "", fmt.Errorf("image load error: %w", err)
+	}
+	defer loadResp.Body.Close()
+
+	// Parse the image name
+	imageName, err := extractImageNameFromTar(tarPath)
+	if err != nil {
+		return "", fmt.Errorf("extract image name failed: %w", err)
+	}
+	log.Infof("Loaded image: %s", imageName)
+
+	// Read the ports exposed by the image
+	imageInspect, _, err := cli.ImageInspectWithRaw(ctx, imageName)
+	if err != nil {
+		return "", fmt.Errorf("inspect image failed: %w", err)
+	}
+	if len(imageInspect.Config.ExposedPorts) == 0 {
+		return "", fmt.Errorf("image has no exposed ports")
+	}
+
+	portBindings := nat.PortMap{}
+	exposedPorts := nat.PortSet{}
+	for port := range imageInspect.Config.ExposedPorts {
+		hostPort := port.Port() // map each container port to the same host port
+		portBindings[port] = []nat.PortBinding{{
+			HostIP:   "0.0.0.0",
+			HostPort: hostPort,
+		}}
+		exposedPorts[port] = struct{}{}
+		log.Infof("Exposing container port %s -> host port %s", port.Port(), hostPort)
+	}
+
+	// Use the image name as the container name, with the tag suffix stripped
+	containerName := imageName
+	if strings.Contains(containerName, ":") {
+		// Drop the colon and everything after it
+		containerName = strings.SplitN(containerName, ":", 2)[0]
+	}
+
+	// Create the container
+	resp, err := cli.ContainerCreate(ctx, &container.Config{
+		Image:        imageName,
+		ExposedPorts: exposedPorts,
+	}, &container.HostConfig{
+		PortBindings: portBindings,
+		RestartPolicy: container.RestartPolicy{
+			Name: "unless-stopped",
+		},
+		Runtime: "nvidia",               // Enable GPU support (requires nvidia-docker runtime)
+		ShmSize: 2 * 1024 * 1024 * 1024, // Set shared memory size to 2GB
+	}, nil, nil, containerName)
+	if err != nil {
+		return "", fmt.Errorf("create container error: %w", err)
+	}
+
+	// Start the container
+	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
+		return "", fmt.Errorf("start container error: %w", err)
+	}
+
+	log.Infof("Container started successfully: %s", resp.ID)
+	return resp.ID, nil
+}
+
+// DeployCradle loads /data/intent-system/cradle/prebuilt_models/cradle_amd64.tar,
+// removes any existing cradle container & image, then redeploys and starts it.
+func DeployCradleTar_old(ctx context.Context) (string, error) {
+	const tarPath = "/data/intent-system/cradle/prebuilt_models/cradle_amd64.tar"
+
+	// ── Docker client ───────────────────────────────────────────────
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		return "", fmt.Errorf("create docker client error: %w", err)
+	}
+	defer cli.Close()
+
+	// ── Parse the image name ────────────────────────────────────────
+	imageName, err := extractImageNameFromTar(tarPath)
+	if err != nil {
+		return "", fmt.Errorf("extract image name failed: %w", err)
+	}
+	containerName := strings.SplitN(imageName, ":", 2)[0]
+
+	// ── Stop and remove any existing container ─────────────────────
+	{
+		timeout := 10 * time.Second
+		if err := cli.ContainerStop(ctx, containerName, &timeout); err == nil {
+			log.Infof("stopped old container %s", containerName)
+		}
+		_ = cli.ContainerRemove(ctx, containerName, types.ContainerRemoveOptions{Force: true})
+	}
+
+	// ── Remove any existing image ──────────────────────────────────
+	{
+		_, _ = cli.ImageRemove(ctx, imageName, types.ImageRemoveOptions{
+			Force:         true,
+			PruneChildren: true,
+		})
+	}
+
+	// ── Load the new image ─────────────────────────────────────────
+	tarFile, err := os.Open(tarPath)
+	if err != nil {
+		return "", fmt.Errorf("open tar error: %w", err)
+	}
+	defer tarFile.Close()
+
+	loadResp, err := cli.ImageLoad(ctx, tarFile, true)
+	if err != nil {
+		return "", fmt.Errorf("image load error: %w", err)
+	}
+	_ = loadResp.Body.Close()
+	log.Infof("loaded image %s", imageName)
+
+	// ── Port mapping info ──────────────────────────────────────────
+	imageInspect, _, err := cli.ImageInspectWithRaw(ctx, imageName)
+	if err != nil {
+		return "", fmt.Errorf("inspect image failed: %w", err)
+	}
+	if len(imageInspect.Config.ExposedPorts) == 0 {
+		return "", fmt.Errorf("image has no exposed ports")
+	}
+	portBindings := nat.PortMap{}
+	exposedPorts := nat.PortSet{}
+	for port := range imageInspect.Config.ExposedPorts {
+		hostPort := port.Port()
+		portBindings[port] = []nat.PortBinding{{HostIP: "0.0.0.0", HostPort: hostPort}}
+		exposedPorts[port] = struct{}{}
+	}
+
+	// ── Create and start the container ─────────────────────────────
+	supabaseURL := getSupabaseURL()
+	resp, err := cli.ContainerCreate(ctx, &container.Config{
+		Image:        imageName,
+		ExposedPorts: exposedPorts,
+		Env:          []string{fmt.Sprintf("SUPABASE_URL=%s", supabaseURL)},
+	}, &container.HostConfig{
+		PortBindings: portBindings,
+		RestartPolicy: container.RestartPolicy{
+			Name: "unless-stopped",
+		},
+		Runtime: "nvidia",
+		ShmSize: 2 * 1024 * 1024 * 1024,
+	}, nil, nil, containerName)
+	if err != nil {
+		return "", fmt.Errorf("create container error: %w", err)
+	}
+
+	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
+		return "", fmt.Errorf("start container error: %w", err)
+	}
+
+	log.Infof("✅ cradle container started: %s", resp.ID)
+	return resp.ID, nil
+}
+
+// DeployCradleTar deploys safely: it only switches over after the new image passes a probe
+func DeployCradleTar(ctx context.Context) (string, error) {
+	const tarPath = "/data/intent-system/cradle/prebuilt_models/cradle_amd64.tar"
+
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		return "", fmt.Errorf("create docker client: %w", err)
+	}
+	defer cli.Close()
+
+	/* ---------- 0. Pre-read the image name from the tar ---------- */
+	imageName, err := extractImageNameFromTar(tarPath)
+	if err != nil || imageName == "" {
+		return "", fmt.Errorf("extract image name: %w", err)
+	}
+	if !strings.Contains(imageName, ":") {
+		imageName += ":latest"
+	}
+	containerName := strings.SplitN(imageName, ":", 2)[0]
+	log.Infof("[DEBUG] imageName=%s, containerName=%s", imageName, containerName)
+
+	/* ---------- 1. Clean up the old container ---------- */
+	cs, _ := cli.ContainerList(ctx, types.ContainerListOptions{All: true})
+	for _, c := range cs {
+		for _, n := range c.Names {
+			if strings.TrimPrefix(n, "/") == containerName {
+				timeout := 10 * time.Second
+				_ = cli.ContainerStop(ctx, c.ID, &timeout)
+				_ = cli.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{Force: true})
+				break
+			}
+		}
+	}
+
+	/* ---------- 2. Remove the old image (ignore not-found errors) ---------- */
+	_, _ = cli.ImageRemove(ctx, imageName, types.ImageRemoveOptions{Force: true, PruneChildren: true})
+
+	/* ---------- 3. Reload the image ---------- */
+	if err := loadImage(ctx, cli, tarPath); err != nil {
+		return "", err
+	}
+
+	/* ---------- 4. Probe the image ---------- */
+	if ok, probeErr := probeImage(ctx, cli, imageName); !ok {
+		return "", fmt.Errorf("镜像探活失败: %w", probeErr)
+	}
+
+	/* ---------- 5. Port mappings ---------- */
+	inspect, _, err := cli.ImageInspectWithRaw(ctx, imageName)
+	if err != nil {
+		return "", err
+	}
+	pb, ep := makePortMaps(inspect.Config.ExposedPorts)
+	if len(ep) == 0 {
+		return "", fmt.Errorf("image has no exposed ports")
+	}
+
+	/* ---------- 6. Create and start the container ---------- */
+	resp, err := cli.ContainerCreate(ctx, &container.Config{
+		Image:        imageName,
+		ExposedPorts: ep,
+		Env:          []string{fmt.Sprintf("SUPABASE_URL=%s", getSupabaseURL())},
+	}, &container.HostConfig{
+		PortBindings: pb,
+		RestartPolicy: container.RestartPolicy{
+			Name: "unless-stopped",
+		},
+		Runtime: "nvidia",
+		ShmSize: 2 * 1024 * 1024 * 1024,
+	}, nil, nil, containerName)
+	if err != nil {
+		return "", err
+	}
+	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
+		return "", err
+	}
+
+	log.Infof("✅ cradle container started: %s", resp.ID)
+	return resp.ID, nil
+}
+
+/* ---------- Helper functions ---------- */
+
+// loadImage runs docker load for the tar and logs any stream output from the daemon
+func loadImage(ctx context.Context, cli *client.Client, tarPath string) error {
+	f, err := os.Open(tarPath)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	resp, err := cli.ImageLoad(ctx, f, true)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	// Try to parse tag info from the JSON stream that ImageLoad returns
+	scanner := bufio.NewScanner(resp.Body)
+	for scanner.Scan() {
+		var line map[string]interface{}
+		_ = json.Unmarshal(scanner.Bytes(), &line)
+		if val, ok := line["stream"]; ok {
+			log.Infof("镜像加载输出: %s", val)
+		}
+	}
+	return nil
+}
+
+// probeImage creates (without starting) a throwaway container from the image; returns (true, nil) on success
+func probeImage(ctx context.Context, cli *client.Client, image string) (bool, error) {
+	ctr, err := cli.ContainerCreate(ctx, &container.Config{Image: image, Cmd: []string{"true"}}, nil, nil, nil, "")
+	if err != nil {
+		return false, err
+	}
+	_ = cli.ContainerRemove(ctx, ctr.ID, types.ContainerRemoveOptions{Force: true})
+	return true, nil
+}
+
+// makePortMaps maps every port exposed by the image to the same port on the host
+func makePortMaps(src nat.PortSet) (nat.PortMap, nat.PortSet) {
+	pm := nat.PortMap{}
+	ep := nat.PortSet{}
+	for p := range src {
+		hostPort := p.Port() // same number as the container port
+		pm[p] = []nat.PortBinding{{HostIP: "0.0.0.0", HostPort: hostPort}}
+		ep[p] = struct{}{}
+	}
+	return pm, ep
+}
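+
+// Example: an image exposing "8000/tcp" yields a makePortMaps binding of
+// host 0.0.0.0:8000 -> container 8000/tcp.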
+
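+// DeployFromTarWithProgress loads the tar, inspects the exposed ports, then
+// creates and starts the container, reporting coarse progress milestones
+// (5, 10, 15-35 while streaming the tar, 36, 45, 60, 80, 100) via update.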
+func DeployFromTarWithProgress(ctx context.Context, tarPath string, update func(progress int, phase string)) (string, error) {
+	update(5, "5%")
+
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		return "", fmt.Errorf("failed to create Docker client: %w", err)
+	}
+
+	tarFile, err := os.Open(tarPath)
+	if err != nil {
+		return "", fmt.Errorf("failed to open tar file: %w", err)
+	}
+	defer tarFile.Close()
+
+	fi, err := tarFile.Stat()
+	if err != nil {
+		return "", fmt.Errorf("failed to get tar file size: %w", err)
+	}
+	totalSize := fi.Size()
+
+	update(10, "10%")
+
+	progressReader := &progressReader{
+		r:     tarFile,
+		total: totalSize,
+		update: func(p int) {
+			progress := 15 + p*20/100 // maps 0–100 → 15–35
+			update(progress, fmt.Sprintf("%d%%", progress))
+		},
+	}
+
+	loadResp, err := cli.ImageLoad(ctx, progressReader, true)
+	if err != nil {
+		return "", fmt.Errorf("failed to load image into Docker: %w", err)
+	}
+	defer loadResp.Body.Close()
+
+	update(36, "36%")
+
+	imageName, err := extractImageNameFromTar(tarPath)
+	if err != nil {
+		return "", fmt.Errorf("failed to extract image name from tar file: %w", err)
+	}
+
+	update(45, "45%")
+
+	imageInspect, _, err := cli.ImageInspectWithRaw(ctx, imageName)
+	if err != nil {
+		return "", fmt.Errorf("failed to inspect image '%s': %w", imageName, err)
+	}
+	if len(imageInspect.Config.ExposedPorts) == 0 {
+		return "", fmt.Errorf("image '%s' does not expose any ports", imageName)
+	}
+
+	portBindings := nat.PortMap{}
+	exposedPorts := nat.PortSet{}
+	for port := range imageInspect.Config.ExposedPorts {
+		hostPort := port.Port()
+		portBindings[port] = []nat.PortBinding{{
+			HostIP:   "0.0.0.0",
+			HostPort: hostPort,
+		}}
+		exposedPorts[port] = struct{}{}
+	}
+
+	update(60, "60%")
+
+	containerName := imageName
+	if strings.Contains(containerName, ":") {
+		containerName = strings.SplitN(containerName, ":", 2)[0]
+	}
+
+	supabaseURL := getSupabaseURL()
+
+	// log.Infof("============================>> Retrieved SUPABASE_URL = %s", supabaseURL)
+
+	resp, err := cli.ContainerCreate(ctx, &container.Config{
+		Image:        imageName,
+		ExposedPorts: exposedPorts,
+		Env: []string{
+			fmt.Sprintf("SUPABASE_URL=%s", supabaseURL),
+		},
+	}, &container.HostConfig{
+		PortBindings: portBindings,
+		RestartPolicy: container.RestartPolicy{
+			Name: "unless-stopped",
+		},
+		Runtime: "nvidia",
+		ShmSize: 2 * 1024 * 1024 * 1024,
+	}, nil, nil, containerName)
+	if err != nil {
+		return "", fmt.Errorf("failed to create container from image '%s': %w", imageName, err)
+	}
+
+	update(80, "80%")
+
+	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
+		return "", fmt.Errorf("failed to start container '%s': %w", containerName, err)
+	}
+
+	update(100, "100%")
+
+	return resp.ID, nil
+}
+
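+// DeployFromTarWithProgress_ch mirrors DeployFromTarWithProgress but reports
+// its progress phases in Chinese for the localized UI.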
+func DeployFromTarWithProgress_ch(ctx context.Context, tarPath string, update func(progress int, phase string)) (string, error) {
+	update(5, "打开镜像文件")
+
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		return "", fmt.Errorf("创建 Docker 客户端失败: %w", err)
+	}
+
+	tarFile, err := os.Open(tarPath)
+	if err != nil {
+		return "", fmt.Errorf("打开 tar 文件失败: %w", err)
+	}
+	defer tarFile.Close()
+
+	// Get the size of the image tar file
+	fi, err := tarFile.Stat()
+	if err != nil {
+		return "", fmt.Errorf("获取 tar 文件大小失败: %w", err)
+	}
+	totalSize := fi.Size()
+
+	update(10, fmt.Sprintf("读取镜像文件完成,大小 %.2f MB", float64(totalSize)/1024/1024))
+
+	// Wrap the reader with progress accounting
+	progressReader := &progressReader{
+		r:     tarFile,
+		total: totalSize,
+		update: func(p int) {
+			// Map upload progress into the 15-35 range
+			progress := 15 + p*20/100
+			update(progress, fmt.Sprintf("上传镜像中...(%d%%)", p))
+		},
+	}
+
+	// Load the image via the Docker API
+	loadResp, err := cli.ImageLoad(ctx, progressReader, true)
+	if err != nil {
+		return "", fmt.Errorf("镜像加载失败: %w", err)
+	}
+	defer loadResp.Body.Close()
+
+	update(36, "镜像上传完毕,解析镜像名称")
+
+	// Parse the image name
+	imageName, err := extractImageNameFromTar(tarPath)
+	if err != nil {
+		return "", fmt.Errorf("提取镜像名失败: %w", err)
+	}
+
+	update(45, "检查镜像端口配置")
+
+	imageInspect, _, err := cli.ImageInspectWithRaw(ctx, imageName)
+	if err != nil {
+		return "", fmt.Errorf("镜像检查失败: %w", err)
+	}
+	if len(imageInspect.Config.ExposedPorts) == 0 {
+		return "", fmt.Errorf("镜像没有暴露端口")
+	}
+
+	portBindings := nat.PortMap{}
+	exposedPorts := nat.PortSet{}
+	for port := range imageInspect.Config.ExposedPorts {
+		hostPort := port.Port()
+		portBindings[port] = []nat.PortBinding{{
+			HostIP:   "0.0.0.0",
+			HostPort: hostPort,
+		}}
+		exposedPorts[port] = struct{}{}
+	}
+
+	update(60, "创建中...")
+
+	containerName := imageName
+	if strings.Contains(containerName, ":") {
+		containerName = strings.SplitN(containerName, ":", 2)[0]
+	}
+
+	// Get the local IP that sits on the same subnet as the default gateway
+	supabaseURL := getSupabaseURL()
+
+	log.Infof("============================>>获取到的 SUPABASE_URL = %s", supabaseURL)
+
+	resp, err := cli.ContainerCreate(ctx, &container.Config{
+		Image:        imageName,
+		ExposedPorts: exposedPorts,
+		Env: []string{
+			fmt.Sprintf("SUPABASE_URL=%s", supabaseURL), // 👈 新增:传入 SUPABASE_URL 环境变量
+		},
+	}, &container.HostConfig{
+		PortBindings: portBindings,
+		RestartPolicy: container.RestartPolicy{
+			Name: "unless-stopped",
+		},
+		Runtime: "nvidia",
+		ShmSize: 2 * 1024 * 1024 * 1024,
+	}, nil, nil, containerName)
+	if err != nil {
+		return "", fmt.Errorf("创建失败: %w", err)
+	}
+
+	update(80, "启动中...")
+
+	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
+		return "", fmt.Errorf("模型启动失败: %w", err)
+	}
+
+	update(100, "部署完成")
+
+	return resp.ID, nil
+}
+
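+// mustParseAppID parses appID as a base-10 int64 and returns 0 if parsing fails.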
+func mustParseAppID(appID string) int64 {
+	id, _ := strconv.ParseInt(appID, 10, 64)
+	return id
+}
+
+func (m *DeployCore) Deploy(req *proto.DeployDeployReq) (resp *proto.DeployDeployResp, code itypes.BizCode) {
+	log.Infof(".......dal_core.................Deploy..........AppID=%s", req.AppID)
+
+	if GlobalTaskManager.Get(req.AppID) != nil {
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "部署任务已存在")
+	}
+
+	appID, err := strconv.ParseInt(req.AppID, 10, 64)
+	if err != nil {
+		log.Errorf("invalid AppID: %s", req.AppID)
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "invalid AppID")
+	}
+
+	if m.newsDAO == nil {
+		log.Errorf("newsDAO is nil!")
+		return nil, itypes.NewBizCode(itypes.CODE_INTERNAL_SERVER_ERROR, "newsDAO is nil")
+	}
+
+	do, err := m.newsDAO.QueryById(appID)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if do.Id == 0 {
+		log.Warnf("QueryById returned nil for appID=%d", appID)
+		return nil, itypes.NewBizCode(itypes.CODE_NOT_FOUND, "app not found")
+	}
+
+	tarPath := do.Url
+	if tarPath == "" {
+		log.Errorf("tarPath 为空 (appID=%v)", appID)
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "镜像路径为空")
+	}
+
+	repoName := do.RepoName
+	mbUUID := utils.GetMBUUID()
+
+	task := GlobalTaskManager.Create(
+		req.AppID,
+		tarPath,
+		m.deployDAO,
+		req.UserName,
+		repoName,
+		mbUUID,
+	)
+	if task == nil {
+		return nil, itypes.NewBizCode(itypes.CODE_INTERNAL_SERVER_ERROR, "failed to create deploy task")
+	}
+
+	sameUrlNewsList, err := m.newsDAO.QueryAllByUrl(tarPath)
+	if err != nil {
+		log.Errorf("failed to query records sharing the same URL: %v", err)
+		// optional: return here, or continue with an empty list
+	}
+	payload, _ := json.MarshalIndent(sameUrlNewsList, "", "  ") // payload avoids shadowing the bytes package
+	log.Infof("sameUrlNewsList=\n%s", string(payload))
+	for _, news := range sameUrlNewsList {
+		deployRecord := &models.DeployDO{
+			Nid:         int32(news.Id),
+			Status:      "deploying",
+			UserName:    req.UserName,
+			RepoName:    repoName,
+			MbUuid:      mbUUID,
+			CreatedTime: time.Now(),
+		}
+		if _, err := m.deployDAO.Insert(deployRecord); err != nil {
+			log.Errorf("写入部署记录失败 (nid=%d): %v", news.Id, err)
+		}
+	}
+
+	return &proto.DeployDeployResp{}, itypes.BizOK
+}
+
+func (m *DeployCore) Status(req *proto.DeployStatusReq) (resp *proto.DeployStatusResp, code itypes.BizCode) {
+	log.Infof(".......dal_core.................DeployStatus..........AppID=%s", req.AppID)
+	appID, err := strconv.ParseInt(req.AppID, 10, 64)
+	if err != nil {
+		log.Errorf("invalid AppID: %s", req.AppID)
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "invalid AppID")
+	}
+
+	if m.newsDAO == nil {
+		log.Errorf("newsDAO is nil!")
+		return nil, itypes.NewBizCode(itypes.CODE_INTERNAL_SERVER_ERROR, "internal error: newsDAO is nil")
+	}
+
+	do, err := m.newsDAO.QueryById(appID)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if do.Id == 0 {
+		log.Warnf("QueryById returned nil for appID=%d", appID)
+		return nil, itypes.NewBizCode(itypes.CODE_NOT_FOUND, "app not found")
+	}
+
+	do1, err := m.deployDAO.QueryByNid(appID)
+	if err != nil {
+		return nil, itypes.NewBizCodeDatabaseError(err.Error())
+	}
+	if do1.Id == 0 {
+		log.Warnf("QueryByNid returned nil for nID=%d", appID)
+		return nil, itypes.NewBizCode(itypes.CODE_NOT_FOUND, "no deploy record found")
+	}
+
+	log.Infof("返回部署记录 ID=%d,状态=%s", do1.Id, do1.Status)
+
+	// 返回完整记录
+	return &proto.DeployStatusResp{
+		Data: do1,
+	}, itypes.BizOK
+}
+
+func (m *DeployCore) Delete(req *proto.DeployDeleteReq) (resp *proto.DeployDeleteResp, code itypes.BizCode) {
+	log.Infof(".......dal_core.................DeployDelete..........NID=%s, User=%s", req.Nid, req.UserName)
+
+	nid, err := strconv.ParseInt(req.Nid, 10, 64)
+	if err != nil {
+		log.Errorf("非法的 n_id: %s", req.Nid)
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "非法的 n_id")
+	}
+
+	// 1. 查找部署记录
+	do, err := m.deployDAO.QueryByNid(nid)
+	if err != nil {
+		log.Errorf("查询部署记录失败: %v", err)
+		return nil, itypes.NewBizCodeDatabaseError("数据库查询失败")
+	}
+	if do == nil || do.Id == 0 {
+		log.Warnf("未找到部署记录, Nid=%s", req.Nid)
+		return nil, itypes.NewBizCode(itypes.CODE_NOT_FOUND, "部署记录不存在")
+	}
+	if do.UserName != req.UserName {
+		log.Warnf("用户不匹配,拒绝删除:req.UserName=%s, record.UserName=%s", req.UserName, do.UserName)
+		return nil, itypes.NewBizCode(itypes.CODE_NOT_FOUND, "没有权限删除该容器")
+	}
+
+	repoName := do.RepoName
+	imageRef := do.RepoName + ":latest"
+
+	if repoName == "" {
+		log.Errorf("repo_name 为空,不能删除容器")
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "repo_name 为空")
+	}
+
+	// 2. Initialize the Docker client
+	dockerCli, err := client.NewClientWithOpts(client.FromEnv)
+	if err != nil {
+		log.Errorf("failed to initialize Docker client: %v", err)
+		return nil, itypes.NewBizCode(itypes.CODE_INTERNAL_SERVER_ERROR, "Docker initialization failed")
+	}
+	defer dockerCli.Close()
+	ctx := context.Background()
+
+	// 3. Stop the container
+	timeout := 10 * time.Second
+	log.Infof("attempting to stop container: %s", repoName)
+	if err := dockerCli.ContainerStop(ctx, repoName, &timeout); err != nil {
+		log.Warnf("failed to stop container (it may already have exited): %v", err)
+	}
+
+	// 4. Wait for the container to reach the exited state
+	maxRetry := 10
+	for i := 0; i < maxRetry; i++ {
+		inspect, err := dockerCli.ContainerInspect(ctx, repoName)
+		if err != nil {
+			if client.IsErrNotFound(err) {
+				break // container no longer exists
+			}
+			log.Errorf("查询容器状态失败: %v", err)
+			return nil, itypes.NewBizCode(itypes.CODE_INTERNAL_SERVER_ERROR, "查询容器状态失败")
+		}
+
+		status := inspect.State.Status
+		log.Infof("当前容器状态: %s", status)
+
+		if status == "exited" || status == "dead" {
+			break
+		}
+		time.Sleep(1 * time.Second)
+	}
+
+	// 5. Remove the container
+	log.Infof("attempting to remove container: %s", repoName)
+	if err := dockerCli.ContainerRemove(ctx, repoName, types.ContainerRemoveOptions{Force: false}); err != nil {
+		log.Errorf("failed to remove container: %v", err)
+		return nil, itypes.NewBizCode(itypes.CODE_INTERNAL_SERVER_ERROR, "container removal failed: "+err.Error())
+	}
+
+	if imageRef != "" {
+		log.Infof("尝试删除镜像:%s", imageRef)
+		_, err := dockerCli.ImageRemove(ctx, imageRef, types.ImageRemoveOptions{
+			Force:         true,
+			PruneChildren: true,
+		})
+		if err != nil {
+			log.Warnf("镜像删除失败(可忽略): %v", err)
+		} else {
+			log.Infof("✅ 镜像删除成功: %s", imageRef)
+		}
+	}
+
+	// 7. Delete the database records
+	// if err := m.deployDAO.DeleteByNid(nid); err != nil {
+	// 	log.Warnf("failed to delete deploy record from the database: %v", err)
+	// }
+
+	// Look up one record and use its mb_uuid/repo_name/user_name as the match criteria
+	record, err := m.deployDAO.QueryByNid(nid)
+	if err != nil {
+		log.Warnf("查询部署记录失败: %v", err)
+	} else {
+		if err := m.deployDAO.DeleteByFields(record.MbUuid, record.RepoName, record.UserName); err != nil {
+			log.Warnf("批量删除部署记录失败: %v", err)
+		}
+	}
+
+	log.Infof("✅ 容器删除成功,Nid=%d", nid)
+	return &proto.DeployDeleteResp{}, itypes.BizOK
+}
+
+func (m *DeployCore) Start(req *proto.DeployStartReq) (resp *proto.DeployStartResp, code itypes.BizCode) {
+	log.Infof(".......dal_core.................DeployStart..........NID=%s, User=%s", req.Nid, req.UserName)
+
+	nid, err := strconv.ParseInt(req.Nid, 10, 64)
+	if err != nil {
+		log.Errorf("Invalid n_id: %s", req.Nid)
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "Invalid n_id")
+	}
+
+	// 1. Query deploy record
+	do, err := m.deployDAO.QueryByNid(nid)
+	if err != nil {
+		log.Errorf("Failed to query deploy record: %v", err)
+		return nil, itypes.NewBizCodeDatabaseError("Database query failed")
+	}
+	if do == nil || do.Id == 0 {
+		log.Warnf("No deploy record found, Nid=%s", req.Nid)
+		return nil, itypes.NewBizCode(itypes.CODE_NOT_FOUND, "Deploy record not found")
+	}
+	if do.UserName != req.UserName {
+		log.Warnf("User mismatch, permission denied: req.UserName=%s, record.UserName=%s", req.UserName, do.UserName)
+		return nil, itypes.NewBizCode(itypes.CODE_NOT_FOUND, "No permission to start this container")
+	}
+
+	repoName := do.RepoName
+	if repoName == "" {
+		log.Errorf("repo_name is empty, cannot start container")
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "repo_name is empty")
+	}
+
+	// 2. Init Docker client
+	dockerCli, err := client.NewClientWithOpts(client.FromEnv)
+	if err != nil {
+		log.Errorf("Failed to initialize Docker client: %v", err)
+		return nil, itypes.NewBizCode(itypes.CODE_INTERNAL_SERVER_ERROR, "Docker initialization failed")
+	}
+	defer dockerCli.Close()
+	ctx := context.Background()
+
+	// 3. Start the container
+	log.Infof("Attempting to start container: %s", repoName)
+	if err := dockerCli.ContainerStart(ctx, repoName, types.ContainerStartOptions{}); err != nil {
+		log.Errorf("Failed to start container '%s': %v", repoName, err)
+		return nil, itypes.NewBizCode(itypes.CODE_INTERNAL_SERVER_ERROR, "Failed to start container: "+err.Error())
+	}
+
+	// 4. Update DB status to "running"
+	if err := m.deployDAO.UpdateStatusByFields(do.MbUuid, do.RepoName, do.UserName, "running"); err != nil {
+		log.Warnf("Container started, but failed to update DB: %v", err)
+	}
+
+	log.Infof("✅ Container started successfully: %s", repoName)
+	return &proto.DeployStartResp{}, itypes.BizOK
+}
+
+func (m *DeployCore) Stop(req *proto.DeployStopReq) (resp *proto.DeployStopResp, code itypes.BizCode) {
+	log.Infof(".......dal_core.................DeployStop..........NID=%s, User=%s", req.Nid, req.UserName)
+
+	nid, err := strconv.ParseInt(req.Nid, 10, 64)
+	if err != nil {
+		log.Errorf("Invalid nid: %s", req.Nid)
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "Invalid nid")
+	}
+
+	// 1. Query deployment record
+	do, err := m.deployDAO.QueryByNid(nid)
+	if err != nil {
+		log.Errorf("Failed to query deployment record: %v", err)
+		return nil, itypes.NewBizCodeDatabaseError("Failed to query deployment record")
+	}
+	if do == nil || do.Id == 0 {
+		log.Warnf("Deployment record not found, Nid=%s", req.Nid)
+		return nil, itypes.NewBizCode(itypes.CODE_NOT_FOUND, "Deployment not found")
+	}
+	if do.UserName != req.UserName {
+		log.Warnf("Permission denied: req.UserName=%s, record.UserName=%s", req.UserName, do.UserName)
+		return nil, itypes.NewBizCode(itypes.CODE_NOT_FOUND, "No permission to stop this container")
+	}
+
+	repoName := do.RepoName
+	if repoName == "" {
+		log.Errorf("repo_name is empty, cannot stop container")
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "repo_name is empty")
+	}
+
+	// 2. Init Docker client
+	dockerCli, err := client.NewClientWithOpts(client.FromEnv)
+	if err != nil {
+		log.Errorf("Failed to initialize Docker client: %v", err)
+		return nil, itypes.NewBizCode(itypes.CODE_INTERNAL_SERVER_ERROR, "Docker initialization failed")
+	}
+	defer dockerCli.Close()
+	ctx := context.Background()
+
+	// 3. Stop container
+	timeout := 10 * time.Second
+	log.Infof("Attempting to stop container: %s", repoName)
+	if err := dockerCli.ContainerStop(ctx, repoName, &timeout); err != nil {
+		log.Warnf("Failed to stop container (might already be stopped): %v", err)
+	}
+
+	// 4. Wait for container to be exited or dead
+	maxRetry := 10
+	for i := 0; i < maxRetry; i++ {
+		inspect, err := dockerCli.ContainerInspect(ctx, repoName)
+		if err != nil {
+			if client.IsErrNotFound(err) {
+				break // container not found
+			}
+			log.Errorf("Failed to inspect container: %v", err)
+			return nil, itypes.NewBizCode(itypes.CODE_INTERNAL_SERVER_ERROR, "Failed to inspect container status")
+		}
+
+		status := inspect.State.Status
+		log.Infof("Current container status: %s", status)
+
+		if status == "exited" || status == "dead" {
+			break
+		}
+		time.Sleep(1 * time.Second)
+	}
+
+	// 5. Update status in database
+	if err := m.deployDAO.UpdateStatusByFields(do.MbUuid, do.RepoName, do.UserName, "stopped"); err != nil {
+		log.Warnf("Failed to update deployment status in database: %v", err)
+	}
+
+	log.Infof("✅ Container stopped successfully, Nid=%d", nid)
+	return &proto.DeployStopResp{}, itypes.BizOK
+}
diff --git a/pkg/dal/core/core_deploy_task.go b/pkg/dal/core/core_deploy_task.go
new file mode 100644
index 0000000..eaf5249
--- /dev/null
+++ b/pkg/dal/core/core_deploy_task.go
@@ -0,0 +1,182 @@
+package core
+
+import (
+	"context"
+	"fmt"
+	"intent-system/pkg/dal/dao"
+	"sync"
+	"time"
+
+	"github.com/civet148/log"
+	"github.com/gorilla/websocket"
+)
+
+type DeployTaskCore struct {
+	AppID    string
+	TarPath  string
+	Progress int
+	Status   string
+	Err      error
+
+	Ctx    context.Context
+	Cancel context.CancelFunc
+
+	Clients   map[*websocket.Conn]string
+	Lock      sync.RWMutex
+	DeployDAO *dao.DeployDAO
+	UserName  string
+	RepoName  string
+	MbUuid    string
+}
+
+var GlobalTaskManager = &TaskManager{
+	tasks: make(map[string]*DeployTaskCore),
+}
+
+type TaskManager struct {
+	tasks map[string]*DeployTaskCore
+	lock  sync.RWMutex
+}
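+
+// Typical flow: Deploy creates one task per appID via GlobalTaskManager.Create,
+// WebSocket clients Attach/Detach to stream progress, and the task removes
+// itself from the manager when Run returns.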
+
+// Create registers a deploy task keyed by appID and starts it on a goroutine;
+// it returns nil if a task for that appID already exists.
+func (m *TaskManager) Create(appID string, tarPath string, deployDAO *dao.DeployDAO, userName string, repoName string, mbUuid string) *DeployTaskCore {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	if _, exists := m.tasks[appID]; exists {
+		log.Infof("重复任务,退出!")
+		return nil
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	task := &DeployTaskCore{
+		AppID:     appID,
+		TarPath:   tarPath, // ✅ 保存 tar 路径
+		Ctx:       ctx,
+		Cancel:    cancel,
+		Clients:   make(map[*websocket.Conn]string),
+		DeployDAO: deployDAO, // ✅ 注入 DAO
+		UserName:  userName,  // ✅ 保存进结构体
+		RepoName:  repoName,  // ✅ 保存进结构体
+		MbUuid:    mbUuid,    // ✅ 保存进结构体
+	}
+
+	m.tasks[appID] = task
+	go task.Run()
+	return task
+}
+
+func (m *TaskManager) Get(appID string) *DeployTaskCore {
+	m.lock.RLock()
+	defer m.lock.RUnlock()
+	return m.tasks[appID]
+}
+
+func (m *TaskManager) Remove(appID string) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	delete(m.tasks, appID)
+}
+
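+// Run waits up to 30s for the "deploying" DB record to appear, deploys the tar
+// while broadcasting progress to attached clients, marks the record "running"
+// on success, and deletes the "deploying" records if the task fails.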
+func (t *DeployTaskCore) Run() {
+	defer GlobalTaskManager.Remove(t.AppID)
+
+	// ====== Wait until the matching record exists in the database before continuing ======
+	const maxWait = 30 // wait at most 30 seconds
+	waited := 0
+	for {
+		count, err := t.DeployDAO.CountByUserRepoUUIDAndStatus(t.UserName, t.RepoName, t.MbUuid, "deploying")
+		if err != nil {
+			log.Errorf("failed to count deploy records: %v", err)
+			// If even the count query fails there is no point in waiting; bail out
+			return
+		}
+
+		if count > 0 {
+			break // the record exists, continue with the deployment
+		}
+		if waited >= maxWait {
+			t.Err = fmt.Errorf("timed out waiting for a deploying record")
+			t.broadcast([]byte("[DeployTaskCore Error] timed out, no deploying record detected"))
+			return
+		}
+		time.Sleep(time.Second)
+		waited++
+	}
+
+	// Cleanup on failure, executed just before the task exits
+	defer func() {
+		if t.Err != nil {
+			log.Warnf("deployment failed, deleting records for user_name=%s repo_name=%s mb_uuid=%s status=deploying", t.UserName, t.RepoName, t.MbUuid)
+			err := t.DeployDAO.DeleteByUserRepoUUIDAndStatus(t.UserName, t.RepoName, t.MbUuid, "deploying")
+			if err != nil {
+				log.Errorf("error cleaning up failed deploy records: %v", err)
+			}
+		}
+	}()
+
+	t.updateProgress(5, "5%")
+
+	// Deploy using tar file
+	containerID, err := DeployFromTarWithProgress(t.Ctx, t.TarPath, func(p int, phase string) {
+		t.updateProgress(p, phase)
+	})
+
+	if err != nil {
+		t.Lock.Lock()
+		t.Status = "deployment_failed"
+		t.Err = err
+		t.Lock.Unlock()
+		t.broadcast([]byte(fmt.Sprintf("[Deployment Failed] %v", err)))
+		return
+	}
+
+	t.updateProgress(100, "100%")
+
+	// Update deployment status in database
+	err = t.DeployDAO.UpdateStatusByAppID(int64(mustParseAppID(t.AppID)), "running")
+	if err != nil {
+		log.Warnf("failed to update status to 'running' (appID=%v): %v", t.AppID, err)
+	}
+
+	t.broadcast([]byte("[Deployment Successful] Container ID: " + containerID))
+}
+
+func (t *DeployTaskCore) updateProgress(p int, status string) {
+	t.Lock.Lock()
+	t.Progress = p
+	t.Status = status
+	t.Lock.Unlock()
+	t.broadcast([]byte(status)) // broadcast only the plain percentage string
+}
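+
+// Each progress update reaches every attached client with a per-user prefix
+// added by broadcast, so a client sees messages like "[User alice] 5%" and,
+// on completion, "[User alice] [Deployment Successful] Container ID: ...".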
+
+func (t *DeployTaskCore) broadcast(msg []byte) {
+	t.Lock.RLock()
+	defer t.Lock.RUnlock()
+
+	log.Infof("📤 Broadcasting message to %d clients: %s", len(t.Clients), msg)
+
+	for conn, username := range t.Clients {
+		prefix := fmt.Sprintf("[User %s] ", username)
+		_ = conn.WriteMessage(websocket.TextMessage, []byte(prefix+string(msg)))
+	}
+}
+
+func (t *DeployTaskCore) Attach(conn *websocket.Conn, username string) {
+	t.Lock.Lock()
+	defer t.Lock.Unlock()
+
+	t.Clients[conn] = username
+	log.Infof("用户 %s 连接部署任务 %s", username, t.AppID)
+}
+
+func (t *DeployTaskCore) Detach(conn *websocket.Conn) {
+	t.Lock.Lock()
+	username := t.Clients[conn]
+	delete(t.Clients, conn)
+	t.Lock.Unlock()
+
+	_ = conn.Close()
+	log.Infof("用户 %s 断开部署任务 %s 的连接", username, t.AppID)
+}
diff --git a/pkg/dal/core/core_gateway.go b/pkg/dal/core/core_gateway.go
new file mode 100644
index 0000000..48d2d10
--- /dev/null
+++ b/pkg/dal/core/core_gateway.go
@@ -0,0 +1,252 @@
+package core
+
+import (
+	"encoding/json"
+	"fmt"
+	"intent-system/pkg/config"
+	"intent-system/pkg/itypes"
+	"intent-system/pkg/utils"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/civet148/gotools/randoms"
+	"github.com/civet148/httpc"
+	"github.com/civet148/log"
+	"github.com/civet148/socketx"
+	sapi "github.com/civet148/socketx/api"
+	"github.com/civet148/sqlca/v2"
+	"github.com/gorilla/websocket"
+)
+
+const (
+	MessageId_Init               = "init"
+	MessageId_CreateConversation = "createConversation"
+	MessageId_SendMessage        = "sendMessage"
+	MessageId_ToolCallResult     = "toolCallsResult"
+)
+
+const (
+	MessageClientToolCallFunctionFinished = "INTENTION_CLIENT_TOOL_CALL_FUNCTION_FINISHED"
+)
+
+type ChatInitReq struct {
+	MessageId string `json:"messageId"`
+	Key       string `json:"key"`
+	Secret    string `json:"secret"`
+}
+
+type ChatInitResp struct {
+	Result  string `json:"result"`
+	Message string `json:"message"`
+	Token   string `json:"token"`
+}
+
+type ChatConversationReq struct {
+	MessageId string `json:"messageId"`
+	Key       string `json:"key"`
+}
+
+type ChatConversationResp struct {
+	Result         string `json:"result"`
+	Message        string `json:"message"`
+	ConversationId string `json:"conversationId"`
+}
+
+type ChatContentMessage struct {
+	MessageId      string `json:"messageId"`
+	Key            string `json:"key"`
+	ConversationId string `json:"conversationId"`
+	Content        string `json:"content"`
+}
+
+type GatewayCore struct {
+	db  *sqlca.Engine
+	cfg *config.Config
+}
+
+func NewGatewayCore(cfg *config.Config, db *sqlca.Engine) *GatewayCore {
+	return &GatewayCore{
+		db:  db,
+		cfg: cfg,
+	}
+}
+
+func (m *GatewayCore) makeSessionId() string {
+	strRandomTime := utils.NowRandom()
+	strRandom := randoms.RandomAlphaOrNumeric(12, true, true)
+	return fmt.Sprintf("%s-%s", strRandom, strRandomTime)
+}
+
+func (m *GatewayCore) WebSocketRelay(conn *websocket.Conn) (err error) {
+	var ws *itypes.WebSocket
+	var socket *socketx.SocketClient
+	var strToken string
+	strToken, err = m.httpChatInit()
+	if err != nil {
+		return log.Errorf(err.Error())
+	}
+	ws, socket, err = m.webSocketConnect(conn, strToken)
+	if err != nil {
+		return log.Errorf("relay to [%s] error [%s]", m.cfg.GatewayUrl, err.Error())
+	}
+	go m.webSocketRemoteLoop(ws, socket)
+	return m.webSocketPeerLoop(ws, socket)
+}
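+
+// The relay is symmetric: webSocketPeerLoop (below) copies browser → gateway
+// traffic while webSocketRemoteLoop copies gateway → browser; whichever side
+// fails first closes both ends via the deferred Close calls.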
+
+func (m *GatewayCore) makeInitHttpUrl(strUrl string) string {
+	ui, err := url.Parse(strUrl)
+	if err != nil || ui.Host == "" { // scheme-less URLs parse with an empty Host
+		return fmt.Sprintf("http://%s", strUrl)
+	}
+	return fmt.Sprintf("http://%s", ui.Host)
+}
+
+func (m *GatewayCore) makeGatewayTokenUrl(strUrl, strToken string) string {
+	ui, err := url.Parse(strUrl)
+	if err != nil || ui.Host == "" { // scheme-less URLs parse with an empty Host
+		return fmt.Sprintf("ws://%s/ws/%s", strUrl, strToken)
+	}
+	return fmt.Sprintf("ws://%s/ws/%s", ui.Host, strToken)
+}
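+
+// For example (illustrative values): with GatewayUrl "ws://gw.example.com:8080/api"
+// and token "abc123", makeInitHttpUrl yields "http://gw.example.com:8080" and
+// makeGatewayTokenUrl yields "ws://gw.example.com:8080/ws/abc123".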
+
+func (m *GatewayCore) httpChatInit() (string, error) {
+	var err error
+	strGatewayUrl := m.cfg.GatewayUrl
+	strGatewayKey := m.cfg.GatewayKey
+	strGatewaySecret := m.cfg.GatewaySecret
+
+	strInitUrl := m.makeInitHttpUrl(strGatewayUrl)
+	var header = m.makeBasicAuthHeader(strGatewayKey, strGatewaySecret)
+	cli := httpc.NewHttpClient(&httpc.Option{
+		Timeout: 30,
+		Header:  header,
+	})
+	var response *httpc.Response
+	log.Debugf("chat init to [%s] with header [%+v]", strInitUrl, header)
+	var initReq = &ChatInitReq{
+		MessageId: MessageId_Init,
+		Key:       strGatewayKey,
+		Secret:    strGatewaySecret,
+	}
+	response, err = cli.PostJson(strInitUrl, initReq)
+	if err != nil {
+		return "", log.Errorf("chat init request error [%s]", err.Error())
+	}
+	if response.StatusCode != http.StatusOK {
+		return "", log.Errorf("chat init response status [%v] message [%s]", response.StatusCode, response.Body)
+	}
+	log.Debugf("chat init request [%+v] response [%s]", initReq, response.Body)
+	var resp ChatInitResp
+	err = response.Unmarshal(&resp)
+	if err != nil {
+		return "", log.Errorf("unmarshal response body to json structure error [%s]", err.Error())
+	}
+	if resp.Result != "True" {
+		return "", log.Errorf("chat init failed with message [%s]", resp.Message)
+	}
+	if resp.Token == "" {
+		return "", log.Errorf("chat init failed with empty token")
+	}
+	log.Infof("chat init response token [%s]", resp.Token)
+	return resp.Token, nil
+}
+
+func (m *GatewayCore) makeBasicAuthHeader(strGatewayKey, strGatewaySecret string) http.Header {
+	strBasic := socketx.BasicAuth(strGatewayKey, strGatewaySecret)
+	return map[string][]string{
+		itypes.HEADER_AUTHORIZATION: {strBasic},
+	}
+}
+
+func (m *GatewayCore) webSocketConnect(conn *websocket.Conn, strToken string) (*itypes.WebSocket, *socketx.SocketClient, error) {
+	var err error
+	var ws *itypes.WebSocket
+	sock := socketx.NewClient()
+	strGatewayKey := m.cfg.GatewayKey
+	strGatewaySecret := m.cfg.GatewaySecret
+	strGatewayUrl := m.makeGatewayTokenUrl(m.cfg.GatewayUrl, strToken)
+	log.Debugf("websocket connect to [%s] key [%s] secret [%s]", strGatewayUrl, strGatewayKey, strGatewaySecret)
+	var header = m.makeBasicAuthHeader(strGatewayKey, strGatewaySecret)
+	err = sock.Connect(strGatewayUrl, sapi.SocketOption{
+		Header: header,
+	})
+	if err != nil {
+		return nil, nil, log.Errorf("connect to [%s] with header [%+v] error [%s]", strGatewayUrl, header, err)
+	}
+	createReq := &ChatConversationReq{
+		MessageId: MessageId_CreateConversation,
+		Key:       strGatewayKey,
+	}
+	log.Json("create conversation request", createReq)
+	_, err = sock.SendJson(createReq)
+	if err != nil {
+		return nil, nil, log.Errorf("send create conversation request error [%s]", err.Error())
+	}
+	var msg *sapi.SockMessage
+	msg, err = sock.Recv(-1)
+	if err != nil {
+		return nil, nil, log.Errorf("receive create conversation response error [%s]", err.Error())
+	}
+	log.Infof("create conversation response body [%s]", msg.Data)
+	var conversation ChatConversationResp
+	err = json.Unmarshal(msg.Data, &conversation)
+	if err != nil {
+		return nil, nil, log.Errorf("unmarshal conversation response [%s] error [%s]", msg.Data, err.Error())
+	}
+	log.Infof("conversation response [%+v]", conversation)
+	if conversation.Result == "False" {
+		return nil, nil, log.Errorf("conversation create failed, message body [%s] ", msg.Data)
+	}
+	ws, err = itypes.NewWebSocket(conn, conversation.ConversationId)
+	if err != nil {
+		return nil, nil, log.Errorf("new websocket error [%s]", err)
+	}
+	return ws, sock, nil
+}
+
+func (m *GatewayCore) webSocketPeerLoop(ws *itypes.WebSocket, socket *socketx.SocketClient) (err error) {
+	defer ws.Close()
+	defer socket.Close()
+	for {
+		var msg []byte
+		msg, err = ws.ReadMessage()
+		if err != nil {
+			log.Warnf("websocket [%s] read message error [%s]", ws.Id(), err.Error())
+			return err
+		}
+		log.Debugf("websocket [%s] received msg [%s]", ws.Id(), msg)
+		var strMessageId = MessageId_SendMessage
+		if strings.Compare(string(msg), MessageClientToolCallFunctionFinished) == 0 {
+			strMessageId = MessageId_ToolCallResult // client-side tool call function finished
+		}
+		var chat = ChatContentMessage{
+			MessageId:      strMessageId,
+			Key:            m.cfg.GatewayKey,
+			ConversationId: ws.Id(),
+			Content:        string(msg),
+		}
+
+		_, err = socket.SendJson(chat)
+		if err != nil {
+			log.Errorf("relay socket closed by remote server, error [%s]", err.Error())
+			return err
+		}
+	}
+}
+
+func (m *GatewayCore) webSocketRemoteLoop(ws *itypes.WebSocket, socket *socketx.SocketClient) (err error) {
+	defer ws.Close()
+	defer socket.Close()
+	for {
+		var msg *sapi.SockMessage
+		msg, err = socket.Recv(-1)
+		if err != nil {
+			log.Warnf("relay socket read message error [%s]", err.Error())
+			return err
+		}
+		log.Debugf("websocket [%s] received msg [%s]", ws.Id(), msg.Data)
+		err = ws.WriteMessage(msg.Data)
+		if err != nil {
+			log.Errorf("websocket write message error [%s]", err.Error())
+			return err
+		}
+	}
+}
diff --git a/pkg/dal/core/core_platform.go b/pkg/dal/core/core_platform.go
new file mode 100644
index 0000000..342e874
--- /dev/null
+++ b/pkg/dal/core/core_platform.go
@@ -0,0 +1,1122 @@
+package core
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"image"
+	"image/png"
+	"intent-system/pkg/config"
+	"intent-system/pkg/dal/dao"
+	"intent-system/pkg/dal/models"
+	"intent-system/pkg/email"
+	"intent-system/pkg/itypes"
+	"intent-system/pkg/privilege"
+	"intent-system/pkg/proto"
+	"intent-system/pkg/utils"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"github.com/gin-gonic/gin"
+)
+
+const (
+	InherentAdminName       = "admin"
+	SystemName              = "system"
+	InherentAdminPassword   = "c333e2df646d876661e4a13e86feba99" //Coeus@123456
+	InherentAdminNameRemark = "inherent administrator"
+	InherentAdminRoleRemark = "super administrator role"
+
+	InherentAdminRole  = "超级管理员" // super administrator
+	InherentEditRole   = "编辑者"   // editor
+	InherentAccessRole = "访问者"   // accessor
+)
+
+var inherentRoles = map[string]string{
+	InherentAdminRole:  "administrator",
+	InherentEditRole:   "editor",
+	InherentAccessRole: "accessor",
+}
+
+var isInitialized bool
+
+type PlatformCore struct {
+	db            *sqlca.Engine
+	cfg           *config.Config
+	cas           *privilege.CasbinRule
+	userDAO       *dao.UserDAO
+	roleDAO       *dao.RoleDAO
+	loginDAO      *dao.LoginDAO
+	userRoleDAO   *dao.UserRoleDAO
+	privilegeDAO  *dao.PrivilegeDAO
+	dictionaryDAO *dao.DictionaryDAO
+	inviteCodeDAO *dao.InviteCodeDAO
+	customerDAO   *dao.CustomerDAO
+}
+
+func NewPlatformCore(cfg *config.Config, db *sqlca.Engine) *PlatformCore {
+	cas := privilege.NewCasbinRule(privilege.CasbinOption{
+		DSN:   cfg.DSN,
+		Model: privilege.DefaultCasbinModel,
+	})
+	m := &PlatformCore{
+		db:            db,
+		cfg:           cfg,
+		cas:           cas,
+		userDAO:       dao.NewUserDAO(db),
+		roleDAO:       dao.NewRoleDAO(db),
+		loginDAO:      dao.NewLoginDAO(db),
+		userRoleDAO:   dao.NewUserRoleDAO(db),
+		privilegeDAO:  dao.NewPrivilegeDAO(db),
+		dictionaryDAO: dao.NewDictionaryDAO(db),
+		inviteCodeDAO: dao.NewInviteCodeDAO(db),
+		customerDAO:   dao.NewCustomerDAO(db),
+	}
+	return m.initialize()
+}
+
+// initialize role and privilege of platform inherent
+func (m *PlatformCore) initialize() *PlatformCore {
+	if !isInitialized {
+		m.initializeInherentRoles()
+		m.initializeInherentAccount()
+	}
+	isInitialized = true
+	return m
+}
+
+func (m *PlatformCore) initializeInherentRoles() {
+	for role, alias := range inherentRoles {
+		m.initializeInherentRole(role, alias)
+	}
+	m.initializeInherentPrivileges()
+}
+
+func (m *PlatformCore) initializeInherentRole(role, alias string) {
+	var err error
+	var exist bool
+
+	if exist, err = m.roleDAO.CheckRoleExistByName(role); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+
+	if !exist {
+		if _, err = m.roleDAO.Insert(&models.RoleDO{
+			RoleName:   role,
+			RoleAlias:  alias,
+			IsInherent: true,
+		}); err != nil {
+			log.Errorf(err.Error())
+			return
+		}
+	}
+}
+
+func (m *PlatformCore) initializeInherentPrivileges() {
+	for role, alias := range inherentRoles {
+		var authorities []string
+		switch role {
+		case InherentAdminRole:
+			authorities = m.cas.TotalPrivileges()
+		case InherentEditRole:
+			authorities = m.cas.EditPrivileges()
+		case InherentAccessRole:
+			authorities = m.cas.AccessPrivileges()
+		default:
+			log.Warnf("no inherent privilege for role [%s] alias [%s]", role, alias)
+			continue
+		}
+		for _, authority := range authorities {
+			strPath := m.cas.GetPrivilegePath(authority)
+			if strings.EqualFold(strPath, "") {
+				log.Errorf("unknown authority [%s]", authority)
+				continue
+			}
+			// grant the privilege to the role
+			m.cas.AddRoleAuthority(role, strPath, authority)
+		}
+	}
+}
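+
+// Conceptually each grant stored via AddRoleAuthority is a casbin policy
+// triple of (role, path, authority); the concrete path and authority strings
+// come from the privilege package and are not shown here.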
+
+func (m *PlatformCore) initializeInherentAccount() {
+	if ok, err := m.userDAO.CheckActiveUserByUserName(InherentAdminName); err != nil {
+		log.Errorf("query by user name error [%s]", err.Error())
+		return
+	} else if ok {
+		return
+	}
+
+	m.cas.AddUserRole(InherentAdminName, InherentAdminRole)
+
+	var lastId int64
+	var err error
+	var strSalt = utils.GenerateSalt()
+	do := &models.UserDO{
+		UserName:   InherentAdminName,
+		Password:   InherentAdminPassword,
+		Salt:       strSalt,
+		UserAlias:  InherentAdminName,
+		IsAdmin:    true,
+		Remark:     InherentAdminNameRemark,
+		CreateUser: SystemName,
+		EditUser:   SystemName,
+		LoginTime:  utils.Now64(),
+		Deleted:    false,
+		State:      dao.UserState_Enabled,
+	}
+	if lastId, err = m.userDAO.Insert(do); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	do.SetId(int32(lastId))
+	if err = m.userRoleDAO.Insert(&models.UserRoleDO{
+		UserName:   InherentAdminName,
+		RoleName:   InherentAdminRole,
+		CreateUser: SystemName,
+		EditUser:   SystemName,
+		Deleted:    false,
+	}); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+}
+
+func (m *PlatformCore) UserLogin(strUserName, strPassword, strIP string) (user *models.UserDO, code itypes.BizCode) {
+
+	var err error
+	if user, err = m.userDAO.SelectUserByName(strUserName); err != nil {
+		log.Errorf("select [user name] from user table error [%s]", err.Error())
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_USER_OR_PASSWORD)
+	}
+
+	if user == nil || user.GetId() == 0 {
+		if user, err = m.userDAO.SelectUserByEmail(strUserName); err != nil {
+			log.Errorf("select [phone] from user table error [%s]", err.Error())
+			return nil, itypes.NewBizCode(itypes.CODE_DATABASE_ERROR)
+		}
+		if user == nil || user.GetId() == 0 {
+			log.Errorf("user name/email [%s] data not found in db", strUserName)
+			return nil, itypes.NewBizCode(itypes.CODE_INVALID_USER_OR_PASSWORD)
+		}
+	}
+
+	if user.State == dao.UserState_Disabled {
+		log.Errorf("user name/email [%s] account was disabled", strUserName)
+		return nil, itypes.NewBizCode(itypes.CODE_ACCOUNT_BANNED)
+	}
+
+	if strPassword != user.Password {
+		err = fmt.Errorf("user name [%s] password verify failed", strUserName)
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_USER_OR_PASSWORD)
+	}
+
+	user.LoginIp = strIP
+	user.LoginTime = time.Now().Unix()
+	if err = m.userDAO.UpdateByName(user, models.USER_COLUMN_LOGIN_IP, models.USER_COLUMN_LOGIN_TIME); err != nil {
+		log.Errorf("update user [%s] login ip error [%s]", strUserName, err.Error())
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+	_, _ = m.loginDAO.Insert(&models.LoginDO{
+		UserId:    user.GetId(),
+		LoginType: 0,
+		LoginIp:   strIP,
+		LoginAddr: "",
+	})
+	return user, itypes.BizOK
+}
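+
+// Note: passwords are compared as stored digests, so the caller is expected
+// to send the already-hashed password (InherentAdminPassword above appears to
+// be such a digest of "Coeus@123456").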
+
+func (m *PlatformCore) GetUserRole(ctx *itypes.Context, strUserName string) (role *models.RoleDO) {
+	var err error
+
+	if role, err = m.roleDAO.SelectUserRole(strUserName); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (m *PlatformCore) GetUserRoleList(strUserName string) []string {
+	return m.cas.GetUserRoleList(strUserName)
+}
+
+func (m *PlatformCore) CheckPrivilege(c *gin.Context, ctx *itypes.Context, authority string) (ok bool) {
+	if authority == privilege.Null {
+		return true
+	}
+	var err error
+	if ok, err = m.cas.Enforce(ctx.UserName(), c.Request.URL.RequestURI(), authority); ok {
+		return true
+	}
+	if err != nil {
+		log.Errorf("user id [%d] privilege [%v] error:%s", ctx.UserId(), authority, err.Error())
+	}
+	return
+}
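+
+// A typical call site inside a gin handler looks like (illustrative):
+//
+//	if !platformCore.CheckPrivilege(c, ctx, privilege.UserAdd) {
+//		return // respond with an access-denied business code
+//	}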
+
+func (m *PlatformCore) CheckUserNameExist(ctx *itypes.Context, strUserName string) (code itypes.BizCode) {
+	if ok, err := m.userDAO.CheckActiveUserByUserName(strUserName); err != nil {
+		log.Errorf("query by user name %s error [%s]", strUserName, err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	} else if ok {
+		return itypes.NewBizCode(itypes.CODE_ALREADY_EXIST)
+	}
+	return itypes.BizOK
+}
+
+func (m *PlatformCore) CheckUserEmailExist(ctx *itypes.Context, strEmail string) (code itypes.BizCode) {
+
+	if ok, err := m.userDAO.CheckActiveUserByEmail(strEmail); err != nil {
+		log.Errorf("query by email error [%s]", err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	} else if ok {
+		return itypes.NewBizCode(itypes.CODE_ALREADY_EXIST, "email already exists")
+	}
+	return itypes.BizOK
+}
+
+func (m *PlatformCore) CheckUserPhoneExist(ctx *itypes.Context, strPhone string) (code itypes.BizCode) {
+
+	if ok, err := m.userDAO.CheckActiveUserByPhone(strPhone); err != nil {
+		log.Error(err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	} else if ok {
+		return itypes.NewBizCode(itypes.CODE_ALREADY_EXIST)
+	}
+	return itypes.BizOK
+}
+
+func (m *PlatformCore) ListUser(ctx *itypes.Context, req *proto.PlatformListUserReq) (totalUsers []*proto.PlatformTotalUser, total int64, code itypes.BizCode) {
+	var err error
+	var users = make([]*proto.PlatformUser, 0)
+	totalUsers = make([]*proto.PlatformTotalUser, 0)
+	if users, total, err = m.userRoleDAO.SelectUsers(req); err != nil {
+		log.Errorf(err.Error())
+		return nil, 0, itypes.NewBizCodeDatabaseError()
+	}
+	for _, user := range users {
+		sysUser := &proto.PlatformTotalUser{
+			UserId:      user.UserId,
+			UserName:    user.UserName,
+			UserAlias:   user.UserAlias,
+			PhoneNumber: user.PhoneNumber,
+			Email:       user.Email,
+			Remark:      user.UserRemark,
+			LoginTime:   user.LoginTime,
+			RoleName:    user.RoleName,
+			CreateUser:  user.CreateUser,
+			State:       user.State,
+			CreatedTime: user.CreatedTime,
+		}
+		totalUsers = append(totalUsers, sysUser)
+	}
+	return
+}
+
+func (m *PlatformCore) CreateUser(ctx *itypes.Context, req *proto.PlatformCreateUserReq) (do *models.UserDO, code itypes.BizCode) {
+
+	var err error
+	var role *models.RoleDO
+
+	if req.PhoneNumber != "" && !utils.VerifyMobileFormat(req.PhoneNumber) {
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "invalid phone number")
+	}
+
+	if req.RoleName != "" {
+		if role, err = m.roleDAO.SelectRoleByName(req.RoleName); err != nil {
+			log.Errorf(err.Error())
+			return nil, itypes.NewBizCodeDatabaseError()
+		}
+		if role == nil || role.GetId() == 0 {
+			err = log.Errorf("role name [%s] not found", req.RoleName)
+			return nil, itypes.NewBizCode(itypes.CODE_NOT_FOUND, err.Error())
+		}
+
+		//TODO: merge two operations into one transaction later
+
+		// bind the user to the role
+		m.cas.AddUserRole(req.UserName, req.RoleName)
+	} else {
+		err = log.Errorf("no select role name")
+		return nil, itypes.NewBizCode(itypes.CODE_ERROR, err.Error())
+	}
+
+	var lastId int64
+	var strSalt = utils.GenerateSalt()
+	do = &models.UserDO{
+		UserName:    req.UserName,
+		Password:    req.Password,
+		Salt:        strSalt,
+		UserAlias:   req.UserAlias,
+		IsAdmin:     false,
+		PhoneNumber: req.PhoneNumber,
+		Email:       req.Email,
+		Remark:      req.Remark,
+		CreateUser:  ctx.UserName(),
+		EditUser:    ctx.UserName(),
+		LoginTime:   utils.Now64(),
+		Deleted:     false,
+		State:       dao.UserState_Enabled,
+	}
+	if lastId, err = m.userDAO.Insert(do); err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+	do.SetId(int32(lastId))
+	if err = m.userRoleDAO.Insert(&models.UserRoleDO{
+		UserName:   req.UserName,
+		RoleName:   req.RoleName,
+		CreateUser: ctx.UserName(),
+		EditUser:   ctx.UserName(),
+		Deleted:    false,
+	}); err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+	return do, itypes.BizOK
+}
+
+// EditUser updates an account (system user), including its associated role.
+func (m *PlatformCore) EditUser(ctx *itypes.Context, req *proto.PlatformEditUserReq) (code itypes.BizCode) {
+	var err error
+	var user *models.UserDO
+	var userRoleDo = make([]*models.UserRoleDO, 0)
+	if user, code = m.GetUserByName(ctx, req.UserName); !code.Ok() {
+		log.Errorf("edit user return code [%v]", code)
+		return code
+	}
+
+	if req.PhoneNumber != "" && user.PhoneNumber != req.PhoneNumber {
+		if !utils.VerifyMobileFormat(req.PhoneNumber) {
+			return itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "invalid phone number")
+		}
+	}
+
+	if user.Email != req.Email {
+		if !utils.VerifyEmailFormat(req.Email) {
+			return itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "invalid email")
+		}
+		var ok bool
+		if ok, err = m.userDAO.CheckActiveUserByEmail(req.Email); err != nil {
+			log.Errorf(err.Error())
+			return itypes.NewBizCodeDatabaseError()
+		} else if ok {
+			return itypes.NewBizCode(itypes.CODE_ALREADY_EXIST)
+		}
+	}
+
+	if userRoleDo, err = m.userRoleDAO.SelectUserByName(req.UserName); err != nil {
+		log.Errorf(err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+
+	user.PhoneNumber = req.PhoneNumber
+	user.Remark = req.Remark
+	user.UserAlias = req.UserAlias
+	// user.Password = req.Password
+	user.Email = req.Email
+	user.EditUser = ctx.UserName()
+	if err = m.userDAO.UpdateByName(user,
+		models.USER_COLUMN_USER_ALIAS,
+		models.USER_COLUMN_REMARK,
+		// models.USER_COLUMN_PASSWORD,
+		models.USER_COLUMN_EMAIL,
+		models.USER_COLUMN_PHONE_NUMBER,
+		models.USER_COLUMN_EDIT_USER); err != nil {
+
+		log.Errorf("%s", err)
+		return itypes.NewBizCodeDatabaseError()
+	}
+
+	if strings.TrimSpace(req.RoleName) == "" {
+		return itypes.BizOK
+	}
+
+	if req.UserName == ctx.UserName() {
+		err = log.Errorf("can't edit own role")
+		return itypes.NewBizCode(itypes.CODE_ACCESS_DENY, err.Error())
+	}
+	if user.IsAdmin {
+		err = log.Errorf("user %s is supper administrator", user.UserName)
+		return itypes.NewBizCode(itypes.CODE_ACCESS_DENY, err.Error())
+	}
+
+	role, err := m.roleDAO.SelectRoleByName(req.RoleName)
+	if err != nil {
+		err = log.Errorf("select role by name error [%s]", err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+	if role == nil || role.GetId() == 0 {
+		err = log.Errorf("role name [%s] not found", req.RoleName)
+		return itypes.NewBizCode(itypes.CODE_NOT_FOUND, err.Error())
+	}
+
+	// update the user's role bindings
+	for _, userRole := range userRoleDo {
+		m.cas.DeleteUserRole(userRole.UserName, userRole.RoleName)
+		m.cas.AddUserRole(userRole.UserName, req.RoleName)
+	}
+	// upsert the user's new platform role
+	if err = m.userRoleDAO.Upsert(&models.UserRoleDO{
+		UserName:   user.GetUserName(),
+		RoleName:   req.RoleName,
+		Deleted:    false,
+		CreateUser: ctx.UserName(),
+		EditUser:   ctx.UserName(),
+	}, models.USER_ROLE_COLUMN_ROLE_NAME, models.USER_ROLE_COLUMN_EDIT_USER); err != nil {
+
+		log.Errorf(err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+	return itypes.BizOK
+}
+
+func (m *PlatformCore) ResetUserPassword(ctx *itypes.Context, req *proto.PlatformResetPasswordReq) (code itypes.BizCode) {
+	var err error
+	var user *models.UserDO
+
+	if user, code = m.GetUserByName(ctx, req.UserName); !code.Ok() {
+		log.Errorf("reset user password return code [%v]", code)
+		return code
+	}
+	user.EditUser = ctx.UserName()
+	user.Password = req.NewPassword
+	if err = m.userDAO.UpdateByName(user, models.USER_COLUMN_PASSWORD, models.USER_COLUMN_EDIT_USER); err != nil {
+		log.Error(err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+	return itypes.BizOK
+}
+
+func (m *PlatformCore) EnableUser(ctx *itypes.Context, req *proto.PlatformEnableUserReq) (r *proto.PlatformEnableUserResp, code itypes.BizCode) {
+	if err := m.userDAO.UpdateUserState(req.UserName, dao.UserState_Enabled); err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+	return &proto.PlatformEnableUserResp{}, itypes.BizOK
+}
+
+func (m *PlatformCore) DisableUser(ctx *itypes.Context, req *proto.PlatformDisableUserReq) (r *proto.PlatformDisableUserResp, code itypes.BizCode) {
+	if err := m.userDAO.UpdateUserState(req.UserName, dao.UserState_Disabled); err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+	return &proto.PlatformDisableUserResp{}, itypes.BizOK
+}
+
+func (m *PlatformCore) DeleteUser(ctx *itypes.Context, req *proto.PlatformDeleteUserReq) (code itypes.BizCode) {
+	var err error
+	var user *models.UserDO
+	if req.UserName == "" {
+		err = log.Errorf("user name to delete is nil")
+		return itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+	}
+
+	if user, code = m.GetUserByName(ctx, req.UserName); !code.Ok() {
+		log.Errorf("delete user return code [%v] ", code)
+		return code
+	}
+
+	if user.GetId() == 0 {
+		err = log.Errorf("user %s not found", req.UserName)
+		return itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+	}
+
+	if user.IsAdmin {
+		err = log.Errorf("user %s is supper administrator", req.UserName)
+		return itypes.NewBizCode(itypes.CODE_ACCESS_DENY, err.Error())
+	}
+
+	if err = m.userRoleDAO.Delete(&models.UserRoleDO{
+		UserName: req.UserName,
+		EditUser: ctx.UserName(),
+		Deleted:  true,
+	}); err != nil {
+		log.Errorf("delete role by user name error [%s]", err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+	m.cas.DeleteUser(req.UserName)
+	user.Deleted = true
+	user.EditUser = ctx.UserName()
+	if err = m.userDAO.DeleteUser(user); err != nil {
+		log.Errorf("delete user error [%s]", err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+	return itypes.BizOK
+}
+
+func (m *PlatformCore) DeleteUsers(ctx *itypes.Context, req *proto.PlatformDeleteUsersReq) (code itypes.BizCode) {
+	var err error
+	for _, name := range req.UserNames {
+		if name == "" {
+			err = log.Errorf("user name to delete is nil")
+			return itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+		}
+	}
+
+	dos, err := m.userDAO.SelectUsersByNames(req.UserNames)
+	if err != nil {
+		err = log.Errorf("get users error [%v] ", err.Error())
+		return itypes.NewBizCode(itypes.CODE_ERROR, err.Error())
+	}
+
+	if len(dos) == 0 || len(dos) != len(req.UserNames) {
+		err = log.Errorf("user count not match with request")
+		return itypes.NewBizCode(itypes.CODE_ERROR, err.Error())
+	}
+	for _, do := range dos {
+		m.DeleteUser(ctx, &proto.PlatformDeleteUserReq{UserName: do.UserName})
+	}
+	return itypes.BizOK
+}
+
+func (m *PlatformCore) GetUserByName(ctx *itypes.Context, strUserName string) (do *models.UserDO, code itypes.BizCode) {
+
+	var err error
+	do, err = m.userDAO.SelectUserByName(strUserName)
+	if err != nil {
+		log.Errorf(err.Error())
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+
+	if do == nil || do.GetId() == 0 {
+		log.Errorf("user [%s] not found", strUserName)
+		return nil, itypes.NewBizCode(itypes.CODE_NOT_FOUND)
+	}
+	return do, itypes.BizOK
+}
+
+func (m *PlatformCore) ListRole(ctx *itypes.Context, req *proto.PlatformListRoleReq) (roleLists []*proto.PlatformSysRole, total int64, code itypes.BizCode) {
+	var err error
+	log.Debugf("request processing...")
+	var roles = make([]*proto.PlatformRole, 0)
+	roleLists = make([]*proto.PlatformSysRole, 0)
+	if roles, total, err = m.roleDAO.SelectPlatformRoles(req.PageNo, req.PageSize, req.RoleName); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	for _, role := range roles {
+		privileges := m.cas.GetRoleAuthority(role.RoleName)
+		roleLists = append(roleLists, &proto.PlatformSysRole{
+			Id:          role.Id,
+			RoleName:    role.RoleName,
+			RoleAlias:   role.RoleAlias,
+			CreateUser:  role.CreateUser,
+			IsInherent:  role.IsInherent,
+			Remark:      role.Remark,
+			CreatedTime: role.CreatedTime,
+			Privileges:  privileges,
+		})
+	}
+	return
+}
+
+func (m *PlatformCore) CheckUserPassword(ctx *itypes.Context, strUserName, strPassword string) (ok bool, code itypes.BizCode) {
+	var do *models.UserDO
+
+	if do, code = m.GetUserByName(ctx, strUserName); !code.Ok() {
+		log.Errorf("check user password return code [%v]", code)
+		return
+	}
+	if strPassword != do.Password {
+		return false, itypes.BizOK
+	}
+	return true, itypes.BizOK
+}
+
+func (m *PlatformCore) CreateRole(ctx *itypes.Context, req *proto.PlatformCreateRoleReq) (code itypes.BizCode) {
+	var err error
+	var exist bool
+
+	if exist, err = m.roleDAO.CheckRoleExistByName(req.RoleName); err != nil {
+		log.Errorf(err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+
+	if exist {
+		log.Errorf("role name [%s] already exist", req.RoleName)
+		return itypes.NewBizCode(itypes.CODE_ALREADY_EXIST)
+	}
+
+	if _, err = m.roleDAO.Insert(&models.RoleDO{
+		RoleName:   req.RoleName,
+		CreateUser: ctx.UserName(),
+		EditUser:   ctx.UserName(),
+		Remark:     req.Remark,
+	}); err != nil {
+		log.Errorf(err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+	return itypes.BizOK
+}
+
+func (m *PlatformCore) EditRole(ctx *itypes.Context, req *proto.PlatformEditRoleReq) (code itypes.BizCode) {
+	var err error
+	var ok bool
+	var do *models.RoleDO
+	// look up the role record
+	if do, err = m.roleDAO.SelectRoleById(req.Id); err != nil {
+		log.Errorf(err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+	if do == nil || do.GetId() == 0 {
+		err = log.Errorf("role is not found")
+		return itypes.NewBizCode(itypes.CODE_ERROR, err.Error())
+	}
+	// if the role name is unchanged, just update the remark
+	if do.RoleName == req.RoleName {
+		do.Remark = req.Remark
+		if err = m.roleDAO.Update(do,
+			models.ROLE_COLUMN_ROLE_NAME,
+			models.ROLE_COLUMN_REMARK); err != nil {
+			log.Errorf(err.Error())
+			return itypes.NewBizCodeDatabaseError()
+		}
+		return itypes.BizOK
+	}
+	// check whether the new role name already exists
+	if ok, err = m.roleDAO.CheckRoleExistByName(req.RoleName); err != nil {
+		log.Errorf(err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+	if ok {
+		log.Errorf("role name [%s] already exist", req.RoleName)
+		return itypes.NewBizCode(itypes.CODE_ALREADY_EXIST, "role name already exist")
+	}
+	// the new role name inherits all privileges of the old one
+	m.cas.InheritRoleAuthority(req.RoleName, do.RoleName)
+	// on rename, users inherit the new role and the old role is removed
+	m.cas.InheritUserRole(req.RoleName, do.RoleName)
+	m.cas.DeleteRole(do.RoleName)
+	// update the user-role table
+	var dos = make([]*models.UserRoleDO, 0)
+	if dos, err = m.userRoleDAO.SelectUserByRole(do.RoleName); err != nil {
+		log.Errorf(err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+	for _, doRole := range dos {
+		doRole.RoleName = req.RoleName
+		err = m.userRoleDAO.UpdateUserById(doRole)
+		if err != nil {
+			log.Errorf(err.Error())
+			continue
+		}
+	}
+
+	do.RoleName = req.RoleName
+	do.Remark = req.Remark
+	// update the role table
+	if err = m.roleDAO.Update(do,
+		models.ROLE_COLUMN_ROLE_NAME,
+		models.ROLE_COLUMN_REMARK,
+	); err != nil {
+		log.Errorf(err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+	return itypes.BizOK
+}
+
+func (m *PlatformCore) DeleteRole(ctx *itypes.Context, req *proto.PlatformDeleteRoleReq) (code itypes.BizCode) {
+	var exist bool
+	var err error
+	// the super administrator role cannot be deleted
+	if req.RoleName == InherentAdminRole {
+		err = log.Errorf("role name [%s] is the super administrator role", req.RoleName)
+		return itypes.NewBizCode(itypes.CODE_ACCESS_DENY, err.Error())
+	}
+
+	if exist, err = m.roleDAO.CheckRoleExistByName(req.RoleName); err != nil {
+		log.Errorf(err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+	if !exist {
+		log.Warnf("role name [%s] is not found", req.RoleName)
+		return itypes.NewBizCode(itypes.CODE_NOT_FOUND, "role name is not found")
+	}
+
+	var dos = make([]*models.UserRoleDO, 0)
+	if dos, err = m.userRoleDAO.SelectUserByRole(req.RoleName); err != nil {
+		log.Errorf(err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+	if len(dos) > 0 {
+		log.Warnf("role: %s had %d user", req.RoleName, len(dos))
+		return itypes.NewBizCode(itypes.CODE_ACCESS_VIOLATE, "role has too many users to delete")
+	}
+	// delete the role
+	m.cas.DeleteRole(req.RoleName)
+
+	if err = m.roleDAO.Delete(&models.RoleDO{
+		RoleName: req.RoleName,
+		Deleted:  true,
+		EditUser: ctx.UserName(),
+	}); err != nil {
+		log.Errorf(err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+	return itypes.BizOK
+}
+
+func (m *PlatformCore) AuthRole(ctx *itypes.Context, req *proto.PlatformAuthRoleReq) (code itypes.BizCode) {
+	var err error
+	// the super administrator role's privileges cannot be changed; it must always hold all privileges
+	if req.RoleName == InherentAdminRole {
+		err = log.Errorf("role name [%s] is the super administrator role", req.RoleName)
+		return itypes.NewBizCode(itypes.CODE_ACCESS_DENY, err.Error())
+	}
+	role, err := m.roleDAO.SelectRoleByName(req.RoleName)
+	if err != nil {
+		err = log.Errorf("get role error [%s]", err.Error())
+		return itypes.NewBizCodeDatabaseError()
+	}
+	if role == nil || role.GetId() == 0 {
+		err = log.Errorf("role is not found")
+		return itypes.NewBizCode(itypes.CODE_ERROR, err.Error())
+	}
+
+	if len(req.Privilege) == 0 {
+		m.cas.DeleteRole(req.RoleName)
+		return itypes.BizOK
+	}
+	// fetch the users currently holding this role
+	res, err := m.cas.GetUsersForRole(req.RoleName)
+	if err != nil {
+		log.Errorf("get users for role:%s error:%s", req.RoleName, err.Error())
+	}
+	m.cas.DeleteRole(req.RoleName)
+	for _, authority := range req.Privilege {
+		role := req.RoleName
+		path := m.cas.GetPrivilegePath(authority)
+		if strings.EqualFold(path, "") {
+			log.Errorf("unknown authority : %s", authority)
+			continue
+		}
+		// grant the privilege to the role
+		m.cas.AddRoleAuthority(role, path, authority)
+		if authority == privilege.UserAdd || authority == privilege.UserEdit {
+			strRolePath := m.cas.GetPrivilegePath(privilege.RoleAccess)
+			m.cas.AddRoleAuthority(role, strRolePath, privilege.RoleAccess) // user add/edit privileges implicitly carry role read access
+		}
+	}
+	for _, user := range res {
+		// re-attach the role to its users
+		m.cas.AddUserRole(user, req.RoleName)
+	}
+	return itypes.BizOK
+}
+
+// InquireAuth returns the roles held by an account, or the privileges held by a role.
+func (m *PlatformCore) InquireAuth(ctx *itypes.Context, req *proto.PlatformInquireAuthReq) (auth []string, code itypes.BizCode) {
+	auth = make([]string, 0)
+	switch req.NameType {
+	case proto.TypeUser:
+		auth = m.cas.GetUserRoleList(req.Name)
+	case proto.TypeRole:
+		auth = m.cas.GetRoleAuthority(req.Name)
+	default:
+		return auth, itypes.NewBizCode(itypes.CODE_TYPE_UNDEFINED, "auth type not defined")
+	}
+	return auth, itypes.BizOK
+}
+
+func (m *PlatformCore) UserQuery(ctx *itypes.Context, req *proto.PlatformUserQueryReq) (userList proto.PlatformUserQueryResp, total int64, code itypes.BizCode) {
+	var err error
+	var userName = make([]*models.UserDO, 0)
+	userList = proto.PlatformUserQueryResp{
+		NameList: make([]string, 0),
+	}
+	if userName, total, err = m.userDAO.SelectUserName(req); err != nil {
+		log.Errorf("select user name list error:%s\n", err.Error())
+		return
+	}
+	for _, name := range userName {
+		userList.NameList = append(userList.NameList, name.UserName)
+	}
+	return
+}
+
+func (m *PlatformCore) PrivilegeTree(ctx *itypes.Context, req *proto.PlatformPrivilegeTreeReq) (resp *proto.PlatformPrivilegeTreeResp, code itypes.BizCode) {
+	return &proto.PlatformPrivilegeTreeResp{
+		TreeList: m.cas.GetAllPrivileges(),
+	}, itypes.BizOK
+}
+
+func (m *PlatformCore) GetEmailConfig(ctx *itypes.Context, req *proto.PlatformGetEmailConfigReq) (cfg *email.EmailConfig, code itypes.BizCode) {
+	var err error
+	cfg, err = m.dictionaryDAO.SelectEmailConfig()
+	if err != nil {
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+	}
+	return cfg, itypes.BizOK
+}
+
+func (m *PlatformCore) SetEmailConfig(ctx *itypes.Context, req *proto.PlatformSetEmailConfigReq) (resp *proto.PlatformSetEmailConfigResp, code itypes.BizCode) {
+	if !utils.VerifyEmailFormat(req.SmtpName) {
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, "malformed email address")
+	}
+	var err error
+	emailServerDo := &models.DictionaryDO{
+		Name:        dao.Dictionary_Name_Email_Server,
+		ConfigKey:   dao.Dictionary_Key_Email_Server,
+		ConfigValue: req.SmtpServer,
+		Remark:      dao.Dictionary_Remark_Email_Server,
+		Deleted:     false,
+	}
+	err = m.dictionaryDAO.Upsert(emailServerDo)
+	if err != nil {
+		log.Errorf("emailServerDo error:%s", err.Error())
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+	portDo := &models.DictionaryDO{
+		Name:        dao.Dictionary_Name_Email_Port,
+		ConfigKey:   dao.Dictionary_Key_Email_Port,
+		ConfigValue: req.SmtpPort,
+		Remark:      dao.Dictionary_Remark_Email_Port,
+		Deleted:     false,
+	}
+	err = m.dictionaryDAO.Upsert(portDo)
+	if err != nil {
+		log.Errorf("portDo error:%s", err.Error())
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+	emailNameDo := &models.DictionaryDO{
+		Name:        dao.Dictionary_Name_Email_Name,
+		ConfigKey:   dao.Dictionary_Key_Email_Name,
+		ConfigValue: req.SmtpName,
+		Remark:      dao.Dictionary_Remark_Email_Name,
+		Deleted:     false,
+	}
+	err = m.dictionaryDAO.Upsert(emailNameDo)
+	if err != nil {
+		log.Errorf("emailServerDo error:%s", err.Error())
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+	autoCodeDo := &models.DictionaryDO{
+		Name:        dao.Dictionary_Name_Email_Auth_Code,
+		ConfigKey:   dao.Dictionary_Key_Email_Auth_Code,
+		ConfigValue: req.AuthCode,
+		Remark:      dao.Dictionary_Remark_Email_Auth_Code,
+		Deleted:     false,
+	}
+	err = m.dictionaryDAO.Upsert(autoCodeDo)
+	if err != nil {
+		log.Errorf("autoCodeDo error:%s\n", err.Error())
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+	SendNameDo := &models.DictionaryDO{
+		Name:        dao.Dictionary_Name_Email_Send_Name,
+		ConfigKey:   dao.Dictionary_Key_Email_Send_Name,
+		ConfigValue: req.SendName,
+		Remark:      dao.Dictionary_Remark_Email_Send_Name,
+		Deleted:     false,
+	}
+	err = m.dictionaryDAO.Upsert(SendNameDo)
+	if err != nil {
+		log.Errorf("emailServerDo error:%s\n", err.Error())
+		return nil, itypes.NewBizCodeDatabaseError()
+	}
+	return &proto.PlatformSetEmailConfigResp{}, itypes.BizOK
+}
+
+func (m *PlatformCore) CheckExist(ctx *itypes.Context, req *proto.PlatformCheckExistReq) (code itypes.BizCode) {
+	switch req.CheckType {
+	case itypes.CheckType_UserName:
+		{
+			if do, err := m.userDAO.SelectUserByName(req.Name); err != nil {
+				log.Errorf(err.Error())
+				return itypes.NewBizCodeDatabaseError()
+			} else if do != nil && do.GetId() != 0 {
+				return itypes.NewBizCode(itypes.CODE_ALREADY_EXIST)
+			}
+		}
+	case itypes.CheckType_UserPhoneNumber:
+		{
+			if do, err := m.userDAO.SelectUserByPhone(req.Name); err != nil {
+				log.Errorf(err.Error())
+				return itypes.NewBizCodeDatabaseError()
+			} else if do != nil && do.GetId() != 0 {
+				return itypes.NewBizCode(itypes.CODE_ALREADY_EXIST)
+			}
+		}
+	case itypes.CheckType_UserEmail:
+		{
+			if do, err := m.userDAO.SelectUserByEmail(req.Name); err != nil {
+				log.Errorf(err.Error())
+				return itypes.NewBizCodeDatabaseError()
+			} else if do != nil && do.GetId() != 0 {
+				return itypes.NewBizCode(itypes.CODE_ALREADY_EXIST)
+			}
+		}
+	case itypes.CheckType_RoleName:
+		{
+			if do, err := m.roleDAO.SelectRoleByName(req.Name); err != nil {
+				log.Errorf(err.Error())
+				return itypes.NewBizCodeDatabaseError()
+			} else if do != nil && do.GetId() != 0 {
+				return itypes.NewBizCode(itypes.CODE_ALREADY_EXIST)
+			}
+		}
+	}
+	return itypes.NewBizCode(itypes.CODE_NOT_FOUND)
+}
+
+func (m *PlatformCore) ListRoleUser(ctx *itypes.Context, req *proto.PlatformListRoleUserReq) (users []*proto.PlatformUser, total int64, code itypes.BizCode) {
+	var err error
+
+	if users, total, err = m.userRoleDAO.SelectRoleUsers(req.RoleName, req.PageNo, req.PageSize); err != nil {
+		log.Errorf(err.Error())
+		return nil, 0, itypes.NewBizCodeDatabaseError()
+	}
+	return
+}
+
+func (m *PlatformCore) ListOperLog(ctx *itypes.Context, req *proto.PlatformListOperLogReq) (list []*proto.OperLog, total int64, code itypes.BizCode) {
+	var err error
+	_ = err
+	return
+}
+
+func (m *PlatformCore) UploadFile(c *gin.Context) (resp *proto.UploadFileResp, code itypes.BizCode) {
+
+	var err error
+	fileHeader, err := c.FormFile(proto.UploadFileData)
+	if err != nil {
+		err = log.Errorf("form file key [%s] read error [%s]", proto.UploadFileData, err.Error())
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+	}
+	log.Infof("file data name:[%s],size:[%d]", fileHeader.Filename, fileHeader.Size)
+
+	strFileTime := fmt.Sprintf("%v", time.Now().UnixNano())
+	strFileName, ok := c.GetPostForm(proto.UploadFileName)
+	if !ok {
+		log.Infof("form key [%s] not found", proto.UploadFileName)
+		// err := log.Errorf("form key [%s] not found", proto.UploadFileName)
+		// return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+		strFileName = strFileTime + strings.Replace(strings.TrimSpace(fileHeader.Filename), " ", "", -1)
+	} else {
+		strFileName = strFileTime + strings.Replace(strings.TrimSpace(strFileName), " ", "", -1)
+	}
+
+	var dir string
+	if strings.HasSuffix(m.cfg.ImagePath, "/") {
+		dir = m.cfg.ImagePath
+	} else {
+		dir = m.cfg.ImagePath + "/"
+	}
+
+	if err = c.SaveUploadedFile(fileHeader, dir+strFileName); err != nil {
+		err = log.Errorf("save file error [%s]", err.Error())
+		return nil, itypes.NewBizCode(itypes.CODE_ERROR, err.Error())
+	}
+
+	strFileUrl := m.cfg.ImagePrefix + "/" + strFileName
+	return &proto.UploadFileResp{
+		FileName: strFileName,
+		FileUrl:  strFileUrl,
+	}, itypes.BizOK
+}
+
+func (m *PlatformCore) UploadFileByBase64(c *gin.Context, req *proto.UploadFileReq) (resp *proto.UploadFileResp, code itypes.BizCode) {
+	if strings.TrimSpace(req.FileName) == "" {
+		err := log.Errorf("form key [%s] not found", proto.UploadFileName)
+		return nil, itypes.NewBizCode(itypes.CODE_INVALID_PARAMS, err.Error())
+	}
+
+	if strings.Contains(req.FileData, ";base64,") {
+		s := strings.Split(req.FileData, ",")
+		req.FileData = s[1]
+	}
+
+	log.Truncate(log.LEVEL_INFO, 1000, "after remove suffix: [%s]", req.FileData)
+	extension := path.Ext(req.FileName)
+	extension = strings.ToLower(extension)
+	switch extension {
+	case ".jpg", ".jpeg", ".png", ".gif", ".bmp", ".webp":
+	// case ".png":
+
+	default:
+		// unknown extension: keep the base name and coerce to .png (the image is re-encoded as PNG below)
+		req.FileName = strings.TrimSuffix(req.FileName, extension) + ".png"
+	}
+
+	// req.FileData = strings.Replace(req.FileData, "data:image/png;base64,", "", 1)
+
+	strFileTime := fmt.Sprintf("%v", time.Now().UnixNano())
+	strFileName := strFileTime + strings.Replace(strings.TrimSpace(req.FileName), " ", "", -1)
+
+	// decode the Base64 payload
+	decodedData, err := base64.StdEncoding.DecodeString(req.FileData)
+	if err != nil {
+		err = log.Errorf("Failed to decode Base64 data:[%s]", err.Error())
+		return nil, itypes.NewBizCode(itypes.CODE_ERROR, err.Error())
+	}
+
+	// decode the raw bytes into an image
+	img, _, err := image.Decode(bytes.NewReader(decodedData))
+	if err != nil {
+		err = log.Errorf("create image error: [%s]", err.Error())
+		return nil, itypes.NewBizCode(itypes.CODE_ERROR, err.Error())
+	}
+
+	var dir string
+	if strings.HasSuffix(m.cfg.ImagePath, "/") {
+		dir = m.cfg.ImagePath
+	} else {
+		dir = m.cfg.ImagePath + "/"
+	}
+	log.Infof("strFileName:[%s]", dir+strFileName)
+
+	// save the image to disk
+	dst := dir + strFileName
+
+	err = os.MkdirAll(filepath.Dir(dst), 0750)
+	if err != nil {
+		err = log.Errorf("create directory error: [%s]", err.Error())
+		return nil, itypes.NewBizCode(itypes.CODE_ERROR, err.Error())
+	}
+
+	// out, err := os.Create(dst)
+	out, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR, os.FileMode(0644))
+	if err != nil {
+		err = log.Errorf("create file error: [%s]", err.Error())
+		return nil, itypes.NewBizCode(itypes.CODE_ERROR, err.Error())
+	}
+
+	defer out.Close()
+
+	// encode the image as PNG
+	err = png.Encode(out, img)
+	if err != nil {
+		err = log.Errorf("save file error: [%s]", err.Error())
+		return nil, itypes.NewBizCode(itypes.CODE_ERROR, err.Error())
+	}
+
+	strFileUrl := m.cfg.ImagePrefix + "/" + strFileName
+	return &proto.UploadFileResp{
+		FileName: strFileName,
+		FileUrl:  strFileUrl,
+	}, itypes.BizOK
+}
+
+func DecodePng(base64Png string) (*bytes.Buffer, error) {
+	out := bytes.NewBuffer([]byte{})
+	unBased, err := base64.StdEncoding.DecodeString(base64Png)
+	if err != nil {
+		return nil, err
+	}
+	im, err := png.Decode(bytes.NewReader(unBased))
+	if err != nil {
+		return nil, err
+	}
+	if err = png.Encode(out, im); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
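+
+// Usage sketch (illustrative): DecodePng round-trips a base64 PNG through
+// decode/encode, which validates the payload and strips anything that is not
+// part of the PNG image data.
+//
+//	buf, err := DecodePng(strBase64Png)
+//	if err != nil {
+//		return err
+//	}
+//	_ = os.WriteFile("out.png", buf.Bytes(), 0644)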
diff --git a/pkg/dal/core/core_scheduler.go b/pkg/dal/core/core_scheduler.go
new file mode 100644
index 0000000..f866eba
--- /dev/null
+++ b/pkg/dal/core/core_scheduler.go
@@ -0,0 +1,280 @@
+package core
+
+import (
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"github.com/go-co-op/gocron"
+	"intent-system/pkg/config"
+	"intent-system/pkg/dal/dao"
+	"intent-system/pkg/dal/models"
+	"intent-system/pkg/email"
+	"intent-system/pkg/utils"
+	"time"
+)
+
+const (
+	syncNewsAiIntervalDuration     = "30s" // sync interval for AI-generated news articles
+	syncNewsSpiderIntervalDuration = "30s" // sync interval for spider-crawled news articles
+	syncQnaAiIntervalDuration      = "30s" // sync interval for AI-generated Q&A data
+)
+
+type SchedulerCore struct {
+	my              *sqlca.Engine
+	pg              *sqlca.Engine
+	cfg             *config.Config
+	cron            *gocron.Scheduler
+	newsDAO         *dao.NewsDAO
+	newsSpiderDAO   *dao.NewsSpiderDAO
+	wkSpiderNewsDAO *dao.WkSpiderNewsDAO
+	wkAigNewsDAO    *dao.WkAigNewsDAO
+	wkSpiderQnaDAO  *dao.WkSpiderQnaDAO
+	wkAigQnaDAO     *dao.WkAigQnaDAO
+	subscriberDAO   *dao.SubscriberDAO
+	subDAO          *dao.NewsSubscribeDAO
+	dictionaryDAO   *dao.DictionaryDAO
+	qaDAO           *dao.QuestionAnswerDAO
+}
+
+func NewSchedulerCore(cfg *config.Config, my, pg *sqlca.Engine) *SchedulerCore {
+	scheduler := &SchedulerCore{
+		my:              my,
+		pg:              pg,
+		cfg:             cfg,
+		cron:            gocron.NewScheduler(time.Local),
+		wkAigNewsDAO:    dao.NewWkAigNewsDAO(pg),
+		wkSpiderNewsDAO: dao.NewWkSpiderNewsDAO(pg),
+		wkAigQnaDAO:     dao.NewWkAigQnaDAO(pg),
+		wkSpiderQnaDAO:  dao.NewWkSpiderQnaDAO(pg),
+		newsDAO:         dao.NewNewsDAO(my),
+		newsSpiderDAO:   dao.NewNewsSpiderDAO(my),
+		subscriberDAO:   dao.NewSubscriberDAO(my),
+		subDAO:          dao.NewNewsSubscribeDAO(my),
+		dictionaryDAO:   dao.NewDictionaryDAO(my),
+		qaDAO:           dao.NewQuestionAnswerDAO(my),
+	}
+	return scheduler.runSchedule()
+}
+
+func (m *SchedulerCore) runSchedule() *SchedulerCore {
+	_, err := m.cron.Every(syncNewsSpiderIntervalDuration).Do(func() {
+		m.runCronTaskSyncNewsSpider()
+	})
+	if err != nil {
+		log.Panic("start crontab task error [%s]", err.Error())
+	}
+	_, err = m.cron.Every(syncNewsAiIntervalDuration).Do(func() {
+		m.runCronTaskSyncNewsAI(models.Language_EN, models.TableNameWkAigNews)
+		m.runCronTaskSyncNewsAI(models.Language_CN, models.TableNameWkAigNewsChs)
+	})
+	if err != nil {
+		log.Panic("start crontab task error [%s]", err.Error())
+	}
+	_, err = m.cron.Every(syncQnaAiIntervalDuration).Do(func() {
+		m.runCronTaskSyncQnaAI(models.Language_EN, models.TableNameWkAigQna)
+		m.runCronTaskSyncQnaAI(models.Language_CN, models.TableNameWkAigQnaChs)
+	})
+	if err != nil {
+		log.Panic("start crontab task error [%s]", err.Error())
+	}
+	_, err = m.cron.Cron(m.cfg.SubCron).Do(func() {
+		m.runCronTaskSubscribe()
+	})
+	if err != nil {
+		log.Panic("start crontab task error [%s]", err.Error())
+	}
+	m.cron.StartAsync()
+	return m
+}
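+
+// SubCron is a standard five-field cron expression evaluated in local time,
+// e.g. "0 9 * * *" to run the subscription push daily at 09:00.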
+
+func (m *SchedulerCore) runCronTaskSubscribe() {
+	dos, err := m.subscriberDAO.QueryByCondition(map[string]interface{}{
+		models.SUBSCRIBER_COLUMN_IS_DELETED: 0,
+	})
+	if err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	if len(dos) == 0 {
+		return //no subscriber
+	}
+	var newses []*models.NewsSubscribeDO
+	newses, err = m.subDAO.QueryByCondition(map[string]interface{}{
+		models.NEWS_SUBSCRIBE_COLUMN_IS_PUSHED:  0,
+		models.NEWS_SUBSCRIBE_COLUMN_IS_DELETED: 0,
+	})
+	if err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	for _, news := range newses {
+		m.pushNewsToSubscribers(news, dos)
+		time.Sleep(30 * time.Second)
+	}
+}
+
+func (m *SchedulerCore) runCronTaskSyncNewsAI(language models.LanguageType, strTable string) {
+	lastId, err := m.newsDAO.QueryMaxSyncId(language)
+	if err != nil {
+		log.Errorf("query max news id from mysql error [%s]", err.Error())
+		return
+	}
+	var dos []*models.WkAigNewsDO
+	dos, err = m.wkAigNewsDAO.QueryLatest(strTable, lastId)
+	if err != nil {
+		log.Errorf("sync from postgresql database error [%s]", err.Error())
+		return
+	}
+	for _, do := range dos {
+		var tags = []string{do.Tag}
+		if do.Summary == "" {
+			do.Summary = do.SubTitle
+		}
+		_, err = m.newsDAO.Insert(&models.NewsDO{
+			OrgId:       do.Id,
+			SpiderId:    do.OrgId,
+			Tag:         do.Tag,
+			Tags:        tags,
+			Category:    do.Category,
+			MainTitle:   do.MainTitle,
+			SubTitle:    do.SubTitle,
+			Summary:     do.Summary,
+			Keywords:    do.Keywords,
+			SeoKeywords: do.SeoKeywords,
+			Url:         do.Url,
+			ImageUrl:    do.ImageUrl,
+			Content:     do.Content,
+			IsHotspot:   do.IsHotspot,
+			IsDeleted:   false,
+			Language:    language,
+			DataTime:    utils.FormatTimestampTZ(do.CreatedTime),
+		})
+		if err != nil {
+			log.Errorf("insert database error [%s]", err.Error())
+			return
+		}
+	}
+}
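+
+// The sync is incremental: QueryMaxSyncId returns the largest source id that
+// has already been copied into MySQL, and QueryLatest then fetches only the
+// PostgreSQL rows beyond it, so each 30s tick transfers just the delta.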
+
+func (m *SchedulerCore) runCronTaskSyncNewsSpider() {
+	lastId, err := m.newsSpiderDAO.QueryMaxSyncId()
+	if err != nil {
+		log.Errorf("query max news id from mysql error [%s]", err.Error())
+		return
+	}
+	var dos []*models.WkSpiderNewsDO
+	dos, err = m.wkSpiderNewsDAO.QueryLatest(lastId)
+	if err != nil {
+		log.Errorf("sync from postgresql database error [%s]", err.Error())
+		return
+	}
+	for _, do := range dos {
+		_, err = m.newsSpiderDAO.Insert(&models.NewsSpiderDO{
+			OrgId:       do.Id,
+			Tag:         do.Tag,
+			Category:    do.Category,
+			MainTitle:   do.MainTitle,
+			SubTitle:    do.SubTitle,
+			Summary:     do.Summary,
+			Keywords:    do.Keywords,
+			SeoKeywords: do.SeoKeywords,
+			Url:         do.Url,
+			ImageUrl:    do.ImageUrl,
+			Content:     do.Content,
+			IsHotspot:   do.IsHotspot,
+			IsDeleted:   false,
+		})
+		if err != nil {
+			log.Errorf("insert database error [%s]", err.Error())
+			return
+		}
+	}
+}
+
+func (m *SchedulerCore) runCronTaskSyncQnaAI(language models.LanguageType, strTable string) {
+	lastId, err := m.qaDAO.QueryMaxSyncId(language)
+	if err != nil {
+		log.Errorf("query max news id from mysql error [%s]", err.Error())
+		return
+	}
+	var dos []*models.WkAigQnaDO
+	dos, err = m.wkAigQnaDAO.QueryLatest(strTable, lastId)
+	if err != nil {
+		log.Errorf("sync from postgresql database error [%s]", err.Error())
+		return
+	}
+	for _, do := range dos {
+		_, err = m.qaDAO.Insert(&models.QuestionAnswerDO{
+			OrgId:     do.Id,
+			Question:  do.Question,
+			Answer:    do.Answer,
+			IsDeleted: false,
+			Language:  language,
+			DataTime:  utils.FormatTimestampTZ(do.CreatedTime),
+		})
+		if err != nil {
+			log.Errorf("insert database error [%s]", err.Error())
+			return
+		}
+	}
+}
+
+func (m *SchedulerCore) pushNewsToSubscribers(sub *models.NewsSubscribeDO, subscribers []*models.SubscriberDO) {
+	var err error
+	var toAddrs []string
+
+	cfg, err := m.dictionaryDAO.SelectEmailConfig()
+	if err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	news, err := m.newsDAO.QueryById(sub.NewsId)
+	if err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+
+	for _, s := range subscribers {
+		if len(s.Tags) == 0 {
+			toAddrs = append(toAddrs, s.Email)
+		} else {
+			if m.isContain(news.Tags, s.Tags) {
+				toAddrs = append(toAddrs, s.Email)
+			}
+		}
+	}
+
+	strMessage := email.SubscriptionMessage(sub.NewsSubject, news.MainTitle, news.SubTitle, news.Summary, sub.NewsUrl)
+	log.Debugf("sending email url [%s] subject [%s] content [%s] to %+v message [%s]", sub.NewsUrl, sub.NewsSubject, sub.NewsUrl, toAddrs, strMessage)
+	err = email.SendEmailBcc(cfg, sub.NewsSubject, strMessage, toAddrs...)
+	if err != nil {
+		log.Errorf("send subscribe email error [%s]", err.Error())
+		return
+	}
+	_, err = m.newsDAO.Update(&models.NewsDO{
+		Id:    sub.NewsId,
+		State: dao.NewsState_Pushed,
+	}, models.NEWS_COLUMN_STATE)
+	if err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	_, err = m.subDAO.Update(&models.NewsSubscribeDO{
+		Id:       sub.Id,
+		IsPushed: true,
+	}, models.NEWS_SUBSCRIBE_COLUMN_IS_PUSHED)
+	if err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+}
+
+func (m *SchedulerCore) isContain(src, dest []string) bool {
+	for _, s := range src {
+		for _, d := range dest {
+			if d == s {
+				return true
+			}
+		}
+	}
+	return false
+}
diff --git a/pkg/dal/dao/casbin_rule.go b/pkg/dal/dao/casbin_rule.go
new file mode 100644
index 0000000..32549ba
--- /dev/null
+++ b/pkg/dal/dao/casbin_rule.go
@@ -0,0 +1,55 @@
+package dao
+
+import (
+	"fmt"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type CasbinRuleDAO struct {
+	db *sqlca.Engine
+}
+
+func NewCasbinRuleDAO(db *sqlca.Engine) *CasbinRuleDAO {
+	return &CasbinRuleDAO{
+		db: db,
+	}
+}
+
+// insert into table by data model
+func (dao *CasbinRuleDAO) Insert(do *models.CasbinRuleDO) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameCasbinRule).Insert()
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *CasbinRuleDAO) Upsert(do *models.CasbinRuleDO, columns ...string) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameCasbinRule).Select(columns...).Upsert()
+}
+
+// update table set columns where id=xxx
+func (dao *CasbinRuleDAO) Update(do *models.CasbinRuleDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameCasbinRule).Select(columns...).Update()
+}
+
+// query records by id
+func (dao *CasbinRuleDAO) QueryById(id interface{}, columns ...string) (do *models.CasbinRuleDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameCasbinRule).Id(id).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+// query records by conditions
+func (dao *CasbinRuleDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.CasbinRuleDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameCasbinRule).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
diff --git a/pkg/dal/dao/customer.go b/pkg/dal/dao/customer.go
new file mode 100644
index 0000000..c205b14
--- /dev/null
+++ b/pkg/dal/dao/customer.go
@@ -0,0 +1,159 @@
+package dao
+
+import (
+	"fmt"
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+const (
+	CustomerState_Enabled  = 1
+	CustomerState_Disabled = 2
+)
+
+type CustomerDAO struct {
+	db *sqlca.Engine
+}
+
+func NewCustomerDAO(db *sqlca.Engine) *CustomerDAO {
+	return &CustomerDAO{
+		db: db,
+	}
+}
+
+// insert into table by data model
+func (dao *CustomerDAO) Insert(do *models.CustomerDO) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameCustomer).Insert()
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *CustomerDAO) Upsert(do *models.CustomerDO, columns ...string) (lastInsertId int64, err error) {
+	if len(columns) == 0 {
+		return 0, fmt.Errorf("no columns to update")
+	}
+	return dao.db.Model(&do).Table(models.TableNameCustomer).Select(columns...).Upsert()
+}
+
+// update table set columns where id=xxx
+func (dao *CustomerDAO) Update(do *models.CustomerDO, columns ...string) (rows int64, err error) {
+	if len(columns) == 0 {
+		return 0, fmt.Errorf("no columns to update")
+	}
+	return dao.db.Model(&do).Table(models.TableNameCustomer).Select(columns...).Update()
+}
+
+func (dao *CustomerDAO) UpdateByEmail(do *models.CustomerDO, columns ...string) (rows int64, err error) {
+	if len(columns) == 0 {
+		return 0, fmt.Errorf("no columns to update")
+	}
+	return dao.db.Model(&do).
+		Table(models.TableNameCustomer).
+		Eq(models.CUSTOMER_COLUMN_EMAIL, do.Email).
+		Eq(models.CUSTOMER_COLUMN_DELETED, 0).
+		Select(columns...).
+		Update()
+}
+
+// query records by id
+func (dao *CustomerDAO) QueryById(id interface{}, columns ...string) (do *models.CustomerDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameCustomer).Id(id).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+// query records by conditions
+func (dao *CustomerDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.CustomerDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameCustomer).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+func (dao *CustomerDAO) QueryList(pageNo, pageSize int, id int32, email string, columns ...string) (dos []*models.CustomerDO, total int64, err error) {
+	e := dao.db.Model(&dos).Table(models.TableNameCustomer).Select(columns...).Page(pageNo, pageSize).Desc(models.CUSTOMER_COLUMN_ID)
+	if id != 0 {
+		e.Eq(models.CUSTOMER_COLUMN_ID, id)
+	}
+	if email != "" {
+		e.Like(models.CUSTOMER_COLUMN_EMAIL, email)
+	}
+	if _, total, err = e.QueryEx(); err != nil {
+		return nil, 0, log.Errorf(err.Error())
+	}
+	return
+}
+
+func (dao *CustomerDAO) UpdateByName(do *models.CustomerDO, columns ...string) (err error) {
+	if _, err = dao.db.Model(do).
+		Table(models.TableNameCustomer).
+		Select(columns...).
+		Exclude(models.CUSTOMER_COLUMN_USER_NAME, models.CUSTOMER_COLUMN_IS_ADMIN, models.CUSTOMER_COLUMN_EMAIL).
+		Eq(models.CUSTOMER_COLUMN_USER_NAME, do.UserName).
+		Update(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+// SelectCustomerByName queries a non-deleted customer by user name.
+func (dao *CustomerDAO) SelectCustomerByName(strName string) (do *models.CustomerDO, err error) {
+	if _, err = dao.db.Model(&do).
+		Table(models.TableNameCustomer).
+		Select(
+			models.CUSTOMER_COLUMN_ID,
+			models.CUSTOMER_COLUMN_USER_NAME,
+			models.CUSTOMER_COLUMN_PASSWORD,
+			models.CUSTOMER_COLUMN_REFERRER,
+			models.CUSTOMER_COLUMN_REFERRAL_CODE,
+			models.CUSTOMER_COLUMN_FIRST_NAME,
+			models.CUSTOMER_COLUMN_LAST_NAME,
+			models.CUSTOMER_COLUMN_TITLE,
+			models.CUSTOMER_COLUMN_COMPANY,
+			models.CUSTOMER_COLUMN_SALT,
+			models.CUSTOMER_COLUMN_USER_ALIAS,
+			models.CUSTOMER_COLUMN_PHONE_NUMBER,
+			models.CUSTOMER_COLUMN_IS_ADMIN,
+			models.CUSTOMER_COLUMN_EMAIL,
+			models.CUSTOMER_COLUMN_ADDRESS,
+			models.CUSTOMER_COLUMN_REMARK,
+			models.CUSTOMER_COLUMN_STATE,
+			models.CUSTOMER_COLUMN_LOGIN_IP,
+			models.CUSTOMER_COLUMN_LOGIN_TIME,
+			models.CUSTOMER_COLUMN_CREATED_TIME,
+			models.CUSTOMER_COLUMN_UPDATED_TIME,
+		).
+		Eq(models.CUSTOMER_COLUMN_USER_NAME, strName).
+		Eq(models.CUSTOMER_COLUMN_DELETED, 0).
+		Query(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+// SelectCustomerByEmail queries a non-deleted customer by email.
+func (dao *CustomerDAO) SelectCustomerByEmail(strEmail string) (do *models.CustomerDO, err error) {
+	if _, err = dao.db.Model(&do).
+		Table(models.TableNameCustomer).
+		Eq(models.CUSTOMER_COLUMN_EMAIL, strEmail).
+		Eq(models.CUSTOMER_COLUMN_DELETED, 0).
+		Query(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
diff --git a/pkg/dal/dao/deploy.go b/pkg/dal/dao/deploy.go
new file mode 100644
index 0000000..3660823
--- /dev/null
+++ b/pkg/dal/dao/deploy.go
@@ -0,0 +1,167 @@
+package dao
+
+import (
+	"fmt"
+	"intent-system/pkg/dal/models"
+
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+)
+
+const (
+	// NOTE: these mirror LoginType_Admin/LoginType_Customer in login.go; the
+	// "1" suffix only avoids redeclaring them within this package.
+	LoginType_Admin1    = 0
+	LoginType_Customer1 = 1
+)
+
+type DeployDAO struct {
+	db *sqlca.Engine
+}
+
+func NewDeployDAO(db *sqlca.Engine) *DeployDAO {
+
+	return &DeployDAO{
+		db: db,
+	}
+}
+
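+// Insert writes one or more deploy rows and returns the last insert id.
+// Usage sketch (illustrative; how the sqlca engine is constructed is assumed
+// here, its exact signature is not shown in this file):
+//
+//	var db *sqlca.Engine // opened against the service DSN elsewhere
+//	dao := NewDeployDAO(db)
+//	_, err := dao.Insert(&models.DeployDO{Nid: 1001, Status: "pending"})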
+func (dao *DeployDAO) Insert(dos ...*models.DeployDO) (lastId int64, err error) {
+	if lastId, err = dao.db.Model(&dos).Table(models.TableNameDeploy).Insert(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *DeployDAO) UpdateByNid(do *models.DeployDO, columns ...string) (err error) {
+	if _, err = dao.db.Model(do).
+		Table(models.TableNameDeploy).
+		Select(columns...).
+		Eq(models.DEPLOY_COLUMN_NID, do.Nid).
+		Update(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
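+// UpdateStatusByAppID looks up the deploy record whose n_id equals appID,
+// then propagates newStatus to every row sharing that record's
+// mb_uuid/repo_name/user_name triple.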
+func (dao *DeployDAO) UpdateStatusByAppID(appID int64, newStatus string) error {
+	var record models.DeployDO
+	_, err := dao.db.Model(&record).
+		Table(models.TableNameDeploy).
+		Where("n_id = ?", appID).
+		Limit(1).
+		Query()
+	if err != nil {
+		log.Errorf("根据 appID 查询 DeployDO 失败: %v", err)
+		return err
+	}
+	if record.Id == 0 {
+		log.Warnf("no deploy record found for appID=%d", appID)
+		return fmt.Errorf("no deploy record")
+	}
+
+	// 🔧 build the update from a model object to avoid string-concatenation syntax errors
+	updateObj := &models.DeployDO{
+		Status: newStatus,
+	}
+	_, err = dao.db.Model(updateObj).
+		Table(models.TableNameDeploy).
+		Select(models.DEPLOY_COLUMN_STATUS).
+		Where("mb_uuid = ? AND repo_name = ? AND user_name = ?", record.MbUuid, record.RepoName, record.UserName).
+		Update()
+	if err != nil {
+		log.Errorf("批量更新状态失败: %v", err)
+		return err
+	}
+	return nil
+}
+
+// SelectDeployById queries a deploy record by primary key.
+func (dao *DeployDAO) SelectDeployById(id int64, columns ...string) (do *models.DeployDO, err error) {
+	log.Infof(".......dal_dao.................Deploy.......................\n\n")
+	if _, err = dao.db.Model(&do).Table(models.TableNameDeploy).Id(id).Select(columns...).Query(); err != nil {
+		return nil, log.Errorf(err.Error())
+	}
+	return
+}
+
+// query records by n_id
+func (dao *DeployDAO) QueryByNid(id int64, columns ...string) (do *models.DeployDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameDeploy).Where("n_id = ?", id).Select(columns...).Query(); err != nil {
+		return nil, log.Errorf(err.Error())
+	}
+	return
+}
+
+func (dao *DeployDAO) DeleteByNid(nid int64) error {
+	_, err := dao.db.Where("n_id = ?", nid).Table(models.TableNameDeploy).Delete()
+	return err
+}
+
+func (dao *DeployDAO) DeleteByFields(mbUuid, repoName, userName string) error {
+	_, err := dao.db.
+		Table(models.TableNameDeploy).
+		Where("mb_uuid = ? AND repo_name = ? AND user_name = ?", mbUuid, repoName, userName).
+		Delete()
+	return err
+}
+
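+// UpdateStatusByFields sets the status column for all deploy rows matching
+// the given mb_uuid/repo_name/user_name triple.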
+func (dao *DeployDAO) UpdateStatusByFields(mbUuid, repoName, userName string, strStatus string) error {
+	updates := map[string]interface{}{
+		"status": strStatus,
+	}
+	_, err := dao.db.
+		Model(&updates).
+		Table(models.TableNameDeploy).
+		And("mb_uuid = ?", mbUuid).
+		And("repo_name = ?", repoName).
+		And("user_name = ?", userName).
+		Update()
+	return err
+}
+
+func (dao *DeployDAO) DeleteByUserRepoUUIDAndStatus(user, repo, uuid, status string) error {
+	log.Infof("🔥 Start to delete record from 'deploy' table")
+	log.Infof("🧩 Conditions: user_name=%s, repo_name=%s, mb_uuid=%s, status=%s", user, repo, uuid, status)
+
+	affected, err := dao.db.Table(models.TableNameDeploy).
+		Where("user_name = ? AND repo_name = ? AND mb_uuid = ? AND status = ?", user, repo, uuid, status).
+		Delete()
+	if err != nil {
+		log.Errorf("❌ Delete failed: %v", err)
+		return err
+	}
+
+	log.Infof("✅ Successfully deleted %d record(s) with user_name=%s, repo_name=%s, mb_uuid=%s, status=%s",
+		affected, user, repo, uuid, status)
+	return nil
+}
+
+func (dao *DeployDAO) CountByUserRepoUUIDAndStatus(user, repo, uuid, status string) (int64, error) {
+	var count int64
+	_, err := dao.db.Model(&count).
+		Table(models.TableNameDeploy).
+		Where("user_name = ? AND repo_name = ? AND mb_uuid = ? AND status = ?", user, repo, uuid, status).
+		Count("id").
+		Query()
+	if err != nil {
+		log.Errorf("❌ Count failed: %v", err)
+		return 0, err
+	}
+
+	log.Infof("✅ Counted %d record(s) with user_name=%s, repo_name=%s, mb_uuid=%s, status=%s",
+		count, user, repo, uuid, status)
+	return count, nil
+}
diff --git a/pkg/dal/dao/dictionary.go b/pkg/dal/dao/dictionary.go
new file mode 100644
index 0000000..6524718
--- /dev/null
+++ b/pkg/dal/dao/dictionary.go
@@ -0,0 +1,129 @@
+package dao
+
+import (
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+	"intent-system/pkg/email"
+	"strconv"
+)
+
+const (
+	Dictionary_Name_Email_Server    = "邮箱服务器"   // email server
+	Dictionary_Name_Email_Port      = "端口"        // port
+	Dictionary_Name_Email_Name      = "邮箱名"      // mailbox name
+	Dictionary_Name_Email_Auth_Code = "授权码"      // authorization code
+	Dictionary_Name_Email_Send_Name = "发件人名称"   // sender display name
+)
+
+const (
+	Dictionary_Key_Email_Server    = "smtp_server"
+	Dictionary_Key_Email_Port      = "smtp_port"
+	Dictionary_Key_Email_Name      = "smtp_name"
+	Dictionary_Key_Email_Auth_Code = "auth_code"
+	Dictionary_Key_Email_Send_Name = "send_name"
+)
+
+const (
+	Dictionary_Remark_Email_Server    = "邮箱SMTP服务器"                     // the mailbox SMTP server
+	Dictionary_Remark_Email_Port      = "SMTP服务器端口号"                   // SMTP server port number
+	Dictionary_Remark_Email_Name      = "SMTP邮箱服务器用户自己的邮箱名"       // the user's own mailbox name on the SMTP server
+	Dictionary_Remark_Email_Auth_Code = "SMTP服务器密码,这里是设置账户中的授权码" // SMTP password, i.e. the authorization code from account settings
+	Dictionary_Remark_Email_Send_Name = "邮件发送人名称"                      // mail sender display name
+)
+
+type DictionaryDAO struct {
+	db *sqlca.Engine
+}
+
+func NewDictionaryDAO(db *sqlca.Engine) *DictionaryDAO {
+
+	return &DictionaryDAO{
+		db: db,
+	}
+}
+
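+// SelectKey queries a non-deleted dictionary row by config key; ok reports
+// whether the key exists.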
+func (dao *DictionaryDAO) SelectKey(key string) (do *models.DictionaryDO, ok bool, err error) {
+	var count int64
+	if count, err = dao.db.Model(&do).
+		Table(models.TableNameDictionary).
+		Where("`%s`='%s'", models.DICTIONARY_COLUMN_CONFIG_KEY, key).
+		And("%s=0", models.DICTIONARY_COLUMN_DELETED).
+		Query(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	if count != 0 {
+		return do, true, nil
+	}
+	return do, false, nil
+}
+
+func (dao *DictionaryDAO) Insert(do *models.DictionaryDO) (id int64, err error) {
+	if id, err = dao.db.Model(&do).Table(models.TableNameDictionary).Insert(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *DictionaryDAO) UpdateByKey(do *models.DictionaryDO, columns ...string) (err error) {
+	if _, err = dao.db.Model(do).
+		Table(models.TableNameDictionary).
+		Select(columns...).
+		Exclude(models.DICTIONARY_COLUMN_CONFIG_KEY).
+		Where("`%s`='%s'", models.DICTIONARY_COLUMN_CONFIG_KEY, do.ConfigKey).
+		Update(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *DictionaryDAO) Upsert(do *models.DictionaryDO) (err error) {
+	var ok bool
+	_, ok, err = dao.SelectKey(do.ConfigKey)
+	if err != nil {
+		return err
+	}
+	if ok {
+		err = dao.UpdateByKey(do)
+		if err != nil {
+			return err
+		}
+	} else {
+		_, err = dao.Insert(do)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SelectEmailConfig assembles the SMTP configuration from dictionary entries.
+func (dao *DictionaryDAO) SelectEmailConfig() (cfg *email.EmailConfig, err error) {
+	// read a single config value; a missing or failed key yields an empty string
+	getValue := func(key string) string {
+		do, ok, e := dao.SelectKey(key)
+		if e != nil || !ok || do == nil {
+			return ""
+		}
+		return do.ConfigValue
+	}
+	var port int
+	port, err = strconv.Atoi(getValue(Dictionary_Key_Email_Port))
+	if err != nil {
+		return nil, log.Errorf("smtp port convert error [%s]", err.Error())
+	}
+	cfg = &email.EmailConfig{
+		SmtpServer: getValue(Dictionary_Key_Email_Server),
+		SmtpPort:   uint32(port),
+		SmtpName:   getValue(Dictionary_Key_Email_Name),
+		AuthCode:   getValue(Dictionary_Key_Email_Auth_Code),
+		SendName:   getValue(Dictionary_Key_Email_Send_Name),
+	}
+	return cfg, nil
+}
diff --git a/pkg/dal/dao/invite_code.go b/pkg/dal/dao/invite_code.go
new file mode 100644
index 0000000..f3f8b35
--- /dev/null
+++ b/pkg/dal/dao/invite_code.go
@@ -0,0 +1,230 @@
+package dao
+
+import (
+	"fmt"
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type InviteCodeCondition struct {
+	RandomCode string `json:"invite_code"`
+	Account    string `json:"account"`
+	State      int    `json:"state"`
+	PageNo     int    `json:"page_no"`
+	PageSize   int    `json:"page_size"`
+}
+
+const (
+	InviteCodeStateUnused = 1 // pending verification
+	InviteCodeStateUsed   = 2 // verified
+)
+
+type InviteCodeDAO struct {
+	db *sqlca.Engine
+}
+
+func NewInviteCodeDAO(db *sqlca.Engine) *InviteCodeDAO {
+	return &InviteCodeDAO{
+		db: db,
+	}
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *InviteCodeDAO) Upsert(do *models.InviteCodeDO, columns ...string) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameInviteCode).Select(columns...).Upsert()
+}
+
+// query records by id
+func (dao *InviteCodeDAO) QueryById(id interface{}, columns ...string) (do *models.InviteCodeDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameInviteCode).Id(id).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+// query records by conditions
+func (dao *InviteCodeDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.InviteCodeDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameInviteCode).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
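+// CheckInviteCodeValid reports whether strRandomCode exists, is not deleted
+// and has not been consumed yet.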
+func (dao *InviteCodeDAO) CheckInviteCodeValid(strRandomCode string) (ok bool, err error) {
+	var do *models.InviteCodeDO
+	_, err = dao.db.Model(&do).
+		Table(models.TableNameInviteCode).
+		Equal(models.INVITE_CODE_COLUMN_RANDOM_CODE, strRandomCode).
+		Equal(models.INVITE_CODE_COLUMN_STATE, InviteCodeStateUnused).
+		Equal(models.INVITE_CODE_COLUMN_DELETED, 0).
+		Query()
+	if err != nil {
+		return false, log.Errorf(err.Error())
+	}
+	if do.Id == 0 {
+		return false, nil
+	}
+	if do.UserId != 0 && do.State == InviteCodeStateUsed {
+		return false, nil
+	}
+	return true, nil
+}
+
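+// CheckUserInviteCode reports whether the code bound to strUserAcc exists,
+// is not deleted and is still unused.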
+func (dao *InviteCodeDAO) CheckUserInviteCode(strUserAcc, strRandomCode string) (ok bool, err error) {
+	var do *models.InviteCodeDO
+	_, err = dao.db.Model(&do).
+		Table(models.TableNameInviteCode).
+		Equal(models.INVITE_CODE_COLUMN_USER_ACC, strUserAcc).
+		Equal(models.INVITE_CODE_COLUMN_RANDOM_CODE, strRandomCode).
+		Equal(models.INVITE_CODE_COLUMN_DELETED, 0).
+		Query()
+	if err != nil {
+		return false, log.Errorf(err.Error())
+	}
+	if do.Id == 0 {
+		return false, nil
+	}
+	if do.State == InviteCodeStateUsed {
+		return false, nil
+	}
+	return true, nil
+}
+
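+// Insert creates an invite code after verifying the random code does not
+// already exist.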
+func (dao *InviteCodeDAO) Insert(do *models.InviteCodeDO) (lastId int64, err error) {
+	var id int32
+	_, err = dao.db.Model(&id).
+		Table(models.TableNameInviteCode).
+		Equal(models.INVITE_CODE_COLUMN_RANDOM_CODE, do.RandomCode).
+		Query()
+	if err != nil {
+		return 0, log.Errorf(err.Error())
+	}
+	if id != 0 {
+		return 0, log.Errorf("invite code already exists")
+	}
+	if lastId, err = dao.db.Model(&do).Table(models.TableNameInviteCode).Insert(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *InviteCodeDAO) Update(do *models.InviteCodeDO, columns ...string) (lastId int64, err error) {
+	if lastId, err = dao.db.Model(&do).
+		Table(models.TableNameInviteCode).
+		Select(columns...).
+		Update(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
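+// UpdateByUserAndInviteCode marks the code bound to the given account as used.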
+func (dao *InviteCodeDAO) UpdateByUserAndInviteCode(strUserAcc, strRandomCode string) (lastId int64, err error) {
+	if lastId, err = dao.db.Model(InviteCodeStateUsed).
+		Table(models.TableNameInviteCode).
+		Select(models.INVITE_CODE_COLUMN_STATE).
+		Equal(models.INVITE_CODE_COLUMN_USER_ACC, strUserAcc).
+		Equal(models.INVITE_CODE_COLUMN_RANDOM_CODE, strRandomCode).
+		Equal(models.INVITE_CODE_COLUMN_DELETED, 0).
+		Update(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
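+
+// DeleteById soft-deletes an invite code by primary key.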
+func (dao *InviteCodeDAO) DeleteById(id int32) (lastId int64, err error) {
+	if lastId, err = dao.db.Model(1).
+		Select(models.INVITE_CODE_COLUMN_DELETED).
+		Table(models.TableNameInviteCode).
+		Equal(models.INVITE_CODE_COLUMN_ID, id).
+		Update(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *InviteCodeDAO) DeleteByInviteCodeUnused(strRandomCode string) (lastId int64, err error) {
+	if lastId, err = dao.db.Model(1).
+		Select(models.INVITE_CODE_COLUMN_DELETED).
+		Table(models.TableNameInviteCode).
+		Equal(models.INVITE_CODE_COLUMN_STATE, InviteCodeStateUnused).
+		Equal(models.INVITE_CODE_COLUMN_RANDOM_CODE, strRandomCode).
+		Equal(models.INVITE_CODE_COLUMN_DELETED, 0).
+		Update(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *InviteCodeDAO) DeleteUnusedCodeByUserAcc(strUserAcc string) (lastId int64, err error) {
+	if lastId, err = dao.db.Model(1).
+		Select(models.INVITE_CODE_COLUMN_DELETED).
+		Table(models.TableNameInviteCode).
+		Equal(models.INVITE_CODE_COLUMN_STATE, InviteCodeStateUnused).
+		Equal(models.INVITE_CODE_COLUMN_USER_ACC, strUserAcc).
+		Update(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
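+// SelectByCondition pages non-deleted invite codes, optionally filtered by
+// code, account and state.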
+func (dao *InviteCodeDAO) SelectByCondition(cond *InviteCodeCondition) (dos []*models.InviteCodeDO, total int64, err error) {
+	e := dao.db.Model(&dos).
+		Table(models.TableNameInviteCode).
+		Page(cond.PageNo, cond.PageSize).
+		Desc(models.INVITE_CODE_COLUMN_CREATED_TIME).
+		Equal(models.INVITE_CODE_COLUMN_DELETED, 0)
+
+	if cond.RandomCode != "" {
+		e.Equal(models.INVITE_CODE_COLUMN_RANDOM_CODE, cond.RandomCode)
+	}
+	if cond.Account != "" {
+		e.Equal(models.INVITE_CODE_COLUMN_USER_ACC, cond.Account)
+	}
+	if cond.State != 0 {
+		e.Equal(models.INVITE_CODE_COLUMN_STATE, cond.State)
+	}
+	_, total, err = e.QueryEx()
+	if err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *InviteCodeDAO) SelectByAccAndCode(strUserAcc, strRandomCode string) (do *models.InviteCodeDO, err error) {
+	_, err = dao.db.Model(&do).
+		Table(models.TableNameInviteCode).
+		Equal(models.INVITE_CODE_COLUMN_DELETED, 0).
+		Equal(models.INVITE_CODE_COLUMN_USER_ACC, strUserAcc).
+		Equal(models.INVITE_CODE_COLUMN_RANDOM_CODE, strRandomCode).
+		Query()
+	if err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
diff --git a/pkg/dal/dao/login.go b/pkg/dal/dao/login.go
new file mode 100644
index 0000000..59859a2
--- /dev/null
+++ b/pkg/dal/dao/login.go
@@ -0,0 +1,31 @@
+package dao
+
+import (
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+const (
+	LoginType_Admin    = 0
+	LoginType_Customer = 1
+)
+
+type LoginDAO struct {
+	db *sqlca.Engine
+}
+
+func NewLoginDAO(db *sqlca.Engine) *LoginDAO {
+
+	return &LoginDAO{
+		db: db,
+	}
+}
+
+func (dao *LoginDAO) Insert(dos ...*models.LoginDO) (lastId int64, err error) {
+	if lastId, err = dao.db.Model(&dos).Table(models.TableNameLogin).Insert(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
diff --git a/pkg/dal/dao/news.go b/pkg/dal/dao/news.go
new file mode 100644
index 0000000..fe2b2e0
--- /dev/null
+++ b/pkg/dal/dao/news.go
@@ -0,0 +1,212 @@
+package dao
+
+import (
+	"fmt"
+	"intent-system/pkg/dal/models"
+
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+)
+
+const (
+	NewsState_NotPublish = 0 // not yet published
+	NewsState_Published  = 1 // published to the subscription list
+	NewsState_Pushed     = 2 // pushed to subscribers
+)
+
+type NewsCondition struct {
+	PageNo       int
+	PageSize     int
+	Id           int64
+	Tag          string
+	All          bool
+	IsDeleted    bool
+	ContainExtra bool
+	Asc          bool
+	Search       string
+	Language     string
+}
+
+type NewsDAO struct {
+	db *sqlca.Engine
+}
+
+func NewNewsDAO(db *sqlca.Engine) *NewsDAO {
+	return &NewsDAO{
+		db: db,
+	}
+}
+
+// insert into table by data model
+func (dao *NewsDAO) Insert(do *models.NewsDO) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameNews).Insert()
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *NewsDAO) Upsert(do *models.NewsDO, columns ...string) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameNews).Select(columns...).Upsert()
+}
+
+// update table set columns where id=xxx
+func (dao *NewsDAO) Update(do *models.NewsDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameNews).Select(columns...).Update()
+}
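+
+// UpdateByOrgId updates the selected columns of all rows matching do.OrgId.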
+func (dao *NewsDAO) UpdateByOrgId(do *models.NewsDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameNews).Eq(models.NEWS_COLUMN_ORG_ID, do.OrgId).Select(columns...).Update()
+}
+
+// query records by id
+func (dao *NewsDAO) QueryById(id int64, columns ...string) (do *models.NewsDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameNews).Id(id).Select(columns...).Query(); err != nil {
+		return nil, log.Errorf(err.Error())
+	}
+	return
+}
+
+// query records by url
+func (dao *NewsDAO) QueryByUrl(url string, columns ...string) (do *models.NewsDO, err error) {
+	if _, err = dao.db.Model(&do).
+		Table(models.TableNameNews).
+		Where("url = ?", url).
+		Select(columns...).
+		Query(); err != nil {
+		return nil, log.Errorf(err.Error())
+	}
+	return
+}
+
+func (dao *NewsDAO) QueryAllByUrl(fullPath string) ([]*models.NewsDO, error) {
+	var list []*models.NewsDO
+
+	_, err := dao.db.Model(&list).
+		Table(models.TableNameNews).
+		Where("url = ?", fullPath). // ✅ 参数绑定,不拼接
+		Query()
+
+	return list, err
+}
+
+func (dao *NewsDAO) QueryOriginalNews(orgId int64, lang string, columns ...string) (do *models.NewsDO, err error) {
+	if _, err = dao.db.Model(&do).
+		Table(models.TableNameNews).
+		Eq(models.NEWS_COLUMN_ORG_ID, orgId).
+		Eq(models.NEWS_COLUMN_LANGUAGE, lang).
+		Eq(models.NEWS_COLUMN_IS_REPLICATE, 0).
+		Eq(models.NEWS_COLUMN_IS_OVERWRITTEN, 1).
+		Select(columns...).
+		Query(); err != nil {
+		return nil, log.Errorf(err.Error())
+	}
+	return
+}
+
+// query records by conditions
+func (dao *NewsDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.NewsDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameNews).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+// QueryMaxSyncId returns the largest synced original news id (org_id) for the given language.
+func (dao *NewsDAO) QueryMaxSyncId(lang models.LanguageType) (lastId int64, err error) {
+	if _, err = dao.db.Model(&lastId).
+		Table(models.TableNameNews).
+		Max(models.NEWS_COLUMN_ORG_ID).
+		Eq(models.NEWS_COLUMN_LANGUAGE, lang).
+		Query(); err != nil {
+		return 0, err
+	}
+	return
+}
+
+func (dao *NewsDAO) QueryNotPushed(pageNo, pageSize int) (dos []*models.NewsDO, total int64, err error) {
+	e := dao.db.Model(&dos).
+		Table(models.TableNameNews).
+		Eq(models.NEWS_COLUMN_IS_DELETED, 0).
+		Eq(models.NEWS_COLUMN_STATE, NewsState_NotPublish).
+		Page(pageNo, pageSize).
+		Desc(models.NEWS_COLUMN_UPDATED_TIME)
+	_, total, err = e.QueryEx()
+	if err != nil {
+		return nil, 0, log.Errorf(err.Error())
+	}
+	return
+}
+
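+// QueryList pages news records with optional id/tag/language/title filters;
+// extra_data is selected only when cond.ContainExtra is true.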
+func (dao *NewsDAO) QueryList(cond *NewsCondition) (dos []*models.NewsDO, total int64, err error) {
+	e := dao.db.Model(&dos).
+		Table(models.TableNameNews).
+		Select(
+			models.NEWS_COLUMN_ID,
+			models.NEWS_COLUMN_ORG_ID,
+			models.NEWS_COLUMN_SPIDER_ID,
+			models.NEWS_COLUMN_PNAME_ID,
+			models.NEWS_COLUMN_TAG,
+			models.NEWS_COLUMN_CATEGORY,
+			models.NEWS_COLUMN_MAIN_TITLE,
+			models.NEWS_COLUMN_SUB_TITLE,
+			models.NEWS_COLUMN_SUMMARY,
+			models.NEWS_COLUMN_KEYWORDS,
+			models.NEWS_COLUMN_SEO_KEYWORDS,
+			models.NEWS_COLUMN_TAGS,
+			models.NEWS_COLUMN_URL,
+			models.NEWS_COLUMN_IMAGE_URL,
+			models.NEWS_COLUMN_LOGO_URL,
+			models.NEWS_COLUMN_MODEL_PARAMETER,
+			models.NEWS_COLUMN_CONTENT,
+			models.NEWS_COLUMN_IS_HOTSPOT,
+			models.NEWS_COLUMN_IS_OVERWRITTEN,
+			models.NEWS_COLUMN_IS_DELETED,
+			models.NEWS_COLUMN_IS_REPLICATE,
+			models.NEWS_COLUMN_STATE,
+			models.NEWS_COLUMN_CREATED_TIME,
+			models.NEWS_COLUMN_UPDATED_TIME,
+		).
+		Page(cond.PageNo, cond.PageSize)
+
+	if cond.Id != 0 {
+		e.Eq(models.NEWS_COLUMN_ID, cond.Id)
+	}
+	if cond.Tag != "" {
+		e.JsonContainArray(models.NEWS_COLUMN_TAGS, cond.Tag)
+	}
+	if !cond.All {
+		e.Eq(models.NEWS_COLUMN_IS_OVERWRITTEN, 0)
+	}
+	if cond.ContainExtra {
+		e.Select(models.NEWS_COLUMN_EXTRA_DATA)
+	}
+	if cond.IsDeleted {
+		e.Eq(models.NEWS_COLUMN_IS_DELETED, 1)
+	} else {
+		e.Eq(models.NEWS_COLUMN_IS_DELETED, 0)
+	}
+	if cond.Language != "" {
+		e.Eq(models.NEWS_COLUMN_LANGUAGE, cond.Language)
+	}
+	if cond.Search != "" {
+		e.Like(models.NEWS_COLUMN_MAIN_TITLE, cond.Search)
+	}
+	if cond.Asc {
+		e.Asc(models.NEWS_COLUMN_UPDATED_TIME, models.NEWS_COLUMN_ID)
+	} else {
+		e.Desc(models.NEWS_COLUMN_UPDATED_TIME, models.NEWS_COLUMN_ID)
+	}
+	_, total, err = e.QueryEx()
+	if err != nil {
+		return nil, 0, log.Errorf(err.Error())
+	}
+	return
+}
diff --git a/pkg/dal/dao/news_draft.go b/pkg/dal/dao/news_draft.go
new file mode 100644
index 0000000..c7f6cf1
--- /dev/null
+++ b/pkg/dal/dao/news_draft.go
@@ -0,0 +1,104 @@
+package dao
+
+import (
+	"fmt"
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type NewsDraftCondition struct {
+	PageNo   int
+	PageSize int
+	Asc      bool
+	Id       int64
+	Search   string
+}
+
+type NewsDraftDAO struct {
+	db *sqlca.Engine
+}
+
+func NewNewsDraftDAO(db *sqlca.Engine) *NewsDraftDAO {
+	return &NewsDraftDAO{
+		db: db,
+	}
+}
+
+// insert into table by data model
+func (dao *NewsDraftDAO) Insert(do *models.NewsDraftDO) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameNewsDraft).Insert()
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *NewsDraftDAO) Upsert(do *models.NewsDraftDO, columns ...string) (lastInsertId int64, err error) {
+	_, err = dao.db.Model(&do).
+		Table(models.TableNameNewsDraft).
+		Select(models.NEWS_DRAFT_COLUMN_ID).
+		Eq(models.NEWS_DRAFT_COLUMN_NEWS_ID, do.NewsId).
+		Eq(models.NEWS_DRAFT_COLUMN_IS_DELETED, 0).
+		Query()
+	if err != nil {
+		return 0, log.Errorf(err.Error())
+	}
+	if do.Id != 0 {
+		_, err = dao.db.Model(&do).Table(models.TableNameNewsDraft).Select(columns...).Update()
+		if err != nil {
+			return 0, log.Errorf(err.Error())
+		}
+		return do.Id, nil
+	}
+	return dao.db.Model(&do).Table(models.TableNameNewsDraft).Select(columns...).Insert()
+}
+
+// update table set columns where id=xxx
+func (dao *NewsDraftDAO) Update(do *models.NewsDraftDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameNewsDraft).Select(columns...).Update()
+}
+
+// query records by id
+func (dao *NewsDraftDAO) QueryById(id interface{}, columns ...string) (do *models.NewsDraftDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameNewsDraft).Id(id).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+func (dao *NewsDraftDAO) QueryList(cond *NewsDraftCondition) (dos []*models.NewsDraftDO, total int64, err error) {
+	e := dao.db.Model(&dos).
+		Table(models.TableNameNewsDraft).
+		Eq(models.NEWS_DRAFT_COLUMN_IS_DELETED, 0).
+		Page(cond.PageNo, cond.PageSize)
+
+	if cond.Id != 0 {
+		e.Eq(models.NEWS_DRAFT_COLUMN_ID, cond.Id)
+	}
+	if cond.Search != "" {
+		e.Like(models.NEWS_DRAFT_COLUMN_MAIN_TITLE, cond.Search)
+	}
+	if cond.Asc {
+		e.Asc(models.NEWS_DRAFT_COLUMN_UPDATED_TIME, models.NEWS_DRAFT_COLUMN_ID)
+	} else {
+		e.Desc(models.NEWS_DRAFT_COLUMN_UPDATED_TIME, models.NEWS_DRAFT_COLUMN_ID)
+	}
+	_, total, err = e.QueryEx()
+	if err != nil {
+		return nil, 0, log.Errorf(err.Error())
+	}
+	return
+}
+
+// query records by conditions
+func (dao *NewsDraftDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.NewsDraftDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameNewsDraft).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
diff --git a/pkg/dal/dao/news_spider.go b/pkg/dal/dao/news_spider.go
new file mode 100644
index 0000000..16d67cb
--- /dev/null
+++ b/pkg/dal/dao/news_spider.go
@@ -0,0 +1,75 @@
+package dao
+
+import (
+	"fmt"
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type NewsSpiderDAO struct {
+	db *sqlca.Engine
+}
+
+func NewNewsSpiderDAO(db *sqlca.Engine) *NewsSpiderDAO {
+	return &NewsSpiderDAO{
+		db: db,
+	}
+}
+
+// insert into table by data model
+func (dao *NewsSpiderDAO) Insert(do *models.NewsSpiderDO) (lastInsertId int64, err error) {
+	_, err = dao.db.Model(&do).
+		Table(models.TableNameNewsSpider).
+		Select(models.NEWS_SPIDER_COLUMN_ID).
+		Eq(models.NEWS_SPIDER_COLUMN_ORG_ID, do.OrgId).
+		Query()
+	if err != nil {
+		return 0, log.Errorf(err.Error())
+	}
+	if do.Id != 0 {
+		return do.Id, nil
+	}
+	return dao.db.Model(&do).Table(models.TableNameNewsSpider).Insert()
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *NewsSpiderDAO) Upsert(do *models.NewsSpiderDO, columns ...string) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameNewsSpider).Select(columns...).Upsert()
+}
+
+// update table set columns where id=xxx
+func (dao *NewsSpiderDAO) Update(do *models.NewsSpiderDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameNewsSpider).Select(columns...).Update()
+}
+
+// query records by id
+func (dao *NewsSpiderDAO) QueryById(id interface{}, columns ...string) (do *models.NewsSpiderDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameNewsSpider).Id(id).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+// QueryMaxSyncId returns the largest synced spider news id (org_id).
+func (dao *NewsSpiderDAO) QueryMaxSyncId() (lastId int64, err error) {
+	if _, err = dao.db.Model(&lastId).Table(models.TableNameNewsSpider).Max(models.NEWS_SPIDER_COLUMN_ORG_ID).Query(); err != nil {
+		return 0, err
+	}
+	return
+}
+
+// query records by conditions
+func (dao *NewsSpiderDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.NewsSpiderDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameNewsSpider).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
diff --git a/pkg/dal/dao/news_subscribe.go b/pkg/dal/dao/news_subscribe.go
new file mode 100644
index 0000000..4fdc3ed
--- /dev/null
+++ b/pkg/dal/dao/news_subscribe.go
@@ -0,0 +1,173 @@
+package dao
+
+import (
+	"fmt"
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type NewsSubscribeCondition struct {
+	PageNo   int
+	PageSize int
+	Asc      bool
+	Id       int64
+	Search   string
+}
+
+type NewsSubscribeDAO struct {
+	db *sqlca.Engine
+}
+
+func NewNewsSubscribeDAO(db *sqlca.Engine) *NewsSubscribeDAO {
+	return &NewsSubscribeDAO{
+		db: db,
+	}
+}
+
+// insert into table by data model
+func (dao *NewsSubscribeDAO) Insert(do *models.NewsSubscribeDO) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameNewsSubscribe).Insert()
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *NewsSubscribeDAO) Upsert(do *models.NewsSubscribeDO, columns ...string) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameNewsSubscribe).Select(columns...).Upsert()
+}
+
+// update table set columns where id=xxx
+func (dao *NewsSubscribeDAO) Update(do *models.NewsSubscribeDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameNewsSubscribe).Select(columns...).Update()
+}
+
+// query records by id
+func (dao *NewsSubscribeDAO) QueryById(id interface{}, columns ...string) (do *models.NewsSubscribeDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameNewsSubscribe).Id(id).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+// query records by conditions
+func (dao *NewsSubscribeDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.NewsSubscribeDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameNewsSubscribe).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+func (dao *NewsSubscribeDAO) QueryByNewsId(newsId interface{}, columns ...string) (do *models.NewsSubscribeDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameNewsSubscribe).Eq(models.NEWS_SUBSCRIBE_COLUMN_NEWS_ID, newsId).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+func (dao *NewsSubscribeDAO) QueryAllNews(cond *NewsSubscribeCondition) (dos []*models.NewsDO, total int64, err error) {
+
+	e := dao.db.Model(&dos).
+		Table(models.TableNameNews).
+		Eq(models.NEWS_COLUMN_IS_DELETED, 0).
+		Desc(models.NEWS_COLUMN_UPDATED_TIME).
+		Page(cond.PageNo, cond.PageSize)
+	if cond.Id != 0 {
+		e.Eq(models.NEWS_SUBSCRIBE_COLUMN_ID, 0)
+	}
+	if cond.Search != "" {
+		e.Like(models.NEWS_SUBSCRIBE_COLUMN_NEWS_SUBJECT, cond.Search)
+	}
+	if cond.Asc {
+		e.Asc(models.NEWS_COLUMN_UPDATED_TIME)
+	} else {
+		e.Desc(models.NEWS_COLUMN_UPDATED_TIME)
+	}
+	_, total, err = e.QueryEx()
+	if err != nil {
+		return nil, 0, log.Errorf(err.Error())
+	}
+	return
+}
+
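+// QueryTodayNewsList joins news with news_subscribe and pages subscribed
+// news that has not been pushed yet.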
+func (dao *NewsSubscribeDAO) QueryTodayNewsList(pageNo, pageSize int) (dos []*models.NewsDO, total int64, err error) {
+	strTables := fmt.Sprintf("%s a, %s b", models.TableNameNews, models.TableNameNewsSubscribe)
+	_, total, err = dao.db.Model(&dos).
+		Table(strTables).
+		Select("a.*").
+		And("a.id = b.news_id").
+		And("b.is_deleted=0").
+		And("b.is_pushed='0'").
+		Desc("b.updated_time").
+		Page(pageNo, pageSize).
+		QueryEx()
+	if err != nil {
+		return nil, 0, log.Errorf(err.Error())
+	}
+	return
+}
+
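+// QueryPushedNews joins news with news_subscribe and pages news already
+// pushed to subscribers.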
+func (dao *NewsSubscribeDAO) QueryPushedNews(cond *NewsSubscribeCondition) (dos []*models.NewsDO, total int64, err error) {
+	strTables := fmt.Sprintf("%s a, %s b", models.TableNameNews, models.TableNameNewsSubscribe)
+	e := dao.db.Model(&dos).
+		Table(strTables).
+		Select("a.*").
+		And("a.id = b.news_id").
+		And("b.is_deleted=0").
+		And("b.is_pushed=1").
+		Page(cond.PageNo, cond.PageSize)
+
+	if cond.Id != 0 {
+		e.Eq("a.id", cond.Id)
+	}
+	if cond.Search != "" {
+		e.Like("a.main_title", cond.Search)
+	}
+	if cond.Asc {
+		e.Asc("b.updated_time")
+	} else {
+		e.Desc("b.updated_time")
+	}
+	_, total, err = e.QueryEx()
+	if err != nil {
+		return nil, 0, log.Errorf(err.Error())
+	}
+	return
+}
+
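+// IsSubNewsExist reports whether newsId already has a pending (not pushed,
+// not deleted) subscription entry.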
+func (dao *NewsSubscribeDAO) IsSubNewsExist(newsId int64) (ok bool, err error) {
+	var do *models.NewsSubscribeDO
+	_, err = dao.db.Model(&do).
+		Table(models.TableNameNewsSubscribe).
+		Eq(models.NEWS_SUBSCRIBE_COLUMN_NEWS_ID, newsId).
+		Eq(models.NEWS_SUBSCRIBE_COLUMN_IS_PUSHED, 0).
+		Eq(models.NEWS_SUBSCRIBE_COLUMN_IS_DELETED, 0).
+		Query()
+	if err != nil {
+		return false, log.Errorf(err.Error())
+	}
+	return do.Id != 0, nil
+}
+
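+// DeleteNotPushed soft-deletes every subscription entry that was never pushed.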
+func (dao *NewsSubscribeDAO) DeleteNotPushed() (err error) {
+	_, err = dao.db.Model(1).
+		Table(models.TableNameNewsSubscribe).
+		Select(models.NEWS_SUBSCRIBE_COLUMN_IS_DELETED).
+		Eq(models.NEWS_SUBSCRIBE_COLUMN_IS_PUSHED, 0).
+		Update()
+	if err != nil {
+		return log.Errorf(err.Error())
+	}
+	return nil
+}
diff --git a/pkg/dal/dao/oper_log.go b/pkg/dal/dao/oper_log.go
new file mode 100644
index 0000000..b6dd1f7
--- /dev/null
+++ b/pkg/dal/dao/oper_log.go
@@ -0,0 +1,26 @@
+package dao
+
+import (
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type OperLogDAO struct {
+	db *sqlca.Engine
+}
+
+func NewOperLogDAO(db *sqlca.Engine) *OperLogDAO {
+
+	return &OperLogDAO{
+		db: db,
+	}
+}
+
+func (dao *OperLogDAO) Insert(dos ...*models.OperLogDO) (lastId int64, err error) {
+	if lastId, err = dao.db.Model(&dos).Table(models.TableNameOperLog).Insert(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
diff --git a/pkg/dal/dao/privilege.go b/pkg/dal/dao/privilege.go
new file mode 100644
index 0000000..e01cfa9
--- /dev/null
+++ b/pkg/dal/dao/privilege.go
@@ -0,0 +1,80 @@
+package dao
+
+import (
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type PrivilegeDAO struct {
+	db *sqlca.Engine
+}
+
+func NewPrivilegeDAO(db *sqlca.Engine) *PrivilegeDAO {
+
+	return &PrivilegeDAO{
+		db: db,
+	}
+}
+
+func (dao *PrivilegeDAO) Insert(dos *models.PrivilegeDO) (id int64, err error) {
+	if id, err = dao.db.Model(&dos).Table(models.TableNamePrivilege).Insert(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *PrivilegeDAO) Upsert(do *models.PrivilegeDO, columns ...string) (lastId int64, err error) {
+	_, err = dao.db.Model(&do).
+		Table(models.TableNamePrivilege).
+		Select(models.PRIVILEGE_COLUMN_ID).
+		Eq(models.PRIVILEGE_COLUMN_NAME, do.Name).
+		Eq(models.PRIVILEGE_COLUMN_LABEL, do.Label).
+		Query()
+	if err != nil {
+		return 0, log.Errorf(err.Error())
+	}
+	if do.Id == 0 {
+		if lastId, err = dao.db.Model(&do).Table(models.TableNamePrivilege).Insert(); err != nil {
+			return 0, log.Errorf(err.Error())
+		}
+	} else {
+		if lastId, err = dao.db.Model(&do).Table(models.TableNamePrivilege).Select(columns...).Update(); err != nil {
+			return 0, log.Errorf(err.Error())
+		}
+	}
+	return lastId, nil
+}
+
+func (dao *PrivilegeDAO) IsTableEmpty() (bool, error) {
+	var count int64
+	if _, err := dao.db.Model(&count).Table(models.TableNamePrivilege).Count(models.PRIVILEGE_COLUMN_ID).Query(); err != nil {
+		return false, log.Errorf(err.Error())
+	}
+	if count == 0 {
+		return true, nil
+	}
+	return false, nil
+}
+
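+// QueryPrivileges loads all privilege rows into a tree; on failure it logs
+// the error and returns nil.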
+func (dao *PrivilegeDAO) QueryPrivileges() (privileges models.TreePrivilege) {
+	var err error
+	var dos []*models.PrivilegeDO
+	_, err = dao.db.Model(&dos).Table(models.TableNamePrivilege).Query()
+	if err != nil {
+		log.Errorf(err.Error())
+		return nil
+	}
+	for _, do := range dos {
+		privileges = append(privileges, models.Privilege{
+			Label:    do.Label,
+			Name:     do.Name,
+			Path:     do.Path,
+			Children: do.Children,
+		})
+	}
+	return
+}
diff --git a/pkg/dal/dao/question_answer.go b/pkg/dal/dao/question_answer.go
new file mode 100644
index 0000000..fea4674
--- /dev/null
+++ b/pkg/dal/dao/question_answer.go
@@ -0,0 +1,129 @@
+package dao
+
+import (
+	"fmt"
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type QuestionAnswerCondition struct {
+	PageNo       int
+	PageSize     int
+	Id           int64
+	IsDeleted    bool
+	ContainExtra bool
+	Asc          bool
+	Search       string
+	Language     string
+}
+
+type QuestionAnswerDAO struct {
+	db *sqlca.Engine
+}
+
+func NewQuestionAnswerDAO(db *sqlca.Engine) *QuestionAnswerDAO {
+	return &QuestionAnswerDAO{
+		db: db,
+	}
+}
+
+// insert into table by data model
+func (dao *QuestionAnswerDAO) Insert(do *models.QuestionAnswerDO) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameQuestionAnswer).Insert()
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *QuestionAnswerDAO) Upsert(do *models.QuestionAnswerDO, columns ...string) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameQuestionAnswer).Select(columns...).Upsert()
+}
+
+// update table set columns where id=xxx
+func (dao *QuestionAnswerDAO) Update(do *models.QuestionAnswerDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameQuestionAnswer).Select(columns...).Update()
+}
+
+// query records by id
+func (dao *QuestionAnswerDAO) QueryById(id interface{}, columns ...string) (do *models.QuestionAnswerDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameQuestionAnswer).Id(id).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+// query records by conditions
+func (dao *QuestionAnswerDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.QuestionAnswerDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameQuestionAnswer).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+func (dao *QuestionAnswerDAO) QueryList(cond *QuestionAnswerCondition) (dos []*models.QuestionAnswerDO, total int64, err error) {
+	e := dao.db.Model(&dos).
+		Table(models.TableNameQuestionAnswer).
+		Select(
+			models.QUESTION_ANSWER_COLUMN_ID,
+			models.QUESTION_ANSWER_COLUMN_ORG_ID,
+			models.QUESTION_ANSWER_COLUMN_QUESTION,
+			models.QUESTION_ANSWER_COLUMN_ANSWER,
+			models.QUESTION_ANSWER_COLUMN_STATE,
+			models.QUESTION_ANSWER_COLUMN_IS_OVERWRITTEN,
+			models.QUESTION_ANSWER_COLUMN_IS_REPLICATE,
+			models.QUESTION_ANSWER_COLUMN_IS_DELETED,
+			models.QUESTION_ANSWER_COLUMN_CREATED_TIME,
+			models.QUESTION_ANSWER_COLUMN_UPDATED_TIME,
+		).
+		Page(cond.PageNo, cond.PageSize)
+
+	if cond.Id != 0 {
+		e.Eq(models.QUESTION_ANSWER_COLUMN_ID, cond.Id)
+	}
+	if cond.IsDeleted {
+		e.Eq(models.QUESTION_ANSWER_COLUMN_IS_DELETED, 1)
+	} else {
+		e.Eq(models.QUESTION_ANSWER_COLUMN_IS_DELETED, 0)
+	}
+	if cond.ContainExtra {
+		e.Select(models.QUESTION_ANSWER_COLUMN_EXTRA_DATA)
+	}
+	if cond.Language != "" {
+		e.Eq(models.QUESTION_ANSWER_COLUMN_LANGUAGE, cond.Language)
+	}
+	if cond.Search != "" {
+		e.Like(models.QUESTION_ANSWER_COLUMN_QUESTION, cond.Search)
+	}
+	if cond.Asc {
+		e.Asc(models.QUESTION_ANSWER_COLUMN_UPDATED_TIME, models.QUESTION_ANSWER_COLUMN_ID)
+	} else {
+		e.Desc(models.QUESTION_ANSWER_COLUMN_UPDATED_TIME, models.QUESTION_ANSWER_COLUMN_ID)
+	}
+	_, total, err = e.QueryEx()
+	if err != nil {
+		return nil, 0, log.Errorf(err.Error())
+	}
+	return
+}
+
+func (dao *QuestionAnswerDAO) UpdateByOrgId(do *models.QuestionAnswerDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameQuestionAnswer).Eq(models.QUESTION_ANSWER_COLUMN_ORG_ID, do.OrgId).Select(columns...).Update()
+}
+
+// QueryMaxSyncId returns the largest synced Q&A id (org_id) for the given language.
+func (dao *QuestionAnswerDAO) QueryMaxSyncId(lang models.LanguageType) (lastId int64, err error) {
+	if _, err = dao.db.Model(&lastId).
+		Table(models.TableNameQuestionAnswer).
+		Max(models.QUESTION_ANSWER_COLUMN_ORG_ID).
+		Eq(models.QUESTION_ANSWER_COLUMN_LANGUAGE, lang).
+		Query(); err != nil {
+		return 0, err
+	}
+	return
+}
diff --git a/pkg/dal/dao/question_draft.go b/pkg/dal/dao/question_draft.go
new file mode 100644
index 0000000..77ca9a9
--- /dev/null
+++ b/pkg/dal/dao/question_draft.go
@@ -0,0 +1,99 @@
+package dao
+
+import (
+	"fmt"
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type QaDraftCondition struct {
+	PageNo   int
+	PageSize int
+	Asc      bool
+	Id       int64
+	Search   string
+}
+
+type QuestionDraftDAO struct {
+	db *sqlca.Engine
+}
+
+func NewQuestionDraftDAO(db *sqlca.Engine) *QuestionDraftDAO {
+	return &QuestionDraftDAO{
+		db: db,
+	}
+}
+
+// insert into table by data model
+func (dao *QuestionDraftDAO) Insert(do *models.QuestionDraftDO) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameQuestionDraft).Insert()
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *QuestionDraftDAO) Upsert(do *models.QuestionDraftDO, columns ...string) (lastInsertId int64, err error) {
+	_, err = dao.db.Model(&do).
+		Table(models.TableNameQuestionDraft).
+		Select(models.QUESTION_DRAFT_COLUMN_ID).
+		Eq(models.QUESTION_DRAFT_COLUMN_QA_ID, do.QaId).
+		Query()
+	if err != nil {
+		return 0, log.Errorf(err.Error())
+	}
+	if do.Id == 0 {
+		return dao.db.Model(&do).Table(models.TableNameQuestionDraft).Insert()
+	}
+	return dao.db.Model(&do).Table(models.TableNameQuestionDraft).Select(columns...).Upsert()
+}
+
+// update table set columns where id=xxx
+func (dao *QuestionDraftDAO) Update(do *models.QuestionDraftDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameQuestionDraft).Select(columns...).Update()
+}
+
+// query records by id
+func (dao *QuestionDraftDAO) QueryById(id interface{}, columns ...string) (do *models.QuestionDraftDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameQuestionDraft).Id(id).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+// query records by conditions
+func (dao *QuestionDraftDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.QuestionDraftDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameQuestionDraft).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+func (dao *QuestionDraftDAO) QueryList(cond *QaDraftCondition) (dos []*models.QuestionDraftDO, total int64, err error) {
+	e := dao.db.Model(&dos).
+		Table(models.TableNameQuestionDraft).
+		Eq(models.QUESTION_DRAFT_COLUMN_IS_DELETED, 0).
+		Page(cond.PageNo, cond.PageSize)
+
+	if cond.Id != 0 {
+		e.Eq(models.QUESTION_DRAFT_COLUMN_ID, cond.Id)
+	}
+	if cond.Search != "" {
+		e.Like(models.QUESTION_DRAFT_COLUMN_QUESTION, cond.Search)
+	}
+	if cond.Asc {
+		e.Asc(models.QUESTION_DRAFT_COLUMN_UPDATED_TIME, models.QUESTION_DRAFT_COLUMN_ID)
+	} else {
+		e.Desc(models.QUESTION_DRAFT_COLUMN_UPDATED_TIME, models.QUESTION_DRAFT_COLUMN_ID)
+	}
+	_, total, err = e.QueryEx()
+	if err != nil {
+		return nil, 0, log.Errorf(err.Error())
+	}
+	return
+}
diff --git a/pkg/dal/dao/role.go b/pkg/dal/dao/role.go
new file mode 100644
index 0000000..ac9af7c
--- /dev/null
+++ b/pkg/dal/dao/role.go
@@ -0,0 +1,136 @@
+package dao
+
+import (
+	"intent-system/pkg/dal/models"
+	"intent-system/pkg/proto"
+	"intent-system/pkg/utils"
+
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+)
+
+type RoleDAO struct {
+	db *sqlca.Engine
+}
+
+func NewRoleDAO(db *sqlca.Engine) *RoleDAO {
+
+	return &RoleDAO{
+		db: db,
+	}
+}
+
+func (dao *RoleDAO) SelectRoleById(id int32) (do *models.RoleDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameRole).Where("%s='%d'", models.ROLE_COLUMN_ID, id).Query(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *RoleDAO) Insert(do *models.RoleDO) (id int64, err error) {
+	if id, err = dao.db.Model(&do).Table(models.TableNameRole).Insert(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *RoleDAO) Upsert(do *models.RoleDO, columns ...string) (lastId int64, err error) {
+	if lastId, err = dao.db.Model(do).Table(models.TableNameRole).Select(columns...).Upsert(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *RoleDAO) Update(do *models.RoleDO, columns ...string) (err error) {
+	if _, err = dao.db.Model(do).
+		Table(models.TableNameRole).
+		Select(columns...).
+		Update(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
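+// Delete soft-deletes a non-inherent role by appending a timestamp suffix to
+// its name and setting the deleted flag.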
+func (dao *RoleDAO) Delete(do *models.RoleDO) (err error) {
+	var strName = do.RoleName
+	do.RoleName = utils.MakeTimestampSuffix(do.RoleName)
+	_, err = dao.db.Model(do).
+		Table(models.TableNameRole).
+		Select(
+			models.ROLE_COLUMN_ROLE_NAME,
+			models.ROLE_COLUMN_DELETED,
+			models.ROLE_COLUMN_EDIT_USER,
+		).
+		Eq(models.ROLE_COLUMN_ROLE_NAME, strName).
+		Eq(models.ROLE_COLUMN_IS_INHERENT, 0).
+		Update()
+	return
+}
+
+func (dao *RoleDAO) SelectPlatformRoles(pageNo, pageSize int, strRoleName string) (roles []*proto.PlatformRole, total int64, err error) {
+	e := dao.db.Model(&roles).
+		Table(models.TableNameRole).
+		Eq(models.ROLE_COLUMN_DELETED, 0).
+		Page(pageNo, pageSize)
+
+	if strRoleName != "" {
+		e.And("%s='%s'", models.ROLE_COLUMN_ROLE_NAME, strRoleName)
+	}
+	if _, total, err = e.QueryEx(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *RoleDAO) SelectUserRole(strUserName string) (role *models.RoleDO, err error) {
+
+	if _, err = dao.db.Model(&role).
+		Table("user_role a", "`role` b").
+		Select("b.id, b.role_name, b.role_alias, b.remark").
+		Where("a.user_name='%v'", strUserName).
+		And("a.deleted=0").
+		And("a.role_name=b.role_name").
+		Query(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+
+	return
+}
+
+func (dao *RoleDAO) SelectRoleByName(strRoleName string) (role *models.RoleDO, err error) {
+	_, err = dao.db.Model(&role).
+		Table(models.TableNameRole).
+		Eq(models.ROLE_COLUMN_ROLE_NAME, strRoleName).
+		Eq(models.ROLE_COLUMN_DELETED, 0).
+		Query()
+	return
+}
+
+func (dao *RoleDAO) CheckRoleExistByName(strRoleName string) (ok bool, err error) {
+	var count int64
+	var do *models.RoleDO
+	if count, err = dao.db.Model(&do).
+		Table(models.TableNameRole).
+		Eq(models.ROLE_COLUMN_ROLE_NAME, strRoleName).
+		Eq(models.ROLE_COLUMN_DELETED, 0).
+		Query(); err != nil {
+
+		log.Errorf("error [%s]", err.Error())
+		return
+	}
+
+	if count == 0 {
+		return false, nil
+	}
+	return true, nil
+}
diff --git a/pkg/dal/dao/run_config.go b/pkg/dal/dao/run_config.go
new file mode 100644
index 0000000..836ff3e
--- /dev/null
+++ b/pkg/dal/dao/run_config.go
@@ -0,0 +1,65 @@
+package dao
+
+import (
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type RunConfigDAO struct {
+	db *sqlca.Engine
+}
+
+func NewRunConfigDAO(db *sqlca.Engine) *RunConfigDAO {
+
+	return &RunConfigDAO{
+		db: db,
+	}
+}
+
+func (dao *RunConfigDAO) Insert(dos ...*models.RunConfigDO) (lastId int64, err error) {
+	if lastId, err = dao.db.Model(&dos).
+		Table(models.TableNameRunConfig).
+		Insert(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *RunConfigDAO) Update(do *models.RunConfigDO, columns ...string) (lastId int64, err error) {
+	if lastId, err = dao.db.Model(&do).
+		Select(columns...).
+		Table(models.TableNameRunConfig).
+		Update(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *RunConfigDAO) UpdateByConfigKey(do *models.RunConfigDO, columns ...string) (lastId int64, err error) {
+	if lastId, err = dao.db.Model(&do).
+		Select(columns...).
+		Table(models.TableNameRunConfig).
+		Eq(models.RUN_CONFIG_COLUMN_CONFIG_NAME, do.ConfigName).
+		Eq(models.RUN_CONFIG_COLUMN_CONFIG_KEY, do.ConfigKey).
+		Update(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *RunConfigDAO) SelectIntValueByConfigKey(strConfigName, strConfigKey string) (value int, err error) {
+	if _, err = dao.db.Model(&value).
+		Select(models.RUN_CONFIG_COLUMN_CONFIG_VALUE).
+		Table(models.TableNameRunConfig).
+		Eq(models.RUN_CONFIG_COLUMN_CONFIG_NAME, strConfigName).
+		Eq(models.RUN_CONFIG_COLUMN_CONFIG_KEY, strConfigKey).
+		Query(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
diff --git a/pkg/dal/dao/subscriber.go b/pkg/dal/dao/subscriber.go
new file mode 100644
index 0000000..e87c643
--- /dev/null
+++ b/pkg/dal/dao/subscriber.go
@@ -0,0 +1,89 @@
+package dao
+
+import (
+	"fmt"
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type SubscriberDAO struct {
+	db *sqlca.Engine
+}
+
+func NewSubscriberDAO(db *sqlca.Engine) *SubscriberDAO {
+	return &SubscriberDAO{
+		db: db,
+	}
+}
+
+// insert into table by data model
+func (dao *SubscriberDAO) Insert(do *models.SubscriberDO) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameSubscriber).Insert()
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *SubscriberDAO) Upsert(do *models.SubscriberDO, columns ...string) (lastInsertId int64, err error) {
+	_, err = dao.db.Model(&do).
+		Table(models.TableNameSubscriber).
+		Select(models.SUBSCRIBER_COLUMN_ID).
+		Eq(models.SUBSCRIBER_COLUMN_EMAIL, do.Email).
+		Query()
+	if err != nil {
+		return 0, log.Errorf(err.Error())
+	}
+	if do.Id != 0 {
+		return dao.db.Model(&do).Table(models.TableNameSubscriber).Select(columns...).Update()
+	}
+	return dao.db.Model(&do).Table(models.TableNameSubscriber).Select(columns...).Insert()
+}
+
+// update table set columns where id=xxx
+func (dao *SubscriberDAO) Update(do *models.SubscriberDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameSubscriber).Select(columns...).Update()
+}
+
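+// DeleteByUserEmail soft-deletes a subscriber by email; callers pass the
+// delete-flag column(s) to update.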
+func (dao *SubscriberDAO) DeleteByUserEmail(do *models.SubscriberDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).
+		Table(models.TableNameSubscriber).
+		Eq(models.SUBSCRIBER_COLUMN_EMAIL, do.Email).
+		Select(columns...).
+		Update()
+}
+
+// query records by id
+func (dao *SubscriberDAO) QueryById(id interface{}, columns ...string) (do *models.SubscriberDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameSubscriber).Id(id).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+// query records by conditions
+func (dao *SubscriberDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.SubscriberDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameSubscriber).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+func (dao *SubscriberDAO) QueryByEmail(strEmail string) (do *models.SubscriberDO, err error) {
+	_, err = dao.db.Model(&do).
+		Table(models.TableNameSubscriber).
+		Eq(models.SUBSCRIBER_COLUMN_EMAIL, strEmail).
+		Eq(models.SUBSCRIBER_COLUMN_IS_DELETED, 0).
+		Query()
+	if err != nil {
+		return nil, log.Errorf(err.Error())
+	}
+	return do, nil
+}
diff --git a/pkg/dal/dao/tag.go b/pkg/dal/dao/tag.go
new file mode 100644
index 0000000..b87b3d3
--- /dev/null
+++ b/pkg/dal/dao/tag.go
@@ -0,0 +1,74 @@
+package dao
+
+import (
+	"fmt"
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type TagDAO struct {
+	db *sqlca.Engine
+}
+
+func NewTagDAO(db *sqlca.Engine) *TagDAO {
+	return &TagDAO{
+		db: db,
+	}
+}
+
+// insert into table by data model
+func (dao *TagDAO) Insert(do *models.TagDO) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameTag).Insert()
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *TagDAO) Upsert(do *models.TagDO, columns ...string) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameTag).Select(columns...).Upsert()
+}
+
+// update table set columns where id=xxx
+func (dao *TagDAO) Update(do *models.TagDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameTag).Select(columns...).Update()
+}
+
+// query records by id
+func (dao *TagDAO) QueryById(id interface{}, columns ...string) (do *models.TagDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameTag).Id(id).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+// query records by conditions
+func (dao *TagDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.TagDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameTag).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+func (dao *TagDAO) QueryAll(pageNo, pageSize int, asc bool) (dos []*models.TagDO, total int64, err error) {
+	e := dao.db.Model(&dos).
+		Table(models.TableNameTag).
+		Eq(models.TAG_COLUMN_IS_DELETED, 0).
+		Page(pageNo, pageSize).
+		Asc(models.TAG_COLUMN_CREATED_TIME)
+	if asc {
+		e.Asc(models.TAG_COLUMN_UPDATED_TIME, models.TAG_COLUMN_ID)
+	} else {
+		e.Desc(models.TAG_COLUMN_UPDATED_TIME, models.TAG_COLUMN_ID)
+	}
+	_, total, err = e.QueryEx()
+	if err != nil {
+		return nil, 0, log.Errorf(err.Error())
+	}
+	return
+}
diff --git a/pkg/dal/dao/template.go b/pkg/dal/dao/template.go
new file mode 100644
index 0000000..39e5dac
--- /dev/null
+++ b/pkg/dal/dao/template.go
@@ -0,0 +1,73 @@
+package dao
+
+import (
+	"fmt"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type EmailTemplateDAO struct {
+	db *sqlca.Engine
+}
+
+func NewEmailTemplateDAO(db *sqlca.Engine) *EmailTemplateDAO {
+	return &EmailTemplateDAO{
+		db: db,
+	}
+}
+
+// insert into table by data model
+func (dao *EmailTemplateDAO) Insert(do *models.TemplateDO) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameTemplate).Insert()
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *EmailTemplateDAO) Upsert(do *models.TemplateDO, columns ...string) (lastInsertId int64, err error) {
+	if len(columns) == 0 {
+		return 0, fmt.Errorf("no columns to update")
+	}
+	return dao.db.Model(&do).Table(models.TableNameTemplate).Select(columns...).Upsert()
+}
+
+// update table set columns where id=xxx
+func (dao *EmailTemplateDAO) Update(do *models.TemplateDO, columns ...string) (rows int64, err error) {
+	if len(columns) == 0 {
+		return 0, fmt.Errorf("no columns to update")
+	}
+	return dao.db.Model(&do).Table(models.TableNameTemplate).Select(columns...).Update()
+}
+
+// query records by id
+func (dao *EmailTemplateDAO) QueryById(id interface{}, columns ...string) (do *models.TemplateDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameTemplate).Id(id).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+// query records by conditions
+func (dao *EmailTemplateDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.TemplateDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameTemplate).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+func (dao *EmailTemplateDAO) QueryByTypeLang(templateType models.TemplateType, lang string, columns ...string) (do *models.TemplateDO, err error) {
+	e := dao.db.Model(&do).
+		Table(models.TableNameTemplate).
+		Select(columns...).
+		Eq(models.TEMPLATE_COLUMN_TEMPLATE_TYPE, templateType).
+		Eq(models.TEMPLATE_COLUMN_LANGUAGE, lang)
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
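+
+// Usage sketch (illustrative only): load the email template for a given
+// type and language; templateType and "en" are placeholder values and db
+// is an assumed initialized *sqlca.Engine.
+//
+//	dao := NewEmailTemplateDAO(db)
+//	do, err := dao.QueryByTypeLang(templateType, "en")
+//	if err != nil {
+//		// handle missing template
+//	}
+//	_ = do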
diff --git a/pkg/dal/dao/user.go b/pkg/dal/dao/user.go
new file mode 100644
index 0000000..20b6540
--- /dev/null
+++ b/pkg/dal/dao/user.go
@@ -0,0 +1,390 @@
+package dao
+
+import (
+	"fmt"
+	"intent-system/pkg/dal/models"
+	"intent-system/pkg/proto"
+	"intent-system/pkg/utils"
+
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+)
+
+const (
+	UserState_Enabled  = 1
+	UserState_Disabled = 2
+)
+
+type UserDAO struct {
+	db *sqlca.Engine
+}
+
+func NewUserDAO(db *sqlca.Engine) *UserDAO {
+
+	return &UserDAO{
+		db: db,
+	}
+}
+
+func (dao *UserDAO) Insert(do *models.UserDO) (id int64, err error) {
+	if id, err = dao.db.Model(&do).Table(models.TableNameUser).Insert(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *UserDAO) Upsert(do *models.UserDO, columns ...string) (lastId int64, err error) {
+	if len(columns) == 0 {
+		return 0, fmt.Errorf("no columns to update")
+	}
+	if lastId, err = dao.db.Model(do).Table(models.TableNameUser).Select(columns...).Upsert(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *UserDAO) UpdateByName(do *models.UserDO, columns ...string) (err error) {
+	if _, err = dao.db.Model(do).
+		Table(models.TableNameUser).
+		Select(columns...).
+		Exclude(models.USER_COLUMN_USER_NAME, models.USER_COLUMN_IS_ADMIN, models.USER_COLUMN_EMAIL).
+		Eq(models.USER_COLUMN_USER_NAME, do.UserName).
+		Update(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *UserDAO) DeleteUser(do *models.UserDO) (err error) {
+	var strUserName = do.UserName
+	do.UserName = utils.MakeTimestampSuffix(do.UserName)
+	do.Email = utils.MakeTimestampSuffix(do.Email)
+	_, err = dao.db.Model(do).
+		Table(models.TableNameUser).
+		Select(
+			models.USER_COLUMN_USER_NAME,
+			models.USER_COLUMN_EMAIL,
+			models.USER_COLUMN_DELETED,
+			models.USER_COLUMN_EDIT_USER,
+		).
+		Eq(models.USER_COLUMN_DELETED, 0).
+		Eq(models.USER_COLUMN_USER_NAME, strUserName).
+		Update()
+	return
+}
+
+func (dao *UserDAO) DeleteUsers(dos []*models.UserDO) (err error) {
+	for _, do := range dos {
+		err = dao.DeleteUser(do)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+func (dao *UserDAO) CheckActiveUserByUserName(strUserName string) (ok bool, err error) {
+	var count int64
+	var do *models.UserDO
+	if count, err = dao.db.Model(&do).
+		Table(models.TableNameUser).
+		Select(models.USER_COLUMN_ID).
+		Eq(models.USER_COLUMN_USER_NAME, strUserName).
+		Eq(models.USER_COLUMN_DELETED, 0).
+		Query(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	if count != 0 {
+		return true, nil
+	}
+	return false, nil
+}
+
+func (dao *UserDAO) CheckActiveUserByEmail(strEmail string) (ok bool, err error) {
+	var count int64
+	var do *models.UserDO
+	if count, err = dao.db.Model(&do).
+		Table(models.TableNameUser).
+		Select(models.USER_COLUMN_ID).
+		Eq(models.USER_COLUMN_EMAIL, strEmail).
+		Eq(models.USER_COLUMN_DELETED, 0).
+		Query(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	if count != 0 {
+		return true, nil
+	}
+	return false, nil
+}
+
+func (dao *UserDAO) CheckActiveUserByPhone(strPhone string) (ok bool, err error) {
+	var count int64
+	var do *models.UserDO
+	if count, err = dao.db.Model(&do).
+		Table(models.TableNameUser).
+		Select(models.USER_COLUMN_ID).
+		Eq(models.USER_COLUMN_PHONE_NUMBER, strPhone).
+		Eq(models.USER_COLUMN_DELETED, 0).
+		Query(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	if count != 0 {
+		return true, nil
+	}
+	return false, nil
+}
+
+func (dao *UserDAO) SelectUsers(pageNo, pageSize int) (dos []*models.UserDO, total int64, err error) {
+	dos = make([]*models.UserDO, 0)
+	if _, total, err = dao.db.Model(&dos).
+		Table(models.TableNameUser).
+		Select(
+			models.USER_COLUMN_ID,
+			models.USER_COLUMN_USER_NAME,
+			models.USER_COLUMN_USER_ALIAS,
+			models.USER_COLUMN_PHONE_NUMBER,
+			models.USER_COLUMN_IS_ADMIN,
+			models.USER_COLUMN_EMAIL,
+			models.USER_COLUMN_ADDRESS,
+			models.USER_COLUMN_REMARK,
+			models.USER_COLUMN_STATE,
+			models.USER_COLUMN_LOGIN_IP,
+			models.USER_COLUMN_LOGIN_TIME,
+			models.USER_COLUMN_CREATED_TIME,
+			models.USER_COLUMN_UPDATED_TIME,
+		).
+		Page(pageNo, pageSize).
+		Eq(models.USER_COLUMN_DELETED, 0).
+		Desc(models.USER_COLUMN_CREATED_TIME).
+		QueryEx(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+// user name
+func (dao *UserDAO) SelectUserByName(strName string) (do *models.UserDO, err error) {
+	if _, err = dao.db.Model(&do).
+		Table(models.TableNameUser).
+		Select(
+			models.USER_COLUMN_ID,
+			models.USER_COLUMN_USER_NAME,
+			models.USER_COLUMN_PASSWORD,
+			models.USER_COLUMN_SALT,
+			models.USER_COLUMN_USER_ALIAS,
+			models.USER_COLUMN_PHONE_NUMBER,
+			models.USER_COLUMN_IS_ADMIN,
+			models.USER_COLUMN_EMAIL,
+			models.USER_COLUMN_ADDRESS,
+			models.USER_COLUMN_REMARK,
+			models.USER_COLUMN_STATE,
+			models.USER_COLUMN_LOGIN_IP,
+			models.USER_COLUMN_LOGIN_TIME,
+			models.USER_COLUMN_CREATED_TIME,
+			models.USER_COLUMN_UPDATED_TIME,
+		).
+		Eq(models.USER_COLUMN_USER_NAME, strName).
+		Eq(models.USER_COLUMN_DELETED, 0).
+		Query(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *UserDAO) SelectUsersByNames(strNames []string) (dos []*models.UserDO, err error) {
+	if _, err = dao.db.Model(&dos).
+		Table(models.TableNameUser).
+		Select(
+			models.USER_COLUMN_ID,
+			models.USER_COLUMN_USER_NAME,
+			models.USER_COLUMN_PASSWORD,
+			models.USER_COLUMN_SALT,
+			models.USER_COLUMN_USER_ALIAS,
+			models.USER_COLUMN_PHONE_NUMBER,
+			models.USER_COLUMN_IS_ADMIN,
+			models.USER_COLUMN_EMAIL,
+			models.USER_COLUMN_ADDRESS,
+			models.USER_COLUMN_REMARK,
+			models.USER_COLUMN_STATE,
+			models.USER_COLUMN_LOGIN_IP,
+			models.USER_COLUMN_LOGIN_TIME,
+			models.USER_COLUMN_CREATED_TIME,
+			models.USER_COLUMN_UPDATED_TIME,
+		).
+		In(models.USER_COLUMN_USER_NAME, strNames).
+		Eq(models.USER_COLUMN_DELETED, 0).
+		Query(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+// user email
+func (dao *UserDAO) SelectUserByEmail(strEmail string) (do *models.UserDO, err error) {
+	if _, err = dao.db.Model(&do).
+		Table(models.TableNameUser).
+		Select(
+			models.USER_COLUMN_ID,
+			models.USER_COLUMN_USER_NAME,
+			models.USER_COLUMN_PASSWORD,
+			models.USER_COLUMN_SALT,
+			models.USER_COLUMN_USER_ALIAS,
+			models.USER_COLUMN_PHONE_NUMBER,
+			models.USER_COLUMN_IS_ADMIN,
+			models.USER_COLUMN_EMAIL,
+			models.USER_COLUMN_ADDRESS,
+			models.USER_COLUMN_REMARK,
+			models.USER_COLUMN_STATE,
+			models.USER_COLUMN_LOGIN_IP,
+			models.USER_COLUMN_LOGIN_TIME,
+			models.USER_COLUMN_CREATED_TIME,
+			models.USER_COLUMN_UPDATED_TIME,
+		).
+		Eq(models.USER_COLUMN_EMAIL, strEmail).
+		Eq(models.USER_COLUMN_DELETED, 0).
+		Query(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *UserDAO) SelectUserByPhone(strPhone string) (do *models.UserDO, err error) {
+	if _, err = dao.db.Model(&do).
+		Table(models.TableNameUser).
+		Select(
+			models.USER_COLUMN_ID,
+			models.USER_COLUMN_USER_NAME,
+			models.USER_COLUMN_PASSWORD,
+			models.USER_COLUMN_SALT,
+			models.USER_COLUMN_USER_ALIAS,
+			models.USER_COLUMN_PHONE_NUMBER,
+			models.USER_COLUMN_IS_ADMIN,
+			models.USER_COLUMN_EMAIL,
+			models.USER_COLUMN_ADDRESS,
+			models.USER_COLUMN_REMARK,
+			models.USER_COLUMN_STATE,
+			models.USER_COLUMN_LOGIN_IP,
+			models.USER_COLUMN_LOGIN_TIME,
+			models.USER_COLUMN_CREATED_TIME,
+			models.USER_COLUMN_UPDATED_TIME,
+		).
+		Eq(models.USER_COLUMN_PHONE_NUMBER, strPhone).
+		Eq(models.USER_COLUMN_DELETED, 0).
+		Query(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *UserDAO) SelectActiveUserPasswordAndSalt(strUserName string) (do *models.UserDO, err error) {
+	if _, err = dao.db.Model(&do).
+		Table(models.TableNameUser).
+		Select(
+			models.USER_COLUMN_PASSWORD,
+			models.USER_COLUMN_SALT,
+		).
+		Eq(models.USER_COLUMN_USER_NAME, strUserName).
+		Eq(models.USER_COLUMN_DELETED, 0).
+		Query(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *UserDAO) SelectUserName(req *proto.PlatformUserQueryReq) (dos []*models.UserDO, total int64, err error) {
+	dos = make([]*models.UserDO, 0)
+	c := dao.db.Model(&dos).
+		Table(models.TableNameUser).
+		Select(
+			models.USER_COLUMN_ID,
+			models.USER_COLUMN_USER_NAME,
+		).
+		Where("%s=0", models.USER_COLUMN_DELETED)
+	if req.Name != "" {
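+		// NOTE: req.Name is interpolated into the LIKE clause verbatim;
+		// this assumes trusted (admin-side) input and is not injection-safe.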
+		c.And("%s like '%%%s%%'", models.USER_COLUMN_USER_NAME, req.Name)
+	}
+	if _, total, err = c.QueryEx(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+// user name
+func (dao *UserDAO) SelectUserByDid(strDid string) (do *models.UserDO, err error) {
+	//SELECT b.* FROM user_acl a, `user` b WHERE a.user_did='0xbeFDC8e41103D1F720B6F4D9aB046cE693521C4a' AND a.user_id=b.id AND b.deleted=0
+	if _, err = dao.db.Model(&do).
+		Select("b.*").
+		Table("user_acl a, `user` b").
+		Where("a.user_did='%s'", strDid).
+		And("a.user_id=b.id").
+		And("b.deleted=0").
+		Query(); err != nil {
+
+		log.Errorf(err.Error())
+		return nil, err
+	}
+	return
+}
+
+func (dao *UserDAO) UpdateUserState(strUserName string, state int) error {
+	if _, err := dao.db.Model(&state).
+		Table(models.TableNameUser).
+		Select(models.USER_COLUMN_STATE).
+		Where("%s='%s'", models.USER_COLUMN_USER_NAME, strUserName).
+		Update(); err != nil {
+		log.Errorf(err.Error())
+		return err
+	}
+	return nil
+}
+
+func (dao *UserDAO) UpdateUserStateById(id int32, state int) error {
+	if _, err := dao.db.Model(&state).
+		Table(models.TableNameUser).
+		Select(models.USER_COLUMN_STATE).
+		Where("%s=%d", models.USER_COLUMN_ID, id).
+		Update(); err != nil {
+		log.Errorf(err.Error())
+		return err
+	}
+	return nil
+}
+
+func (dao *UserDAO) IsUserBanned(userId int32) (bool, error) {
+	var state int32
+	if _, err := dao.db.Model(&state).
+		Table(models.TableNameUser).
+		Select(models.USER_COLUMN_STATE).
+		Eq(models.USER_COLUMN_ID, userId).
+		Query(); err != nil {
+		log.Errorf(err.Error())
+		return false, err
+	}
+	if state == UserState_Disabled {
+		return true, nil
+	}
+	return false, nil
+}
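+
+// Usage sketch (illustrative only): resolve a user by name and check
+// whether the account is banned, assuming an initialized *sqlca.Engine
+// named db and that models.UserDO exposes an Id field.
+//
+//	dao := NewUserDAO(db)
+//	do, err := dao.SelectUserByName("john")
+//	if err != nil || do == nil {
+//		// handle lookup failure or missing user
+//	}
+//	banned, err := dao.IsUserBanned(do.Id)
+//	if err == nil && banned {
+//		// reject the login attempt
+//	}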
diff --git a/pkg/dal/dao/user_role.go b/pkg/dal/dao/user_role.go
new file mode 100644
index 0000000..a6895c2
--- /dev/null
+++ b/pkg/dal/dao/user_role.go
@@ -0,0 +1,205 @@
+package dao
+
+import (
+	"fmt"
+	"intent-system/pkg/dal/models"
+	"intent-system/pkg/proto"
+
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+)
+
+type UserRoleDAO struct {
+	db *sqlca.Engine
+}
+
+func NewUserRoleDAO(db *sqlca.Engine) *UserRoleDAO {
+
+	return &UserRoleDAO{
+		db: db,
+	}
+}
+
+func (dao *UserRoleDAO) Insert(do *models.UserRoleDO) (err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameUserRole).Insert(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *UserRoleDAO) Upsert(do *models.UserRoleDO, columns ...string) (err error) {
+	if _, err = dao.db.Model(do).Table(models.TableNameUserRole).Select(columns...).Upsert(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *UserRoleDAO) SelectUserByRole(role string) (dos []*models.UserRoleDO, err error) {
+	if _, err = dao.db.Model(&dos).
+		Table(models.TableNameUserRole).
+		Eq(models.USER_ROLE_COLUMN_ROLE_NAME, role).
+		Eq(models.USER_ROLE_COLUMN_DELETED, 0).
+		Query(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *UserRoleDAO) SelectUserByName(strName string) (dos []*models.UserRoleDO, err error) {
+	if _, err = dao.db.Model(&dos).
+		Table(models.TableNameUserRole).
+		Eq(models.USER_ROLE_COLUMN_USER_NAME, strName).
+		Eq(models.USER_ROLE_COLUMN_DELETED, 0).
+		Query(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *UserRoleDAO) UpdateRoleNameByUser(do *models.UserRoleDO, columns ...string) (err error) {
+	if _, err = dao.db.Model(do).
+		Table(models.TableNameUserRole).
+		Select(columns...).
+		Eq(models.USER_ROLE_COLUMN_USER_NAME, do.UserName).
+		Eq(models.USER_ROLE_COLUMN_DELETED, 0).
+		Update(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *UserRoleDAO) UpdateUserById(do *models.UserRoleDO, columns ...string) (err error) {
+	if _, err = dao.db.Model(do).
+		Table(models.TableNameUserRole).
+		Select(columns...).
+		Id(do.Id).
+		Update(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *UserRoleDAO) Delete(do *models.UserRoleDO) (err error) {
+
+	if _, err = dao.db.Model(do).
+		Table(models.TableNameUserRole).
+		Select(
+			models.USER_ROLE_COLUMN_USER_NAME,
+			models.USER_ROLE_COLUMN_DELETED,
+			models.USER_ROLE_COLUMN_EDIT_USER,
+		).
+		Eq(models.USER_ROLE_COLUMN_USER_NAME, do.UserName).
+		Eq(models.USER_ROLE_COLUMN_DELETED, 0).
+		Delete(); err != nil {
+
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
+
+func (dao *UserRoleDAO) SelectUsers(req *proto.PlatformListUserReq) (users []*proto.PlatformUser, total int64, err error) {
+
+	users = make([]*proto.PlatformUser, 0)
+	/*
+			-- query platform users
+			SELECT
+			    a.user_name, a.role_name, a.create_user,
+			    b.id AS user_id, b.user_alias, b.phone_number, b.is_admin, b.password,
+		        b.email, b.address, b.remark as user_remark, b.state, b.login_ip, b.login_time,  b.created_time, b.updated_time
+			FROM user_role a,  USER b
+			WHERE a.user_name=b.user_name AND b.deleted=0 AND b.user_name='john'
+			ORDER BY b.created_time DESC
+	*/
+	strSelect := ` a.user_name, a.role_name, a.create_user,
+	    b.id AS user_id, b.user_alias, b.phone_number, b.is_admin, b.password, 
+        b.email, b.address, b.remark as user_remark, b.state, b.login_ip, b.login_time,  b.created_time, b.updated_time`
+
+	var strWhere string
+	if req.UserName != "" {
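+		// NOTE: req.UserName is interpolated into the WHERE clause verbatim;
+		// this assumes trusted input and is not injection-safe.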
+		strWhere = fmt.Sprintf(`a.user_name=b.user_name AND b.deleted=0 AND b.user_name='%s'`, req.UserName)
+	} else {
+		strWhere = `a.user_name=b.user_name AND b.deleted=0`
+	}
+	if req.Id != 0 {
+		strWhere += fmt.Sprintf(` AND b.id='%d'`, req.Id)
+	}
+	strFrom := fmt.Sprintf("%s a, %s b", models.TableNameUserRole, models.TableNameUser)
+
+	if _, total, err = dao.db.Model(&users).
+		Table(strFrom).
+		Select(strSelect).
+		Where(strWhere).
+		Page(req.PageNo, req.PageSize).
+		Desc("b.created_time").
+		QueryEx(); err != nil {
+
+		log.Errorf("database query error [%s]", err.Error())
+		return
+	}
+	return
+}
+
+func (dao *UserRoleDAO) SelectRoleUsers(strRoleName string, pageNo, pageSize int) (users []*proto.PlatformUser, total int64, err error) {
+
+	users = make([]*proto.PlatformUser, 0)
+	/*
+		-- query platform users
+		SELECT
+		    a.user_name, a.role_name,
+		    b.id AS user_id, b.user_alias, b.phone_number, b.is_admin, b.email, b.address, b.remark as user_remark, b.state, b.login_ip, b.login_time,  b.created_time, b.updated_time,
+		    c.role_name AS role_name, c.alias AS role_alias, c.privileges
+		FROM user_role a,  USER b, ROLE c
+		WHERE a.pool_id=0 AND a.cluster_id=0 AND a.user_name=b.user_name AND b.deleted=0 AND a.role_name=c.role_name AND a.role_name='platform-admin'
+		ORDER BY b.created_time DESC
+	*/
+	strSelect := ` a.user_name, a.role_name, 
+	    b.id AS user_id, b.user_alias, b.phone_number, b.is_admin, b.email, b.address, b.remark as user_remark, b.state, b.login_ip, b.login_time,  b.created_time, b.updated_time,
+	    c.role_name, c.alias AS role_alias, c.privileges `
+
+	strWhere := fmt.Sprintf(`a.user_name=b.user_name AND b.deleted=0 AND a.role_name=c.role_name AND a.role_name='%s'`, strRoleName)
+
+	strFrom := fmt.Sprintf("%s a, %s b, %s c", models.TableNameUserRole, models.TableNameUser, models.TableNameRole)
+
+	if _, total, err = dao.db.Model(&users).
+		Table(strFrom).
+		Select(strSelect).
+		Where(strWhere).
+		Page(pageNo, pageSize).
+		Desc("b.created_time").
+		QueryEx(); err != nil {
+
+		log.Errorf("database query error [%s]", err.Error())
+		return
+	}
+
+	return
+}
+
+func (dao *UserRoleDAO) DeleteMultiple(dos []*models.UserRoleDO, userNames []string) (err error) {
+
+	if _, err = dao.db.Model(dos).
+		Table(models.TableNameUserRole).
+		Select(
+			models.USER_ROLE_COLUMN_USER_NAME,
+			models.USER_ROLE_COLUMN_DELETED,
+			models.USER_ROLE_COLUMN_EDIT_USER,
+		).
+		In(models.USER_ROLE_COLUMN_USER_NAME, userNames).
+		Eq(models.USER_ROLE_COLUMN_DELETED, 0).
+		Delete(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	return
+}
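+
+// Usage sketch (illustrative only): list platform users joined with their
+// roles, assuming an initialized *sqlca.Engine named db; the request fields
+// shown match how SelectUsers reads proto.PlatformListUserReq above.
+//
+//	dao := NewUserRoleDAO(db)
+//	users, total, err := dao.SelectUsers(&proto.PlatformListUserReq{
+//		PageNo:   1,
+//		PageSize: 20,
+//	})
+//	if err != nil {
+//		// handle query failure
+//	}
+//	_, _ = users, total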
diff --git a/pkg/dal/dao/wk_aig_news.go b/pkg/dal/dao/wk_aig_news.go
new file mode 100644
index 0000000..bcc70ba
--- /dev/null
+++ b/pkg/dal/dao/wk_aig_news.go
@@ -0,0 +1,69 @@
+package dao
+
+import (
+	"fmt"
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type WkAigNewsDAO struct {
+	db *sqlca.Engine
+}
+
+func NewWkAigNewsDAO(db *sqlca.Engine) *WkAigNewsDAO {
+	return &WkAigNewsDAO{
+		db: db,
+	}
+}
+
+// insert into table by data model
+func (dao *WkAigNewsDAO) Insert(do *models.WkAigNewsDO) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameWkAigNews).Insert()
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *WkAigNewsDAO) Upsert(do *models.WkAigNewsDO, columns ...string) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameWkAigNews).Select(columns...).Upsert()
+}
+
+// update table set columns where id=xxx
+func (dao *WkAigNewsDAO) Update(do *models.WkAigNewsDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameWkAigNews).Select(columns...).Update()
+}
+
+// query records by id
+func (dao *WkAigNewsDAO) QueryById(id interface{}, columns ...string) (do *models.WkAigNewsDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameWkAigNews).Id(id).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+// query records by conditions
+func (dao *WkAigNewsDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.WkAigNewsDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameWkAigNews).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+func (dao *WkAigNewsDAO) QueryLatest(strTable string, lastId int64) (dos []*models.WkAigNewsDO, err error) {
+	_, err = dao.db.Model(&dos).
+		Table(strTable).
+		Gt(models.WK_AIG_NEWS_COLUMN_ID, lastId).
+		Asc(models.WK_AIG_NEWS_COLUMN_ID).
+		Limit(1000).
+		Query()
+	if err != nil {
+		return nil, log.Errorf(err.Error())
+	}
+	return dos, nil
+}
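+
+// Usage sketch (illustrative only): incrementally pull new rows in batches
+// of up to 1000, advancing the last seen id; strTable is the (possibly
+// sharded) source table name and WkAigNewsDO is assumed to expose an Id
+// field, as the other generated models do.
+//
+//	dao := NewWkAigNewsDAO(db)
+//	var lastId int64
+//	for {
+//		dos, err := dao.QueryLatest(strTable, lastId)
+//		if err != nil || len(dos) == 0 {
+//			break
+//		}
+//		lastId = dos[len(dos)-1].Id
+//		// process dos ...
+//	}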
diff --git a/pkg/dal/dao/wk_aig_qna.go b/pkg/dal/dao/wk_aig_qna.go
new file mode 100644
index 0000000..f2b49f1
--- /dev/null
+++ b/pkg/dal/dao/wk_aig_qna.go
@@ -0,0 +1,69 @@
+package dao
+
+import (
+	"fmt"
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type WkAigQnaDAO struct {
+	db *sqlca.Engine
+}
+
+func NewWkAigQnaDAO(db *sqlca.Engine) *WkAigQnaDAO {
+	return &WkAigQnaDAO{
+		db: db,
+	}
+}
+
+// insert into table by data model
+func (dao *WkAigQnaDAO) Insert(do *models.WkAigQnaDO) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameWkAigQna).Insert()
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *WkAigQnaDAO) Upsert(do *models.WkAigQnaDO, columns ...string) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameWkAigQna).Select(columns...).Upsert()
+}
+
+// update table set columns where id=xxx
+func (dao *WkAigQnaDAO) Update(do *models.WkAigQnaDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameWkAigQna).Select(columns...).Update()
+}
+
+// query records by id
+func (dao *WkAigQnaDAO) QueryById(id interface{}, columns ...string) (do *models.WkAigQnaDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameWkAigQna).Id(id).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+// query records by conditions
+func (dao *WkAigQnaDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.WkAigQnaDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameWkAigQna).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+func (dao *WkAigQnaDAO) QueryLatest(strTable string, lastId int64) (dos []*models.WkAigQnaDO, err error) {
+	_, err = dao.db.Model(&dos).
+		Table(strTable).
+		Gt(models.WK_AIG_QNA_COLUMN_ID, lastId).
+		Asc(models.WK_AIG_QNA_COLUMN_ID).
+		Limit(1000).
+		Query()
+	if err != nil {
+		return nil, log.Errorf(err.Error())
+	}
+	return dos, nil
+}
diff --git a/pkg/dal/dao/wk_spider_news.go b/pkg/dal/dao/wk_spider_news.go
new file mode 100644
index 0000000..81f7f6b
--- /dev/null
+++ b/pkg/dal/dao/wk_spider_news.go
@@ -0,0 +1,69 @@
+package dao
+
+import (
+	"fmt"
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type WkSpiderNewsDAO struct {
+	db *sqlca.Engine
+}
+
+func NewWkSpiderNewsDAO(db *sqlca.Engine) *WkSpiderNewsDAO {
+	return &WkSpiderNewsDAO{
+		db: db,
+	}
+}
+
+// insert into table by data model
+func (dao *WkSpiderNewsDAO) Insert(do *models.WkSpiderNewsDO) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameWkSpiderNews).Insert()
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *WkSpiderNewsDAO) Upsert(do *models.WkSpiderNewsDO, columns ...string) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameWkSpiderNews).Select(columns...).Upsert()
+}
+
+// update table set columns where id=xxx
+func (dao *WkSpiderNewsDAO) Update(do *models.WkSpiderNewsDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameWkSpiderNews).Select(columns...).Update()
+}
+
+// query records by id
+func (dao *WkSpiderNewsDAO) QueryById(id interface{}, columns ...string) (do *models.WkSpiderNewsDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameWkSpiderNews).Id(id).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+// query records by conditions
+func (dao *WkSpiderNewsDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.WkSpiderNewsDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameWkSpiderNews).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+func (dao *WkSpiderNewsDAO) QueryLatest(lastId int64) (dos []*models.WkSpiderNewsDO, err error) {
+	_, err = dao.db.Model(&dos).
+		Table(models.TableNameWkSpiderNews).
+		Gt(models.WK_SPIDER_NEWS_COLUMN_ID, lastId).
+		Asc(models.WK_SPIDER_NEWS_COLUMN_ID).
+		Limit(1000).
+		Query()
+	if err != nil {
+		return nil, log.Errorf(err.Error())
+	}
+	return dos, nil
+}
diff --git a/pkg/dal/dao/wk_spider_qna.go b/pkg/dal/dao/wk_spider_qna.go
new file mode 100644
index 0000000..499d9bd
--- /dev/null
+++ b/pkg/dal/dao/wk_spider_qna.go
@@ -0,0 +1,69 @@
+package dao
+
+import (
+	"fmt"
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	"intent-system/pkg/dal/models"
+)
+
+type WkSpiderQnaDAO struct {
+	db *sqlca.Engine
+}
+
+func NewWkSpiderQnaDAO(db *sqlca.Engine) *WkSpiderQnaDAO {
+	return &WkSpiderQnaDAO{
+		db: db,
+	}
+}
+
+// insert into table by data model
+func (dao *WkSpiderQnaDAO) Insert(do *models.WkSpiderQnaDO) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameWkSpiderQna).Insert()
+}
+
+// insert if not exist or update columns on duplicate key...
+func (dao *WkSpiderQnaDAO) Upsert(do *models.WkSpiderQnaDO, columns ...string) (lastInsertId int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameWkSpiderQna).Select(columns...).Upsert()
+}
+
+// update table set columns where id=xxx
+func (dao *WkSpiderQnaDAO) Update(do *models.WkSpiderQnaDO, columns ...string) (rows int64, err error) {
+	return dao.db.Model(&do).Table(models.TableNameWkSpiderQna).Select(columns...).Update()
+}
+
+// query records by id
+func (dao *WkSpiderQnaDAO) QueryById(id interface{}, columns ...string) (do *models.WkSpiderQnaDO, err error) {
+	if _, err = dao.db.Model(&do).Table(models.TableNameWkSpiderQna).Id(id).Select(columns...).Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+// query records by conditions
+func (dao *WkSpiderQnaDAO) QueryByCondition(conditions map[string]interface{}, columns ...string) (dos []*models.WkSpiderQnaDO, err error) {
+	if len(conditions) == 0 {
+		return nil, fmt.Errorf("condition must not be empty")
+	}
+	e := dao.db.Model(&dos).Table(models.TableNameWkSpiderQna).Select(columns...)
+	for k, v := range conditions {
+		e.Eq(k, v)
+	}
+	if _, err = e.Query(); err != nil {
+		return nil, err
+	}
+	return
+}
+
+func (dao *WkSpiderQnaDAO) QueryLatest(lastId int64) (dos []*models.WkSpiderQnaDO, err error) {
+	_, err = dao.db.Model(&dos).
+		Table(models.TableNameWkSpiderQna).
+		Gt(models.WK_SPIDER_QNA_COLUMN_ID, lastId).
+		Asc(models.WK_SPIDER_QNA_COLUMN_ID).
+		Limit(1000).
+		Query()
+	if err != nil {
+		return nil, log.Errorf(err.Error())
+	}
+	return dos, nil
+}
diff --git a/pkg/dal/db2go/gen_models.sh b/pkg/dal/db2go/gen_models.sh
new file mode 100644
index 0000000..d60a382
--- /dev/null
+++ b/pkg/dal/db2go/gen_models.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+OUT_DIR=..
+PACK_NAME=models
+SUFFIX_NAME="do"
+READ_ONLY="created_time,updated_time"
+DB_NAME="intent-system"
+WITH_OUT=""
+TAGS="bson"
+DSN_URL="mysql://root:123456@127.0.0.1:3306/intent-system?charset=utf8"
+#JSON_PROPERTIES="omitempty"
+SPEC_TYPES="template.template_type=TemplateType,news.language=LanguageType,news_draft.language=LanguageType,question_answer.language=LanguageType,question_draft.language=LanguageType,subscriber.extra_data=SubscriberExtraData,privilege.children=TreePrivilege,subscriber.tags=[]string,question_draft.extra_data=CommonExtraData,news.tags=[]string,news.extra_data=CommonExtraData,news_draft.extra_data=CommonExtraData,tag.extra_data=CommonExtraData,question_answer.extra_data=CommonExtraData,question_answer.state=QAState,news_draft.tags=[]string"
+TINYINT_TO_BOOL="deleted,disabled,ok,is_admin,is_inherent,is_offline,is_default,is_hotspot,is_overwritten,is_deleted,is_pushed,is_replicate,is_draft,is_subscribed"
+TABLE_NAME=""
+IMPORT_MODELS=intent-system/pkg/dal/models
+
+db2go --url "${DSN_URL}" --out "${OUT_DIR}" --db "${DB_NAME}" --table "${TABLE_NAME}" --enable-decimal --spec-type "${SPEC_TYPES}" \
+      --suffix "${SUFFIX_NAME}" --package "${PACK_NAME}" --readonly "${READ_ONLY}" --without "${WITH_OUT}" --tag "${TAGS}" --tinyint-as-bool "${TINYINT_TO_BOOL}" \
+      --dao dao --import-models "${IMPORT_MODELS}"
+
+echo "go files generated ok, formatting..."
+gofmt -w "${OUT_DIR}/${PACK_NAME}"
+
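+# Usage note (illustrative): run from pkg/dal/db2go after `make db2go` has
+# installed the generator; DSN_URL above must point at a reachable MySQL
+# instance holding the intent-system schema.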
diff --git a/pkg/dal/models/casbin_rule_do.go b/pkg/dal/models/casbin_rule_do.go
new file mode 100644
index 0000000..9570b8f
--- /dev/null
+++ b/pkg/dal/models/casbin_rule_do.go
@@ -0,0 +1,60 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameCasbinRule = "casbin_rule" //
+
+const (
+	CASBIN_RULE_COLUMN_P_TYPE = "p_type"
+	CASBIN_RULE_COLUMN_V0     = "v0"
+	CASBIN_RULE_COLUMN_V1     = "v1"
+	CASBIN_RULE_COLUMN_V2     = "v2"
+	CASBIN_RULE_COLUMN_V3     = "v3"
+	CASBIN_RULE_COLUMN_V4     = "v4"
+	CASBIN_RULE_COLUMN_V5     = "v5"
+)
+
+type CasbinRuleDO struct {
+	PType string `json:"p_type" db:"p_type" bson:"p_type"` //
+	V0    string `json:"v0" db:"v0" bson:"v0"`             //
+	V1    string `json:"v1" db:"v1" bson:"v1"`             //
+	V2    string `json:"v2" db:"v2" bson:"v2"`             //
+	V3    string `json:"v3" db:"v3" bson:"v3"`             //
+	V4    string `json:"v4" db:"v4" bson:"v4"`             //
+	V5    string `json:"v5" db:"v5" bson:"v5"`             //
+}
+
+func (do *CasbinRuleDO) GetPType() string  { return do.PType }
+func (do *CasbinRuleDO) SetPType(v string) { do.PType = v }
+func (do *CasbinRuleDO) GetV0() string     { return do.V0 }
+func (do *CasbinRuleDO) SetV0(v string)    { do.V0 = v }
+func (do *CasbinRuleDO) GetV1() string     { return do.V1 }
+func (do *CasbinRuleDO) SetV1(v string)    { do.V1 = v }
+func (do *CasbinRuleDO) GetV2() string     { return do.V2 }
+func (do *CasbinRuleDO) SetV2(v string)    { do.V2 = v }
+func (do *CasbinRuleDO) GetV3() string     { return do.V3 }
+func (do *CasbinRuleDO) SetV3(v string)    { do.V3 = v }
+func (do *CasbinRuleDO) GetV4() string     { return do.V4 }
+func (do *CasbinRuleDO) SetV4(v string)    { do.V4 = v }
+func (do *CasbinRuleDO) GetV5() string     { return do.V5 }
+func (do *CasbinRuleDO) SetV5(v string)    { do.V5 = v }
+
+/*
+CREATE TABLE `casbin_rule` (
+  `p_type` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '',
+  `v0` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '',
+  `v1` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '',
+  `v2` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '',
+  `v3` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '',
+  `v4` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '',
+  `v5` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '',
+  KEY `IDX_casbin_rule_v0` (`v0`) USING BTREE,
+  KEY `IDX_casbin_rule_v1` (`v1`) USING BTREE,
+  KEY `IDX_casbin_rule_v2` (`v2`) USING BTREE,
+  KEY `IDX_casbin_rule_v3` (`v3`) USING BTREE,
+  KEY `IDX_casbin_rule_v4` (`v4`) USING BTREE,
+  KEY `IDX_casbin_rule_v5` (`v5`) USING BTREE,
+  KEY `IDX_casbin_rule_p_type` (`p_type`) USING BTREE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC;
+*/
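+
+// Usage sketch (illustrative only): under the conventional casbin RBAC
+// layout, a p-type row maps v0/v1/v2 to (subject, object, action); the
+// values below are placeholders, with "platform-admin" taken from the
+// role name used elsewhere in this commit.
+//
+//	rule := &CasbinRuleDO{
+//		PType: "p",
+//		V0:    "platform-admin", // role
+//		V1:    "/api/v1/users",  // resource
+//		V2:    "GET",            // action
+//	}
+//	_ = rule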
diff --git a/pkg/dal/models/customer_do.go b/pkg/dal/models/customer_do.go
new file mode 100644
index 0000000..bf09bf4
--- /dev/null
+++ b/pkg/dal/models/customer_do.go
@@ -0,0 +1,144 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameCustomer = "customer" //customer info table
+
+const (
+	CUSTOMER_COLUMN_ID            = "id"
+	CUSTOMER_COLUMN_USER_NAME     = "user_name"
+	CUSTOMER_COLUMN_USER_ALIAS    = "user_alias"
+	CUSTOMER_COLUMN_REFERRER      = "referrer"
+	CUSTOMER_COLUMN_REFERRAL_CODE = "referral_code"
+	CUSTOMER_COLUMN_PASSWORD      = "password"
+	CUSTOMER_COLUMN_FIRST_NAME    = "first_name"
+	CUSTOMER_COLUMN_LAST_NAME     = "last_name"
+	CUSTOMER_COLUMN_TITLE         = "title"
+	CUSTOMER_COLUMN_COMPANY       = "company"
+	CUSTOMER_COLUMN_SALT          = "salt"
+	CUSTOMER_COLUMN_PHONE_NUMBER  = "phone_number"
+	CUSTOMER_COLUMN_IS_ADMIN      = "is_admin"
+	CUSTOMER_COLUMN_EMAIL         = "email"
+	CUSTOMER_COLUMN_ADDRESS       = "address"
+	CUSTOMER_COLUMN_REMARK        = "remark"
+	CUSTOMER_COLUMN_DELETED       = "deleted"
+	CUSTOMER_COLUMN_STATE         = "state"
+	CUSTOMER_COLUMN_IS_SUBSCRIBED = "is_subscribed"
+	CUSTOMER_COLUMN_LOGIN_IP      = "login_ip"
+	CUSTOMER_COLUMN_LOGIN_TIME    = "login_time"
+	CUSTOMER_COLUMN_CREATE_USER   = "create_user"
+	CUSTOMER_COLUMN_EDIT_USER     = "edit_user"
+	CUSTOMER_COLUMN_CREATED_TIME  = "created_time"
+	CUSTOMER_COLUMN_UPDATED_TIME  = "updated_time"
+	CUSTOMER_COLUMN_EXTRA_DATA    = "extra_data"
+)
+
+type CustomerDO struct {
+	Id           int32    `json:"id" db:"id" bson:"_id"`                                               //user ID (auto-increment)
+	UserName     string   `json:"user_name" db:"user_name" bson:"user_name"`                           //login name
+	UserAlias    string   `json:"user_alias" db:"user_alias" bson:"user_alias"`                        //account alias
+	Password     string   `json:"password" db:"password" bson:"password"`                              //login password (MD5+salt)
+	Referrer     string   `json:"referrer" db:"referrer" bson:"referrer"`                              //referrer
+	ReferralCode string   `json:"referral_code" db:"referral_code" bson:"referral_code"`               //referral code
+	FirstName    string   `json:"first_name" db:"first_name" bson:"first_name"`                        //family name
+	LastName     string   `json:"last_name" db:"last_name" bson:"last_name"`                           //given name
+	Title        string   `json:"title" db:"title" bson:"title"`                                       //job title
+	Company      string   `json:"company" db:"company" bson:"company"`                                 //company name
+	Salt         string   `json:"salt" db:"salt" bson:"salt"`                                          //MD5 salt
+	PhoneNumber  string   `json:"phone_number" db:"phone_number" bson:"phone_number"`                  //contact phone number
+	IsAdmin      bool     `json:"is_admin" db:"is_admin" bson:"is_admin"`                              //super admin flag (0=regular account 1=super admin)
+	Email        string   `json:"email" db:"email" sqlca:"isnull" bson:"email"`                        //email address
+	Address      string   `json:"address" db:"address" bson:"address"`                                 //home/company address
+	Remark       string   `json:"remark" db:"remark" bson:"remark"`                                    //remark
+	Deleted      bool     `json:"deleted" db:"deleted" bson:"deleted"`                                 //deleted flag (0=not deleted 1=deleted)
+	State        int8     `json:"state" db:"state" bson:"state"`                                       //frozen state (1=enabled 2=frozen)
+	IsSubscribed bool     `json:"is_subscribed" db:"is_subscribed" bson:"is_subscribed"`               //subscribed flag (0=not subscribed 1=subscribed)
+	LoginIp      string   `json:"login_ip" db:"login_ip" bson:"login_ip"`                              //last login IP
+	LoginTime    int64    `json:"login_time" db:"login_time" bson:"login_time"`                        //last login time
+	CreateUser   string   `json:"create_user" db:"create_user" bson:"create_user"`                     //created by
+	EditUser     string   `json:"edit_user" db:"edit_user" bson:"edit_user"`                           //last edited by
+	CreatedTime  string   `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //created time
+	UpdatedTime  string   `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //updated time
+	ExtraData    struct{} `json:"extra_data" db:"extra_data" sqlca:"isnull" bson:"extra_data"`         //extra data (JSON)
+}
+
+func (do *CustomerDO) GetId() int32            { return do.Id }
+func (do *CustomerDO) SetId(v int32)           { do.Id = v }
+func (do *CustomerDO) GetUserName() string     { return do.UserName }
+func (do *CustomerDO) SetUserName(v string)    { do.UserName = v }
+func (do *CustomerDO) GetUserAlias() string    { return do.UserAlias }
+func (do *CustomerDO) SetUserAlias(v string)   { do.UserAlias = v }
+func (do *CustomerDO) GetPassword() string     { return do.Password }
+func (do *CustomerDO) SetPassword(v string)    { do.Password = v }
+func (do *CustomerDO) GetReferrer() string     { return do.Referrer }
+func (do *CustomerDO) SetReferrer(v string)    { do.Referrer = v }
+func (do *CustomerDO) GetReferralCode() string { return do.ReferralCode }
+func (do *CustomerDO) SetReferralCode(v string) { do.ReferralCode = v }
+func (do *CustomerDO) GetFirstName() string    { return do.FirstName }
+func (do *CustomerDO) SetFirstName(v string)   { do.FirstName = v }
+func (do *CustomerDO) GetLastName() string     { return do.LastName }
+func (do *CustomerDO) SetLastName(v string)    { do.LastName = v }
+func (do *CustomerDO) GetTitle() string        { return do.Title }
+func (do *CustomerDO) SetTitle(v string)       { do.Title = v }
+func (do *CustomerDO) GetCompany() string      { return do.Company }
+func (do *CustomerDO) SetCompany(v string)     { do.Company = v }
+func (do *CustomerDO) GetSalt() string         { return do.Salt }
+func (do *CustomerDO) SetSalt(v string)        { do.Salt = v }
+func (do *CustomerDO) GetPhoneNumber() string  { return do.PhoneNumber }
+func (do *CustomerDO) SetPhoneNumber(v string) { do.PhoneNumber = v }
+func (do *CustomerDO) GetIsAdmin() bool        { return do.IsAdmin }
+func (do *CustomerDO) SetIsAdmin(v bool)       { do.IsAdmin = v }
+func (do *CustomerDO) GetEmail() string        { return do.Email }
+func (do *CustomerDO) SetEmail(v string)       { do.Email = v }
+func (do *CustomerDO) GetAddress() string      { return do.Address }
+func (do *CustomerDO) SetAddress(v string)     { do.Address = v }
+func (do *CustomerDO) GetRemark() string       { return do.Remark }
+func (do *CustomerDO) SetRemark(v string)      { do.Remark = v }
+func (do *CustomerDO) GetDeleted() bool        { return do.Deleted }
+func (do *CustomerDO) SetDeleted(v bool)       { do.Deleted = v }
+func (do *CustomerDO) GetState() int8          { return do.State }
+func (do *CustomerDO) SetState(v int8)         { do.State = v }
+func (do *CustomerDO) GetIsSubscribed() bool   { return do.IsSubscribed }
+func (do *CustomerDO) SetIsSubscribed(v bool)  { do.IsSubscribed = v }
+func (do *CustomerDO) GetLoginIp() string      { return do.LoginIp }
+func (do *CustomerDO) SetLoginIp(v string)     { do.LoginIp = v }
+func (do *CustomerDO) GetLoginTime() int64     { return do.LoginTime }
+func (do *CustomerDO) SetLoginTime(v int64)    { do.LoginTime = v }
+func (do *CustomerDO) GetCreateUser() string   { return do.CreateUser }
+func (do *CustomerDO) SetCreateUser(v string)  { do.CreateUser = v }
+func (do *CustomerDO) GetEditUser() string     { return do.EditUser }
+func (do *CustomerDO) SetEditUser(v string)    { do.EditUser = v }
+func (do *CustomerDO) GetCreatedTime() string  { return do.CreatedTime }
+func (do *CustomerDO) SetCreatedTime(v string) { do.CreatedTime = v }
+func (do *CustomerDO) GetUpdatedTime() string  { return do.UpdatedTime }
+func (do *CustomerDO) SetUpdatedTime(v string) { do.UpdatedTime = v }
+func (do *CustomerDO) GetExtraData() struct{}  { return do.ExtraData }
+func (do *CustomerDO) SetExtraData(v struct{}) { do.ExtraData = v }
+
+/*
+CREATE TABLE `customer` (
+  `id` int NOT NULL AUTO_INCREMENT COMMENT 'user ID (auto-increment)',
+  `user_name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'login name',
+  `user_alias` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'account alias',
+  `password` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'login password (MD5+salt)',
+  `first_name` varchar(64) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'family name',
+  `last_name` varchar(64) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'given name',
+  `title` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'job title',
+  `company` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'company name',
+  `salt` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'MD5 salt',
+  `phone_number` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'contact phone number',
+  `is_admin` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'super admin flag (0=regular account 1=super admin)',
+  `email` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT '' COMMENT 'email address',
+  `address` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'home/company address',
+  `remark` varchar(256) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'remark',
+  `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'deleted flag (0=not deleted 1=deleted)',
+  `state` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'frozen state (1=enabled 2=frozen)',
+  `is_subscribed` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'subscribed flag (0=not subscribed 1=subscribed)',
+  `login_ip` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'last login IP',
+  `login_time` bigint NOT NULL DEFAULT '0' COMMENT 'last login time',
+  `create_user` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'created by',
+  `edit_user` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'last edited by',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'created time',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'updated time',
+  `extra_data` json DEFAULT NULL COMMENT 'extra data (JSON)',
+  PRIMARY KEY (`id`) USING BTREE,
+  UNIQUE KEY `UNIQ_USER_NAME` (`user_name`)
+) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC COMMENT='customer info table';
+*/
diff --git a/pkg/dal/models/deploy_do.go b/pkg/dal/models/deploy_do.go
new file mode 100644
index 0000000..4a9fa1c
--- /dev/null
+++ b/pkg/dal/models/deploy_do.go
@@ -0,0 +1,52 @@
+package models
+
+import "time"
+
+const TableNameDeploy = "deploy" //deploy record table
+
+const (
+	DEPLOY_COLUMN_ID           = "id"
+	DEPLOY_COLUMN_NID          = "n_id"
+	DEPLOY_COLUMN_MB_UUID      = "mb_uuid"
+	DEPLOY_COLUMN_REPO_NAME    = "repo_name"
+	DEPLOY_COLUMN_DIGEST       = "digest"
+	DEPLOY_COLUMN_TYPE         = "type"
+	DEPLOY_COLUMN_STATUS       = "status"
+	DEPLOY_COLUMN_USERNAME     = "user_name"
+	DEPLOY_COLUMN_CREATED_TIME = "created_time"
+	DEPLOY_COLUMN_UPDATED_TIME = "updated_time"
+)
+
+type DeployDO struct {
+	Id          int32     `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	Nid         int32     `json:"n_id" db:"n_id" bson:"n_id"`                                          //matching auto-increment id in the news table
+	MbUuid      string    `json:"mb_uuid" db:"mb_uuid" bson:"mb_uuid"`                                 //server motherboard UUID
+	RepoName    string    `json:"repo_name" db:"repo_name" bson:"repo_name"`                           //image name from docker build, i.e. the repository name
+	Digest      string    `json:"digest" db:"digest" bson:"digest"`                                    //OCI hash of the docker image
+	Type        int32     `json:"type" db:"type" bson:"type"`                                          //1=model, 2=application, 4/8/16/32/64/128=reserved
+	Status      string    `json:"status" db:"status" bson:"status"`                                    //model status in the system: running, stopped, removed, deploying
+	UserName    string    `json:"user_name" db:"user_name" bson:"user_name"`                           //name of the user who deployed this model
+	CreatedTime time.Time `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //created time
+	UpdatedTime time.Time `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //updated time
+}
+
+func (do *DeployDO) GetId() int32               { return do.Id }
+func (do *DeployDO) SetId(v int32)              { do.Id = v }
+func (do *DeployDO) GetNid() int32              { return do.Nid }
+func (do *DeployDO) SetNid(v int32)             { do.Nid = v }
+func (do *DeployDO) GetMbUuid() string          { return do.MbUuid }
+func (do *DeployDO) SetMbUuid(v string)         { do.MbUuid = v }
+func (do *DeployDO) GetRepoName() string        { return do.RepoName }
+func (do *DeployDO) SetRepoName(v string)       { do.RepoName = v }
+func (do *DeployDO) GetDigest() string          { return do.Digest }
+func (do *DeployDO) SetDigest(v string)         { do.Digest = v }
+func (do *DeployDO) GetType() int32             { return do.Type }
+func (do *DeployDO) SetType(v int32)            { do.Type = v }
+func (do *DeployDO) GetStatus() string          { return do.Status }
+func (do *DeployDO) SetStatus(v string)         { do.Status = v }
+func (do *DeployDO) GetUserName() string        { return do.UserName }
+func (do *DeployDO) SetUserName(v string)       { do.UserName = v }
+func (do *DeployDO) GetCreatedTime() time.Time  { return do.CreatedTime }
+func (do *DeployDO) SetCreatedTime(v time.Time) { do.CreatedTime = v }
+func (do *DeployDO) GetUpdatedTime() time.Time  { return do.UpdatedTime }
+func (do *DeployDO) SetUpdatedTime(v time.Time) { do.UpdatedTime = v }
diff --git a/pkg/dal/models/dictionary_do.go b/pkg/dal/models/dictionary_do.go
new file mode 100644
index 0000000..de1d870
--- /dev/null
+++ b/pkg/dal/models/dictionary_do.go
@@ -0,0 +1,60 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameDictionary = "dictionary" //
+
+const (
+	DICTIONARY_COLUMN_ID           = "id"
+	DICTIONARY_COLUMN_NAME         = "name"
+	DICTIONARY_COLUMN_CONFIG_KEY   = "config_key"
+	DICTIONARY_COLUMN_CONFIG_VALUE = "config_value"
+	DICTIONARY_COLUMN_REMARK       = "remark"
+	DICTIONARY_COLUMN_DELETED      = "deleted"
+	DICTIONARY_COLUMN_CREATED_TIME = "created_time"
+	DICTIONARY_COLUMN_UPDATED_TIME = "updated_time"
+)
+
+type DictionaryDO struct {
+	Id          int32  `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	Name        string `json:"name" db:"name" bson:"name"`                                          //name
+	ConfigKey   string `json:"config_key" db:"config_key" bson:"config_key"`                        //KEY
+	ConfigValue string `json:"config_value" db:"config_value" bson:"config_value"`                  //VALUE
+	Remark      string `json:"remark" db:"remark" bson:"remark"`                                    //remark
+	Deleted     bool   `json:"deleted" db:"deleted" bson:"deleted"`                                 //deleted flag (0=not deleted 1=deleted)
+	CreatedTime string `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //created time
+	UpdatedTime string `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //updated time
+}
+
+func (do *DictionaryDO) GetId() int32            { return do.Id }
+func (do *DictionaryDO) SetId(v int32)           { do.Id = v }
+func (do *DictionaryDO) GetName() string         { return do.Name }
+func (do *DictionaryDO) SetName(v string)        { do.Name = v }
+func (do *DictionaryDO) GetConfigKey() string    { return do.ConfigKey }
+func (do *DictionaryDO) SetConfigKey(v string)   { do.ConfigKey = v }
+func (do *DictionaryDO) GetConfigValue() string  { return do.ConfigValue }
+func (do *DictionaryDO) SetConfigValue(v string) { do.ConfigValue = v }
+func (do *DictionaryDO) GetRemark() string       { return do.Remark }
+func (do *DictionaryDO) SetRemark(v string)      { do.Remark = v }
+func (do *DictionaryDO) GetDeleted() bool        { return do.Deleted }
+func (do *DictionaryDO) SetDeleted(v bool)       { do.Deleted = v }
+func (do *DictionaryDO) GetCreatedTime() string  { return do.CreatedTime }
+func (do *DictionaryDO) SetCreatedTime(v string) { do.CreatedTime = v }
+func (do *DictionaryDO) GetUpdatedTime() string  { return do.UpdatedTime }
+func (do *DictionaryDO) SetUpdatedTime(v string) { do.UpdatedTime = v }
+
+/*
+CREATE TABLE `dictionary` (
+  `id` int NOT NULL AUTO_INCREMENT COMMENT 'auto-increment ID',
+  `name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'name',
+  `config_key` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'KEY',
+  `config_value` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'VALUE',
+  `remark` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'remark',
+  `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'deleted flag (0=not deleted 1=deleted)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'created time',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'updated time',
+  PRIMARY KEY (`id`) USING BTREE,
+  UNIQUE KEY `key` (`config_key`)
+) ENGINE=InnoDB AUTO_INCREMENT=6 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC;
+*/
diff --git a/pkg/dal/models/invite_code_do.go b/pkg/dal/models/invite_code_do.go
new file mode 100644
index 0000000..c05cac5
--- /dev/null
+++ b/pkg/dal/models/invite_code_do.go
@@ -0,0 +1,75 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameInviteCode = "invite_code" //
+
+const (
+	INVITE_CODE_COLUMN_ID           = "id"
+	INVITE_CODE_COLUMN_USER_ID      = "user_id"
+	INVITE_CODE_COLUMN_USER_ACC     = "user_acc"
+	INVITE_CODE_COLUMN_RANDOM_CODE  = "random_code"
+	INVITE_CODE_COLUMN_LINK_URL     = "link_url"
+	INVITE_CODE_COLUMN_STATE        = "state"
+	INVITE_CODE_COLUMN_EXPIRE_TIME  = "expire_time"
+	INVITE_CODE_COLUMN_ACTION_TYPE  = "action_type"
+	INVITE_CODE_COLUMN_DELETED      = "deleted"
+	INVITE_CODE_COLUMN_CREATED_TIME = "created_time"
+	INVITE_CODE_COLUMN_UPDATED_TIME = "updated_time"
+)
+
+type InviteCodeDO struct {
+	Id          int32  `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	UserId      int32  `json:"user_id" db:"user_id" bson:"user_id"`                                 //registered user ID
+	UserAcc     string `json:"user_acc" db:"user_acc" bson:"user_acc"`                              //registered account
+	RandomCode  string `json:"random_code" db:"random_code" bson:"random_code"`                     //verification code (5-char letters+digits)
+	LinkUrl     string `json:"link_url" db:"link_url" bson:"link_url"`                              //link URL (reserved field)
+	State       int8   `json:"state" db:"state" bson:"state"`                                       //state (1=pending verification 2=verified)
+	ExpireTime  int64  `json:"expire_time" db:"expire_time" bson:"expire_time"`                     //expire time (UNIX timestamp)
+	ActionType  int8   `json:"action_type" db:"action_type" bson:"action_type"`                     //action type (0=register 1=reset password)
+	Deleted     bool   `json:"deleted" db:"deleted" bson:"deleted"`                                 //deleted flag (0=not deleted 1=deleted)
+	CreatedTime string `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //created time
+	UpdatedTime string `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //updated time
+}
+
+func (do *InviteCodeDO) GetId() int32            { return do.Id }
+func (do *InviteCodeDO) SetId(v int32)           { do.Id = v }
+func (do *InviteCodeDO) GetUserId() int32        { return do.UserId }
+func (do *InviteCodeDO) SetUserId(v int32)       { do.UserId = v }
+func (do *InviteCodeDO) GetUserAcc() string      { return do.UserAcc }
+func (do *InviteCodeDO) SetUserAcc(v string)     { do.UserAcc = v }
+func (do *InviteCodeDO) GetRandomCode() string   { return do.RandomCode }
+func (do *InviteCodeDO) SetRandomCode(v string)  { do.RandomCode = v }
+func (do *InviteCodeDO) GetLinkUrl() string      { return do.LinkUrl }
+func (do *InviteCodeDO) SetLinkUrl(v string)     { do.LinkUrl = v }
+func (do *InviteCodeDO) GetState() int8          { return do.State }
+func (do *InviteCodeDO) SetState(v int8)         { do.State = v }
+func (do *InviteCodeDO) GetExpireTime() int64    { return do.ExpireTime }
+func (do *InviteCodeDO) SetExpireTime(v int64)   { do.ExpireTime = v }
+func (do *InviteCodeDO) GetActionType() int8     { return do.ActionType }
+func (do *InviteCodeDO) SetActionType(v int8)    { do.ActionType = v }
+func (do *InviteCodeDO) GetDeleted() bool        { return do.Deleted }
+func (do *InviteCodeDO) SetDeleted(v bool)       { do.Deleted = v }
+func (do *InviteCodeDO) GetCreatedTime() string  { return do.CreatedTime }
+func (do *InviteCodeDO) SetCreatedTime(v string) { do.CreatedTime = v }
+func (do *InviteCodeDO) GetUpdatedTime() string  { return do.UpdatedTime }
+func (do *InviteCodeDO) SetUpdatedTime(v string) { do.UpdatedTime = v }
+
+/*
+CREATE TABLE `invite_code` (
+  `id` int NOT NULL AUTO_INCREMENT COMMENT 'auto-increment ID',
+  `user_id` int NOT NULL COMMENT 'registered user ID',
+  `user_acc` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'registered account',
+  `random_code` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'verification code (5-char letters+digits)',
+  `link_url` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'link URL (reserved field)',
+  `state` tinyint(1) NOT NULL COMMENT 'state (1=pending verification 2=verified)',
+  `expire_time` bigint NOT NULL DEFAULT '0' COMMENT 'expire time (UNIX timestamp)',
+  `action_type` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'action type (0=register 1=reset password)',
+  `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'deleted flag (0=not deleted 1=deleted)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'created time',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'updated time',
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `UNIQ_INVITECODE` (`user_acc`,`random_code`,`deleted`)
+) ENGINE=InnoDB AUTO_INCREMENT=6 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+*/
diff --git a/pkg/dal/models/login_do.go b/pkg/dal/models/login_do.go
new file mode 100644
index 0000000..9364f1a
--- /dev/null
+++ b/pkg/dal/models/login_do.go
@@ -0,0 +1,54 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameLogin = "login" //login record table
+
+const (
+	LOGIN_COLUMN_ID           = "id"
+	LOGIN_COLUMN_LOGIN_TYPE   = "login_type"
+	LOGIN_COLUMN_USER_ID      = "user_id"
+	LOGIN_COLUMN_LOGIN_IP     = "login_ip"
+	LOGIN_COLUMN_LOGIN_ADDR   = "login_addr"
+	LOGIN_COLUMN_CREATED_TIME = "created_time"
+	LOGIN_COLUMN_UPDATED_TIME = "updated_time"
+)
+
+type LoginDO struct {
+	Id          int32  `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	LoginType   int8   `json:"login_type" db:"login_type" bson:"login_type"`                        //login type (0=admin user 1=registered user)
+	UserId      int32  `json:"user_id" db:"user_id" bson:"user_id"`                                 //login user ID
+	LoginIp     string `json:"login_ip" db:"login_ip" bson:"login_ip"`                              //login IP
+	LoginAddr   string `json:"login_addr" db:"login_addr" bson:"login_addr"`                        //login address
+	CreatedTime string `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //created time
+	UpdatedTime string `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //updated time
+}
+
+func (do *LoginDO) GetId() int32            { return do.Id }
+func (do *LoginDO) SetId(v int32)           { do.Id = v }
+func (do *LoginDO) GetLoginType() int8      { return do.LoginType }
+func (do *LoginDO) SetLoginType(v int8)     { do.LoginType = v }
+func (do *LoginDO) GetUserId() int32        { return do.UserId }
+func (do *LoginDO) SetUserId(v int32)       { do.UserId = v }
+func (do *LoginDO) GetLoginIp() string      { return do.LoginIp }
+func (do *LoginDO) SetLoginIp(v string)     { do.LoginIp = v }
+func (do *LoginDO) GetLoginAddr() string    { return do.LoginAddr }
+func (do *LoginDO) SetLoginAddr(v string)   { do.LoginAddr = v }
+func (do *LoginDO) GetCreatedTime() string  { return do.CreatedTime }
+func (do *LoginDO) SetCreatedTime(v string) { do.CreatedTime = v }
+func (do *LoginDO) GetUpdatedTime() string  { return do.UpdatedTime }
+func (do *LoginDO) SetUpdatedTime(v string) { do.UpdatedTime = v }
+
+/*
+CREATE TABLE `login` (
+  `id` int NOT NULL AUTO_INCREMENT COMMENT 'auto-increment ID',
+  `login_type` tinyint NOT NULL DEFAULT '0' COMMENT 'login type (0=admin user 1=registered user)',
+  `user_id` int NOT NULL COMMENT 'login user ID',
+  `login_ip` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'login IP',
+  `login_addr` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'login location',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
+  PRIMARY KEY (`id`) USING BTREE
+) ENGINE=InnoDB AUTO_INCREMENT=24 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC COMMENT='login record table';
+*/
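The generated TableName* and *_COLUMN_* constants exist so call sites never hard-code identifiers. A minimal sketch of an insert built from them over plain database/sql (the package placement, *sql.DB handle, and driver registration are assumptions; created_time/updated_time are tagged sqlca:"readonly" and left to their DEFAULT CURRENT_TIMESTAMP clauses):

package dal

import (
	"database/sql"
	"fmt"

	"intent-system/pkg/dal/models"
)

// InsertLogin writes one login record; the readonly timestamp columns are
// omitted so MySQL fills them from the schema defaults.
func InsertLogin(db *sql.DB, do *models.LoginDO) error {
	query := fmt.Sprintf("INSERT INTO %s (%s, %s, %s, %s) VALUES (?, ?, ?, ?)",
		models.TableNameLogin,
		models.LOGIN_COLUMN_LOGIN_TYPE,
		models.LOGIN_COLUMN_USER_ID,
		models.LOGIN_COLUMN_LOGIN_IP,
		models.LOGIN_COLUMN_LOGIN_ADDR)
	_, err := db.Exec(query, do.LoginType, do.UserId, do.LoginIp, do.LoginAddr)
	return err
}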
diff --git a/pkg/dal/models/news_do.go b/pkg/dal/models/news_do.go
new file mode 100644
index 0000000..4b48235
--- /dev/null
+++ b/pkg/dal/models/news_do.go
@@ -0,0 +1,164 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameNews = "news" //news article table (AI-edited)
+
+const (
+	NEWS_COLUMN_ID              = "id"
+	NEWS_COLUMN_ORG_ID          = "org_id"
+	NEWS_COLUMN_SPIDER_ID       = "spider_id"
+	NEWS_COLUMN_PNAME_ID        = "p_name"
+	NEWS_COLUMN_TAG             = "tag"
+	NEWS_COLUMN_CATEGORY        = "category"
+	NEWS_COLUMN_MAIN_TITLE      = "main_title"
+	NEWS_COLUMN_SUB_TITLE       = "sub_title"
+	NEWS_COLUMN_SUMMARY         = "summary"
+	NEWS_COLUMN_KEYWORDS        = "keywords"
+	NEWS_COLUMN_SEO_KEYWORDS    = "seo_keywords"
+	NEWS_COLUMN_TAGS            = "tags"
+	NEWS_COLUMN_URL             = "url"
+	NEWS_COLUMN_DIGEST          = "digest"
+	NEWS_COLUMN_REPO_NAME       = "repo_name"
+	NEWS_COLUMN_IMAGE_URL       = "image_url"
+	NEWS_COLUMN_LOGO_URL        = "logo_url"
+	NEWS_COLUMN_MODEL_PARAMETER = "model_parameter"
+	NEWS_COLUMN_CONTENT         = "content"
+	NEWS_COLUMN_IS_HOTSPOT      = "is_hotspot"
+	NEWS_COLUMN_IS_OVERWRITTEN  = "is_overwritten"
+	NEWS_COLUMN_IS_DELETED      = "is_deleted"
+	NEWS_COLUMN_IS_REPLICATE    = "is_replicate"
+	NEWS_COLUMN_STATE           = "state"
+	NEWS_COLUMN_LANGUAGE        = "language"
+	NEWS_COLUMN_DATA_TIME       = "data_time"
+	NEWS_COLUMN_CREATED_TIME    = "created_time"
+	NEWS_COLUMN_UPDATED_TIME    = "updated_time"
+	NEWS_COLUMN_EXTRA_DATA      = "extra_data"
+)
+
+type NewsDO struct {
+	Id             int64           `json:"id" db:"id" bson:"_id"`                                                      //auto-increment ID
+	OrgId          int64           `json:"org_id" db:"org_id" bson:"org_id"`                                           //AI article sync ID
+	SpiderId       int64           `json:"spider_id" db:"spider_id" bson:"spider_id"`                                  //spider article ID
+	PnameId        string          `json:"p_name" db:"p_name" sqlca:"isnull" bson:"p_name"`                            //model/product name
+	Tag            string          `json:"tag" db:"tag" bson:"tag"`                                                    //article tag (original tag)
+	Category       string          `json:"category" db:"category" bson:"category"`                                     //category
+	MainTitle      string          `json:"main_title" db:"main_title" bson:"main_title"`                               //main title
+	SubTitle       string          `json:"sub_title" db:"sub_title" bson:"sub_title"`                                  //subtitle
+	Summary        string          `json:"summary" db:"summary" sqlca:"isnull" bson:"summary"`                         //summary
+	Keywords       string          `json:"keywords" db:"keywords" sqlca:"isnull" bson:"keywords"`                      //article keywords
+	SeoKeywords    string          `json:"seo_keywords" db:"seo_keywords" sqlca:"isnull" bson:"seo_keywords"`          //SEO keywords
+	Tags           []string        `json:"tags" db:"tags" sqlca:"isnull" bson:"tags"`                                  //manual tags (multi-select)
+	Url            string          `json:"url" db:"url" sqlca:"isnull" bson:"url"`                                     //article URL
+	Digest         string          `json:"digest" db:"digest" sqlca:"isnull" bson:"digest"`                            //article digest
+	RepoName       string          `json:"repo_name" db:"repo_name" sqlca:"isnull" bson:"repo_name"`                   //repository name used at docker build time
+	ImageUrl       string          `json:"image_url" db:"image_url" sqlca:"isnull" bson:"image_url"`                   //image URL
+	LogoUrl        string          `json:"logo_url" db:"logo_url" sqlca:"isnull" bson:"logo_url"`                      //logo URL
+	ModelParameter int8            `json:"model_parameter" db:"model_parameter" sqlca:"isnull" bson:"model_parameter"` //model parameter count
+	Content        string          `json:"content" db:"content" sqlca:"isnull" bson:"content"`                         //article content
+	IsHotspot      bool            `json:"is_hotspot" db:"is_hotspot" bson:"is_hotspot"`                               //hotspot flag (0=no 1=yes)
+	IsOverwritten  bool            `json:"is_overwritten" db:"is_overwritten" bson:"is_overwritten"`                   //overwritten flag (0=no 1=yes)
+	IsDeleted      bool            `json:"is_deleted" db:"is_deleted" bson:"is_deleted"`                               //deleted flag (0=no 1=yes)
+	IsReplicate    bool            `json:"is_replicate" db:"is_replicate" bson:"is_replicate"`                         //replica flag (0=no 1=yes)
+	State          int8            `json:"state" db:"state" bson:"state"`                                              //state (0=not published to subscribers 1=published 2=pushed)
+	Language       LanguageType    `json:"language" db:"language" bson:"language"`                                     //language (zh-CN=Chinese en=English)
+	DataTime       string          `json:"data_time" db:"data_time" bson:"data_time"`                                  //data generation time
+	CreatedTime    string          `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"`       //data creation time
+	UpdatedTime    string          `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"`       //data update time
+	ExtraData      CommonExtraData `json:"extra_data" db:"extra_data" sqlca:"isnull" bson:"extra_data"`               //extra data (JSON)
+}
+
+func (do *NewsDO) GetId() int64                   { return do.Id }
+func (do *NewsDO) SetId(v int64)                  { do.Id = v }
+func (do *NewsDO) GetOrgId() int64                { return do.OrgId }
+func (do *NewsDO) SetOrgId(v int64)               { do.OrgId = v }
+func (do *NewsDO) GetSpiderId() int64             { return do.SpiderId }
+func (do *NewsDO) SetSpiderId(v int64)            { do.SpiderId = v }
+func (do *NewsDO) GetPnameId() string             { return do.PnameId }
+func (do *NewsDO) SetPnameId(v string)            { do.PnameId = v }
+func (do *NewsDO) GetTag() string                 { return do.Tag }
+func (do *NewsDO) SetTag(v string)                { do.Tag = v }
+func (do *NewsDO) GetCategory() string            { return do.Category }
+func (do *NewsDO) SetCategory(v string)           { do.Category = v }
+func (do *NewsDO) GetMainTitle() string           { return do.MainTitle }
+func (do *NewsDO) SetMainTitle(v string)          { do.MainTitle = v }
+func (do *NewsDO) GetSubTitle() string            { return do.SubTitle }
+func (do *NewsDO) SetSubTitle(v string)           { do.SubTitle = v }
+func (do *NewsDO) GetSummary() string             { return do.Summary }
+func (do *NewsDO) SetSummary(v string)            { do.Summary = v }
+func (do *NewsDO) GetKeywords() string            { return do.Keywords }
+func (do *NewsDO) SetKeywords(v string)           { do.Keywords = v }
+func (do *NewsDO) GetSeoKeywords() string         { return do.SeoKeywords }
+func (do *NewsDO) SetSeoKeywords(v string)        { do.SeoKeywords = v }
+func (do *NewsDO) GetTags() []string              { return do.Tags }
+func (do *NewsDO) SetTags(v []string)             { do.Tags = v }
+func (do *NewsDO) GetUrl() string                 { return do.Url }
+func (do *NewsDO) SetUrl(v string)                { do.Url = v }
+func (do *NewsDO) GetDigest() string              { return do.Digest }
+func (do *NewsDO) SetDigest(v string)             { do.Digest = v }
+func (do *NewsDO) GetRepoName() string            { return do.RepoName }
+func (do *NewsDO) SetRepoName(v string)           { do.RepoName = v }
+func (do *NewsDO) GetImageUrl() string            { return do.ImageUrl }
+func (do *NewsDO) SetImageUrl(v string)           { do.ImageUrl = v }
+func (do *NewsDO) GetLogoUrl() string             { return do.LogoUrl }
+func (do *NewsDO) SetLogoUrl(v string)            { do.LogoUrl = v }
+func (do *NewsDO) GetModelParameter() int8        { return do.ModelParameter }
+func (do *NewsDO) SetModelParameter(v int8)       { do.ModelParameter = v }
+func (do *NewsDO) GetContent() string             { return do.Content }
+func (do *NewsDO) SetContent(v string)            { do.Content = v }
+func (do *NewsDO) GetIsHotspot() bool             { return do.IsHotspot }
+func (do *NewsDO) SetIsHotspot(v bool)            { do.IsHotspot = v }
+func (do *NewsDO) GetIsOverwritten() bool         { return do.IsOverwritten }
+func (do *NewsDO) SetIsOverwritten(v bool)        { do.IsOverwritten = v }
+func (do *NewsDO) GetIsDeleted() bool             { return do.IsDeleted }
+func (do *NewsDO) SetIsDeleted(v bool)            { do.IsDeleted = v }
+func (do *NewsDO) GetIsReplicate() bool           { return do.IsReplicate }
+func (do *NewsDO) SetIsReplicate(v bool)          { do.IsReplicate = v }
+func (do *NewsDO) GetState() int8                 { return do.State }
+func (do *NewsDO) SetState(v int8)                { do.State = v }
+func (do *NewsDO) GetLanguage() LanguageType      { return do.Language }
+func (do *NewsDO) SetLanguage(v LanguageType)     { do.Language = v }
+func (do *NewsDO) GetDataTime() string            { return do.DataTime }
+func (do *NewsDO) SetDataTime(v string)           { do.DataTime = v }
+func (do *NewsDO) GetCreatedTime() string         { return do.CreatedTime }
+func (do *NewsDO) SetCreatedTime(v string)        { do.CreatedTime = v }
+func (do *NewsDO) GetUpdatedTime() string         { return do.UpdatedTime }
+func (do *NewsDO) SetUpdatedTime(v string)        { do.UpdatedTime = v }
+func (do *NewsDO) GetExtraData() CommonExtraData  { return do.ExtraData }
+func (do *NewsDO) SetExtraData(v CommonExtraData) { do.ExtraData = v }
+
+/*
+CREATE TABLE `news` (
+  `id` bigint NOT NULL AUTO_INCREMENT COMMENT 'auto-increment ID',
+  `org_id` bigint NOT NULL DEFAULT '0' COMMENT 'AI article sync ID',
+  `spider_id` bigint NOT NULL DEFAULT '0' COMMENT 'spider article ID',
+  `tag` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'article tag (original tag)',
+  `category` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'category',
+  `main_title` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'main title',
+  `sub_title` varchar(512) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'subtitle',
+  `summary` text COLLATE utf8mb4_unicode_ci COMMENT 'summary',
+  `keywords` text COLLATE utf8mb4_unicode_ci COMMENT 'article keywords',
+  `seo_keywords` text COLLATE utf8mb4_unicode_ci COMMENT 'SEO keywords',
+  `tags` json DEFAULT NULL COMMENT 'manual tags (multi-select)',
+  `url` varchar(2048) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'article URL',
+  `image_url` text COLLATE utf8mb4_unicode_ci COMMENT 'image URL',
+  `content` longtext COLLATE utf8mb4_unicode_ci COMMENT 'article content',
+  `is_hotspot` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'hotspot flag (0=no 1=yes)',
+  `is_overwritten` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'overwritten flag (0=no 1=yes)',
+  `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'deleted flag (0=no 1=yes)',
+  `is_replicate` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'replica flag (0=no 1=yes)',
+  `state` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'state (0=not published to subscribers 1=published 2=pushed)',
+  `language` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'language (zh-CN=Chinese en=English)',
+  `data_time` timestamp NOT NULL COMMENT 'data generation time',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'data creation time',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'data update time',
+  `extra_data` json DEFAULT NULL COMMENT 'extra data (JSON)',
+  PRIMARY KEY (`id`),
+  KEY `INDEX_MAIN_TITLE` (`main_title`),
+  KEY `INDEX_SUB_TITLE` (`sub_title`),
+  KEY `INDEX_CREATED_TIME` (`created_time` DESC),
+  KEY `INDEX_TAG` (`tag`),
+  KEY `INDEX_HOTSPOT` (`is_hotspot`,`is_overwritten`,`is_deleted`)
+) ENGINE=InnoDB AUTO_INCREMENT=213 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT='news article table (AI-edited)';
+*/
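Note that `tags` is a MySQL JSON column mapped to a Go []string. When serialized by hand the conversion is a plain encoding/json round trip (a sketch; in normal use the sqlca layer does this via the db tag):

package example

import "encoding/json"

// tagsToColumn serializes manual tags for the JSON `tags` column,
// e.g. []string{"LLM", "release"} becomes ["LLM","release"].
func tagsToColumn(tags []string) (string, error) {
	b, err := json.Marshal(tags)
	return string(b), err
}

// tagsFromColumn restores the slice from a stored JSON value.
func tagsFromColumn(raw string) ([]string, error) {
	var tags []string
	err := json.Unmarshal([]byte(raw), &tags)
	return tags, err
}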
diff --git a/pkg/dal/models/news_draft_do.go b/pkg/dal/models/news_draft_do.go
new file mode 100644
index 0000000..a915004
--- /dev/null
+++ b/pkg/dal/models/news_draft_do.go
@@ -0,0 +1,111 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameNewsDraft = "news_draft" //draft box
+
+const (
+	NEWS_DRAFT_COLUMN_ID           = "id"
+	NEWS_DRAFT_COLUMN_NEWS_ID      = "news_id"
+	NEWS_DRAFT_COLUMN_ORG_ID       = "org_id"
+	NEWS_DRAFT_COLUMN_CATEGORY     = "category"
+	NEWS_DRAFT_COLUMN_MAIN_TITLE   = "main_title"
+	NEWS_DRAFT_COLUMN_SUB_TITLE    = "sub_title"
+	NEWS_DRAFT_COLUMN_SUMMARY      = "summary"
+	NEWS_DRAFT_COLUMN_KEYWORDS     = "keywords"
+	NEWS_DRAFT_COLUMN_SEO_KEYWORDS = "seo_keywords"
+	NEWS_DRAFT_COLUMN_TAGS         = "tags"
+	NEWS_DRAFT_COLUMN_IMAGE_URL    = "image_url"
+	NEWS_DRAFT_COLUMN_CONTENT      = "content"
+	NEWS_DRAFT_COLUMN_LANGUAGE     = "language"
+	NEWS_DRAFT_COLUMN_IS_DELETED   = "is_deleted"
+	NEWS_DRAFT_COLUMN_IS_REPLICATE = "is_replicate"
+	NEWS_DRAFT_COLUMN_CREATED_TIME = "created_time"
+	NEWS_DRAFT_COLUMN_UPDATED_TIME = "updated_time"
+	NEWS_DRAFT_COLUMN_EXTRA_DATA   = "extra_data"
+)
+
+type NewsDraftDO struct {
+	Id          int64           `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	NewsId      int64           `json:"news_id" db:"news_id" bson:"news_id"`                                 //news ID (news table id column)
+	OrgId       int64           `json:"org_id" db:"org_id" bson:"org_id"`                                    //source news ID (news table org_id column)
+	Category    string          `json:"category" db:"category" bson:"category"`                              //category
+	MainTitle   string          `json:"main_title" db:"main_title" bson:"main_title"`                        //main title
+	SubTitle    string          `json:"sub_title" db:"sub_title" bson:"sub_title"`                           //subtitle
+	Summary     string          `json:"summary" db:"summary" sqlca:"isnull" bson:"summary"`                  //summary
+	Keywords    string          `json:"keywords" db:"keywords" sqlca:"isnull" bson:"keywords"`               //keywords (JSON array)
+	SeoKeywords string          `json:"seo_keywords" db:"seo_keywords" sqlca:"isnull" bson:"seo_keywords"`   //SEO keywords (JSON array)
+	Tags        []string        `json:"tags" db:"tags" sqlca:"isnull" bson:"tags"`                           //tags (JSON array)
+	ImageUrl    string          `json:"image_url" db:"image_url" sqlca:"isnull" bson:"image_url"`            //image URL
+	Content     string          `json:"content" db:"content" sqlca:"isnull" bson:"content"`                  //article content
+	Language    LanguageType    `json:"language" db:"language" bson:"language"`                              //language (zh-CN=Chinese en=English)
+	IsDeleted   bool            `json:"is_deleted" db:"is_deleted" bson:"is_deleted"`                        //deleted flag (0=no 1=yes)
+	IsReplicate bool            `json:"is_replicate" db:"is_replicate" bson:"is_replicate"`                  //replica flag (0=no 1=yes)
+	CreatedTime string          `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //data creation time
+	UpdatedTime string          `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //data update time
+	ExtraData   CommonExtraData `json:"extra_data" db:"extra_data" sqlca:"isnull" bson:"extra_data"`         //extra data (JSON)
+}
+
+func (do *NewsDraftDO) GetId() int64                   { return do.Id }
+func (do *NewsDraftDO) SetId(v int64)                  { do.Id = v }
+func (do *NewsDraftDO) GetNewsId() int64               { return do.NewsId }
+func (do *NewsDraftDO) SetNewsId(v int64)              { do.NewsId = v }
+func (do *NewsDraftDO) GetOrgId() int64                { return do.OrgId }
+func (do *NewsDraftDO) SetOrgId(v int64)               { do.OrgId = v }
+func (do *NewsDraftDO) GetCategory() string            { return do.Category }
+func (do *NewsDraftDO) SetCategory(v string)           { do.Category = v }
+func (do *NewsDraftDO) GetMainTitle() string           { return do.MainTitle }
+func (do *NewsDraftDO) SetMainTitle(v string)          { do.MainTitle = v }
+func (do *NewsDraftDO) GetSubTitle() string            { return do.SubTitle }
+func (do *NewsDraftDO) SetSubTitle(v string)           { do.SubTitle = v }
+func (do *NewsDraftDO) GetSummary() string             { return do.Summary }
+func (do *NewsDraftDO) SetSummary(v string)            { do.Summary = v }
+func (do *NewsDraftDO) GetKeywords() string            { return do.Keywords }
+func (do *NewsDraftDO) SetKeywords(v string)           { do.Keywords = v }
+func (do *NewsDraftDO) GetSeoKeywords() string         { return do.SeoKeywords }
+func (do *NewsDraftDO) SetSeoKeywords(v string)        { do.SeoKeywords = v }
+func (do *NewsDraftDO) GetTags() []string              { return do.Tags }
+func (do *NewsDraftDO) SetTags(v []string)             { do.Tags = v }
+func (do *NewsDraftDO) GetImageUrl() string            { return do.ImageUrl }
+func (do *NewsDraftDO) SetImageUrl(v string)           { do.ImageUrl = v }
+func (do *NewsDraftDO) GetContent() string             { return do.Content }
+func (do *NewsDraftDO) SetContent(v string)            { do.Content = v }
+func (do *NewsDraftDO) GetLanguage() LanguageType      { return do.Language }
+func (do *NewsDraftDO) SetLanguage(v LanguageType)     { do.Language = v }
+func (do *NewsDraftDO) GetIsDeleted() bool             { return do.IsDeleted }
+func (do *NewsDraftDO) SetIsDeleted(v bool)            { do.IsDeleted = v }
+func (do *NewsDraftDO) GetIsReplicate() bool           { return do.IsReplicate }
+func (do *NewsDraftDO) SetIsReplicate(v bool)          { do.IsReplicate = v }
+func (do *NewsDraftDO) GetCreatedTime() string         { return do.CreatedTime }
+func (do *NewsDraftDO) SetCreatedTime(v string)        { do.CreatedTime = v }
+func (do *NewsDraftDO) GetUpdatedTime() string         { return do.UpdatedTime }
+func (do *NewsDraftDO) SetUpdatedTime(v string)        { do.UpdatedTime = v }
+func (do *NewsDraftDO) GetExtraData() CommonExtraData  { return do.ExtraData }
+func (do *NewsDraftDO) SetExtraData(v CommonExtraData) { do.ExtraData = v }
+
+/*
+CREATE TABLE `news_draft` (
+  `id` bigint NOT NULL AUTO_INCREMENT COMMENT 'auto-increment ID',
+  `news_id` bigint NOT NULL DEFAULT '0' COMMENT 'news ID (news table id column)',
+  `org_id` bigint NOT NULL DEFAULT '0' COMMENT 'source news ID (news table org_id column)',
+  `category` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'category',
+  `main_title` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'main title',
+  `sub_title` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'subtitle',
+  `summary` text COLLATE utf8mb4_unicode_ci COMMENT 'summary',
+  `keywords` text COLLATE utf8mb4_unicode_ci COMMENT 'keywords (JSON array)',
+  `seo_keywords` text COLLATE utf8mb4_unicode_ci COMMENT 'SEO keywords (JSON array)',
+  `tags` json DEFAULT NULL COMMENT 'tags (JSON array)',
+  `image_url` text COLLATE utf8mb4_unicode_ci COMMENT 'image URL',
+  `content` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT 'article content',
+  `language` varchar(32) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'language (zh-CN=Chinese en=English)',
+  `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'deleted flag (0=no 1=yes)',
+  `is_replicate` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'replica flag (0=no 1=yes)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'data creation time',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'data update time',
+  `extra_data` json DEFAULT NULL COMMENT 'extra data (JSON)',
+  PRIMARY KEY (`id`),
+  KEY `INDEX_CREATED_TIME` (`created_time` DESC),
+  KEY `INDEX_HOTSPOT` (`is_deleted`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT='draft box';
+*/
diff --git a/pkg/dal/models/news_spider_do.go b/pkg/dal/models/news_spider_do.go
new file mode 100644
index 0000000..c121271
--- /dev/null
+++ b/pkg/dal/models/news_spider_do.go
@@ -0,0 +1,110 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameNewsSpider = "news_spider" //news article table (spider)
+
+const (
+	NEWS_SPIDER_COLUMN_ID           = "id"
+	NEWS_SPIDER_COLUMN_ORG_ID       = "org_id"
+	NEWS_SPIDER_COLUMN_TAG          = "tag"
+	NEWS_SPIDER_COLUMN_CATEGORY     = "category"
+	NEWS_SPIDER_COLUMN_MAIN_TITLE   = "main_title"
+	NEWS_SPIDER_COLUMN_SUB_TITLE    = "sub_title"
+	NEWS_SPIDER_COLUMN_SUMMARY      = "summary"
+	NEWS_SPIDER_COLUMN_KEYWORDS     = "keywords"
+	NEWS_SPIDER_COLUMN_SEO_KEYWORDS = "seo_keywords"
+	NEWS_SPIDER_COLUMN_URL          = "url"
+	NEWS_SPIDER_COLUMN_IMAGE_URL    = "image_url"
+	NEWS_SPIDER_COLUMN_CONTENT      = "content"
+	NEWS_SPIDER_COLUMN_IS_HOTSPOT   = "is_hotspot"
+	NEWS_SPIDER_COLUMN_IS_DELETED   = "is_deleted"
+	NEWS_SPIDER_COLUMN_CREATED_TIME = "created_time"
+	NEWS_SPIDER_COLUMN_UPDATED_TIME = "updated_time"
+	NEWS_SPIDER_COLUMN_EXTRA_DATA   = "extra_data"
+)
+
+type NewsSpiderDO struct {
+	Id          int64    `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	OrgId       int64    `json:"org_id" db:"org_id" bson:"org_id"`                                    //news sync ID
+	Tag         string   `json:"tag" db:"tag" bson:"tag"`                                             //article tag
+	Category    string   `json:"category" db:"category" bson:"category"`                              //category
+	MainTitle   string   `json:"main_title" db:"main_title" bson:"main_title"`                        //main title
+	SubTitle    string   `json:"sub_title" db:"sub_title" bson:"sub_title"`                           //subtitle
+	Summary     string   `json:"summary" db:"summary" sqlca:"isnull" bson:"summary"`                  //summary
+	Keywords    string   `json:"keywords" db:"keywords" sqlca:"isnull" bson:"keywords"`               //article keywords
+	SeoKeywords string   `json:"seo_keywords" db:"seo_keywords" sqlca:"isnull" bson:"seo_keywords"`   //SEO keywords
+	Url         string   `json:"url" db:"url" sqlca:"isnull" bson:"url"`                              //article URL
+	ImageUrl    string   `json:"image_url" db:"image_url" sqlca:"isnull" bson:"image_url"`            //image URL
+	Content     string   `json:"content" db:"content" sqlca:"isnull" bson:"content"`                  //article content
+	IsHotspot   bool     `json:"is_hotspot" db:"is_hotspot" bson:"is_hotspot"`                        //hotspot flag (0=no 1=yes)
+	IsDeleted   bool     `json:"is_deleted" db:"is_deleted" bson:"is_deleted"`                        //deleted flag (0=no 1=yes)
+	CreatedTime string   `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //data creation time
+	UpdatedTime string   `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //data update time
+	ExtraData   struct{} `json:"extra_data" db:"extra_data" sqlca:"isnull" bson:"extra_data"`         //extra data (JSON)
+}
+
+func (do *NewsSpiderDO) GetId() int64            { return do.Id }
+func (do *NewsSpiderDO) SetId(v int64)           { do.Id = v }
+func (do *NewsSpiderDO) GetOrgId() int64         { return do.OrgId }
+func (do *NewsSpiderDO) SetOrgId(v int64)        { do.OrgId = v }
+func (do *NewsSpiderDO) GetTag() string          { return do.Tag }
+func (do *NewsSpiderDO) SetTag(v string)         { do.Tag = v }
+func (do *NewsSpiderDO) GetCategory() string     { return do.Category }
+func (do *NewsSpiderDO) SetCategory(v string)    { do.Category = v }
+func (do *NewsSpiderDO) GetMainTitle() string    { return do.MainTitle }
+func (do *NewsSpiderDO) SetMainTitle(v string)   { do.MainTitle = v }
+func (do *NewsSpiderDO) GetSubTitle() string     { return do.SubTitle }
+func (do *NewsSpiderDO) SetSubTitle(v string)    { do.SubTitle = v }
+func (do *NewsSpiderDO) GetSummary() string      { return do.Summary }
+func (do *NewsSpiderDO) SetSummary(v string)     { do.Summary = v }
+func (do *NewsSpiderDO) GetKeywords() string     { return do.Keywords }
+func (do *NewsSpiderDO) SetKeywords(v string)    { do.Keywords = v }
+func (do *NewsSpiderDO) GetSeoKeywords() string  { return do.SeoKeywords }
+func (do *NewsSpiderDO) SetSeoKeywords(v string) { do.SeoKeywords = v }
+func (do *NewsSpiderDO) GetUrl() string          { return do.Url }
+func (do *NewsSpiderDO) SetUrl(v string)         { do.Url = v }
+func (do *NewsSpiderDO) GetImageUrl() string     { return do.ImageUrl }
+func (do *NewsSpiderDO) SetImageUrl(v string)    { do.ImageUrl = v }
+func (do *NewsSpiderDO) GetContent() string      { return do.Content }
+func (do *NewsSpiderDO) SetContent(v string)     { do.Content = v }
+func (do *NewsSpiderDO) GetIsHotspot() bool      { return do.IsHotspot }
+func (do *NewsSpiderDO) SetIsHotspot(v bool)     { do.IsHotspot = v }
+func (do *NewsSpiderDO) GetIsDeleted() bool      { return do.IsDeleted }
+func (do *NewsSpiderDO) SetIsDeleted(v bool)     { do.IsDeleted = v }
+func (do *NewsSpiderDO) GetCreatedTime() string  { return do.CreatedTime }
+func (do *NewsSpiderDO) SetCreatedTime(v string) { do.CreatedTime = v }
+func (do *NewsSpiderDO) GetUpdatedTime() string  { return do.UpdatedTime }
+func (do *NewsSpiderDO) SetUpdatedTime(v string) { do.UpdatedTime = v }
+func (do *NewsSpiderDO) GetExtraData() struct{}  { return do.ExtraData }
+func (do *NewsSpiderDO) SetExtraData(v struct{}) { do.ExtraData = v }
+
+/*
+CREATE TABLE `news_spider` (
+  `id` bigint NOT NULL AUTO_INCREMENT COMMENT 'auto-increment ID',
+  `org_id` bigint NOT NULL DEFAULT '0' COMMENT 'news sync ID',
+  `tag` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'article tag',
+  `category` varchar(256) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'category',
+  `main_title` varchar(256) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'main title',
+  `sub_title` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'subtitle',
+  `summary` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT 'summary',
+  `keywords` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT 'article keywords',
+  `seo_keywords` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT 'SEO keywords',
+  `url` varchar(2048) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'article URL',
+  `image_url` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT 'image URL',
+  `content` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT 'article content',
+  `is_hotspot` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'hotspot flag (0=no 1=yes)',
+  `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'deleted flag (0=no 1=yes)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'data creation time',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'data update time',
+  `extra_data` json DEFAULT NULL COMMENT 'extra data (JSON)',
+  PRIMARY KEY (`id`),
+  KEY `INDEX_MAIN_TITLE` (`main_title`),
+  KEY `INDEX_SUB_TITLE` (`sub_title`),
+  KEY `INDEX_CREATED_TIME` (`created_time` DESC),
+  KEY `INDEX_TAG` (`tag`),
+  KEY `INDEX_UPDATED_TIME` (`updated_time` DESC),
+  KEY `INDEX_NEWS_ID` (`org_id`)
+) ENGINE=InnoDB AUTO_INCREMENT=196 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT='news article table (spider)';
+*/
diff --git a/pkg/dal/models/news_subscribe_do.go b/pkg/dal/models/news_subscribe_do.go
new file mode 100644
index 0000000..ac999a1
--- /dev/null
+++ b/pkg/dal/models/news_subscribe_do.go
@@ -0,0 +1,65 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameNewsSubscribe = "news_subscribe" //
+
+const (
+	NEWS_SUBSCRIBE_COLUMN_ID           = "id"
+	NEWS_SUBSCRIBE_COLUMN_NEWS_ID      = "news_id"
+	NEWS_SUBSCRIBE_COLUMN_NEWS_SUBJECT = "news_subject"
+	NEWS_SUBSCRIBE_COLUMN_NEWS_URL     = "news_url"
+	NEWS_SUBSCRIBE_COLUMN_IS_PUSHED    = "is_pushed"
+	NEWS_SUBSCRIBE_COLUMN_IS_DELETED   = "is_deleted"
+	NEWS_SUBSCRIBE_COLUMN_CREATED_TIME = "created_time"
+	NEWS_SUBSCRIBE_COLUMN_UPDATED_TIME = "updated_time"
+	NEWS_SUBSCRIBE_COLUMN_EXTRA_DATA   = "extra_data"
+)
+
+type NewsSubscribeDO struct {
+	Id          int64    `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	NewsId      int64    `json:"news_id" db:"news_id" bson:"news_id"`                                 //subscribed news ID (news table id column)
+	NewsSubject string   `json:"news_subject" db:"news_subject" bson:"news_subject"`                  //mail subject
+	NewsUrl     string   `json:"news_url" db:"news_url" bson:"news_url"`                              //subscription push URL
+	IsPushed    bool     `json:"is_pushed" db:"is_pushed" bson:"is_pushed"`                           //pushed flag (0=no 1=yes)
+	IsDeleted   bool     `json:"is_deleted" db:"is_deleted" bson:"is_deleted"`                        //deleted flag (0=no 1=yes)
+	CreatedTime string   `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //creation time
+	UpdatedTime string   `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //update time
+	ExtraData   struct{} `json:"extra_data" db:"extra_data" sqlca:"isnull" bson:"extra_data"`         //extra data (JSON)
+}
+
+func (do *NewsSubscribeDO) GetId() int64            { return do.Id }
+func (do *NewsSubscribeDO) SetId(v int64)           { do.Id = v }
+func (do *NewsSubscribeDO) GetNewsId() int64        { return do.NewsId }
+func (do *NewsSubscribeDO) SetNewsId(v int64)       { do.NewsId = v }
+func (do *NewsSubscribeDO) GetNewsSubject() string  { return do.NewsSubject }
+func (do *NewsSubscribeDO) SetNewsSubject(v string) { do.NewsSubject = v }
+func (do *NewsSubscribeDO) GetNewsUrl() string      { return do.NewsUrl }
+func (do *NewsSubscribeDO) SetNewsUrl(v string)     { do.NewsUrl = v }
+func (do *NewsSubscribeDO) GetIsPushed() bool       { return do.IsPushed }
+func (do *NewsSubscribeDO) SetIsPushed(v bool)      { do.IsPushed = v }
+func (do *NewsSubscribeDO) GetIsDeleted() bool      { return do.IsDeleted }
+func (do *NewsSubscribeDO) SetIsDeleted(v bool)     { do.IsDeleted = v }
+func (do *NewsSubscribeDO) GetCreatedTime() string  { return do.CreatedTime }
+func (do *NewsSubscribeDO) SetCreatedTime(v string) { do.CreatedTime = v }
+func (do *NewsSubscribeDO) GetUpdatedTime() string  { return do.UpdatedTime }
+func (do *NewsSubscribeDO) SetUpdatedTime(v string) { do.UpdatedTime = v }
+func (do *NewsSubscribeDO) GetExtraData() struct{}  { return do.ExtraData }
+func (do *NewsSubscribeDO) SetExtraData(v struct{}) { do.ExtraData = v }
+
+/*
+CREATE TABLE `news_subscribe` (
+  `id` bigint NOT NULL AUTO_INCREMENT COMMENT 'auto-increment ID',
+  `news_id` bigint NOT NULL COMMENT 'subscribed news ID (news table id column)',
+  `news_subject` varchar(512) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'mail subject',
+  `news_url` varchar(1024) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'subscription push URL',
+  `is_pushed` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'pushed flag (0=no 1=yes)',
+  `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'deleted flag (0=no 1=yes)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
+  `extra_data` json DEFAULT NULL COMMENT 'extra data (JSON)',
+  PRIMARY KEY (`id`),
+  KEY `INDEX_NEWS_ID` (`news_id`,`is_deleted`)
+) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+*/
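Rows in news_subscribe behave like a push queue keyed by is_pushed. A minimal sketch of flipping the flag after a successful push, under the same database/sql assumptions as the login example:

package dal

import (
	"database/sql"
	"fmt"

	"intent-system/pkg/dal/models"
)

// MarkPushed marks one live (not soft-deleted) subscription row as pushed.
func MarkPushed(db *sql.DB, newsId int64) error {
	query := fmt.Sprintf("UPDATE %s SET %s = 1 WHERE %s = ? AND %s = 0",
		models.TableNameNewsSubscribe,
		models.NEWS_SUBSCRIBE_COLUMN_IS_PUSHED,
		models.NEWS_SUBSCRIBE_COLUMN_NEWS_ID,
		models.NEWS_SUBSCRIBE_COLUMN_IS_DELETED)
	_, err := db.Exec(query, newsId)
	return err
}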
diff --git a/pkg/dal/models/oper_log_do.go b/pkg/dal/models/oper_log_do.go
new file mode 100644
index 0000000..a79afc7
--- /dev/null
+++ b/pkg/dal/models/oper_log_do.go
@@ -0,0 +1,54 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameOperLog = "oper_log" //
+
+const (
+	OPER_LOG_COLUMN_ID           = "id"
+	OPER_LOG_COLUMN_OPER_USER    = "oper_user"
+	OPER_LOG_COLUMN_OPER_TYPE    = "oper_type"
+	OPER_LOG_COLUMN_OPER_TIME    = "oper_time"
+	OPER_LOG_COLUMN_OPER_CONTENT = "oper_content"
+	OPER_LOG_COLUMN_CREATED_TIME = "created_time"
+	OPER_LOG_COLUMN_UPDATED_TIME = "updated_time"
+)
+
+type OperLogDO struct {
+	Id          int32  `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	OperUser    string `json:"oper_user" db:"oper_user" bson:"oper_user"`                           //operator
+	OperType    int8   `json:"oper_type" db:"oper_type" bson:"oper_type"`                           //operation type (1=home 2=system management 3=storage management 4=resource management 5=alert center)
+	OperTime    string `json:"oper_time" db:"oper_time" bson:"oper_time"`                           //operation time
+	OperContent string `json:"oper_content" db:"oper_content" bson:"oper_content"`                  //operation content
+	CreatedTime string `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //creation time
+	UpdatedTime string `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //update time
+}
+
+func (do *OperLogDO) GetId() int32            { return do.Id }
+func (do *OperLogDO) SetId(v int32)           { do.Id = v }
+func (do *OperLogDO) GetOperUser() string     { return do.OperUser }
+func (do *OperLogDO) SetOperUser(v string)    { do.OperUser = v }
+func (do *OperLogDO) GetOperType() int8       { return do.OperType }
+func (do *OperLogDO) SetOperType(v int8)      { do.OperType = v }
+func (do *OperLogDO) GetOperTime() string     { return do.OperTime }
+func (do *OperLogDO) SetOperTime(v string)    { do.OperTime = v }
+func (do *OperLogDO) GetOperContent() string  { return do.OperContent }
+func (do *OperLogDO) SetOperContent(v string) { do.OperContent = v }
+func (do *OperLogDO) GetCreatedTime() string  { return do.CreatedTime }
+func (do *OperLogDO) SetCreatedTime(v string) { do.CreatedTime = v }
+func (do *OperLogDO) GetUpdatedTime() string  { return do.UpdatedTime }
+func (do *OperLogDO) SetUpdatedTime(v string) { do.UpdatedTime = v }
+
+/*
+CREATE TABLE `oper_log` (
+  `id` int NOT NULL AUTO_INCREMENT COMMENT 'auto-increment ID',
+  `oper_user` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'operator',
+  `oper_type` tinyint(1) NOT NULL COMMENT 'operation type (1=home 2=system management 3=storage management 4=resource management 5=alert center)',
+  `oper_time` timestamp NOT NULL COMMENT 'operation time',
+  `oper_content` varchar(1024) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'operation content',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
+  PRIMARY KEY (`id`) USING BTREE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC;
+*/
diff --git a/pkg/dal/models/privilege_do.go b/pkg/dal/models/privilege_do.go
new file mode 100644
index 0000000..fd26247
--- /dev/null
+++ b/pkg/dal/models/privilege_do.go
@@ -0,0 +1,74 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNamePrivilege = "privilege" //role-menu relation table
+
+const (
+	PRIVILEGE_COLUMN_ID           = "id"
+	PRIVILEGE_COLUMN_CATEGORY     = "category"
+	PRIVILEGE_COLUMN_NAME         = "name"
+	PRIVILEGE_COLUMN_LABEL        = "label"
+	PRIVILEGE_COLUMN_PATH         = "path"
+	PRIVILEGE_COLUMN_CHILDREN     = "children"
+	PRIVILEGE_COLUMN_IS_INHERENT  = "is_inherent"
+	PRIVILEGE_COLUMN_REMARK       = "remark"
+	PRIVILEGE_COLUMN_DELETED      = "deleted"
+	PRIVILEGE_COLUMN_CREATED_TIME = "created_time"
+	PRIVILEGE_COLUMN_UPDATED_TIME = "updated_time"
+)
+
+type PrivilegeDO struct {
+	Id          int32         `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	Category    int8          `json:"category" db:"category" bson:"category"`                              //privilege category (reserved field)
+	Name        string        `json:"name" db:"name" bson:"name"`                                          //privilege name
+	Label       string        `json:"label" db:"label" bson:"label"`                                       //privilege label
+	Path        string        `json:"path" db:"path" bson:"path"`                                          //privilege access path
+	Children    TreePrivilege `json:"children" db:"children" sqlca:"isnull" bson:"children"`               //child privilege tree
+	IsInherent  bool          `json:"is_inherent" db:"is_inherent" bson:"is_inherent"`                     //inherent privilege flag (0=no 1=yes)
+	Remark      string        `json:"remark" db:"remark" bson:"remark"`                                    //privilege remark
+	Deleted     bool          `json:"deleted" db:"deleted" bson:"deleted"`                                 //deleted flag (0=no 1=yes)
+	CreatedTime string        `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //creation time
+	UpdatedTime string        `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //update time
+}
+
+func (do *PrivilegeDO) GetId() int32                { return do.Id }
+func (do *PrivilegeDO) SetId(v int32)               { do.Id = v }
+func (do *PrivilegeDO) GetCategory() int8           { return do.Category }
+func (do *PrivilegeDO) SetCategory(v int8)          { do.Category = v }
+func (do *PrivilegeDO) GetName() string             { return do.Name }
+func (do *PrivilegeDO) SetName(v string)            { do.Name = v }
+func (do *PrivilegeDO) GetLabel() string            { return do.Label }
+func (do *PrivilegeDO) SetLabel(v string)           { do.Label = v }
+func (do *PrivilegeDO) GetPath() string             { return do.Path }
+func (do *PrivilegeDO) SetPath(v string)            { do.Path = v }
+func (do *PrivilegeDO) GetChildren() TreePrivilege  { return do.Children }
+func (do *PrivilegeDO) SetChildren(v TreePrivilege) { do.Children = v }
+func (do *PrivilegeDO) GetIsInherent() bool         { return do.IsInherent }
+func (do *PrivilegeDO) SetIsInherent(v bool)        { do.IsInherent = v }
+func (do *PrivilegeDO) GetRemark() string           { return do.Remark }
+func (do *PrivilegeDO) SetRemark(v string)          { do.Remark = v }
+func (do *PrivilegeDO) GetDeleted() bool            { return do.Deleted }
+func (do *PrivilegeDO) SetDeleted(v bool)           { do.Deleted = v }
+func (do *PrivilegeDO) GetCreatedTime() string      { return do.CreatedTime }
+func (do *PrivilegeDO) SetCreatedTime(v string)     { do.CreatedTime = v }
+func (do *PrivilegeDO) GetUpdatedTime() string      { return do.UpdatedTime }
+func (do *PrivilegeDO) SetUpdatedTime(v string)     { do.UpdatedTime = v }
+
+/*
+CREATE TABLE `privilege` (
+  `id` int NOT NULL AUTO_INCREMENT COMMENT 'auto-increment ID',
+  `category` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'privilege category (reserved field)',
+  `name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'privilege name',
+  `label` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'privilege label',
+  `path` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'privilege access path',
+  `children` mediumtext COLLATE utf8mb4_unicode_ci COMMENT 'child privilege tree',
+  `is_inherent` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'inherent privilege flag (0=no 1=yes)',
+  `remark` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'privilege remark',
+  `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'deleted flag (0=no 1=yes)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
+  PRIMARY KEY (`id`) USING BTREE
+) ENGINE=InnoDB AUTO_INCREMENT=8 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC COMMENT='role-menu relation table';
+*/
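PrivilegeDO.Children holds a TreePrivilege, a recursive structure serialized into the mediumtext children column, so a single row can carry an arbitrarily deep menu tree. A sketch of a depth-first walk over the decoded tree (the helper and its package placement are illustrative):

package dal

import "intent-system/pkg/dal/models"

// WalkPrivileges visits every node of a decoded privilege tree depth-first.
func WalkPrivileges(tree models.TreePrivilege, visit func(p models.Privilege)) {
	for _, p := range tree {
		visit(p)
		WalkPrivileges(p.Children, visit)
	}
}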
diff --git a/pkg/dal/models/public_do.go b/pkg/dal/models/public_do.go
new file mode 100644
index 0000000..a42848a
--- /dev/null
+++ b/pkg/dal/models/public_do.go
@@ -0,0 +1,70 @@
+package models
+
+import "intent-system/pkg/utils"
+
+type QAState int
+
+const (
+	QAState_Drafted   QAState = 0 //saved as draft
+	QAState_Published QAState = 1 //published
+	QAState_Removed   QAState = 2 //removed from publication
+)
+
+type OperType = string
+
+const (
+	OperType_Create OperType = "create"
+	OperType_Edit   OperType = "edit"
+	OperType_Delete OperType = "delete"
+)
+
+type ChangeLog struct {
+	OperUser string   `json:"oper_user"`
+	OperTime string   `json:"oper_time"`
+	OperType OperType `json:"oper_type"`
+}
+
+func MakeChangeLog(logs []*ChangeLog, strUserName string, strOperType OperType) []*ChangeLog {
+	if logs == nil {
+		logs = make([]*ChangeLog, 0)
+	}
+	strOperTime := utils.Now()
+	logs = append(logs, &ChangeLog{
+		OperUser: strUserName,
+		OperTime: strOperTime,
+		OperType: strOperType,
+	})
+	return logs
+}
+
+type CommonExtraData struct {
+	Logs []*ChangeLog `json:"logs"`
+}
+
+type Privilege struct {
+	Label    string        `json:"label" db:"label"`
+	Name     string        `json:"name" db:"name"`
+	Path     string        `json:"path" db:"path"`
+	Children TreePrivilege `json:"children" db:"children"`
+}
+
+// privilege tree structure
+type TreePrivilege []Privilege
+
+type SubscriberExtraData struct {
+	UnsubscribeReason string `json:"unsubscribe_reason"`
+}
+
+type LanguageType = string
+
+const (
+	Language_Null LanguageType = ""
+	Language_EN   LanguageType = "en"
+	Language_CN   LanguageType = "zh-CN"
+)
+
+type TemplateType = int32
+
+const (
+	TemplateType_SubscriptionWelcome TemplateType = 1 //subscription welcome mail template
+)
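MakeChangeLog is the shared audit helper: editable DOs embed CommonExtraData, and each create/edit/delete appends one ChangeLog entry that lands in the extra_data JSON column. A small usage sketch (the sample user name and printed output are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"intent-system/pkg/dal/models"
)

func main() {
	do := &models.NewsDO{MainTitle: "Example"}
	// Append an audit entry before persisting the row.
	do.ExtraData.Logs = models.MakeChangeLog(do.ExtraData.Logs, "alice", models.OperType_Edit)
	b, _ := json.Marshal(do.ExtraData) // what the extra_data column stores
	fmt.Println(string(b))             // {"logs":[{"oper_user":"alice","oper_time":"...","oper_type":"edit"}]}
}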
diff --git a/pkg/dal/models/question_answer_do.go b/pkg/dal/models/question_answer_do.go
new file mode 100644
index 0000000..3b1eaea
--- /dev/null
+++ b/pkg/dal/models/question_answer_do.go
@@ -0,0 +1,86 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameQuestionAnswer = "question_answer" //
+
+const (
+	QUESTION_ANSWER_COLUMN_ID             = "id"
+	QUESTION_ANSWER_COLUMN_ORG_ID         = "org_id"
+	QUESTION_ANSWER_COLUMN_QUESTION       = "question"
+	QUESTION_ANSWER_COLUMN_ANSWER         = "answer"
+	QUESTION_ANSWER_COLUMN_STATE          = "state"
+	QUESTION_ANSWER_COLUMN_LANGUAGE       = "language"
+	QUESTION_ANSWER_COLUMN_IS_OVERWRITTEN = "is_overwritten"
+	QUESTION_ANSWER_COLUMN_IS_REPLICATE   = "is_replicate"
+	QUESTION_ANSWER_COLUMN_IS_DELETED     = "is_deleted"
+	QUESTION_ANSWER_COLUMN_DATA_TIME      = "data_time"
+	QUESTION_ANSWER_COLUMN_CREATED_TIME   = "created_time"
+	QUESTION_ANSWER_COLUMN_UPDATED_TIME   = "updated_time"
+	QUESTION_ANSWER_COLUMN_EXTRA_DATA     = "extra_data"
+)
+
+type QuestionAnswerDO struct {
+	Id            int64           `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	OrgId         int64           `json:"org_id" db:"org_id" bson:"org_id"`                                    //Q&A source ID (sync ID)
+	Question      string          `json:"question" db:"question" sqlca:"isnull" bson:"question"`               //question
+	Answer        string          `json:"answer" db:"answer" sqlca:"isnull" bson:"answer"`                     //answer
+	State         QAState         `json:"state" db:"state" bson:"state"`                                       //publication state (0=draft 1=published 2=removed)
+	Language      LanguageType    `json:"language" db:"language" bson:"language"`                              //language (zh-CN=Chinese en=English)
+	IsOverwritten bool            `json:"is_overwritten" db:"is_overwritten" bson:"is_overwritten"`            //overwritten flag (0=no 1=yes)
+	IsReplicate   bool            `json:"is_replicate" db:"is_replicate" bson:"is_replicate"`                  //replica flag (0=no 1=yes)
+	IsDeleted     bool            `json:"is_deleted" db:"is_deleted" bson:"is_deleted"`                        //deleted flag (0=no 1=yes)
+	DataTime      string          `json:"data_time" db:"data_time" bson:"data_time"`                           //data generation time
+	CreatedTime   string          `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //creation time
+	UpdatedTime   string          `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //update time
+	ExtraData     CommonExtraData `json:"extra_data" db:"extra_data" sqlca:"isnull" bson:"extra_data"`         //extra data (JSON)
+}
+
+func (do *QuestionAnswerDO) GetId() int64                   { return do.Id }
+func (do *QuestionAnswerDO) SetId(v int64)                  { do.Id = v }
+func (do *QuestionAnswerDO) GetOrgId() int64                { return do.OrgId }
+func (do *QuestionAnswerDO) SetOrgId(v int64)               { do.OrgId = v }
+func (do *QuestionAnswerDO) GetQuestion() string            { return do.Question }
+func (do *QuestionAnswerDO) SetQuestion(v string)           { do.Question = v }
+func (do *QuestionAnswerDO) GetAnswer() string              { return do.Answer }
+func (do *QuestionAnswerDO) SetAnswer(v string)             { do.Answer = v }
+func (do *QuestionAnswerDO) GetState() QAState              { return do.State }
+func (do *QuestionAnswerDO) SetState(v QAState)             { do.State = v }
+func (do *QuestionAnswerDO) GetLanguage() LanguageType      { return do.Language }
+func (do *QuestionAnswerDO) SetLanguage(v LanguageType)     { do.Language = v }
+func (do *QuestionAnswerDO) GetIsOverwritten() bool         { return do.IsOverwritten }
+func (do *QuestionAnswerDO) SetIsOverwritten(v bool)        { do.IsOverwritten = v }
+func (do *QuestionAnswerDO) GetIsReplicate() bool           { return do.IsReplicate }
+func (do *QuestionAnswerDO) SetIsReplicate(v bool)          { do.IsReplicate = v }
+func (do *QuestionAnswerDO) GetIsDeleted() bool             { return do.IsDeleted }
+func (do *QuestionAnswerDO) SetIsDeleted(v bool)            { do.IsDeleted = v }
+func (do *QuestionAnswerDO) GetDataTime() string            { return do.DataTime }
+func (do *QuestionAnswerDO) SetDataTime(v string)           { do.DataTime = v }
+func (do *QuestionAnswerDO) GetCreatedTime() string         { return do.CreatedTime }
+func (do *QuestionAnswerDO) SetCreatedTime(v string)        { do.CreatedTime = v }
+func (do *QuestionAnswerDO) GetUpdatedTime() string         { return do.UpdatedTime }
+func (do *QuestionAnswerDO) SetUpdatedTime(v string)        { do.UpdatedTime = v }
+func (do *QuestionAnswerDO) GetExtraData() CommonExtraData  { return do.ExtraData }
+func (do *QuestionAnswerDO) SetExtraData(v CommonExtraData) { do.ExtraData = v }
+
+/*
+CREATE TABLE `question_answer` (
+  `id` bigint NOT NULL AUTO_INCREMENT COMMENT 'auto-increment ID',
+  `org_id` bigint NOT NULL DEFAULT '0' COMMENT 'Q&A source ID (sync ID)',
+  `question` mediumtext CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT 'question',
+  `answer` mediumtext COLLATE utf8mb4_unicode_ci COMMENT 'answer',
+  `state` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'publication state (0=draft 1=published 2=removed)',
+  `language` varchar(32) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'language (zh-CN=Chinese en=English)',
+  `is_overwritten` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'overwritten flag (0=no 1=yes)',
+  `is_replicate` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'replica flag (0=no 1=yes)',
+  `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'deleted flag (0=no 1=yes)',
+  `data_time` timestamp NOT NULL COMMENT 'data generation time',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
+  `updated_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
+  `extra_data` json DEFAULT NULL COMMENT 'extra data (JSON)',
+  PRIMARY KEY (`id`),
+  KEY `INDEX_CREATED_TIME` (`created_time` DESC),
+  KEY `INDEX_UPDATED_TIME` (`updated_time` DESC)
+) ENGINE=InnoDB AUTO_INCREMENT=1454 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+*/
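The state column follows the QAState enum from public_do.go. The values suggest a draft -> published -> removed lifecycle with republish allowed; the guard below encodes that reading as an assumption, since neither the schema nor the generated code enforces transitions:

package dal

import "intent-system/pkg/dal/models"

// CanTransition encodes an assumed Q&A lifecycle; adjust if the service
// layer permits other moves.
func CanTransition(from, to models.QAState) bool {
	switch from {
	case models.QAState_Drafted:
		return to == models.QAState_Published
	case models.QAState_Published:
		return to == models.QAState_Removed
	case models.QAState_Removed:
		return to == models.QAState_Published
	}
	return false
}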
diff --git a/pkg/dal/models/question_draft_do.go b/pkg/dal/models/question_draft_do.go
new file mode 100644
index 0000000..de97479
--- /dev/null
+++ b/pkg/dal/models/question_draft_do.go
@@ -0,0 +1,81 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameQuestionDraft = "question_draft" //
+
+const (
+	QUESTION_DRAFT_COLUMN_ID             = "id"
+	QUESTION_DRAFT_COLUMN_QA_ID          = "qa_id"
+	QUESTION_DRAFT_COLUMN_ORG_ID         = "org_id"
+	QUESTION_DRAFT_COLUMN_QUESTION       = "question"
+	QUESTION_DRAFT_COLUMN_ANSWER         = "answer"
+	QUESTION_DRAFT_COLUMN_LANGUAGE       = "language"
+	QUESTION_DRAFT_COLUMN_IS_OVERWRITTEN = "is_overwritten"
+	QUESTION_DRAFT_COLUMN_IS_REPLICATE   = "is_replicate"
+	QUESTION_DRAFT_COLUMN_IS_DELETED     = "is_deleted"
+	QUESTION_DRAFT_COLUMN_CREATED_TIME   = "created_time"
+	QUESTION_DRAFT_COLUMN_UPDATED_TIME   = "updated_time"
+	QUESTION_DRAFT_COLUMN_EXTRA_DATA     = "extra_data"
+)
+
+type QuestionDraftDO struct {
+	Id            int64           `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	QaId          int64           `json:"qa_id" db:"qa_id" bson:"qa_id"`                                       //source ID (question_answer table id column)
+	OrgId         int64           `json:"org_id" db:"org_id" bson:"org_id"`                                    //Q&A sync ID
+	Question      string          `json:"question" db:"question" bson:"question"`                              //question
+	Answer        string          `json:"answer" db:"answer" sqlca:"isnull" bson:"answer"`                     //answer
+	Language      LanguageType    `json:"language" db:"language" bson:"language"`                              //language (zh-CN=Chinese en=English)
+	IsOverwritten bool            `json:"is_overwritten" db:"is_overwritten" bson:"is_overwritten"`            //overwritten flag (0=no 1=yes)
+	IsReplicate   bool            `json:"is_replicate" db:"is_replicate" bson:"is_replicate"`                  //replica flag (0=no 1=yes)
+	IsDeleted     bool            `json:"is_deleted" db:"is_deleted" bson:"is_deleted"`                        //deleted flag (0=no 1=yes)
+	CreatedTime   string          `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //creation time
+	UpdatedTime   string          `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //update time
+	ExtraData     CommonExtraData `json:"extra_data" db:"extra_data" sqlca:"isnull" bson:"extra_data"`         //extra data (JSON)
+}
+
+func (do *QuestionDraftDO) GetId() int64                   { return do.Id }
+func (do *QuestionDraftDO) SetId(v int64)                  { do.Id = v }
+func (do *QuestionDraftDO) GetQaId() int64                 { return do.QaId }
+func (do *QuestionDraftDO) SetQaId(v int64)                { do.QaId = v }
+func (do *QuestionDraftDO) GetOrgId() int64                { return do.OrgId }
+func (do *QuestionDraftDO) SetOrgId(v int64)               { do.OrgId = v }
+func (do *QuestionDraftDO) GetQuestion() string            { return do.Question }
+func (do *QuestionDraftDO) SetQuestion(v string)           { do.Question = v }
+func (do *QuestionDraftDO) GetAnswer() string              { return do.Answer }
+func (do *QuestionDraftDO) SetAnswer(v string)             { do.Answer = v }
+func (do *QuestionDraftDO) GetLanguage() LanguageType      { return do.Language }
+func (do *QuestionDraftDO) SetLanguage(v LanguageType)     { do.Language = v }
+func (do *QuestionDraftDO) GetIsOverwritten() bool         { return do.IsOverwritten }
+func (do *QuestionDraftDO) SetIsOverwritten(v bool)        { do.IsOverwritten = v }
+func (do *QuestionDraftDO) GetIsReplicate() bool           { return do.IsReplicate }
+func (do *QuestionDraftDO) SetIsReplicate(v bool)          { do.IsReplicate = v }
+func (do *QuestionDraftDO) GetIsDeleted() bool             { return do.IsDeleted }
+func (do *QuestionDraftDO) SetIsDeleted(v bool)            { do.IsDeleted = v }
+func (do *QuestionDraftDO) GetCreatedTime() string         { return do.CreatedTime }
+func (do *QuestionDraftDO) SetCreatedTime(v string)        { do.CreatedTime = v }
+func (do *QuestionDraftDO) GetUpdatedTime() string         { return do.UpdatedTime }
+func (do *QuestionDraftDO) SetUpdatedTime(v string)        { do.UpdatedTime = v }
+func (do *QuestionDraftDO) GetExtraData() CommonExtraData  { return do.ExtraData }
+func (do *QuestionDraftDO) SetExtraData(v CommonExtraData) { do.ExtraData = v }
+
+/*
+CREATE TABLE `question_draft` (
+  `id` bigint NOT NULL AUTO_INCREMENT COMMENT 'auto-increment ID',
+  `qa_id` bigint NOT NULL DEFAULT '0' COMMENT 'source ID (question_answer table id column)',
+  `org_id` bigint NOT NULL DEFAULT '0' COMMENT 'Q&A sync ID',
+  `question` varchar(1024) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'question',
+  `answer` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT 'answer',
+  `language` varchar(32) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'language (zh-CN=Chinese en=English)',
+  `is_overwritten` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'overwritten flag (0=no 1=yes)',
+  `is_replicate` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'replica flag (0=no 1=yes)',
+  `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'deleted flag (0=no 1=yes)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
+  `updated_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
+  `extra_data` json DEFAULT NULL COMMENT 'extra data (JSON)',
+  PRIMARY KEY (`id`),
+  KEY `INDEX_CREATED_TIME` (`created_time` DESC),
+  KEY `INDEX_UPDATED_TIME` (`updated_time` DESC)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+*/
diff --git a/pkg/dal/models/role_do.go b/pkg/dal/models/role_do.go
new file mode 100644
index 0000000..593ac48
--- /dev/null
+++ b/pkg/dal/models/role_do.go
@@ -0,0 +1,70 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameRole = "role" //role info table
+
+const (
+	ROLE_COLUMN_ID           = "id"
+	ROLE_COLUMN_ROLE_NAME    = "role_name"
+	ROLE_COLUMN_ROLE_ALIAS   = "role_alias"
+	ROLE_COLUMN_CREATE_USER  = "create_user"
+	ROLE_COLUMN_EDIT_USER    = "edit_user"
+	ROLE_COLUMN_REMARK       = "remark"
+	ROLE_COLUMN_IS_INHERENT  = "is_inherent"
+	ROLE_COLUMN_DELETED      = "deleted"
+	ROLE_COLUMN_CREATED_TIME = "created_time"
+	ROLE_COLUMN_UPDATED_TIME = "updated_time"
+)
+
+type RoleDO struct {
+	Id          int32  `json:"id" db:"id" bson:"_id"`                                               //role ID (auto-increment)
+	RoleName    string `json:"role_name" db:"role_name" bson:"role_name"`                           //role name
+	RoleAlias   string `json:"role_alias" db:"role_alias" bson:"role_alias"`                        //role alias
+	CreateUser  string `json:"create_user" db:"create_user" bson:"create_user"`                     //creator
+	EditUser    string `json:"edit_user" db:"edit_user" bson:"edit_user"`                           //last editor
+	Remark      string `json:"remark" db:"remark" bson:"remark"`                                    //remark
+	IsInherent  bool   `json:"is_inherent" db:"is_inherent" bson:"is_inherent"`                     //inherent role flag (0=custom role 1=built-in platform role)
+	Deleted     bool   `json:"deleted" db:"deleted" bson:"deleted"`                                 //deleted flag (0=no 1=yes)
+	CreatedTime string `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //creation time
+	UpdatedTime string `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //update time
+}
+
+func (do *RoleDO) GetId() int32            { return do.Id }
+func (do *RoleDO) SetId(v int32)           { do.Id = v }
+func (do *RoleDO) GetRoleName() string     { return do.RoleName }
+func (do *RoleDO) SetRoleName(v string)    { do.RoleName = v }
+func (do *RoleDO) GetRoleAlias() string    { return do.RoleAlias }
+func (do *RoleDO) SetRoleAlias(v string)   { do.RoleAlias = v }
+func (do *RoleDO) GetCreateUser() string   { return do.CreateUser }
+func (do *RoleDO) SetCreateUser(v string)  { do.CreateUser = v }
+func (do *RoleDO) GetEditUser() string     { return do.EditUser }
+func (do *RoleDO) SetEditUser(v string)    { do.EditUser = v }
+func (do *RoleDO) GetRemark() string       { return do.Remark }
+func (do *RoleDO) SetRemark(v string)      { do.Remark = v }
+func (do *RoleDO) GetIsInherent() bool     { return do.IsInherent }
+func (do *RoleDO) SetIsInherent(v bool)    { do.IsInherent = v }
+func (do *RoleDO) GetDeleted() bool        { return do.Deleted }
+func (do *RoleDO) SetDeleted(v bool)       { do.Deleted = v }
+func (do *RoleDO) GetCreatedTime() string  { return do.CreatedTime }
+func (do *RoleDO) SetCreatedTime(v string) { do.CreatedTime = v }
+func (do *RoleDO) GetUpdatedTime() string  { return do.UpdatedTime }
+func (do *RoleDO) SetUpdatedTime(v string) { do.UpdatedTime = v }
+
+/*
+CREATE TABLE `role` (
+  `id` int NOT NULL AUTO_INCREMENT COMMENT 'role ID (auto-increment)',
+  `role_name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'role name',
+  `role_alias` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'role alias',
+  `create_user` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'creator',
+  `edit_user` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'last editor',
+  `remark` varchar(256) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'remark',
+  `is_inherent` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'inherent role flag (0=custom role 1=built-in platform role)',
+  `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'deleted flag (0=no 1=yes)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
+  PRIMARY KEY (`id`) USING BTREE,
+  UNIQUE KEY `UNIQ_ROLE_NAME` (`role_name`) COMMENT 'unique constraint on role name'
+) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC COMMENT='role info table';
+*/
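
The generated table-name and column constants exist so hand-written SQL stays aligned with the schema. Below is a minimal sketch of how they might be used; it assumes the usual TableNameRole and ROLE_COLUMN_* constants generated earlier in this file, and it goes through plain database/sql with a placeholder DSN rather than the sqlca engine the project itself builds on.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
	"intent-system/pkg/dal/models"
)

func main() {
	// placeholder DSN; the service reads its own from the --dsn flag
	db, err := sql.Open("mysql", "root:123456@tcp(127.0.0.1:3306)/intent-system")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// the generated constants keep hand-written SQL aligned with the schema
	query := fmt.Sprintf("SELECT %s, %s FROM `%s` WHERE %s = ? AND %s = 0",
		models.ROLE_COLUMN_ID, models.ROLE_COLUMN_ROLE_ALIAS,
		models.TableNameRole,
		models.ROLE_COLUMN_ROLE_NAME, models.ROLE_COLUMN_DELETED)

	var do models.RoleDO
	if err = db.QueryRow(query, "admin").Scan(&do.Id, &do.RoleAlias); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("role %d alias %q\n", do.GetId(), do.GetRoleAlias())
}
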
diff --git a/pkg/dal/models/run_config_do.go b/pkg/dal/models/run_config_do.go
new file mode 100644
index 0000000..0ebfabb
--- /dev/null
+++ b/pkg/dal/models/run_config_do.go
@@ -0,0 +1,55 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameRunConfig = "run_config" //run config table
+
+const (
+	RUN_CONFIG_COLUMN_ID           = "id"
+	RUN_CONFIG_COLUMN_CONFIG_NAME  = "config_name"
+	RUN_CONFIG_COLUMN_CONFIG_KEY   = "config_key"
+	RUN_CONFIG_COLUMN_CONFIG_VALUE = "config_value"
+	RUN_CONFIG_COLUMN_REMARK       = "remark"
+	RUN_CONFIG_COLUMN_DELETED      = "deleted"
+	RUN_CONFIG_COLUMN_CREATED_TIME = "created_time"
+)
+
+type RunConfigDO struct {
+	Id          int32  `json:"id" db:"id" bson:"_id"`                                               //incr id
+	ConfigName  string `json:"config_name" db:"config_name" bson:"config_name"`                     //config name
+	ConfigKey   string `json:"config_key" db:"config_key" bson:"config_key"`                        //config key
+	ConfigValue string `json:"config_value" db:"config_value" sqlca:"isnull" bson:"config_value"`   //config value
+	Remark      string `json:"remark" db:"remark" sqlca:"isnull" bson:"remark"`                     //remark
+	Deleted     bool   `json:"deleted" db:"deleted" bson:"deleted"`                                 //is deleted(0=false 1=true)
+	CreatedTime string `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //created time
+}
+
+func (do *RunConfigDO) GetId() int32            { return do.Id }
+func (do *RunConfigDO) SetId(v int32)           { do.Id = v }
+func (do *RunConfigDO) GetConfigName() string   { return do.ConfigName }
+func (do *RunConfigDO) SetConfigName(v string)  { do.ConfigName = v }
+func (do *RunConfigDO) GetConfigKey() string    { return do.ConfigKey }
+func (do *RunConfigDO) SetConfigKey(v string)   { do.ConfigKey = v }
+func (do *RunConfigDO) GetConfigValue() string  { return do.ConfigValue }
+func (do *RunConfigDO) SetConfigValue(v string) { do.ConfigValue = v }
+func (do *RunConfigDO) GetRemark() string       { return do.Remark }
+func (do *RunConfigDO) SetRemark(v string)      { do.Remark = v }
+func (do *RunConfigDO) GetDeleted() bool        { return do.Deleted }
+func (do *RunConfigDO) SetDeleted(v bool)       { do.Deleted = v }
+func (do *RunConfigDO) GetCreatedTime() string  { return do.CreatedTime }
+func (do *RunConfigDO) SetCreatedTime(v string) { do.CreatedTime = v }
+
+/*
+CREATE TABLE `run_config` (
+  `id` int NOT NULL AUTO_INCREMENT COMMENT 'incr id',
+  `config_name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'config name',
+  `config_key` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'config key',
+  `config_value` text CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT 'config value',
+  `remark` varchar(256) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'remark',
+  `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'is deleted(0=false 1=true)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'created time',
+  PRIMARY KEY (`id`) USING BTREE,
+  UNIQUE KEY `UNIQ_NAME_KEY` (`config_name`,`config_key`) USING BTREE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC COMMENT='run config table';
+*/
diff --git a/pkg/dal/models/subscriber_do.go b/pkg/dal/models/subscriber_do.go
new file mode 100644
index 0000000..587c8be
--- /dev/null
+++ b/pkg/dal/models/subscriber_do.go
@@ -0,0 +1,60 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameSubscriber = "subscriber" //
+
+const (
+	SUBSCRIBER_COLUMN_ID           = "id"
+	SUBSCRIBER_COLUMN_CUSTOMER_ID  = "customer_id"
+	SUBSCRIBER_COLUMN_EMAIL        = "email"
+	SUBSCRIBER_COLUMN_TAGS         = "tags"
+	SUBSCRIBER_COLUMN_IS_DELETED   = "is_deleted"
+	SUBSCRIBER_COLUMN_CREATED_TIME = "created_time"
+	SUBSCRIBER_COLUMN_UPDATED_TIME = "updated_time"
+	SUBSCRIBER_COLUMN_EXTRA_DATA   = "extra_data"
+)
+
+type SubscriberDO struct {
+	Id          int64               `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	CustomerId  int32               `json:"customer_id" db:"customer_id" bson:"customer_id"`                     //subscriber ID (maps to the customer table's id column; may be 0)
+	Email       string              `json:"email" db:"email" bson:"email"`                                       //subscriber email
+	Tags        []string            `json:"tags" db:"tags" bson:"tags"`                                          //subscription tags (topics)
+	IsDeleted   bool                `json:"is_deleted" db:"is_deleted" bson:"is_deleted"`                        //unsubscribed (0=no 1=yes)
+	CreatedTime string              `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //creation time
+	UpdatedTime string              `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //update time
+	ExtraData   SubscriberExtraData `json:"extra_data" db:"extra_data" sqlca:"isnull" bson:"extra_data"`         //extra data (JSON)
+}
+
+func (do *SubscriberDO) GetId() int64                       { return do.Id }
+func (do *SubscriberDO) SetId(v int64)                      { do.Id = v }
+func (do *SubscriberDO) GetCustomerId() int32               { return do.CustomerId }
+func (do *SubscriberDO) SetCustomerId(v int32)              { do.CustomerId = v }
+func (do *SubscriberDO) GetEmail() string                   { return do.Email }
+func (do *SubscriberDO) SetEmail(v string)                  { do.Email = v }
+func (do *SubscriberDO) GetTags() []string                  { return do.Tags }
+func (do *SubscriberDO) SetTags(v []string)                 { do.Tags = v }
+func (do *SubscriberDO) GetIsDeleted() bool                 { return do.IsDeleted }
+func (do *SubscriberDO) SetIsDeleted(v bool)                { do.IsDeleted = v }
+func (do *SubscriberDO) GetCreatedTime() string             { return do.CreatedTime }
+func (do *SubscriberDO) SetCreatedTime(v string)            { do.CreatedTime = v }
+func (do *SubscriberDO) GetUpdatedTime() string             { return do.UpdatedTime }
+func (do *SubscriberDO) SetUpdatedTime(v string)            { do.UpdatedTime = v }
+func (do *SubscriberDO) GetExtraData() SubscriberExtraData  { return do.ExtraData }
+func (do *SubscriberDO) SetExtraData(v SubscriberExtraData) { do.ExtraData = v }
+
+/*
+CREATE TABLE `subscriber` (
+  `id` bigint NOT NULL AUTO_INCREMENT COMMENT '自增ID',
+  `customer_id` int NOT NULL DEFAULT '0' COMMENT '订阅者ID(对应customer表id字段,可为空)',
+  `email` varchar(256) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '订阅者邮箱',
+  `tags` varchar(1024) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '订阅标签(主题)',
+  `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已取消订阅(0=否 1=是)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
+  `extra_data` json DEFAULT NULL COMMENT '附带数据(JSON)',
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `UNIQ_CUSTOMER_EMAIL` (`email`)
+) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+*/
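
One detail worth flagging in this model: SubscriberDO.Tags is a []string, while the backing `tags` column is a plain varchar(1024). How sqlca encodes the slice is not visible in this commit; with plain database/sql you would need a scanner/valuer pair along these lines (the TagList name and the comma encoding are illustrative assumptions):

package main

import (
	"database/sql/driver"
	"fmt"
	"strings"
)

// TagList bridges a []string value to a flat varchar column.
type TagList []string

// Value flattens the tags for storage.
func (t TagList) Value() (driver.Value, error) {
	return strings.Join(t, ","), nil
}

// Scan restores the slice from either []byte or string column data.
func (t *TagList) Scan(src interface{}) error {
	var s string
	switch v := src.(type) {
	case []byte:
		s = string(v)
	case string:
		s = v
	case nil:
		*t = nil
		return nil
	default:
		return fmt.Errorf("unsupported type %T for TagList", src)
	}
	if s == "" {
		*t = nil
		return nil
	}
	*t = TagList(strings.Split(s, ","))
	return nil
}

func main() {
	v, _ := TagList{"ai", "blockchain"}.Value()
	fmt.Println(v) // ai,blockchain

	var tags TagList
	_ = tags.Scan([]byte("ai,blockchain"))
	fmt.Println(tags) // [ai blockchain]
}
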
diff --git a/pkg/dal/models/tag_do.go b/pkg/dal/models/tag_do.go
new file mode 100644
index 0000000..e3ad807
--- /dev/null
+++ b/pkg/dal/models/tag_do.go
@@ -0,0 +1,59 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameTag = "tag" //
+
+const (
+	TAG_COLUMN_ID           = "id"
+	TAG_COLUMN_NAME         = "name"
+	TAG_COLUMN_NAME_CN      = "name_cn"
+	TAG_COLUMN_IS_INHERENT  = "is_inherent"
+	TAG_COLUMN_IS_DELETED   = "is_deleted"
+	TAG_COLUMN_CREATED_TIME = "created_time"
+	TAG_COLUMN_UPDATED_TIME = "updated_time"
+	TAG_COLUMN_EXTRA_DATA   = "extra_data"
+)
+
+type TagDO struct {
+	Id          int64           `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	Name        string          `json:"name" db:"name" bson:"name"`                                          //tag name
+	NameCn      string          `json:"name_cn" db:"name_cn" bson:"name_cn"`                                 //Chinese name
+	IsInherent  bool            `json:"is_inherent" db:"is_inherent" bson:"is_inherent"`                     //inherent tag (0=no 1=yes)
+	IsDeleted   bool            `json:"is_deleted" db:"is_deleted" bson:"is_deleted"`                        //deleted (0=no 1=yes)
+	CreatedTime string          `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //creation time
+	UpdatedTime string          `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //update time
+	ExtraData   CommonExtraData `json:"extra_data" db:"extra_data" sqlca:"isnull" bson:"extra_data"`         //extra data (JSON)
+}
+
+func (do *TagDO) GetId() int64                   { return do.Id }
+func (do *TagDO) SetId(v int64)                  { do.Id = v }
+func (do *TagDO) GetName() string                { return do.Name }
+func (do *TagDO) SetName(v string)               { do.Name = v }
+func (do *TagDO) GetNameCn() string              { return do.NameCn }
+func (do *TagDO) SetNameCn(v string)             { do.NameCn = v }
+func (do *TagDO) GetIsInherent() bool            { return do.IsInherent }
+func (do *TagDO) SetIsInherent(v bool)           { do.IsInherent = v }
+func (do *TagDO) GetIsDeleted() bool             { return do.IsDeleted }
+func (do *TagDO) SetIsDeleted(v bool)            { do.IsDeleted = v }
+func (do *TagDO) GetCreatedTime() string         { return do.CreatedTime }
+func (do *TagDO) SetCreatedTime(v string)        { do.CreatedTime = v }
+func (do *TagDO) GetUpdatedTime() string         { return do.UpdatedTime }
+func (do *TagDO) SetUpdatedTime(v string)        { do.UpdatedTime = v }
+func (do *TagDO) GetExtraData() CommonExtraData  { return do.ExtraData }
+func (do *TagDO) SetExtraData(v CommonExtraData) { do.ExtraData = v }
+
+/*
+CREATE TABLE `tag` (
+  `id` bigint NOT NULL AUTO_INCREMENT COMMENT '自增ID',
+  `name` varchar(32) COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '标签名',
+  `name_cn` varchar(64) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '中文名',
+  `is_inherent` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否为固有标签(0=否 1=是)',
+  `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=未删除 1=已删除)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
+  `extra_data` json DEFAULT NULL COMMENT '附带数据(JSON)',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB AUTO_INCREMENT=10 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+*/
diff --git a/pkg/dal/models/template_do.go b/pkg/dal/models/template_do.go
new file mode 100644
index 0000000..b689ee7
--- /dev/null
+++ b/pkg/dal/models/template_do.go
@@ -0,0 +1,59 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameTemplate = "template" //
+
+const (
+	TEMPLATE_COLUMN_ID            = "id"
+	TEMPLATE_COLUMN_TEMPLATE_TYPE = "template_type"
+	TEMPLATE_COLUMN_SUBJECT       = "subject"
+	TEMPLATE_COLUMN_CONTENT       = "content"
+	TEMPLATE_COLUMN_LANGUAGE      = "language"
+	TEMPLATE_COLUMN_EDITOR_USER   = "editor_user"
+	TEMPLATE_COLUMN_CREATED_TIME  = "created_time"
+	TEMPLATE_COLUMN_UPDATED_TIME  = "updated_time"
+)
+
+type TemplateDO struct {
+	Id           int32        `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	TemplateType TemplateType `json:"template_type" db:"template_type" bson:"template_type"`               //template type (1=subscription welcome email [English] 2=subscription welcome email [Chinese])
+	Subject      string       `json:"subject" db:"subject" bson:"subject"`                                 //subject
+	Content      string       `json:"content" db:"content" sqlca:"isnull" bson:"content"`                  //content
+	Language     string       `json:"language" db:"language" bson:"language"`                              //language (en=English zh-CN=Chinese)
+	EditorUser   string       `json:"editor_user" db:"editor_user" bson:"editor_user"`                     //last editor
+	CreatedTime  string       `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //creation time
+	UpdatedTime  string       `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //update time
+}
+
+func (do *TemplateDO) GetId() int32                   { return do.Id }
+func (do *TemplateDO) SetId(v int32)                  { do.Id = v }
+func (do *TemplateDO) GetTemplateType() TemplateType  { return do.TemplateType }
+func (do *TemplateDO) SetTemplateType(v TemplateType) { do.TemplateType = v }
+func (do *TemplateDO) GetSubject() string             { return do.Subject }
+func (do *TemplateDO) SetSubject(v string)            { do.Subject = v }
+func (do *TemplateDO) GetContent() string             { return do.Content }
+func (do *TemplateDO) SetContent(v string)            { do.Content = v }
+func (do *TemplateDO) GetLanguage() string            { return do.Language }
+func (do *TemplateDO) SetLanguage(v string)           { do.Language = v }
+func (do *TemplateDO) GetEditorUser() string          { return do.EditorUser }
+func (do *TemplateDO) SetEditorUser(v string)         { do.EditorUser = v }
+func (do *TemplateDO) GetCreatedTime() string         { return do.CreatedTime }
+func (do *TemplateDO) SetCreatedTime(v string)        { do.CreatedTime = v }
+func (do *TemplateDO) GetUpdatedTime() string         { return do.UpdatedTime }
+func (do *TemplateDO) SetUpdatedTime(v string)        { do.UpdatedTime = v }
+
+/*
+CREATE TABLE `template` (
+  `id` int NOT NULL AUTO_INCREMENT COMMENT '自增ID',
+  `template_type` int NOT NULL COMMENT '模板类型(1=订阅欢迎邮件[英文] 2=订阅欢迎邮件[中文])',
+  `subject` varchar(512) COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '主题',
+  `content` mediumtext CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci COMMENT '内容',
+  `language` varchar(64) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '语言(英语=en 中文=zh-CN)',
+  `editor_user` varchar(64) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '最后编辑人',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+*/
diff --git a/pkg/dal/models/user_do.go b/pkg/dal/models/user_do.go
new file mode 100644
index 0000000..57d4796
--- /dev/null
+++ b/pkg/dal/models/user_do.go
@@ -0,0 +1,110 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameUser = "user" //登录账户信息表
+
+const (
+	USER_COLUMN_ID           = "id"
+	USER_COLUMN_USER_NAME    = "user_name"
+	USER_COLUMN_USER_ALIAS   = "user_alias"
+	USER_COLUMN_PASSWORD     = "password"
+	USER_COLUMN_SALT         = "salt"
+	USER_COLUMN_PHONE_NUMBER = "phone_number"
+	USER_COLUMN_IS_ADMIN     = "is_admin"
+	USER_COLUMN_EMAIL        = "email"
+	USER_COLUMN_ADDRESS      = "address"
+	USER_COLUMN_REMARK       = "remark"
+	USER_COLUMN_DELETED      = "deleted"
+	USER_COLUMN_STATE        = "state"
+	USER_COLUMN_LOGIN_IP     = "login_ip"
+	USER_COLUMN_LOGIN_TIME   = "login_time"
+	USER_COLUMN_CREATE_USER  = "create_user"
+	USER_COLUMN_EDIT_USER    = "edit_user"
+	USER_COLUMN_CREATED_TIME = "created_time"
+	USER_COLUMN_UPDATED_TIME = "updated_time"
+)
+
+type UserDO struct {
+	Id          int32  `json:"id" db:"id" bson:"_id"`                                               //user ID (auto-increment)
+	UserName    string `json:"user_name" db:"user_name" bson:"user_name"`                           //login name
+	UserAlias   string `json:"user_alias" db:"user_alias" bson:"user_alias"`                        //account alias
+	Password    string `json:"password" db:"password" bson:"password"`                              //login password (MD5+salt)
+	Salt        string `json:"salt" db:"salt" bson:"salt"`                                          //MD5 salt
+	PhoneNumber string `json:"phone_number" db:"phone_number" bson:"phone_number"`                  //contact phone number
+	IsAdmin     bool   `json:"is_admin" db:"is_admin" bson:"is_admin"`                              //super administrator (0=normal account 1=super admin)
+	Email       string `json:"email" db:"email" sqlca:"isnull" bson:"email"`                        //email address
+	Address     string `json:"address" db:"address" bson:"address"`                                 //home/company address
+	Remark      string `json:"remark" db:"remark" bson:"remark"`                                    //remark
+	Deleted     bool   `json:"deleted" db:"deleted" bson:"deleted"`                                 //deleted (0=no 1=yes)
+	State       int8   `json:"state" db:"state" bson:"state"`                                       //account state (1=enabled 2=frozen)
+	LoginIp     string `json:"login_ip" db:"login_ip" bson:"login_ip"`                              //last login IP
+	LoginTime   int64  `json:"login_time" db:"login_time" bson:"login_time"`                        //last login time
+	CreateUser  string `json:"create_user" db:"create_user" bson:"create_user"`                     //creator
+	EditUser    string `json:"edit_user" db:"edit_user" bson:"edit_user"`                           //last editor
+	CreatedTime string `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //creation time
+	UpdatedTime string `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //update time
+}
+
+func (do *UserDO) GetId() int32            { return do.Id }
+func (do *UserDO) SetId(v int32)           { do.Id = v }
+func (do *UserDO) GetUserName() string     { return do.UserName }
+func (do *UserDO) SetUserName(v string)    { do.UserName = v }
+func (do *UserDO) GetUserAlias() string    { return do.UserAlias }
+func (do *UserDO) SetUserAlias(v string)   { do.UserAlias = v }
+func (do *UserDO) GetPassword() string     { return do.Password }
+func (do *UserDO) SetPassword(v string)    { do.Password = v }
+func (do *UserDO) GetSalt() string         { return do.Salt }
+func (do *UserDO) SetSalt(v string)        { do.Salt = v }
+func (do *UserDO) GetPhoneNumber() string  { return do.PhoneNumber }
+func (do *UserDO) SetPhoneNumber(v string) { do.PhoneNumber = v }
+func (do *UserDO) GetIsAdmin() bool        { return do.IsAdmin }
+func (do *UserDO) SetIsAdmin(v bool)       { do.IsAdmin = v }
+func (do *UserDO) GetEmail() string        { return do.Email }
+func (do *UserDO) SetEmail(v string)       { do.Email = v }
+func (do *UserDO) GetAddress() string      { return do.Address }
+func (do *UserDO) SetAddress(v string)     { do.Address = v }
+func (do *UserDO) GetRemark() string       { return do.Remark }
+func (do *UserDO) SetRemark(v string)      { do.Remark = v }
+func (do *UserDO) GetDeleted() bool        { return do.Deleted }
+func (do *UserDO) SetDeleted(v bool)       { do.Deleted = v }
+func (do *UserDO) GetState() int8          { return do.State }
+func (do *UserDO) SetState(v int8)         { do.State = v }
+func (do *UserDO) GetLoginIp() string      { return do.LoginIp }
+func (do *UserDO) SetLoginIp(v string)     { do.LoginIp = v }
+func (do *UserDO) GetLoginTime() int64     { return do.LoginTime }
+func (do *UserDO) SetLoginTime(v int64)    { do.LoginTime = v }
+func (do *UserDO) GetCreateUser() string   { return do.CreateUser }
+func (do *UserDO) SetCreateUser(v string)  { do.CreateUser = v }
+func (do *UserDO) GetEditUser() string     { return do.EditUser }
+func (do *UserDO) SetEditUser(v string)    { do.EditUser = v }
+func (do *UserDO) GetCreatedTime() string  { return do.CreatedTime }
+func (do *UserDO) SetCreatedTime(v string) { do.CreatedTime = v }
+func (do *UserDO) GetUpdatedTime() string  { return do.UpdatedTime }
+func (do *UserDO) SetUpdatedTime(v string) { do.UpdatedTime = v }
+
+/*
+CREATE TABLE `user` (
+  `id` int NOT NULL AUTO_INCREMENT COMMENT '用户ID(自增)',
+  `user_name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '登录名称',
+  `user_alias` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '账户别名',
+  `password` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '登录密码(MD5+SALT)',
+  `salt` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT 'MD5加密盐',
+  `phone_number` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '联系手机号',
+  `is_admin` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否为超级管理员(0=普通账户 1=超级管理员)',
+  `email` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT '' COMMENT '邮箱地址',
+  `address` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '家庭住址/公司地址',
+  `remark` varchar(256) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '备注',
+  `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=未删除 1=已删除)',
+  `state` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已冻结(1=已启用 2=已冻结)',
+  `login_ip` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '最近登录IP',
+  `login_time` bigint NOT NULL DEFAULT '0' COMMENT '最近登录时间',
+  `create_user` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '创建人',
+  `edit_user` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '最近编辑人',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
+  PRIMARY KEY (`id`) USING BTREE,
+  UNIQUE KEY `UNIQ_USER_NAME` (`user_name`)
+) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC COMMENT='登录账户信息表';
+*/
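
The password and salt comments describe an MD5+salt scheme. The sketch below shows the shape of the verification side; the concatenation order and salt generation are assumptions, since neither is visible in this commit, and salted MD5 is weak by today's standards (bcrypt or argon2 would be the usual recommendation).

package main

import (
	"crypto/md5"
	"crypto/subtle"
	"encoding/hex"
	"fmt"
)

// hashPassword illustrates one plausible MD5+salt layout (plain+salt);
// the layout actually used by the service is an assumption here.
func hashPassword(plain, salt string) string {
	sum := md5.Sum([]byte(plain + salt))
	return hex.EncodeToString(sum[:])
}

// verifyPassword compares in constant time to avoid a timing side channel.
func verifyPassword(plain, salt, stored string) bool {
	h := hashPassword(plain, salt)
	return subtle.ConstantTimeCompare([]byte(h), []byte(stored)) == 1
}

func main() {
	stored := hashPassword("123456", "AbCd")
	fmt.Println(verifyPassword("123456", "AbCd", stored)) // true
}
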
diff --git a/pkg/dal/models/user_role_do.go b/pkg/dal/models/user_role_do.go
new file mode 100644
index 0000000..bef73d3
--- /dev/null
+++ b/pkg/dal/models/user_role_do.go
@@ -0,0 +1,60 @@
+// Code generated by db2go. DO NOT EDIT.
+// https://github.com/civet148/sqlca
+
+package models
+
+const TableNameUserRole = "user_role" //用户角色关系表
+
+const (
+	USER_ROLE_COLUMN_ID           = "id"
+	USER_ROLE_COLUMN_USER_NAME    = "user_name"
+	USER_ROLE_COLUMN_ROLE_NAME    = "role_name"
+	USER_ROLE_COLUMN_CREATE_USER  = "create_user"
+	USER_ROLE_COLUMN_EDIT_USER    = "edit_user"
+	USER_ROLE_COLUMN_DELETED      = "deleted"
+	USER_ROLE_COLUMN_CREATED_TIME = "created_time"
+	USER_ROLE_COLUMN_UPDATED_TIME = "updated_time"
+)
+
+type UserRoleDO struct {
+	Id          int32  `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	UserName    string `json:"user_name" db:"user_name" bson:"user_name"`                           //user name
+	RoleName    string `json:"role_name" db:"role_name" bson:"role_name"`                           //role name
+	CreateUser  string `json:"create_user" db:"create_user" bson:"create_user"`                     //creator
+	EditUser    string `json:"edit_user" db:"edit_user" bson:"edit_user"`                           //last editor
+	Deleted     bool   `json:"deleted" db:"deleted" bson:"deleted"`                                 //deleted (0=no 1=yes)
+	CreatedTime string `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //creation time
+	UpdatedTime string `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //update time
+}
+
+func (do *UserRoleDO) GetId() int32            { return do.Id }
+func (do *UserRoleDO) SetId(v int32)           { do.Id = v }
+func (do *UserRoleDO) GetUserName() string     { return do.UserName }
+func (do *UserRoleDO) SetUserName(v string)    { do.UserName = v }
+func (do *UserRoleDO) GetRoleName() string     { return do.RoleName }
+func (do *UserRoleDO) SetRoleName(v string)    { do.RoleName = v }
+func (do *UserRoleDO) GetCreateUser() string   { return do.CreateUser }
+func (do *UserRoleDO) SetCreateUser(v string)  { do.CreateUser = v }
+func (do *UserRoleDO) GetEditUser() string     { return do.EditUser }
+func (do *UserRoleDO) SetEditUser(v string)    { do.EditUser = v }
+func (do *UserRoleDO) GetDeleted() bool        { return do.Deleted }
+func (do *UserRoleDO) SetDeleted(v bool)       { do.Deleted = v }
+func (do *UserRoleDO) GetCreatedTime() string  { return do.CreatedTime }
+func (do *UserRoleDO) SetCreatedTime(v string) { do.CreatedTime = v }
+func (do *UserRoleDO) GetUpdatedTime() string  { return do.UpdatedTime }
+func (do *UserRoleDO) SetUpdatedTime(v string) { do.UpdatedTime = v }
+
+/*
+CREATE TABLE `user_role` (
+  `id` int NOT NULL AUTO_INCREMENT COMMENT '自增ID',
+  `user_name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '用户名',
+  `role_name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL COMMENT '角色名',
+  `create_user` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '创建人',
+  `edit_user` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '最近编辑人',
+  `deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否已删除(0=未删除 1=已删除)',
+  `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
+  `updated_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
+  PRIMARY KEY (`id`) USING BTREE,
+  UNIQUE KEY `UNIQ_USER_NAME` (`user_name`) COMMENT '用户唯一约束'
+) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci ROW_FORMAT=DYNAMIC COMMENT='用户角色关系表';
+*/
diff --git a/pkg/dal/models/wk_aig_news_do.go b/pkg/dal/models/wk_aig_news_do.go
new file mode 100644
index 0000000..12c2ca0
--- /dev/null
+++ b/pkg/dal/models/wk_aig_news_do.go
@@ -0,0 +1,67 @@
+package models
+
+const TableNameWkAigNews = "wk_aig_news"        //news articles in the PG database (AI-processed data)
+const TableNameWkAigNewsChs = "wk_aig_news_chs" //news articles in the PG database (AI-processed data, Chinese)
+
+const (
+	WK_AIG_NEWS_COLUMN_ID           = "id"
+	WK_AIG_NEWS_COLUMN_ORG_ID       = "org_id"
+	WK_AIG_NEWS_COLUMN_TAG          = "tag"
+	WK_AIG_NEWS_COLUMN_CATEGORY     = "category"
+	WK_AIG_NEWS_COLUMN_MAIN_TITLE   = "main_title"
+	WK_AIG_NEWS_COLUMN_SUB_TITLE    = "sub_title"
+	WK_AIG_NEWS_COLUMN_SUMMARY      = "summary"
+	WK_AIG_NEWS_COLUMN_KEYWORDS     = "keywords"
+	WK_AIG_NEWS_COLUMN_SEO_KEYWORDS = "seo_keywords"
+	WK_AIG_NEWS_COLUMN_URL          = "url"
+	WK_AIG_NEWS_COLUMN_IMAGE_URL    = "image_url"
+	WK_AIG_NEWS_COLUMN_CONTENT      = "content"
+	WK_AIG_NEWS_COLUMN_IS_HOTSPOT   = "is_hotspot"
+	WK_AIG_NEWS_COLUMN_CREATED_TIME = "created_time"
+)
+
+type WkAigNewsDO struct {
+	Id          int64  `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	OrgId       int64  `json:"org_id" db:"org_id" bson:"org_id"`                                    //source article ID
+	Tag         string `json:"tag" db:"tag" bson:"tag"`                                             //article tag
+	Category    string `json:"category" db:"category" bson:"category"`                              //category
+	MainTitle   string `json:"main_title" db:"main_title" bson:"main_title"`                        //main title
+	SubTitle    string `json:"sub_title" db:"sub_title" bson:"sub_title"`                           //subtitle
+	Summary     string `json:"summary" db:"summary" sqlca:"isnull" bson:"summary"`                  //summary
+	Keywords    string `json:"keywords" db:"keywords" sqlca:"isnull" bson:"keywords"`               //article keywords
+	SeoKeywords string `json:"seo_keywords" db:"seo_keywords" sqlca:"isnull" bson:"seo_keywords"`   //SEO keywords
+	Url         string `json:"url" db:"url" sqlca:"isnull" bson:"url"`                              //article URL
+	ImageUrl    string `json:"image_url" db:"image_url" sqlca:"isnull" bson:"image_url"`            //image URL
+	Content     string `json:"content" db:"content" sqlca:"isnull" bson:"content"`                  //article content
+	IsHotspot   bool   `json:"is_hotspot" db:"is_hotspot" bson:"is_hotspot"`                        //push to subscribers (0=no 1=yes)
+	CreatedTime string `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //record creation time
+}
+
+func (do *WkAigNewsDO) GetId() int64            { return do.Id }
+func (do *WkAigNewsDO) SetId(v int64)           { do.Id = v }
+func (do *WkAigNewsDO) GetOrgId() int64         { return do.OrgId }
+func (do *WkAigNewsDO) SetOrgId(v int64)        { do.OrgId = v }
+func (do *WkAigNewsDO) GetTag() string          { return do.Tag }
+func (do *WkAigNewsDO) SetTag(v string)         { do.Tag = v }
+func (do *WkAigNewsDO) GetCategory() string     { return do.Category }
+func (do *WkAigNewsDO) SetCategory(v string)    { do.Category = v }
+func (do *WkAigNewsDO) GetMainTitle() string    { return do.MainTitle }
+func (do *WkAigNewsDO) SetMainTitle(v string)   { do.MainTitle = v }
+func (do *WkAigNewsDO) GetSubTitle() string     { return do.SubTitle }
+func (do *WkAigNewsDO) SetSubTitle(v string)    { do.SubTitle = v }
+func (do *WkAigNewsDO) GetSummary() string      { return do.Summary }
+func (do *WkAigNewsDO) SetSummary(v string)     { do.Summary = v }
+func (do *WkAigNewsDO) GetKeywords() string     { return do.Keywords }
+func (do *WkAigNewsDO) SetKeywords(v string)    { do.Keywords = v }
+func (do *WkAigNewsDO) GetSeoKeywords() string  { return do.SeoKeywords }
+func (do *WkAigNewsDO) SetSeoKeywords(v string) { do.SeoKeywords = v }
+func (do *WkAigNewsDO) GetUrl() string          { return do.Url }
+func (do *WkAigNewsDO) SetUrl(v string)         { do.Url = v }
+func (do *WkAigNewsDO) GetImageUrl() string     { return do.ImageUrl }
+func (do *WkAigNewsDO) SetImageUrl(v string)    { do.ImageUrl = v }
+func (do *WkAigNewsDO) GetContent() string      { return do.Content }
+func (do *WkAigNewsDO) SetContent(v string)     { do.Content = v }
+func (do *WkAigNewsDO) GetIsHotspot() bool      { return do.IsHotspot }
+func (do *WkAigNewsDO) SetIsHotspot(v bool)     { do.IsHotspot = v }
+func (do *WkAigNewsDO) GetCreatedTime() string  { return do.CreatedTime }
+func (do *WkAigNewsDO) SetCreatedTime(v string) { do.CreatedTime = v }
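
The paired constants (wk_aig_news and wk_aig_news_chs) suggest the English and Chinese corpora live in sibling PG tables. A query layer might route between them with a helper like the one below; the call sites are not in this part of the commit, so the "zh-CN" language code and the routing itself are assumptions.

package main

import (
	"fmt"

	"intent-system/pkg/dal/models"
)

// newsTable picks the PG table for the requested language; "zh-CN" follows
// the language convention used by the template model in this commit.
func newsTable(lang string) string {
	if lang == "zh-CN" {
		return models.TableNameWkAigNewsChs
	}
	return models.TableNameWkAigNews
}

func main() {
	fmt.Println(newsTable("en"))    // wk_aig_news
	fmt.Println(newsTable("zh-CN")) // wk_aig_news_chs
}
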
diff --git a/pkg/dal/models/wk_aig_qna_do.go b/pkg/dal/models/wk_aig_qna_do.go
new file mode 100644
index 0000000..84c3956
--- /dev/null
+++ b/pkg/dal/models/wk_aig_qna_do.go
@@ -0,0 +1,63 @@
+package models
+
+const TableNameWkAigQna = "wk_aig_qna"        //Q&A records in the PG database (AI-processed data)
+const TableNameWkAigQnaChs = "wk_aig_qna_chs" //Q&A records in the PG database (AI-processed data, Chinese)
+
+const (
+	WK_AIG_QNA_COLUMN_ID               = "id"
+	WK_AIG_QNA_COLUMN_ORG_ID           = "org_id"
+	WK_AIG_QNA_COLUMN_TAG              = "tag"
+	WK_AIG_QNA_COLUMN_CATEGORY         = "category"
+	WK_AIG_QNA_COLUMN_QUESTION         = "question"
+	WK_AIG_QNA_COLUMN_QUESTION_SUMMARY = "question_summary"
+	WK_AIG_QNA_COLUMN_ANSWER           = "answer"
+	WK_AIG_QNA_COLUMN_KEYWORDS         = "keywords"
+	WK_AIG_QNA_COLUMN_SEO_KEYWORDS     = "seo_keywords"
+	WK_AIG_QNA_COLUMN_URL              = "url"
+	WK_AIG_QNA_COLUMN_IMAGE_URL        = "image_url"
+	WK_AIG_QNA_COLUMN_IS_HOTSPOT       = "is_hotspot"
+	WK_AIG_QNA_COLUMN_CREATED_TIME     = "created_time"
+)
+
+type WkAigQnaDO struct {
+	Id              int64  `json:"id" db:"id" bson:"_id"`                                                         //auto-increment ID
+	OrgId           int64  `json:"org_id" db:"org_id" bson:"org_id"`                                              //source article ID
+	Tag             string `json:"tag" db:"tag" bson:"tag"`                                                       //article tag
+	Category        string `json:"category" db:"category" bson:"category"`                                        //category
+	Question        string `json:"question" db:"question" bson:"question"`                                        //question
+	Answer          string `json:"answer" db:"answer" bson:"answer"`                                              //answer
+	QuestionSummary string `json:"question_summary" db:"question_summary" sqlca:"isnull" bson:"question_summary"` //question summary
+	Keywords        string `json:"keywords" db:"keywords" sqlca:"isnull" bson:"keywords"`                         //article keywords
+	SeoKeywords     string `json:"seo_keywords" db:"seo_keywords" sqlca:"isnull" bson:"seo_keywords"`             //SEO keywords
+	Url             string `json:"url" db:"url" sqlca:"isnull" bson:"url"`                                        //article URL
+	ImageUrl        string `json:"image_url" db:"image_url" sqlca:"isnull" bson:"image_url"`                      //image URL
+	IsHotspot       bool   `json:"is_hotspot" db:"is_hotspot" bson:"is_hotspot"`                                  //hotspot (false=no true=yes)
+	CreatedTime     string `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"`           //record creation time
+}
+
+func (do *WkAigQnaDO) GetId() int64                { return do.Id }
+func (do *WkAigQnaDO) SetId(v int64)               { do.Id = v }
+func (do *WkAigQnaDO) GetOrgId() int64             { return do.OrgId }
+func (do *WkAigQnaDO) SetOrgId(v int64)            { do.OrgId = v }
+func (do *WkAigQnaDO) GetTag() string              { return do.Tag }
+func (do *WkAigQnaDO) SetTag(v string)             { do.Tag = v }
+func (do *WkAigQnaDO) GetCategory() string         { return do.Category }
+func (do *WkAigQnaDO) SetCategory(v string)        { do.Category = v }
+func (do *WkAigQnaDO) GetQuestion() string         { return do.Question }
+func (do *WkAigQnaDO) SetQuestion(v string)        { do.Question = v }
+func (do *WkAigQnaDO) GetAnswer() string           { return do.Answer }
+func (do *WkAigQnaDO) SetAnswer(v string)          { do.Answer = v }
+func (do *WkAigQnaDO) GetQuestionSummary() string  { return do.QuestionSummary }
+func (do *WkAigQnaDO) SetQuestionSummary(v string) { do.QuestionSummary = v }
+func (do *WkAigQnaDO) GetKeywords() string         { return do.Keywords }
+func (do *WkAigQnaDO) SetKeywords(v string)        { do.Keywords = v }
+func (do *WkAigQnaDO) GetSeoKeywords() string      { return do.SeoKeywords }
+func (do *WkAigQnaDO) SetSeoKeywords(v string)     { do.SeoKeywords = v }
+func (do *WkAigQnaDO) GetUrl() string              { return do.Url }
+func (do *WkAigQnaDO) SetUrl(v string)             { do.Url = v }
+func (do *WkAigQnaDO) GetImageUrl() string         { return do.ImageUrl }
+func (do *WkAigQnaDO) SetImageUrl(v string)        { do.ImageUrl = v }
+func (do *WkAigQnaDO) GetIsHotspot() bool          { return do.IsHotspot }
+func (do *WkAigQnaDO) SetIsHotspot(v bool)         { do.IsHotspot = v }
+func (do *WkAigQnaDO) GetCreatedTime() string      { return do.CreatedTime }
+func (do *WkAigQnaDO) SetCreatedTime(v string)     { do.CreatedTime = v }
diff --git a/pkg/dal/models/wk_spider_news_do.go b/pkg/dal/models/wk_spider_news_do.go
new file mode 100644
index 0000000..c998b11
--- /dev/null
+++ b/pkg/dal/models/wk_spider_news_do.go
@@ -0,0 +1,62 @@
+package models
+
+const TableNameWkSpiderNews = "wk_spider_news" //news articles in the PG database (raw spider data)
+
+const (
+	WK_SPIDER_NEWS_COLUMN_ID           = "id"
+	WK_SPIDER_NEWS_COLUMN_TAG          = "tag"
+	WK_SPIDER_NEWS_COLUMN_CATEGORY     = "category"
+	WK_SPIDER_NEWS_COLUMN_MAIN_TITLE   = "main_title"
+	WK_SPIDER_NEWS_COLUMN_SUB_TITLE    = "sub_title"
+	WK_SPIDER_NEWS_COLUMN_SUMMARY      = "summary"
+	WK_SPIDER_NEWS_COLUMN_KEYWORDS     = "keywords"
+	WK_SPIDER_NEWS_COLUMN_SEO_KEYWORDS = "seo_keywords"
+	WK_SPIDER_NEWS_COLUMN_URL          = "url"
+	WK_SPIDER_NEWS_COLUMN_IMAGE_URL    = "image_url"
+	WK_SPIDER_NEWS_COLUMN_CONTENT      = "content"
+	WK_SPIDER_NEWS_COLUMN_IS_HOTSPOT   = "is_hotspot"
+	WK_SPIDER_NEWS_COLUMN_CREATED_TIME = "created_time"
+)
+
+type WkSpiderNewsDO struct {
+	Id          int64  `json:"id" db:"id" bson:"_id"`                                               //auto-increment ID
+	Tag         string `json:"tag" db:"tag" bson:"tag"`                                             //article tag
+	Category    string `json:"category" db:"category" bson:"category"`                              //category
+	MainTitle   string `json:"main_title" db:"main_title" bson:"main_title"`                        //main title
+	SubTitle    string `json:"sub_title" db:"sub_title" bson:"sub_title"`                           //subtitle
+	Summary     string `json:"summary" db:"summary" sqlca:"isnull" bson:"summary"`                  //summary
+	Keywords    string `json:"keywords" db:"keywords" sqlca:"isnull" bson:"keywords"`               //article keywords
+	SeoKeywords string `json:"seo_keywords" db:"seo_keywords" sqlca:"isnull" bson:"seo_keywords"`   //SEO keywords
+	Url         string `json:"url" db:"url" sqlca:"isnull" bson:"url"`                              //article URL
+	ImageUrl    string `json:"image_url" db:"image_url" sqlca:"isnull" bson:"image_url"`            //image URL
+	Content     string `json:"content" db:"content" sqlca:"isnull" bson:"content"`                  //article content
+	IsHotspot   bool   `json:"is_hotspot" db:"is_hotspot" bson:"is_hotspot"`                        //push to subscribers (0=no 1=yes)
+	CreatedTime string `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //record creation time
+}
+
+func (do *WkSpiderNewsDO) GetId() int64            { return do.Id }
+func (do *WkSpiderNewsDO) SetId(v int64)           { do.Id = v }
+func (do *WkSpiderNewsDO) GetTag() string          { return do.Tag }
+func (do *WkSpiderNewsDO) SetTag(v string)         { do.Tag = v }
+func (do *WkSpiderNewsDO) GetCategory() string     { return do.Category }
+func (do *WkSpiderNewsDO) SetCategory(v string)    { do.Category = v }
+func (do *WkSpiderNewsDO) GetMainTitle() string    { return do.MainTitle }
+func (do *WkSpiderNewsDO) SetMainTitle(v string)   { do.MainTitle = v }
+func (do *WkSpiderNewsDO) GetSubTitle() string     { return do.SubTitle }
+func (do *WkSpiderNewsDO) SetSubTitle(v string)    { do.SubTitle = v }
+func (do *WkSpiderNewsDO) GetSummary() string      { return do.Summary }
+func (do *WkSpiderNewsDO) SetSummary(v string)     { do.Summary = v }
+func (do *WkSpiderNewsDO) GetKeywords() string     { return do.Keywords }
+func (do *WkSpiderNewsDO) SetKeywords(v string)    { do.Keywords = v }
+func (do *WkSpiderNewsDO) GetSeoKeywords() string  { return do.SeoKeywords }
+func (do *WkSpiderNewsDO) SetSeoKeywords(v string) { do.SeoKeywords = v }
+func (do *WkSpiderNewsDO) GetUrl() string          { return do.Url }
+func (do *WkSpiderNewsDO) SetUrl(v string)         { do.Url = v }
+func (do *WkSpiderNewsDO) GetImageUrl() string     { return do.ImageUrl }
+func (do *WkSpiderNewsDO) SetImageUrl(v string)    { do.ImageUrl = v }
+func (do *WkSpiderNewsDO) GetContent() string      { return do.Content }
+func (do *WkSpiderNewsDO) SetContent(v string)     { do.Content = v }
+func (do *WkSpiderNewsDO) GetIsHotspot() bool      { return do.IsHotspot }
+func (do *WkSpiderNewsDO) SetIsHotspot(v bool)     { do.IsHotspot = v }
+func (do *WkSpiderNewsDO) GetCreatedTime() string  { return do.CreatedTime }
+func (do *WkSpiderNewsDO) SetCreatedTime(v string) { do.CreatedTime = v }
diff --git a/pkg/dal/models/wk_spider_qna_do.go b/pkg/dal/models/wk_spider_qna_do.go
new file mode 100644
index 0000000..987eb94
--- /dev/null
+++ b/pkg/dal/models/wk_spider_qna_do.go
@@ -0,0 +1,62 @@
+package models
+
+const TableNameWkSpiderQna = "wk_spider_qna" //Q&A records in the PG database (raw spider data)
+
+const (
+	WK_SPIDER_QNA_COLUMN_ID               = "id"
+	WK_SPIDER_QNA_COLUMN_TAG              = "tag"
+	WK_SPIDER_QNA_COLUMN_CATEGORY         = "category"
+	WK_SPIDER_QNA_COLUMN_QUESTION         = "question"
+	WK_SPIDER_QNA_COLUMN_QUESTION_SUMMARY = "question_summary"
+	WK_SPIDER_QNA_COLUMN_ANSWER           = "answer"
+	WK_SPIDER_QNA_COLUMN_KEYWORDS         = "keywords"
+	WK_SPIDER_QNA_COLUMN_SEO_KEYWORDS     = "seo_keywords"
+	WK_SPIDER_QNA_COLUMN_URL              = "url"
+	WK_SPIDER_QNA_COLUMN_IMAGE_URL        = "image_url"
+	WK_SPIDER_QNA_COLUMN_IS_HOTSPOT       = "is_hotspot"
+	WK_SPIDER_QNA_COLUMN_CREATED_TIME     = "created_time"
+	WK_SPIDER_QNA_COLUMN_STATUS           = "status"
+)
+
+type WkSpiderQnaDO struct {
+	Id              int64    `json:"id" db:"id" bson:"_id"`                                                         //auto-increment ID
+	Tag             string   `json:"tag" db:"tag" bson:"tag"`                                                       //article tag
+	Category        string   `json:"category" db:"category" bson:"category"`                                        //category
+	Question        string   `json:"question" db:"question" bson:"question"`                                        //question
+	Answer          string   `json:"answer" db:"answer" bson:"answer"`                                              //answer
+	QuestionSummary string   `json:"question_summary" db:"question_summary" sqlca:"isnull" bson:"question_summary"` //question summary
+	Keywords        []string `json:"keywords" db:"keywords" sqlca:"isnull" bson:"keywords"`                         //keywords
+	SeoKeywords     []string `json:"seo_keywords" db:"seo_keywords" sqlca:"isnull" bson:"seo_keywords"`             //SEO keywords
+	Url             string   `json:"url" db:"url" sqlca:"isnull" bson:"url"`                                        //URL
+	ImageUrl        string   `json:"image_url" db:"image_url" sqlca:"isnull" bson:"image_url"`                      //image URL
+	IsHotspot       bool     `json:"is_hotspot" db:"is_hotspot" bson:"is_hotspot"`                                  //hotspot (0=no 1=yes)
+	CreatedTime     string   `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"`           //record creation time
+	Status          bool     `json:"status" db:"status" bson:"status"`                                              //processing status (false=unprocessed true=processed)
+}
+
+func (do *WkSpiderQnaDO) GetId() int64                { return do.Id }
+func (do *WkSpiderQnaDO) SetId(v int64)               { do.Id = v }
+func (do *WkSpiderQnaDO) GetTag() string              { return do.Tag }
+func (do *WkSpiderQnaDO) SetTag(v string)             { do.Tag = v }
+func (do *WkSpiderQnaDO) GetCategory() string         { return do.Category }
+func (do *WkSpiderQnaDO) SetCategory(v string)        { do.Category = v }
+func (do *WkSpiderQnaDO) GetQuestion() string         { return do.Question }
+func (do *WkSpiderQnaDO) SetQuestion(v string)        { do.Question = v }
+func (do *WkSpiderQnaDO) GetAnswer() string           { return do.Answer }
+func (do *WkSpiderQnaDO) SetAnswer(v string)          { do.Answer = v }
+func (do *WkSpiderQnaDO) GetQuestionSummary() string  { return do.QuestionSummary }
+func (do *WkSpiderQnaDO) SetQuestionSummary(v string) { do.QuestionSummary = v }
+func (do *WkSpiderQnaDO) GetKeywords() []string       { return do.Keywords }
+func (do *WkSpiderQnaDO) SetKeywords(v []string)      { do.Keywords = v }
+func (do *WkSpiderQnaDO) GetSeoKeywords() []string    { return do.SeoKeywords }
+func (do *WkSpiderQnaDO) SetSeoKeywords(v []string)   { do.SeoKeywords = v }
+func (do *WkSpiderQnaDO) GetUrl() string              { return do.Url }
+func (do *WkSpiderQnaDO) SetUrl(v string)             { do.Url = v }
+func (do *WkSpiderQnaDO) GetImageUrl() string         { return do.ImageUrl }
+func (do *WkSpiderQnaDO) SetImageUrl(v string)        { do.ImageUrl = v }
+func (do *WkSpiderQnaDO) GetIsHotspot() bool          { return do.IsHotspot }
+func (do *WkSpiderQnaDO) SetIsHotspot(v bool)         { do.IsHotspot = v }
+func (do *WkSpiderQnaDO) GetCreatedTime() string      { return do.CreatedTime }
+func (do *WkSpiderQnaDO) SetCreatedTime(v string)     { do.CreatedTime = v }
+func (do *WkSpiderQnaDO) GetStatus() bool             { return do.Status }
+func (do *WkSpiderQnaDO) SetStatus(v bool)            { do.Status = v }
diff --git a/pkg/dal/ws/pool.go b/pkg/dal/ws/pool.go
new file mode 100644
index 0000000..ea4c3fc
--- /dev/null
+++ b/pkg/dal/ws/pool.go
@@ -0,0 +1,79 @@
+package ws
+
+import (
+	"log"
+	"sync"
+	"time"
+
+	"github.com/gorilla/websocket"
+)
+
+type WebSocketPool struct {
+	conns map[string][]*websocket.Conn
+	sync.RWMutex
+}
+
+// NewWebSocketPool creates an empty connection pool.
+func NewWebSocketPool() *WebSocketPool {
+	return &WebSocketPool{
+		conns: make(map[string][]*websocket.Conn),
+	}
+}
+
+// Add registers a WebSocket connection under the given appID.
+func (p *WebSocketPool) Add(appID string, conn *websocket.Conn) {
+	p.Lock()
+	defer p.Unlock()
+	p.conns[appID] = append(p.conns[appID], conn)
+}
+
+// Remove deletes a single connection from the pool, dropping the appID entry once it is empty.
+func (p *WebSocketPool) Remove(appID string, conn *websocket.Conn) {
+	p.Lock()
+	defer p.Unlock()
+	if conns, ok := p.conns[appID]; ok {
+		for i, c := range conns {
+			if c == conn {
+				p.conns[appID] = append(conns[:i], conns[i+1:]...)
+				break
+			}
+		}
+		if len(p.conns[appID]) == 0 {
+			delete(p.conns, appID)
+		}
+	}
+}
+
+// Broadcast sends a message to every connection registered under appID.
+// gorilla/websocket permits only one concurrent writer per connection, so the
+// full write lock is taken here; a read lock would let concurrent Broadcast
+// calls write to the same connection at the same time.
+func (p *WebSocketPool) Broadcast(appID string, message string) {
+	p.Lock()
+	defer p.Unlock()
+	if conns, ok := p.conns[appID]; ok {
+		for _, conn := range conns {
+			if err := conn.WriteMessage(websocket.TextMessage, []byte(message)); err != nil {
+				log.Printf("[WebSocketPool] failed to send to appID=%s: %v", appID, err)
+			}
+		}
+	}
+}
+
+// CleanInactive drops dead or unreachable connections by pinging each one (intended to be called periodically).
+func (p *WebSocketPool) CleanInactive(appID string) {
+	p.Lock()
+	defer p.Unlock()
+	if conns, ok := p.conns[appID]; ok {
+		active := make([]*websocket.Conn, 0, len(conns))
+		for _, conn := range conns {
+			if err := conn.WriteControl(websocket.PingMessage, []byte("ping"), time.Now().Add(1*time.Second)); err == nil {
+				active = append(active, conn)
+			} else {
+				_ = conn.Close()
+			}
+		}
+		if len(active) > 0 {
+			p.conns[appID] = active
+		} else {
+			delete(p.conns, appID)
+		}
+	}
+}
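
How the pool gets wired into an HTTP server is not shown in this commit. A plausible minimal handler follows; the /ws route, the app_id query parameter, the listen address, and the permissive CheckOrigin are all illustrative assumptions.

package main

import (
	"net/http"

	"github.com/gorilla/websocket"
	"intent-system/pkg/dal/ws"
)

var upgrader = websocket.Upgrader{
	// demo only: accept any origin
	CheckOrigin: func(r *http.Request) bool { return true },
}

func main() {
	pool := ws.NewWebSocketPool()

	http.HandleFunc("/ws", func(w http.ResponseWriter, r *http.Request) {
		appID := r.URL.Query().Get("app_id")
		conn, err := upgrader.Upgrade(w, r, nil)
		if err != nil {
			return
		}
		pool.Add(appID, conn)
		defer pool.Remove(appID, conn)
		// drain reads so close frames are processed; outbound traffic
		// goes through pool.Broadcast elsewhere in the service, e.g.
		// pool.Broadcast("app-1", `{"event":"news"}`)
		for {
			if _, _, err := conn.ReadMessage(); err != nil {
				return
			}
		}
	})

	_ = http.ListenAndServe("0.0.0.0:8089", nil) // placeholder address
}
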
diff --git a/pkg/email/email_sender.go b/pkg/email/email_sender.go
new file mode 100644
index 0000000..03a85bd
--- /dev/null
+++ b/pkg/email/email_sender.go
@@ -0,0 +1,124 @@
+package email
+
+import (
+	"crypto/tls"
+	"github.com/civet148/log"
+	"gopkg.in/gomail.v2"
+)
+
+const (
+	SubjectVerificationCodeForRegister      = "Complete your registration with your Jelly AI verification code"
+	SubjectVerificationCodeForResetPassword = "Complete your reset with your Jelly AI verification code"
+)
+
+type EmailConfig struct {
+	SmtpServer string `json:"smtp_server" db:"smtp_server"`
+	SmtpPort   uint32 `json:"smtp_port" db:"smtp_port"`
+	SmtpName   string `json:"smtp_name" db:"smtp_name"`
+	AuthCode   string `json:"auth_code" db:"auth_code"`
+	SendName   string `json:"send_name" db:"send_name"`
+}
+
+func SendVerificationCode(cfg *EmailConfig, subject, to, body string) (err error) {
+
+	m := gomail.NewMessage()
+
+	// sender
+	m.SetAddressHeader("From", cfg.SmtpName, cfg.SendName)
+	// recipient
+	m.SetHeader("To", to)
+	// subject
+	m.SetHeader("Subject", subject)
+	// body
+	m.SetBody("text/html", body)
+	// attachment (unused)
+	//m.Attach("./myIpPic.png")
+	d := gomail.NewDialer(cfg.SmtpServer, int(cfg.SmtpPort), cfg.SmtpName, cfg.AuthCode)
+	// skip TLS certificate verification so self-signed SMTP certificates work;
+	// note that this weakens transport security
+	if d.TLSConfig == nil {
+		d.TLSConfig = &tls.Config{
+			InsecureSkipVerify: true,
+		}
+	} else {
+		d.TLSConfig.InsecureSkipVerify = true
+	}
+
+	// dial the SMTP server and send the mail
+	if err = d.DialAndSend(m); err != nil {
+		log.Errorf("send mail failed, err: %s", err.Error())
+		return err
+	}
+	log.Debugf("send to mail [%s] body [%s]", to, body)
+	return nil
+}
+
+func SendEmail(cfg *EmailConfig, subject string, msg string, to string) (err error) {
+	log.Debugf("[SMTP] server [%s] port [%v] name [%s] to [%s]", cfg.SmtpServer, cfg.SmtpPort, cfg.SmtpName, to)
+	if to == "" {
+		return log.Errorf("no recipient to send")
+	}
+	log.Debugf("send email subject [%s] to %+v ", subject, to)
+
+	m := gomail.NewMessage()
+
+	// sender
+	m.SetAddressHeader("From", cfg.SmtpName, cfg.SendName)
+	// recipient
+	m.SetHeader("To", to)
+	// cc (unused)
+	//m.SetHeader("Cc", to...)
+	// bcc (unused)
+	//m.SetHeader("Bcc", to...)
+	// subject
+	m.SetHeader("Subject", subject)
+	// body
+	m.SetBody("text/html", msg)
+	// attachment (unused)
+	//m.Attach("./myIpPic.png")
+	d := gomail.NewDialer(cfg.SmtpServer, int(cfg.SmtpPort), cfg.SmtpName, cfg.AuthCode)
+	if d.TLSConfig == nil {
+		d.TLSConfig = &tls.Config{
+			InsecureSkipVerify: true,
+		}
+	} else {
+		d.TLSConfig.InsecureSkipVerify = true
+	}
+	// dial the SMTP server and send the mail
+	if err = d.DialAndSend(m); err != nil {
+		log.Errorf("send email error [%s]", err.Error())
+		return err
+	}
+	return nil
+}
+
+func SendEmailBcc(cfg *EmailConfig, subject string, msg string, to ...string) (err error) {
+	log.Debugf("[SMTP] server [%s] port [%v] name [%s] to %v", cfg.SmtpServer, cfg.SmtpPort, cfg.SmtpName, to)
+	if len(to) == 0 {
+		return log.Errorf("no bcc recipients to send")
+	}
+	log.Debugf("send email subject [%s] to %+v ", subject, to)
+
+	m := gomail.NewMessage()
+
+	// sender
+	m.SetAddressHeader("From", cfg.SmtpName, cfg.SendName)
+	// the visible To header is the sending account itself; real recipients go in Bcc below
+	m.SetHeader("To", cfg.SmtpName)
+	// cc (unused)
+	//m.SetHeader("Cc", to...)
+	// bcc: the actual recipient list
+	m.SetHeader("Bcc", to...)
+	// subject
+	m.SetHeader("Subject", subject)
+	// body
+	m.SetBody("text/html", msg)
+	// attachment (unused)
+	//m.Attach("./myIpPic.png")
+	d := gomail.NewDialer(cfg.SmtpServer, int(cfg.SmtpPort), cfg.SmtpName, cfg.AuthCode)
+
+	// dial the SMTP server and send the mail
+	if err = d.DialAndSend(m); err != nil {
+		log.Errorf("send email error [%s]", err.Error())
+		return err
+	}
+	return nil
+}
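
A minimal caller for the helpers above; every SMTP value here is a placeholder.

package main

import (
	"intent-system/pkg/email"
)

func main() {
	// all values below are placeholders, not real credentials
	cfg := &email.EmailConfig{
		SmtpServer: "smtp.example.com",
		SmtpPort:   465,
		SmtpName:   "no-reply@example.com",
		AuthCode:   "app-password",
		SendName:   "Jelly AI",
	}
	_ = email.SendVerificationCode(cfg,
		email.SubjectVerificationCodeForRegister,
		"user@example.com",
		"<p>Your code is 123456</p>")
}
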
diff --git a/pkg/email/email_templates.go b/pkg/email/email_templates.go
new file mode 100644
index 0000000..069a4c3
--- /dev/null
+++ b/pkg/email/email_templates.go
@@ -0,0 +1,123 @@
+package email
+
+import (
+	_ "embed"
+	"fmt"
+	"intent-system/pkg/dal/models"
+)
+
+const (
+	SubscriptionWelcomeSubject = "Unlock the Future — Welcome to Jelly AI"
+)
+
+//go:embed welcome_email.html
+var SubscriptionWelcomeTemplate string
+
+//go:embed welcome_email_cn.html
+var SubscriptionWelcomeTemplateCn string
+
+func SubscriptionMessage(strSubject, strTitle, strSubTitle, strSummary, strLink string) string {
+	return fmt.Sprintf(`
+[HTML wrapper lost in extraction: the page renders the subject as the document title, then the article's main title, summary and sub-title, followed by a "Read more" link]
+`, strSubject, strTitle, strSummary, strSubTitle, strLink)
+}
+
+func RegisterVerificationCodeMessage(strLang, strUserName, strVerificationCode string) (strMsg string) {
+	switch strLang {
+	case models.Language_EN:
+		strMsg = registerVerificationCodeMessageEn(strUserName, strVerificationCode)
+	case models.Language_CN:
+		strMsg = registerVerificationCodeMessageCN(strUserName, strVerificationCode)
+	default:
+		strMsg = registerVerificationCodeMessageEn(strUserName, strVerificationCode)
+	}
+	return strMsg
+}
+
+func registerVerificationCodeMessageCN(strUserName, strVerificationCode string) string {
+	return fmt.Sprintf(`
+[HTML wrapper lost in extraction; page title 欢迎加入Jelly AI!; recoverable body text:]
+亲爱的 %s,
+感谢您加入Jelly AI!我们很高兴能为您指引接下来的旅程。首先,请使用以下验证码完成您的注册:
+验证码: %s
+注意: 此验证码将在60分钟后失效。为了您的安全,请不要与任何人分享此验证码。
+在您踏上这段激动人心的旅程之际,我们想向您介绍我们设计的创新产品,旨在改变您的业务:
+如果您没有请求此验证码,可能是有人误输入了您的电子邮件。请忽略此电子邮件,或者如果您有任何疑虑,请随时联系我们:it@jellydropsllc.com.
+我们期待帮助您充分利用AI的潜力。感谢您选择Jelly AI.
+祝好,
+Jelly AI业务部门
+`, strUserName, strVerificationCode)
+}
+
+func registerVerificationCodeMessageEn(strUserName, strVerificationCode string) string {
+	return fmt.Sprintf(`
+[HTML wrapper lost in extraction; page title Welcome to Jelly AI!; recoverable body text:]
+Hi %s,
+Thank you for choosing Jelly AI! We are excited to guide you through the next steps of your journey with us. To begin, please use the verification code below to complete your registration:
+Verification Code: %s
+Note: This code will expire in 60 minutes. For your security, please do not share this code with anyone.
+As you embark on this exciting journey, we would like to introduce you to our innovative products designed to transform your business:
+If you did not request this code, it's possible that someone entered your email by mistake. Please disregard this email, or if you have any concerns, feel free to reach out to us at it@jellydropsllc.com.
+We look forward to helping you leverage the full potential of AI. Thank you for choosing Jelly AI.
+Warm regards,
+The Jelly AI Service Team
+`, strUserName, strVerificationCode)
+}
diff --git a/pkg/email/welcome_email.html b/pkg/email/welcome_email.html
new file mode 100644
index 0000000..eadb501
--- /dev/null
+++ b/pkg/email/welcome_email.html
@@ -0,0 +1,931 @@
+[The 931 lines of table-based HTML markup did not survive extraction; only the text content is recoverable:
+ banner "Unlock the Future — Welcome to JellyAI"; greeting "Dear [%s]," and a welcome paragraph on AI innovation and blockchain finance;
+ featured articles: "iSDK Interactive Cloud Platform: Redefining Future Application Interactions", "Biometric Tech & Deep Learning: The Next Step in Security Innovation", "AI Media Transformation: Crafting the New Chapter of Content Creation", "Blockchain in Action: Navigating Technological Evolution in Finance", "AI Collaboration Across Industries: Integrating Model Applications in Academia and Research";
+ podcasts: "Conversations with Innovators: The AI Journey of JellyAI", "Blockchain Perspectives: Foreseeing the Future of Financial Technologies", "Tech Pulse: Exploring the Impact of AI Across Various Life Spheres";
+ a social-follow call to action, thanks, sign-off "The JellyAI Team", the tagline "Software Is eating the world", and the footer "©2024 JellyAI. All rights reserved."]
\ No newline at end of file
diff --git a/pkg/email/welcome_email_cn.html b/pkg/email/welcome_email_cn.html
new file mode 100644
index 0000000..403abda
--- /dev/null
+++ b/pkg/email/welcome_email_cn.html
@@ -0,0 +1,909 @@
+[The 909 lines of table-based HTML markup did not survive extraction; this is the Chinese edition of the same welcome email:
+ banner 开启未来之门 — 欢迎加入JellyAI; greeting 尊贵的 [%s], followed by a welcome paragraph;
+ the same five featured articles and three podcast episodes rendered in Chinese; a social-follow call to action;
+ sign-off JellyAI团队 敬上; footer "©2024 JellyAI. All rights reserved."]
\ No newline at end of file
diff --git a/pkg/itypes/biz_code.go b/pkg/itypes/biz_code.go
new file mode 100644
index 0000000..5a28638
--- /dev/null
+++ b/pkg/itypes/biz_code.go
@@ -0,0 +1,97 @@
+package itypes
+
+import "fmt"
+
+type BizCode struct {
+	Code    int
+	Message string
+}
+
+const (
+	CODE_ERROR                           = -1   //unknown error
+	CODE_OK                              = 0    //success
+	CODE_TOO_MANAY_REQUESTS              = 429  //too many requests
+	CODE_INTERNAL_SERVER_ERROR           = 500  //internal server error
+	CODE_DATABASE_ERROR                  = 501  //database server error
+	CODE_ACCESS_DENY                     = 1000 //access denied
+	CODE_UNAUTHORIZED                    = 1001 //user unauthorized
+	CODE_INVALID_USER_OR_PASSWORD        = 1002 //user or password incorrect
+	CODE_INVALID_PARAMS                  = 1003 //parameters invalid
+	CODE_INVALID_JSON_OR_REQUIRED_PARAMS = 1004 //json format invalid or required params missing
+	CODE_ALREADY_EXIST                   = 1005 //record already exists
+	CODE_NOT_FOUND                       = 1006 //record not found
+	CODE_INVALID_PASSWORD                = 1007 //wrong password
+	CODE_INVALID_AUTH_CODE               = 1008 //invalid auth code
+	CODE_ACCESS_VIOLATE                  = 1009 //access violation
+	CODE_TYPE_UNDEFINED                  = 1010 //type undefined
+	CODE_BAD_DID_OR_SIGNATURE            = 1011 //bad did or signature
+	CODE_ACCOUNT_BANNED                  = 1012 //account was banned
+)
+
+var codeMessages = map[int]string{
+	CODE_ERROR:                           "unknown error",
+	CODE_OK:                              "OK",
+	CODE_TOO_MANAY_REQUESTS:              "too many requests",
+	CODE_INTERNAL_SERVER_ERROR:           "internal server error",
+	CODE_DATABASE_ERROR:                  "database error",
+	CODE_UNAUTHORIZED:                    "unauthorized",
+	CODE_ACCESS_DENY:                     "access denied",
+	CODE_INVALID_USER_OR_PASSWORD:        "invalid user or password",
+	CODE_INVALID_PARAMS:                  "invalid params",
+	CODE_INVALID_JSON_OR_REQUIRED_PARAMS: "invalid json request",
+	CODE_ALREADY_EXIST:                   "data already exists",
+	CODE_NOT_FOUND:                       "data not found",
+	CODE_INVALID_PASSWORD:                "invalid password",
+	CODE_INVALID_AUTH_CODE:               "invalid auth code",
+	CODE_ACCESS_VIOLATE:                  "access violation",
+	CODE_TYPE_UNDEFINED:                  "type undefined",
+	CODE_BAD_DID_OR_SIGNATURE:            "bad did or signature",
+	CODE_ACCOUNT_BANNED:                  "account banned",
+}
+
+var BizOK = BizCode{
+	Code: CODE_OK,
+}
+
+func (c BizCode) Ok() bool {
+	return c.Code == CODE_OK
+}
+
+func (c BizCode) String() string {
+	if c.Message != "" {
+		return c.Message
+	}
+	if m, ok := codeMessages[c.Code]; ok {
+		return m
+	}
+	return fmt.Sprintf("unknown_code<%d>", c.Code)
+}
+
+func (c BizCode) GoString() string {
+	return c.String()
+}
+
+func NewBizCode(code int, messages ...string) BizCode {
+	if code == CODE_OK {
+		return BizCode{}
+	}
+	var msg string
+	if len(messages) > 0 {
+		msg = messages[0]
+	} else {
+		msg = codeMessages[code]
+	}
+
+	return BizCode{
+		Code:    code,
+		Message: msg,
+	}
+}
+
+func NewBizCodeDatabaseError(messages ...string) BizCode {
+	var strError string
+	if len(messages) > 0 {
+		strError = messages[0]
+	}
+	return NewBizCode(CODE_DATABASE_ERROR, strError)
+}
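
A quick illustration of the BizCode contract: BizOK stringifies through the codeMessages table, while an explicit message passed to NewBizCode overrides the table lookup.

package main

import (
	"fmt"

	"intent-system/pkg/itypes"
)

func main() {
	// success path: no Message set, so String() falls back to codeMessages
	fmt.Println(itypes.BizOK.Ok(), itypes.BizOK) // true OK

	// error path: an explicit message overrides the table lookup
	code := itypes.NewBizCode(itypes.CODE_NOT_FOUND, "role not found")
	fmt.Println(code.Ok(), code) // false role not found
}
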
diff --git a/pkg/itypes/check_type.go b/pkg/itypes/check_type.go
new file mode 100644
index 0000000..47e0089
--- /dev/null
+++ b/pkg/itypes/check_type.go
@@ -0,0 +1,34 @@
+package itypes
+
+import "fmt"
+
+type CheckType int
+
+const (
+	CheckType_UserName        CheckType = 0
+	CheckType_UserPhoneNumber CheckType = 1
+	CheckType_RoleName        CheckType = 2
+	CheckType_PoolName        CheckType = 3
+	CheckType_ClusterName     CheckType = 4
+	CheckType_UserEmail       CheckType = 5
+)
+
+var checkTypes = map[CheckType]string{
+	CheckType_UserName:        "CheckType_UserName",
+	CheckType_UserPhoneNumber: "CheckType_UserPhoneNumber",
+	CheckType_RoleName:        "CheckType_RoleName",
+	CheckType_PoolName:        "CheckType_PoolName",
+	CheckType_ClusterName:     "CheckType_ClusterName",
+	CheckType_UserEmail:       "CheckType_UserEmail",
+}
+
+func (t CheckType) String() string {
+	if strType, ok := checkTypes[t]; ok {
+		return strType
+	}
+	return fmt.Sprintf("CheckType_Unknown<%d>", t)
+}
+
+func (t CheckType) GoString() string {
+	return t.String()
+}
diff --git a/pkg/itypes/consts.go b/pkg/itypes/consts.go
new file mode 100644
index 0000000..e9a9ebe
--- /dev/null
+++ b/pkg/itypes/consts.go
@@ -0,0 +1,8 @@
+package itypes
+
+const (
+	DEFAULT_IMAGE_PREFIX        = "https://admin.hobbyworld.com/files"
+	DEFAULT_HTTP_LISTEN_ADDR    = "0.0.0.0:8088"
+	DEFAULT_DATA_SOURCE_NAME    = "mysql://root:123456@127.0.0.1:3306/intent-system?charset=utf8mb4"
+	DEFAULT_SUB_CRON_EMAIL_PUSH = "0 0 * * *" //push the subscription email at 00:00 every day
+)
diff --git a/pkg/itypes/date.go b/pkg/itypes/date.go
new file mode 100644
index 0000000..7e137a2
--- /dev/null
+++ b/pkg/itypes/date.go
@@ -0,0 +1,68 @@
+package itypes
+
+import (
+	"database/sql/driver"
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+)
+
+const (
+	TIME_FORMAT_DATE = "2006-01-02"
+	TIME_FORMAT      = "2006-01-02 15:04:05"
+)
+
+type Date string
+
+func (d Date) String() string {
+	return string(d)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (d Date) MarshalJSON() ([]byte, error) {
+	strDateWithQuote := fmt.Sprintf("\"%s\"", d)
+	return []byte(strDateWithQuote), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (d *Date) UnmarshalJSON(dateBytes []byte) error {
+	var t time.Time
+	var dateLenIncludeQuotes = 12 // a quoted date string, e.g. "2021-05-08", is 12 bytes long, not 10
+	if len(dateBytes) > dateLenIncludeQuotes {
+		//maybe a timestamp like "2021-05-08T13:03:22Z"
+		if err := t.UnmarshalJSON(dateBytes); err != nil {
+			return err
+		}
+		*d = Date(t.Format(TIME_FORMAT_DATE))
+	} else {
+		strDate := strings.Replace(string(dateBytes), "\"", "", -1) //trim quotes
+		*d = Date(strDate)
+	}
+	return nil
+}
+
+// Scan implements the sql.Scanner interface for database deserialization.
+func (d *Date) Scan(src interface{}) (err error) {
+	var value []byte
+	switch src.(type) {
+	case int, int32, int64, uint, uint32, uint64, string:
+		value = []byte(fmt.Sprintf("%v", src))
+	case []byte:
+		value = src.([]byte)
+	default:
+		err = fmt.Errorf("unknown type [%v] to scan", reflect.TypeOf(src).String())
+		fmt.Printf("%s\n", err)
+		return
+	}
+	if err = d.UnmarshalJSON(value); err != nil {
+		fmt.Printf("unmarshal [%s] error [%s]\n", value, err)
+		return
+	}
+	return
+}
+
+// Value implements the driver.Valuer interface for database serialization.
+func (d Date) Value() (driver.Value, error) { + return d.String(), nil +} diff --git a/pkg/itypes/error_type.go b/pkg/itypes/error_type.go new file mode 100644 index 0000000..a337a22 --- /dev/null +++ b/pkg/itypes/error_type.go @@ -0,0 +1,169 @@ +package itypes + +import ( + "strings" +) + +const ( + DEFAULT_ERROR_TYPE_SEP = "|" +) + +// ----------------------------------------------------------------------------- +type ErrorType int + +const ( + ErrorType_OK ErrorType = 0 //ok + ErrorType_DiskMissing ErrorType = 1 << 0 //disk is missing + ErrorType_FileMissing ErrorType = 1 << 1 //file is missing or damage + ErrorType_TicketExpired ErrorType = 1 << 2 //ticket expired (PC1+PC2 take a long time more than 25 hours) + ErrorType_GPU ErrorType = 1 << 3 //GPU error + ErrorType_DiskIO ErrorType = 1 << 4 //disk input/output error + ErrorType_TaskAbort ErrorType = 1 << 5 //task abort + ErrorType_RustError ErrorType = 1 << 6 //rust error + ErrorType_InvalidProof ErrorType = 1 << 7 //C2 compute proof failed + ErrorType_WorkerClosed ErrorType = 1 << 8 //worker closed + ErrorType_MessageExecFailed ErrorType = 1 << 9 //message execution failed + ErrorType_MovingToStorageFailed ErrorType = 1 << 10 //moving sector to storage error + ErrorType_FileInconsistent ErrorType = 1 << 11 //file inconsistent + ErrorType_ComputeFailed ErrorType = 1 << 12 //consecutive compute fails + ErrorType_InvalidHashedNode ErrorType = 1 << 13 //invalid hashed node length + ErrorType_WebSocketClosed ErrorType = 1 << 14 //websocket connection closed + ErrorType_ConnectTimeout ErrorType = 1 << 15 //connect timeout + ErrorType_SectorNotFound ErrorType = 1 << 16 //sector not found + ErrorType_RpcConnClosed ErrorType = 1 << 17 //rpc connection close error + + //--------------------------------------------------------------- + ErrorType_Unknown ErrorType = 1 << 30 //unknown error (keep biggest of int) +) + +func (t ErrorType) GoString() string { + return t.String() +} + +func (t ErrorType) String() string { + var errs []string + + if t == ErrorType_OK { + return "OK" + } + + if t&ErrorType_Unknown > 0 { + errs = append(errs, "UnknownError") + } + if t&ErrorType_DiskMissing > 0 { + errs = append(errs, "DiskMissing") + } + if t&ErrorType_FileMissing > 0 { + errs = append(errs, "FileMissing") + } + if t&ErrorType_TicketExpired > 0 { + errs = append(errs, "TicketExpired") + } + if t&ErrorType_GPU > 0 { + errs = append(errs, "GPU") + } + if t&ErrorType_DiskIO > 0 { + errs = append(errs, "DiskIO") + } + if t&ErrorType_TaskAbort > 0 { + errs = append(errs, "TaskAbort") + } + if t&ErrorType_RustError > 0 { + errs = append(errs, "RustError") + } + if t&ErrorType_InvalidProof > 0 { + errs = append(errs, "InvalidProof") + } + if t&ErrorType_WorkerClosed > 0 { + errs = append(errs, "WorkerClosed") + } + if t&ErrorType_MessageExecFailed > 0 { + errs = append(errs, "MessageExecFailed") + } + if t&ErrorType_MovingToStorageFailed > 0 { + errs = append(errs, "MovingToStorageFailed") + } + if t&ErrorType_FileInconsistent > 0 { + errs = append(errs, "FileInconsistent") + } + if t&ErrorType_ComputeFailed > 0 { + errs = append(errs, "ComputeFailed") + } + if t&ErrorType_InvalidHashedNode > 0 { + errs = append(errs, "InvalidHashedNode") + } + if t&ErrorType_WebSocketClosed > 0 { + errs = append(errs, "WebSocketClosed") + } + if t&ErrorType_ConnectTimeout > 0 { + errs = append(errs, "ConnectTimeout") + } + if t&ErrorType_SectorNotFound > 0 { + errs = append(errs, "SectorNotFound") + } + if t&ErrorType_RpcConnClosed > 0 { + errs = append(errs, 
"RpcConnClosed") + } + return strings.Join(errs, DEFAULT_ERROR_TYPE_SEP) +} + +func MakeErrorTypeFromName(strType string) (et ErrorType) { + + for _, v := range strings.Split(strType, DEFAULT_ERROR_TYPE_SEP) { + + switch v { + case "UnknownError": + et |= ErrorType_Unknown + case "OK": + et |= ErrorType_OK + case "DiskMissing": + et |= ErrorType_DiskMissing + case "FileMissing": + et |= ErrorType_FileMissing + case "TicketExpired": + et |= ErrorType_TicketExpired + case "GPU": + et |= ErrorType_GPU + case "DiskIO": + et |= ErrorType_DiskIO + case "TaskAbort": + et |= ErrorType_TaskAbort + case "RustError": + et |= ErrorType_RustError + case "InvalidProof": + et |= ErrorType_InvalidProof + case "WorkerClosed": + et |= ErrorType_WorkerClosed + case "MessageExecFailed": + et |= ErrorType_MessageExecFailed + case "MovingToStorageFailed": + et |= ErrorType_MovingToStorageFailed + case "FileInconsistent": + et |= ErrorType_FileInconsistent + case "ComputeFailed": + et |= ErrorType_ComputeFailed + case "InvalidHashedNode": + et |= ErrorType_InvalidHashedNode + case "WebSocketClosed": + et |= ErrorType_WebSocketClosed + case "ConnectTimeout": + et |= ErrorType_ConnectTimeout + case "SectorNotFound": + et |= ErrorType_SectorNotFound + case "RpcConnClosed": + et |= ErrorType_RpcConnClosed + } + } + return +} + +// JSON marshal implement +func (t ErrorType) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} + +// JSON unmarshal implement +func (t *ErrorType) UnmarshalText(value []byte) error { + *t = MakeErrorTypeFromName(string(value)) + return nil +} diff --git a/pkg/itypes/http.go b/pkg/itypes/http.go new file mode 100644 index 0000000..0ccc3d0 --- /dev/null +++ b/pkg/itypes/http.go @@ -0,0 +1,97 @@ +package itypes + +import ( + "encoding/json" +) + +type AuthType string + +const ( + AuthType_Null AuthType = "" + AuthType_Basic AuthType = "Basic" + AuthType_Bearer AuthType = "Bearer" + AuthType_ProjectKey AuthType = "ProjectKey" +) + +func (t AuthType) String() string { + return string(t) +} +func (t AuthType) Valid() bool { + switch t { + case AuthType_Basic: + return true + case AuthType_Bearer: + return true + case AuthType_ProjectKey: + return true + } + return false +} + +const ( + HEADER_AUTHORIZATION = "Authorization" + HEADER_AUTH_TOKEN = "Auth-Token" +) + +const ( + MAX_DURATION int32 = 1440 + MIN_DURATION int32 = 1 + JSON_RPC_VER string = "2.0" +) + +type HttpHeader struct { + Code int `json:"code"` //response code of business (0=OK, other fail) + Message string `json:"message"` //error message + Total int64 `json:"total"` //result total + Count int `json:"count"` //result count (single page) +} + +type HttpResponse struct { + Header HttpHeader `json:"header"` //response header + Data interface{} `json:"data"` //response data body +} + +type RpcRequest struct { + Id interface{} `json:"id"` //0 + JsonRpc string `json:"json_rpc"` //2.0 + Method string `json:"method"` //JSON-RPC method + //Params []interface{} `json:"params"` //JSON-RPC parameters [any...] 
+}
+
+type RpcError struct {
+	Code    int         `json:"code"`    //response code of business (0=OK, other fail)
+	Message string      `json:"message"` //error message
+	Data    interface{} `json:"data"`    //error attach data
+}
+
+type RpcResponse struct {
+	Id      interface{} `json:"id"`       //0
+	JsonRpc string      `json:"json_rpc"` //2.0
+	Error   RpcError    `json:"error"`    //error message
+	Result  interface{} `json:"result"`   //JSON-RPC result
+}
+
+func (r *RpcResponse) String() string {
+	data, _ := json.Marshal(r)
+	return string(data)
+}
+
+func NewRpcResponse(id interface{}, result interface{}) *RpcResponse {
+	return &RpcResponse{
+		Id:      id,
+		JsonRpc: JSON_RPC_VER,
+		Result:  result,
+	}
+}
+
+func NewRpcError(id interface{}, code int, strError string) *RpcResponse {
+	return &RpcResponse{
+		Id:      id,
+		JsonRpc: JSON_RPC_VER,
+		Error: RpcError{
+			Code:    code,
+			Message: strError,
+			Data:    nil,
+		},
+	}
+}
diff --git a/pkg/itypes/session.go b/pkg/itypes/session.go
new file mode 100644
index 0000000..60af782
--- /dev/null
+++ b/pkg/itypes/session.go
@@ -0,0 +1,71 @@
+package itypes
+
+import (
+	"os"
+	"path/filepath"
+)
+
+var (
+	DefaultStaticHome  = os.ExpandEnv(filepath.Join("$HOME", ".intent-system/static"))
+	DefaultDocsHome    = os.ExpandEnv(filepath.Join("$HOME", ".intent-system/docs"))
+	DefaultImagesHome  = os.ExpandEnv(filepath.Join("$HOME", ".intent-system/images"))
+	DefaultConfigHome  = os.ExpandEnv(filepath.Join("$HOME", ".intent-system/config"))
+	DefaultLevelDBHome = os.ExpandEnv(filepath.Join("$HOME", ".intent-system/db"))
+)
+
+type Context struct {
+	*Session
+}
+
+type Session struct {
+	UserId      int32  `json:"user_id" db:"user_id"`
+	UserName    string `json:"user_name" db:"user_name"`
+	Email       string `json:"email" db:"email"`
+	Alias       string `json:"alias" db:"alias"`
+	PhoneNumber string `json:"phone_number" db:"phone_number"`
+	IsAdmin     bool   `json:"is_admin" db:"is_admin"`
+	LoginIP     string `json:"login_ip" db:"login_ip"`
+	AuthToken   string `json:"auth_token" db:"auth_token"`
+	LoginMode   int8   `json:"login_mode" db:"login_mode"`
+	IsCustomer  bool   `json:"is_customer" db:"is_custom"`
+}
+
+func (ctx *Context) UserId() int32 {
+	return ctx.Session.UserId
+}
+
+func (ctx *Context) AuthToken() string {
+	return ctx.Session.AuthToken
+}
+
+func (ctx *Context) UserName() string {
+	return ctx.Session.UserName
+}
+
+func (ctx *Context) Address() string {
+	return ctx.Session.UserName
+}
+
+func (ctx *Context) Alias() string {
+	return ctx.Session.Alias
+}
+
+func (ctx *Context) PhoneNumber() string {
+	return ctx.Session.PhoneNumber
+}
+
+func (ctx *Context) LoginIP() string {
+	return ctx.Session.LoginIP
+}
+
+func (ctx *Context) IsAdmin() bool {
+	return ctx.Session.IsAdmin
+}
+
+func (ctx *Context) LoginMode() int8 {
+	return ctx.Session.LoginMode
+}
+
+func (ctx *Context) GetEmail() string {
+	return ctx.Session.Email
+}
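A small sketch of the Context wrapper in use: the accessor methods shadow the promoted Session fields, so ctx.UserId() resolves to the method rather than the raw field. Values below are illustrative; the module path follows the patch's own imports:

```go
package main

import (
	"fmt"

	"intent-system/pkg/itypes"
)

func main() {
	sess := &itypes.Session{
		UserId:   7,
		UserName: "alice",
		IsAdmin:  false,
		LoginIP:  "10.0.0.5",
	}
	ctx := &itypes.Context{Session: sess}

	// Handlers read through the accessors, keeping the Session layout
	// free to change without touching call sites.
	fmt.Println(ctx.UserId(), ctx.UserName(), ctx.IsAdmin())
}
```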
`json:"urls" db:"urls" bson:"urls"` //remote access urls +} +type StorageSlice []*Storage + +func (s StorageSlice) GoString() string { + return s.String() +} + +func (s StorageSlice) String() string { + data, _ := json.MarshalIndent(s, "", "\t") + return string(data) +} + +func (s StorageSlice) Len() int { + return len(s) +} + +func (s StorageSlice) ForEach(cb func(*Storage) error) { + for _, v := range s { + if err := cb(v); err != nil { + break + } + } +} + +// Scan implements the sql.Scanner interface for database deserialization. +func (s StorageSlice) Scan(src interface{}) error { + var data []byte + var err error + + switch src.(type) { + case []byte: + data = src.([]byte) + case string: + data = []byte(src.(string)) + case *string: + data = []byte(*src.(*string)) + default: + err = fmt.Errorf("can not handle with unknown type [%v], just only []byte or string supported", reflect.TypeOf(src).Kind()) + log.Errorf(err.Error()) + return err + } + err = json.Unmarshal(data, s) + return err +} + +// Value implements the driver.Valuer interface for database serialization. +func (s StorageSlice) Value() (value driver.Value, err error) { + var data []byte + data, err = json.MarshalIndent(s, "", "\t") + return string(data), nil +} diff --git a/pkg/itypes/task_type.go b/pkg/itypes/task_type.go new file mode 100644 index 0000000..7c32b11 --- /dev/null +++ b/pkg/itypes/task_type.go @@ -0,0 +1,73 @@ +package itypes + +import "fmt" + +type TaskType int //dao_sector sealing phase type +const ( + TaskType_Unknown TaskType = 0 + TaskType_AP TaskType = 1 + TaskType_PC1 TaskType = 2 + TaskType_PC2 TaskType = 3 + TaskType_C1 TaskType = 4 + TaskType_C2 TaskType = 5 + TaskType_GET TaskType = 6 + TaskType_FIN TaskType = 7 +) + +func (t TaskType) GoString() string { + return t.String() +} + +func (t TaskType) String() string { + switch t { + case TaskType_AP: + return "AP" + case TaskType_PC1: + return "PC1" + case TaskType_PC2: + return "PC2" + case TaskType_C1: + return "C1" + case TaskType_C2: + return "C2" + case TaskType_GET: + return "GET" + case TaskType_FIN: + return "FIN" + } + return fmt.Sprintf("Unknown<%d>", t) +} + +func (t *TaskType) FromString(strTaskName string) { + *t = MakeTaskTypeByName(strTaskName) +} + +func (t TaskType) MarshalText() (value []byte, err error) { + value = []byte(t.String()) + return value, nil +} + +func (t *TaskType) UnmarshalText(value []byte) (err error) { + t.FromString(string(value)) + return nil +} + +func MakeTaskTypeByName(strTaskName string) TaskType { + switch strTaskName { + case "AP": + return TaskType_AP + case "PC1": + return TaskType_PC1 + case "PC2": + return TaskType_PC2 + case "C1": + return TaskType_C1 + case "C2": + return TaskType_C2 + case "GET": + return TaskType_GET + case "FIN": + return TaskType_FIN + } + return TaskType_Unknown +} diff --git a/pkg/itypes/types.go b/pkg/itypes/types.go new file mode 100644 index 0000000..1f3afb0 --- /dev/null +++ b/pkg/itypes/types.go @@ -0,0 +1,6 @@ +package itypes + +const ( + DEFUALT_ACCESS_DOMAIN = "host:port" + DEFUALT_REGISTRY_MDNS = "mdns" +) diff --git a/pkg/itypes/websocket.go b/pkg/itypes/websocket.go new file mode 100644 index 0000000..11478b3 --- /dev/null +++ b/pkg/itypes/websocket.go @@ -0,0 +1,83 @@ +package itypes + +import ( + "sync" + + "github.com/civet148/log" + "github.com/gorilla/websocket" +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. 
diff --git a/pkg/itypes/websocket.go b/pkg/itypes/websocket.go
new file mode 100644
index 0000000..11478b3
--- /dev/null
+++ b/pkg/itypes/websocket.go
@@ -0,0 +1,83 @@
+package itypes
+
+import (
+	"sync"
+
+	"github.com/civet148/log"
+	"github.com/gorilla/websocket"
+)
+
+// The message types are defined in RFC 6455, section 11.8.
+const (
+	// TextMessage denotes a text data message. The text message payload is
+	// interpreted as UTF-8 encoded text data.
+	TextMessage = 1
+
+	// BinaryMessage denotes a binary data message.
+	BinaryMessage = 2
+
+	// CloseMessage denotes a close control message. The optional message
+	// payload contains a numeric code and text. Use the FormatCloseMessage
+	// function to format a close message payload.
+	CloseMessage = 8
+
+	// PingMessage denotes a ping control message. The optional message payload
+	// is UTF-8 encoded text.
+	PingMessage = 9
+
+	// PongMessage denotes a pong control message. The optional message payload
+	// is UTF-8 encoded text.
+	PongMessage = 10
+)
+
+type WebSocket struct {
+	outlocker sync.RWMutex    //external lock
+	inlocker  sync.RWMutex    //internal lock
+	msgType   int             //message type
+	ws        *websocket.Conn //websocket connection
+	id        string          //session ID
+}
+
+func NewWebSocket(conn *websocket.Conn, id string) (*WebSocket, error) {
+	if conn == nil {
+		return nil, log.Errorf("websocket connection is nil")
+	}
+
+	return &WebSocket{
+		ws:      conn,
+		id:      id,
+		msgType: TextMessage,
+	}, nil
+}
+
+func (m *WebSocket) Lock() {
+	m.outlocker.Lock()
+}
+
+func (m *WebSocket) Unlock() {
+	m.outlocker.Unlock()
+}
+
+func (m *WebSocket) ReadMessage() ([]byte, error) {
+	typ, data, err := m.ws.ReadMessage()
+	m.msgType = typ
+	return data, err
+}
+
+func (m *WebSocket) WriteMessage(data []byte) error {
+	m.inlocker.Lock()
+	defer m.inlocker.Unlock()
+	return m.ws.WriteMessage(m.msgType, data)
+}
+
+func (m *WebSocket) Close() error {
+	return m.ws.Close()
+}
+
+func (m *WebSocket) Id() string {
+	return m.id
+}
+
+func (m *WebSocket) MsgType() int {
+	return m.msgType
+}
diff --git a/pkg/middleware/cross.go b/pkg/middleware/cross.go
new file mode 100644
index 0000000..3d9a0e7
--- /dev/null
+++ b/pkg/middleware/cross.go
@@ -0,0 +1,28 @@
+package middleware
+
+import (
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+)
+
+func Cors() gin.HandlerFunc { //gin cross-domain handler middleware
+	return func(c *gin.Context) {
+		method := c.Request.Method
+
+		//set headers for cross-domain requests
+		c.Header("Access-Control-Allow-Origin", "*")
+		c.Header("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE, UPDATE")
+		c.Header("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, Authorization, Auth-Token, *")
+		c.Header("Access-Control-Expose-Headers", "Content-Length, Access-Control-Allow-Origin, Access-Control-Allow-Headers, Cache-Control, Content-Language, Content-Type, content-Disposition")
+		c.Header("Access-Control-Allow-Credentials", "false")
+		c.Set("content-type", "application/json, text/plain, multipart/form-data, */*")
+
+		// answer OPTIONS preflight directly (code=204) and stop the chain
+		if method == "OPTIONS" {
+			c.AbortWithStatus(http.StatusNoContent)
+			return
+		}
+
+		c.Next()
+	}
+}
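Wiring the CORS middleware into a gin engine could look like the sketch below; the route and port are illustrative (the port merely mirrors DEFAULT_HTTP_LISTEN_ADDR):

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"

	"intent-system/pkg/middleware"
)

func main() {
	r := gin.Default()
	// Register the CORS middleware globally so every route (and the
	// browser's OPTIONS preflight) gets the Access-Control-* headers.
	r.Use(middleware.Cors())

	r.GET("/ping", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"message": "pong"})
	})

	_ = r.Run(":8088")
}
```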
diff --git a/pkg/middleware/jwt.go b/pkg/middleware/jwt.go
new file mode 100644
index 0000000..123a477
--- /dev/null
+++ b/pkg/middleware/jwt.go
@@ -0,0 +1,171 @@
+package middleware
+
+import (
+	"encoding/json"
+	"fmt"
+	"intent-system/pkg/itypes"
+	"net/http"
+	"time"
+
+	"github.com/civet148/log"
+
+	"github.com/dgrijalva/jwt-go"
+	"github.com/gin-gonic/gin"
+)
+
+const (
+	CLAIM_EXPIRE       = "claim_expire"
+	CLAIM_ISSUE_AT     = "claim_iat"
+	CLAIM_USER_SESSION = "user_session"
+)
+
+const (
+	DEFAULT_TOKEN_DURATION = 48 * time.Hour
+)
+
+const (
+	jwtTokenSecret = "7bdf27cffd5fd105af4efb20b1090bbe"
+)
+
+type JwtCode int
+
+const (
+	JWT_CODE_SUCCESS             JwtCode = 0
+	JWT_CODE_ERROR_CHECK_TOKEN   JwtCode = -1
+	JWT_CODE_ERROR_PARSE_TOKEN   JwtCode = -2
+	JWT_CODE_ERROR_INVALID_TOKEN JwtCode = -3
+	JWT_CODE_ERROR_TOKEN_EXPIRED JwtCode = -4
+)
+
+var codeMessages = map[JwtCode]string{
+	JWT_CODE_SUCCESS:             "JWT_CODE_SUCCESS",
+	JWT_CODE_ERROR_CHECK_TOKEN:   "JWT_CODE_ERROR_CHECK_TOKEN",
+	JWT_CODE_ERROR_PARSE_TOKEN:   "JWT_CODE_ERROR_PARSE_TOKEN",
+	JWT_CODE_ERROR_INVALID_TOKEN: "JWT_CODE_ERROR_INVALID_TOKEN",
+	JWT_CODE_ERROR_TOKEN_EXPIRED: "JWT_CODE_ERROR_TOKEN_EXPIRED",
+}
+
+func (j JwtCode) GoString() string {
+	return j.String()
+}
+
+func (j JwtCode) String() string {
+	strMessage, ok := codeMessages[j]
+	if ok {
+		return strMessage
+	}
+	return fmt.Sprintf("JWT_CODE_UNKNOWN<%d>", j)
+}
+
+func JWT() gin.HandlerFunc {
+	return func(c *gin.Context) {
+		var data interface{}
+		if err := ParseToken(c); err != nil {
+			c.JSON(http.StatusUnauthorized, itypes.HttpResponse{
+				Header: itypes.HttpHeader{
+					Code:    itypes.CODE_UNAUTHORIZED,
+					Message: "unauthorized",
+					Count:   0,
+				},
+				Data: data,
+			})
+			log.Errorf("[JWT] token parse failed, error [%s]", err.Error())
+			c.Abort()
+			return
+		}
+
+		c.Next()
+	}
+}
+
+// generate a JWT token
+func GenerateToken(session interface{}, duration ...interface{}) (token string, err error) {
+
+	var d time.Duration
+	var claims = make(jwt.MapClaims)
+
+	if len(duration) == 0 {
+		d = DEFAULT_TOKEN_DURATION
+	} else {
+		var ok bool
+		if d, ok = duration[0].(time.Duration); !ok {
+			d = DEFAULT_TOKEN_DURATION
+		}
+	}
+	var data []byte
+	data, err = json.Marshal(session)
+	if err != nil {
+		return token, log.Errorf(err.Error())
+	}
+	sign := jwt.New(jwt.SigningMethodHS256)
+	claims[CLAIM_EXPIRE] = time.Now().Add(d).Unix()
+	claims[CLAIM_ISSUE_AT] = time.Now().Unix()
+	claims[CLAIM_USER_SESSION] = string(data)
+	sign.Claims = claims
+
+	token, err = sign.SignedString([]byte(jwtTokenSecret))
+	return token, err
+}
+
+// parse JWT token claims
+func ParseToken(c *gin.Context) error {
+	strAuthToken := GetAuthToken(c)
+	if strAuthToken == "" {
+		return log.Errorf("[JWT] request header has no key '%s' or '%s'", itypes.HEADER_AUTH_TOKEN, itypes.HEADER_AUTHORIZATION)
+	}
+	claims, err := ParseTokenClaims(strAuthToken)
+	if err != nil {
+		return log.Errorf(err.Error())
+	}
+	c.Keys = make(map[string]interface{})
+	c.Keys[CLAIM_EXPIRE] = int64(claims[CLAIM_EXPIRE].(float64))
+	if c.Keys[CLAIM_EXPIRE].(int64) < time.Now().Unix() {
+		return log.Errorf("[JWT] token [%s] expired at %v\n", strAuthToken, c.Keys[CLAIM_EXPIRE])
+	}
+
+	c.Keys[CLAIM_ISSUE_AT] = int64(claims[CLAIM_ISSUE_AT].(float64))
+	c.Keys[CLAIM_USER_SESSION] = claims[CLAIM_USER_SESSION].(string)
+	return nil
+}
+
+func ParseTokenClaims(strAuthToken string) (jwt.MapClaims, error) {
+	token, err := jwt.Parse(strAuthToken, func(*jwt.Token) (interface{}, error) {
+		return []byte(jwtTokenSecret), nil
+	})
+	if err != nil {
+		return jwt.MapClaims{}, log.Errorf("[JWT] parse token error [%s]", err)
+	}
+
+	claims, ok := token.Claims.(jwt.MapClaims)
+	if !ok {
+		return jwt.MapClaims{}, log.Errorf("[JWT] parse token error: no claims found")
+	}
+	return claims, nil
+}
+
+func GetAuthToken(c *gin.Context) string {
+	strToken := c.Request.Header.Get(itypes.HEADER_AUTH_TOKEN)
+	if strToken == "" {
+		strToken = c.Request.Header.Get(itypes.HEADER_AUTHORIZATION)
+	}
+	return strToken
+}
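The token life cycle can be exercised without an HTTP server; this mirrors the unit test further down in the patch (the 2-hour duration is arbitrary, and GetAuthSessionFromToken is defined just below):

```go
package main

import (
	"fmt"
	"time"

	"intent-system/pkg/itypes"
	"intent-system/pkg/middleware"
)

func main() {
	// Issue a token for a session; the optional duration overrides the
	// 48h default baked into DEFAULT_TOKEN_DURATION.
	token, err := middleware.GenerateToken(&itypes.Session{
		UserId:   1,
		UserName: "admin",
		IsAdmin:  true,
	}, 2*time.Hour)
	if err != nil {
		panic(err)
	}

	// Round trip: the session travels inside the CLAIM_USER_SESSION claim.
	var sess itypes.Session
	if err = middleware.GetAuthSessionFromToken(token, &sess); err != nil {
		panic(err)
	}
	fmt.Printf("user=%s admin=%v\n", sess.UserName, sess.IsAdmin)
}
```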
+
+func GetAuthSessionFromToken(strAuthToken string, session interface{}) error {
+	claims, err := ParseTokenClaims(strAuthToken)
+	if err != nil {
+		return log.Errorf(err.Error())
+	}
+	strSessionJson := claims[CLAIM_USER_SESSION].(string)
+	err = json.Unmarshal([]byte(strSessionJson), session)
+	if err != nil {
+		return log.Errorf(err.Error())
+	}
+	return nil
+}
+
+func GetAuthSessionFromContext(c *gin.Context, session interface{}) error {
+	strAuthToken := GetAuthToken(c)
+	return GetAuthSessionFromToken(strAuthToken, session)
+}
diff --git a/pkg/middleware/jwt_test.go b/pkg/middleware/jwt_test.go
new file mode 100644
index 0000000..a6892aa
--- /dev/null
+++ b/pkg/middleware/jwt_test.go
@@ -0,0 +1,28 @@
+package middleware
+
+import (
+	"intent-system/pkg/itypes"
+	"github.com/civet148/log"
+	"testing"
+	"time"
+)
+
+func TestToken(t *testing.T) {
+	strToken, err := GenerateToken(&itypes.Session{
+		UserId:   1,
+		UserName: "admin",
+		IsAdmin:  true,
+	}, 6000*time.Hour)
+	if err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	log.Infof("token [%s]", strToken)
+	var session itypes.Session
+	err = GetAuthSessionFromToken(strToken, &session)
+	if err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	log.Infof("session [%+v]", session)
+}
diff --git a/pkg/middleware/limiter.go b/pkg/middleware/limiter.go
new file mode 100644
index 0000000..0db86d3
--- /dev/null
+++ b/pkg/middleware/limiter.go
@@ -0,0 +1,19 @@
+package middleware
+
+import (
+	"github.com/gin-gonic/gin"
+	"golang.org/x/time/rate"
+	"net/http"
+)
+
+func RateLimit(limit rate.Limit, burst int) gin.HandlerFunc {
+	limiter := rate.NewLimiter(limit, burst)
+	return func(c *gin.Context) {
+		if !limiter.Allow() {
+			c.String(http.StatusTooManyRequests, "too many requests")
+			c.Abort()
+			return
+		}
+		c.Next()
+	}
+}
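Attaching the limiter to a route group might look like this; note the limiter is shared by all clients of the group (one token bucket per middleware instance), not per IP, and the rate numbers are arbitrary:

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
	"golang.org/x/time/rate"

	"intent-system/pkg/middleware"
)

func main() {
	r := gin.Default()

	// Allow a sustained 10 requests/second with bursts of 20; anything
	// beyond that is rejected with HTTP 429 by the middleware.
	api := r.Group("/api", middleware.RateLimit(rate.Limit(10), 20))
	api.GET("/health", func(c *gin.Context) {
		c.String(http.StatusOK, "ok")
	})

	_ = r.Run(":8088")
}
```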
diff --git a/pkg/privilege/casbin.conf b/pkg/privilege/casbin.conf
new file mode 100644
index 0000000..f0a8e26
--- /dev/null
+++ b/pkg/privilege/casbin.conf
@@ -0,0 +1,14 @@
+[request_definition]
+r = sub, obj, act
+
+[policy_definition]
+p = sub, obj, act
+
+[role_definition]
+g = _, _
+
+[policy_effect]
+e = some(where (p.eft == allow))
+
+[matchers]
+m = g(r.sub, p.sub) && keyMatch(r.obj, p.obj) && r.act == p.act || r.sub == "root"
\ No newline at end of file
diff --git a/pkg/privilege/casbin.go b/pkg/privilege/casbin.go
new file mode 100644
index 0000000..74d8a58
--- /dev/null
+++ b/pkg/privilege/casbin.go
@@ -0,0 +1,353 @@
+package privilege
+
+import (
+	"fmt"
+	"github.com/casbin/casbin/v2"
+	"github.com/casbin/casbin/v2/model"
+	xormadapter "github.com/casbin/xorm-adapter/v2"
+	"github.com/civet148/log"
+	"github.com/civet148/sqlca/v2"
+	_ "github.com/go-sql-driver/mysql"
+	"intent-system/pkg/dal/dao"
+	"intent-system/pkg/dal/models"
+	"strings"
+)
+
+//var CasRule *casbin.Enforcer
+
+const DefaultCasbinModel = `
+[request_definition]
+r = sub, obj, act
+
+[policy_definition]
+p = sub, obj, act
+
+[role_definition]
+g = _, _
+
+[policy_effect]
+e = some(where (p.eft == allow))
+
+[matchers]
+m = g(r.sub, p.sub) && keyMatch(r.obj, p.obj) && r.act == p.act || r.sub == "root"
+`
+
+type CasbinOption struct {
+	Model string
+	DSN   string
+}
+
+type CasbinRule struct {
+	cas          *casbin.Enforcer
+	privilegeDAO *dao.PrivilegeDAO
+}
+
+func NewCasbinRule(opt CasbinOption) *CasbinRule {
+	db, err := sqlca.NewEngine(opt.DSN)
+	if err != nil {
+		log.Panic("connect to database error [%s]", err.Error())
+		return nil
+	}
+	cas := &CasbinRule{
+		privilegeDAO: dao.NewPrivilegeDAO(db),
+		cas:          newCasbin(opt.DSN, opt.Model),
+	}
+
+	return cas.initPrivileges()
+}
+
+func newCasbin(strDSN, strModel string) (cas *casbin.Enforcer) {
+	var err error
+	if strings.HasPrefix(strDSN, "mysql://") {
+		strDSN, err = sqlca.Url2MySql(strDSN)
+		if err != nil {
+			panic(err.Error())
+		}
+	}
+	log.Infof("DSN [%s]", strDSN)
+	// The trailing "true" matters when using your own database: it defaults to false,
+	// which uses the default database name "casbin" (created if missing); the policy
+	// table is created automatically as "casbin_rule", no manual DDL needed.
+	a, err := xormadapter.NewAdapter("mysql", strDSN, true)
+	if err != nil {
+		log.Fatalf("error: model: %s", err)
+	}
+	if strModel == "" {
+		strModel = DefaultCasbinModel
+	}
+	m, err := model.NewModelFromString(strModel)
+	if err != nil {
+		log.Fatalf("error: model: %s", err)
+	}
+
+	cas, err = casbin.NewEnforcer(m, a)
+	if err != nil {
+		log.Fatalf("error: model: %s", err)
+	}
+	//load policies from DB
+	err = cas.LoadPolicy()
+	if err != nil {
+		log.Fatalf("casbin load policy error [%s]", err.Error())
+		return nil
+	}
+	return cas
+}
+
+// default column positions inside the [][]string policy rows
+const (
+	roleIndex = 0
+	pathIndex = 1
+	authIndex = 2
+)
+
+func (m *CasbinRule) initPrivileges() *CasbinRule {
+	var err error
+	for _, p := range TotalPrivileges() {
+		_, err = m.privilegeDAO.Upsert(&models.PrivilegeDO{
+			Name:       p.Name,
+			Label:      p.Label,
+			Path:       p.Path,
+			Children:   p.Children,
+			IsInherent: true,
+			Remark:     "inherent privilege (DO NOT EDIT OR DELETE)",
+			Deleted:    false,
+		})
+		if err != nil {
+			log.Panic(err.Error())
+		}
+	}
+	return m
+}
+
+// edit privileges (every menu except accounts and roles gets full CRUD; accounts and roles stay view-only)
+func (m *CasbinRule) EditPrivileges() (authorities []string) {
+	return []string{
+		UserAccess,
+		RoleAccess,
+		NewsAccess,
+		NewsAdd,
+		NewsDelete,
+		NewsEdit,
+		QA_Access,
+		QA_Add,
+		QA_Delete,
+		QA_Edit,
+		SubAccess,
+		SubAdd,
+		SubEdit,
+		TagAccess,
+		TagAdd,
+		TagDelete,
+		TagEdit,
+		CustomerAccess,
+	}
+}
+
+// ordinary admin account privileges (view-only access to every menu)
+func (m *CasbinRule) AccessPrivileges() (authorities []string) {
+	return []string{
+		UserAccess,
+		RoleAccess,
+		NewsAccess,
+		QA_Access,
+		SubAccess,
+		TagAccess,
+		CustomerAccess,
+	}
+}
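To make the matcher's semantics concrete, here is a self-contained sketch with an in-memory casbin enforcer: no adapter or MySQL involved, and the role, user, and path values are made up:

```go
package main

import (
	"fmt"

	"github.com/casbin/casbin/v2"
	"github.com/casbin/casbin/v2/model"
)

const casbinModel = `
[request_definition]
r = sub, obj, act

[policy_definition]
p = sub, obj, act

[role_definition]
g = _, _

[policy_effect]
e = some(where (p.eft == allow))

[matchers]
m = g(r.sub, p.sub) && keyMatch(r.obj, p.obj) && r.act == p.act || r.sub == "root"
`

func main() {
	m, _ := model.NewModelFromString(casbinModel)
	e, _ := casbin.NewEnforcer(m)

	// A role gets a wildcard path plus an action; a user gets the role.
	e.AddPolicy("admin", "/api/v1/platform/*", "UserAccess")
	e.AddGroupingPolicy("alice", "admin")

	ok, _ := e.Enforce("alice", "/api/v1/platform/user/list", "UserAccess")
	fmt.Println(ok) // true: alice inherits admin, keyMatch covers the subpath

	ok, _ = e.Enforce("bob", "/api/v1/platform/user/list", "UserAccess")
	fmt.Println(ok) // false: no role binding

	ok, _ = e.Enforce("root", "/anything", "AnyAct")
	fmt.Println(ok) // true: the trailing || r.sub == "root" bypasses all checks
}
```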
+
+func (m *CasbinRule) TotalPrivileges() (authorities []string) {
+	privileges := TotalPrivileges()
+	for _, p := range privileges {
+		as := recursiveAuthorities(p.Children)
+		if len(as) != 0 {
+			authorities = append(authorities, as...)
+		}
+	}
+	return authorities
+}
+
+// check whether the user + URL + privilege combination is allowed
+func (m *CasbinRule) Enforce(strUserName, strRequestURI string, privilege string) (ok bool, err error) {
+	return m.cas.Enforce(strUserName, strRequestURI, string(privilege))
+}
+
+// grant a privilege to a role
+func (m *CasbinRule) AddRoleAuthority(role string, accessPath string, authority string) {
+	_, err := m.cas.AddPolicy(role, accessPath, authority)
+	if err != nil {
+		log.Errorf("add role [%s] authority [%s] path [%s] error [%s]", role, authority, accessPath, err.Error())
+		return
+	}
+}
+
+// get the privileges granted to a role
+func (m *CasbinRule) GetRoleAuthority(role string) (authority []string) {
+	var authorityList = make([]string, 0)
+	list := m.cas.GetPermissionsForUser(role)
+	for _, vlist := range list {
+		if len(vlist) > authIndex {
+			authorityList = append(authorityList, vlist[authIndex])
+		}
+	}
+	return authorityList
+}
+
+// role privilege inheritance: roleA inherits roleB's privileges
+func (m *CasbinRule) InheritRoleAuthority(roleA, roleB string) {
+	// fetch roleB's privileges
+	list := m.cas.GetPermissionsForUser(roleB)
+	for _, listV := range list {
+		if len(listV) > authIndex {
+			m.cas.AddPolicy(roleA, listV[pathIndex], listV[authIndex])
+		}
+	}
+}
+
+// list the users currently bound to a role
+func (m *CasbinRule) GetUsersForRole(role string) (users []string, err error) {
+	return m.cas.GetUsersForRole(role)
+}
+
+// on role rename: users inherit the new role and the old role binding is removed
+func (m *CasbinRule) InheritUserRole(roleA, roleB string) {
+	// fetch the users holding the old role
+	res, err := m.cas.GetUsersForRole(roleB)
+	if err != nil {
+		log.Errorf("get users for role:%s error:%s", roleB, err.Error())
+		return
+	}
+	for _, user := range res {
+		m.AddUserRole(user, roleA)
+		m.DeleteUserRole(user, roleB)
+	}
+}
+
+// revoke a privilege from a role
+func (m *CasbinRule) DeleteRoleAuthority(role string, accessPath string, authority string) {
+	if ok, _ := m.cas.RemovePolicy(role, accessPath, authority); !ok {
+		fmt.Println("role authority doesn't exist!")
+	} else {
+		fmt.Println("role authority delete success")
+	}
+}
+
+// delete a role
+func (m *CasbinRule) DeleteRole(role string) {
+	m.cas.DeleteRole(role)
+}
+
+// get the privilege list of a user
+func (m *CasbinRule) GetUserRoleList(userName string) (roleList []string) {
+	roleList = make([]string, 0)
+	// fetch the user's roles
+	res, err := m.cas.GetRolesForUser(userName)
+	if err != nil {
+		log.Error(err.Error())
+		return roleList
+	}
+	for _, role := range res {
+		// fetch each role's privileges
+		list := m.cas.GetPermissionsForUser(role)
+		for _, vlist := range list {
+			if len(vlist) > authIndex {
+				roleList = append(roleList, vlist[authIndex])
+			}
+		}
+	}
+	return m.removeDuplicateElement(roleList)
+}
+
+// bind a role to a user
+func (m *CasbinRule) AddUserRole(userName string, role string) {
+	if ok, _ := m.cas.AddRoleForUser(userName, role); !ok {
+		log.Infof("role binding already exists: %s", role)
+	} else {
+		log.Infof("add role success!")
+	}
+}
+
+// remove a role from a user
+func (m *CasbinRule) DeleteUserRole(userName, role string) {
+	ok, _ := m.cas.DeleteRoleForUser(userName, role)
+	if ok {
+		log.Infof("delete role:%s for user:%s success", role, userName)
+	} else {
+		log.Infof("delete role:%s for user:%s failed!", role, userName)
+	}
+}
+
+// remove all roles from a user
+func (m *CasbinRule) DeleteAllRoleForUser(userName string) {
+	if ok, _ := m.cas.DeleteRolesForUser(userName); !ok {
+		log.Infof("delete all user roles failed:%s", userName)
+	} else {
+		log.Infof("delete all user roles success!")
+	}
+}
+
+// delete a user
+func (m *CasbinRule) DeleteUser(userName string) {
+	m.cas.DeleteUser(userName)
+}
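A sketch of how these helpers might be wired together. The DSN below is a placeholder and NewCasbinRule needs a reachable MySQL instance (it panics otherwise), so this compiles against the patch but only runs with a real database; the role and user names are made up:

```go
package main

import (
	"fmt"

	"intent-system/pkg/privilege"
)

func main() {
	// Placeholder DSN: point this at a real MySQL instance before running.
	rule := privilege.NewCasbinRule(privilege.CasbinOption{
		DSN: "mysql://root:123456@127.0.0.1:3306/intent-system?charset=utf8mb4",
	})

	// Grant the "editor" role every editable privilege on its backing path,
	// then bind a user to the role and read the effective privilege list.
	for _, auth := range rule.EditPrivileges() {
		rule.AddRoleAuthority("editor", rule.GetPrivilegePath(auth), auth)
	}
	rule.AddUserRole("alice", "editor")
	fmt.Println(rule.GetUserRoleList("alice"))
}
```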
+
+// deduplicate the privilege list
+func (m *CasbinRule) removeDuplicateElement(authList []string) []string {
+	result := make([]string, 0, len(authList))
+	temp := map[string]struct{}{}
+	for _, item := range authList {
+		if _, ok := temp[item]; !ok {
+			temp[item] = struct{}{}
+			result = append(result, item)
+		}
+	}
+	return result
+}
+
+// query the whole privilege tree
+func (m *CasbinRule) GetAllPrivileges() (privileges models.TreePrivilege) {
+	return TotalPrivileges()
+}
+
+func (m *CasbinRule) GetPrivilegePath(auth string) (path string) {
+	privileges := m.GetAllPrivileges()
+	for _, p := range privileges {
+		if p.Name == auth {
+			return p.Path
+		}
+		path = recursiveSearchPath(auth, p.Children)
+		if path != "" {
+			return path
+		}
+	}
+	return path
+}
+
+func recursiveSearchPath(auth string, children models.TreePrivilege) (path string) {
+	for _, p := range children {
+		if auth == p.Name {
+			path = p.Path
+		} else {
+			if p.Children != nil {
+				path = recursiveSearchPath(auth, p.Children)
+				if path != "" {
+					return path
+				}
+			}
+		}
+	}
+	return path
+}
+
+func recursiveAuthorities(children models.TreePrivilege) (authorities []string) {
+	for _, p := range children {
+		if p.Name != "" {
+			authorities = append(authorities, p.Name)
+		}
+		if p.Children != nil {
+			as := recursiveAuthorities(p.Children)
+			if len(as) != 0 {
+				authorities = append(authorities, as...)
+			}
+		}
+	}
+	return
+}
diff --git a/pkg/privilege/privileges.go b/pkg/privilege/privileges.go
new file mode 100644
index 0000000..261e415
--- /dev/null
+++ b/pkg/privilege/privileges.go
@@ -0,0 +1,136 @@
+package privilege
+
+import (
+	"intent-system/pkg/dal/models"
+	"intent-system/pkg/routers"
+)
+
+// fixed privilege name list
+const (
+	Null           = "Null"           // no privilege check
+	UserAccess     = "UserAccess"     // account management - view
+	UserAdd        = "UserAdd"        // account management - add
+	UserEdit       = "UserEdit"       // account management - edit
+	UserDelete     = "UserDelete"     // account management - delete
+	RoleAccess     = "RoleAccess"     // role management - view
+	RoleAdd        = "RoleAdd"        // role management - add
+	RoleDelete     = "RoleDelete"     // role management - delete
+	RoleEdit       = "RoleEdit"       // role management - edit
+	RoleAuthority  = "RoleAuthority"  // role management - grant privileges
+	NewsAccess     = "NewsAccess"     // News database - view
+	NewsAdd        = "NewsAdd"        // News database - add
+	NewsDelete     = "NewsDelete"     // News database - delete
+	NewsEdit       = "NewsEdit"       // News database - edit
+	QA_Access      = "QA_Access"      // Q&A data - view
+	QA_Add         = "QA_Add"         // Q&A data - add
+	QA_Delete      = "QA_Delete"      // Q&A data - delete
+	QA_Edit        = "QA_Edit"        // Q&A data - edit
+	SubAccess      = "SubAccess"      // subscription management - view
+	SubAdd         = "SubAdd"         // subscription management - add
+	SubEdit        = "SubEdit"        // subscription management - edit
+	TagAccess      = "TagAccess"      // tag management - view
+	TagAdd         = "TagAdd"         // tag management - add
+	TagDelete      = "TagDelete"      // tag management - delete
+	TagEdit        = "TagEdit"        // tag management - edit
+	ModelAccess    = "ModelAccess"    // model management - view
+	ModelAdd       = "ModelAdd"       // model management - add
+	ModelDelete    = "ModelDelete"    // model management - delete
+	ModelEdit      = "ModelEdit"      // model management - edit
+	CustomerAccess = "CustomerAccess" // customer management - view
+	DeployModelApp = "DeployModelApp" // deploy a model or application
+)
+
+func TotalPrivileges() (tree models.TreePrivilege) {
+	strPlatformPath := routers.GroupRouterPlatformV1 + "/*"
+	strNewsPath := routers.GroupRouterNewsV1 + "/*"
+	strQaPath := routers.GroupRouterQaV1 + "/*"
+	strTagPath := routers.GroupRouterTagV1 + "/*"
+	strSubPath := routers.GroupRouterSubV1 + "/*"
+	strCustomerPath := routers.GroupRouterCustomerV1 + "/*"
+	strDeployPath := routers.GroupRouterDeployV1 + "/*"
+
+	tree = models.TreePrivilege{
+		{
+			Label: "账户管理",
+			Name:  "Account manage",
+			Children: models.TreePrivilege{
+				{Label: "查看", Name: UserAccess, Path: strPlatformPath},
+				{Label: "新增", Name: UserAdd, Path: strPlatformPath},
+				{Label: "删除", Name: UserDelete, Path: strPlatformPath},
+				{Label: "修改", Name: UserEdit, Path: strPlatformPath},
+			},
+		},
+		{
+			Label: "角色管理",
+			Name:  "Role manage",
+			Children: models.TreePrivilege{
+				{Label: "查看", Name: RoleAccess, Path: 
strPlatformPath}, + {Label: "添加", Name: RoleAdd, Path: strPlatformPath}, + {Label: "删除", Name: RoleDelete, Path: strPlatformPath}, + {Label: "修改", Name: RoleEdit, Path: strPlatformPath}, + {Label: "权限授权", Name: RoleAuthority, Path: strPlatformPath}, + }, + }, + { + Label: "News数据库", + Name: "News DB", + Children: models.TreePrivilege{ + {Label: "查看", Name: NewsAccess, Path: strNewsPath}, + {Label: "添加", Name: NewsAdd, Path: strNewsPath}, + {Label: "删除", Name: NewsDelete, Path: strNewsPath}, + {Label: "修改", Name: NewsEdit, Path: strNewsPath}, + }, + }, + { + Label: "Q&A数据", + Name: "Q&A data", + Children: models.TreePrivilege{ + {Label: "查看", Name: QA_Access, Path: strQaPath}, + {Label: "添加", Name: QA_Add, Path: strQaPath}, + {Label: "删除", Name: QA_Delete, Path: strQaPath}, + {Label: "修改", Name: QA_Edit, Path: strQaPath}, + }, + }, + { + Label: "订阅", + Name: "Sub manage", + Children: models.TreePrivilege{ + {Label: "查看", Name: SubAccess, Path: strSubPath}, + {Label: "添加", Name: SubAdd, Path: strSubPath}, + {Label: "修改", Name: SubEdit, Path: strSubPath}, + //{Label: "删除", Name: SubDelete, Path: strSubPath}, + }, + }, + { + Label: "客户管理", + Name: "Customer manage", + Children: models.TreePrivilege{ + {Label: "查看", Name: CustomerAccess, Path: strCustomerPath}, + //{Label: "添加", Name: CustomerAdd, Path: strCustomerPath}, + //{Label: "删除", Name: CustomerDelete, Path: strCustomerPath}, + //{Label: "修改", Name: CustomerEdit, Path: strCustomerPath}, + }, + }, + { + Label: "标签管理", + Name: "Tag manage", + Children: models.TreePrivilege{ + {Label: "查看", Name: TagAccess, Path: strTagPath}, + {Label: "添加", Name: TagAdd, Path: strTagPath}, + {Label: "删除", Name: TagDelete, Path: strTagPath}, + {Label: "修改", Name: TagEdit, Path: strTagPath}, + }, + }, + { + Label: "部署模型或应用", + Name: "Deploy ModelApp", + Children: models.TreePrivilege{ + {Label: "查看", Name: ModelAccess, Path: strDeployPath}, + {Label: "添加", Name: ModelAdd, Path: strDeployPath}, + {Label: "删除", Name: ModelDelete, Path: strDeployPath}, + {Label: "修改", Name: ModelEdit, Path: strDeployPath}, + }, + }, + } + return tree +} diff --git a/pkg/proto/proto_biz.go b/pkg/proto/proto_biz.go new file mode 100644 index 0000000..75cfde7 --- /dev/null +++ b/pkg/proto/proto_biz.go @@ -0,0 +1,281 @@ +package proto + +import "intent-system/pkg/dal/models" + +type NewsListReq struct { + Id int64 `json:"id"` //按ID过滤 + Tag string `json:"tag"` //按标签过滤 + All bool `json:"all"` //查询管理后台所有可见数据 + IsDeleted bool `json:"is_deleted"` //是否查询已删除数据 + OrderAsc bool `json:"order_asc"` //排序 + Search string `json:"search"` //搜索条件 + PageNo int `json:"page_no" db:"page_no" bson:"page_no"` //分页页码(从0开始) + PageSize int `json:"page_size" db:"page_size" bson:"page_size"` //单页数据条数 + Language string `json:"language"` //语言类型 +} + +type NewsListResp struct { + List []*models.NewsDO `json:"list"` +} + +type NewsAddReq struct { + Category string `json:"category" db:"category" bson:"category"` //分类 + MainTitle string `json:"main_title" db:"main_title" bson:"main_title"` //主标题 + SubTitle string `json:"sub_title" db:"sub_title" bson:"sub_title"` //副标题 + Summary string `json:"summary" db:"summary" sqlca:"isnull" bson:"summary"` //摘要 + Keywords []string `json:"keywords" db:"keywords" sqlca:"isnull" bson:"keywords"` //文章关键词 + SeoKeywords []string `json:"seo_keywords" db:"seo_keywords" sqlca:"isnull" bson:"seo_keywords"` //SEO关键词 + Tags []string `json:"tags" db:"tags" sqlca:"isnull" bson:"tags"` //人工打标签(多选) + ImageUrl string `json:"image_url" db:"image_url" sqlca:"isnull" bson:"image_url"` //图片URL + 
Content string `json:"content" db:"content" bson:"content" binding:"required"` //新闻内容 + Language string `json:"language" binding:"required"` //语言类型 +} + +type NewsAddResp struct { + DraftId int64 `json:"draft_id"` +} + +type NewsEditReq struct { + Id int64 `json:"id" binding:"required"` //数据ID +} + +type NewsEditResp struct { + DraftId int64 `json:"draft_id"` +} + +type NewsDeleteReq struct { + Ids []int64 `json:"ids" binding:"required"` +} + +type NewsDeleteResp struct { +} + +type NewsCompareReq struct { + Id int64 `json:"id" binding:"required"` +} + +type NewsCompareResp struct { + CurNews *models.NewsDO `json:"cur_news"` + OrgNews *models.NewsDO `json:"org_news"` +} + +type NewsDraftListReq struct { + Id int64 `json:"id"` //数据ID + OrderAsc bool `json:"order_asc"` //排序 + Search string `json:"search"` //搜索条件 + PageNo int `json:"page_no" db:"page_no" bson:"page_no"` //分页页码(从0开始) + PageSize int `json:"page_size" db:"page_size" bson:"page_size"` //单页记录条数 +} + +type NewsDraftListResp struct { + List []*models.NewsDraftDO `json:"list"` +} + +type NewsDraftEditReq struct { + Id int64 `json:"id" binding:"required"` //数据ID + Category string `json:"category" db:"category" bson:"category"` //分类 + MainTitle string `json:"main_title" db:"main_title" bson:"main_title"` //主标题 + SubTitle string `json:"sub_title" db:"sub_title" bson:"sub_title"` //副标题 + Summary string `json:"summary" db:"summary" sqlca:"isnull" bson:"summary"` //摘要 + Keywords []string `json:"keywords" db:"keywords" sqlca:"isnull" bson:"keywords"` //文章关键词 + SeoKeywords []string `json:"seo_keywords" db:"seo_keywords" sqlca:"isnull" bson:"seo_keywords"` //SEO关键词 + Tags []string `json:"tags" db:"tags" sqlca:"isnull" bson:"tags"` //人工打标签(多选) + ImageUrl string `json:"image_url" db:"image_url" sqlca:"isnull" bson:"image_url"` //图片URL + Content string `json:"content" binding:"required"` //内容 + Language string `json:"language"` //语言类型 +} + +type NewsDraftEditResp struct { +} + +type NewsDraftPublishReq struct { + Id int64 `json:"id" binding:"required"` //数据ID +} + +type NewsDraftPublishResp struct { +} + +type NewsDraftDeleteReq struct { + Ids []int64 `json:"ids" binding:"required"` //数据ID列表 +} + +type NewsDraftDeleteResp struct { +} + +type NewsTagReq struct { + Id int64 `json:"id" binding:"required"` //数据ID + Tags []string `json:"tags" binding:"required"` //标签数组 +} + +type NewsTagResp struct { +} + +type NewsPublishReq struct { + Id int64 `json:"id" binding:"required"` //数据ID +} + +type NewsPublishResp struct { +} + +type QaListReq struct { + Id int64 `json:"id"` + IsDeleted bool `json:"is_deleted"` //Q&A列表 + OrderAsc bool `json:"order_asc"` //排序 + Search string `json:"search"` //搜索条件 + PageNo int `json:"page_no" db:"page_no" bson:"page_no"` //分页页码(从0开始) + PageSize int `json:"page_size" db:"page_size" bson:"page_size"` //单页记录条数 + Language string `json:"language"` //语言类型 +} + +type QaListResp struct { + List []*models.QuestionAnswerDO `json:"list"` +} + +type QaAddReq struct { + Question string `json:"question" binding:"required"` //问题 + Answer string `json:"answer" binding:"required"` //答案 + Language string `json:"language" binding:"required"` //语言类型 +} + +type QaAddResp struct { + DraftId int64 `json:"draft_id"` +} + +type QaEditReq struct { + Id int64 `json:"id" binding:"required"` //数据ID +} + +type QaEditResp struct { + DraftId int64 `json:"draft_id"` +} + +type QaDeleteReq struct { + Ids []int64 `json:"ids" binding:"required"` //ID列表 +} + +type QaDeleteResp struct { +} + +type QaDraftListReq struct { + Id int64 `json:"id"` //数据ID + 
OrderAsc bool `json:"order_asc"` //排序 + Search string `json:"search"` //搜索条件 + PageNo int `json:"page_no" db:"page_no" bson:"page_no"` //分页页码(从0开始) + PageSize int `json:"page_size" db:"page_size" bson:"page_size"` //单页记录条数 +} + +type QaDraftListResp struct { + List []*models.QuestionDraftDO `json:"list"` +} + +type QaDraftEditReq struct { + Id int64 `json:"id" binding:"required"` //数据ID + Question string `json:"question" binding:"required"` //问题 + Answer string `json:"answer" binding:"required"` //答案 + Language string `json:"language"` //语言类型 +} + +type QaDraftEditResp struct { +} + +type QaDraftPublishReq struct { + Id int64 `json:"id" binding:"required"` //数据ID +} + +type QaDraftPublishResp struct { +} + +type QaDraftDeleteReq struct { + Ids []int64 `json:"ids" binding:"required"` //数据ID +} + +type QaDraftDeleteResp struct { +} + +type SubListAllReq struct { + Id int64 `json:"id"` //数据ID + OrderAsc bool `json:"order_asc"` //排序 + Search string `json:"search"` //搜索条件 + PageNo int `json:"page_no" db:"page_no" bson:"page_no"` //分页页码(从0开始) + PageSize int `json:"page_size" db:"page_size" bson:"page_size"` //单页条数 +} + +type SubListAllResp struct { + List []*models.NewsDO `json:"list"` +} + +type SubListPushedReq struct { + Id int64 `json:"id"` //数据ID + OrderAsc bool `json:"order_asc"` //排序 + Search string `json:"search"` //搜索条件 + PageNo int `json:"page_no" db:"page_no" bson:"page_no"` //分页页码(从0开始) + PageSize int `json:"page_size" db:"page_size" bson:"page_size"` //单页条数 +} + +type SubListPushedResp struct { + List []*models.NewsDO `json:"list"` +} + +type SubListTodayReq struct { +} + +type SubListTodayResp struct { + List []*models.NewsDO `json:"list"` +} + +type SubAddNewsReq struct { +} + +type SubAddNewsResp struct { +} + +type SubEditNewsReq struct { + Id int64 `json:"id" binding:"required"` + Category string `json:"category" db:"category" bson:"category"` //分类 + MainTitle string `json:"main_title" db:"main_title" bson:"main_title"` //主标题 + SubTitle string `json:"sub_title" db:"sub_title" bson:"sub_title"` //副标题 + Summary string `json:"summary" db:"summary" sqlca:"isnull" bson:"summary"` //摘要 + Keywords []string `json:"keywords" db:"keywords" sqlca:"isnull" bson:"keywords"` //文章关键词 + SeoKeywords []string `json:"seo_keywords" db:"seo_keywords" sqlca:"isnull" bson:"seo_keywords"` //SEO关键词 + Tags []string `json:"tags" db:"tags" sqlca:"isnull" bson:"tags"` //人工打标签(多选) + ImageUrl string `json:"image_url" db:"image_url" sqlca:"isnull" bson:"image_url"` //图片URL + Content string `json:"content" binding:"required"` +} + +type SubEditNewsResp struct { +} + +type TagListReq struct { + OrderAsc bool `json:"order_asc"` + PageNo int `json:"page_no" db:"page_no" bson:"page_no"` //page_no must >= 0 + PageSize int `json:"page_size" db:"page_size" bson:"page_size"` +} + +type TagListResp struct { + List []*models.TagDO `json:"list"` +} + +type TagAddReq struct { + Name string `json:"name" binding:"required"` + NameCN string `json:"name_cn" binding:"required"` +} + +type TagAddResp struct { + Id int64 `json:"id"` +} + +type TagEditReq struct { + Id int64 `json:"id" binding:"required"` //标签ID + Name string `json:"name" binding:"required"` + NameCN string `json:"name_cn" binding:"required"` +} + +type TagEditResp struct { +} + +type TagDeleteReq struct { + Ids []int64 `json:"ids" binding:"required"` +} + +type TagDeleteResp struct { +} diff --git a/pkg/proto/proto_common.go b/pkg/proto/proto_common.go new file mode 100644 index 0000000..990086a --- /dev/null +++ b/pkg/proto/proto_common.go @@ -0,0 +1,18 @@ 
+package proto + +type ActionType int + +const ( + ActionType_UserRegister ActionType = 0 + ActionType_ResetPassword ActionType = 1 +) + +type SendAuthCodeReq struct { + UserName string `json:"user_name" binding:"required"` //账户名 + Email string `json:"email" binding:"required"` //邮箱地址 + Language string `json:"language"` //语言类型 + ActionType ActionType `json:"action_type"` //操作类型: 0=用户注册 1=重置密码 +} + +type SendAuthCodeResp struct { +} diff --git a/pkg/proto/proto_customer.go b/pkg/proto/proto_customer.go new file mode 100644 index 0000000..077ab89 --- /dev/null +++ b/pkg/proto/proto_customer.go @@ -0,0 +1,99 @@ +package proto + +import "intent-system/pkg/dal/models" + +type CustomerRegisterReq struct { + UserName string `json:"user_name" binding:"required"` + Password string `json:"password" binding:"required"` + Email string `json:"email" binding:"required"` + RegCode string `json:"reg_code" binding:"required"` +} + +type CustomerURegisterReq struct { + UserName string `json:"user_name" binding:"required"` + Password string `json:"password" binding:"required"` + Referral string `json:"Referral" binding:"omitempty"` +} + +type CustomerRegisterResp struct { +} + +type CustomerLoginReq struct { + UserName string `json:"user_name" binding:"required"` + Password string `json:"password" binding:"required"` +} + +type CustomerLoginResp struct { + Id int32 `json:"id"` + Version string `json:"version"` + IsSubscribed bool `json:"is_subscribed"` + UserName string `json:"user_name" db:"user_name" bson:"user_name"` + FirstName string `json:"first_name" db:"first_name"` //姓 + LastName string `json:"last_name" db:"last_name"` //名 + Title string `json:"title" db:"title"` //职称 + Company string `json:"company" db:"company"` //公司名称 + AuthToken string `json:"auth_token" db:"auth_token" bson:"auth_token"` + LoginIp string `json:"login_ip" db:"login_ip" bson:"login_ip"` //最近登录IP + LoginTime int64 `json:"login_time" db:"login_time" bson:"login_time"` //最近登录时间 + Privileges []string `json:"privileges" db:"privileges" bson:"privileges"` //权限列表 + SubTags []string `json:"sub_tags" db:"sub_tags" bson:"sub_sub"` //订阅标签列表 +} + +type CustomerEditReq struct { + Email string `json:"email"` //邮箱地址 + FirstName string `json:"first_name" db:"first_name"` //姓 + LastName string `json:"last_name" db:"last_name"` //名 + Title string `json:"title" db:"title"` //职称 + Company string `json:"company" db:"company"` //公司名称 +} + +type CustomerEditResp struct { +} + +type CustomerLogoutReq struct { +} + +type CustomerLogoutResp struct { +} + +type CustomerSubInfoReq struct { + Email string `json:"email" binding:"required"` +} + +type CustomerSubInfoResp struct { + IsSubscribed bool `json:"is_subscribed"` //是否已订阅 + Tags []string `json:"tags"` //订阅标签列表 + FirstName string `json:"first_name"` //姓 + LastName string `json:"last_name"` //名 + Title string `json:"title"` //职称 + Company string `json:"company"` //公司名称 +} + +type CustomerSubscribeReq struct { + Language string `json:"language" binding:"required"` //语言类型 + FirstName string `json:"first_name" db:"first_name" binding:"required"` //姓 + Email string `json:"email"` //邮箱地址 + Tags []string `json:"tags"` //订阅标签 +} + +type CustomerSubscribeResp struct { +} + +type CustomerUnsubscribeReq struct { + Email string `json:"email"` + Reason string `json:"reason"` //退订原因 +} + +type CustomerUnsubscribeResp struct { +} + +type CustomerListReq struct { + Id int32 `json:"id"` + Email string `json:"email"` + PageNo int `json:"page_no" db:"page_no" bson:"page_no"` //page_no must >= 0 + PageSize int 
`json:"page_size" db:"page_size" bson:"page_size"` +} + +type CustomerListResp struct { + List []*models.CustomerDO `json:"list"` +} diff --git a/pkg/proto/proto_deploy.go b/pkg/proto/proto_deploy.go new file mode 100644 index 0000000..311354b --- /dev/null +++ b/pkg/proto/proto_deploy.go @@ -0,0 +1,58 @@ +package proto + +import "intent-system/pkg/dal/models" + +type DeployDeployReq struct { + UserName string `json:"user_name" binding:"required"` + AppID string `json:"id" binding:"required"` +} + +type DeployDeployResp struct { +} + +type DeployStatusReq struct { + AppID string `json:"id" binding:"required"` +} + +// type DeployStatusResp struct { +// Id int32 `json:"id"` +// Nid string `json:"n_id" db:"n_id" bson:"n_id"` +// MbUuid string `json:"mb_uuid" db:"mb_uuid" bson:"mb_uuid"` +// RepoName string `json:"repo_name" db:"repo_name"` +// Digest string `json:"digest" db:"digest"` +// Type int32 `json:"type" db:"type"` +// Status string `json:"status" db:"status"` +// CreatedTime int64 `json:"created_time" db:"created_time" bson:"created_time"` //Created Time +// UpdatedTime int64 `json:"updated_time" db:"updated_time" bson:"updated_time"` //updated time +// } + +type DeployStatusResp struct { + Data *models.DeployDO `json:"data"` +} + +type DeployDeleteReq struct { + UserName string `json:"user_name"` + Nid string `json:"n_id"` +} + +type DeployDeleteResp struct { + // 可根据需要加上 message 等字段 +} + +type DeployStartReq struct { + UserName string `json:"user_name"` + Nid string `json:"n_id"` +} + +type DeployStartResp struct { + // 可根据需要加上 message 等字段 +} + +type DeployStopReq struct { + UserName string `json:"user_name"` + Nid string `json:"n_id"` +} + +type DeployStopResp struct { + // 可根据需要加上 message 等字段 +} diff --git a/pkg/proto/proto_platform.go b/pkg/proto/proto_platform.go new file mode 100644 index 0000000..ff0f5f7 --- /dev/null +++ b/pkg/proto/proto_platform.go @@ -0,0 +1,264 @@ +package proto + +import ( + "intent-system/pkg/dal/models" + "intent-system/pkg/itypes" +) + +type PlatformLoginReq struct { + UserName string `json:"user_name" binding:"required"` + Password string `json:"password" binding:"required"` +} + +type PlatformLoginResp struct { + Id int32 `json:"id"` + Version string `json:"version"` + UserName string `json:"user_name" db:"user_name" bson:"user_name"` + AuthToken string `json:"auth_token" db:"auth_token" bson:"auth_token"` + LoginIp string `json:"login_ip" db:"login_ip" bson:"login_ip"` //最近登录IP + LoginTime int64 `json:"login_time" db:"login_time" bson:"login_time"` //最近登录时间 + Role string `json:"role" db:"role" bson:"role"` + Privilege []string `json:"privilege" db:"privilege" bson:"privilege"` +} + +type PlatformLogoutReq struct { +} + +type PlatformLogoutResp struct { +} + +type PlatformWorkspaceReq struct { +} + +type PlatformWorkspaceResp struct { +} + +type PlatformSummaryReq struct { +} + +type PlatformSummaryResp struct { +} + +type PlatformCheckExistReq struct { + Name string `json:"name" db:"name" bson:"name" binding:"required"` //用户/矿池/角色名称或邮箱地址 + CheckType itypes.CheckType `json:"check_type" db:"check_type" bson:"check_type"` //检查类型(0=用户名 1=邮箱 2=角色名称) +} + +type PlatformCheckExistResp struct { +} + +type PlatformListUserReq struct { + Id int32 `json:"id" db:"id" bson:"id"` + UserName string `json:"user_name" db:"user_name" bson:"user_name"` + PageNo int `json:"page_no" db:"page_no" bson:"page_no"` //page_no must >= 0 + PageSize int `json:"page_size" db:"page_size" bson:"page_size"` +} + +type PlatformListUserResp struct { + Users []*PlatformTotalUser 
`json:"users" db:"users" bson:"users"` +} + +type PlatformCreateUserReq struct { + UserName string `json:"user_name" db:"user_name" bson:"user_name" binding:"required"` + UserAlias string `json:"user_alias" db:"user_alias" bson:"user_alias" binding:"required"` + PhoneNumber string `json:"phone_number" db:"phone_number" bson:"phone_number"` + Email string `json:"email" db:"email" bson:"email"` + Password string `json:"password" db:"password" bson:"password"` + Remark string `json:"remark" db:"remark" bson:"remark"` + RoleName string `json:"role_name" db:"role_name" bson:"role_name"` +} + +type PlatformCreateUserResp struct { + UserId int32 `json:"user_id" db:"user_id" bson:"user_id"` +} + +type PlatformEditUserReq struct { + UserName string `json:"user_name" db:"user_name" bson:"user_name" binding:"required"` + Password string `json:"password" db:"password" bson:"password"` + UserAlias string `json:"user_alias" db:"user_alias" bson:"user_alias"` + PhoneNumber string `json:"phone_number" db:"phone_number" bson:"phone_number"` //联系手机号 + Email string `json:"email" db:"email" bson:"email"` + Remark string `json:"remark" db:"remark" bson:"remark"` + RoleName string `json:"role_name" db:"role_name" bson:"role_name"` +} + +type PlatformEditUserResp struct { +} + +type PlatformEnableUserReq struct { + UserName string `json:"user_name" db:"user_name" bson:"user_name" binding:"required"` +} + +type PlatformEnableUserResp struct { +} + +type PlatformDisableUserReq struct { + UserName string `json:"user_name" db:"user_name" bson:"user_name" binding:"required"` +} + +type PlatformDisableUserResp struct { +} + +type PlatformDeleteUserReq struct { + UserName string `json:"user_name" db:"user_name" bson:"user_name" binding:"required"` +} + +type PlatformDeleteUserResp struct { +} + +type PlatformEditUserRoleReq struct { + UserName string `json:"user_name" db:"user_name" bson:"user_name" binding:"required"` + RoleName string `json:"role_name" db:"role_name" bson:"role_name" binding:"required"` +} + +type PlatformEditUserRoleResp struct { +} + +type PlatformListRoleReq struct { + PageNo int `json:"page_no" db:"page_no" bson:"page_no"` //page_no must >= 0 + PageSize int `json:"page_size" db:"page_size" bson:"page_size"` + RoleName string `json:"role_name" db:"role_name" bson:"role_name"` +} + +type PlatformListRoleResp struct { + Roles []*PlatformSysRole `json:"roles" db:"roles" bson:"roles"` +} + +type PlatformCreateRoleReq struct { + RoleName string `json:"role_name" db:"role_name" bson:"role_name" binding:"required"` + Remark string `json:"remark" db:"remark" bson:"remark"` +} + +type PlatformCreateRoleResp struct { +} + +type PlatformEditRoleReq struct { + Id int32 `json:"id" db:"id" bson:"id" binding:"required"` //角色ID + RoleName string `json:"role_name" db:"role_name" bson:"role_name" binding:"required"` //更改后的角色名称 + Remark string `json:"remark" db:"remark" bson:"remark"` +} + +type PlatformEditRoleResp struct { +} + +type PlatformDeleteRoleReq struct { + RoleName string `json:"role_name" db:"role_name" bson:"role_name" binding:"required"` +} + +type PlatformDeleteRoleResp struct { +} + +type PlatformAuthRoleReq struct { + RoleName string `json:"role_name" db:"role_name" bson:"role_name" binding:"required"` + Privilege []string `json:"privilege" db:"privilege" bson:"privilege" binding:"required"` // 权限列表 +} + +type PlatformAuthRoleResp struct { +} + +const ( + TypeUser = iota // 用户 + TypeRole // 角色 +) + +type PlatformInquireAuthReq struct { + Name string `json:"name" db:"name" bson:"name" 
+	NameType int    `json:"name_type" db:"name_type" bson:"name_type" binding:"required"` // type 0: user 1: role
+}
+
+type PlatformInquireAuthResp struct {
+	Privilege []string `json:"privilege" db:"privilege" bson:"privilege" binding:"required"` // privilege list
+}
+
+type PlatformUserQueryReq struct {
+	Name string `json:"name"`
+}
+
+type PlatformUserQueryResp struct {
+	NameList []string `json:"name_list"`
+}
+
+type PlatformPrivilegeTreeReq struct {
+}
+
+type PlatformPrivilegeTreeResp struct {
+	TreeList models.TreePrivilege `json:"tree_list"`
+}
+
+type PlatformGetEmailConfigReq struct {
+}
+
+type PlatformSetEmailConfigReq struct {
+	SmtpServer string `json:"smtp_server"` // SMTP server
+	SmtpPort   string `json:"smtp_port"`   // SMTP port
+	SmtpName   string `json:"smtp_name"`   // mailbox account name
+	AuthCode   string `json:"auth_code"`   // authorization code
+	SendName   string `json:"send_name"`   // sender display name
+}
+
+type PlatformSetEmailConfigResp struct {
+}
+
+type PlatformResetPasswordReq struct {
+	UserName    string `json:"user_name" db:"user_name" bson:"user_name"`
+	OldPassword string `json:"old_password" db:"old_password" bson:"old_password"`
+	NewPassword string `json:"new_password" db:"new_password" bson:"new_password" binding:"required"`
+}
+
+type PlatformResetPasswordResp struct {
+}
+
+type PlatformListRoleUserReq struct {
+	RoleName string `json:"role_name" db:"role_name" bson:"role_name" binding:"required"`
+	PageNo   int    `json:"page_no" db:"page_no" bson:"page_no"` //page_no must be >= 0
+	PageSize int    `json:"page_size" db:"page_size" bson:"page_size"`
+}
+
+type PlatformListRoleUserResp struct {
+	RoleName  string          `json:"role_name" db:"role_name" bson:"role_name"`
+	UserCount int             `json:"user_count" db:"user_count" bson:"user_count"`
+	Users     []*PlatformUser `json:"users" db:"users" bson:"users"`
+}
+
+type PlatformRefreshAuthTokenReq struct {
+}
+
+type PlatformRefreshAuthTokenResp struct {
+	AuthToken string `json:"auth_token" db:"auth_token"`
+}
+
+type OperLog struct {
+	OperUser    string `json:"oper_user"`
+	OperType    int    `json:"oper_type"`
+	OperTime    string `json:"oper_time"`
+	OperContent string `json:"oper_content"`
+}
+
+type PlatformListOperLogReq struct {
+}
+
+type PlatformListOperLogResp struct {
+	List []*OperLog `json:"list"`
+}
+
+const (
+	UploadFileName = "file_name"
+	UploadFileData = "file_data"
+)
+
+type UploadFileReq struct {
+	FileName string `json:"file_name"`
+	FileData string `json:"file_data"`
+}
+
+type UploadFileResp struct {
+	FileName string `json:"file_name"`
+	FileUrl  string `json:"file_url"`
+}
+
+type PlatformDeleteUsersReq struct {
+	UserNames []string `json:"user_names" db:"user_names" bson:"user_names" binding:"required"`
+}
+
+type PlatformDeleteUsersResp struct {
+}
diff --git a/pkg/proto/proto_public.go b/pkg/proto/proto_public.go
new file mode 100644
index 0000000..1688a76
--- /dev/null
+++ b/pkg/proto/proto_public.go
@@ -0,0 +1,60 @@
+package proto
+
+type PlatformUser struct {
+	UserId      int32  `json:"user_id" db:"user_id" bson:"user_id"`                //user ID (auto increment)
+	UserName    string `json:"user_name" db:"user_name" bson:"user_name"`          //login name
+	UserAlias   string `json:"user_alias" db:"user_alias" bson:"user_alias"`       //real name
+	PhoneNumber string `json:"phone_number" db:"phone_number" bson:"phone_number"` //contact phone number
+	IsAdmin     bool   `json:"is_admin" db:"is_admin" bson:"is_admin"`             //super administrator flag (0=normal account 1=super administrator)
+	Email       string `json:"email" db:"email" bson:"email"`                      //email address
+	Address     string `json:"address" db:"address" bson:"address"`                //home/company address
+	UserRemark  string `json:"user_remark" db:"user_remark" bson:"user_remark"`    //remark
+	State       int    `json:"state" db:"state" bson:"state"`                   //frozen state (1=normal 2=frozen)
+	LoginIp     string `json:"login_ip" db:"login_ip" bson:"login_ip"`          //most recent login IP
+	LoginTime   int64  `json:"login_time" db:"login_time" bson:"login_time"`    //most recent login time
+	RoleName    string `json:"role_name" db:"role_name" bson:"role_name"`       //role name
+	RoleAlias   string `json:"role_alias" db:"role_alias" bson:"role_alias"`    //role alias
+	CreateUser  string `json:"create_user" db:"create_user" bson:"create_user"` //creator
+	EditUser    string `json:"edit_user" db:"edit_user" bson:"edit_user"`       //last editor
+	Password    string `json:"password" db:"password" bson:"password"`          //password
+	CreatedTime string `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //created time
+	UpdatedTime string `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //updated time
+}
+
+type PlatformRole struct {
+	Id          int32  `json:"id" db:"id" bson:"id"`                            //role ID (auto increment)
+	RoleName    string `json:"role_name" db:"role_name" bson:"role_name"`       //role name
+	RoleAlias   string `json:"role_alias" db:"role_alias" bson:"role_alias"`    //role alias
+	CreateUser  string `json:"create_user" db:"create_user" bson:"create_user"` //creator
+	EditUser    string `json:"edit_user" db:"edit_user" bson:"edit_user"`       //last editor
+	Remark      string `json:"remark" db:"remark" bson:"remark"`                //remark
+	IsInherent  bool   `json:"is_inherent" db:"is_inherent" bson:"is_inherent"` //built-in flag (false=custom role true=built-in platform role)
+	Deleted     bool   `json:"deleted" db:"deleted" bson:"deleted"`             //deleted flag (false=not deleted true=deleted)
+	CreatedTime string `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //created time
+	UpdatedTime string `json:"updated_time" db:"updated_time" sqlca:"readonly" bson:"updated_time"` //updated time
+}
+
+type PlatformTotalUser struct {
+	UserId      int32  `json:"user_id" db:"user_id" bson:"user_id"`                //user ID (auto increment)
+	UserName    string `json:"user_name" db:"user_name" bson:"user_name"`          //login name
+	UserAlias   string `json:"user_alias" db:"user_alias" bson:"user_alias"`       //real name
+	PhoneNumber string `json:"phone_number" db:"phone_number" bson:"phone_number"` //contact phone number
+	Email       string `json:"email" db:"email" bson:"email"`                      //email address
+	Remark      string `json:"remark" db:"remark" bson:"remark"`                   //remark
+	LoginTime   int64  `json:"login_time" db:"login_time" bson:"login_time"`       //most recent login time
+	RoleName    string `json:"role_name" db:"role_name" bson:"role_name"`          //role name
+	State       int    `json:"state" db:"state" bson:"state"`                      //frozen state (1=normal 2=frozen)
+	CreatedTime string `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //created time
+	CreateUser  string `json:"create_user" db:"create_user" bson:"create_user"`                     //creator
+}
+
+type PlatformSysRole struct {
+	Id          int32    `json:"id" db:"id" bson:"id"`                            //role ID (auto increment)
+	RoleName    string   `json:"role_name" db:"role_name" bson:"role_name"`       //role name
+	RoleAlias   string   `json:"role_alias" db:"role_alias" bson:"role_alias"`    //role alias
+	CreateUser  string   `json:"create_user" db:"create_user" bson:"create_user"` //creator
+	IsInherent  bool     `json:"is_inherent" db:"is_inherent" bson:"is_inherent"` //built-in flag (false=custom role true=built-in platform role)
+	Remark      string   `json:"remark" db:"remark" bson:"remark"`                //remark
+	CreatedTime string   `json:"created_time" db:"created_time" sqlca:"readonly" bson:"created_time"` //created time
+	Privileges  []string `json:"privileges"`                                                          //role privileges
+}
diff --git a/pkg/routers/router_biz.go b/pkg/routers/router_biz.go
new file mode 100644
index 0000000..08c9acc
--- /dev/null
+++ b/pkg/routers/router_biz.go
@@ -0,0 +1,96 @@
+package routers
+
+import (
+	"github.com/gin-gonic/gin"
+	"intent-system/pkg/api"
+	"intent-system/pkg/middleware"
+)
+
+const (
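+	// Each business module gets its own /api/v1/<module> prefix; a group prefix plus a
+	// sub path below forms the full route, e.g. /api/v1/news + /list => POST /api/v1/news/list.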
+	GroupRouterNewsV1 = "/api/v1/news"
+	GroupRouterQaV1   = "/api/v1/qa"
+	GroupRouterSubV1  = "/api/v1/sub"
+	GroupRouterTagV1  = "/api/v1/tag"
+)
+
+const (
+	RouterSubPathNewsList         = "/list"
+	RouterSubPathNewsAdd          = "/add"
+	RouterSubPathNewsEdit         = "/edit"
+	RouterSubPathNewsDelete       = "/delete"
+	RouterSubPathNewsCompare      = "/compare"
+	RouterSubPathNewsPublish      = "/publish"
+	RouterSubPathNewsDraftList    = "/draft/list"
+	RouterSubPathNewsDraftEdit    = "/draft/edit"
+	RouterSubPathNewsDraftPublish = "/draft/publish"
+	RouterSubPathNewsDraftDelete  = "/draft/delete"
+	RouterSubPathNewsTag          = "/tag"
+	RouterSubPathQaList           = "/list"
+	RouterSubPathQaAdd            = "/add"
+	RouterSubPathQaEdit           = "/edit"
+	RouterSubPathQaDelete         = "/delete"
+	RouterSubPathQaDraftList      = "/draft/list"
+	RouterSubPathQaDraftEdit      = "/draft/edit"
+	RouterSubPathQaDraftPublish   = "/draft/publish"
+	RouterSubPathQaDraftDelete    = "/draft/delete"
+	RouterSubPathSubscribeListToday  = "/list/today"
+	RouterSubPathSubscribeListAll    = "/list/all"
+	RouterSubPathSubscribeListPushed = "/list/pushed"
+	RouterSubPathSubscribeEdit       = "/edit"
+	RouterSubPathTagList             = "/list"
+	RouterSubPathTagAdd              = "/add"
+	RouterSubPathTagEdit             = "/edit"
+	RouterSubPathTagDelete           = "/delete"
+)
+
+func InitRouterGroupBiz(r *gin.Engine, handlers api.BizApi) {
+	groupNews := r.Group(GroupRouterNewsV1)
+
+	groupNews.POST(RouterSubPathNewsList, handlers.NewsList)
+	groupNews.Use(middleware.JWT()) //use JWT token middleware
+	{
+		groupNews.POST(RouterSubPathNewsAdd, handlers.NewsAdd)
+		groupNews.POST(RouterSubPathNewsEdit, handlers.NewsEdit)
+		groupNews.POST(RouterSubPathNewsDelete, handlers.NewsDelete)
+		groupNews.POST(RouterSubPathNewsCompare, handlers.NewsCompare)
+		groupNews.POST(RouterSubPathNewsPublish, handlers.NewsPublish)
+		groupNews.POST(RouterSubPathNewsDraftList, handlers.NewsDraftList)
+		groupNews.POST(RouterSubPathNewsDraftEdit, handlers.NewsDraftEdit)
+		groupNews.POST(RouterSubPathNewsDraftPublish, handlers.NewsDraftPublish)
+		groupNews.POST(RouterSubPathNewsDraftDelete, handlers.NewsDraftDelete)
+		groupNews.POST(RouterSubPathNewsTag, handlers.NewsTag)
+	}
+
+	groupQA := r.Group(GroupRouterQaV1)
+	groupQA.POST(RouterSubPathQaList, handlers.QaList)
+	groupQA.Use(middleware.JWT()) //use JWT token middleware
+	{
+		groupQA.POST(RouterSubPathQaAdd, handlers.QaAdd)
+		groupQA.POST(RouterSubPathQaEdit, handlers.QaEdit)
+		groupQA.POST(RouterSubPathQaDelete, handlers.QaDelete)
+		groupQA.POST(RouterSubPathQaDraftList, handlers.QaDraftList)
+		groupQA.POST(RouterSubPathQaDraftEdit, handlers.QaDraftEdit)
+		groupQA.POST(RouterSubPathQaDraftPublish, handlers.QaDraftPublish)
+		groupQA.POST(RouterSubPathQaDraftDelete, handlers.QaDraftDelete)
+	}
+
+	groupSub := r.Group(GroupRouterSubV1)
+	groupSub.Use(middleware.JWT()) //use JWT token middleware
+	{
+		groupSub.POST(RouterSubPathSubscribeListToday, handlers.SubListToday)
+		groupSub.POST(RouterSubPathSubscribeListAll, handlers.SubListAll)
+		groupSub.POST(RouterSubPathSubscribeListPushed, handlers.SubListPushed)
+		groupSub.POST(RouterSubPathSubscribeEdit, handlers.SubEditNews)
+	}
+
+	groupTag := r.Group(GroupRouterTagV1)
+	groupTag.POST(RouterSubPathTagList, handlers.TagList)
+	groupTag.Use(middleware.JWT()) //use JWT token middleware
+	{
+		groupTag.POST(RouterSubPathTagAdd, handlers.TagAdd)
+		groupTag.POST(RouterSubPathTagEdit, handlers.TagEdit)
+		groupTag.POST(RouterSubPathTagDelete, handlers.TagDelete)
+	}
+}
diff --git a/pkg/routers/router_common.go b/pkg/routers/router_common.go
new file mode 100644
index 0000000..74b5fc4
--- /dev/null
+++ b/pkg/routers/router_common.go
@@ -0,0 +1,19 @@
+package routers
+
+import (
+	"github.com/gin-gonic/gin"
+	"intent-system/pkg/api"
+)
+
+const (
+	GroupRouterCommonV1 = "/api/v1/common"
+)
+
+const (
+	RouterSubPathCustomerSendAuthCode = "/auth-code"
+)
+
+func InitRouterGroupCommon(r *gin.Engine, handlers api.CommonApi) {
+	g := r.Group(GroupRouterCommonV1)
+	g.POST(RouterSubPathCustomerSendAuthCode, handlers.SendAuthCode) //do not need JWT authentication
+}
diff --git a/pkg/routers/router_customer.go b/pkg/routers/router_customer.go
new file mode 100644
index 0000000..f538b71
--- /dev/null
+++ b/pkg/routers/router_customer.go
@@ -0,0 +1,40 @@
+package routers
+
+import (
+	"github.com/gin-gonic/gin"
+	"intent-system/pkg/api"
+	"intent-system/pkg/middleware"
+)
+
+const (
+	GroupRouterCustomerV1 = "/api/v1/customer"
+)
+
+const (
+	RouterPathCustomerRegister  = "/register"
+	RouterPathCustomerURegister = "/uregister" //do not verify email, only register by username.
+	RouterPathCustomerLogin     = "/login"
+	RouterSubPathCustomerList        = "/list"
+	RouterSubPathCustomerEdit        = "/edit"
+	RouterSubPathCustomerLogout      = "/logout"
+	RouterSubPathCustomerSubInfo     = "/sub-info"
+	RouterSubPathCustomerSubscribe   = "/subscribe"
+	RouterSubPathCustomerUnsubscribe = "/unsubscribe"
+)
+
+func InitRouterGroupCustomer(r *gin.Engine, handlers api.CustomerApi) {
+
+	g := r.Group(GroupRouterCustomerV1)
+	g.POST(RouterPathCustomerRegister, handlers.CustomerRegister)          //do not need JWT authentication
+	g.POST(RouterPathCustomerURegister, handlers.CustomerURegister)
+	g.POST(RouterPathCustomerLogin, handlers.CustomerLogin)                //do not need JWT authentication
+	g.POST(RouterSubPathCustomerSubInfo, handlers.CustomerSubInfo)         //do not need JWT authentication
+	g.POST(RouterSubPathCustomerSubscribe, handlers.CustomerSubscribe)     //do not need JWT authentication
+	g.POST(RouterSubPathCustomerUnsubscribe, handlers.CustomerUnsubscribe) //do not need JWT authentication
+	g.POST(RouterSubPathCustomerEdit, handlers.CustomerEdit)               //do not need JWT authentication
+	g.Use(middleware.JWT()) //use JWT token middleware
+	{
+		g.POST(RouterSubPathCustomerList, handlers.CustomerList)
+		g.POST(RouterSubPathCustomerLogout, handlers.CustomerLogout)
+	}
+}
diff --git a/pkg/routers/router_deploy.go b/pkg/routers/router_deploy.go
new file mode 100644
index 0000000..7baa596
--- /dev/null
+++ b/pkg/routers/router_deploy.go
@@ -0,0 +1,40 @@
+package routers
+
+import (
+	"intent-system/pkg/api"
+	"intent-system/pkg/middleware"
+
+	"github.com/civet148/log"
+	"github.com/gin-gonic/gin"
+)
+
+const (
+	GroupRouterDeployV1 = "/api/v1/deploy"
+)
+
+const (
+	RouterPathDeployDeploy = "/deploy"
+	RouterPathDeployStatus = "/status"
+	RouterPathDeployDelete = "/delete"
+	RouterPathDeployStart  = "/start"
+	RouterPathDeployStop   = "/stop"
+)
+
+func InitRouterGroupDeploy(r *gin.Engine, handlers api.DeployApi) {
+
+	log.Infof("registering deploy routers")
+
+	g := r.Group(GroupRouterDeployV1)
+	g.POST(RouterPathDeployDeploy, handlers.DeployDeploy) //do not need JWT authentication
+	g.POST(RouterPathDeployStatus, handlers.DeployStatus) //do not need JWT authentication
+	g.POST(RouterPathDeployDelete, handlers.DeployDelete) //do not need JWT authentication
+	g.POST(RouterPathDeployStart, handlers.DeployStart)   //do not need JWT authentication
+	g.POST(RouterPathDeployStop, handlers.DeployStop)     //do not need JWT authentication
+
+	g.Use(middleware.JWT()) //use JWT token middleware
+	{
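+		// NOTE: every deploy route above is registered before the JWT middleware and is
+		// therefore reachable without authentication; this JWT-protected group is empty for now.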
+		//g.POST(RouterSubPathDeployDeploy, handlers.DeployDeploy)
+	}
+}
diff --git a/pkg/routers/router_gateway.go b/pkg/routers/router_gateway.go
new file mode 100644
index 0000000..bb7acb1
--- /dev/null
+++ b/pkg/routers/router_gateway.go
@@ -0,0 +1,14 @@
+package routers
+
+import (
+	"github.com/gin-gonic/gin"
+	"intent-system/pkg/api"
+)
+
+const (
+	RouterSubPathWebsocket = "/ws"
+)
+
+func InitRouterGateway(r *gin.Engine, ws api.GatewayApi) {
+	r.GET(RouterSubPathWebsocket, ws.WebSocketRequest)
+}
diff --git a/pkg/routers/router_platform.go b/pkg/routers/router_platform.go
new file mode 100644
index 0000000..d2cc10d
--- /dev/null
+++ b/pkg/routers/router_platform.go
@@ -0,0 +1,68 @@
+package routers
+
+import (
+	"github.com/gin-gonic/gin"
+	"intent-system/pkg/api"
+	"intent-system/pkg/middleware"
+)
+
+const (
+	GroupRouterPlatformV1 = "/api/v1/platform"
+)
+
+const ( //prefix http://localhost:port/api/v1/platform
+	RouterSubPathPlatformLogin            = "/login"
+	RouterSubPathPlatformLogout           = "/logout"
+	RouterSubPathPlatformCheckExist       = "/check/exist"
+	RouterSubPathPlatformListUser         = "/list/user"
+	RouterSubPathPlatformCreateUser       = "/create/user"
+	RouterSubPathPlatformEditUser         = "/edit/user"
+	RouterSubPathPlatformEnableUser       = "/enable/user"
+	RouterSubPathPlatformDisableUser      = "/disable/user"
+	RouterSubPathPlatformDeleteUser       = "/delete/user"
+	RouterSubPathPlatformDeleteUsers      = "/delete/users"
+	RouterSubPathPlatformListRole         = "/list/role"
+	RouterSubPathPlatformCreateRole       = "/create/role"
+	RouterSubPathPlatformEditRole         = "/edit/role"
+	RouterSubPathPlatformDeleteRole       = "/delete/role"
+	RouterSubPathPlatformAuthRole         = "/auth/role"
+	RouterSubPathPlatformInquireAuth      = "/inquire/auth"
+	RouterSubPathPlatformPrivilegeTree    = "/privilege/tree"
+	RouterSubPathPlatformResetPassword    = "/reset/password"
+	RouterSubPathPlatformChangePassword   = "/change/password"
+	RouterSubPathPlatformListRoleUser     = "/list/role-user"
+	RouterSubPathPlatformRefreshAuthToken = "/refresh/token"
+	RouterSubPathPlatformListOperLog      = "/list/oper-log"
+	RouterSubPathPlatformUploadFile       = "/upload/file"
+)
+
+func InitRouterGroupPlatform(r *gin.Engine, handlers api.PlatformApi) {
+
+	g := r.Group(GroupRouterPlatformV1)
+	g.POST(RouterSubPathPlatformLogin, handlers.PlatformLogin) //do not need JWT authentication
+	g.Use(middleware.JWT())                                    //use JWT token middleware
+	{
+		g.POST(RouterSubPathPlatformLogout, handlers.PlatformLogout)
+		g.POST(RouterSubPathPlatformCheckExist, handlers.PlatformCheckExist)
+		g.POST(RouterSubPathPlatformListUser, handlers.PlatformListUser)
+		g.POST(RouterSubPathPlatformCreateUser, handlers.PlatformCreateUser)
+		g.POST(RouterSubPathPlatformEditUser, handlers.PlatformEditUser)
+		g.POST(RouterSubPathPlatformEnableUser, handlers.PlatformEnableUser)
+		g.POST(RouterSubPathPlatformDisableUser, handlers.PlatformDisableUser)
+		g.POST(RouterSubPathPlatformDeleteUser, handlers.PlatformDeleteUser)
+		g.POST(RouterSubPathPlatformDeleteUsers, handlers.PlatformDeleteUsers)
+		g.POST(RouterSubPathPlatformListRole, handlers.PlatformListRole)
+		g.POST(RouterSubPathPlatformCreateRole, handlers.PlatformCreateRole)
+		g.POST(RouterSubPathPlatformEditRole, handlers.PlatformEditRole)
+		g.POST(RouterSubPathPlatformDeleteRole, handlers.PlatformDeleteRole)
+		g.POST(RouterSubPathPlatformAuthRole, handlers.PlatformAuthRole)
+		g.POST(RouterSubPathPlatformInquireAuth, handlers.PlatformInquireAuth)
+		g.POST(RouterSubPathPlatformPrivilegeTree, handlers.PlatformPrivilegeTree)
+		g.POST(RouterSubPathPlatformResetPassword, handlers.PlatformResetPassword)
+		g.POST(RouterSubPathPlatformChangePassword, handlers.PlatformChangePassword)
+		g.POST(RouterSubPathPlatformListRoleUser, handlers.PlatformListRoleUser)
+		g.POST(RouterSubPathPlatformRefreshAuthToken, handlers.PlatformRefreshAuthToken)
+		g.POST(RouterSubPathPlatformListOperLog, handlers.PlatformListOperLog)
+		g.POST(RouterSubPathPlatformUploadFile, handlers.PlatformUploadFile)
+	}
+}
diff --git a/pkg/routers/router_ws.go b/pkg/routers/router_ws.go
new file mode 100644
index 0000000..00f790b
--- /dev/null
+++ b/pkg/routers/router_ws.go
@@ -0,0 +1,30 @@
+package routers
+
+import (
+	"intent-system/pkg/api"
+	"intent-system/pkg/middleware"
+
+	"github.com/civet148/log"
+	"github.com/gin-gonic/gin"
+)
+
+const (
+	GroupRouterDeployV1WS = "/api/v1/deploy/ws"
+)
+
+const (
+	RouterPathDeployStatusWS = "/status/:user/:appID"
+)
+
+func InitRouterGroupDeployWs(r *gin.Engine, handlers api.WsApi) {
+
+	log.Infof("registering deploy websocket routers")
+
+	g := r.Group(GroupRouterDeployV1WS)
+	g.GET(RouterPathDeployStatusWS, handlers.DeployStatusWS) //do not need JWT authentication
+
+	g.Use(middleware.JWT()) //use JWT token middleware
+	{
+		//g.POST(RouterSubPathDeployDeploy, handlers.DeployDeploy)
+	}
+}
diff --git a/pkg/services/service_manager.go b/pkg/services/service_manager.go
new file mode 100644
index 0000000..e326732
--- /dev/null
+++ b/pkg/services/service_manager.go
@@ -0,0 +1,337 @@
+package services
+
+import (
+	"context"
+	"encoding/json"
+	"intent-system/internal/licensecheck"
+	cainit "intent-system/pkg/cache"
+	"intent-system/pkg/config"
+	"intent-system/pkg/controllers"
+	"intent-system/pkg/itypes"
+	"intent-system/pkg/middleware"
+	"intent-system/pkg/routers"
+	"intent-system/pkg/utils"
+	"net/http"
+	"os"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"intent-system/pkg/dal/core" // ✅ used to call core.GetSupabaseURL and core.DeployCradleTar
+
+	"github.com/civet148/log"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
+	"github.com/gin-gonic/gin"
+)
+
+/* ---------- added: auto-deploy throttling control ---------- */
+var (
+	deploying   int32 // 0 = idle, 1 = busy
+	failStreak  int32 // consecutive failure count
+	maxFails    int32 = 3 // failure threshold
+	cooldown          = 5 * time.Minute
+	nextAllowed int64 // time until which deploys are blocked (UnixNano)
+)
+
+type Manager struct {
+	*controllers.Controller
+	cfg       *config.Config
+	router    *gin.Engine
+	routerRPC *gin.Engine
+}
+
+func NewManager(cfg *config.Config) *Manager {
+	m := &Manager{
+		cfg:        cfg,
+		router:     gin.New(),
+		routerRPC:  gin.New(),
+		Controller: controllers.NewController(cfg),
+	}
+	return m
+}
+
+func (m *Manager) monitorHealthAndRedeploy(ctx context.Context) {
+
+	/* ====== wait 60s after startup, then self-check ====== */
+	select {
+	case <-time.After(60 * time.Second): // cold-start buffer
+	case <-ctx.Done():
+		return
+	}
+	ensureCradleReady(ctx) // self-check + forced deploy
+
+	// ③ start UDP broadcast (port 9876, 2s interval)
+	utils.StartBroadcast(ctx, 9876, 2, "PlugAI ZeroStack AI Server")
+
+	ticker := time.NewTicker(75 * time.Second) //the 75s interval has no special meaning; the author picked it arbitrarily.
+	defer ticker.Stop()
+
+	var (
+		lastHealthIP string
+		changeSince  time.Time
+		ipChanging   bool
+	)
+
+	httpClient := &http.Client{Timeout: 5 * time.Second}
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+
+		case <-ticker.C:
+			// ① local IP
+			supURL := core.GetSupabaseURL()
+			localIP, err := utils.ExtractIP(supURL)
+			if err != nil {
+				log.Warnf("failed to parse local IP: %v", err)
+				continue
+			}
+
+			// ② health endpoint
+			resp, err := httpClient.Get("http://localhost:3008/api/health")
+			if err != nil {
+				log.Warnf("health request failed: %v", err)
+				continue
+			}
+			var res struct {
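+				// minimal view of the health response: only the "ip" field is decoded here;
+				// any other fields the endpoint may return are ignored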
+				Ip string `json:"ip"`
+			}
+			if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
+				log.Warnf("failed to decode health response: %v", err)
+				resp.Body.Close()
+				continue
+			}
+			resp.Body.Close()
+
+			// log the comparison once
+			log.Infof("health API returned IP=%s, localIP=%s", res.Ip, localIP)
+
+			/* ---------- ③ first sample ---------- */
+			if lastHealthIP == "" {
+				lastHealthIP = res.Ip
+				// if the IPs already differ right after startup, start the timer immediately
+				if res.Ip != localIP {
+					ipChanging = true
+					changeSince = time.Now()
+				}
+				continue
+			}
+
+			/* ---------- ④ IP changed ---------- */
+			if res.Ip != lastHealthIP {
+				log.Infof("health IP changed: %s → %s", lastHealthIP, res.Ip)
+				lastHealthIP = res.Ip
+				changeSince = time.Now()
+				ipChanging = true
+			}
+
+			/* ---------- ⑤ trigger condition met ---------- */
+			if !(ipChanging && time.Since(changeSince) > 10*time.Second && res.Ip != localIP) {
+				continue
+			}
+
+			/* ---------- ⑥ throttling & cooldown ---------- */
+			if ts := atomic.LoadInt64(&nextAllowed); ts > time.Now().UnixNano() {
+				log.Warn("in cooldown period, skipping auto deploy")
+				continue
+			}
+			if !atomic.CompareAndSwapInt32(&deploying, 0, 1) {
+				continue // a deploy is already in progress
+			}
+
+			// run the deploy asynchronously; the deploying flag is released when it finishes
+			go func() {
+				defer atomic.StoreInt32(&deploying, 0)
+
+				// the call returns (id, err)
+				id, err := core.DeployCradleTar(ctx)
+				if err != nil {
+					log.Errorf("auto deploy failed: %v", err)
+
+					if atomic.AddInt32(&failStreak, 1) >= maxFails {
+						atomic.StoreInt64(&nextAllowed, time.Now().Add(cooldown).UnixNano())
+						log.Errorf("failed %d times in a row, entering cooldown for %v", maxFails, cooldown)
+					}
+					return
+				}
+
+				// deploy succeeded (note: lastHealthIP/ipChanging are also read by the ticker loop)
+				atomic.StoreInt32(&failStreak, 0)
+				lastHealthIP = localIP
+				ipChanging = false
+				log.Infof("✅ auto deploy succeeded, containerID=%s", id)
+			}()
+		}
+	}
+}
+
+func (m *Manager) Run() (err error) {
+
+	if err = licensecheck.Validate("/etc/license.dat"); err != nil {
+		log.Errorf("License invalid/expired: %v, blocking for 1h", err)
+
+		start := time.Now()
+		for time.Since(start) < time.Hour {
+			log.Errorf("License invalid/expired: %v", err)
+			time.Sleep(30 * time.Second)
+		}
+		log.Warn("1-hour grace period over; continuing startup…")
+	} else {
+		log.Infof("License check passed")
+	}
+
+	// ✅ automatically start IP-change monitoring and deploy logic
+	ctx := context.Background()
+	go m.monitorHealthAndRedeploy(ctx)
+
+	_ = m.runManager(func() error {
+		//initialize the local cache
+		cainit.InitBigCache()
+		//start the web service; on success this routine blocks here
+		if err = m.startWebService(); err != nil {
+			m.Close()
+			log.Errorf("start web service error [%s]", err)
+			return err
+		}
+		return err
+	})
+	return
+}
+
+func (m *Manager) Close() {
+
+}
+
+func (m *Manager) initRouterMgr() (r *gin.Engine) {
+
+	m.router.Use(gin.Logger())
+	m.router.Use(gin.Recovery())
+	m.router.Use(middleware.Cors())
+	//m.router.Static("/", m.cfg.Static)
+	routers.InitRouterGroupCommon(m.router, m)   //common APIs
+	routers.InitRouterGroupPlatform(m.router, m) //management platform
+	routers.InitRouterGroupCustomer(m.router, m) //customer management
+	routers.InitRouterGateway(m.router, m)       //gateway API
+	routers.InitRouterGroupBiz(m.router, m)      //business APIs
+	routers.InitRouterGroupDeploy(m.router, m)   //deployment APIs
+	routers.InitRouterGroupDeployWs(m.router, m) //websocket push of deploy progress
+	return m.router
+}
+
+func (m *Manager) runManager(run func() error) (err error) {
+	return run()
+}
+
+func (m *Manager) startWebService() (err error) {
+	if !m.createImagesDir() {
+		log.Panic("create images dir failed")
+	}
+	routerMgr := m.initRouterMgr()
+	strHttpAddr := m.cfg.HttpAddr
+	log.Infof("starting http server on %s \n", strHttpAddr)
+	//Web manager service
+	if err = http.ListenAndServe(strHttpAddr, routerMgr); err != nil { //if everything is fine, it will block this routine
+		log.Panic("listen http server [%s] error [%s]\n", strHttpAddr, err.Error())
+	}
+	return
+}
+
+func (m *Manager) createImagesDir() bool {
+	var strUrl = m.cfg.ImagePrefix
+	var strImages string
+	if err := os.MkdirAll(m.cfg.ImagePath, os.ModePerm); err != nil {
+		log.Errorf("make dir [%s] error [%s]", m.cfg.ImagePath, err.Error())
+		return false
+	}
+	log.Infof("make dir [%s] ok", m.cfg.ImagePath)
+	strUrl = strings.ToLower(strUrl)
+	strUrl = strings.TrimPrefix(strUrl, "http://")
+	strUrl = strings.TrimPrefix(strUrl, "https://")
+	idx := strings.Index(strUrl, "/")
+	if idx < 0 {
+		log.Panic("url [%s] images path is invalid", strUrl)
+	}
+	strImages = strUrl[idx:]
+	if m.cfg.Static == "" {
+		m.cfg.Static = itypes.DefaultStaticHome
+	}
+	strLink := m.cfg.Static + strImages
+	idx = strings.LastIndex(strLink, "/")
+	strPrefixDir := strLink[:idx]
+	if err := os.MkdirAll(strPrefixDir, os.ModePerm); err != nil {
+		log.Errorf("make dir [%s] error [%s]", strPrefixDir, err.Error())
+		return false
+	}
+	log.Infof("make dir [%s] ok", strPrefixDir)
+
+	if err := os.Symlink(m.cfg.ImagePath, strLink); err != nil {
+		if !strings.Contains(err.Error(), "exists") {
+			log.Errorf("make link [%s] error [%s]", strLink, err.Error())
+			return false
+		}
+	}
+	log.Infof("make link [%s] to [%s] ok", strLink, m.cfg.ImagePath)
+	return true
+}
+
+// ensureCradleReady makes sure a healthy cradle_amd64 container is running;
+// if none exists, or its state is abnormal, it cleans up and redeploys.
+func ensureCradleReady(ctx context.Context) {
+	cli, err := client.NewClientWithOpts(client.FromEnv)
+	if err != nil {
+		log.Warnf("docker client err: %v", err)
+		return
+	}
+	defer cli.Close()
+
+	const imageTag = "cradle:latest"      // image name
+	const containerNameKeyword = "Cradle" // container name keyword, case sensitive (use strings.ToLower for case-insensitive matching)
+
+	var (
+		needDeploy     = true
+		imagesToDelete = map[string]struct{}{}
+	)
+
+	// ① iterate over all containers
+	cs, _ := cli.ContainerList(ctx, types.ContainerListOptions{All: true})
+	for _, c := range cs {
+		// c.Names is a []string, usually a single entry starting with "/"
+		name := strings.TrimPrefix(c.Names[0], "/")
+		if !strings.Contains(name, containerNameKeyword) {
+			continue
+		}
+
+		imagesToDelete[c.Image] = struct{}{} // record this container's image
+
+		if c.State == "running" {
+			needDeploy = false // a healthy instance already exists
+			continue
+		}
+
+		// abnormal state: stop + rm
+		timeout := 10 * time.Second
+		_ = cli.ContainerStop(ctx, c.ID, &timeout)
+		_ = cli.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{Force: true})
+		log.Warnf("cleaned up abnormal container %s (state=%s)", name, c.State)
+	}
+
+	// ② remove leftover images (if needed)
+	if needDeploy {
+		for img := range imagesToDelete {
+			_, _ = cli.ImageRemove(ctx, img, types.ImageRemoveOptions{Force: true, PruneChildren: true})
+			log.Warnf("removed old image %s", img)
+		}
+		// ③ redeploy
+		if id, err := core.DeployCradleTar(ctx); err != nil {
+			log.Errorf("forced deploy at startup failed: %v", err)
+		} else {
+			log.Infof("✅ forced deploy at startup done, containerID=%s", id)
+		}
+	} else {
+		log.Infof("running cradle container detected, skipping startup deploy")
+	}
+}
diff --git a/pkg/sessions/context.go b/pkg/sessions/context.go
new file mode 100644
index 0000000..c04937a
--- /dev/null
+++ b/pkg/sessions/context.go
@@ -0,0 +1,86 @@
+package sessions
+
+import (
+	"intent-system/pkg/itypes"
+	"intent-system/pkg/middleware"
+	"intent-system/pkg/storage"
+	"sync"
+
+	"github.com/civet148/log"
+	"github.com/gin-gonic/gin"
+)
+
+var locker sync.RWMutex
+var contexts = make(map[string]*itypes.Context)
+
+func init() {
+	preloadContexts()
+}
+
+func NewContext(s *itypes.Session) *itypes.Context {
+	ctx := &itypes.Context{
+		Session: &itypes.Session{
+			UserId:      s.UserId,
+			UserName:    s.UserName,
+			Email:       s.Email,
+			Alias:       s.Alias,
+			PhoneNumber: s.PhoneNumber,
+			IsAdmin:     s.IsAdmin,
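+			// the session is copied field by field; AuthToken also serves as the key
+			// of both the in-memory contexts map and the LevelDB record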
+			LoginIP:     s.LoginIP,
+			AuthToken:   s.AuthToken,
+			LoginMode:   s.LoginMode,
+		},
+	}
+	locker.Lock()
+	defer locker.Unlock()
+	contexts[s.AuthToken] = ctx
+	putLevelDB(ctx)
+	return ctx
+}
+
+func RemoveContext(c *gin.Context) *itypes.Context {
+	strToken := middleware.GetAuthToken(c)
+	locker.Lock()
+	defer locker.Unlock()
+	for k, v := range contexts {
+		if v.AuthToken() == strToken {
+			delete(contexts, k)
+			deleteLevelDB(k)
+			return v
+		}
+	}
+	return nil
+}
+
+func GetContext(c *gin.Context) *itypes.Context {
+	strToken := middleware.GetAuthToken(c)
+	locker.RLock()
+	defer locker.RUnlock()
+	v, ok := contexts[strToken]
+	if ok && v != nil {
+		return v
+	}
+	return nil
+}
+
+func putLevelDB(ctx *itypes.Context) {
+	if err := storage.Store.PutContext(ctx); err != nil {
+		log.Errorf("level db put error [%s]", err)
+		return
+	}
+}
+
+func deleteLevelDB(strAuthToken string) {
+	storage.Store.DeleteContext(strAuthToken)
+}
+
+func preloadContexts() {
+	var err error
+	locker.Lock()
+	defer locker.Unlock()
+	if contexts, err = storage.Store.LoadContexts(); err != nil {
+		log.Errorf(err.Error())
+	}
+}
diff --git a/pkg/storage/local_storage.go b/pkg/storage/local_storage.go
new file mode 100644
index 0000000..346380a
--- /dev/null
+++ b/pkg/storage/local_storage.go
@@ -0,0 +1,138 @@
+package storage
+
+import (
+	"encoding/json"
+	"fmt"
+	"intent-system/pkg/itypes"
+	"os"
+	"strings"
+
+	"github.com/civet148/log"
+	"github.com/syndtr/goleveldb/leveldb"
+)
+
+const (
+	LEVEL_DB_KEY_PREFFIX_CONTEXT = "/context/" //the last '/' cannot be removed
+)
+
+type ContextMap map[string]*itypes.Context
+
+type LocalStorage struct {
+	ldb *leveldb.DB
+}
+
+var Store *LocalStorage
+
+func init() {
+	Store = NewLocalStorage()
+}
+
+func NewLocalStorage() *LocalStorage {
+	return &LocalStorage{}
+}
+
+func (s *LocalStorage) open() (err error) {
+	s.ldb, err = leveldb.OpenFile(itypes.DefaultLevelDBHome, nil)
+	if err != nil {
+		return fmt.Errorf("open level db path [%s] error [%s]", itypes.DefaultLevelDBHome, err.Error())
+	}
+	return
+}
+
+func (s *LocalStorage) close() (err error) {
+	return s.ldb.Close()
+}
+
+func (s *LocalStorage) delete(strKey string) (err error) {
+	return s.ldb.Delete([]byte(strKey), nil)
+}
+
+func (s *LocalStorage) put(strKey string, data []byte) (err error) {
+	if err = s.ldb.Put([]byte(strKey), data, nil); err != nil {
+		log.Errorf("level db put [%s] error [%s]", strKey, err)
+		return
+	}
+	return
+}
+
+func (s *LocalStorage) clean() error {
+	return os.RemoveAll(itypes.DefaultLevelDBHome)
+}
+
+func (s *LocalStorage) get(strKey string) []byte {
+	data, err := s.ldb.Get([]byte(strKey), nil)
+	if err != nil {
+		log.Errorf("level db get [%s] error [%s]", strKey, err)
+		return nil
+	}
+	return data
+}
+
+// clean storage directory
+func (s *LocalStorage) Clean() error {
+	return s.clean()
+}
+
+func (s *LocalStorage) DeleteContext(strAuthToken string) {
+	var strKey = s.MakeContextKey(strAuthToken)
+	if err := s.open(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	defer s.close()
+	if err := s.delete(strKey); err != nil {
+		log.Errorf(err.Error())
+	}
+}
+
+func (s *LocalStorage) PutContext(ctx *itypes.Context) (err error) {
+	var data []byte
+	data, err = json.Marshal(ctx)
+	if err != nil {
+		log.Errorf("json marshal error [%s]", err)
+		return
+	}
+	if err = s.open(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	defer s.close()
+	var strKey = s.MakeContextKey(ctx.AuthToken())
+	if err = s.put(strKey, data); err != nil {
+		log.Errorf("level db put error [%s]", err)
+		return
+	}
+	return
+}
+
+func (s *LocalStorage) LoadContexts() (contexts ContextMap, err error) {
+	contexts = make(ContextMap)
+	if err = s.open(); err != nil {
+		log.Errorf(err.Error())
+		return
+	}
+	defer s.close()
+	iter := s.ldb.NewIterator(nil, nil)
+	for iter.Next() {
+		strKey := string(iter.Key())
+		value := iter.Value()
+
+		if strings.HasPrefix(strKey, LEVEL_DB_KEY_PREFFIX_CONTEXT) {
+			strKey = strings.TrimPrefix(strKey, LEVEL_DB_KEY_PREFFIX_CONTEXT)
+			var ctx = &itypes.Context{}
+			if err = json.Unmarshal(value, ctx); err != nil {
+				log.Errorf("key [%s] value unmarshal error [%s]", strKey, err)
+				continue
+			}
+			contexts[strKey] = ctx
+			log.Debugf("load user token [%v] context [%+v] from level db ok", strKey, ctx.Session)
+		}
+	}
+	iter.Release()
+	return
+}
+
+func (s *LocalStorage) MakeContextKey(strAuthToken string) string {
+	strKey := fmt.Sprintf("%s%v", LEVEL_DB_KEY_PREFFIX_CONTEXT, strAuthToken)
+	return strKey
+}
diff --git a/pkg/utils/color.go b/pkg/utils/color.go
new file mode 100644
index 0000000..cad20ec
--- /dev/null
+++ b/pkg/utils/color.go
@@ -0,0 +1,79 @@
+package utils
+
+import (
+	"fmt"
+	"github.com/fatih/color"
+	"regexp"
+	"strings"
+)
+
+func ColorBool(ok bool) string {
+	if ok {
+		return color.GreenString("true")
+	}
+	return "false"
+}
+
+func ColorWarn(strFmt string, args ...interface{}) string {
+	if strFmt != "" {
+		return color.YellowString(strFmt, args...)
+	}
+	return ""
+}
+
+func ColorError(strFmt string, args ...interface{}) string {
+	if strFmt != "" {
+		return color.RedString(strFmt, args...)
+	}
+	return ""
+}
+
+func BlueString(v interface{}) string {
+	return color.BlueString("%v", v)
+}
+
+func GreenString(v interface{}) string {
+	return color.GreenString("%v", v)
+}
+
+func YellowString(v interface{}) string {
+	return color.YellowString("%v", v)
+}
+
+func RedString(v interface{}) string {
+	return color.RedString("%v", v)
+}
+
+func RedStringNotOK(v interface{}) string {
+	str := fmt.Sprintf("%v", v)
+	str = strings.ToLower(str)
+	if str == "ok" {
+		return str
+	}
+	return color.RedString("%v", v)
+}
+
+func CyanString(v interface{}) string {
+	return color.CyanString("%v", v)
+}
+
+func RedInt(v int) string {
+	if v == 0 {
+		return fmt.Sprintf("%v", v)
+	}
+	return color.RedString("%v", v)
+}
+
+// VerifyMobileFormat reports whether the string looks like a Chinese mobile phone number
+func VerifyMobileFormat(mobileNum string) bool {
+	regular := "^((13[0-9])|(14[5,7])|(15[0-3,5-9])|(17[0,1,3,5-8])|(18[0-9])|166|198|199|(147))\\d{8}$"
+
+	reg := regexp.MustCompile(regular)
+	return reg.MatchString(mobileNum)
+}
+
+func VerifyEmailFormat(email string) bool {
+	pattern := `\w+([-+.]\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*` //match an email address
+	reg := regexp.MustCompile(pattern)
+	return reg.MatchString(email)
+}
diff --git a/pkg/utils/hash.go b/pkg/utils/hash.go
new file mode 100644
index 0000000..c971faf
--- /dev/null
+++ b/pkg/utils/hash.go
@@ -0,0 +1,160 @@
+package utils
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"github.com/civet148/log"
+	"net/url"
+	"reflect"
+	"sort"
+	"strings"
+)
+
+const (
+	TAG_VALUE_IGNORE = "-"
+	TAG_NAME_JSON    = "json"
+)
+
+var tagNames = []string{TAG_NAME_JSON}
+
+// MakeSignString makes a sign string whose keys are sorted alphabetically.
+// obj can be url.Values, a struct with json tags, or a map[string]interface{}.
+func MakeSignString(obj interface{}, excepts ...string) string {
+	var strSort string
+	if values, ok := obj.(url.Values); ok {
+		dic := make(map[string]interface{}, 0)
+		for k, v := range values {
+			if len(v) > 0 {
+				dic[k] = v[0]
+			}
+		}
+		strSort = makeSignStringByMap(dic, excepts...)
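+		// note: for url.Values only the first value of each key takes part in the
+		// signature; any repeated query parameters beyond the first are dropped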
+	} else {
+		typ := reflect.TypeOf(obj)
+		val := reflect.ValueOf(obj)
+		for typ.Kind() == reflect.Ptr {
+			typ = typ.Elem()
+			val = val.Elem()
+		}
+		switch typ.Kind() {
+		case reflect.String:
+			strSort = obj.(string)
+		case reflect.Map:
+			strSort = makeSignStringByMap(obj.(map[string]interface{}), excepts...)
+		case reflect.Struct:
+			strSort = makeSignStringByStruct(typ, val, excepts...)
+		default:
+			log.Errorf("object type [%s] not support", typ.Name())
+		}
+	}
+	return strSort
+}
+
+func MakeSignSHA256Hex(obj interface{}, excepts ...string) string {
+	strToSign := MakeSignString(obj, excepts...)
+	digestHash := sha256.Sum256([]byte(strToSign))
+	return hex.EncodeToString(digestHash[:])
+}
+
+func MakeSignSHA256(obj interface{}, excepts ...string) []byte {
+	strToSign := MakeSignString(obj, excepts...)
+	digestHash := sha256.Sum256([]byte(strToSign))
+	return digestHash[:]
+}
+
+func makeSignStringByStruct(typ reflect.Type, val reflect.Value, excepts ...string) string {
+	dic := parseStructFields(typ, val, tagNames...)
+	return makeSignStringByMap(dic, excepts...)
+}
+
+func makeSignStringByMap(dic map[string]interface{}, excepts ...string) string {
+	var keys, values []string
+	for _, v := range excepts {
+		delete(dic, v)
+	}
+	for k := range dic {
+		keys = append(keys, k)
+	}
+	sort.Slice(keys, func(i, j int) bool {
+		if strings.Compare(keys[i], keys[j]) < 0 {
+			return true
+		}
+		return false
+	})
+	for _, key := range keys {
+		v := fmt.Sprintf("%s=%v", key, dic[key])
+		values = append(values, v)
+	}
+	return strings.Join(values, "&")
+}
+
+// parse struct fields
+func parseStructFields(typ reflect.Type, val reflect.Value, tagNames ...string) (dic map[string]interface{}) {
+
+	kind := typ.Kind()
+	dic = make(map[string]interface{}, 0)
+
+	if kind == reflect.Struct {
+		NumField := val.NumField()
+		for i := 0; i < NumField; i++ {
+			typField := typ.Field(i)
+			valField := val.Field(i)
+
+			if typField.Type.Kind() == reflect.Ptr {
+				typField.Type = typField.Type.Elem()
+				valField = valField.Elem()
+			}
+			if !valField.IsValid() || !valField.CanInterface() {
+				continue
+			}
+			saveValueByField(dic, typField, valField, tagNames...) // save field tag value and field value to map
+		}
+	}
+	return dic
+}
+
+// saveValueByField saves the field's tag name and value into the map
+func saveValueByField(dic map[string]interface{}, field reflect.StructField, val reflect.Value, tagNames ...string) {
+
+	if len(tagNames) == 0 {
+		log.Errorf("no tag to save value")
+		return
+	}
+
+	var tagVal string
+	for _, v := range tagNames {
+		strTagValue, ignore := getTag(field, v)
+		tagVal = handleTagValue(v, strTagValue)
+		if ignore {
+			break
+		}
+		if tagVal == "" {
+			tagVal = field.Name
+		}
+		dic[tagVal] = fmt.Sprintf("%v", val.Interface())
+	}
+}
+
+// get struct field's tag value
+func getTag(sf reflect.StructField, tagName string) (strValue string, ignore bool) {
+
+	strValue = sf.Tag.Get(tagName)
+	if strValue == TAG_VALUE_IGNORE {
+		return "", true
+	}
+	return
+}
+
+func handleTagValue(strTagName, strTagValue string) string {
+	if strTagValue == "" {
+		return ""
+	}
+	if strTagName == TAG_NAME_JSON {
+		vs := strings.Split(strTagValue, ",")
+		strTagValue = vs[0]
+	}
+	return strTagValue
+}
diff --git a/pkg/utils/ip.go b/pkg/utils/ip.go
new file mode 100644
index 0000000..9667546
--- /dev/null
+++ b/pkg/utils/ip.go
@@ -0,0 +1,24 @@
+package utils
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+)
+
+// ExtractIP turns "http://1.2.3.4:8000" into "1.2.3.4"
+func ExtractIP(rawURL string) (string, error) {
+	u, err := url.Parse(rawURL)
+	if err != nil {
+		return "", err
+	}
+	host := u.Host
+	if h, _, errSplit := net.SplitHostPort(host); errSplit == nil {
+		host = h
+	}
+	ip := net.ParseIP(host)
+	if ip == nil {
+		return "", fmt.Errorf("invalid IP in url: %s", rawURL)
+	}
+	return ip.String(), nil
+}
diff --git a/pkg/utils/network.go b/pkg/utils/network.go
new file mode 100644
index 0000000..fbdc7e5
--- /dev/null
+++ b/pkg/utils/network.go
@@ -0,0 +1,145 @@
+package utils
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net"
+	"os/exec"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/civet148/log"
+)
+
+/* ---------- find the local IP on the gateway's subnet ---------- */
+
+func getGatewayIP() (string, error) {
+	var cmd *exec.Cmd
+	switch runtime.GOOS {
+	case "linux":
+		cmd = exec.Command("sh", "-c", "ip route | grep default | awk '{print $3}'")
+	case "darwin":
+		cmd = exec.Command("sh", "-c", "netstat -rn | grep default | awk '{print $2}'")
+	default:
+		return "", fmt.Errorf("unsupported OS")
+	}
+	out, err := cmd.Output()
+	if err != nil {
+		return "", err
+	}
+	return strings.TrimSpace(string(out)), nil
+}
+
+func ipInSameSubnet(ip1 net.IP, ip2 net.IP, mask net.IPMask) bool {
+	if mask == nil {
+		return false
+	}
+	return ip1.Mask(mask).Equal(ip2.Mask(mask))
+}
+
+func GetLocalIPSameAsGW() string {
+	gwStr, err := getGatewayIP()
+	if err != nil {
+		return "127.0.0.1"
+	}
+	gw := net.ParseIP(gwStr)
+
+	ifaces, _ := net.Interfaces()
+	for _, iface := range ifaces {
+		if iface.Flags&net.FlagUp == 0 {
+			continue
+		}
+		addrs, _ := iface.Addrs()
+		for _, a := range addrs {
+			var ip net.IP
+			var mask net.IPMask
+			switch v := a.(type) {
+			case *net.IPNet:
+				ip = v.IP
+				mask = v.Mask
+			case *net.IPAddr:
+				ip = v.IP
+			}
+			if ip == nil || ip.IsLoopback() || ip.To4() == nil {
+				continue
+			}
+			if ipInSameSubnet(ip, gw, mask) {
+				return ip.String()
+			}
+		}
+	}
+	return "127.0.0.1"
+}
+
+func IsValidIP(ip string) bool {
+	if ip == "" || ip == "127.0.0.1" {
+		return false
+	}
+	p := net.ParseIP(ip)
+	return p != nil && !p.IsLoopback() && !p.IsUnspecified() &&
+		!p.IsLinkLocalMulticast() && !p.IsLinkLocalUnicast()
+}
+
+/* ---------- UDP broadcast ---------- */
+
+type BroadcastMessage struct {
+	Type string `json:"type"`
+	IP   string `json:"ip"`
+	Port int    `json:"port"`
+	Name string `json:"name"`
+}
+
+// StartBroadcast starts the UDP broadcast; it exits automatically when ctx is cancelled
+func StartBroadcast(ctx context.Context, port, interval int, name string) {
+	go func() {
+		var prevIP string
+		var conn *net.UDPConn
+
+		for {
+			select {
+			case <-ctx.Done():
+				if conn != nil {
+					conn.Close()
+				}
+				return
+			default:
+			}
+
+			curIP := GetLocalIPSameAsGW()
+			if !IsValidIP(curIP) {
+				time.Sleep(time.Duration(interval) * time.Second)
+				continue
+			}
+
+			if curIP != prevIP || conn == nil {
+				if conn != nil {
+					conn.Close()
+				}
+				addr := net.UDPAddr{IP: net.IPv4bcast, Port: port}
+				c, err := net.DialUDP("udp4", nil, &addr)
+				if err != nil {
+					log.Warnf("UDP dial failed: %v", err)
+					time.Sleep(time.Duration(interval) * time.Second)
+					continue
+				}
+				conn = c
+				prevIP = curIP
+				log.Infof("broadcasting from new IP: %s", curIP)
+			}
+
+			msg := BroadcastMessage{
+				Type: "ai_server_announce",
+				IP:   curIP,
+				Port: port,
+				Name: name,
+			}
+			data, _ := json.Marshal(msg)
+			if _, err := conn.Write(data); err != nil {
+				log.Warnf("broadcast failed: %v", err)
+			}
+			time.Sleep(time.Duration(interval) * time.Second)
+		}
+	}()
+}
diff --git a/pkg/utils/passwd_md5.go b/pkg/utils/passwd_md5.go
new file mode 100644
index 0000000..73df42a
--- /dev/null
+++ b/pkg/utils/passwd_md5.go
@@ -0,0 +1,22 @@
+package utils
+
+import (
+	"crypto/md5"
+	"fmt"
+	"github.com/civet148/log"
+)
+
+func GenerateSalt() string {
+	// stub: no salt is generated yet, an empty string is returned
+	return ""
+}
+
+func PasswordMD5(strPassword, strSalt string) (strMD5 string) {
+	m := md5.New()
+	strEnc := fmt.Sprintf("%s%s", strPassword, strSalt)
+	if _, err := m.Write([]byte(strEnc)); err != nil {
+		log.Errorf("md5 write error [%s]", err.Error())
+		return ""
+	}
+	return fmt.Sprintf("%x", m.Sum(nil))
+}
diff --git a/pkg/utils/round.go b/pkg/utils/round.go
new file mode 100644
index 0000000..ae94a68
--- /dev/null
+++ b/pkg/utils/round.go
@@ -0,0 +1,26 @@
+package utils
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"strings"
+	"time"
+)
+
+func Round(f float64, n int) float64 {
+	pow10 := math.Pow10(n)
+	return math.Trunc((f+0.5/pow10)*pow10) / pow10
+}
+
+func GenValidateCode(width int) string {
+	numeric := [9]byte{1, 2, 3, 4, 5, 6, 7, 8, 9}
+	r := len(numeric)
+	rand.Seed(time.Now().UnixNano())
+
+	var sb strings.Builder
+	for i := 0; i < width; i++ {
+		fmt.Fprintf(&sb, "%d", numeric[rand.Intn(r)])
+	}
+	return sb.String()
+}
diff --git a/pkg/utils/signature.go b/pkg/utils/signature.go
new file mode 100644
index 0000000..4c63fc5
--- /dev/null
+++ b/pkg/utils/signature.go
@@ -0,0 +1,30 @@
+package utils
+
+import (
+	"bytes"
+	"strings"
+
+	"github.com/ethereum/go-ethereum/common"
+)
+
+const (
+	ethAddrPrefix = "0x"
+)
+
+func TrimEvmAddrPrefix(addr string) string {
+	if strings.HasPrefix(addr, ethAddrPrefix) {
+		addr = strings.TrimPrefix(addr, ethAddrPrefix)
+	}
+	return addr
+}
+
+func CompareEvmAddr(a1, a2 string) bool {
+	a1 = TrimEvmAddrPrefix(a1)
+	a2 = TrimEvmAddrPrefix(a2)
+	addr1 := common.HexToAddress(a1)
+	addr2 := common.HexToAddress(a2)
+	if bytes.Compare(addr1.Bytes(), addr2.Bytes()) == 0 {
+		return true
+	}
+	return false
+}
diff --git a/pkg/utils/str.go b/pkg/utils/str.go
new file mode 100644
index 0000000..4cd6b81
--- /dev/null
+++ b/pkg/utils/str.go
@@ -0,0 +1,30 @@
+package utils
+
+import "strings"
+
+// TrimSpace removes all spaces from s, not only the leading and trailing ones
+func TrimSpace(s string) string {
+	s = strings.TrimSpace(s)
+	s = strings.ReplaceAll(s, " ", "")
+	return s
+}
+
+func StrMap2Uint(s string, max uint64) (idx uint) {
+	var m uint64
+	for _, v := range s {
+		m += uint64(v)
+	}
+	return uint(m % max)
+}
+
+func UrlSuffix(strUri string) (strRouter, strSuffix string) {
+	idx := strings.LastIndex(strUri, "/")
+	if idx == 0 {
+		strRouter = strUri[0:1]
+		strSuffix = strUri[idx+1:]
+	} else if idx > 0 {
+		strRouter = strUri[:idx]
+		strSuffix = strUri[idx+1:]
+	}
+	return
+}
diff --git a/pkg/utils/time.go b/pkg/utils/time.go
new file mode 100644
index 0000000..255e732
--- /dev/null
+++ b/pkg/utils/time.go
@@ -0,0 +1,307 @@
+package utils
+
+import (
+	"fmt"
+	"github.com/civet148/log"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	HEIGHTS_ON_DAY          = 2880
+	SECONDS_ONE_DAY         = 24 * 60 * 60
+	TIME_FORMAT_DATE        = "2006-01-02"
+	TIME_FORMAT_DATETIME    = "2006-01-02 15:04:05"
+	FILECOIN_GENESIS_TIME   = 1598306400 //height is 0
+	FILECOIN_BLOCK_DURATION = 30         //a new block is created every 30s
+)
+
+func Now() string {
+	return time.Now().Format(TIME_FORMAT_DATETIME)
+}
+
+func NowDate() string {
+	return time.Now().Format(TIME_FORMAT_DATE)
+}
+
+func FormatTimestampTZ(strTZ string) string {
+	var err error
+	var t time.Time
+	t, err = time.Parse(time.RFC3339, strTZ)
+	if err != nil {
+		log.Errorf("format timestamp [%s] error [%s]", strTZ, err.Error())
+		return strTZ
+	}
+	return t.Format(TIME_FORMAT_DATETIME)
+}
+
+func NowRandom() string {
+	return time.Now().Format("20060102150405.000000000")
+}
+
+func NowHeight() int64 {
+	now64 := time.Now().Unix()
+	return UnixTimeToHeight(now64)
+}
+
+func Unix2DateTime(t64 uint64) string {
+	if t64 == 0 {
+		return ""
+	}
+	t := time.Unix(int64(t64), 0)
+	return t.Format(TIME_FORMAT_DATETIME)
+}
+
+func Unix2Date(t64 uint64) string {
+	if t64 == 0 {
+		return ""
+	}
+	t := time.Unix(int64(t64), 0)
+	return t.Format(TIME_FORMAT_DATE)
+}
+
+func Minute2Hour(minutes int) string {
+	if minutes <= 0 {
+		return "0h0m"
+	}
+	if minutes >= 60 {
+		return fmt.Sprintf("%dh%dm", minutes/60, minutes%60)
+	}
+	return fmt.Sprintf("0h%dm", minutes)
+}
+
+func Now64() int64 {
+	return time.Now().Unix()
+}
+
+func TodayZero() int64 {
+	now := time.Now()
+	t := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location())
+	return t.Unix()
+}
+
+func YesterDay(cur64 int64) int64 {
+	t := time.Unix(cur64, 0)
+	d, err := time.ParseDuration("-24h")
+	if err != nil {
+		log.Errorf("parse duration error [%s]", err)
+		return 0
+	}
+	return t.Add(d).Unix()
+}
+
+func ISODateByUnix64(v int64) (t time.Time) {
+	return time.Unix(v, 0)
+}
+
+func DateTimeStr2Unix(strDateTime string, timeFmt ...interface{}) (unixTime int64) {
+
+	var t time.Time
+	var bNormal = true
+
+	if len(timeFmt) > 0 {
+
+		if timeFmt[0].(string) == "/" {
+			bNormal = false
+		}
+	} else {
+		if strings.Contains(strDateTime, "/") {
+			bNormal = false
+		}
+	}
+
+	if len(strDateTime) != 19 {
+
+		nIndex := strings.Index(strDateTime, " ")
+		if nIndex == -1 {
+			fmt.Println("error: DateTimeStr2Unix invalid datetime format")
+			return 0
+		}
+
+		sdt := strings.Split(strDateTime, " ")
+		if len(sdt) == 1 {
+			fmt.Println("error: DateTimeStr2Unix invalid datetime format")
+			return 0
+		}
+
+		ymd := sdt[0]
+		hms := sdt[1]
+
+		var s1, s2 []string
+		if bNormal {
+			s1 = strings.Split(ymd, "-")
+
+		} else {
+			s1 = strings.Split(ymd, "/")
+		}
+
+		s2 = strings.Split(hms, ":")
+
+		if len(s1) != 3 || len(s2) != 3 {
+			fmt.Println("error: DateTimeStr2Unix invalid datetime format, not match 'YYYY-MM-DD hh:mm:ss' or 'YYYY/MM/DD hh:mm:ss'")
+			return 0
+		}
+		year := s1[0]
+		month := s1[1]
+		day := s1[2]
+		hour := s2[0]
+		min := s2[1]
+		sec := s2[2]
+		if len(year) != 4 {
+			fmt.Println("error: DateTimeStr2Unix invalid year format, not match YYYY")
+			return 0
+		}
+		if len(month) == 1 {
+			month = "0" + month
+		}
+		if len(day) == 1 {
+			day = "0" + day
+		}
+		if len(hour) == 1 {
+			hour = "0" + hour
+		}
+		if len(min) == 1 {
+			min = "0" + min
+		}
+		if len(sec) == 1 {
+			sec = "0" + sec
+		}
+
+		if bNormal {
+			strDateTime = fmt.Sprintf("%v-%v-%v %v:%v:%v", year, month, day, hour, min, sec)
+
+		} else {
+			strDateTime = fmt.Sprintf("%v/%v/%v %v:%v:%v", year, month, day, hour, min, sec)
+		}
+	}
+
+	if strDateTime != "" {
+
+		loc, _ := time.LoadLocation("Local")
+
+		if bNormal {
+			t, _ = time.ParseInLocation("2006-01-02 15:04:05", strDateTime, loc)
+		} else {
+			t, _ = time.ParseInLocation("2006/01/02 15:04:05", strDateTime, loc)
+		}
+
+		unixTime = t.Unix()
+	}
+
+	return
+}
+
+func GetDateBeginTime(strDate string) string {
+	return strDate + " 00:00:00"
+}
+
+func GetDateEndTime(strDate string) string {
+	return strDate + " 23:59:30"
+}
+
+func DateToEpochBeginTime64(strDate string) int64 {
+	strDateTime := GetDateBeginTime(strDate)
+	return DateTimeStr2Unix(strDateTime)
+}
+
+func DateToEpochEndTime64(strDate string) int64 {
+	strDateTime := GetDateEndTime(strDate)
+	return DateTimeStr2Unix(strDateTime)
+}
+
+func DateBeginHeight(strDate string) int64 {
+	strDateTime := GetDateBeginTime(strDate)
+	return DateTimeToHeight(strDateTime)
+}
+
+func DateEndHeight(strDate string) int64 {
+	strDateTime := GetDateEndTime(strDate)
+	return DateTimeToHeight(strDateTime)
+}
+
+func HeightToTimeUnix64(e int64) int64 {
+	return HeightToTime(e).Unix()
+}
+
+func HeightToTime(e int64) (t time.Time) {
+	unix64 := FILECOIN_GENESIS_TIME + (e * FILECOIN_BLOCK_DURATION)
+	return time.Unix(unix64, 0)
+}
+
+func HeightToDateTime(e int64) string {
+	return HeightToTime(e).Format(TIME_FORMAT_DATETIME)
+}
+
+func HeightToDate(e int64) string {
+	return HeightToTime(e).Format(TIME_FORMAT_DATE)
+}
+
+func UnixTimeToHeight(time64 int64) int64 {
+
+	d := time64 - FILECOIN_GENESIS_TIME
+	if d < 0 {
+		return 0
+	}
+	return d / FILECOIN_BLOCK_DURATION
+}
+
+func DateTimeToHeight(strDateTime string) int64 {
+	time64 := DateTimeStr2Unix(strDateTime)
+	return UnixTimeToHeight(time64)
+}
+
+func GetLatestHeight() int64 {
+	return UnixTimeToHeight(time.Now().Unix())
+}
+
+func TimestampToDate(t time.Time) string {
+	return t.Format(TIME_FORMAT_DATE)
+}
+
+func DateIsValid(strDate string) bool {
+
+	if strDate == "" || len(strDate) < 6 {
+		return false
+	}
+	if strDate[0] == '0' {
+		return false
+	}
+	strDate = strings.Replace(strDate, "-", "", -1)
+	strDate = strings.Replace(strDate, "/", "", -1)
+	if date, err := strconv.Atoi(strDate); err != nil {
+		return false
+	} else {
+		if len(strDate) == 6 {
+			if date >= 197011 {
+				return true
+			}
+		} else if len(strDate) == 8 {
+			if date >= 19700101 {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func DateStr2Unix(strDate string) int64 {
+
+	t, err := time.Parse(TIME_FORMAT_DATE, strDate)
+	if err != nil {
+		log.Errorf("date string [%s] parse error [%s]", strDate, err.Error())
+		return 0
+	}
+	return t.Unix()
+}
+
+func DateLessThan(strDate1, strDate2 string) bool {
+	d1 := DateStr2Unix(strDate1)
+	d2 := DateStr2Unix(strDate2)
+	return d1 < d2
+}
+
+func DateLessThanNow(strDate string) bool {
+	strNowDate := NowDate()
+	return DateLessThan(strDate, strNowDate)
+}
diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go
new file mode 100644
index 0000000..783653c
--- /dev/null
+++ b/pkg/utils/utils.go
@@ -0,0 +1,131 @@
+package utils
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/civet148/gotools/randoms"
+
+	"github.com/mssola/user_agent"
+)
+
+// number of digits in the auth code
+const AuthCodeNum = 6
+
+const (
+	UNIT_EiB     = "EiB"
+	UNIT_PIB     = "PiB"
+	UNIT_DOLLAR  = "$"
+	UNIT_BILLION = "billion"
+)
+
+const (
+	UNIT_ALL = UNIT_EiB + "|" + UNIT_PIB + "|" + UNIT_DOLLAR + "|" + UNIT_BILLION
+)
+
+func JsonMarshal(v interface{}) string {
+	var data []byte
+	data, _ = json.Marshal(v)
+	return string(data)
+}
+
+func MakeTimestampSuffix(strKey string) string {
+	now := time.Now().Unix()
+	return fmt.Sprintf("%s_%d", strKey, now)
+}
+
+func ConvertFloat64NonUnit(strNumber string) float64 {
+	units := strings.Split(UNIT_ALL, "|")
+	for _, unit := range units {
+		strNumber = strings.Replace(strNumber, unit, "", -1)
+	}
+	strNumber = strings.TrimSpace(strNumber)
+	number, err := strconv.ParseFloat(strNumber, 64)
+	if err != nil {
+		fmt.Printf("parse float (%s) error [%s]", strNumber, err)
+		return 0
+	}
+	return number
+}
+
+func CheckDurationMinutes(strDuration string) (ok bool, minutes int64, err error) {
+	if strings.Contains(strDuration, "m") {
+		ok = true
+		strMinutes := strings.TrimSuffix(strDuration, "m")
+		minutes, err = strconv.ParseInt(strMinutes, 10, 32)
+	}
+	return
+}
+
+func CheckDurationHours(strDuration string) (ok bool, hours int64, err error) {
+	if strings.Contains(strDuration, "h") {
+		ok = true
+		strHours := strings.TrimSuffix(strDuration, "h")
+		hours, err = strconv.ParseInt(strHours, 10, 32)
+	}
+	return
+}
+
+func ParseUserAgent(userAgent string) (osi user_agent.OSInfo, isMobile bool) {
+	ua := user_agent.New(userAgent)
+	return ua.OSInfo(), ua.Mobile()
+}
+
+func StructToMap(s interface{}) map[string]interface{} {
+	result := make(map[string]interface{})
+	valueOf := reflect.ValueOf(s)
+	typeOf := reflect.TypeOf(s)
+
+	for i := 0; i < valueOf.NumField(); i++ {
+		fieldValue := valueOf.Field(i)
+		fieldName := typeOf.Field(i).Name
+		result[fieldName] = fieldValue.Interface()
+	}
+	return result
+}
+
+func GenAuthCode() (code string) {
+	return randoms.RandomAlphaOrNumeric(AuthCodeNum, false, true)
+}
+
+// GetMBUUID tries to read a motherboard/system unique identifier from the system (Linux only)
+func GetMBUUID() string {
+	paths := []string{
+		"/sys/class/dmi/id/product_uuid",   // system UUID
+		"/sys/class/dmi/id/product_serial", // system serial number
+		"/sys/class/dmi/id/board_serial",   // motherboard serial number
+		"/sys/class/dmi/id/chassis_serial", // chassis serial number
+		"/etc/machine-id",                  // fallback
+	}
+
+	for _, path := range paths {
+		if val, ok := readAndValidateHardwareID(path); ok {
+			return val
+		}
+	}
+	return "unknown"
+}
+
+// readAndValidateHardwareID reads the file and checks that its content is a usable hardware ID
+func readAndValidateHardwareID(path string) (string, bool) {
+	data, err := os.ReadFile(path)
+	if err != nil {
+		return "", false
+	}
+	val := strings.TrimSpace(string(data))
+	lower := strings.ToLower(val)
+
+	if val == "" ||
+		strings.Contains(lower, "to be filled") ||
+		strings.Contains(lower, "not specified") ||
+		strings.Contains(lower, "default") ||
+		lower == "ffffffff-ffff-ffff-ffff-ffffffffffff" {
+		return "", false
+	}
+	return val, true
+}
diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go
new file mode 100644
index 0000000..d4adb2c
--- /dev/null
+++ b/pkg/utils/utils_test.go
@@ -0,0 +1,24 @@
+package utils
+
+import (
+	"fmt"
+	"github.com/civet148/log"
+	"testing"
+)
+
+func TestNowRandom(t *testing.T) {
+	fmt.Printf("time now random [%s]", NowRandom())
+}
+
+func TestStrMap2Uint(t *testing.T) {
+	//strIn := "f2te32o3nkh7mxtsnts8nyum1pc6njygdm87sgurw7tywwxgfyl2wtb56xk6fwwrje567kaxcv456hygda3behda" //index=0
"f3te32o3nkh3mxtsntscnyumkpc6njygdmb7sgurw7tywwxgfyl2wtbmixk6fwwrjeufakaxcvwhygda3behda" //index=1 + strIn := "f3te32o3nk33mxtsntscnyumkpc6nj8gdmb7sgurw7tywwxgfyl2wtbmixk6fwwrjeufakaxcvwhygda3behdc" //index=2 + n := StrMap2Uint(strIn, 3) + fmt.Printf("index = %d\n", n) +} + +func TestUrlKey(t *testing.T) { + router, key := UrlSuffix("/rpc/v1/xk6fwwrjeufakaxcvwhygda3behdc") + log.Infof("router [%s] key [%s]", router, key) +} diff --git a/run-system.sh b/run-system.sh new file mode 100755 index 0000000..8de8dca --- /dev/null +++ b/run-system.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +# 数据源(正式环境需修改成实际数据库配置) +DSN='mysql://root:Coeus!123456@127.0.0.1:3306/intent-system?charset=utf8mb4' +PG='postgres://postgres:Pg2#123321@127.0.0.1:5432/webdb?sslmode=disable&search_path=public' + +# 管理系统HTTP服务监听地址 +LISTEN_ADDR="0.0.0.0:8083" + +# 订阅邮件访问链接域名 +DOMAIN="http://67.223.119.33:3008/blog" + +# 图片存储域名+后缀 +#IMAGE_PREFIX=https://www.your-enterprise.com/images + +# 网关URL +GATEWAY_URL="ws://67.223.119.33:12345" + +# 网关访问KEY +GATEWAY_KEY="bAkYh0JVe2Kph0ot" + +# 网关访问密码 +GATEWAY_SECRET="1EWKBne2LCX0TJBXkrOWSzSDkzaQmoR3xuXBrc41JsdjorpM" + +# 订阅邮件定时任务 +SUB_CRON="0 0 * * *" + +./intent-system run --debug -n "${DSN}" --pg "${PG}" -d "${DOMAIN}" -g "${GATEWAY_URL}" -k "${GATEWAY_KEY}" -s "${GATEWAY_SECRET}" --sub-cron "${SUB_CRON}" "$LISTEN_ADDR" + diff --git a/static/hello.html b/static/hello.html new file mode 100644 index 0000000..573c995 --- /dev/null +++ b/static/hello.html @@ -0,0 +1,10 @@ + + + + + Hello + + +你好! + + \ No newline at end of file