hailin 2025-05-13 14:08:13 +08:00
parent e9b25a11e3
commit bda87f82a8
1 changed file with 0 additions and 475 deletions


@@ -5,8 +5,6 @@ import (
"context" "context"
_ "embed" _ "embed"
"fmt" "fmt"
"net"
"net/url"
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
@@ -17,14 +15,12 @@ import (
"github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat" "github.com/docker/go-connections/nat"
"github.com/go-errors/errors" "github.com/go-errors/errors"
"github.com/jackc/pgconn" "github.com/jackc/pgconn"
"github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4"
"github.com/spf13/afero" "github.com/spf13/afero"
"github.com/supabase/cli/internal/db/start" "github.com/supabase/cli/internal/db/start"
"github.com/supabase/cli/internal/functions/serve"
"github.com/supabase/cli/internal/seed/buckets" "github.com/supabase/cli/internal/seed/buckets"
"github.com/supabase/cli/internal/services" "github.com/supabase/cli/internal/services"
"github.com/supabase/cli/internal/status" "github.com/supabase/cli/internal/status"
@@ -173,173 +169,6 @@ func run(p utils.Program, ctx context.Context, fsys afero.Fs, excludedContainers
utils.Config.Storage.ImageTransformation.Enabled && !isContainerExcluded(utils.Config.Storage.ImgProxyImage, excluded)
p.Send(utils.StatusMsg("Starting containers..."))
// Start Logflare
if utils.Config.Analytics.Enabled && !isContainerExcluded(utils.Config.Analytics.Image, excluded) {
env := []string{
"DB_DATABASE=_supabase",
"DB_HOSTNAME=" + dbConfig.Host,
fmt.Sprintf("DB_PORT=%d", dbConfig.Port),
"DB_SCHEMA=_analytics",
"DB_USERNAME=supabase_admin",
"DB_PASSWORD=" + dbConfig.Password,
"LOGFLARE_MIN_CLUSTER_SIZE=1",
"LOGFLARE_SINGLE_TENANT=true",
"LOGFLARE_SUPABASE_MODE=true",
"LOGFLARE_API_KEY=" + utils.Config.Analytics.ApiKey,
"LOGFLARE_LOG_LEVEL=warn",
"LOGFLARE_NODE_HOST=127.0.0.1",
"LOGFLARE_FEATURE_FLAG_OVERRIDE='multibackend=true'",
"RELEASE_COOKIE=cookie",
}
bind := []string{}
switch utils.Config.Analytics.Backend {
case config.LogflareBigQuery:
workdir, err := os.Getwd()
if err != nil {
return errors.Errorf("failed to get working directory: %w", err)
}
hostJwtPath := filepath.Join(workdir, utils.Config.Analytics.GcpJwtPath)
bind = append(bind, hostJwtPath+":/opt/app/rel/logflare/bin/gcloud.json")
// This is hardcoded in studio frontend
env = append(env,
"GOOGLE_DATASET_ID_APPEND=_prod",
"GOOGLE_PROJECT_ID="+utils.Config.Analytics.GcpProjectId,
"GOOGLE_PROJECT_NUMBER="+utils.Config.Analytics.GcpProjectNumber,
)
case config.LogflarePostgres:
env = append(env,
fmt.Sprintf("POSTGRES_BACKEND_URL=postgresql://%s:%s@%s:%d/%s", dbConfig.User, dbConfig.Password, dbConfig.Host, dbConfig.Port, "_supabase"),
"POSTGRES_BACKEND_SCHEMA=_analytics",
)
}
if _, err := utils.DockerStart(
ctx,
container.Config{
Hostname: "127.0.0.1",
Image: utils.Config.Analytics.Image,
Env: env,
// Original entrypoint conflicts with healthcheck due to 15 seconds sleep:
// https://github.com/Logflare/logflare/blob/staging/run.sh#L35
Entrypoint: []string{"sh", "-c", `cat <<'EOF' > run.sh && sh run.sh
./logflare eval Logflare.Release.migrate
./logflare start --sname logflare
EOF
`},
Healthcheck: &container.HealthConfig{
Test: []string{"CMD", "curl", "-sSfL", "--head", "-o", "/dev/null",
"http://127.0.0.1:4000/health",
},
Interval: 10 * time.Second,
Timeout: 2 * time.Second,
Retries: 3,
StartPeriod: 10 * time.Second,
},
ExposedPorts: nat.PortSet{"4000/tcp": {}},
},
container.HostConfig{
Binds: bind,
PortBindings: nat.PortMap{"4000/tcp": []nat.PortBinding{{HostPort: strconv.FormatUint(uint64(utils.Config.Analytics.Port), 10)}}},
RestartPolicy: container.RestartPolicy{Name: "always"},
},
network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
utils.NetId: {
Aliases: utils.LogflareAliases,
},
},
},
utils.LogflareId,
); err != nil {
return err
}
started = append(started, utils.LogflareId)
}
// Start vector
if utils.Config.Analytics.Enabled && !isContainerExcluded(utils.Config.Analytics.VectorImage, excluded) {
var vectorConfigBuf bytes.Buffer
if err := vectorConfigTemplate.Option("missingkey=error").Execute(&vectorConfigBuf, vectorConfig{
ApiKey: utils.Config.Analytics.ApiKey,
VectorId: utils.VectorId,
LogflareId: utils.LogflareId,
KongId: utils.KongId,
GotrueId: utils.GotrueId,
RestId: utils.RestId,
RealtimeId: utils.RealtimeId,
StorageId: utils.StorageId,
EdgeRuntimeId: utils.EdgeRuntimeId,
DbId: utils.DbId,
}); err != nil {
return errors.Errorf("failed to exec template: %w", err)
}
var binds, env, securityOpts []string
// Special case for GitLab pipeline
parsed, err := client.ParseHostURL(utils.Docker.DaemonHost())
if err != nil {
return errors.Errorf("failed to parse docker host: %w", err)
}
// Ref: https://vector.dev/docs/reference/configuration/sources/docker_logs/#docker_host
dindHost := &url.URL{Scheme: "http", Host: net.JoinHostPort(utils.DinDHost, "2375")}
switch parsed.Scheme {
case "tcp":
if _, port, err := net.SplitHostPort(parsed.Host); err == nil {
dindHost.Host = net.JoinHostPort(utils.DinDHost, port)
}
env = append(env, "DOCKER_HOST="+dindHost.String())
case "npipe":
fmt.Fprintln(os.Stderr, utils.Yellow("WARNING:"), "analytics requires docker daemon exposed on tcp://localhost:2375")
env = append(env, "DOCKER_HOST="+dindHost.String())
case "unix":
if dindHost, err = client.ParseHostURL(client.DefaultDockerHost); err != nil {
return errors.Errorf("failed to parse default host: %w", err)
} else if strings.HasSuffix(parsed.Host, "/.docker/run/docker.sock") {
fmt.Fprintln(os.Stderr, utils.Yellow("WARNING:"), "analytics requires mounting default docker socket:", dindHost.Host)
binds = append(binds, fmt.Sprintf("%[1]s:%[1]s:ro", dindHost.Host))
} else {
// Podman and OrbStack can mount root-less socket without issue
binds = append(binds, fmt.Sprintf("%s:%s:ro", parsed.Host, dindHost.Host))
securityOpts = append(securityOpts, "label:disable")
}
}
if _, err := utils.DockerStart(
ctx,
container.Config{
Image: utils.Config.Analytics.VectorImage,
Env: env,
Entrypoint: []string{"sh", "-c", `cat <<'EOF' > /etc/vector/vector.yaml && vector --config /etc/vector/vector.yaml
` + vectorConfigBuf.String() + `
EOF
`},
Healthcheck: &container.HealthConfig{
Test: []string{"CMD", "wget", "--no-verbose", "--tries=1", "--spider",
"http://127.0.0.1:9001/health",
},
Interval: 10 * time.Second,
Timeout: 2 * time.Second,
Retries: 3,
},
},
container.HostConfig{
Binds: binds,
RestartPolicy: container.RestartPolicy{Name: "always"},
SecurityOpt: securityOpts,
},
network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
utils.NetId: {
Aliases: utils.VectorAliases,
},
},
},
utils.VectorId,
); err != nil {
return err
}
started = append(started, utils.VectorId)
}
// Start Kong.
if !isContainerExcluded(utils.Config.Api.KongImage, excluded) {
var kongConfigBuf bytes.Buffer
@@ -700,100 +529,6 @@ EOF
started = append(started, utils.GotrueId)
}
// Start Inbucket.
if utils.Config.Inbucket.Enabled && !isContainerExcluded(utils.Config.Inbucket.Image, excluded) {
inbucketPortBindings := nat.PortMap{"9000/tcp": []nat.PortBinding{{HostPort: strconv.FormatUint(uint64(utils.Config.Inbucket.Port), 10)}}}
if utils.Config.Inbucket.SmtpPort != 0 {
inbucketPortBindings["2500/tcp"] = []nat.PortBinding{{HostPort: strconv.FormatUint(uint64(utils.Config.Inbucket.SmtpPort), 10)}}
}
if utils.Config.Inbucket.Pop3Port != 0 {
inbucketPortBindings["1100/tcp"] = []nat.PortBinding{{HostPort: strconv.FormatUint(uint64(utils.Config.Inbucket.Pop3Port), 10)}}
}
if _, err := utils.DockerStart(
ctx,
container.Config{
Image: utils.Config.Inbucket.Image,
},
container.HostConfig{
Binds: []string{
// Override default mount points to avoid creating multiple anonymous volumes
// Ref: https://github.com/inbucket/inbucket/blob/v3.0.4/Dockerfile#L52
utils.InbucketId + ":/config",
utils.InbucketId + ":/storage",
},
PortBindings: inbucketPortBindings,
RestartPolicy: container.RestartPolicy{Name: "always"},
},
network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
utils.NetId: {
Aliases: utils.InbucketAliases,
},
},
},
utils.InbucketId,
); err != nil {
return err
}
started = append(started, utils.InbucketId)
}
// Start Realtime.
if utils.Config.Realtime.Enabled && !isContainerExcluded(utils.Config.Realtime.Image, excluded) {
if _, err := utils.DockerStart(
ctx,
container.Config{
Image: utils.Config.Realtime.Image,
Env: []string{
"PORT=4000",
"DB_HOST=" + dbConfig.Host,
fmt.Sprintf("DB_PORT=%d", dbConfig.Port),
"DB_USER=supabase_admin",
"DB_PASSWORD=" + dbConfig.Password,
"DB_NAME=" + dbConfig.Database,
"DB_AFTER_CONNECT_QUERY=SET search_path TO _realtime",
"DB_ENC_KEY=" + utils.Config.Realtime.EncryptionKey,
"API_JWT_SECRET=" + utils.Config.Auth.JwtSecret,
fmt.Sprintf("API_JWT_JWKS=%s", jwks),
"METRICS_JWT_SECRET=" + utils.Config.Auth.JwtSecret,
"APP_NAME=realtime",
"SECRET_KEY_BASE=" + utils.Config.Realtime.SecretKeyBase,
"ERL_AFLAGS=" + utils.ToRealtimeEnv(utils.Config.Realtime.IpVersion),
"DNS_NODES=''",
"RLIMIT_NOFILE=",
"SEED_SELF_HOST=true",
"RUN_JANITOR=true",
fmt.Sprintf("MAX_HEADER_LENGTH=%d", utils.Config.Realtime.MaxHeaderLength),
},
ExposedPorts: nat.PortSet{"4000/tcp": {}},
Healthcheck: &container.HealthConfig{
// Podman splits command by spaces unless it's quoted, but curl header can't be quoted.
Test: []string{"CMD", "curl", "-sSfL", "--head", "-o", "/dev/null",
"-H", "Host:" + utils.Config.Realtime.TenantId,
"http://127.0.0.1:4000/api/ping",
},
Interval: 10 * time.Second,
Timeout: 2 * time.Second,
Retries: 3,
},
},
container.HostConfig{
RestartPolicy: container.RestartPolicy{Name: "always"},
},
network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
utils.NetId: {
Aliases: utils.RealtimeAliases,
},
},
},
utils.RealtimeId,
); err != nil {
return err
}
started = append(started, utils.RealtimeId)
}
// Start PostgREST.
if utils.Config.Api.Enabled && !isContainerExcluded(utils.Config.Api.Image, excluded) {
if _, err := utils.DockerStart(
@@ -886,216 +621,6 @@ EOF
started = append(started, utils.StorageId)
}
// Start Storage ImgProxy.
if isStorageEnabled && isImgProxyEnabled {
if _, err := utils.DockerStart(
ctx,
container.Config{
Image: utils.Config.Storage.ImgProxyImage,
Env: []string{
"IMGPROXY_BIND=:5001",
"IMGPROXY_LOCAL_FILESYSTEM_ROOT=/",
"IMGPROXY_USE_ETAG=/",
"IMGPROXY_MAX_SRC_RESOLUTION=50",
"IMGPROXY_MAX_SRC_FILE_SIZE=25000000",
"IMGPROXY_MAX_ANIMATION_FRAMES=60",
"IMGPROXY_ENABLE_WEBP_DETECTION=true",
},
Healthcheck: &container.HealthConfig{
Test: []string{"CMD", "imgproxy", "health"},
Interval: 10 * time.Second,
Timeout: 2 * time.Second,
Retries: 3,
},
},
container.HostConfig{
VolumesFrom: []string{utils.StorageId},
RestartPolicy: container.RestartPolicy{Name: "always"},
},
network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
utils.NetId: {
Aliases: utils.ImgProxyAliases,
},
},
},
utils.ImgProxyId,
); err != nil {
return err
}
started = append(started, utils.ImgProxyId)
}
// Start all functions.
if utils.Config.EdgeRuntime.Enabled && !isContainerExcluded(utils.Config.EdgeRuntime.Image, excluded) {
dbUrl := fmt.Sprintf("postgresql://%s:%s@%s:%d/%s", dbConfig.User, dbConfig.Password, dbConfig.Host, dbConfig.Port, dbConfig.Database)
if err := serve.ServeFunctions(ctx, "", nil, "", dbUrl, serve.RuntimeOption{}, fsys); err != nil {
return err
}
started = append(started, utils.EdgeRuntimeId)
}
// Start pg-meta.
if utils.Config.Studio.Enabled && !isContainerExcluded(utils.Config.Studio.PgmetaImage, excluded) {
if _, err := utils.DockerStart(
ctx,
container.Config{
Image: utils.Config.Studio.PgmetaImage,
Env: []string{
"PG_META_PORT=8080",
"PG_META_DB_HOST=" + dbConfig.Host,
"PG_META_DB_NAME=" + dbConfig.Database,
"PG_META_DB_USER=" + dbConfig.User,
fmt.Sprintf("PG_META_DB_PORT=%d", dbConfig.Port),
"PG_META_DB_PASSWORD=" + dbConfig.Password,
},
Healthcheck: &container.HealthConfig{
Test: []string{"CMD-SHELL", `node --eval="fetch('http://127.0.0.1:8080/health').then((r) => {if (!r.ok) throw new Error(r.status)})"`},
Interval: 10 * time.Second,
Timeout: 2 * time.Second,
Retries: 3,
},
},
container.HostConfig{
RestartPolicy: container.RestartPolicy{Name: "always"},
},
network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
utils.NetId: {
Aliases: utils.PgmetaAliases,
},
},
},
utils.PgmetaId,
); err != nil {
return err
}
started = append(started, utils.PgmetaId)
}
// Start Studio.
if utils.Config.Studio.Enabled && !isContainerExcluded(utils.Config.Studio.Image, excluded) {
if _, err := utils.DockerStart(
ctx,
container.Config{
Image: utils.Config.Studio.Image,
Env: []string{
"STUDIO_PG_META_URL=http://" + utils.PgmetaId + ":8080",
"POSTGRES_PASSWORD=" + dbConfig.Password,
"SUPABASE_URL=http://" + utils.KongId + ":8000",
"SUPABASE_PUBLIC_URL=" + utils.Config.Studio.ApiUrl,
"AUTH_JWT_SECRET=" + utils.Config.Auth.JwtSecret,
"SUPABASE_ANON_KEY=" + utils.Config.Auth.AnonKey,
"SUPABASE_SERVICE_KEY=" + utils.Config.Auth.ServiceRoleKey,
"LOGFLARE_API_KEY=" + utils.Config.Analytics.ApiKey,
"OPENAI_API_KEY=" + utils.Config.Studio.OpenaiApiKey,
fmt.Sprintf("LOGFLARE_URL=http://%v:4000", utils.LogflareId),
fmt.Sprintf("NEXT_PUBLIC_ENABLE_LOGS=%v", utils.Config.Analytics.Enabled),
fmt.Sprintf("NEXT_ANALYTICS_BACKEND_PROVIDER=%v", utils.Config.Analytics.Backend),
// Ref: https://github.com/vercel/next.js/issues/51684#issuecomment-1612834913
"HOSTNAME=0.0.0.0",
},
Healthcheck: &container.HealthConfig{
Test: []string{"CMD-SHELL", `node --eval="fetch('http://127.0.0.1:3000/api/platform/profile').then((r) => {if (!r.ok) throw new Error(r.status)})"`},
Interval: 10 * time.Second,
Timeout: 2 * time.Second,
Retries: 3,
},
},
container.HostConfig{
PortBindings: nat.PortMap{"3000/tcp": []nat.PortBinding{{HostPort: strconv.FormatUint(uint64(utils.Config.Studio.Port), 10)}}},
RestartPolicy: container.RestartPolicy{Name: "always"},
},
network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
utils.NetId: {
Aliases: utils.StudioAliases,
},
},
},
utils.StudioId,
); err != nil {
return err
}
started = append(started, utils.StudioId)
}
// Start pooler.
if utils.Config.Db.Pooler.Enabled && !isContainerExcluded(utils.Config.Db.Pooler.Image, excluded) {
portSession := uint16(5432)
portTransaction := uint16(6543)
dockerPort := portTransaction
if utils.Config.Db.Pooler.PoolMode == config.SessionMode {
dockerPort = portSession
}
// Create pooler tenant
var poolerTenantBuf bytes.Buffer
if err := poolerTenantTemplate.Option("missingkey=error").Execute(&poolerTenantBuf, poolerTenant{
DbHost: dbConfig.Host,
DbPort: dbConfig.Port,
DbDatabase: dbConfig.Database,
DbPassword: dbConfig.Password,
ExternalId: utils.Config.Db.Pooler.TenantId,
ModeType: utils.Config.Db.Pooler.PoolMode,
DefaultMaxClients: utils.Config.Db.Pooler.MaxClientConn,
DefaultPoolSize: utils.Config.Db.Pooler.DefaultPoolSize,
}); err != nil {
return errors.Errorf("failed to exec template: %w", err)
}
if _, err := utils.DockerStart(
ctx,
container.Config{
Image: utils.Config.Db.Pooler.Image,
Env: []string{
"PORT=4000",
fmt.Sprintf("PROXY_PORT_SESSION=%d", portSession),
fmt.Sprintf("PROXY_PORT_TRANSACTION=%d", portTransaction),
fmt.Sprintf("DATABASE_URL=ecto://%s:%s@%s:%d/%s", dbConfig.User, dbConfig.Password, dbConfig.Host, dbConfig.Port, "_supabase"),
"CLUSTER_POSTGRES=true",
"SECRET_KEY_BASE=" + utils.Config.Db.Pooler.SecretKeyBase,
"VAULT_ENC_KEY=" + utils.Config.Db.Pooler.EncryptionKey,
"API_JWT_SECRET=" + utils.Config.Auth.JwtSecret,
"METRICS_JWT_SECRET=" + utils.Config.Auth.JwtSecret,
"REGION=local",
"RUN_JANITOR=true",
"ERL_AFLAGS=-proto_dist inet_tcp",
},
Cmd: []string{
"/bin/sh", "-c",
fmt.Sprintf("/app/bin/migrate && /app/bin/supavisor eval '%s' && /app/bin/server", poolerTenantBuf.String()),
},
ExposedPorts: nat.PortSet{
"4000/tcp": {},
nat.Port(fmt.Sprintf("%d/tcp", portSession)): {},
nat.Port(fmt.Sprintf("%d/tcp", portTransaction)): {},
},
Healthcheck: &container.HealthConfig{
Test: []string{"CMD", "curl", "-sSfL", "--head", "-o", "/dev/null", "http://127.0.0.1:4000/api/health"},
Interval: 10 * time.Second,
Timeout: 2 * time.Second,
Retries: 3,
},
},
container.HostConfig{
PortBindings: nat.PortMap{nat.Port(fmt.Sprintf("%d/tcp", dockerPort)): []nat.PortBinding{{
HostPort: strconv.FormatUint(uint64(utils.Config.Db.Pooler.Port), 10)},
}},
RestartPolicy: container.RestartPolicy{Name: "always"},
},
network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
utils.NetId: {
Aliases: utils.PoolerAliases,
},
},
},
utils.PoolerId,
); err != nil {
return err
}
started = append(started, utils.PoolerId)
}
p.Send(utils.StatusMsg("Waiting for health checks...")) p.Send(utils.StatusMsg("Waiting for health checks..."))
if utils.NoBackupVolume && utils.SliceContains(started, utils.StorageId) { if utils.NoBackupVolume && utils.SliceContains(started, utils.StorageId) {
if err := start.WaitForHealthyService(ctx, serviceTimeout, utils.StorageId); err != nil { if err := start.WaitForHealthyService(ctx, serviceTimeout, utils.StorageId); err != nil {