commit e9b25a11e36d9077e2107795a21ca653583b2287 Author: hailin Date: Tue May 13 14:00:38 2025 +0800 first commit diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..25b00f9 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @supabase/dev-workflows diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..7dca709 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,38 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**System information** +Rerun the failing command with `--create-ticket` flag. + - Ticket ID: [e.g. ab1ac733e31e4f928a4d7c8402543712] + - Version of OS: [e.g. Ubuntu 22.04] + - Version of CLI: [e.g. v1.60.0] + - Version of Docker: [e.g. v25.0.3] + - Versions of services: [output from `supabase services` command] + +**Additional context** +If applicable, add any other context about the problem here. + - Browser [e.g. chrome, safari] + - Version of supabase-js [e.g. v2.22.0] + - Version of Node.js [e.g. v16.20.0] diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..bbcbbe7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/improve-documentation.md b/.github/ISSUE_TEMPLATE/improve-documentation.md new file mode 100644 index 0000000..24e3b0a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/improve-documentation.md @@ -0,0 +1,20 @@ +--- +name: Improve documentation +about: Suggest an improvement to our documentation +title: '' +labels: '' +assignees: '' + +--- + +**Link** +Add a link to the page which needs improvement (if relevant) + +**Describe the problem** +Is the documentation missing? Or is it confusing? Why is it confusing? + +**Describe the improvement** +A clear and concise description of the improvement. + +**Additional context** +Add any other context or screenshots that help clarify your question. 
diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..c1c1005 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,23 @@ +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "daily" + - package-ecosystem: "npm" + directory: "/" + schedule: + interval: "daily" + - package-ecosystem: "docker" + directory: "pkg/config/templates" + schedule: + interval: "daily" + ignore: + - dependency-name: "library/kong" + - dependency-name: "inbucket/inbucket" + - dependency-name: "darthsim/imgproxy" + - dependency-name: "timberio/vector" diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml new file mode 100644 index 0000000..1e57b0a --- /dev/null +++ b/.github/workflows/automerge.yml @@ -0,0 +1,38 @@ +# Adapted from https://blog.somewhatabstract.com/2021/10/11/setting-up-dependabot-with-github-actions-to-approve-and-merge/ +name: Dependabot auto-merge + +on: pull_request + +permissions: + pull-requests: write + contents: write + +jobs: + dependabot: + runs-on: ubuntu-latest + # Checking the actor will prevent your Action run failing on non-Dependabot + # PRs but also ensures that it only does work for Dependabot PRs. + if: ${{ github.actor == 'dependabot[bot]' }} + steps: + # This first step will fail if there's no metadata and so the approval + # will not occur. + - name: Dependabot metadata + id: meta + uses: dependabot/fetch-metadata@v2 + with: + github-token: "${{ secrets.GITHUB_TOKEN }}" + + # Here the PR gets approved. + - name: Approve a PR + if: ${{ steps.meta.outputs.update-type == 'version-update:semver-patch' || (!startsWith(steps.meta.outputs.previous-version, '0.') && steps.meta.outputs.update-type == 'version-update:semver-minor') }} + run: gh pr review --approve "${{ github.event.pull_request.html_url }}" + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + # Finally, this sets the PR to allow auto-merging for patch and minor + # updates if all checks pass + - name: Enable auto-merge for Dependabot PRs + if: ${{ steps.meta.outputs.update-type == 'version-update:semver-patch' || (!startsWith(steps.meta.outputs.previous-version, '0.') && steps.meta.outputs.update-type == 'version-update:semver-minor') }} + run: gh pr merge --auto --squash "${{ github.event.pull_request.html_url }}" + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..ce32d17 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,80 @@ +name: CI + +on: + pull_request: + push: + branches: + - develop + +jobs: + test: + name: Test + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache: true + + # Required by: internal/utils/credentials/keyring_test.go + - uses: t1m0thyj/unlock-keyring@v1 + - run: | + go run gotest.tools/gotestsum -- -race -v -count=1 -coverprofile=coverage.out \ + `go list ./... 
| grep -Ev 'cmd|docs|examples|pkg/api|tools'` + + - uses: coverallsapp/github-action@v2 + with: + file: coverage.out + format: golang + + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + # Linter requires no cache + cache: false + + - uses: golangci/golangci-lint-action@v6 + with: + args: --timeout 3m --verbose + + start: + name: Start + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache: true + - run: go build main.go + - run: ./main init + - run: ./main start + env: + SUPABASE_INTERNAL_IMAGE_REGISTRY: ghcr.io + + codegen: + name: Codegen + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache: true + + - run: go generate + - run: | + if ! git diff --ignore-space-at-eol --exit-code --quiet pkg; then + echo "Detected uncommitted changes after codegen. See status below:" + git diff + exit 1 + fi diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 0000000..218e316 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,93 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + pull_request: + push: + branches: + - develop + +jobs: + analyze: + name: Analyze (${{ matrix.language }}) + # Runner size impacts CodeQL analysis time. To learn more, please see: + # - https://gh.io/recommended-hardware-resources-for-running-codeql + # - https://gh.io/supported-runners-and-hardware-resources + # - https://gh.io/using-larger-runners (GitHub.com only) + # Consider using larger runners or machines with greater resources for possible analysis time improvements. + runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} + timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }} + permissions: + # required for all workflows + security-events: write + + # required to fetch internal or private CodeQL packs + packages: read + + # only required for workflows in private repositories + actions: read + contents: read + + strategy: + fail-fast: false + matrix: + include: + - language: go + build-mode: autobuild + - language: javascript-typescript + build-mode: none + # CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' + # Use `c-cpp` to analyze code written in C, C++ or both + # Use 'java-kotlin' to analyze code written in Java, Kotlin or both + # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both + # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis, + # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning. 
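+        # As a concrete sketch of the build-mode customization described in the
+        # comments below, switching Go from autobuild to a manual build (handled
+        # by the 'manual' step further down) would be a matrix entry like:
+        #   - language: go
+        #     build-mode: manual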
+ # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how + # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + build-mode: ${{ matrix.build-mode }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + # If the analyze step fails for one of the languages you are analyzing with + # "We were unable to automatically build your code", modify the matrix above + # to set the build mode to "manual" for that language. Then modify this step + # to build your code. + # ℹ️ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + - if: matrix.build-mode == 'manual' + shell: bash + run: | + echo 'If you are using a "manual" build mode for one or more of the' \ + 'languages you are analyzing, replace this with the commands to build' \ + 'your code, for example:' + echo ' make bootstrap' + echo ' make release' + exit 1 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/deploy-check.yml b/.github/workflows/deploy-check.yml new file mode 100644 index 0000000..44c2806 --- /dev/null +++ b/.github/workflows/deploy-check.yml @@ -0,0 +1,20 @@ +name: Check Deploy + +on: + pull_request_target: + types: + - opened + - reopened + - synchronize + - edited + branches: + - main + +jobs: + check: + if: github.head_ref != 'develop' + runs-on: ubuntu-latest + steps: + - run: | + echo "Pull requests to main branch are only allowed from develop branch." 
+ exit 1 diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 0000000..ace9fc0 --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,22 @@ +name: Prod Deploy + +on: + # Run this action every Tuesday at 02:00 UTC (Singapore 10AM) + schedule: + - cron: "0 2 * * 2" + workflow_dispatch: + +permissions: + pull-requests: write + contents: write + +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - run: gh pr create -B main -H develop --title 'Prod deploy' --fill + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/fast-forward.yml b/.github/workflows/fast-forward.yml new file mode 100644 index 0000000..efeb443 --- /dev/null +++ b/.github/workflows/fast-forward.yml @@ -0,0 +1,32 @@ +name: Fast-forward + +on: + pull_request_review: + types: + - submitted + +permissions: + contents: write + +jobs: + approved: + if: | + github.event.pull_request.head.ref == 'develop' && + github.event.pull_request.base.ref == 'main' && + github.event.review.state == 'approved' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - run: | + git checkout main + git merge --ff-only "${{ github.event.pull_request.head.sha }}" + git push origin main + + publish: + needs: + - approved + # Call workflow explicitly because events from actions cannot trigger more actions + uses: ./.github/workflows/release.yml + secrets: inherit diff --git a/.github/workflows/install.yml b/.github/workflows/install.yml new file mode 100644 index 0000000..e9bcd30 --- /dev/null +++ b/.github/workflows/install.yml @@ -0,0 +1,124 @@ +name: Install + +on: + pull_request: + paths: + - '.github/workflows/install.yml' + - 'package.json' + - 'scripts/**' + push: + branches: + - develop + paths: + - '.github/workflows/install.yml' + - 'package.json' + - 'scripts/**' + +jobs: + pack: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - run: | + jq -c '.version = "1.28.0"' package.json > tmp.$$.json + mv tmp.$$.json package.json + npm pack + + - uses: actions/upload-artifact@v4 + with: + name: installer + path: supabase-1.28.0.tgz + + npm: + needs: pack + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/download-artifact@v4 + with: + name: installer + + - run: npm init -y + - run: npm i --save-dev ./supabase-1.28.0.tgz + - run: npx --no-install supabase --version + + yarn: + needs: pack + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/download-artifact@v4 + with: + name: installer + + - run: yarn init -y + - run: yarn add -D ./supabase-1.28.0.tgz + - run: yarn supabase --version + + yarn_berry: + needs: pack + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/download-artifact@v4 + with: + name: installer + + - run: yarn set version berry + # - run: yarn config set nodeLinker node-modules + - run: yarn init -y + - run: yarn add -D ./supabase-1.28.0.tgz + - if: ${{ matrix.os != 'windows-latest' }} + run: yarn supabase --version + # Workaround for running extensionless executable on windows + - if: ${{ matrix.os == 'windows-latest' }} + run: | + & "$(yarn bin supabase).exe" --version + + pnpm: + needs: pack + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, 
macos-latest, windows-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/download-artifact@v4 + with: + name: installer + + - run: npm install -g pnpm + - run: pnpm init + - run: pnpm i --save-dev ./supabase-1.28.0.tgz + - run: pnpm supabase --version + + bun: + needs: pack + strategy: + fail-fast: false + matrix: + # Bun build is experimental on windows + os: [ubuntu-latest, macos-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/download-artifact@v4 + with: + name: installer + + - uses: oven-sh/setup-bun@v2 + with: + bun-version: latest + - run: | + echo '{"trustedDependencies": ["supabase"]}' > package.json + - run: bun add -D ./supabase-1.28.0.tgz + - run: bunx supabase --version diff --git a/.github/workflows/mirror-image.yml b/.github/workflows/mirror-image.yml new file mode 100644 index 0000000..fb0bfd5 --- /dev/null +++ b/.github/workflows/mirror-image.yml @@ -0,0 +1,46 @@ +name: Mirror Image + +on: + workflow_call: + inputs: + image: + required: true + type: string + workflow_dispatch: + inputs: + image: + description: "org/image:tag" + required: true + type: string + +jobs: + mirror: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + id-token: write + steps: + - id: strip + run: | + TAG=${{ inputs.image }} + echo "image=${TAG##*/}" >> $GITHUB_OUTPUT + - name: configure aws credentials + uses: aws-actions/configure-aws-credentials@v4.1.0 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: us-east-1 + - uses: docker/login-action@v3 + with: + registry: public.ecr.aws + - uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - uses: akhilerm/tag-push-action@v2.2.0 + with: + src: docker.io/${{ inputs.image }} + dst: | + public.ecr.aws/supabase/${{ steps.strip.outputs.image }} + ghcr.io/supabase/${{ steps.strip.outputs.image }} diff --git a/.github/workflows/mirror.yml b/.github/workflows/mirror.yml new file mode 100644 index 0000000..0eb46d8 --- /dev/null +++ b/.github/workflows/mirror.yml @@ -0,0 +1,57 @@ +name: Mirror Dependencies +# We mirror upstream container images like Migra, imgproxy, etc. because these +# are usually only available on certain image registry and not others (e.g. only +# on Docker Hub and not on ghcr.io or AWS ECR). +# +# For container images that we control, we usually publish to Docker Hub, +# ghcr.io, and AWS ECR. + +on: + # We can't trigger the mirror job on PR merge because certain tests would fail + # until we mirror some images. E.g. a PR to update the imgproxy image version + # would fail, because there is a test that creates a container from the + # updated image version, which would fail because the image hasn't been + # mirrored yet. It's a catch-22! + # + # TODO: Make the cli start test run *after* we mirror images (if needed). 
+
+on:
+  pull_request_review:
+    types:
+      - submitted
+  workflow_dispatch:
+
+jobs:
+  setup:
+    runs-on: ubuntu-latest
+    if: ${{ github.event_name == 'workflow_dispatch' || github.event.review.state == 'approved' }}
+    outputs:
+      tags: ${{ steps.list.outputs.tags }}
+      curr: ${{ steps.curr.outputs.tags }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version-file: go.mod
+          cache: true
+      - id: list
+        run: |
+          echo "tags=$(go run tools/listdep/main.go)" >> $GITHUB_OUTPUT
+      - id: curr
+        name: List main branch dependencies
+        if: github.ref != 'refs/heads/main'
+        run: |
+          git fetch origin main
+          git checkout main
+          echo "tags=$(go run tools/listdep/main.go)" >> $GITHUB_OUTPUT
+
+  publish:
+    needs:
+      - setup
+    if: ${{ needs.setup.outputs.tags != needs.setup.outputs.curr }}
+    strategy:
+      matrix:
+        src: ${{ fromJson(needs.setup.outputs.tags) }}
+    # Call workflow explicitly because events from actions cannot trigger more actions
+    uses: ./.github/workflows/mirror-image.yml
+    with:
+      image: ${{ matrix.src }}
+    secrets: inherit
diff --git a/.github/workflows/pg-prove.yml b/.github/workflows/pg-prove.yml
new file mode 100644
index 0000000..9844d7e
--- /dev/null
+++ b/.github/workflows/pg-prove.yml
@@ -0,0 +1,85 @@
+name: Publish pg_prove
+
+on:
+  workflow_dispatch:
+
+jobs:
+  settings:
+    runs-on: ubuntu-latest
+    outputs:
+      image_tag: supabase/pg_prove:${{ steps.version.outputs.pg_prove }}
+    steps:
+      - uses: docker/setup-buildx-action@v3
+      - uses: docker/build-push-action@v6
+        with:
+          load: true
+          context: https://github.com/horrendo/pg_prove.git
+          target: builder
+          tags: supabase/pg_prove:builder
+      - id: version
+        # Replace the space with an equals sign to get the raw version string, i.e. pg_prove=3.36
+        run: |
+          docker run --rm -a STDOUT supabase/pg_prove:builder pg_prove --version \
+            | tr ' ' '=' \
+            >> $GITHUB_OUTPUT
+        shell: bash
+
+  build_image:
+    needs:
+      - settings
+    strategy:
+      matrix:
+        include:
+          - runner: [self-hosted, X64]
+            arch: amd64
+          - runner: arm-runner
+            arch: arm64
+    runs-on: ${{ matrix.runner }}
+    timeout-minutes: 180
+    outputs:
+      image_digest: ${{ steps.build.outputs.digest }}
+    steps:
+      - run: docker context create builders
+      - uses: docker/setup-buildx-action@v3
+        with:
+          endpoint: builders
+      - uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+      - id: build
+        uses: docker/build-push-action@v6
+        with:
+          push: true
+          context: https://github.com/horrendo/pg_prove.git
+          tags: ${{ needs.settings.outputs.image_tag }}_${{ matrix.arch }}
+          platforms: linux/${{ matrix.arch }}
+          cache-from: type=gha,scope=${{ github.ref_name }}-pg_prove-${{ matrix.arch }}
+          cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-pg_prove-${{ matrix.arch }}
+
+  merge_manifest:
+    needs:
+      - settings
+      - build_image
+    runs-on: ubuntu-latest
+    steps:
+      - uses: docker/setup-buildx-action@v3
+      - uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+      - name: Merge multi-arch manifests
+        run: |
+          docker buildx imagetools create -t ${{ needs.settings.outputs.image_tag }} \
+            ${{ needs.settings.outputs.image_tag }}_amd64 \
+            ${{ needs.settings.outputs.image_tag }}_arm64
+
+  publish:
+    needs:
+      - settings
+      - merge_manifest
+    # Call workflow explicitly because events from actions cannot trigger more actions
+    uses: ./.github/workflows/mirror-image.yml
+    with:
+      image: ${{ needs.settings.outputs.image_tag }}
+    secrets: inherit
diff --git
a/.github/workflows/publish-migra.yml b/.github/workflows/publish-migra.yml new file mode 100644 index 0000000..e3db68c --- /dev/null +++ b/.github/workflows/publish-migra.yml @@ -0,0 +1,85 @@ +name: Publish migra + +on: + workflow_dispatch: + +jobs: + settings: + runs-on: ubuntu-latest + outputs: + image_tag: supabase/migra:${{ steps.version.outputs.migra }} + steps: + - uses: docker/setup-buildx-action@v3 + - uses: docker/build-push-action@v6 + with: + load: true + context: https://github.com/djrobstep/migra.git + tags: supabase/migra:builder + - id: version + # Replace space with equal to get the raw version string, ie. migra=3.0.1663481299 + run: | + docker run --rm -a STDOUT supabase/migra:builder pip show migra \ + | grep 'Version' \ + | sed -E 's/Version: (.*)/migra=\1/g' \ + >> $GITHUB_OUTPUT + shell: bash + + build_image: + needs: + - settings + strategy: + matrix: + include: + - runner: [self-hosted, X64] + arch: amd64 + - runner: arm-runner + arch: arm64 + runs-on: ${{ matrix.runner }} + timeout-minutes: 180 + outputs: + image_digest: ${{ steps.build.outputs.digest }} + steps: + - run: docker context create builders + - uses: docker/setup-buildx-action@v3 + with: + endpoint: builders + - uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - id: build + uses: docker/build-push-action@v6 + with: + push: true + context: https://github.com/djrobstep/migra.git + tags: ${{ needs.settings.outputs.image_tag }}_${{ matrix.arch }} + platforms: linux/${{ matrix.arch }} + cache-from: type=gha,scope=${{ github.ref_name }}-migra-${{ matrix.arch }} + cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-migra-${{ matrix.arch }} + + merge_manifest: + needs: + - settings + - build_image + runs-on: ubuntu-latest + steps: + - uses: docker/setup-buildx-action@v3 + - uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Merge multi-arch manifests + run: | + docker buildx imagetools create -t ${{ needs.settings.outputs.image_tag }} \ + ${{ needs.settings.outputs.image_tag }}_amd64 \ + ${{ needs.settings.outputs.image_tag }}_arm64 + + publish: + needs: + - settings + - merge_manifest + # Call workflow explicitly because events from actions cannot trigger more actions + uses: ./.github/workflows/mirror-image.yml + with: + image: ${{ needs.settings.outputs.image_tag }} + secrets: inherit diff --git a/.github/workflows/release-beta.yml b/.github/workflows/release-beta.yml new file mode 100644 index 0000000..bc2601c --- /dev/null +++ b/.github/workflows/release-beta.yml @@ -0,0 +1,90 @@ +name: Release (Beta) + +on: + push: + branches: + - develop + workflow_dispatch: + +jobs: + release: + name: semantic-release + runs-on: ubuntu-latest + permissions: + contents: write + outputs: + new-release-published: ${{ steps.semantic-release.outputs.new_release_published }} + new-release-version: ${{ steps.semantic-release.outputs.new_release_version }} + new-release-channel: ${{ steps.semantic-release.outputs.new_release_channel }} + steps: + - uses: actions/checkout@v4 + - id: semantic-release + uses: cycjimmy/semantic-release-action@v4 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + goreleaser: + name: GoReleaser + needs: + - release + if: needs.release.outputs.new-release-published == 'true' + permissions: + contents: write + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - uses: actions/setup-go@v5 + with: + 
go-version-file: go.mod + cache: true + + - uses: goreleaser/goreleaser-action@v6 + with: + distribution: goreleaser + version: ~> v2 + args: release --clean + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SENTRY_DSN: ${{ secrets.SENTRY_DSN }} + + - run: gh release edit v${{ needs.release.outputs.new-release-version }} --draft=false --prerelease + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + commit: + name: Publish Brew and Scoop + needs: + - release + - goreleaser + if: needs.release.outputs.new-release-published == 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache: true + - run: go run tools/publish/main.go --beta "${{ needs.release.outputs.new-release-version }}" + env: + GITHUB_TOKEN: ${{ secrets.GH_PAT }} + + publish: + name: Publish NPM + needs: + - release + - goreleaser + if: needs.release.outputs.new-release-published == 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: "16.x" + registry-url: "https://registry.npmjs.org" + - run: npm --git-tag-version=false version ${{ needs.release.outputs.new-release-version }} + - run: npm publish --tag ${{ needs.release.outputs.new-release-channel }} + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..8ef8b43 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,91 @@ +name: Release + +on: + push: + branches: + - main + workflow_call: + +jobs: + settings: + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + outputs: + release_tag: ${{ steps.prerelease.outputs.tagName }} + steps: + - uses: actions/checkout@v4 + - id: prerelease + run: | + gh release list --limit 1 --json tagName --jq \ + '.[]|to_entries|map("\(.key)=\(.value|tostring)")|.[]' >> $GITHUB_OUTPUT + - run: gh release edit ${{ steps.prerelease.outputs.tagName }} --latest --prerelease=false + + commit: + name: Publish Brew and Scoop + needs: + - settings + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache: true + - run: go run tools/publish/main.go ${{ needs.settings.outputs.release_tag }} + env: + GITHUB_TOKEN: ${{ secrets.GH_PAT }} + + publish: + name: Publish NPM + needs: + - settings + uses: ./.github/workflows/tag-npm.yml + with: + release: ${{ needs.settings.outputs.release_tag }} + secrets: inherit + + compose: + name: Bump self-hosted versions + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache: true + - run: go run tools/selfhost/main.go + env: + GITHUB_TOKEN: ${{ secrets.GH_PAT }} + + changelog: + name: Publish changelog + needs: + - commit + - publish + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache: true + - run: go run tools/changelog/main.go ${{ secrets.SLACK_CHANNEL }} + env: + GITHUB_TOKEN: ${{ secrets.GH_PAT }} + SLACK_TOKEN: ${{ secrets.SLACK_TOKEN }} + + docs: + name: Publish reference docs + needs: + - settings + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache: true + - run: go run docs/main.go ${{ needs.settings.outputs.release_tag }} | go run tools/bumpdoc/main.go apps/docs/spec/cli_v1_commands.yaml + env: + 
GITHUB_TOKEN: ${{ secrets.GH_PAT }} diff --git a/.github/workflows/tag-npm.yml b/.github/workflows/tag-npm.yml new file mode 100644 index 0000000..9eda44c --- /dev/null +++ b/.github/workflows/tag-npm.yml @@ -0,0 +1,29 @@ +name: Tag NPM + +on: + workflow_call: + inputs: + release: + required: true + type: string + workflow_dispatch: + inputs: + release: + description: "v1.0.0" + required: true + type: string + +jobs: + tag: + name: Move latest tag + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: "16.x" + registry-url: "https://registry.npmjs.org" + - run: npm dist-tag add "supabase@${RELEASE_TAG#v}" latest + env: + RELEASE_TAG: ${{ inputs.release }} + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e119d97 --- /dev/null +++ b/.gitignore @@ -0,0 +1,31 @@ +# General +.DS_Store +.env + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +/cli + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# IDE +/.vscode +/.idea + +# NPM +node_modules +package-lock.json + +# Initialized by cli for local testing +/supabase diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..b91c1af --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,23 @@ +linters: + enable: + - dogsled + - dupl + - gofmt + - goimports + - gosec + - misspell + - nakedret + - stylecheck + - unconvert + - unparam + - whitespace + - errcheck + - gosimple + - staticcheck + - ineffassign + - unused +linters-settings: + stylecheck: + checks: ["all", "-ST1003"] + dupl: + threshold: 250 diff --git a/.goreleaser.yml b/.goreleaser.yml new file mode 100644 index 0000000..b1174fd --- /dev/null +++ b/.goreleaser.yml @@ -0,0 +1,46 @@ +version: 2 +project_name: supabase +builds: + - id: supabase + binary: supabase + flags: + - -trimpath + ldflags: + - -s -w -X github.com/supabase/cli/internal/utils.Version={{.Version}} -X github.com/supabase/cli/internal/utils.SentryDsn={{ .Env.SENTRY_DSN }} + env: + - CGO_ENABLED=0 + targets: + - darwin_amd64 + - darwin_arm64 + - linux_amd64 + - linux_arm64 + - windows_amd64 + - windows_arm64 +archives: + - name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}_{{ . }}{{ end }}{{ if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}' +release: + draft: true + replace_existing_draft: true + prerelease: auto +changelog: + use: github + groups: + - title: Features + regexp: '^.*?feat(\([[:word:]]+\))??!?:.+$' + order: 0 + - title: "Bug fixes" + regexp: '^.*?fix(\([[:word:]]+\))??!?:.+$' + order: 1 + - title: Others + order: 999 +nfpms: + - vendor: Supabase + description: Supabase CLI + maintainer: Supabase CLI + homepage: https://supabase.com + license: MIT + formats: + - apk + - deb + - rpm + - archlinux diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..86937e4 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,43 @@ +# Welcome to Supabase CLI contributing guide + +## Release process + +We release to stable channel every two weeks. + +We release to beta channel on merge to `main` branch. + +Hotfixes are released manually. Follow these steps: + +1. Create a new branch named `N.N.x` from latest stable version. For eg. + 1. If stable is on `v1.2.3` and beta is on `v1.3.6`, create `1.2.x` branch. + 2. 
If stable is on `v1.3.1` and beta is on `v1.3.6`, create a `1.3.x` branch (or simply release all patch versions).
+2. Cherry-pick your hotfix on top of the `N.N.x` branch.
+3. Run the [Release (Beta)](https://github.com/supabase/cli/actions/workflows/release-beta.yml) workflow targeting the `N.N.x` branch.
+4. Verify your hotfix locally with `npx supabase@N.N.x help`.
+5. Edit [GitHub releases](https://github.com/supabase/cli/releases) to set your hotfix pre-release as the latest stable.
+
+After promoting the next beta version to stable, previous `N.N.x` branches may be deleted.
+
+To revert a stable release, set a previous release to latest. This will update brew and scoop to an old version. There's no need to revert npm, as it supports version pinning.
+
+## Unit testing
+
+All new code should aim to improve [test coverage](https://coveralls.io/github/supabase/cli).
+
+We use mock objects for unit testing code that interacts with external systems, such as
+
+- local filesystem (via [afero](https://github.com/spf13/afero))
+- Postgres database (via [pgmock](https://github.com/jackc/pgmock))
+- Supabase API (via [gock](https://github.com/h2non/gock))
+
+Wrappers and test helper methods can be found under [internal/testing](internal/testing).
+
+Integration tests are created under [test](test). To run all tests:
+
+```bash
+go test ./... -race -v -count=1 -failfast
+```
+
+## API client
+
+The Supabase API client is generated from the OpenAPI spec. See [our guide](api/README.md) for updating the client and types.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..f1802df
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Supabase, Inc. and contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..7da7ff5
--- /dev/null
+++ b/README.md
@@ -0,0 +1,183 @@
+# Supabase CLI (v1)
+
+[![Coverage Status](https://coveralls.io/repos/github/supabase/cli/badge.svg?branch=main)](https://coveralls.io/github/supabase/cli?branch=main) [![Bitbucket Pipelines](https://img.shields.io/bitbucket/pipelines/supabase-cli/setup-cli/master?style=flat-square&label=Bitbucket%20Canary)](https://bitbucket.org/supabase-cli/setup-cli/pipelines) [![Gitlab Pipeline Status](https://img.shields.io/gitlab/pipeline-status/sweatybridge%2Fsetup-cli?label=Gitlab%20Canary)](https://gitlab.com/sweatybridge/setup-cli/-/pipelines)
+
+[Supabase](https://supabase.io) is an open source Firebase alternative. We're building the features of Firebase using enterprise-grade open source tools.
+
+This repository contains all the functionality for the Supabase CLI.
+
+- [x] Running Supabase locally
+- [x] Managing database migrations
+- [x] Creating and deploying Supabase Functions
+- [x] Generating types directly from your database schema
+- [x] Making authenticated HTTP requests to the [Management API](https://supabase.com/docs/reference/api/introduction)
+
+## Getting started
+
+### Install the CLI
+
+Available via [NPM](https://www.npmjs.com) as a dev dependency. To install:
+
+```bash
+npm i supabase --save-dev
+```
+
+To install the beta release channel:
+
+```bash
+npm i supabase@beta --save-dev
+```
+
+When installing with yarn 4, you need to disable experimental fetch with the following Node.js option:
+
+```
+NODE_OPTIONS=--no-experimental-fetch yarn add supabase
+```
+
+> **Note**
+> For Bun versions below v1.0.17, you must add `supabase` as a [trusted dependency](https://bun.sh/guides/install/trusted) before running `bun add -D supabase`.
+
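+For example, following the `bun` job in this repository's install workflow
+(install.yml above), a brand-new project can mark the package as trusted before
+adding it; for an existing project, merge the `trustedDependencies` key into
+your package.json instead:
+
+```bash
+echo '{"trustedDependencies": ["supabase"]}' > package.json
+bun add -D supabase
+```
+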
+<details>
+  <summary>macOS</summary>
+
+  Available via [Homebrew](https://brew.sh). To install:
+
+  ```sh
+  brew install supabase/tap/supabase
+  ```
+
+  To install the beta release channel:
+
+  ```sh
+  brew install supabase/tap/supabase-beta
+  brew link --overwrite supabase-beta
+  ```
+
+  To upgrade:
+
+  ```sh
+  brew upgrade supabase
+  ```
+</details>
+ +
+<details>
+  <summary>Windows</summary>

+  Available via [Scoop](https://scoop.sh). To install:
+
+  ```powershell
+  scoop bucket add supabase https://github.com/supabase/scoop-bucket.git
+  scoop install supabase
+  ```
+
+  To upgrade:
+
+  ```powershell
+  scoop update supabase
+  ```
</details>
+ +
+<details>
+  <summary>Linux</summary>
+
+  Available via [Homebrew](https://brew.sh) and Linux packages.
+
+  #### via Homebrew
+
+  To install:
+
+  ```sh
+  brew install supabase/tap/supabase
+  ```
+
+  To upgrade:
+
+  ```sh
+  brew upgrade supabase
+  ```
+
+  #### via Linux packages
+
+  Linux packages are provided in [Releases](https://github.com/supabase/cli/releases). To install, download the `.apk`/`.deb`/`.rpm`/`.pkg.tar.zst` file depending on your package manager and run the respective commands.
+
+  ```sh
+  sudo apk add --allow-untrusted <...>.apk
+  ```
+
+  ```sh
+  sudo dpkg -i <...>.deb
+  ```
+
+  ```sh
+  sudo rpm -i <...>.rpm
+  ```
+
+  ```sh
+  sudo pacman -U <...>.pkg.tar.zst
+  ```
+</details>
+ +
+<details>
+  <summary>Other Platforms</summary>
+
+  You can also install the CLI via [go modules](https://go.dev/ref/mod#go-install) without the help of package managers.
+
+  ```sh
+  go install github.com/supabase/cli@latest
+  ```
+
+  Add a symlink to the binary in `$PATH` for easier access:
+
+  ```sh
+  ln -s "$(go env GOPATH)/bin/cli" /usr/bin/supabase
+  ```
+
+  This works on other non-standard Linux distros.
+</details>
+ +
+<details>
+  <summary>Community Maintained Packages</summary>
+
+  Available via [pkgx](https://pkgx.sh/). Package script [here](https://github.com/pkgxdev/pantry/blob/main/projects/supabase.com/cli/package.yml).
+  To install in your working directory:
+
+  ```bash
+  pkgx install supabase
+  ```
+
+  Available via [Nixpkgs](https://nixos.org/). Package script [here](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/tools/supabase-cli/default.nix).
+</details>
+ +### Run the CLI + +```bash +supabase bootstrap +``` + +Or using npx: + +```bash +npx supabase bootstrap +``` + +The bootstrap command will guide you through the process of setting up a Supabase project using one of the [starter](https://github.com/supabase-community/supabase-samples/blob/main/samples.json) templates. + +## Docs + +Command & config reference can be found [here](https://supabase.com/docs/reference/cli/about). + +## Breaking changes + +We follow semantic versioning for changes that directly impact CLI commands, flags, and configurations. + +However, due to dependencies on other service images, we cannot guarantee that schema migrations, seed.sql, and generated types will always work for the same CLI major version. If you need such guarantees, we encourage you to pin a specific version of CLI in package.json. + +## Developing + +To run from source: + +```sh +# Go >= 1.22 +go run . help +``` diff --git a/api/README.md b/api/README.md new file mode 100644 index 0000000..a867368 --- /dev/null +++ b/api/README.md @@ -0,0 +1,25 @@ +# Supabase OpenAPI Specification + +This directory contains the OpenAPI specification for Supabase Management APIs. + +It is used to automatically generate the Go [client](pkg/api/client.gen.go) and [types](pkg/api/types.gen.go). + +## Updating the specification + +The specification yaml is generated from our NestJS middleware. The latest release is viewable as [Swagger UI](https://api.supabase.com/api/v1). + +To make a new release: + +1. Update `beta.yaml` with the latest version from local development + +```bash +curl -o api/beta.yaml http://127.0.0.1:8080/api/v1-yaml +``` + +2. Regenerate the Go client and API types + +```bash +go generate +``` + +3. [Optional] Manually add [properties](https://swagger.io/docs/specification/basic-structure/) not generated by NestJS diff --git a/api/beta.yaml b/api/beta.yaml new file mode 100644 index 0000000..be28b6c --- /dev/null +++ b/api/beta.yaml @@ -0,0 +1,6195 @@ +openapi: 3.0.0 +paths: + /v1/branches/{branch_id}: + get: + operationId: v1-get-a-branch-config + summary: Get database branch config + description: Fetches configurations of the specified database branch + parameters: + - name: branch_id + required: true + in: path + description: Branch ID + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/BranchDetailResponse' + '500': + description: Failed to retrieve database branch + tags: + - Environments + security: + - bearer: [] + patch: + operationId: v1-update-a-branch-config + summary: Update database branch config + description: Updates the configuration of the specified database branch + parameters: + - name: branch_id + required: true + in: path + description: Branch ID + schema: + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateBranchBody' + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/BranchResponse' + '500': + description: Failed to update database branch + tags: + - Environments + security: + - bearer: [] + delete: + operationId: v1-delete-a-branch + summary: Delete a database branch + description: Deletes the specified database branch + parameters: + - name: branch_id + required: true + in: path + description: Branch ID + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/BranchDeleteResponse' + 
'500': + description: Failed to delete database branch + tags: + - Environments + security: + - bearer: [] + /v1/branches/{branch_id}/push: + post: + operationId: v1-push-a-branch + summary: Pushes a database branch + description: Pushes the specified database branch + parameters: + - name: branch_id + required: true + in: path + description: Branch ID + schema: + type: string + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/BranchUpdateResponse' + '500': + description: Failed to push database branch + tags: + - Environments + security: + - bearer: [] + /v1/branches/{branch_id}/reset: + post: + operationId: v1-reset-a-branch + summary: Resets a database branch + description: Resets the specified database branch + parameters: + - name: branch_id + required: true + in: path + description: Branch ID + schema: + type: string + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/BranchUpdateResponse' + '500': + description: Failed to reset database branch + tags: + - Environments + security: + - bearer: [] + /v1/projects: + get: + operationId: v1-list-all-projects + summary: List all projects + description: Returns a list of all projects you've previously created. + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/V1ProjectWithDatabaseResponse' + tags: + - Projects + security: + - bearer: [] + post: + operationId: v1-create-a-project + summary: Create a project + parameters: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/V1CreateProjectBodyDto' + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/V1ProjectResponse' + tags: + - Projects + security: + - bearer: [] + /v1/organizations: + get: + operationId: v1-list-all-organizations + summary: List all organizations + description: Returns a list of organizations that you currently belong to. 
+ parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/OrganizationResponseV1' + '500': + description: Unexpected error listing organizations + tags: + - Organizations + security: + - bearer: [] + post: + operationId: v1-create-an-organization + summary: Create an organization + parameters: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateOrganizationV1Dto' + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/OrganizationResponseV1' + '500': + description: Unexpected error creating an organization + tags: + - Organizations + security: + - bearer: [] + /v1/oauth/authorize: + get: + operationId: v1-authorize-user + summary: '[Beta] Authorize user through oauth' + parameters: + - name: client_id + required: true + in: query + schema: + type: string + - name: response_type + required: true + in: query + schema: + enum: + - code + - token + - id_token token + type: string + - name: redirect_uri + required: true + in: query + schema: + type: string + - name: scope + required: false + in: query + schema: + type: string + - name: state + required: false + in: query + schema: + type: string + - name: response_mode + required: false + in: query + schema: + type: string + - name: code_challenge + required: false + in: query + schema: + type: string + - name: code_challenge_method + required: false + in: query + schema: + enum: + - plain + - sha256 + - S256 + type: string + responses: + '303': + description: '' + tags: + - OAuth + security: + - oauth2: + - read + /v1/oauth/token: + post: + operationId: v1-exchange-oauth-token + summary: '[Beta] Exchange auth code for user''s access and refresh token' + parameters: [] + requestBody: + required: true + content: + application/x-www-form-urlencoded: + schema: + $ref: '#/components/schemas/OAuthTokenBody' + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/OAuthTokenResponse' + tags: + - OAuth + security: + - oauth2: + - write + /v1/oauth/revoke: + post: + operationId: v1-revoke-token + summary: '[Beta] Revoke oauth app authorization and it''s corresponding tokens' + parameters: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/OAuthRevokeTokenBodyDto' + responses: + '204': + description: '' + tags: + - OAuth + security: + - oauth2: + - write + /v1/snippets: + get: + operationId: v1-list-all-snippets + summary: Lists SQL snippets for the logged in user + parameters: + - name: cursor + required: false + in: query + schema: + type: string + - name: limit + required: false + in: query + schema: + type: string + minimum: 1 + maximum: 100 + - name: sort_by + required: false + in: query + schema: + enum: + - name + - inserted_at + type: string + - name: sort_order + required: false + in: query + schema: + enum: + - asc + - desc + type: string + - name: project_ref + required: false + in: query + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/SnippetList' + '500': + description: Failed to list user's SQL snippets + tags: + - Database + security: + - bearer: [] + /v1/snippets/{id}: + get: + operationId: v1-get-a-snippet + summary: Gets a specific SQL snippet + parameters: + - name: id + required: true + in: path + schema: + format: uuid + type: string + 
responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/SnippetResponse' + '500': + description: Failed to retrieve SQL snippet + tags: + - Database + security: + - bearer: [] + /v1/projects/{ref}/api-keys: + get: + operationId: v1-get-project-api-keys + summary: Get project api keys + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: reveal + required: true + in: query + schema: + type: boolean + responses: + '200': + description: '' + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ApiKeyResponse' + tags: + - Secrets + security: + - bearer: [] + post: + operationId: createApiKey + summary: '[Alpha] Creates a new API key for the project' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: reveal + required: true + in: query + schema: + type: boolean + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateApiKeyBody' + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKeyResponse' + tags: + - Secrets + security: + - bearer: [] + /v1/projects/{ref}/api-keys/{id}: + patch: + operationId: updateApiKey + summary: '[Alpha] Updates an API key for the project' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: id + required: true + in: path + schema: + type: string + - name: reveal + required: true + in: query + schema: + type: boolean + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateApiKeyBody' + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKeyResponse' + tags: + - Secrets + security: + - bearer: [] + get: + operationId: getApiKey + summary: '[Alpha] Get API key' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: id + required: true + in: path + schema: + type: string + - name: reveal + required: true + in: query + schema: + type: boolean + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKeyResponse' + tags: + - Secrets + security: + - bearer: [] + delete: + operationId: deleteApiKey + summary: '[Alpha] Deletes an API key for the project' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: id + required: true + in: path + schema: + type: string + - name: reveal + required: true + in: query + schema: + type: boolean + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKeyResponse' + '403': + description: '' + tags: + - Secrets + security: + - bearer: [] + /v1/projects/{ref}/branches: + get: + operationId: v1-list-all-branches + summary: List all database branches + description: Returns all database branches of the specified project. 
+ parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/BranchResponse' + '500': + description: Failed to retrieve database branches + tags: + - Environments + security: + - bearer: [] + post: + operationId: v1-create-a-branch + summary: Create a database branch + description: Creates a database branch from the specified project. + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateBranchBody' + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/BranchResponse' + '500': + description: Failed to create database branch + tags: + - Environments + security: + - bearer: [] + delete: + operationId: v1-disable-preview-branching + summary: Disables preview branching + description: Disables preview branching for the specified project + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + '500': + description: Failed to disable preview branching + tags: + - Environments + security: + - bearer: [] + /v1/projects/{ref}/custom-hostname: + get: + operationId: v1-get-hostname-config + summary: '[Beta] Gets project''s custom hostname config' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateCustomHostnameResponse' + '403': + description: '' + '500': + description: Failed to retrieve project's custom hostname config + tags: + - Domains + security: + - bearer: [] + delete: + operationId: v1-Delete hostname config + summary: '[Beta] Deletes a project''s custom hostname configuration' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + '403': + description: '' + '500': + description: Failed to delete project custom hostname configuration + tags: + - Domains + security: + - bearer: [] + /v1/projects/{ref}/custom-hostname/initialize: + post: + operationId: v1-update-hostname-config + summary: '[Beta] Updates project''s custom hostname configuration' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateCustomHostnameBody' + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateCustomHostnameResponse' + '403': + description: '' + '500': + description: Failed to update project custom hostname configuration + tags: + - Domains + security: + - bearer: [] + /v1/projects/{ref}/custom-hostname/reverify: + post: + operationId: v1-verify-dns-config + summary: >- + [Beta] Attempts to verify the DNS configuration for project's custom + hostname configuration + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + 
minLength: 20 + maxLength: 20 + type: string + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateCustomHostnameResponse' + '403': + description: '' + '500': + description: Failed to verify project custom hostname configuration + tags: + - Domains + security: + - bearer: [] + /v1/projects/{ref}/custom-hostname/activate: + post: + operationId: v1-activate-custom-hostname + summary: '[Beta] Activates a custom hostname for a project.' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateCustomHostnameResponse' + '403': + description: '' + '500': + description: Failed to activate project custom hostname configuration + tags: + - Domains + security: + - bearer: [] + /v1/projects/{ref}/network-bans/retrieve: + post: + operationId: v1-list-all-network-bans + summary: '[Beta] Gets project''s network bans' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/NetworkBanResponse' + '403': + description: '' + '500': + description: Failed to retrieve project's network bans + tags: + - Projects + security: + - bearer: [] + /v1/projects/{ref}/network-bans: + delete: + operationId: v1-delete-network-bans + summary: '[Beta] Remove network bans.' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/RemoveNetworkBanRequest' + responses: + '200': + description: '' + '403': + description: '' + '500': + description: Failed to remove network bans. 
+ tags: + - Projects + security: + - bearer: [] + /v1/projects/{ref}/network-restrictions: + get: + operationId: v1-get-network-restrictions + summary: '[Beta] Gets project''s network restrictions' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/NetworkRestrictionsResponse' + '403': + description: '' + '500': + description: Failed to retrieve project's network restrictions + tags: + - Projects + security: + - bearer: [] + /v1/projects/{ref}/network-restrictions/apply: + post: + operationId: v1-update-network-restrictions + summary: '[Beta] Updates project''s network restrictions' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/NetworkRestrictionsRequest' + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/NetworkRestrictionsResponse' + '403': + description: '' + '500': + description: Failed to update project network restrictions + tags: + - Projects + security: + - bearer: [] + /v1/projects/{ref}/pgsodium: + get: + operationId: v1-get-pgsodium-config + summary: '[Beta] Gets project''s pgsodium config' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/PgsodiumConfigResponse' + '403': + description: '' + '500': + description: Failed to retrieve project's pgsodium config + tags: + - Secrets + security: + - bearer: [] + put: + operationId: v1-update-pgsodium-config + summary: >- + [Beta] Updates project's pgsodium config. Updating the root_key can + cause all data encrypted with the older key to become inaccessible. 
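+ # Illustrative sketch (not part of the generated spec; assumes the
+ # https://api.supabase.com base URL): rotating the pgsodium root key, per
+ # the warning in the summary above. UpdatePgsodiumConfigBody carries only
+ # root_key:
+ #   PUT https://api.supabase.com/v1/projects/{ref}/pgsodium
+ #   Authorization: Bearer <access-token>
+ #   {"root_key": "<new-root-key>"}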
+ parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdatePgsodiumConfigBody' + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/PgsodiumConfigResponse' + '403': + description: '' + '500': + description: Failed to update project's pgsodium config + tags: + - Secrets + security: + - bearer: [] + /v1/projects/{ref}/postgrest: + get: + operationId: v1-get-postgrest-service-config + summary: Gets project's postgrest config + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/PostgrestConfigWithJWTSecretResponse' + '403': + description: '' + '500': + description: Failed to retrieve project's postgrest config + tags: + - Rest + security: + - bearer: [] + patch: + operationId: v1-update-postgrest-service-config + summary: Updates project's postgrest config + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdatePostgrestConfigBody' + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/V1PostgrestConfigResponse' + '403': + description: '' + '500': + description: Failed to update project's postgrest config + tags: + - Rest + security: + - bearer: [] + /v1/projects/{ref}: + get: + operationId: v1-get-project + summary: Gets a specific project that belongs to the authenticated user + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/V1ProjectWithDatabaseResponse' + '500': + description: Failed to retrieve project + tags: + - Projects + security: + - bearer: [] + delete: + operationId: v1-delete-a-project + summary: Deletes the given project + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/V1ProjectRefResponse' + '403': + description: '' + tags: + - Projects + security: + - bearer: [] + /v1/projects/{ref}/secrets: + get: + operationId: v1-list-all-secrets + summary: List all secrets + description: Returns all secrets you've previously added to the specified project. + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/SecretResponse' + '403': + description: '' + '500': + description: Failed to retrieve project's secrets + tags: + - Secrets + security: + - bearer: [] + post: + operationId: v1-bulk-create-secrets + summary: Bulk create secrets + description: Creates multiple secrets and adds them to the specified project. 
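+ # Illustrative sketch (not part of the generated spec; assumes the
+ # https://api.supabase.com base URL). Per CreateSecretBody, secret names
+ # must not start with the SUPABASE_ prefix:
+ #   POST https://api.supabase.com/v1/projects/{ref}/secrets
+ #   Authorization: Bearer <access-token>
+ #   [{"name": "STRIPE_SECRET_KEY", "value": "sk_test_..."}]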
+ parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/CreateSecretBody' + responses: + '201': + description: '' + '403': + description: '' + '500': + description: Failed to create project's secrets + tags: + - Secrets + security: + - bearer: [] + delete: + operationId: v1-bulk-delete-secrets + summary: Bulk delete secrets + description: Deletes all secrets with the given names from the specified project + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + type: array + items: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + type: object + '403': + description: '' + '500': + description: Failed to delete secrets with given names + tags: + - Secrets + security: + - bearer: [] + /v1/projects/{ref}/ssl-enforcement: + get: + operationId: v1-get-ssl-enforcement-config + summary: '[Beta] Get project''s SSL enforcement configuration.' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/SslEnforcementResponse' + '403': + description: '' + '500': + description: Failed to retrieve project's SSL enforcement config + tags: + - Database + security: + - bearer: [] + put: + operationId: v1-update-ssl-enforcement-config + summary: '[Beta] Update project''s SSL enforcement configuration.' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/SslEnforcementRequest' + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/SslEnforcementResponse' + '403': + description: '' + '500': + description: Failed to update project's SSL enforcement configuration. + tags: + - Database + security: + - bearer: [] + /v1/projects/{ref}/types/typescript: + get: + operationId: v1-generate-typescript-types + summary: Generate TypeScript types + description: Returns the TypeScript types of your schema for use with supabase-js. 
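+ # Illustrative sketch (not part of the generated spec; assumes the
+ # https://api.supabase.com base URL). included_schemas defaults to public:
+ #   GET https://api.supabase.com/v1/projects/{ref}/types/typescript?included_schemas=public
+ #   Authorization: Bearer <access-token>
+ # The generated definitions come back in TypescriptResponse.types.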
+ parameters: + - name: included_schemas + required: false + in: query + schema: + default: public + type: string + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/TypescriptResponse' + '403': + description: '' + '500': + description: Failed to generate TypeScript types + tags: + - Database + security: + - bearer: [] + /v1/projects/{ref}/vanity-subdomain: + get: + operationId: v1-get-vanity-subdomain-config + summary: '[Beta] Gets current vanity subdomain config' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/VanitySubdomainConfigResponse' + '403': + description: '' + '500': + description: Failed to get project vanity subdomain configuration + tags: + - Domains + security: + - bearer: [] + delete: + operationId: v1-deactivate-vanity-subdomain-config + summary: '[Beta] Deletes a project''s vanity subdomain configuration' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + '403': + description: '' + '500': + description: Failed to delete project vanity subdomain configuration + tags: + - Domains + security: + - bearer: [] + /v1/projects/{ref}/vanity-subdomain/check-availability: + post: + operationId: v1-check-vanity-subdomain-availability + summary: '[Beta] Checks vanity subdomain availability' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/VanitySubdomainBody' + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/SubdomainAvailabilityResponse' + '403': + description: '' + '500': + description: Failed to check project vanity subdomain configuration + tags: + - Domains + security: + - bearer: [] + /v1/projects/{ref}/vanity-subdomain/activate: + post: + operationId: v1-activate-vanity-subdomain-config + summary: '[Beta] Activates a vanity subdomain for a project.' 
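+ # Illustrative sketch (not part of the generated spec; assumes the
+ # https://api.supabase.com base URL). This endpoint and the
+ # check-availability endpoint above take the same VanitySubdomainBody:
+ #   POST https://api.supabase.com/v1/projects/{ref}/vanity-subdomain/activate
+ #   Authorization: Bearer <access-token>
+ #   {"vanity_subdomain": "my-project"}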
+ parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/VanitySubdomainBody' + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ActivateVanitySubdomainResponse' + '403': + description: '' + '500': + description: Failed to activate project vanity subdomain configuration + tags: + - Domains + security: + - bearer: [] + /v1/projects/{ref}/upgrade: + post: + operationId: v1-upgrade-postgres-version + summary: '[Beta] Upgrades the project''s Postgres version' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpgradeDatabaseBody' + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUpgradeInitiateResponse' + '403': + description: '' + '500': + description: Failed to initiate project upgrade + tags: + - Projects + security: + - bearer: [] + /v1/projects/{ref}/upgrade/eligibility: + get: + operationId: v1-get-postgres-upgrade-eligibility + summary: '[Beta] Returns the project''s eligibility for upgrades' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUpgradeEligibilityResponse' + '403': + description: '' + '500': + description: Failed to determine project upgrade eligibility + tags: + - Projects + security: + - bearer: [] + /v1/projects/{ref}/upgrade/status: + get: + operationId: v1-get-postgres-upgrade-status + summary: '[Beta] Gets the latest status of the project''s upgrade' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: tracking_id + required: false + in: query + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/DatabaseUpgradeStatusResponse' + '403': + description: '' + '500': + description: Failed to retrieve project upgrade status + tags: + - Projects + security: + - bearer: [] + /v1/projects/{ref}/readonly: + get: + operationId: v1-get-readonly-mode-status + summary: Returns project's readonly mode status + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ReadOnlyStatusResponse' + '500': + description: Failed to get project readonly mode status + tags: + - Database + security: + - bearer: [] + /v1/projects/{ref}/readonly/temporary-disable: + post: + operationId: v1-disable-readonly-mode-temporarily + summary: Disables project's readonly mode for the next 15 minutes + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '201': + description: '' + '500': + description: Failed to disable project's readonly mode + tags: + - Database + security: + - bearer: [] + /v1/projects/{ref}/read-replicas/setup: + post: + 
operationId: v1-setup-a-read-replica + summary: '[Beta] Set up a read replica' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/SetUpReadReplicaBody' + responses: + '201': + description: '' + '403': + description: '' + '500': + description: Failed to set up read replica + tags: + - Database + security: + - bearer: [] + /v1/projects/{ref}/read-replicas/remove: + post: + operationId: v1-remove-a-read-replica + summary: '[Beta] Remove a read replica' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/RemoveReadReplicaBody' + responses: + '201': + description: '' + '403': + description: '' + '500': + description: Failed to remove read replica + tags: + - Database + security: + - bearer: [] + /v1/projects/{ref}/health: + get: + operationId: v1-get-services-health + summary: Gets project's service health status + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: timeout_ms + required: false + in: query + schema: + minimum: 0 + maximum: 10000 + type: integer + - name: services + required: true + in: query + schema: + type: array + items: + type: string + enum: + - auth + - db + - pooler + - realtime + - rest + - storage + responses: + '200': + description: '' + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/V1ServiceHealthResponse' + '403': + description: '' + '500': + description: Failed to retrieve project's service health status + tags: + - Projects + security: + - bearer: [] + /v1/projects/{ref}/config/storage: + get: + operationId: v1-get-storage-config + summary: Gets project's storage config + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/StorageConfigResponse' + '403': + description: '' + '500': + description: Failed to retrieve project's storage config + tags: + - Storage + security: + - bearer: [] + patch: + operationId: v1-update-storage-config + summary: Updates project's storage config + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateStorageConfigBody' + responses: + '200': + description: '' + '403': + description: '' + '500': + description: Failed to update project's storage config + tags: + - Storage + security: + - bearer: [] + /v1/projects/{ref}/config/database/postgres: + get: + operationId: v1-get-postgres-config + summary: Gets project's Postgres config + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/PostgresConfigResponse' + '500': + description: Failed to retrieve project's Postgres config + tags: + - Database + security: + - bearer: [] + put: + operationId: 
v1-update-postgres-config + summary: Updates project's Postgres config + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdatePostgresConfigBody' + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/PostgresConfigResponse' + '403': + description: '' + '500': + description: Failed to update project's Postgres config + tags: + - Database + security: + - bearer: [] + /v1/projects/{ref}/config/database/pgbouncer: + get: + operationId: v1-get-project-pgbouncer-config + summary: Get project's pgbouncer config + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/V1PgbouncerConfigResponse' + '500': + description: Failed to retrieve project's pgbouncer config + tags: + - Database + /v1/projects/{ref}/config/database/pooler: + get: + operationId: v1-get-supavisor-config + summary: Gets project's supavisor config + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/SupavisorConfigResponse' + '500': + description: Failed to retrieve project's supavisor config + tags: + - Database + security: + - bearer: [] + patch: + operationId: v1-update-supavisor-config + summary: Updates project's supavisor config + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateSupavisorConfigBody' + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateSupavisorConfigResponse' + '403': + description: '' + '500': + description: Failed to update project's supavisor config + tags: + - Database + security: + - bearer: [] + /v1/projects/{ref}/config/auth: + get: + operationId: v1-get-auth-service-config + summary: Gets project's auth config + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/AuthConfigResponse' + '403': + description: '' + '500': + description: Failed to retrieve project's auth config + tags: + - Auth + security: + - bearer: [] + patch: + operationId: v1-update-auth-service-config + summary: Updates a project's auth config + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateAuthConfigBody' + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/AuthConfigResponse' + '403': + description: '' + '500': + description: Failed to update project's auth config + tags: + - Auth + security: + - bearer: [] + /v1/projects/{ref}/config/auth/third-party-auth: + post: + 
operationId: createTPAForProject + summary: Creates a new third-party auth integration + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateThirdPartyAuthBody' + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ThirdPartyAuth' + '403': + description: '' + tags: + - Auth + security: + - bearer: [] + get: + operationId: listTPAForProject + summary: '[Alpha] Lists all third-party auth integrations' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ThirdPartyAuth' + '403': + description: '' + tags: + - Auth + security: + - bearer: [] + /v1/projects/{ref}/config/auth/third-party-auth/{tpa_id}: + delete: + operationId: deleteTPAForProject + summary: '[Alpha] Removes a third-party auth integration' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: tpa_id + required: true + in: path + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ThirdPartyAuth' + '403': + description: '' + tags: + - Auth + security: + - bearer: [] + get: + operationId: getTPAForProject + summary: '[Alpha] Get a third-party integration' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: tpa_id + required: true + in: path + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ThirdPartyAuth' + '403': + description: '' + tags: + - Auth + security: + - bearer: [] + /v1/projects/{ref}/pause: + post: + operationId: v1-pause-a-project + summary: Pauses the given project + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + '403': + description: '' + tags: + - Projects + security: + - bearer: [] + /v1/projects/{ref}/restore: + get: + operationId: v1-list-available-restore-versions + summary: Lists available restore versions for the given project + parameters: + - name: ref + required: true + in: path + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: >- + #/components/schemas/GetProjectAvailableRestoreVersionsResponse + '403': + description: '' + tags: + - Projects + security: + - bearer: [] + post: + operationId: v1-restore-a-project + summary: Restores the given project + parameters: + - name: ref + required: true + in: path + schema: + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/RestoreProjectBodyDto' + responses: + '200': + description: '' + '403': + description: '' + tags: + - Projects + security: + - bearer: [] + /v1/projects/{ref}/restore/cancel: + post: + operationId: v1-cancel-a-project-restoration + summary: Cancels the given project restoration + parameters: + - name: ref + required: true + in: path + schema: + type: string + responses: 
+ '200': + description: '' + '403': + description: '' + tags: + - Projects + security: + - bearer: [] + /v1/projects/{ref}/analytics/endpoints/logs.all: + get: + operationId: getLogs + summary: Gets project's logs + parameters: + - name: iso_timestamp_end + required: false + in: query + schema: + type: string + - name: iso_timestamp_start + required: false + in: query + schema: + type: string + - name: sql + required: false + in: query + schema: + type: string + - name: ref + required: true + in: path + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/V1AnalyticsResponse' + '403': + description: '' + tags: + - Analytics + security: + - bearer: [] + /v1/projects/{ref}/database/query: + post: + operationId: v1-run-a-query + summary: '[Beta] Run sql query' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/V1RunQueryBody' + responses: + '201': + description: '' + content: + application/json: + schema: + type: object + '403': + description: '' + '500': + description: Failed to run sql query + tags: + - Database + security: + - bearer: [] + /v1/projects/{ref}/database/webhooks/enable: + post: + operationId: v1-enable-database-webhook + summary: '[Beta] Enables Database Webhooks on the project' + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '201': + description: '' + '403': + description: '' + '500': + description: Failed to enable Database Webhooks on the project + tags: + - Database + security: + - bearer: [] + /v1/projects/{ref}/database/context: + get: + operationId: getDatabaseMetadata + summary: Gets database metadata for the given project. + description: >- + This is an **experimental** endpoint. It is subject to change or removal + in future versions. Use it with caution, as it may not remain supported + or stable. + deprecated: true + parameters: + - name: ref + required: true + in: path + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/GetProjectDbMetadataResponseDto' + '403': + description: '' + tags: + - Database + security: + - bearer: [] + /v1/projects/{ref}/functions: + get: + operationId: v1-list-all-functions + summary: List all functions + description: Returns all functions you've previously added to the specified project. + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/FunctionResponse' + '403': + description: '' + '500': + description: Failed to retrieve project's functions + tags: + - Edge Functions + security: + - bearer: [] + post: + operationId: v1-create-a-function + summary: Create a function + description: Creates a function and adds it to the specified project. 
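+ # Illustrative sketch for the v1-run-a-query endpoint above (not part of
+ # the generated spec; the "query" field name is an assumption here, since
+ # V1RunQueryBody is defined outside this excerpt; assumes the
+ # https://api.supabase.com base URL):
+ #   POST https://api.supabase.com/v1/projects/{ref}/database/query
+ #   Authorization: Bearer <access-token>
+ #   {"query": "select count(*) from auth.users;"}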
+ parameters:
+ - name: ref
+ required: true
+ in: path
+ description: Project ref
+ schema:
+ minLength: 20
+ maxLength: 20
+ type: string
+ - name: slug
+ required: false
+ in: query
+ schema:
+ pattern: /^[A-Za-z0-9_-]+$/
+ type: string
+ - name: name
+ required: false
+ in: query
+ schema:
+ type: string
+ - name: verify_jwt
+ required: false
+ in: query
+ schema:
+ type: boolean
+ - name: import_map
+ required: false
+ in: query
+ schema:
+ type: boolean
+ - name: entrypoint_path
+ required: false
+ in: query
+ schema:
+ type: string
+ - name: import_map_path
+ required: false
+ in: query
+ schema:
+ type: string
+ - name: compute_multiplier
+ required: false
+ in: query
+ schema:
+ minimum: 1
+ maximum: 4
+ type: number
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/V1CreateFunctionBody'
+ application/vnd.denoland.eszip:
+ schema:
+ $ref: '#/components/schemas/V1CreateFunctionBody'
+ responses:
+ '201':
+ description: ''
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/FunctionResponse'
+ '403':
+ description: ''
+ '500':
+ description: Failed to create project's function
+ tags:
+ - Edge Functions
+ security:
+ - bearer: []
+ put:
+ operationId: v1-bulk-update-functions
+ summary: Bulk update functions
+ description: >-
+ Bulk update functions. It will create a new function or replace an
+ existing one. The operation is idempotent. NOTE: You will need to
+ manually bump the version.
+ parameters:
+ - name: ref
+ required: true
+ in: path
+ description: Project ref
+ schema:
+ minLength: 20
+ maxLength: 20
+ type: string
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ $ref: '#/components/schemas/BulkUpdateFunctionBody'
+ responses:
+ '200':
+ description: ''
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/BulkUpdateFunctionResponse'
+ '403':
+ description: ''
+ '500':
+ description: Failed to update functions
+ tags:
+ - Edge Functions
+ security:
+ - bearer: []
+ /v1/projects/{ref}/functions/deploy:
+ post:
+ operationId: v1-deploy-a-function
+ summary: Deploy a function
+ description: >-
+ A new endpoint to deploy functions. It will create the function if it
+ does not exist.
+ parameters:
+ - name: ref
+ required: true
+ in: path
+ description: Project ref
+ schema:
+ minLength: 20
+ maxLength: 20
+ type: string
+ - name: slug
+ required: false
+ in: query
+ schema:
+ pattern: /^[A-Za-z0-9_-]+$/
+ type: string
+ - name: bundleOnly
+ required: false
+ in: query
+ schema:
+ type: boolean
+ requestBody:
+ required: true
+ content:
+ multipart/form-data:
+ schema:
+ $ref: '#/components/schemas/FunctionDeployBody'
+ responses:
+ '201':
+ description: ''
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/DeployFunctionResponse'
+ '403':
+ description: ''
+ '500':
+ description: Failed to deploy function
+ tags:
+ - Edge Functions
+ security:
+ - bearer: []
+ /v1/projects/{ref}/functions/{function_slug}:
+ get:
+ operationId: v1-get-a-function
+ summary: Retrieve a function
+ description: Retrieves a function with the specified slug and project.
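+ # Illustrative sketch (not part of the generated spec; assumes the
+ # https://api.supabase.com base URL; function_slug must match
+ # /^[A-Za-z0-9_-]+$/):
+ #   GET https://api.supabase.com/v1/projects/{ref}/functions/hello-world
+ #   Authorization: Bearer <access-token>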
+ parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: function_slug + required: true + in: path + description: Function slug + schema: + pattern: /^[A-Za-z0-9_-]+$/ + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/FunctionSlugResponse' + '403': + description: '' + '500': + description: Failed to retrieve function with given slug + tags: + - Edge Functions + security: + - bearer: [] + patch: + operationId: v1-update-a-function + summary: Update a function + description: Updates a function with the specified slug and project. + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: function_slug + required: true + in: path + description: Function slug + schema: + pattern: /^[A-Za-z0-9_-]+$/ + type: string + - name: slug + required: false + in: query + schema: + pattern: /^[A-Za-z0-9_-]+$/ + type: string + - name: name + required: false + in: query + schema: + type: string + - name: verify_jwt + required: false + in: query + schema: + type: boolean + - name: import_map + required: false + in: query + schema: + type: boolean + - name: entrypoint_path + required: false + in: query + schema: + type: string + - name: import_map_path + required: false + in: query + schema: + type: string + - name: compute_multiplier + required: false + in: query + schema: + minimum: 1 + maximum: 4 + type: number + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/V1UpdateFunctionBody' + application/vnd.denoland.eszip: + schema: + $ref: '#/components/schemas/V1UpdateFunctionBody' + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/FunctionResponse' + '403': + description: '' + '500': + description: Failed to update function with given slug + tags: + - Edge Functions + security: + - bearer: [] + delete: + operationId: v1-delete-a-function + summary: Delete a function + description: Deletes a function with the specified slug from the specified project. + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: function_slug + required: true + in: path + description: Function slug + schema: + pattern: /^[A-Za-z0-9_-]+$/ + type: string + responses: + '200': + description: '' + '403': + description: '' + '500': + description: Failed to delete function with given slug + tags: + - Edge Functions + security: + - bearer: [] + /v1/projects/{ref}/functions/{function_slug}/body: + get: + operationId: v1-get-a-function-body + summary: Retrieve a function body + description: Retrieves a function body for the specified slug and project. 
+ parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: function_slug + required: true + in: path + description: Function slug + schema: + pattern: /^[A-Za-z0-9_-]+$/ + type: string + responses: + '200': + description: '' + '403': + description: '' + '500': + description: Failed to retrieve function body with given slug + tags: + - Edge Functions + security: + - bearer: [] + /v1/projects/{ref}/storage/buckets: + get: + operationId: v1-list-all-buckets + summary: Lists all buckets + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/V1StorageBucketResponse' + '403': + description: '' + '500': + description: Failed to get list of buckets + tags: + - Storage + security: + - bearer: [] + /v1/projects/{ref}/config/auth/sso/providers: + post: + operationId: v1-create-a-sso-provider + summary: Creates a new SSO provider + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateProviderBody' + responses: + '201': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/CreateProviderResponse' + '403': + description: '' + '404': + description: SAML 2.0 support is not enabled for this project + tags: + - Auth + security: + - bearer: [] + get: + operationId: v1-list-all-sso-provider + summary: Lists all SSO providers + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ListProvidersResponse' + '403': + description: '' + '404': + description: SAML 2.0 support is not enabled for this project + tags: + - Auth + security: + - bearer: [] + /v1/projects/{ref}/config/auth/sso/providers/{provider_id}: + get: + operationId: v1-get-a-sso-provider + summary: Gets a SSO provider by its UUID + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: provider_id + required: true + in: path + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/GetProviderResponse' + '403': + description: '' + '404': + description: >- + Either SAML 2.0 was not enabled for this project, or the provider + does not exist + tags: + - Auth + security: + - bearer: [] + put: + operationId: v1-update-a-sso-provider + summary: Updates a SSO provider by its UUID + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: provider_id + required: true + in: path + schema: + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateProviderBody' + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateProviderResponse' + '403': + description: '' + '404': + description: >- + Either SAML 2.0 was not enabled for this project, or the provider + 
does not exist + tags: + - Auth + security: + - bearer: [] + delete: + operationId: v1-delete-a-sso-provider + summary: Removes a SSO provider by its UUID + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + - name: provider_id + required: true + in: path + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteProviderResponse' + '403': + description: '' + '404': + description: >- + Either SAML 2.0 was not enabled for this project, or the provider + does not exist + tags: + - Auth + security: + - bearer: [] + /v1/projects/{ref}/database/backups: + get: + operationId: v1-list-all-backups + summary: Lists all backups + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/V1BackupsResponse' + '500': + description: Failed to get backups + tags: + - Database + security: + - bearer: [] + /v1/projects/{ref}/database/backups/restore-pitr: + post: + operationId: v1-restore-pitr-backup + summary: Restores a PITR backup for a database + parameters: + - name: ref + required: true + in: path + description: Project ref + schema: + minLength: 20 + maxLength: 20 + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/V1RestorePitrBody' + responses: + '201': + description: '' + tags: + - Database + security: + - bearer: [] + /v1/organizations/{slug}/members: + get: + operationId: v1-list-organization-members + summary: List members of an organization + parameters: + - name: slug + required: true + in: path + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/V1OrganizationMemberResponse' + tags: + - Organizations + security: + - bearer: [] + /v1/organizations/{slug}: + get: + operationId: v1-get-an-organization + summary: Gets information about the organization + parameters: + - name: slug + required: true + in: path + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/V1OrganizationSlugResponse' + tags: + - Organizations + security: + - bearer: [] +info: + title: Supabase API (v1) + description: >- + Supabase API generated from the OpenAPI specification.
Visit
+ [https://supabase.com/docs](https://supabase.com/docs) for complete
+ documentation.
+ version: 1.0.0
+ contact: {}
+tags:
+ - name: Auth
+ description: Auth related endpoints
+ - name: Database
+ description: Database related endpoints
+ - name: Domains
+ description: Domains related endpoints
+ - name: Edge Functions
+ description: Edge related endpoints
+ - name: Environments
+ description: Environments related endpoints
+ - name: OAuth
+ description: OAuth related endpoints
+ - name: Organizations
+ description: Organizations related endpoints
+ - name: Projects
+ description: Projects related endpoints
+ - name: Rest
+ description: Rest related endpoints
+ - name: Secrets
+ description: Secrets related endpoints
+ - name: Storage
+ description: Storage related endpoints
+servers: []
+components:
+ securitySchemes:
+ bearer:
+ scheme: bearer
+ bearerFormat: JWT
+ type: http
+ schemas:
+ BranchDetailResponse:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - INACTIVE
+ - ACTIVE_HEALTHY
+ - ACTIVE_UNHEALTHY
+ - COMING_UP
+ - UNKNOWN
+ - GOING_DOWN
+ - INIT_FAILED
+ - REMOVED
+ - RESTORING
+ - UPGRADING
+ - PAUSING
+ - RESTORE_FAILED
+ - RESTARTING
+ - PAUSE_FAILED
+ - RESIZING
+ db_port:
+ type: integer
+ ref:
+ type: string
+ postgres_version:
+ type: string
+ postgres_engine:
+ type: string
+ release_channel:
+ type: string
+ db_host:
+ type: string
+ db_user:
+ type: string
+ db_pass:
+ type: string
+ jwt_secret:
+ type: string
+ required:
+ - status
+ - db_port
+ - ref
+ - postgres_version
+ - postgres_engine
+ - release_channel
+ - db_host
+ UpdateBranchBody:
+ type: object
+ properties:
+ reset_on_push:
+ type: boolean
+ deprecated: true
+ description: >-
+ This field is deprecated and will be ignored. Use the
+ v1-reset-a-branch endpoint directly instead.
+ branch_name:
+ type: string
+ git_branch:
+ type: string
+ persistent:
+ type: boolean
+ status:
+ type: string
+ enum:
+ - CREATING_PROJECT
+ - RUNNING_MIGRATIONS
+ - MIGRATIONS_PASSED
+ - MIGRATIONS_FAILED
+ - FUNCTIONS_DEPLOYED
+ - FUNCTIONS_FAILED
+ BranchResponse:
+ type: object
+ properties:
+ pr_number:
+ type: integer
+ format: int32
+ latest_check_run_id:
+ type: number
+ deprecated: true
+ description: This field is deprecated and will not be populated.
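+ # Illustrative BranchResponse instance (not part of the generated spec; all
+ # values are made up; project refs are exactly 20 characters):
+ #   {"id": "...", "name": "feat/login", "project_ref": "abcdefghijklmnopqrst",
+ #    "parent_project_ref": "tsrqponmlkjihgfedcba", "is_default": false,
+ #    "persistent": false, "status": "MIGRATIONS_PASSED",
+ #    "created_at": "2025-05-13T06:00:38Z", "updated_at": "2025-05-13T06:00:38Z"}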
+ status: + type: string + enum: + - CREATING_PROJECT + - RUNNING_MIGRATIONS + - MIGRATIONS_PASSED + - MIGRATIONS_FAILED + - FUNCTIONS_DEPLOYED + - FUNCTIONS_FAILED + id: + type: string + name: + type: string + project_ref: + type: string + parent_project_ref: + type: string + is_default: + type: boolean + git_branch: + type: string + persistent: + type: boolean + created_at: + type: string + updated_at: + type: string + required: + - status + - id + - name + - project_ref + - parent_project_ref + - is_default + - persistent + - created_at + - updated_at + BranchDeleteResponse: + type: object + properties: + message: + type: string + required: + - message + BranchUpdateResponse: + type: object + properties: + workflow_run_id: + type: string + message: + type: string + required: + - workflow_run_id + - message + V1DatabaseResponse: + type: object + properties: + host: + type: string + description: Database host + version: + type: string + description: Database version + postgres_engine: + type: string + description: Database engine + release_channel: + type: string + description: Release channel + required: + - host + - version + - postgres_engine + - release_channel + V1ProjectWithDatabaseResponse: + type: object + properties: + id: + type: string + description: Id of your project + organization_id: + type: string + description: Slug of your organization + name: + type: string + description: Name of your project + region: + type: string + description: Region of your project + example: us-east-1 + created_at: + type: string + description: Creation timestamp + example: '2023-03-29T16:32:59Z' + status: + type: string + enum: + - INACTIVE + - ACTIVE_HEALTHY + - ACTIVE_UNHEALTHY + - COMING_UP + - UNKNOWN + - GOING_DOWN + - INIT_FAILED + - REMOVED + - RESTORING + - UPGRADING + - PAUSING + - RESTORE_FAILED + - RESTARTING + - PAUSE_FAILED + - RESIZING + database: + $ref: '#/components/schemas/V1DatabaseResponse' + required: + - id + - organization_id + - name + - region + - created_at + - status + - database + V1CreateProjectBodyDto: + type: object + properties: + db_pass: + type: string + description: Database password + name: + type: string + description: Name of your project + organization_id: + type: string + description: Slug of your organization + plan: + type: string + enum: + - free + - pro + deprecated: true + description: >- + Subscription Plan is now set on organization level and is ignored in + this request + region: + type: string + description: Region you want your server to reside in + enum: + - us-east-1 + - us-east-2 + - us-west-1 + - us-west-2 + - ap-east-1 + - ap-southeast-1 + - ap-northeast-1 + - ap-northeast-2 + - ap-southeast-2 + - eu-west-1 + - eu-west-2 + - eu-west-3 + - eu-north-1 + - eu-central-1 + - eu-central-2 + - ca-central-1 + - ap-south-1 + - sa-east-1 + kps_enabled: + type: boolean + deprecated: true + description: This field is deprecated and is ignored in this request + desired_instance_size: + type: string + enum: + - micro + - small + - medium + - large + - xlarge + - 2xlarge + - 4xlarge + - 8xlarge + - 12xlarge + - 16xlarge + template_url: + type: string + format: uri + description: Template URL used to create the project from the CLI. 
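+ # Illustrative minimal V1CreateProjectBodyDto payload (not part of the
+ # generated spec; values are made up; see the required list below):
+ #   {"db_pass": "<strong-password>", "name": "my-project",
+ #    "organization_id": "<org-slug>", "region": "us-east-1"}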
+ example: >- + https://github.com/supabase/supabase/tree/master/examples/slack-clone/nextjs-slack-clone + required: + - db_pass + - name + - organization_id + - region + additionalProperties: false + hideDefinitions: + - release_channel + - postgres_engine + V1ProjectResponse: + type: object + properties: + id: + type: string + description: Id of your project + organization_id: + type: string + description: Slug of your organization + name: + type: string + description: Name of your project + region: + type: string + description: Region of your project + example: us-east-1 + created_at: + type: string + description: Creation timestamp + example: '2023-03-29T16:32:59Z' + status: + type: string + enum: + - INACTIVE + - ACTIVE_HEALTHY + - ACTIVE_UNHEALTHY + - COMING_UP + - UNKNOWN + - GOING_DOWN + - INIT_FAILED + - REMOVED + - RESTORING + - UPGRADING + - PAUSING + - RESTORE_FAILED + - RESTARTING + - PAUSE_FAILED + - RESIZING + required: + - id + - organization_id + - name + - region + - created_at + - status + OrganizationResponseV1: + type: object + properties: + id: + type: string + name: + type: string + required: + - id + - name + CreateOrganizationV1Dto: + type: object + properties: + name: + type: string + required: + - name + additionalProperties: false + OAuthTokenBody: + type: object + properties: + grant_type: + enum: + - authorization_code + - refresh_token + type: string + client_id: + type: string + client_secret: + type: string + code: + type: string + code_verifier: + type: string + redirect_uri: + type: string + refresh_token: + type: string + required: + - grant_type + - client_id + - client_secret + OAuthTokenResponse: + type: object + properties: + expires_in: + type: integer + format: int64 + token_type: + type: string + enum: + - Bearer + access_token: + type: string + refresh_token: + type: string + required: + - expires_in + - token_type + - access_token + - refresh_token + OAuthRevokeTokenBodyDto: + type: object + properties: + client_id: + type: string + format: uuid + client_secret: + type: string + refresh_token: + type: string + required: + - client_id + - client_secret + - refresh_token + additionalProperties: false + SnippetProject: + type: object + properties: + id: + type: integer + format: int64 + name: + type: string + required: + - id + - name + SnippetUser: + type: object + properties: + id: + type: integer + format: int64 + username: + type: string + required: + - id + - username + SnippetMeta: + type: object + properties: + id: + type: string + inserted_at: + type: string + updated_at: + type: string + type: + type: string + enum: + - sql + visibility: + type: string + enum: + - user + - project + - org + - public + name: + type: string + description: + type: string + nullable: true + project: + $ref: '#/components/schemas/SnippetProject' + owner: + $ref: '#/components/schemas/SnippetUser' + updated_by: + $ref: '#/components/schemas/SnippetUser' + required: + - id + - inserted_at + - updated_at + - type + - visibility + - name + - description + - project + - owner + - updated_by + SnippetList: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/SnippetMeta' + cursor: + type: string + required: + - data + SnippetContent: + type: object + properties: + favorite: + type: boolean + schema_version: + type: string + sql: + type: string + required: + - favorite + - schema_version + - sql + SnippetResponse: + type: object + properties: + id: + type: string + inserted_at: + type: string + updated_at: + type: string + type: + 
type: string + enum: + - sql + visibility: + enum: + - user + - project + - org + - public + type: string + name: + type: string + description: + type: string + nullable: true + project: + $ref: '#/components/schemas/SnippetProject' + owner: + $ref: '#/components/schemas/SnippetUser' + updated_by: + $ref: '#/components/schemas/SnippetUser' + content: + $ref: '#/components/schemas/SnippetContent' + required: + - id + - inserted_at + - updated_at + - type + - visibility + - name + - description + - project + - owner + - updated_by + - content + ApiKeySecretJWTTemplate: + type: object + properties: + role: + type: string + required: + - role + ApiKeyResponse: + type: object + properties: + type: + nullable: true + type: string + enum: + - publishable + - secret + - legacy + name: + type: string + api_key: + type: string + id: + type: string + nullable: true + prefix: + type: string + nullable: true + description: + type: string + nullable: true + hash: + type: string + nullable: true + secret_jwt_template: + nullable: true + allOf: + - $ref: '#/components/schemas/ApiKeySecretJWTTemplate' + inserted_at: + type: string + nullable: true + updated_at: + type: string + nullable: true + required: + - name + - api_key + CreateApiKeyBody: + type: object + properties: + type: + enum: + - publishable + - secret + type: string + description: + type: string + nullable: true + secret_jwt_template: + nullable: true + allOf: + - $ref: '#/components/schemas/ApiKeySecretJWTTemplate' + required: + - type + UpdateApiKeyBody: + type: object + properties: + description: + type: string + nullable: true + secret_jwt_template: + nullable: true + allOf: + - $ref: '#/components/schemas/ApiKeySecretJWTTemplate' + DesiredInstanceSize: + type: string + enum: + - micro + - small + - medium + - large + - xlarge + - 2xlarge + - 4xlarge + - 8xlarge + - 12xlarge + - 16xlarge + ReleaseChannel: + type: string + enum: + - internal + - alpha + - beta + - ga + - withdrawn + - preview + PostgresEngine: + type: string + description: >- + Postgres engine version. If not provided, the latest version will be + used. 
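+ # Illustrative CreateApiKeyBody payload for the API-key schemas above (not
+ # part of the generated spec; values are made up):
+ #   {"type": "secret", "description": "server-side key",
+ #    "secret_jwt_template": {"role": "service_role"}}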
+ enum: + - '15' + - 17-oriole + CreateBranchBody: + type: object + properties: + desired_instance_size: + $ref: '#/components/schemas/DesiredInstanceSize' + release_channel: + $ref: '#/components/schemas/ReleaseChannel' + postgres_engine: + $ref: '#/components/schemas/PostgresEngine' + branch_name: + type: string + git_branch: + type: string + persistent: + type: boolean + region: + type: string + required: + - branch_name + ValidationRecord: + type: object + properties: + txt_name: + type: string + txt_value: + type: string + required: + - txt_name + - txt_value + ValidationError: + type: object + properties: + message: + type: string + required: + - message + SslValidation: + type: object + properties: + status: + type: string + validation_records: + type: array + items: + $ref: '#/components/schemas/ValidationRecord' + validation_errors: + type: array + items: + $ref: '#/components/schemas/ValidationError' + required: + - status + - validation_records + OwnershipVerification: + type: object + properties: + type: + type: string + name: + type: string + value: + type: string + required: + - type + - name + - value + CustomHostnameDetails: + type: object + properties: + id: + type: string + hostname: + type: string + ssl: + $ref: '#/components/schemas/SslValidation' + ownership_verification: + $ref: '#/components/schemas/OwnershipVerification' + custom_origin_server: + type: string + verification_errors: + type: array + items: + type: string + status: + type: string + required: + - id + - hostname + - ssl + - ownership_verification + - custom_origin_server + - status + CfResponse: + type: object + properties: + success: + type: boolean + errors: + type: array + items: + type: object + messages: + type: array + items: + type: object + result: + $ref: '#/components/schemas/CustomHostnameDetails' + required: + - success + - errors + - messages + - result + UpdateCustomHostnameResponse: + type: object + properties: + status: + enum: + - 1_not_started + - 2_initiated + - 3_challenge_verified + - 4_origin_setup_completed + - 5_services_reconfigured + type: string + custom_hostname: + type: string + data: + $ref: '#/components/schemas/CfResponse' + required: + - status + - custom_hostname + - data + UpdateCustomHostnameBody: + type: object + properties: + custom_hostname: + type: string + required: + - custom_hostname + NetworkBanResponse: + type: object + properties: + banned_ipv4_addresses: + type: array + items: + type: string + required: + - banned_ipv4_addresses + RemoveNetworkBanRequest: + type: object + properties: + ipv4_addresses: + type: array + items: + type: string + required: + - ipv4_addresses + NetworkRestrictionsRequest: + type: object + properties: + dbAllowedCidrs: + type: array + items: + type: string + dbAllowedCidrsV6: + type: array + items: + type: string + NetworkRestrictionsResponse: + type: object + properties: + entitlement: + enum: + - disallowed + - allowed + type: string + config: + $ref: '#/components/schemas/NetworkRestrictionsRequest' + old_config: + $ref: '#/components/schemas/NetworkRestrictionsRequest' + status: + enum: + - stored + - applied + type: string + required: + - entitlement + - config + - status + PgsodiumConfigResponse: + type: object + properties: + root_key: + type: string + required: + - root_key + UpdatePgsodiumConfigBody: + type: object + properties: + root_key: + type: string + required: + - root_key + PostgrestConfigWithJWTSecretResponse: + type: object + properties: + max_rows: + type: integer + db_pool: + type: integer + nullable: true + 
description: >- + If `null`, the value is automatically configured based on compute + size. + db_schema: + type: string + db_extra_search_path: + type: string + jwt_secret: + type: string + required: + - max_rows + - db_pool + - db_schema + - db_extra_search_path + UpdatePostgrestConfigBody: + type: object + properties: + max_rows: + type: integer + minimum: 0 + maximum: 1000000 + db_pool: + type: integer + minimum: 0 + maximum: 1000 + db_extra_search_path: + type: string + db_schema: + type: string + V1PostgrestConfigResponse: + type: object + properties: + max_rows: + type: integer + db_pool: + type: integer + nullable: true + description: >- + If `null`, the value is automatically configured based on compute + size. + db_schema: + type: string + db_extra_search_path: + type: string + required: + - max_rows + - db_pool + - db_schema + - db_extra_search_path + V1ProjectRefResponse: + type: object + properties: + id: + type: integer + format: int64 + ref: + type: string + name: + type: string + required: + - id + - ref + - name + SecretResponse: + type: object + properties: + name: + type: string + value: + type: string + required: + - name + - value + CreateSecretBody: + type: object + properties: + name: + type: string + maxLength: 256 + pattern: /^(?!SUPABASE_).*/ + description: Secret name must not start with the SUPABASE_ prefix. + example: string + value: + type: string + maxLength: 24576 + required: + - name + - value + SslEnforcements: + type: object + properties: + database: + type: boolean + required: + - database + SslEnforcementResponse: + type: object + properties: + currentConfig: + $ref: '#/components/schemas/SslEnforcements' + appliedSuccessfully: + type: boolean + required: + - currentConfig + - appliedSuccessfully + SslEnforcementRequest: + type: object + properties: + requestedConfig: + $ref: '#/components/schemas/SslEnforcements' + required: + - requestedConfig + TypescriptResponse: + type: object + properties: + types: + type: string + required: + - types + VanitySubdomainConfigResponse: + type: object + properties: + status: + enum: + - not-used + - custom-domain-used + - active + type: string + custom_domain: + type: string + required: + - status + VanitySubdomainBody: + type: object + properties: + vanity_subdomain: + type: string + required: + - vanity_subdomain + SubdomainAvailabilityResponse: + type: object + properties: + available: + type: boolean + required: + - available + ActivateVanitySubdomainResponse: + type: object + properties: + custom_domain: + type: string + required: + - custom_domain + UpgradeDatabaseBody: + type: object + properties: + release_channel: + $ref: '#/components/schemas/ReleaseChannel' + target_version: + type: string + required: + - release_channel + - target_version + ProjectUpgradeInitiateResponse: + type: object + properties: + tracking_id: + type: string + required: + - tracking_id + ProjectVersion: + type: object + properties: + postgres_version: + $ref: '#/components/schemas/PostgresEngine' + release_channel: + $ref: '#/components/schemas/ReleaseChannel' + app_version: + type: string + required: + - postgres_version + - release_channel + - app_version + ProjectUpgradeEligibilityResponse: + type: object + properties: + current_app_version_release_channel: + $ref: '#/components/schemas/ReleaseChannel' + duration_estimate_hours: + type: integer + eligible: + type: boolean + current_app_version: + type: string + latest_app_version: + type: string + target_upgrade_versions: + type: array + items: + $ref: 
'#/components/schemas/ProjectVersion' + potential_breaking_changes: + type: array + items: + type: string + legacy_auth_custom_roles: + type: array + items: + type: string + extension_dependent_objects: + type: array + items: + type: string + required: + - current_app_version_release_channel + - duration_estimate_hours + - eligible + - current_app_version + - latest_app_version + - target_upgrade_versions + - potential_breaking_changes + - legacy_auth_custom_roles + - extension_dependent_objects + DatabaseUpgradeStatus: + type: object + properties: + target_version: + type: integer + status: + enum: + - 0 + - 1 + - 2 + type: integer + initiated_at: + type: string + latest_status_at: + type: string + error: + type: string + enum: + - 1_upgraded_instance_launch_failed + - 2_volume_detachment_from_upgraded_instance_failed + - 3_volume_attachment_to_original_instance_failed + - 4_data_upgrade_initiation_failed + - 5_data_upgrade_completion_failed + - 6_volume_detachment_from_original_instance_failed + - 7_volume_attachment_to_upgraded_instance_failed + - 8_upgrade_completion_failed + - 9_post_physical_backup_failed + progress: + type: string + enum: + - 0_requested + - 1_started + - 2_launched_upgraded_instance + - 3_detached_volume_from_upgraded_instance + - 4_attached_volume_to_original_instance + - 5_initiated_data_upgrade + - 6_completed_data_upgrade + - 7_detached_volume_from_original_instance + - 8_attached_volume_to_upgraded_instance + - 9_completed_upgrade + - 10_completed_post_physical_backup + required: + - target_version + - status + - initiated_at + - latest_status_at + DatabaseUpgradeStatusResponse: + type: object + properties: + databaseUpgradeStatus: + nullable: true + allOf: + - $ref: '#/components/schemas/DatabaseUpgradeStatus' + required: + - databaseUpgradeStatus + ReadOnlyStatusResponse: + type: object + properties: + enabled: + type: boolean + override_enabled: + type: boolean + override_active_until: + type: string + required: + - enabled + - override_enabled + - override_active_until + SetUpReadReplicaBody: + type: object + properties: + read_replica_region: + type: string + enum: + - us-east-1 + - us-east-2 + - us-west-1 + - us-west-2 + - ap-east-1 + - ap-southeast-1 + - ap-northeast-1 + - ap-northeast-2 + - ap-southeast-2 + - eu-west-1 + - eu-west-2 + - eu-west-3 + - eu-north-1 + - eu-central-1 + - eu-central-2 + - ca-central-1 + - ap-south-1 + - sa-east-1 + description: Region you want your read replica to reside in + example: us-east-1 + required: + - read_replica_region + RemoveReadReplicaBody: + type: object + properties: + database_identifier: + type: string + required: + - database_identifier + AuthHealthResponse: + type: object + properties: + name: + type: string + enum: + - GoTrue + required: + - name + RealtimeHealthResponse: + type: object + properties: + connected_cluster: + type: integer + required: + - connected_cluster + V1ServiceHealthResponse: + type: object + properties: + info: + oneOf: + - $ref: '#/components/schemas/AuthHealthResponse' + - $ref: '#/components/schemas/RealtimeHealthResponse' + name: + enum: + - auth + - db + - pooler + - realtime + - rest + - storage + type: string + healthy: + type: boolean + status: + enum: + - COMING_UP + - ACTIVE_HEALTHY + - UNHEALTHY + type: string + error: + type: string + required: + - name + - healthy + - status + StorageFeatureImageTransformation: + type: object + properties: + enabled: + type: boolean + required: + - enabled + StorageFeatureS3Protocol: + type: object + properties: + enabled: + type: 
boolean + required: + - enabled + StorageFeatures: + type: object + properties: + imageTransformation: + $ref: '#/components/schemas/StorageFeatureImageTransformation' + s3Protocol: + $ref: '#/components/schemas/StorageFeatureS3Protocol' + required: + - imageTransformation + - s3Protocol + StorageConfigResponse: + type: object + properties: + fileSizeLimit: + type: integer + format: int64 + features: + $ref: '#/components/schemas/StorageFeatures' + required: + - fileSizeLimit + - features + UpdateStorageConfigBody: + type: object + properties: + fileSizeLimit: + type: integer + minimum: 0 + maximum: 53687091200 + format: int64 + features: + $ref: '#/components/schemas/StorageFeatures' + PostgresConfigResponse: + type: object + properties: + effective_cache_size: + type: string + logical_decoding_work_mem: + type: string + maintenance_work_mem: + type: string + track_activity_query_size: + type: string + max_connections: + type: integer + minimum: 1 + maximum: 262143 + max_locks_per_transaction: + type: integer + minimum: 10 + maximum: 2147483640 + max_parallel_maintenance_workers: + type: integer + minimum: 0 + maximum: 1024 + max_parallel_workers: + type: integer + minimum: 0 + maximum: 1024 + max_parallel_workers_per_gather: + type: integer + minimum: 0 + maximum: 1024 + max_replication_slots: + type: integer + max_slot_wal_keep_size: + type: string + max_standby_archive_delay: + type: string + max_standby_streaming_delay: + type: string + max_wal_size: + type: string + max_wal_senders: + type: integer + max_worker_processes: + type: integer + minimum: 0 + maximum: 262143 + shared_buffers: + type: string + statement_timeout: + type: string + track_commit_timestamp: + type: boolean + wal_keep_size: + type: string + wal_sender_timeout: + type: string + work_mem: + type: string + session_replication_role: + enum: + - origin + - replica + - local + type: string + UpdatePostgresConfigBody: + type: object + properties: + effective_cache_size: + type: string + logical_decoding_work_mem: + type: string + maintenance_work_mem: + type: string + track_activity_query_size: + type: string + max_connections: + type: integer + minimum: 1 + maximum: 262143 + max_locks_per_transaction: + type: integer + minimum: 10 + maximum: 2147483640 + max_parallel_maintenance_workers: + type: integer + minimum: 0 + maximum: 1024 + max_parallel_workers: + type: integer + minimum: 0 + maximum: 1024 + max_parallel_workers_per_gather: + type: integer + minimum: 0 + maximum: 1024 + max_replication_slots: + type: integer + max_slot_wal_keep_size: + type: string + max_standby_archive_delay: + type: string + max_standby_streaming_delay: + type: string + max_wal_size: + type: string + max_wal_senders: + type: integer + max_worker_processes: + type: integer + minimum: 0 + maximum: 262143 + shared_buffers: + type: string + statement_timeout: + type: string + track_commit_timestamp: + type: boolean + wal_keep_size: + type: string + wal_sender_timeout: + type: string + work_mem: + type: string + restart_database: + type: boolean + session_replication_role: + enum: + - origin + - replica + - local + type: string + V1PgbouncerConfigResponse: + type: object + properties: + pool_mode: + type: string + enum: + - transaction + - session + - statement + default_pool_size: + type: number + ignore_startup_parameters: + type: string + max_client_conn: + type: number + connection_string: + type: string + SupavisorConfigResponse: + type: object + properties: + database_type: + type: string + enum: + - PRIMARY + - READ_REPLICA + db_port: + 
type: integer + default_pool_size: + type: integer + nullable: true + max_client_conn: + type: integer + nullable: true + identifier: + type: string + is_using_scram_auth: + type: boolean + db_user: + type: string + db_host: + type: string + db_name: + type: string + connectionString: + type: string + pool_mode: + enum: + - transaction + - session + type: string + required: + - database_type + - db_port + - default_pool_size + - max_client_conn + - identifier + - is_using_scram_auth + - db_user + - db_host + - db_name + - connectionString + - pool_mode + UpdateSupavisorConfigBody: + type: object + properties: + default_pool_size: + type: integer + nullable: true + minimum: 0 + maximum: 1000 + pool_mode: + enum: + - transaction + - session + type: string + deprecated: true + description: This field is deprecated and is ignored in this request + UpdateSupavisorConfigResponse: + type: object + properties: + default_pool_size: + type: integer + nullable: true + pool_mode: + enum: + - transaction + - session + type: string + required: + - default_pool_size + - pool_mode + AuthConfigResponse: + type: object + properties: + api_max_request_duration: + type: integer + nullable: true + db_max_pool_size: + type: integer + nullable: true + jwt_exp: + type: integer + nullable: true + mailer_otp_exp: + type: integer + mailer_otp_length: + type: integer + nullable: true + mfa_max_enrolled_factors: + type: integer + nullable: true + mfa_phone_otp_length: + type: integer + mfa_phone_max_frequency: + type: integer + nullable: true + password_min_length: + type: integer + nullable: true + rate_limit_anonymous_users: + type: integer + nullable: true + rate_limit_email_sent: + type: integer + nullable: true + rate_limit_sms_sent: + type: integer + nullable: true + rate_limit_token_refresh: + type: integer + nullable: true + rate_limit_verify: + type: integer + nullable: true + rate_limit_otp: + type: integer + nullable: true + security_refresh_token_reuse_interval: + type: integer + nullable: true + sessions_inactivity_timeout: + type: integer + nullable: true + sessions_timebox: + type: integer + nullable: true + sms_max_frequency: + type: integer + nullable: true + sms_otp_exp: + type: integer + nullable: true + sms_otp_length: + type: integer + smtp_max_frequency: + type: integer + nullable: true + disable_signup: + type: boolean + nullable: true + external_anonymous_users_enabled: + type: boolean + nullable: true + external_apple_additional_client_ids: + type: string + nullable: true + external_apple_client_id: + type: string + nullable: true + external_apple_enabled: + type: boolean + nullable: true + external_apple_secret: + type: string + nullable: true + external_azure_client_id: + type: string + nullable: true + external_azure_enabled: + type: boolean + nullable: true + external_azure_secret: + type: string + nullable: true + external_azure_url: + type: string + nullable: true + external_bitbucket_client_id: + type: string + nullable: true + external_bitbucket_enabled: + type: boolean + nullable: true + external_bitbucket_secret: + type: string + nullable: true + external_discord_client_id: + type: string + nullable: true + external_discord_enabled: + type: boolean + nullable: true + external_discord_secret: + type: string + nullable: true + external_email_enabled: + type: boolean + nullable: true + external_facebook_client_id: + type: string + nullable: true + external_facebook_enabled: + type: boolean + nullable: true + external_facebook_secret: + type: string + nullable: true + 
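+ # Note: each external_<provider>_* OAuth setting below repeats the same
+ # nullable trio of enabled flag, client id, and secret; URL-based providers
+ # (Azure, GitLab, Keycloak, WorkOS) additionally expose an *_url field.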
external_figma_client_id: + type: string + nullable: true + external_figma_enabled: + type: boolean + nullable: true + external_figma_secret: + type: string + nullable: true + external_github_client_id: + type: string + nullable: true + external_github_enabled: + type: boolean + nullable: true + external_github_secret: + type: string + nullable: true + external_gitlab_client_id: + type: string + nullable: true + external_gitlab_enabled: + type: boolean + nullable: true + external_gitlab_secret: + type: string + nullable: true + external_gitlab_url: + type: string + nullable: true + external_google_additional_client_ids: + type: string + nullable: true + external_google_client_id: + type: string + nullable: true + external_google_enabled: + type: boolean + nullable: true + external_google_secret: + type: string + nullable: true + external_google_skip_nonce_check: + type: boolean + nullable: true + external_kakao_client_id: + type: string + nullable: true + external_kakao_enabled: + type: boolean + nullable: true + external_kakao_secret: + type: string + nullable: true + external_keycloak_client_id: + type: string + nullable: true + external_keycloak_enabled: + type: boolean + nullable: true + external_keycloak_secret: + type: string + nullable: true + external_keycloak_url: + type: string + nullable: true + external_linkedin_oidc_client_id: + type: string + nullable: true + external_linkedin_oidc_enabled: + type: boolean + nullable: true + external_linkedin_oidc_secret: + type: string + nullable: true + external_slack_oidc_client_id: + type: string + nullable: true + external_slack_oidc_enabled: + type: boolean + nullable: true + external_slack_oidc_secret: + type: string + nullable: true + external_notion_client_id: + type: string + nullable: true + external_notion_enabled: + type: boolean + nullable: true + external_notion_secret: + type: string + nullable: true + external_phone_enabled: + type: boolean + nullable: true + external_slack_client_id: + type: string + nullable: true + external_slack_enabled: + type: boolean + nullable: true + external_slack_secret: + type: string + nullable: true + external_spotify_client_id: + type: string + nullable: true + external_spotify_enabled: + type: boolean + nullable: true + external_spotify_secret: + type: string + nullable: true + external_twitch_client_id: + type: string + nullable: true + external_twitch_enabled: + type: boolean + nullable: true + external_twitch_secret: + type: string + nullable: true + external_twitter_client_id: + type: string + nullable: true + external_twitter_enabled: + type: boolean + nullable: true + external_twitter_secret: + type: string + nullable: true + external_workos_client_id: + type: string + nullable: true + external_workos_enabled: + type: boolean + nullable: true + external_workos_secret: + type: string + nullable: true + external_workos_url: + type: string + nullable: true + external_zoom_client_id: + type: string + nullable: true + external_zoom_enabled: + type: boolean + nullable: true + external_zoom_secret: + type: string + nullable: true + hook_custom_access_token_enabled: + type: boolean + nullable: true + hook_custom_access_token_uri: + type: string + nullable: true + hook_custom_access_token_secrets: + type: string + nullable: true + hook_mfa_verification_attempt_enabled: + type: boolean + nullable: true + hook_mfa_verification_attempt_uri: + type: string + nullable: true + hook_mfa_verification_attempt_secrets: + type: string + nullable: true + hook_password_verification_attempt_enabled: + type: 
boolean + nullable: true + hook_password_verification_attempt_uri: + type: string + nullable: true + hook_password_verification_attempt_secrets: + type: string + nullable: true + hook_send_sms_enabled: + type: boolean + nullable: true + hook_send_sms_uri: + type: string + nullable: true + hook_send_sms_secrets: + type: string + nullable: true + hook_send_email_enabled: + type: boolean + nullable: true + hook_send_email_uri: + type: string + nullable: true + hook_send_email_secrets: + type: string + nullable: true + mailer_allow_unverified_email_sign_ins: + type: boolean + nullable: true + mailer_autoconfirm: + type: boolean + nullable: true + mailer_secure_email_change_enabled: + type: boolean + nullable: true + mailer_subjects_confirmation: + type: string + nullable: true + mailer_subjects_email_change: + type: string + nullable: true + mailer_subjects_invite: + type: string + nullable: true + mailer_subjects_magic_link: + type: string + nullable: true + mailer_subjects_reauthentication: + type: string + nullable: true + mailer_subjects_recovery: + type: string + nullable: true + mailer_templates_confirmation_content: + type: string + nullable: true + mailer_templates_email_change_content: + type: string + nullable: true + mailer_templates_invite_content: + type: string + nullable: true + mailer_templates_magic_link_content: + type: string + nullable: true + mailer_templates_reauthentication_content: + type: string + nullable: true + mailer_templates_recovery_content: + type: string + nullable: true + mfa_totp_enroll_enabled: + type: boolean + nullable: true + mfa_totp_verify_enabled: + type: boolean + nullable: true + mfa_phone_enroll_enabled: + type: boolean + nullable: true + mfa_phone_verify_enabled: + type: boolean + nullable: true + mfa_web_authn_enroll_enabled: + type: boolean + nullable: true + mfa_web_authn_verify_enabled: + type: boolean + nullable: true + mfa_phone_template: + type: string + nullable: true + password_hibp_enabled: + type: boolean + nullable: true + password_required_characters: + type: string + nullable: true + refresh_token_rotation_enabled: + type: boolean + nullable: true + saml_enabled: + type: boolean + nullable: true + saml_external_url: + type: string + nullable: true + saml_allow_encrypted_assertions: + type: boolean + nullable: true + security_captcha_enabled: + type: boolean + nullable: true + security_captcha_provider: + type: string + nullable: true + security_captcha_secret: + type: string + nullable: true + security_manual_linking_enabled: + type: boolean + nullable: true + security_update_password_require_reauthentication: + type: boolean + nullable: true + sessions_single_per_user: + type: boolean + nullable: true + sessions_tags: + type: string + nullable: true + site_url: + type: string + nullable: true + sms_autoconfirm: + type: boolean + nullable: true + sms_messagebird_access_key: + type: string + nullable: true + sms_messagebird_originator: + type: string + nullable: true + sms_provider: + type: string + nullable: true + sms_template: + type: string + nullable: true + sms_test_otp: + type: string + nullable: true + sms_test_otp_valid_until: + type: string + nullable: true + sms_textlocal_api_key: + type: string + nullable: true + sms_textlocal_sender: + type: string + nullable: true + sms_twilio_account_sid: + type: string + nullable: true + sms_twilio_auth_token: + type: string + nullable: true + sms_twilio_content_sid: + type: string + nullable: true + sms_twilio_message_service_sid: + type: string + nullable: true + 
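+ # The sms_twilio_verify_* fields that follow configure Twilio Verify
+ # separately from the plain Twilio SMS credentials above.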
sms_twilio_verify_account_sid: + type: string + nullable: true + sms_twilio_verify_auth_token: + type: string + nullable: true + sms_twilio_verify_message_service_sid: + type: string + nullable: true + sms_vonage_api_key: + type: string + nullable: true + sms_vonage_api_secret: + type: string + nullable: true + sms_vonage_from: + type: string + nullable: true + smtp_admin_email: + type: string + nullable: true + smtp_host: + type: string + nullable: true + smtp_pass: + type: string + nullable: true + smtp_port: + type: string + nullable: true + smtp_sender_name: + type: string + nullable: true + smtp_user: + type: string + nullable: true + uri_allow_list: + type: string + nullable: true + required: + - api_max_request_duration + - db_max_pool_size + - jwt_exp + - mailer_otp_exp + - mailer_otp_length + - mfa_max_enrolled_factors + - mfa_phone_otp_length + - mfa_phone_max_frequency + - password_min_length + - rate_limit_anonymous_users + - rate_limit_email_sent + - rate_limit_sms_sent + - rate_limit_token_refresh + - rate_limit_verify + - rate_limit_otp + - security_refresh_token_reuse_interval + - sessions_inactivity_timeout + - sessions_timebox + - sms_max_frequency + - sms_otp_exp + - sms_otp_length + - smtp_max_frequency + - disable_signup + - external_anonymous_users_enabled + - external_apple_additional_client_ids + - external_apple_client_id + - external_apple_enabled + - external_apple_secret + - external_azure_client_id + - external_azure_enabled + - external_azure_secret + - external_azure_url + - external_bitbucket_client_id + - external_bitbucket_enabled + - external_bitbucket_secret + - external_discord_client_id + - external_discord_enabled + - external_discord_secret + - external_email_enabled + - external_facebook_client_id + - external_facebook_enabled + - external_facebook_secret + - external_figma_client_id + - external_figma_enabled + - external_figma_secret + - external_github_client_id + - external_github_enabled + - external_github_secret + - external_gitlab_client_id + - external_gitlab_enabled + - external_gitlab_secret + - external_gitlab_url + - external_google_additional_client_ids + - external_google_client_id + - external_google_enabled + - external_google_secret + - external_google_skip_nonce_check + - external_kakao_client_id + - external_kakao_enabled + - external_kakao_secret + - external_keycloak_client_id + - external_keycloak_enabled + - external_keycloak_secret + - external_keycloak_url + - external_linkedin_oidc_client_id + - external_linkedin_oidc_enabled + - external_linkedin_oidc_secret + - external_slack_oidc_client_id + - external_slack_oidc_enabled + - external_slack_oidc_secret + - external_notion_client_id + - external_notion_enabled + - external_notion_secret + - external_phone_enabled + - external_slack_client_id + - external_slack_enabled + - external_slack_secret + - external_spotify_client_id + - external_spotify_enabled + - external_spotify_secret + - external_twitch_client_id + - external_twitch_enabled + - external_twitch_secret + - external_twitter_client_id + - external_twitter_enabled + - external_twitter_secret + - external_workos_client_id + - external_workos_enabled + - external_workos_secret + - external_workos_url + - external_zoom_client_id + - external_zoom_enabled + - external_zoom_secret + - hook_custom_access_token_enabled + - hook_custom_access_token_uri + - hook_custom_access_token_secrets + - hook_mfa_verification_attempt_enabled + - hook_mfa_verification_attempt_uri + - hook_mfa_verification_attempt_secrets + - 
hook_password_verification_attempt_enabled + - hook_password_verification_attempt_uri + - hook_password_verification_attempt_secrets + - hook_send_sms_enabled + - hook_send_sms_uri + - hook_send_sms_secrets + - hook_send_email_enabled + - hook_send_email_uri + - hook_send_email_secrets + - mailer_allow_unverified_email_sign_ins + - mailer_autoconfirm + - mailer_secure_email_change_enabled + - mailer_subjects_confirmation + - mailer_subjects_email_change + - mailer_subjects_invite + - mailer_subjects_magic_link + - mailer_subjects_reauthentication + - mailer_subjects_recovery + - mailer_templates_confirmation_content + - mailer_templates_email_change_content + - mailer_templates_invite_content + - mailer_templates_magic_link_content + - mailer_templates_reauthentication_content + - mailer_templates_recovery_content + - mfa_totp_enroll_enabled + - mfa_totp_verify_enabled + - mfa_phone_enroll_enabled + - mfa_phone_verify_enabled + - mfa_web_authn_enroll_enabled + - mfa_web_authn_verify_enabled + - mfa_phone_template + - password_hibp_enabled + - password_required_characters + - refresh_token_rotation_enabled + - saml_enabled + - saml_external_url + - saml_allow_encrypted_assertions + - security_captcha_enabled + - security_captcha_provider + - security_captcha_secret + - security_manual_linking_enabled + - security_update_password_require_reauthentication + - sessions_single_per_user + - sessions_tags + - site_url + - sms_autoconfirm + - sms_messagebird_access_key + - sms_messagebird_originator + - sms_provider + - sms_template + - sms_test_otp + - sms_test_otp_valid_until + - sms_textlocal_api_key + - sms_textlocal_sender + - sms_twilio_account_sid + - sms_twilio_auth_token + - sms_twilio_content_sid + - sms_twilio_message_service_sid + - sms_twilio_verify_account_sid + - sms_twilio_verify_auth_token + - sms_twilio_verify_message_service_sid + - sms_vonage_api_key + - sms_vonage_api_secret + - sms_vonage_from + - smtp_admin_email + - smtp_host + - smtp_pass + - smtp_port + - smtp_sender_name + - smtp_user + - uri_allow_list + UpdateAuthConfigBody: + type: object + properties: + jwt_exp: + type: integer + minimum: 0 + maximum: 604800 + smtp_max_frequency: + type: integer + minimum: 0 + maximum: 32767 + mfa_max_enrolled_factors: + type: integer + minimum: 0 + maximum: 2147483647 + sessions_timebox: + type: integer + minimum: 0 + sessions_inactivity_timeout: + type: integer + minimum: 0 + rate_limit_anonymous_users: + type: integer + minimum: 1 + maximum: 2147483647 + rate_limit_email_sent: + type: integer + minimum: 1 + maximum: 2147483647 + rate_limit_sms_sent: + type: integer + minimum: 1 + maximum: 2147483647 + rate_limit_verify: + type: integer + minimum: 1 + maximum: 2147483647 + rate_limit_token_refresh: + type: integer + minimum: 1 + maximum: 2147483647 + rate_limit_otp: + type: integer + minimum: 1 + maximum: 2147483647 + password_min_length: + type: integer + minimum: 6 + maximum: 32767 + security_refresh_token_reuse_interval: + type: integer + minimum: 0 + maximum: 2147483647 + mailer_otp_exp: + type: integer + minimum: 0 + maximum: 2147483647 + mailer_otp_length: + type: integer + minimum: 6 + maximum: 10 + sms_max_frequency: + type: integer + minimum: 0 + maximum: 32767 + sms_otp_exp: + type: integer + minimum: 0 + maximum: 2147483647 + sms_otp_length: + type: integer + minimum: 0 + maximum: 32767 + db_max_pool_size: + type: integer + api_max_request_duration: + type: integer + mfa_phone_max_frequency: + type: integer + minimum: 0 + maximum: 32767 + mfa_phone_otp_length: + type: 
integer + minimum: 0 + maximum: 32767 + site_url: + type: string + pattern: /^[^,]+$/ + disable_signup: + type: boolean + smtp_admin_email: + type: string + smtp_host: + type: string + smtp_port: + type: string + smtp_user: + type: string + smtp_pass: + type: string + smtp_sender_name: + type: string + mailer_allow_unverified_email_sign_ins: + type: boolean + mailer_autoconfirm: + type: boolean + mailer_subjects_invite: + type: string + mailer_subjects_confirmation: + type: string + mailer_subjects_recovery: + type: string + mailer_subjects_email_change: + type: string + mailer_subjects_magic_link: + type: string + mailer_subjects_reauthentication: + type: string + mailer_templates_invite_content: + type: string + mailer_templates_confirmation_content: + type: string + mailer_templates_recovery_content: + type: string + mailer_templates_email_change_content: + type: string + mailer_templates_magic_link_content: + type: string + mailer_templates_reauthentication_content: + type: string + uri_allow_list: + type: string + external_anonymous_users_enabled: + type: boolean + external_email_enabled: + type: boolean + external_phone_enabled: + type: boolean + saml_enabled: + type: boolean + saml_external_url: + type: string + pattern: /^[^,]+$/ + security_captcha_enabled: + type: boolean + security_captcha_provider: + type: string + security_captcha_secret: + type: string + sessions_single_per_user: + type: boolean + sessions_tags: + type: string + pattern: /^\s*([a-z0-9_-]+(\s*,+\s*)?)*\s*$/i + mailer_secure_email_change_enabled: + type: boolean + refresh_token_rotation_enabled: + type: boolean + password_hibp_enabled: + type: boolean + password_required_characters: + type: string + enum: + - abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ:0123456789 + - abcdefghijklmnopqrstuvwxyz:ABCDEFGHIJKLMNOPQRSTUVWXYZ:0123456789 + - abcdefghijklmnopqrstuvwxyz:ABCDEFGHIJKLMNOPQRSTUVWXYZ:0123456789:!@#$%^&*()_+-=[]{};'\\\\:\"|<>?,./`~ + - '' + security_manual_linking_enabled: + type: boolean + security_update_password_require_reauthentication: + type: boolean + sms_autoconfirm: + type: boolean + sms_provider: + type: string + sms_messagebird_access_key: + type: string + sms_messagebird_originator: + type: string + sms_test_otp: + type: string + pattern: /^([0-9]{1,15}=[0-9]+,?)*$/ + sms_test_otp_valid_until: + type: string + sms_textlocal_api_key: + type: string + sms_textlocal_sender: + type: string + sms_twilio_account_sid: + type: string + sms_twilio_auth_token: + type: string + sms_twilio_content_sid: + type: string + sms_twilio_message_service_sid: + type: string + sms_twilio_verify_account_sid: + type: string + sms_twilio_verify_auth_token: + type: string + sms_twilio_verify_message_service_sid: + type: string + sms_vonage_api_key: + type: string + sms_vonage_api_secret: + type: string + sms_vonage_from: + type: string + sms_template: + type: string + hook_mfa_verification_attempt_enabled: + type: boolean + hook_mfa_verification_attempt_uri: + type: string + hook_mfa_verification_attempt_secrets: + type: string + hook_password_verification_attempt_enabled: + type: boolean + hook_password_verification_attempt_uri: + type: string + hook_password_verification_attempt_secrets: + type: string + hook_custom_access_token_enabled: + type: boolean + hook_custom_access_token_uri: + type: string + hook_custom_access_token_secrets: + type: string + hook_send_sms_enabled: + type: boolean + hook_send_sms_uri: + type: string + hook_send_sms_secrets: + type: string + hook_send_email_enabled: + type: boolean + 
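+ # Like the other auth hooks, send_email is configured as an *_enabled /
+ # *_uri / *_secrets trio.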
hook_send_email_uri: + type: string + hook_send_email_secrets: + type: string + external_apple_enabled: + type: boolean + external_apple_client_id: + type: string + external_apple_secret: + type: string + external_apple_additional_client_ids: + type: string + external_azure_enabled: + type: boolean + external_azure_client_id: + type: string + external_azure_secret: + type: string + external_azure_url: + type: string + external_bitbucket_enabled: + type: boolean + external_bitbucket_client_id: + type: string + external_bitbucket_secret: + type: string + external_discord_enabled: + type: boolean + external_discord_client_id: + type: string + external_discord_secret: + type: string + external_facebook_enabled: + type: boolean + external_facebook_client_id: + type: string + external_facebook_secret: + type: string + external_figma_enabled: + type: boolean + external_figma_client_id: + type: string + external_figma_secret: + type: string + external_github_enabled: + type: boolean + external_github_client_id: + type: string + external_github_secret: + type: string + external_gitlab_enabled: + type: boolean + external_gitlab_client_id: + type: string + external_gitlab_secret: + type: string + external_gitlab_url: + type: string + external_google_enabled: + type: boolean + external_google_client_id: + type: string + external_google_secret: + type: string + external_google_additional_client_ids: + type: string + external_google_skip_nonce_check: + type: boolean + external_kakao_enabled: + type: boolean + external_kakao_client_id: + type: string + external_kakao_secret: + type: string + external_keycloak_enabled: + type: boolean + external_keycloak_client_id: + type: string + external_keycloak_secret: + type: string + external_keycloak_url: + type: string + external_linkedin_oidc_enabled: + type: boolean + external_linkedin_oidc_client_id: + type: string + external_linkedin_oidc_secret: + type: string + external_slack_oidc_enabled: + type: boolean + external_slack_oidc_client_id: + type: string + external_slack_oidc_secret: + type: string + external_notion_enabled: + type: boolean + external_notion_client_id: + type: string + external_notion_secret: + type: string + external_slack_enabled: + type: boolean + external_slack_client_id: + type: string + external_slack_secret: + type: string + external_spotify_enabled: + type: boolean + external_spotify_client_id: + type: string + external_spotify_secret: + type: string + external_twitch_enabled: + type: boolean + external_twitch_client_id: + type: string + external_twitch_secret: + type: string + external_twitter_enabled: + type: boolean + external_twitter_client_id: + type: string + external_twitter_secret: + type: string + external_workos_enabled: + type: boolean + external_workos_client_id: + type: string + external_workos_secret: + type: string + external_workos_url: + type: string + external_zoom_enabled: + type: boolean + external_zoom_client_id: + type: string + external_zoom_secret: + type: string + mfa_totp_enroll_enabled: + type: boolean + mfa_totp_verify_enabled: + type: boolean + mfa_web_authn_enroll_enabled: + type: boolean + mfa_web_authn_verify_enabled: + type: boolean + mfa_phone_enroll_enabled: + type: boolean + mfa_phone_verify_enabled: + type: boolean + mfa_phone_template: + type: string + CreateThirdPartyAuthBody: + type: object + properties: + oidc_issuer_url: + type: string + jwks_url: + type: string + custom_jwks: + type: object + ThirdPartyAuth: + type: object + properties: + id: + type: string + type: + type: string + 
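+ # The external issuer is described by oidc_issuer_url, jwks_url, or inline
+ # custom_jwks below; resolved_jwks/resolved_at presumably record the keys
+ # the server last resolved from that source.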
oidc_issuer_url: + type: string + nullable: true + jwks_url: + type: string + nullable: true + custom_jwks: + type: object + nullable: true + resolved_jwks: + type: object + nullable: true + inserted_at: + type: string + updated_at: + type: string + resolved_at: + type: string + nullable: true + required: + - id + - type + - inserted_at + - updated_at + ProjectAvailableRestoreVersion: + type: object + properties: + version: + type: string + release_channel: + type: string + enum: + - internal + - alpha + - beta + - ga + - withdrawn + - preview + postgres_engine: + type: string + enum: + - '13' + - '14' + - '15' + - '17' + - 17-oriole + required: + - version + - release_channel + - postgres_engine + GetProjectAvailableRestoreVersionsResponse: + type: object + properties: + available_versions: + type: array + items: + $ref: '#/components/schemas/ProjectAvailableRestoreVersion' + required: + - available_versions + RestoreProjectBodyDto: + type: object + properties: {} + hideDefinitions: + - release_channel + - postgres_engine + V1AnalyticsResponse: + type: object + properties: + error: + oneOf: + - properties: + code: + type: number + errors: + type: array + items: + properties: + domain: + type: string + location: + type: string + locationType: + type: string + message: + type: string + reason: + type: string + message: + type: string + status: + type: string + - type: string + result: + type: array + items: + type: object + V1RunQueryBody: + type: object + properties: + query: + type: string + required: + - query + GetProjectDbMetadataResponseDto: + type: object + properties: + databases: + type: array + items: + type: object + properties: + name: + type: string + schemas: + type: array + items: + type: object + properties: + name: + type: string + required: + - name + additionalProperties: true + required: + - name + - schemas + additionalProperties: true + required: + - databases + FunctionResponse: + type: object + properties: + version: + type: integer + created_at: + type: integer + format: int64 + updated_at: + type: integer + format: int64 + id: + type: string + slug: + type: string + name: + type: string + status: + enum: + - ACTIVE + - REMOVED + - THROTTLED + type: string + verify_jwt: + type: boolean + import_map: + type: boolean + entrypoint_path: + type: string + import_map_path: + type: string + compute_multiplier: + type: number + required: + - version + - created_at + - updated_at + - id + - slug + - name + - status + V1CreateFunctionBody: + type: object + properties: + slug: + type: string + pattern: /^[A-Za-z0-9_-]+$/ + name: + type: string + body: + type: string + verify_jwt: + type: boolean + compute_multiplier: + type: number + minimum: 1 + maximum: 4 + required: + - slug + - name + - body + BulkUpdateFunctionBody: + type: object + properties: + version: + type: integer + created_at: + type: integer + format: int64 + id: + type: string + slug: + type: string + name: + type: string + status: + enum: + - ACTIVE + - REMOVED + - THROTTLED + type: string + verify_jwt: + type: boolean + import_map: + type: boolean + entrypoint_path: + type: string + import_map_path: + type: string + required: + - version + - id + - slug + - name + - status + BulkUpdateFunctionResponse: + type: object + properties: + functions: + type: array + items: + $ref: '#/components/schemas/FunctionResponse' + required: + - functions + FunctionDeployMetadata: + type: object + properties: + entrypoint_path: + type: string + import_map_path: + type: string + static_patterns: + type: array + items: + type: 
string + verify_jwt: + type: boolean + name: + type: string + required: + - entrypoint_path + FunctionDeployBody: + type: object + properties: + file: + type: array + items: + type: string + format: binary + metadata: + $ref: '#/components/schemas/FunctionDeployMetadata' + required: + - file + - metadata + DeployFunctionResponse: + type: object + properties: + version: + type: integer + created_at: + type: integer + format: int64 + updated_at: + type: integer + format: int64 + id: + type: string + slug: + type: string + name: + type: string + status: + enum: + - ACTIVE + - REMOVED + - THROTTLED + type: string + verify_jwt: + type: boolean + import_map: + type: boolean + entrypoint_path: + type: string + import_map_path: + type: string + compute_multiplier: + type: number + required: + - version + - id + - slug + - name + - status + FunctionSlugResponse: + type: object + properties: + version: + type: integer + created_at: + type: integer + format: int64 + updated_at: + type: integer + format: int64 + id: + type: string + slug: + type: string + name: + type: string + status: + enum: + - ACTIVE + - REMOVED + - THROTTLED + type: string + verify_jwt: + type: boolean + import_map: + type: boolean + entrypoint_path: + type: string + import_map_path: + type: string + compute_multiplier: + type: number + required: + - version + - created_at + - updated_at + - id + - slug + - name + - status + V1UpdateFunctionBody: + type: object + properties: + name: + type: string + body: + type: string + verify_jwt: + type: boolean + compute_multiplier: + type: number + minimum: 1 + maximum: 4 + V1StorageBucketResponse: + type: object + properties: + id: + type: string + name: + type: string + owner: + type: string + created_at: + type: string + updated_at: + type: string + public: + type: boolean + required: + - id + - name + - owner + - created_at + - updated_at + - public + AttributeValue: + type: object + properties: + default: + oneOf: + - type: object + - type: number + - type: string + - type: boolean + name: + type: string + names: + type: array + items: + type: string + array: + type: boolean + AttributeMapping: + type: object + properties: + keys: + type: object + additionalProperties: + $ref: '#/components/schemas/AttributeValue' + required: + - keys + CreateProviderBody: + type: object + properties: + type: + type: string + enum: + - saml + description: What type of provider will be created + metadata_xml: + type: string + metadata_url: + type: string + domains: + type: array + items: + type: string + attribute_mapping: + $ref: '#/components/schemas/AttributeMapping' + required: + - type + SamlDescriptor: + type: object + properties: + id: + type: string + entity_id: + type: string + metadata_url: + type: string + metadata_xml: + type: string + attribute_mapping: + $ref: '#/components/schemas/AttributeMapping' + required: + - id + - entity_id + Domain: + type: object + properties: + id: + type: string + domain: + type: string + created_at: + type: string + updated_at: + type: string + required: + - id + CreateProviderResponse: + type: object + properties: + id: + type: string + saml: + $ref: '#/components/schemas/SamlDescriptor' + domains: + type: array + items: + $ref: '#/components/schemas/Domain' + created_at: + type: string + updated_at: + type: string + required: + - id + Provider: + type: object + properties: + id: + type: string + saml: + $ref: '#/components/schemas/SamlDescriptor' + domains: + type: array + items: + $ref: '#/components/schemas/Domain' + created_at: + type: string + 
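+ # created_at/updated_at are presumably ISO-8601 timestamps; the spec only
+ # constrains them to strings.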
updated_at: + type: string + required: + - id + ListProvidersResponse: + type: object + properties: + items: + type: array + items: + $ref: '#/components/schemas/Provider' + required: + - items + GetProviderResponse: + type: object + properties: + id: + type: string + saml: + $ref: '#/components/schemas/SamlDescriptor' + domains: + type: array + items: + $ref: '#/components/schemas/Domain' + created_at: + type: string + updated_at: + type: string + required: + - id + UpdateProviderBody: + type: object + properties: + metadata_xml: + type: string + metadata_url: + type: string + domains: + type: array + items: + type: string + attribute_mapping: + $ref: '#/components/schemas/AttributeMapping' + UpdateProviderResponse: + type: object + properties: + id: + type: string + saml: + $ref: '#/components/schemas/SamlDescriptor' + domains: + type: array + items: + $ref: '#/components/schemas/Domain' + created_at: + type: string + updated_at: + type: string + required: + - id + DeleteProviderResponse: + type: object + properties: + id: + type: string + saml: + $ref: '#/components/schemas/SamlDescriptor' + domains: + type: array + items: + $ref: '#/components/schemas/Domain' + created_at: + type: string + updated_at: + type: string + required: + - id + V1Backup: + type: object + properties: + status: + type: string + enum: + - COMPLETED + - FAILED + - PENDING + - REMOVED + - ARCHIVED + - CANCELLED + is_physical_backup: + type: boolean + inserted_at: + type: string + required: + - status + - is_physical_backup + - inserted_at + V1PhysicalBackup: + type: object + properties: + earliest_physical_backup_date_unix: + type: integer + format: int64 + latest_physical_backup_date_unix: + type: integer + format: int64 + V1BackupsResponse: + type: object + properties: + region: + type: string + walg_enabled: + type: boolean + pitr_enabled: + type: boolean + backups: + type: array + items: + $ref: '#/components/schemas/V1Backup' + physical_backup_data: + $ref: '#/components/schemas/V1PhysicalBackup' + required: + - region + - walg_enabled + - pitr_enabled + - backups + - physical_backup_data + V1RestorePitrBody: + type: object + properties: + recovery_time_target_unix: + type: integer + minimum: 0 + format: int64 + required: + - recovery_time_target_unix + V1OrganizationMemberResponse: + type: object + properties: + user_id: + type: string + user_name: + type: string + email: + type: string + role_name: + type: string + mfa_enabled: + type: boolean + required: + - user_id + - user_name + - role_name + - mfa_enabled + BillingPlanId: + type: string + enum: + - free + - pro + - team + - enterprise + V1OrganizationSlugResponse: + type: object + properties: + plan: + $ref: '#/components/schemas/BillingPlanId' + opt_in_tags: + type: array + items: + type: string + enum: + - AI_SQL_GENERATOR_OPT_IN + allowed_release_channels: + type: array + items: + $ref: '#/components/schemas/ReleaseChannel' + id: + type: string + name: + type: string + required: + - opt_in_tags + - allowed_release_channels + - id + - name diff --git a/cmd/bans.go b/cmd/bans.go new file mode 100644 index 0000000..603e2dd --- /dev/null +++ b/cmd/bans.go @@ -0,0 +1,47 @@ +package cmd + +import ( + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/bans/get" + "github.com/supabase/cli/internal/bans/update" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + bansCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "network-bans", + Short: "Manage network bans", + Long: `Network bans are IPs that get 
temporarily blocked if their traffic pattern looks abusive (e.g. multiple failed auth attempts). + +The subcommands help you view the current bans, and unblock IPs if desired.`, + } + + dbIpsToUnban []string + + bansRemoveCmd = &cobra.Command{ + Use: "remove", + Short: "Remove a network ban", + RunE: func(cmd *cobra.Command, args []string) error { + return update.Run(cmd.Context(), flags.ProjectRef, dbIpsToUnban, afero.NewOsFs()) + }, + } + + bansGetCmd = &cobra.Command{ + Use: "get", + Short: "Get the current network bans", + RunE: func(cmd *cobra.Command, args []string) error { + return get.Run(cmd.Context(), flags.ProjectRef, afero.NewOsFs()) + }, + } +) + +func init() { + bansCmd.PersistentFlags().StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + bansCmd.AddCommand(bansGetCmd) + bansRemoveCmd.Flags().StringSliceVar(&dbIpsToUnban, "db-unban-ip", []string{}, "IP to allow DB connections from.") + bansCmd.AddCommand(bansRemoveCmd) + + rootCmd.AddCommand(bansCmd) +} diff --git a/cmd/bootstrap.go b/cmd/bootstrap.go new file mode 100644 index 0000000..fd9f749 --- /dev/null +++ b/cmd/bootstrap.go @@ -0,0 +1,96 @@ +package cmd + +import ( + "context" + "fmt" + "os" + "os/signal" + "strings" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/supabase/cli/internal/bootstrap" + "github.com/supabase/cli/internal/utils" +) + +var ( + starter = bootstrap.StarterTemplate{ + Name: "scratch", + Description: "An empty project from scratch.", + Start: "supabase start", + } + + bootstrapCmd = &cobra.Command{ + GroupID: groupQuickStart, + Use: "bootstrap [template]", + Short: "Bootstrap a Supabase project from a starter template", + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt) + if !viper.IsSet("WORKDIR") { + title := fmt.Sprintf("Enter a directory to bootstrap your project (or leave blank to use %s): ", utils.Bold(utils.CurrentDirAbs)) + if workdir, err := utils.NewConsole().PromptText(ctx, title); err != nil { + return err + } else { + viper.Set("WORKDIR", workdir) + } + } + client := utils.GetGitHubClient(ctx) + templates, err := bootstrap.ListSamples(ctx, client) + if err != nil { + return err + } + if len(args) > 0 { + name := args[0] + for _, t := range templates { + if strings.EqualFold(t.Name, name) { + starter = t + break + } + } + if !strings.EqualFold(starter.Name, name) { + return errors.New("Invalid template: " + name) + } + } else { + if err := promptStarterTemplate(ctx, templates); err != nil { + return err + } + } + return bootstrap.Run(ctx, starter, afero.NewOsFs()) + }, + } +) + +func init() { + bootstrapFlags := bootstrapCmd.Flags() + bootstrapFlags.StringVarP(&dbPassword, "password", "p", "", "Password to your remote Postgres database.") + cobra.CheckErr(viper.BindPFlag("DB_PASSWORD", bootstrapFlags.Lookup("password"))) + rootCmd.AddCommand(bootstrapCmd) +} + +func promptStarterTemplate(ctx context.Context, templates []bootstrap.StarterTemplate) error { + items := make([]utils.PromptItem, len(templates)) + for i, t := range templates { + items[i] = utils.PromptItem{ + Index: i, + Summary: t.Name, + Details: t.Description, + } + } + items = append(items, utils.PromptItem{ + Index: len(items), + Summary: starter.Name, + Details: starter.Description, + }) + title := "Which starter template do you want to use?" 
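+ // PromptChoice renders an interactive picker; appending the built-in
+ // "scratch" starter above guarantees at least one selectable entry even
+ // when the remote template list is empty.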
+ choice, err := utils.PromptChoice(ctx, title, items) + if err != nil { + return err + } + if choice.Index < len(templates) { + starter = templates[choice.Index] + } + return nil +} diff --git a/cmd/branches.go b/cmd/branches.go new file mode 100644 index 0000000..b541704 --- /dev/null +++ b/cmd/branches.go @@ -0,0 +1,230 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/branches/create" + "github.com/supabase/cli/internal/branches/delete" + "github.com/supabase/cli/internal/branches/disable" + "github.com/supabase/cli/internal/branches/get" + "github.com/supabase/cli/internal/branches/list" + "github.com/supabase/cli/internal/branches/update" + "github.com/supabase/cli/internal/gen/keys" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/api" +) + +var ( + branchesCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "branches", + Short: "Manage Supabase preview branches", + } + + branchRegion = utils.EnumFlag{ + Allowed: awsRegions(), + } + persistent bool + + branchCreateCmd = &cobra.Command{ + Use: "create [name]", + Short: "Create a preview branch", + Long: "Create a preview branch for the linked project.", + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var body api.CreateBranchBody + if len(args) > 0 { + body.BranchName = args[0] + } + cmdFlags := cmd.Flags() + if cmdFlags.Changed("region") { + body.Region = &branchRegion.Value + } + if cmdFlags.Changed("size") { + body.DesiredInstanceSize = (*api.DesiredInstanceSize)(&size.Value) + } + if cmdFlags.Changed("persistent") { + body.Persistent = &persistent + } + return create.Run(cmd.Context(), body, afero.NewOsFs()) + }, + } + + branchListCmd = &cobra.Command{ + Use: "list", + Short: "List all preview branches", + Long: "List all preview branches of the linked project.", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return list.Run(cmd.Context(), afero.NewOsFs()) + }, + } + + branchId string + + branchGetCmd = &cobra.Command{ + Use: "get [branch-id]", + Short: "Retrieve details of a preview branch", + Long: "Retrieve details of the specified preview branch.", + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + if len(args) == 0 { + if err := promptBranchId(ctx, flags.ProjectRef); err != nil { + return err + } + } else { + branchId = args[0] + } + return get.Run(ctx, branchId, afero.NewOsFs()) + }, + } + + branchStatus = utils.EnumFlag{ + Allowed: []string{ + string(api.BranchResponseStatusRUNNINGMIGRATIONS), + string(api.BranchResponseStatusMIGRATIONSPASSED), + string(api.BranchResponseStatusMIGRATIONSFAILED), + string(api.BranchResponseStatusFUNCTIONSDEPLOYED), + string(api.BranchResponseStatusFUNCTIONSFAILED), + }, + } + branchName string + gitBranch string + + branchUpdateCmd = &cobra.Command{ + Use: "update [branch-id]", + Short: "Update a preview branch", + Long: "Update a preview branch by its ID.", + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + cmdFlags := cmd.Flags() + var body api.UpdateBranchBody + if cmdFlags.Changed("name") { + body.BranchName = &branchName + } + if cmdFlags.Changed("git-branch") { + body.GitBranch = &gitBranch + } + if cmdFlags.Changed("persistent") { + body.Persistent = &persistent + } + if cmdFlags.Changed("status") { + 
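+ // branchStatus is an EnumFlag restricted to the allowed branch statuses,
+ // so its string value can be cast directly to the generated request type.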
body.Status = (*api.UpdateBranchBodyStatus)(&branchStatus.Value) + } + ctx := cmd.Context() + if len(args) == 0 { + if err := promptBranchId(ctx, flags.ProjectRef); err != nil { + return err + } + } else { + branchId = args[0] + } + return update.Run(cmd.Context(), branchId, body, afero.NewOsFs()) + }, + } + + branchDeleteCmd = &cobra.Command{ + Use: "delete [branch-id]", + Short: "Delete a preview branch", + Long: "Delete a preview branch by its ID.", + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + if len(args) == 0 { + if err := promptBranchId(ctx, flags.ProjectRef); err != nil { + return err + } + } else { + branchId = args[0] + } + return delete.Run(ctx, branchId) + }, + } + + branchDisableCmd = &cobra.Command{ + Use: "disable", + Short: "Disable preview branching", + Long: "Disable preview branching for the linked project.", + RunE: func(cmd *cobra.Command, args []string) error { + return disable.Run(cmd.Context(), afero.NewOsFs()) + }, + } +) + +func init() { + branchFlags := branchesCmd.PersistentFlags() + branchFlags.StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + createFlags := branchCreateCmd.Flags() + createFlags.Var(&branchRegion, "region", "Select a region to deploy the branch database.") + createFlags.Var(&size, "size", "Select a desired instance size for the branch database.") + createFlags.BoolVar(&persistent, "persistent", false, "Whether to create a persistent branch.") + getFlags := branchGetCmd.Flags() + getFlags.VarP(&utils.OutputFormat, "output", "o", "Output format of branch details.") + branchesCmd.AddCommand(branchCreateCmd) + branchesCmd.AddCommand(branchListCmd) + branchesCmd.AddCommand(branchGetCmd) + updateFlags := branchUpdateCmd.Flags() + updateFlags.StringVar(&branchName, "name", "", "Rename the preview branch.") + updateFlags.StringVar(&gitBranch, "git-branch", "", "Change the associated git branch.") + updateFlags.BoolVar(&persistent, "persistent", false, "Switch between ephemeral and persistent branch.") + updateFlags.Var(&branchStatus, "status", "Override the current branch status.") + branchesCmd.AddCommand(branchUpdateCmd) + branchesCmd.AddCommand(branchDeleteCmd) + branchesCmd.AddCommand(branchDisableCmd) + rootCmd.AddCommand(branchesCmd) +} + +func promptBranchId(ctx context.Context, ref string) error { + resp, err := utils.GetSupabase().V1ListAllBranchesWithResponse(ctx, ref) + if err != nil { + return errors.Errorf("failed to list preview branches: %w", err) + } + if resp.JSON200 == nil { + return errors.New("Unexpected error listing preview branches: " + string(resp.Body)) + } + console := utils.NewConsole() + if !console.IsTTY { + // Fallback to current git branch on GHA + gitBranch := keys.GetGitBranch(afero.NewOsFs()) + title := "Enter the name of your branch: " + if len(gitBranch) > 0 { + title = fmt.Sprintf("%-2s (or leave blank to use %s): ", title, utils.Aqua(gitBranch)) + } + if name, err := console.PromptText(ctx, title); err != nil { + return err + } else if len(name) > 0 { + gitBranch = name + } + if len(gitBranch) == 0 { + return errors.New("git branch cannot be empty") + } + for _, branch := range *resp.JSON200 { + if branch.Name == gitBranch { + branchId = branch.Id + return nil + } + } + return errors.Errorf("Branch not found: %s", gitBranch) + } + items := make([]utils.PromptItem, len(*resp.JSON200)) + for i, branch := range *resp.JSON200 { + items[i] = utils.PromptItem{ + Summary: branch.Name, + Details: branch.Id, + } + } + 
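+ // Interactive path: list branches by name and stash each ID in Details,
+ // so the selection below maps straight back to a branch ID.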
title := "Select a branch:" + choice, err := utils.PromptChoice(ctx, title, items) + if err == nil { + branchId = choice.Details + fmt.Fprintln(os.Stderr, "Selected branch ID:", branchId) + } + return err +} diff --git a/cmd/config.go b/cmd/config.go new file mode 100644 index 0000000..1d0f607 --- /dev/null +++ b/cmd/config.go @@ -0,0 +1,30 @@ +package cmd + +import ( + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/config/push" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + configCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "config", + Short: "Manage Supabase project configurations", + } + + configPushCmd = &cobra.Command{ + Use: "push", + Short: "Pushes local config.toml to the linked project", + RunE: func(cmd *cobra.Command, args []string) error { + return push.Run(cmd.Context(), flags.ProjectRef, afero.NewOsFs()) + }, + } +) + +func init() { + configCmd.PersistentFlags().StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + configCmd.AddCommand(configPushCmd) + rootCmd.AddCommand(configCmd) +} diff --git a/cmd/db.go b/cmd/db.go new file mode 100644 index 0000000..bcc7ac5 --- /dev/null +++ b/cmd/db.go @@ -0,0 +1,345 @@ +package cmd + +import ( + "fmt" + "os" + "os/signal" + "path/filepath" + + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/supabase/cli/internal/db/branch/create" + "github.com/supabase/cli/internal/db/branch/delete" + "github.com/supabase/cli/internal/db/branch/list" + "github.com/supabase/cli/internal/db/branch/switch_" + "github.com/supabase/cli/internal/db/diff" + "github.com/supabase/cli/internal/db/dump" + "github.com/supabase/cli/internal/db/lint" + "github.com/supabase/cli/internal/db/pull" + "github.com/supabase/cli/internal/db/push" + "github.com/supabase/cli/internal/db/remote/changes" + "github.com/supabase/cli/internal/db/remote/commit" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/db/start" + "github.com/supabase/cli/internal/db/test" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + dbCmd = &cobra.Command{ + GroupID: groupLocalDev, + Use: "db", + Short: "Manage Postgres databases", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt) + cmd.SetContext(ctx) + return cmd.Root().PersistentPreRunE(cmd, args) + }, + } + + dbBranchCmd = &cobra.Command{ + Hidden: true, + Use: "branch", + Short: "Manage local database branches", + Long: "Manage local database branches. Each branch is associated with a separate local database. 
Forking remote databases is NOT supported.", + } + + dbBranchCreateCmd = &cobra.Command{ + Deprecated: "use \"branches create \" instead.\n", + Use: "create ", + Short: "Create a branch", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return create.Run(args[0], afero.NewOsFs()) + }, + } + + dbBranchDeleteCmd = &cobra.Command{ + Deprecated: "use \"branches delete \" instead.\n", + Use: "delete ", + Short: "Delete a branch", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return delete.Run(args[0], afero.NewOsFs()) + }, + } + + dbBranchListCmd = &cobra.Command{ + Deprecated: "use \"branches list\" instead.\n", + Use: "list", + Short: "List branches", + RunE: func(cmd *cobra.Command, args []string) error { + return list.Run(afero.NewOsFs(), os.Stdout) + }, + } + + dbSwitchCmd = &cobra.Command{ + Deprecated: "use \"branches create \" instead.\n", + Use: "switch ", + Short: "Switch the active branch", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return switch_.Run(cmd.Context(), args[0], afero.NewOsFs()) + }, + } + + useMigra bool + usePgAdmin bool + usePgSchema bool + schema []string + file string + + dbDiffCmd = &cobra.Command{ + Use: "diff", + Short: "Diffs the local database for schema changes", + RunE: func(cmd *cobra.Command, args []string) error { + if usePgAdmin { + return diff.RunPgAdmin(cmd.Context(), schema, file, flags.DbConfig, afero.NewOsFs()) + } + differ := diff.DiffSchemaMigra + if usePgSchema { + differ = diff.DiffPgSchema + fmt.Fprintln(os.Stderr, utils.Yellow("WARNING:"), "--use-pg-schema flag is experimental and may not include all entities, such as RLS policies, enums, and grants.") + } + return diff.Run(cmd.Context(), schema, file, flags.DbConfig, differ, afero.NewOsFs()) + }, + } + + dataOnly bool + useCopy bool + roleOnly bool + keepComments bool + excludeTable []string + + dbDumpCmd = &cobra.Command{ + Use: "dump", + Short: "Dumps data or schemas from the remote database", + PreRun: func(cmd *cobra.Command, args []string) { + if useCopy || len(excludeTable) > 0 { + cobra.CheckErr(cmd.MarkFlagRequired("data-only")) + } + }, + RunE: func(cmd *cobra.Command, args []string) error { + return dump.Run(cmd.Context(), file, flags.DbConfig, schema, excludeTable, dataOnly, roleOnly, keepComments, useCopy, dryRun, afero.NewOsFs()) + }, + PostRun: func(cmd *cobra.Command, args []string) { + if len(file) > 0 { + if absPath, err := filepath.Abs(file); err != nil { + fmt.Fprintln(os.Stderr, "Dumped schema to "+utils.Bold(file)+".") + } else { + fmt.Fprintln(os.Stderr, "Dumped schema to "+utils.Bold(absPath)+".") + } + } + }, + } + + dryRun bool + includeAll bool + includeRoles bool + includeSeed bool + + dbPushCmd = &cobra.Command{ + Use: "push", + Short: "Push new migrations to the remote database", + RunE: func(cmd *cobra.Command, args []string) error { + return push.Run(cmd.Context(), dryRun, includeAll, includeRoles, includeSeed, flags.DbConfig, afero.NewOsFs()) + }, + } + + dbPullCmd = &cobra.Command{ + Use: "pull [migration name]", + Short: "Pull schema from the remote database", + RunE: func(cmd *cobra.Command, args []string) error { + name := "remote_schema" + if len(args) > 0 { + name = args[0] + } + return pull.Run(cmd.Context(), schema, flags.DbConfig, name, afero.NewOsFs()) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Println("Finished " + utils.Aqua("supabase db pull") + ".") + }, + } + + dbRemoteCmd = &cobra.Command{ + Hidden: true, + 
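+ // Hidden because the remote subcommands are deprecated in favour of
+ // "db diff --use-migra --linked" and "db pull" (see Deprecated notes below).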
Use: "remote", + Short: "Manage remote databases", + } + + dbRemoteChangesCmd = &cobra.Command{ + Deprecated: "use \"db diff --use-migra --linked\" instead.\n", + Use: "changes", + Short: "Show changes on the remote database", + Long: "Show changes on the remote database since last migration.", + RunE: func(cmd *cobra.Command, args []string) error { + return changes.Run(cmd.Context(), schema, flags.DbConfig, afero.NewOsFs()) + }, + } + + dbRemoteCommitCmd = &cobra.Command{ + Deprecated: "use \"db pull\" instead.\n", + Use: "commit", + Short: "Commit remote changes as a new migration", + RunE: func(cmd *cobra.Command, args []string) error { + return commit.Run(cmd.Context(), schema, flags.DbConfig, afero.NewOsFs()) + }, + } + + noSeed bool + + dbResetCmd = &cobra.Command{ + Use: "reset", + Short: "Resets the local database to current migrations", + RunE: func(cmd *cobra.Command, args []string) error { + if noSeed { + utils.Config.Db.Seed.Enabled = false + } + return reset.Run(cmd.Context(), migrationVersion, flags.DbConfig, afero.NewOsFs()) + }, + } + + level = utils.EnumFlag{ + Allowed: lint.AllowedLevels, + Value: lint.AllowedLevels[0], + } + + lintFailOn = utils.EnumFlag{ + Allowed: append([]string{"none"}, lint.AllowedLevels...), + Value: "none", + } + + dbLintCmd = &cobra.Command{ + Use: "lint", + Short: "Checks local database for typing error", + RunE: func(cmd *cobra.Command, args []string) error { + return lint.Run(cmd.Context(), schema, level.Value, lintFailOn.Value, flags.DbConfig, afero.NewOsFs()) + }, + } + + fromBackup string + + dbStartCmd = &cobra.Command{ + Use: "start", + Short: "Starts local Postgres database", + RunE: func(cmd *cobra.Command, args []string) error { + return start.Run(cmd.Context(), fromBackup, afero.NewOsFs()) + }, + } + + dbTestCmd = &cobra.Command{ + Hidden: true, + Use: "test [path] ...", + Short: "Tests local database with pgTAP", + RunE: func(cmd *cobra.Command, args []string) error { + return test.Run(cmd.Context(), args, flags.DbConfig, afero.NewOsFs()) + }, + } +) + +func init() { + // Build branch command + dbBranchCmd.AddCommand(dbBranchCreateCmd) + dbBranchCmd.AddCommand(dbBranchDeleteCmd) + dbBranchCmd.AddCommand(dbBranchListCmd) + dbBranchCmd.AddCommand(dbSwitchCmd) + dbCmd.AddCommand(dbBranchCmd) + // Build diff command + diffFlags := dbDiffCmd.Flags() + diffFlags.BoolVar(&useMigra, "use-migra", true, "Use migra to generate schema diff.") + diffFlags.BoolVar(&usePgAdmin, "use-pgadmin", false, "Use pgAdmin to generate schema diff.") + diffFlags.BoolVar(&usePgSchema, "use-pg-schema", false, "Use pg-schema-diff to generate schema diff.") + dbDiffCmd.MarkFlagsMutuallyExclusive("use-migra", "use-pgadmin") + diffFlags.String("db-url", "", "Diffs against the database specified by the connection string (must be percent-encoded).") + diffFlags.Bool("linked", false, "Diffs local migration files against the linked project.") + diffFlags.Bool("local", true, "Diffs local migration files against the local database.") + dbDiffCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") + diffFlags.StringVarP(&file, "file", "f", "", "Saves schema diff to a new migration file.") + diffFlags.StringSliceVarP(&schema, "schema", "s", []string{}, "Comma separated list of schema to include.") + dbCmd.AddCommand(dbDiffCmd) + // Build dump command + dumpFlags := dbDumpCmd.Flags() + dumpFlags.BoolVar(&dryRun, "dry-run", false, "Prints the pg_dump script that would be executed.") + dumpFlags.BoolVar(&dataOnly, "data-only", false, "Dumps only data records.") + 
dumpFlags.BoolVar(&useCopy, "use-copy", false, "Uses copy statements in place of inserts.") + dumpFlags.StringSliceVarP(&excludeTable, "exclude", "x", []string{}, "List of schema.tables to exclude from data-only dump.") + dumpFlags.BoolVar(&roleOnly, "role-only", false, "Dumps only cluster roles.") + dbDumpCmd.MarkFlagsMutuallyExclusive("role-only", "data-only") + dumpFlags.BoolVar(&keepComments, "keep-comments", false, "Keeps commented lines from pg_dump output.") + dbDumpCmd.MarkFlagsMutuallyExclusive("keep-comments", "data-only") + dumpFlags.StringVarP(&file, "file", "f", "", "File path to save the dumped contents.") + dumpFlags.String("db-url", "", "Dumps from the database specified by the connection string (must be percent-encoded).") + dumpFlags.Bool("linked", true, "Dumps from the linked project.") + dumpFlags.Bool("local", false, "Dumps from the local database.") + dbDumpCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") + dumpFlags.StringVarP(&dbPassword, "password", "p", "", "Password to your remote Postgres database.") + cobra.CheckErr(viper.BindPFlag("DB_PASSWORD", dumpFlags.Lookup("password"))) + dumpFlags.StringSliceVarP(&schema, "schema", "s", []string{}, "Comma separated list of schema to include.") + dbDumpCmd.MarkFlagsMutuallyExclusive("schema", "role-only") + dbCmd.AddCommand(dbDumpCmd) + // Build push command + pushFlags := dbPushCmd.Flags() + pushFlags.BoolVar(&includeAll, "include-all", false, "Include all migrations not found on remote history table.") + pushFlags.BoolVar(&includeRoles, "include-roles", false, "Include custom roles from "+utils.CustomRolesPath+".") + pushFlags.BoolVar(&includeSeed, "include-seed", false, "Include seed data from your config.") + pushFlags.BoolVar(&dryRun, "dry-run", false, "Print the migrations that would be applied, but don't actually apply them.") + pushFlags.String("db-url", "", "Pushes to the database specified by the connection string (must be percent-encoded).") + pushFlags.Bool("linked", true, "Pushes to the linked project.") + pushFlags.Bool("local", false, "Pushes to the local database.") + dbPushCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") + pushFlags.StringVarP(&dbPassword, "password", "p", "", "Password to your remote Postgres database.") + cobra.CheckErr(viper.BindPFlag("DB_PASSWORD", pushFlags.Lookup("password"))) + dbCmd.AddCommand(dbPushCmd) + // Build pull command + pullFlags := dbPullCmd.Flags() + pullFlags.StringSliceVarP(&schema, "schema", "s", []string{}, "Comma separated list of schema to include.") + pullFlags.String("db-url", "", "Pulls from the database specified by the connection string (must be percent-encoded).") + pullFlags.Bool("linked", true, "Pulls from the linked project.") + pullFlags.Bool("local", false, "Pulls from the local database.") + dbPullCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") + pullFlags.StringVarP(&dbPassword, "password", "p", "", "Password to your remote Postgres database.") + cobra.CheckErr(viper.BindPFlag("DB_PASSWORD", pullFlags.Lookup("password"))) + dbCmd.AddCommand(dbPullCmd) + // Build remote command + remoteFlags := dbRemoteCmd.PersistentFlags() + remoteFlags.String("db-url", "", "Connect using the specified Postgres URL (must be percent-encoded).") + remoteFlags.StringVarP(&dbPassword, "password", "p", "", "Password to your remote Postgres database.") + cobra.CheckErr(viper.BindPFlag("DB_PASSWORD", remoteFlags.Lookup("password"))) + remoteFlags.StringSliceVarP(&schema, "schema", "s", []string{}, "Comma separated list of schema to 
include.") + dbRemoteCmd.AddCommand(dbRemoteChangesCmd) + dbRemoteCmd.AddCommand(dbRemoteCommitCmd) + dbCmd.AddCommand(dbRemoteCmd) + // Build reset command + resetFlags := dbResetCmd.Flags() + resetFlags.String("db-url", "", "Resets the database specified by the connection string (must be percent-encoded).") + resetFlags.Bool("linked", false, "Resets the linked project with local migrations.") + resetFlags.Bool("local", true, "Resets the local database with local migrations.") + resetFlags.BoolVar(&noSeed, "no-seed", false, "Skip running the seed script after reset.") + dbResetCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") + resetFlags.StringVar(&migrationVersion, "version", "", "Reset up to the specified version.") + dbCmd.AddCommand(dbResetCmd) + // Build lint command + lintFlags := dbLintCmd.Flags() + lintFlags.String("db-url", "", "Lints the database specified by the connection string (must be percent-encoded).") + lintFlags.Bool("linked", false, "Lints the linked project for schema errors.") + lintFlags.Bool("local", true, "Lints the local database for schema errors.") + dbLintCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") + lintFlags.StringSliceVarP(&schema, "schema", "s", []string{}, "Comma separated list of schema to include.") + lintFlags.Var(&level, "level", "Error level to emit.") + lintFlags.Var(&lintFailOn, "fail-on", "Error level to exit with non-zero status.") + dbCmd.AddCommand(dbLintCmd) + // Build start command + startFlags := dbStartCmd.Flags() + startFlags.StringVar(&fromBackup, "from-backup", "", "Path to a logical backup file.") + dbCmd.AddCommand(dbStartCmd) + // Build test command + dbCmd.AddCommand(dbTestCmd) + testFlags := dbTestCmd.Flags() + testFlags.String("db-url", "", "Tests the database specified by the connection string (must be percent-encoded).") + testFlags.Bool("linked", false, "Runs pgTAP tests on the linked project.") + testFlags.Bool("local", true, "Runs pgTAP tests on the local database.") + dbTestCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") + rootCmd.AddCommand(dbCmd) +} diff --git a/cmd/domains.go b/cmd/domains.go new file mode 100644 index 0000000..9ec4a09 --- /dev/null +++ b/cmd/domains.go @@ -0,0 +1,89 @@ +package cmd + +import ( + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/hostnames/activate" + "github.com/supabase/cli/internal/hostnames/create" + "github.com/supabase/cli/internal/hostnames/delete" + "github.com/supabase/cli/internal/hostnames/get" + "github.com/supabase/cli/internal/hostnames/reverify" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + customHostnamesCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "domains", + Short: "Manage custom domain names for Supabase projects", + Long: `Manage custom domain names for Supabase projects. + +Use of custom domains and vanity subdomains is mutually exclusive. +`, + } + + rawOutput bool + customHostname string + + customHostnamesCreateCmd = &cobra.Command{ + Use: "create", + Short: "Create a custom hostname", + Long: `Create a custom hostname for your Supabase project. 
+ +Expects your custom hostname to have a CNAME record to your Supabase project's subdomain.`, + RunE: func(cmd *cobra.Command, args []string) error { + return create.Run(cmd.Context(), flags.ProjectRef, customHostname, rawOutput, afero.NewOsFs()) + }, + } + + customHostnamesGetCmd = &cobra.Command{ + Use: "get", + Short: "Get the current custom hostname config", + Long: "Retrieve the custom hostname config for your project, as stored in the Supabase platform.", + RunE: func(cmd *cobra.Command, args []string) error { + return get.Run(cmd.Context(), flags.ProjectRef, rawOutput, afero.NewOsFs()) + }, + } + + customHostnamesReverifyCmd = &cobra.Command{ + Use: "reverify", + Short: "Re-verify the custom hostname config for your project", + RunE: func(cmd *cobra.Command, args []string) error { + return reverify.Run(cmd.Context(), flags.ProjectRef, rawOutput, afero.NewOsFs()) + }, + } + + customHostnamesActivateCmd = &cobra.Command{ + Use: "activate", + Short: "Activate the custom hostname for a project", + Long: `Activates the custom hostname configuration for a project. + +This reconfigures your Supabase project to respond to requests on your custom hostname. +After the custom hostname is activated, your project's auth services will no longer function on the Supabase-provisioned subdomain.`, + RunE: func(cmd *cobra.Command, args []string) error { + return activate.Run(cmd.Context(), flags.ProjectRef, rawOutput, afero.NewOsFs()) + }, + } + + customHostnamesDeleteCmd = &cobra.Command{ + Use: "delete", + Short: "Deletes the custom hostname config for your project", + RunE: func(cmd *cobra.Command, args []string) error { + return delete.Run(cmd.Context(), flags.ProjectRef, afero.NewOsFs()) + }, + } +) + +func init() { + persistentFlags := customHostnamesCmd.PersistentFlags() + persistentFlags.StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + persistentFlags.BoolVar(&rawOutput, "include-raw-output", false, "Include raw output (useful for debugging).") + customHostnamesCreateCmd.Flags().StringVar(&customHostname, "custom-hostname", "", "The custom hostname to use for your Supabase project.") + customHostnamesCmd.AddCommand(customHostnamesGetCmd) + customHostnamesCmd.AddCommand(customHostnamesCreateCmd) + customHostnamesCmd.AddCommand(customHostnamesReverifyCmd) + customHostnamesCmd.AddCommand(customHostnamesActivateCmd) + customHostnamesCmd.AddCommand(customHostnamesDeleteCmd) + + rootCmd.AddCommand(customHostnamesCmd) +} diff --git a/cmd/encryption.go b/cmd/encryption.go new file mode 100644 index 0000000..d3b654a --- /dev/null +++ b/cmd/encryption.go @@ -0,0 +1,41 @@ +package cmd + +import ( + "os" + + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/encryption/get" + "github.com/supabase/cli/internal/encryption/update" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + encryptionCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "encryption", + Short: "Manage encryption keys of Supabase projects", + } + + rootKeyGetCmd = &cobra.Command{ + Use: "get-root-key", + Short: "Get the root encryption key of a Supabase project", + RunE: func(cmd *cobra.Command, args []string) error { + return get.Run(cmd.Context(), flags.ProjectRef) + }, + } + + rootKeyUpdateCmd = &cobra.Command{ + Use: "update-root-key", + Short: "Update root encryption key of a Supabase project", + RunE: func(cmd *cobra.Command, args []string) error { + return update.Run(cmd.Context(), flags.ProjectRef, os.Stdin) + }, + } +) + +func init() { + 
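+ // Register the root-key subcommands; --project-ref is shared below as a persistent flag.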
encryptionCmd.PersistentFlags().StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + encryptionCmd.AddCommand(rootKeyUpdateCmd) + encryptionCmd.AddCommand(rootKeyGetCmd) + rootCmd.AddCommand(encryptionCmd) +} diff --git a/cmd/functions.go b/cmd/functions.go new file mode 100644 index 0000000..cc5ffa5 --- /dev/null +++ b/cmd/functions.go @@ -0,0 +1,162 @@ +package cmd + +import ( + "fmt" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/functions/delete" + "github.com/supabase/cli/internal/functions/deploy" + "github.com/supabase/cli/internal/functions/download" + "github.com/supabase/cli/internal/functions/list" + new_ "github.com/supabase/cli/internal/functions/new" + "github.com/supabase/cli/internal/functions/serve" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/cast" +) + +var ( + functionsCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "functions", + Short: "Manage Supabase Edge functions", + } + + functionsListCmd = &cobra.Command{ + Use: "list", + Short: "List all Functions in Supabase", + Long: "List all Functions in the linked Supabase project.", + RunE: func(cmd *cobra.Command, args []string) error { + return list.Run(cmd.Context(), flags.ProjectRef, afero.NewOsFs()) + }, + } + + functionsDeleteCmd = &cobra.Command{ + Use: "delete <Function name>", + Short: "Delete a Function from Supabase", + Long: "Delete a Function from the linked Supabase project. This does NOT remove the Function locally.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return delete.Run(cmd.Context(), args[0], flags.ProjectRef, afero.NewOsFs()) + }, + } + + functionsDownloadCmd = &cobra.Command{ + Use: "download <Function name>", + Short: "Download a Function from Supabase", + Long: "Download the source code for a Function from the linked Supabase project.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return download.Run(cmd.Context(), args[0], flags.ProjectRef, useLegacyBundle, afero.NewOsFs()) + }, + } + + useApi bool + useDocker bool + useLegacyBundle bool + noVerifyJWT = new(bool) + importMapPath string + + functionsDeployCmd = &cobra.Command{ + Use: "deploy [Function name]", + Short: "Deploy a Function to Supabase", + Long: "Deploy a Function to the linked Supabase project.", + RunE: func(cmd *cobra.Command, args []string) error { + // Fallback to config if user did not set the flag.
+ if !cmd.Flags().Changed("no-verify-jwt") { + noVerifyJWT = nil + } + if useApi { + useDocker = false + } else if maxJobs > 1 { + return errors.New("--jobs must be used together with --use-api") + } + return deploy.Run(cmd.Context(), args, useDocker, noVerifyJWT, importMapPath, maxJobs, afero.NewOsFs()) + }, + } + + functionsNewCmd = &cobra.Command{ + Use: "new <Function name>", + Short: "Create a new Function locally", + Args: cobra.ExactArgs(1), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + cmd.GroupID = groupLocalDev + return cmd.Root().PersistentPreRunE(cmd, args) + }, + RunE: func(cmd *cobra.Command, args []string) error { + return new_.Run(cmd.Context(), args[0], afero.NewOsFs()) + }, + } + + envFilePath string + inspectBrk bool + inspectMode = utils.EnumFlag{ + Allowed: []string{ + string(serve.InspectModeRun), + string(serve.InspectModeBrk), + string(serve.InspectModeWait), + }, + } + runtimeOption serve.RuntimeOption + + functionsServeCmd = &cobra.Command{ + Use: "serve", + Short: "Serve all Functions locally", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + cmd.GroupID = groupLocalDev + return cmd.Root().PersistentPreRunE(cmd, args) + }, + RunE: func(cmd *cobra.Command, args []string) error { + // Fallback to config if user did not set the flag. + if !cmd.Flags().Changed("no-verify-jwt") { + noVerifyJWT = nil + } + + if len(inspectMode.Value) > 0 { + runtimeOption.InspectMode = cast.Ptr(serve.InspectMode(inspectMode.Value)) + } else if inspectBrk { + runtimeOption.InspectMode = cast.Ptr(serve.InspectModeBrk) + } + if runtimeOption.InspectMode == nil && runtimeOption.InspectMain { + return fmt.Errorf("--inspect-main must be used together with one of these flags: [inspect inspect-mode]") + } + + return serve.Run(cmd.Context(), envFilePath, noVerifyJWT, importMapPath, runtimeOption, afero.NewOsFs()) + }, + } +) + +func init() { + functionsListCmd.Flags().StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + functionsDeleteCmd.Flags().StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + deployFlags := functionsDeployCmd.Flags() + deployFlags.BoolVar(&useApi, "use-api", false, "Use Management API to bundle functions.") + deployFlags.BoolVar(&useDocker, "use-docker", true, "Use Docker to bundle functions.") + deployFlags.BoolVar(&useLegacyBundle, "legacy-bundle", false, "Use legacy bundling mechanism.") + functionsDeployCmd.MarkFlagsMutuallyExclusive("use-api", "use-docker", "legacy-bundle") + cobra.CheckErr(deployFlags.MarkHidden("legacy-bundle")) + deployFlags.UintVarP(&maxJobs, "jobs", "j", 1, "Maximum number of parallel jobs.") + deployFlags.BoolVar(noVerifyJWT, "no-verify-jwt", false, "Disable JWT verification for the Function.") + deployFlags.StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + deployFlags.StringVar(&importMapPath, "import-map", "", "Path to import map file.") + functionsServeCmd.Flags().BoolVar(noVerifyJWT, "no-verify-jwt", false, "Disable JWT verification for the Function.") + functionsServeCmd.Flags().StringVar(&envFilePath, "env-file", "", "Path to an env file to be populated to the Function environment.") + functionsServeCmd.Flags().StringVar(&importMapPath, "import-map", "", "Path to import map file.") + functionsServeCmd.Flags().BoolVar(&inspectBrk, "inspect", false, "Alias of --inspect-mode brk.") + functionsServeCmd.Flags().Var(&inspectMode, "inspect-mode", "Activate inspector capability for debugging.") +
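+ // Example usage: supabase functions serve --inspect-mode brk + // Per the RunE mapping above, --inspect is shorthand for --inspect-mode brk.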
functionsServeCmd.Flags().BoolVar(&runtimeOption.InspectMain, "inspect-main", false, "Allow inspecting the main worker.") + functionsServeCmd.MarkFlagsMutuallyExclusive("inspect", "inspect-mode") + functionsServeCmd.Flags().Bool("all", true, "Serve all Functions.") + cobra.CheckErr(functionsServeCmd.Flags().MarkHidden("all")) + functionsDownloadCmd.Flags().StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + functionsDownloadCmd.Flags().BoolVar(&useLegacyBundle, "legacy-bundle", false, "Use legacy bundling mechanism.") + functionsCmd.AddCommand(functionsListCmd) + functionsCmd.AddCommand(functionsDeleteCmd) + functionsCmd.AddCommand(functionsDeployCmd) + functionsCmd.AddCommand(functionsNewCmd) + functionsCmd.AddCommand(functionsServeCmd) + functionsCmd.AddCommand(functionsDownloadCmd) + rootCmd.AddCommand(functionsCmd) +} diff --git a/cmd/gen.go b/cmd/gen.go new file mode 100644 index 0000000..7f936bd --- /dev/null +++ b/cmd/gen.go @@ -0,0 +1,121 @@ +package cmd + +import ( + "os" + "os/signal" + + env "github.com/Netflix/go-env" + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/gen/keys" + "github.com/supabase/cli/internal/gen/types" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + genCmd = &cobra.Command{ + GroupID: groupLocalDev, + Use: "gen", + Short: "Run code generation tools", + } + + keyNames keys.CustomName + keyOutput = utils.EnumFlag{ + Allowed: []string{ + utils.OutputEnv, + utils.OutputJson, + utils.OutputToml, + utils.OutputYaml, + }, + Value: utils.OutputEnv, + } + + genKeysCmd = &cobra.Command{ + Use: "keys", + Short: "Generate keys for preview branch", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + es, err := env.EnvironToEnvSet(override) + if err != nil { + return err + } + if err := env.Unmarshal(es, &keyNames); err != nil { + return err + } + cmd.GroupID = groupManagementAPI + return cmd.Root().PersistentPreRunE(cmd, args) + }, + RunE: func(cmd *cobra.Command, args []string) error { + return keys.Run(cmd.Context(), flags.ProjectRef, keyOutput.Value, keyNames, afero.NewOsFs()) + }, + } + + lang = utils.EnumFlag{ + Allowed: []string{ + types.LangTypescript, + types.LangGo, + types.LangSwift, + }, + Value: types.LangTypescript, + } + postgrestV9Compat bool + swiftAccessControl = utils.EnumFlag{ + Allowed: []string{ + types.SwiftInternalAccessControl, + types.SwiftPublicAccessControl, + }, + Value: types.SwiftInternalAccessControl, + } + + genTypesCmd = &cobra.Command{ + Use: "types", + Short: "Generate types from Postgres schema", + PreRunE: func(cmd *cobra.Command, args []string) error { + if postgrestV9Compat && !cmd.Flags().Changed("db-url") { + return errors.New("--postgrest-v9-compat must be used together with --db-url") + } + // Legacy commands specify language using arg, e.g. gen types typescript + if len(args) > 0 && args[0] != types.LangTypescript && !cmd.Flags().Changed("lang") { + return errors.New("use --lang flag to specify the typegen language") + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt) + if flags.DbConfig.Host == "" { + // If no flag is specified, prompt for project id.
+ if err := flags.ParseProjectRef(ctx, afero.NewMemMapFs()); errors.Is(err, utils.ErrNotLinked) { + return errors.New("Must specify one of --local, --linked, --project-id, or --db-url") + } else if err != nil { + return err + } + } + return types.Run(ctx, flags.ProjectRef, flags.DbConfig, lang.Value, schema, postgrestV9Compat, swiftAccessControl.Value, afero.NewOsFs()) + }, + Example: ` supabase gen types --local + supabase gen types --linked --lang=go + supabase gen types --project-id abc-def-123 --schema public --schema private + supabase gen types --db-url 'postgresql://...' --schema public --schema auth`, + } +) + +func init() { + typeFlags := genTypesCmd.Flags() + typeFlags.Bool("local", false, "Generate types from the local dev database.") + typeFlags.Bool("linked", false, "Generate types from the linked project.") + typeFlags.String("db-url", "", "Generate types from a database url.") + typeFlags.StringVar(&flags.ProjectRef, "project-id", "", "Generate types from a project ID.") + genTypesCmd.MarkFlagsMutuallyExclusive("local", "linked", "project-id", "db-url") + typeFlags.Var(&lang, "lang", "Output language of the generated types.") + typeFlags.StringSliceVarP(&schema, "schema", "s", []string{}, "Comma separated list of schema to include.") + typeFlags.Var(&swiftAccessControl, "swift-access-control", "Access control for Swift generated types.") + typeFlags.BoolVar(&postgrestV9Compat, "postgrest-v9-compat", false, "Generate types compatible with PostgREST v9 and below. Only use together with --db-url.") + genCmd.AddCommand(genTypesCmd) + keyFlags := genKeysCmd.Flags() + keyFlags.StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + keyFlags.VarP(&keyOutput, "output", "o", "Output format of key variables.") + keyFlags.StringSliceVar(&override, "override-name", []string{}, "Override specific variable names.") + genCmd.AddCommand(genKeysCmd) + rootCmd.AddCommand(genCmd) +} diff --git a/cmd/generateFigSpec.go b/cmd/generateFigSpec.go new file mode 100644 index 0000000..a23fbd2 --- /dev/null +++ b/cmd/generateFigSpec.go @@ -0,0 +1,9 @@ +package cmd + +import ( + generateFigSpec "github.com/withfig/autocomplete-tools/packages/cobra" +) + +func init() { + rootCmd.AddCommand(generateFigSpec.NewCmdGenFigSpec()) +} diff --git a/cmd/init.go b/cmd/init.go new file mode 100644 index 0000000..a9d2d90 --- /dev/null +++ b/cmd/init.go @@ -0,0 +1,63 @@ +package cmd + +import ( + "fmt" + "os" + "os/signal" + + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/spf13/viper" + _init "github.com/supabase/cli/internal/init" + "github.com/supabase/cli/internal/utils" +) + +var ( + createVscodeSettings = new(bool) + createIntellijSettings = new(bool) + initParams = utils.InitParams{} + + initCmd = &cobra.Command{ + GroupID: groupLocalDev, + Use: "init", + Short: "Initialize a local project", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if !viper.IsSet("WORKDIR") { + // Prevents recursing to parent directory + viper.Set("WORKDIR", ".") + } + return cmd.Root().PersistentPreRunE(cmd, args) + }, + PreRun: func(cmd *cobra.Command, args []string) { + if initParams.UseOrioleDB { + cobra.CheckErr(cmd.MarkFlagRequired("experimental")) + } + }, + RunE: func(cmd *cobra.Command, args []string) error { + fsys := afero.NewOsFs() + if !cmd.Flags().Changed("with-vscode-settings") && !cmd.Flags().Changed("with-vscode-workspace") { + createVscodeSettings = nil + } + + if !cmd.Flags().Changed("with-intellij-settings") { + createIntellijSettings = nil 
+ } + ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt) + return _init.Run(ctx, fsys, createVscodeSettings, createIntellijSettings, initParams) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Println("Finished " + utils.Aqua("supabase init") + ".") + }, + } +) + +func init() { + flags := initCmd.Flags() + flags.BoolVar(createVscodeSettings, "with-vscode-workspace", false, "Generate VS Code workspace.") + cobra.CheckErr(flags.MarkHidden("with-vscode-workspace")) + flags.BoolVar(createVscodeSettings, "with-vscode-settings", false, "Generate VS Code settings for Deno.") + flags.BoolVar(createIntellijSettings, "with-intellij-settings", false, "Generate IntelliJ IDEA settings for Deno.") + flags.BoolVar(&initParams.UseOrioleDB, "use-orioledb", false, "Use OrioleDB storage engine for Postgres.") + flags.BoolVar(&initParams.Overwrite, "force", false, "Overwrite existing "+utils.ConfigPath+".") + rootCmd.AddCommand(initCmd) +} diff --git a/cmd/inspect.go b/cmd/inspect.go new file mode 100644 index 0000000..2b55c38 --- /dev/null +++ b/cmd/inspect.go @@ -0,0 +1,265 @@ +package cmd + +import ( + "fmt" + "os" + "os/signal" + "path/filepath" + + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/inspect/bloat" + "github.com/supabase/cli/internal/inspect/blocking" + "github.com/supabase/cli/internal/inspect/cache" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + + "github.com/supabase/cli/internal/inspect" + "github.com/supabase/cli/internal/inspect/calls" + "github.com/supabase/cli/internal/inspect/index_sizes" + "github.com/supabase/cli/internal/inspect/index_usage" + "github.com/supabase/cli/internal/inspect/locks" + "github.com/supabase/cli/internal/inspect/long_running_queries" + "github.com/supabase/cli/internal/inspect/outliers" + "github.com/supabase/cli/internal/inspect/replication_slots" + "github.com/supabase/cli/internal/inspect/role_configs" + "github.com/supabase/cli/internal/inspect/role_connections" + "github.com/supabase/cli/internal/inspect/seq_scans" + "github.com/supabase/cli/internal/inspect/table_index_sizes" + "github.com/supabase/cli/internal/inspect/table_record_counts" + "github.com/supabase/cli/internal/inspect/table_sizes" + "github.com/supabase/cli/internal/inspect/total_index_size" + "github.com/supabase/cli/internal/inspect/total_table_sizes" + "github.com/supabase/cli/internal/inspect/unused_indexes" + "github.com/supabase/cli/internal/inspect/vacuum_stats" +) + +var ( + inspectCmd = &cobra.Command{ + GroupID: groupLocalDev, + Use: "inspect", + Short: "Tools to inspect your Supabase project", + } + + inspectDBCmd = &cobra.Command{ + Use: "db", + Short: "Tools to inspect your Supabase database", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt) + cmd.SetContext(ctx) + return cmd.Root().PersistentPreRunE(cmd, args) + }, + } + + inspectCacheHitCmd = &cobra.Command{ + Use: "cache-hit", + Short: "Show cache hit rates for tables and indices", + RunE: func(cmd *cobra.Command, args []string) error { + return cache.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectReplicationSlotsCmd = &cobra.Command{ + Use: "replication-slots", + Short: "Show information about replication slots on the database", + RunE: func(cmd *cobra.Command, args []string) error { + return replication_slots.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectIndexUsageCmd = 
&cobra.Command{ + Use: "index-usage", + Short: "Show information about the efficiency of indexes", + RunE: func(cmd *cobra.Command, args []string) error { + return index_usage.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectLocksCmd = &cobra.Command{ + Use: "locks", + Short: "Show queries which have taken out an exclusive lock on a relation", + RunE: func(cmd *cobra.Command, args []string) error { + return locks.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectBlockingCmd = &cobra.Command{ + Use: "blocking", + Short: "Show queries that are holding locks and the queries that are waiting for them to be released", + RunE: func(cmd *cobra.Command, args []string) error { + return blocking.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectOutliersCmd = &cobra.Command{ + Use: "outliers", + Short: "Show queries from pg_stat_statements ordered by total execution time", + RunE: func(cmd *cobra.Command, args []string) error { + return outliers.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectCallsCmd = &cobra.Command{ + Use: "calls", + Short: "Show queries from pg_stat_statements ordered by total times called", + RunE: func(cmd *cobra.Command, args []string) error { + return calls.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectTotalIndexSizeCmd = &cobra.Command{ + Use: "total-index-size", + Short: "Show total size of all indexes", + RunE: func(cmd *cobra.Command, args []string) error { + return total_index_size.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectIndexSizesCmd = &cobra.Command{ + Use: "index-sizes", + Short: "Show index sizes of individual indexes", + RunE: func(cmd *cobra.Command, args []string) error { + return index_sizes.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectTableSizesCmd = &cobra.Command{ + Use: "table-sizes", + Short: "Show table sizes of individual tables without their index sizes", + RunE: func(cmd *cobra.Command, args []string) error { + return table_sizes.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectTableIndexSizesCmd = &cobra.Command{ + Use: "table-index-sizes", + Short: "Show index sizes of individual tables", + RunE: func(cmd *cobra.Command, args []string) error { + return table_index_sizes.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectTotalTableSizesCmd = &cobra.Command{ + Use: "total-table-sizes", + Short: "Show total table sizes, including table index sizes", + RunE: func(cmd *cobra.Command, args []string) error { + return total_table_sizes.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectUnusedIndexesCmd = &cobra.Command{ + Use: "unused-indexes", + Short: "Show indexes with low usage", + RunE: func(cmd *cobra.Command, args []string) error { + return unused_indexes.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectSeqScansCmd = &cobra.Command{ + Use: "seq-scans", + Short: "Show number of sequential scans recorded against all tables", + RunE: func(cmd *cobra.Command, args []string) error { + return seq_scans.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectLongRunningQueriesCmd = &cobra.Command{ + Use: "long-running-queries", + Short: "Show currently running queries running for longer than 5 minutes", + RunE: func(cmd *cobra.Command, args []string) error { + return long_running_queries.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectTableRecordCountsCmd = 
&cobra.Command{ + Use: "table-record-counts", + Short: "Show estimated number of rows per table", + RunE: func(cmd *cobra.Command, args []string) error { + return table_record_counts.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectBloatCmd = &cobra.Command{ + Use: "bloat", + Short: "Estimates space allocated to a relation that is full of dead tuples", + RunE: func(cmd *cobra.Command, args []string) error { + return bloat.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectVacuumStatsCmd = &cobra.Command{ + Use: "vacuum-stats", + Short: "Show statistics related to vacuum operations per table", + RunE: func(cmd *cobra.Command, args []string) error { + return vacuum_stats.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectRoleConfigsCmd = &cobra.Command{ + Use: "role-configs", + Short: "Show configuration settings for database roles when they have been modified", + RunE: func(cmd *cobra.Command, args []string) error { + return role_configs.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + inspectRoleConnectionsCmd = &cobra.Command{ + Use: "role-connections", + Short: "Show number of active connections for all database roles", + RunE: func(cmd *cobra.Command, args []string) error { + return role_connections.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + outputDir string + + reportCmd = &cobra.Command{ + Use: "report", + Short: "Generate a CSV output for all inspect commands", + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + if len(outputDir) == 0 { + defaultPath := filepath.Join(utils.CurrentDirAbs, "report") + title := fmt.Sprintf("Enter a directory to save output files (or leave blank to use %s): ", utils.Bold(defaultPath)) + if dir, err := utils.NewConsole().PromptText(ctx, title); err != nil { + return err + } else if len(dir) == 0 { + outputDir = defaultPath + } + } + return inspect.Report(ctx, outputDir, flags.DbConfig, afero.NewOsFs()) + }, + } +) + +func init() { + inspectFlags := inspectCmd.PersistentFlags() + inspectFlags.String("db-url", "", "Inspect the database specified by the connection string (must be percent-encoded).") + inspectFlags.Bool("linked", true, "Inspect the linked project.") + inspectFlags.Bool("local", false, "Inspect the local database.") + inspectCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") + inspectDBCmd.AddCommand(inspectCacheHitCmd) + inspectDBCmd.AddCommand(inspectReplicationSlotsCmd) + inspectDBCmd.AddCommand(inspectIndexUsageCmd) + inspectDBCmd.AddCommand(inspectLocksCmd) + inspectDBCmd.AddCommand(inspectBlockingCmd) + inspectDBCmd.AddCommand(inspectOutliersCmd) + inspectDBCmd.AddCommand(inspectCallsCmd) + inspectDBCmd.AddCommand(inspectTotalIndexSizeCmd) + inspectDBCmd.AddCommand(inspectIndexSizesCmd) + inspectDBCmd.AddCommand(inspectTableSizesCmd) + inspectDBCmd.AddCommand(inspectTableIndexSizesCmd) + inspectDBCmd.AddCommand(inspectTotalTableSizesCmd) + inspectDBCmd.AddCommand(inspectUnusedIndexesCmd) + inspectDBCmd.AddCommand(inspectSeqScansCmd) + inspectDBCmd.AddCommand(inspectLongRunningQueriesCmd) + inspectDBCmd.AddCommand(inspectTableRecordCountsCmd) + inspectDBCmd.AddCommand(inspectBloatCmd) + inspectDBCmd.AddCommand(inspectVacuumStatsCmd) + inspectDBCmd.AddCommand(inspectRoleConfigsCmd) + inspectDBCmd.AddCommand(inspectRoleConnectionsCmd) + inspectCmd.AddCommand(inspectDBCmd) + reportCmd.Flags().StringVar(&outputDir, "output-dir", "", "Path to save CSV files in") + inspectCmd.AddCommand(reportCmd) + 
rootCmd.AddCommand(inspectCmd) +} diff --git a/cmd/link.go b/cmd/link.go new file mode 100644 index 0000000..65b8b5f --- /dev/null +++ b/cmd/link.go @@ -0,0 +1,48 @@ +package cmd + +import ( + "os" + "os/signal" + + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/supabase/cli/internal/link" + "github.com/supabase/cli/internal/utils/flags" + "golang.org/x/term" +) + +var ( + linkCmd = &cobra.Command{ + GroupID: groupLocalDev, + Use: "link", + Short: "Link to a Supabase project", + PreRunE: func(cmd *cobra.Command, args []string) error { + if !term.IsTerminal(int(os.Stdin.Fd())) && !viper.IsSet("PROJECT_ID") { + return cmd.MarkFlagRequired("project-ref") + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt) + // Use an empty fs to skip loading from file + if err := flags.ParseProjectRef(ctx, afero.NewMemMapFs()); err != nil { + return err + } + fsys := afero.NewOsFs() + if err := flags.LoadConfig(fsys); err != nil { + return err + } + return link.Run(ctx, flags.ProjectRef, fsys) + }, + } +) + +func init() { + linkFlags := linkCmd.Flags() + linkFlags.StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + linkFlags.StringVarP(&dbPassword, "password", "p", "", "Password to your remote Postgres database.") + // For some reason, BindPFlag only works for StringVarP instead of StringP + cobra.CheckErr(viper.BindPFlag("DB_PASSWORD", linkFlags.Lookup("password"))) + rootCmd.AddCommand(linkCmd) +} diff --git a/cmd/login.go b/cmd/login.go new file mode 100644 index 0000000..ca9aea6 --- /dev/null +++ b/cmd/login.go @@ -0,0 +1,51 @@ +package cmd + +import ( + "os" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/login" + "github.com/supabase/cli/internal/utils" + "golang.org/x/term" +) + +var ( + ErrMissingToken = errors.Errorf("Cannot use automatic login flow inside non-TTY environments. Please provide %s flag or set the %s environment variable.", utils.Aqua("--token"), utils.Aqua("SUPABASE_ACCESS_TOKEN")) +) + +var ( + params = login.RunParams{ + // Skip the browser if we are inside a non-TTY environment, which is the case for any CI.
+ OpenBrowser: term.IsTerminal(int(os.Stdin.Fd())), + Fsys: afero.NewOsFs(), + } + + loginCmd = &cobra.Command{ + GroupID: groupLocalDev, + Use: "login", + Short: "Authenticate using an access token", + RunE: func(cmd *cobra.Command, args []string) error { + if params.Token == "" { + params.Token = login.ParseAccessToken(os.Stdin) + } + if params.Token == "" && !params.OpenBrowser { + return ErrMissingToken + } + if cmd.Flags().Changed("no-browser") { + params.OpenBrowser = false + } + return login.Run(cmd.Context(), os.Stdout, params) + }, + } +) + +func init() { + loginFlags := loginCmd.Flags() + loginFlags.StringVar(&params.Token, "token", "", "Use provided token instead of automatic login flow") + loginFlags.StringVar(&params.TokenName, "name", "", "Name that will be used to store the token in your settings") + loginFlags.Lookup("name").DefValue = "built-in token name generator" + loginFlags.Bool("no-browser", false, "Do not open browser automatically") + rootCmd.AddCommand(loginCmd) +} diff --git a/cmd/logout.go b/cmd/logout.go new file mode 100644 index 0000000..dda3dd4 --- /dev/null +++ b/cmd/logout.go @@ -0,0 +1,24 @@ +package cmd + +import ( + "os" + + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/logout" +) + +var ( + logoutCmd = &cobra.Command{ + GroupID: groupLocalDev, + Use: "logout", + Short: "Log out and delete access tokens locally", + RunE: func(cmd *cobra.Command, args []string) error { + return logout.Run(cmd.Context(), os.Stdout, afero.NewOsFs()) + }, + } +) + +func init() { + rootCmd.AddCommand(logoutCmd) +} diff --git a/cmd/migration.go b/cmd/migration.go new file mode 100644 index 0000000..30b7716 --- /dev/null +++ b/cmd/migration.go @@ -0,0 +1,154 @@ +package cmd + +import ( + "fmt" + "os" + "os/signal" + + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/supabase/cli/internal/migration/fetch" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/migration/new" + "github.com/supabase/cli/internal/migration/repair" + "github.com/supabase/cli/internal/migration/squash" + "github.com/supabase/cli/internal/migration/up" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + migrationCmd = &cobra.Command{ + GroupID: groupLocalDev, + Use: "migration", + Aliases: []string{"migrations"}, + Short: "Manage database migration scripts", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt) + cmd.SetContext(ctx) + return cmd.Root().PersistentPreRunE(cmd, args) + }, + } + + migrationListCmd = &cobra.Command{ + Use: "list", + Short: "List local and remote migrations", + RunE: func(cmd *cobra.Command, args []string) error { + return list.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } + + migrationNewCmd = &cobra.Command{ + Use: "new <migration name>", + Short: "Create an empty migration script", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return new.Run(args[0], os.Stdin, afero.NewOsFs()) + }, + } + + targetStatus = utils.EnumFlag{ + Allowed: []string{ + repair.Applied, + repair.Reverted, + }, + } + + migrationRepairCmd = &cobra.Command{ + Use: "repair [version] ...", + Short: "Repair the migration history table", + RunE: func(cmd *cobra.Command, args []string) error { + return repair.Run(cmd.Context(), flags.DbConfig, args, targetStatus.Value, afero.NewOsFs()) + }, + PostRun: func(cmd *cobra.Command, args
[]string) { + fmt.Println("Finished " + utils.Aqua("supabase migration repair") + ".") + }, + } + + migrationVersion string + + migrationSquashCmd = &cobra.Command{ + Use: "squash", + Short: "Squash migrations to a single file", + RunE: func(cmd *cobra.Command, args []string) error { + return squash.Run(cmd.Context(), migrationVersion, flags.DbConfig, afero.NewOsFs()) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Println("Finished " + utils.Aqua("supabase migration squash") + ".") + }, + } + + migrationUpCmd = &cobra.Command{ + Use: "up", + Short: "Apply pending migrations to local database", + RunE: func(cmd *cobra.Command, args []string) error { + return up.Run(cmd.Context(), includeAll, flags.DbConfig, afero.NewOsFs()) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Println("Local database is up to date.") + }, + } + + migrationFetchCmd = &cobra.Command{ + Use: "fetch", + Short: "Fetch migration files from history table", + RunE: func(cmd *cobra.Command, args []string) error { + return fetch.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) + }, + } +) + +func init() { + // Build list command + listFlags := migrationListCmd.Flags() + listFlags.String("db-url", "", "Lists migrations of the database specified by the connection string (must be percent-encoded).") + listFlags.Bool("linked", true, "Lists migrations applied to the linked project.") + listFlags.Bool("local", false, "Lists migrations applied to the local database.") + migrationListCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") + listFlags.StringVarP(&dbPassword, "password", "p", "", "Password to your remote Postgres database.") + cobra.CheckErr(viper.BindPFlag("DB_PASSWORD", listFlags.Lookup("password"))) + migrationListCmd.MarkFlagsMutuallyExclusive("db-url", "password") + migrationCmd.AddCommand(migrationListCmd) + // Build repair command + repairFlags := migrationRepairCmd.Flags() + repairFlags.Var(&targetStatus, "status", "Version status to update.") + cobra.CheckErr(migrationRepairCmd.MarkFlagRequired("status")) + repairFlags.String("db-url", "", "Repairs migrations of the database specified by the connection string (must be percent-encoded).") + repairFlags.Bool("linked", true, "Repairs the migration history of the linked project.") + repairFlags.Bool("local", false, "Repairs the migration history of the local database.") + migrationRepairCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") + repairFlags.StringVarP(&dbPassword, "password", "p", "", "Password to your remote Postgres database.") + cobra.CheckErr(viper.BindPFlag("DB_PASSWORD", repairFlags.Lookup("password"))) + migrationRepairCmd.MarkFlagsMutuallyExclusive("db-url", "password") + migrationCmd.AddCommand(migrationRepairCmd) + // Build squash command + squashFlags := migrationSquashCmd.Flags() + squashFlags.StringVar(&migrationVersion, "version", "", "Squash up to the specified version.") + squashFlags.String("db-url", "", "Squashes migrations of the database specified by the connection string (must be percent-encoded).") + squashFlags.Bool("linked", false, "Squashes the migration history of the linked project.") + squashFlags.Bool("local", true, "Squashes the migration history of the local database.") + migrationSquashCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") + squashFlags.StringVarP(&dbPassword, "password", "p", "", "Password to your remote Postgres database.") + cobra.CheckErr(viper.BindPFlag("DB_PASSWORD", squashFlags.Lookup("password"))) + 
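+ // Binding DB_PASSWORD means the SUPABASE_DB_PASSWORD env var can also supply the password (the viper env prefix is set in root.go).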
migrationSquashCmd.MarkFlagsMutuallyExclusive("db-url", "password") + migrationCmd.AddCommand(migrationSquashCmd) + // Build up command + upFlags := migrationUpCmd.Flags() + upFlags.BoolVar(&includeAll, "include-all", false, "Include all migrations not found on remote history table.") + upFlags.String("db-url", "", "Applies migrations to the database specified by the connection string (must be percent-encoded).") + upFlags.Bool("linked", false, "Applies pending migrations to the linked project.") + upFlags.Bool("local", true, "Applies pending migrations to the local database.") + migrationUpCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") + migrationCmd.AddCommand(migrationUpCmd) + // Build fetch command + fetchFlags := migrationFetchCmd.Flags() + fetchFlags.String("db-url", "", "Fetches migrations from the database specified by the connection string (must be percent-encoded).") + fetchFlags.Bool("linked", true, "Fetches migration history from the linked project.") + fetchFlags.Bool("local", false, "Fetches migration history from the local database.") + migrationFetchCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") + migrationCmd.AddCommand(migrationFetchCmd) + // Build new command + migrationCmd.AddCommand(migrationNewCmd) + rootCmd.AddCommand(migrationCmd) +} diff --git a/cmd/orgs.go b/cmd/orgs.go new file mode 100644 index 0000000..5d0750d --- /dev/null +++ b/cmd/orgs.go @@ -0,0 +1,40 @@ +package cmd + +import ( + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/orgs/create" + "github.com/supabase/cli/internal/orgs/list" +) + +var ( + orgsCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "orgs", + Short: "Manage Supabase organizations", + } + + orgsListCmd = &cobra.Command{ + Use: "list", + Short: "List all organizations", + Long: "List all organizations the logged-in user belongs to.", + RunE: func(cmd *cobra.Command, args []string) error { + return list.Run(cmd.Context()) + }, + } + + orgsCreateCmd = &cobra.Command{ + Use: "create", + Short: "Create an organization", + Long: "Create an organization for the logged-in user.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return create.Run(cmd.Context(), args[0]) + }, + } +) + +func init() { + orgsCmd.AddCommand(orgsListCmd) + orgsCmd.AddCommand(orgsCreateCmd) + rootCmd.AddCommand(orgsCmd) +} diff --git a/cmd/postgres.go b/cmd/postgres.go new file mode 100644 index 0000000..a1f9c37 --- /dev/null +++ b/cmd/postgres.go @@ -0,0 +1,68 @@ +package cmd + +import ( + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/postgresConfig/delete" + "github.com/supabase/cli/internal/postgresConfig/get" + "github.com/supabase/cli/internal/postgresConfig/update" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + postgresCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "postgres-config", + Short: "Manage Postgres database config", + } + + postgresConfigGetCmd = &cobra.Command{ + Use: "get", + Short: "Get the current Postgres database config overrides", + RunE: func(cmd *cobra.Command, args []string) error { + return get.Run(cmd.Context(), flags.ProjectRef, afero.NewOsFs()) + }, + } + + postgresConfigUpdateCmd = &cobra.Command{ + Use: "update", + Short: "Update Postgres database config", + Long: `Overriding the default Postgres config could result in unstable database behavior.
+Custom configuration also overrides the optimizations generated based on the compute add-ons in use.`, + RunE: func(cmd *cobra.Command, args []string) error { + return update.Run(cmd.Context(), flags.ProjectRef, postgresConfigValues, postgresConfigUpdateReplaceMode, noRestart, afero.NewOsFs()) + }, + } + + postgresConfigDeleteCmd = &cobra.Command{ + Use: "delete", + Short: "Delete specific Postgres database config overrides", + Long: "Delete specific config overrides, reverting them to their default values.", + RunE: func(cmd *cobra.Command, args []string) error { + return delete.Run(cmd.Context(), flags.ProjectRef, postgresConfigKeysToDelete, noRestart, afero.NewOsFs()) + }, + } + + postgresConfigValues []string + postgresConfigUpdateReplaceMode bool + postgresConfigKeysToDelete []string + noRestart bool +) + +func init() { + postgresCmd.PersistentFlags().StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + postgresCmd.AddCommand(postgresConfigGetCmd) + postgresCmd.AddCommand(postgresConfigUpdateCmd) + postgresCmd.AddCommand(postgresConfigDeleteCmd) + + updateFlags := postgresConfigUpdateCmd.Flags() + updateFlags.StringSliceVar(&postgresConfigValues, "config", []string{}, "Config overrides specified as a 'key=value' pair") + updateFlags.BoolVar(&postgresConfigUpdateReplaceMode, "replace-existing-overrides", false, "If true, replaces all existing overrides with the ones provided. If false (default), merges existing overrides with the ones provided.") + updateFlags.BoolVar(&noRestart, "no-restart", false, "Do not restart the database after updating config.") + + deleteFlags := postgresConfigDeleteCmd.Flags() + deleteFlags.StringSliceVar(&postgresConfigKeysToDelete, "config", []string{}, "Config keys to delete (comma-separated)") + deleteFlags.BoolVar(&noRestart, "no-restart", false, "Do not restart the database after deleting config.") + + rootCmd.AddCommand(postgresCmd) +} diff --git a/cmd/projects.go b/cmd/projects.go new file mode 100644 index 0000000..c119e71 --- /dev/null +++ b/cmd/projects.go @@ -0,0 +1,161 @@ +package cmd + +import ( + "os" + "sort" + + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/supabase/cli/internal/projects/apiKeys" + "github.com/supabase/cli/internal/projects/create" + "github.com/supabase/cli/internal/projects/delete" + "github.com/supabase/cli/internal/projects/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/api" + "golang.org/x/term" +) + +var ( + projectsCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "projects", + Short: "Manage Supabase projects", + } + + interactive bool + projectName string + orgId string + dbPassword string + + region = utils.EnumFlag{ + Allowed: awsRegions(), + } + plan = utils.EnumFlag{ + Allowed: []string{string(api.V1CreateProjectBodyDtoPlanFree), string(api.V1CreateProjectBodyDtoPlanPro)}, + Value: string(api.V1CreateProjectBodyDtoPlanFree), + } + size = utils.EnumFlag{ + Allowed: []string{ + string(api.DesiredInstanceSizeMicro), + string(api.DesiredInstanceSizeSmall), + string(api.DesiredInstanceSizeMedium), + string(api.DesiredInstanceSizeLarge), + string(api.DesiredInstanceSizeXlarge), + string(api.DesiredInstanceSizeN2xlarge), + string(api.DesiredInstanceSizeN4xlarge), + string(api.DesiredInstanceSizeN8xlarge), + string(api.DesiredInstanceSizeN12xlarge), + string(api.DesiredInstanceSizeN16xlarge), + }, + } + + projectsCreateCmd = &cobra.Command{ + Use: 
"create [project name]", + Short: "Create a project on Supabase", + Args: cobra.MaximumNArgs(1), + Example: `supabase projects create my-project --org-id cool-green-pqdr0qc --db-password ******** --region us-east-1`, + PreRunE: func(cmd *cobra.Command, args []string) error { + if !term.IsTerminal(int(os.Stdin.Fd())) || !interactive { + cobra.CheckErr(cmd.MarkFlagRequired("org-id")) + cobra.CheckErr(cmd.MarkFlagRequired("db-password")) + cobra.CheckErr(cmd.MarkFlagRequired("region")) + return cobra.ExactArgs(1)(cmd, args) + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + projectName = args[0] + } + body := api.V1CreateProjectBodyDto{ + Name: projectName, + OrganizationId: orgId, + DbPass: dbPassword, + Region: api.V1CreateProjectBodyDtoRegion(region.Value), + } + if cmd.Flags().Changed("size") { + body.DesiredInstanceSize = (*api.V1CreateProjectBodyDtoDesiredInstanceSize)(&size.Value) + } + return create.Run(cmd.Context(), body, afero.NewOsFs()) + }, + } + + projectsListCmd = &cobra.Command{ + Use: "list", + Short: "List all Supabase projects", + Long: "List all Supabase projects the logged-in user can access.", + RunE: func(cmd *cobra.Command, args []string) error { + return list.Run(cmd.Context(), afero.NewOsFs()) + }, + } + + projectsApiKeysCmd = &cobra.Command{ + Use: "api-keys", + Short: "List all API keys for a Supabase project", + RunE: func(cmd *cobra.Command, args []string) error { + return apiKeys.Run(cmd.Context(), flags.ProjectRef, afero.NewOsFs()) + }, + } + + projectsDeleteCmd = &cobra.Command{ + Use: "delete ", + Short: "Delete a Supabase project", + Args: cobra.MaximumNArgs(1), + PreRunE: func(cmd *cobra.Command, args []string) error { + if !term.IsTerminal(int(os.Stdin.Fd())) { + return cobra.ExactArgs(1)(cmd, args) + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + if len(args) == 0 { + title := "Which project do you want to delete?" 
+ cobra.CheckErr(flags.PromptProjectRef(ctx, title)) + } else { + flags.ProjectRef = args[0] + } + if err := delete.PreRun(ctx, flags.ProjectRef); err != nil { + return err + } + return delete.Run(ctx, flags.ProjectRef, afero.NewOsFs()) + }, + } +) + +func init() { + // Add flags to cobra command + createFlags := projectsCreateCmd.Flags() + createFlags.BoolVarP(&interactive, "interactive", "i", true, "Enables interactive mode.") + cobra.CheckErr(createFlags.MarkHidden("interactive")) + createFlags.StringVar(&orgId, "org-id", "", "Organization ID to create the project in.") + createFlags.StringVar(&dbPassword, "db-password", "", "Database password of the project.") + createFlags.Var(&region, "region", "Select a region close to you for the best performance.") + createFlags.Var(&plan, "plan", "Select a plan that suits your needs.") + cobra.CheckErr(createFlags.MarkHidden("plan")) + createFlags.Var(&size, "size", "Select a desired instance size for your project.") + cobra.CheckErr(viper.BindPFlag("DB_PASSWORD", createFlags.Lookup("db-password"))) + + apiKeysFlags := projectsApiKeysCmd.Flags() + apiKeysFlags.StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + + // Add commands to root + projectsCmd.AddCommand(projectsCreateCmd) + projectsCmd.AddCommand(projectsDeleteCmd) + projectsCmd.AddCommand(projectsListCmd) + projectsCmd.AddCommand(projectsApiKeysCmd) + rootCmd.AddCommand(projectsCmd) +} + +func awsRegions() []string { + result := make([]string, len(utils.RegionMap)) + i := 0 + for k := range utils.RegionMap { + result[i] = k + i++ + } + sort.Strings(result) + return result +} diff --git a/cmd/restrictions.go b/cmd/restrictions.go new file mode 100644 index 0000000..4358d61 --- /dev/null +++ b/cmd/restrictions.go @@ -0,0 +1,44 @@ +package cmd + +import ( + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/restrictions/get" + "github.com/supabase/cli/internal/restrictions/update" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + restrictionsCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "network-restrictions", + Short: "Manage network restrictions", + } + + dbCidrsToAllow []string + bypassCidrChecks bool + + restrictionsUpdateCmd = &cobra.Command{ + Use: "update", + Short: "Update network restrictions", + RunE: func(cmd *cobra.Command, args []string) error { + return update.Run(cmd.Context(), flags.ProjectRef, dbCidrsToAllow, bypassCidrChecks) + }, + } + + restrictionsGetCmd = &cobra.Command{ + Use: "get", + Short: "Get the current network restrictions", + RunE: func(cmd *cobra.Command, args []string) error { + return get.Run(cmd.Context(), flags.ProjectRef) + }, + } +) + +func init() { + restrictionsCmd.PersistentFlags().StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + restrictionsUpdateCmd.Flags().StringSliceVar(&dbCidrsToAllow, "db-allow-cidr", []string{}, "CIDR to allow DB connections from.") + restrictionsUpdateCmd.Flags().BoolVar(&bypassCidrChecks, "bypass-cidr-checks", false, "Bypass some of the CIDR validation checks.") + restrictionsCmd.AddCommand(restrictionsGetCmd) + restrictionsCmd.AddCommand(restrictionsUpdateCmd) + rootCmd.AddCommand(restrictionsCmd) +} diff --git a/cmd/root.go b/cmd/root.go new file mode 100644 index 0000000..35540a8 --- /dev/null +++ b/cmd/root.go @@ -0,0 +1,268 @@ +package cmd + +import ( + "context" + "fmt" + "net" + "net/url" + "os" + "os/signal" + "strings" + "time" + + "github.com/getsentry/sentry-go" + "github.com/go-errors/errors" +
"github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "golang.org/x/mod/semver" +) + +const ( + groupQuickStart = "quick-start" + groupLocalDev = "local-dev" + groupManagementAPI = "management-api" +) + +func IsManagementAPI(cmd *cobra.Command) bool { + for cmd != cmd.Root() { + if cmd.GroupID == groupManagementAPI { + return true + } + // Find the last assigned group + if len(cmd.GroupID) > 0 { + break + } + cmd = cmd.Parent() + } + return false +} + +func promptLogin(fsys afero.Fs) error { + if _, err := utils.LoadAccessTokenFS(fsys); err == utils.ErrMissingToken { + utils.CmdSuggestion = fmt.Sprintf("Run %s first.", utils.Aqua("supabase login")) + return errors.New("You need to be logged-in in order to use Management API commands.") + } else { + return err + } +} + +var experimental = []*cobra.Command{ + bansCmd, + restrictionsCmd, + vanityCmd, + sslEnforcementCmd, + genKeysCmd, + postgresCmd, + branchesCmd, + storageCmd, +} + +func IsExperimental(cmd *cobra.Command) bool { + for _, exp := range experimental { + if cmd == exp || cmd.Parent() == exp { + return true + } + } + return false +} + +var ( + sentryOpts = sentry.ClientOptions{ + Dsn: utils.SentryDsn, + Release: utils.Version, + ServerName: "", + // Set TracesSampleRate to 1.0 to capture 100% + // of transactions for performance monitoring. + // We recommend adjusting this value in production, + TracesSampleRate: 1.0, + } + + createTicket bool + + rootCmd = &cobra.Command{ + Use: "supabase", + Short: "Supabase CLI " + utils.Version, + Version: utils.Version, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if IsExperimental(cmd) && !viper.GetBool("EXPERIMENTAL") { + return errors.New("must set the --experimental flag to run this command") + } + cmd.SilenceUsage = true + // Change workdir + fsys := afero.NewOsFs() + if err := utils.ChangeWorkDir(fsys); err != nil { + return err + } + // Add common flags + ctx := cmd.Context() + if IsManagementAPI(cmd) { + if err := promptLogin(fsys); err != nil { + return err + } + ctx, _ = signal.NotifyContext(ctx, os.Interrupt) + if cmd.Flags().Lookup("project-ref") != nil { + if err := flags.ParseProjectRef(ctx, fsys); err != nil { + return err + } + } + } + if err := flags.ParseDatabaseConfig(cmd.Flags(), fsys); err != nil { + return err + } + // Prepare context + if viper.GetBool("DEBUG") { + ctx = utils.WithTraceContext(ctx) + fmt.Fprintln(os.Stderr, cmd.Root().Short) + } + cmd.SetContext(ctx) + // Setup sentry last to ignore errors from parsing cli flags + apiHost, err := url.Parse(utils.GetSupabaseAPIHost()) + if err != nil { + return err + } + sentryOpts.Environment = apiHost.Host + return sentry.Init(sentryOpts) + }, + SilenceErrors: true, + } +) + +func Execute() { + defer recoverAndExit() + if err := rootCmd.Execute(); err != nil { + panic(err) + } + // Check upgrade last because --version flag is initialised after execute + version, err := checkUpgrade(rootCmd.Context(), afero.NewOsFs()) + if err != nil { + fmt.Fprintln(utils.GetDebugLogger(), err) + } + if semver.Compare(version, "v"+utils.Version) > 0 { + fmt.Fprintln(os.Stderr, suggestUpgrade(version)) + } + if len(utils.CmdSuggestion) > 0 { + fmt.Fprintln(os.Stderr, utils.CmdSuggestion) + } +} + +func checkUpgrade(ctx context.Context, fsys afero.Fs) (string, error) { + if shouldFetchRelease(fsys) { + version, err := utils.GetLatestRelease(ctx) + if exists, _ := afero.DirExists(fsys, 
utils.SupabaseDirPath); exists { + // If user is offline, write an empty file to skip subsequent checks + err = utils.WriteFile(utils.CliVersionPath, []byte(version), fsys) + } + return version, err + } + version, err := afero.ReadFile(fsys, utils.CliVersionPath) + if err != nil { + return "", errors.Errorf("failed to read cli version: %w", err) + } + return string(version), nil +} + +func shouldFetchRelease(fsys afero.Fs) bool { + // Always fetch latest release when using --version flag + if vf := rootCmd.Flag("version"); vf != nil && vf.Changed { + return true + } + if fi, err := fsys.Stat(utils.CliVersionPath); err == nil { + expiry := fi.ModTime().Add(time.Hour * 10) + // Skip if last checked is less than 10 hours ago + return time.Now().After(expiry) + } + return true +} + +func suggestUpgrade(version string) string { + const guide = "https://supabase.com/docs/guides/cli/getting-started#updating-the-supabase-cli" + return fmt.Sprintf(`A new version of Supabase CLI is available: %s (currently installed v%s) +We recommend updating regularly for new features and bug fixes: %s`, utils.Yellow(version), utils.Version, utils.Bold(guide)) +} + +func recoverAndExit() { + err := recover() + if err == nil { + return + } + var msg string + switch err := err.(type) { + case string: + msg = err + case error: + if !errors.Is(err, context.Canceled) && + len(utils.CmdSuggestion) == 0 && + !viper.GetBool("DEBUG") { + utils.CmdSuggestion = utils.SuggestDebugFlag + } + msg = err.Error() + default: + msg = fmt.Sprintf("%#v", err) + } + // Log error to console + fmt.Fprintln(os.Stderr, utils.Red(msg)) + if len(utils.CmdSuggestion) > 0 { + fmt.Fprintln(os.Stderr, utils.CmdSuggestion) + } + // Report error to sentry + if createTicket && len(utils.SentryDsn) > 0 { + sentry.ConfigureScope(addSentryScope) + eventId := sentry.CurrentHub().Recover(err) + if eventId != nil && sentry.Flush(2*time.Second) { + fmt.Fprintln(os.Stderr, "Sent crash report:", *eventId) + fmt.Fprintln(os.Stderr, "Quote the crash ID above when filing a bug report: https://github.com/supabase/cli/issues/new/choose") + } + } + os.Exit(1) +} + +func init() { + cobra.OnInitialize(func() { + viper.SetEnvPrefix("SUPABASE") + viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) + viper.AutomaticEnv() + }) + + flags := rootCmd.PersistentFlags() + flags.Bool("debug", false, "output debug logs to stderr") + flags.String("workdir", "", "path to a Supabase project directory") + flags.Bool("experimental", false, "enable experimental features") + flags.String("network-id", "", "use the specified docker network instead of a generated one") + flags.Var(&utils.OutputFormat, "output", "output format of status variables") + flags.Var(&utils.DNSResolver, "dns-resolver", "lookup domain names using the specified resolver") + flags.BoolVar(&createTicket, "create-ticket", false, "create a support ticket for any CLI error") + cobra.CheckErr(viper.BindPFlags(flags)) + + rootCmd.SetVersionTemplate("{{.Version}}\n") + rootCmd.AddGroup(&cobra.Group{ID: groupQuickStart, Title: "Quick Start:"}) + rootCmd.AddGroup(&cobra.Group{ID: groupLocalDev, Title: "Local Development:"}) + rootCmd.AddGroup(&cobra.Group{ID: groupManagementAPI, Title: "Management APIs:"}) +} + +// Instantiating a new rootCmd is a bit tricky with cobra, but it can be done later with the following +// approach, for example: https://github.com/portworx/pxc/tree/master/cmd +func GetRootCmd() *cobra.Command { + return rootCmd +} + +func addSentryScope(scope *sentry.Scope) { + serviceImages :=
utils.Config.GetServiceImages() + imageToVersion := make(map[string]interface{}, len(serviceImages)) + for _, image := range serviceImages { + parts := strings.Split(image, ":") + // Bypasses sentry's IP sanitization rule, i.e. 15.1.0.147 + if net.ParseIP(parts[1]) != nil { + imageToVersion[parts[0]] = "v" + parts[1] + } else { + imageToVersion[parts[0]] = parts[1] + } + } + scope.SetContext("Services", imageToVersion) + scope.SetContext("Config", map[string]interface{}{ + "Image Registry": utils.GetRegistry(), + "Project ID": flags.ProjectRef, + }) +} diff --git a/cmd/secrets.go b/cmd/secrets.go new file mode 100644 index 0000000..df4084e --- /dev/null +++ b/cmd/secrets.go @@ -0,0 +1,54 @@ +package cmd + +import ( + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/secrets/list" + "github.com/supabase/cli/internal/secrets/set" + "github.com/supabase/cli/internal/secrets/unset" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + secretsCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "secrets", + Short: "Manage Supabase secrets", + } + + secretsListCmd = &cobra.Command{ + Use: "list", + Short: "List all secrets on Supabase", + Long: "List all secrets in the linked project.", + RunE: func(cmd *cobra.Command, args []string) error { + return list.Run(cmd.Context(), flags.ProjectRef, afero.NewOsFs()) + }, + } + + secretsSetCmd = &cobra.Command{ + Use: "set <NAME=VALUE> ...", + Short: "Set one or more secrets on Supabase", + Long: "Set one or more secrets in the linked Supabase project.", + RunE: func(cmd *cobra.Command, args []string) error { + return set.Run(cmd.Context(), flags.ProjectRef, envFilePath, args, afero.NewOsFs()) + }, + } + + secretsUnsetCmd = &cobra.Command{ + Use: "unset [NAME] ...", + Short: "Unset one or more secrets on Supabase", + Long: "Unset one or more secrets from the linked Supabase project.", + RunE: func(cmd *cobra.Command, args []string) error { + return unset.Run(cmd.Context(), flags.ProjectRef, args, afero.NewOsFs()) + }, + } +) + +func init() { + secretsCmd.PersistentFlags().StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + secretsSetCmd.Flags().StringVar(&envFilePath, "env-file", "", "Read secrets from a .env file.") + secretsCmd.AddCommand(secretsListCmd) + secretsCmd.AddCommand(secretsSetCmd) + secretsCmd.AddCommand(secretsUnsetCmd) + rootCmd.AddCommand(secretsCmd) +} diff --git a/cmd/seed.go b/cmd/seed.go new file mode 100644 index 0000000..c36b707 --- /dev/null +++ b/cmd/seed.go @@ -0,0 +1,43 @@ +package cmd + +import ( + "os" + "os/signal" + + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/seed/buckets" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + seedCmd = &cobra.Command{ + GroupID: groupLocalDev, + Use: "seed", + Short: "Seed a Supabase project from " + utils.ConfigPath, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt) + cmd.SetContext(ctx) + return cmd.Root().PersistentPreRunE(cmd, args) + }, + } + + bucketsCmd = &cobra.Command{ + Use: "buckets", + Short: "Seed buckets declared in [storage.buckets]", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return buckets.Run(cmd.Context(), flags.ProjectRef, true, afero.NewOsFs()) + }, + } +) + +func init() { + seedFlags := seedCmd.PersistentFlags() + seedFlags.Bool("linked", false, "Seeds the linked project.") + seedFlags.Bool("local", true, "Seeds
the local database.") + seedCmd.MarkFlagsMutuallyExclusive("local", "linked") + seedCmd.AddCommand(bucketsCmd) + rootCmd.AddCommand(seedCmd) +} diff --git a/cmd/services.go b/cmd/services.go new file mode 100644 index 0000000..fe298cb --- /dev/null +++ b/cmd/services.go @@ -0,0 +1,22 @@ +package cmd + +import ( + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/services" +) + +var ( + servicesCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "services", + Short: "Show versions of all Supabase services", + RunE: func(cmd *cobra.Command, args []string) error { + return services.Run(cmd.Context(), afero.NewOsFs()) + }, + } +) + +func init() { + rootCmd.AddCommand(servicesCmd) +} diff --git a/cmd/snippets.go b/cmd/snippets.go new file mode 100644 index 0000000..f69b232 --- /dev/null +++ b/cmd/snippets.go @@ -0,0 +1,43 @@ +package cmd + +import ( + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/snippets/download" + "github.com/supabase/cli/internal/snippets/list" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + snippetsCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "snippets", + Short: "Manage Supabase SQL snippets", + } + + snippetsListCmd = &cobra.Command{ + Use: "list", + Short: "List all SQL snippets", + Long: "List all SQL snippets of the linked project.", + RunE: func(cmd *cobra.Command, args []string) error { + return list.Run(cmd.Context(), afero.NewOsFs()) + }, + } + + snippetsDownloadCmd = &cobra.Command{ + Use: "download ", + Short: "Download contents of a SQL snippet", + Long: "Download contents of the specified SQL snippet.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return download.Run(cmd.Context(), args[0], afero.NewOsFs()) + }, + } +) + +func init() { + snippetsCmd.PersistentFlags().StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + snippetsCmd.AddCommand(snippetsListCmd) + snippetsCmd.AddCommand(snippetsDownloadCmd) + rootCmd.AddCommand(snippetsCmd) +} diff --git a/cmd/sslEnforcement.go b/cmd/sslEnforcement.go new file mode 100644 index 0000000..7b44d6d --- /dev/null +++ b/cmd/sslEnforcement.go @@ -0,0 +1,51 @@ +package cmd + +import ( + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/ssl_enforcement/get" + "github.com/supabase/cli/internal/ssl_enforcement/update" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + sslEnforcementCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "ssl-enforcement", + Short: "Manage SSL enforcement configuration", + } + + dbEnforceSsl bool + dbDisableSsl bool + + sslEnforcementUpdateCmd = &cobra.Command{ + Use: "update", + Short: "Update SSL enforcement configuration", + RunE: func(cmd *cobra.Command, args []string) error { + if !dbEnforceSsl && !dbDisableSsl { + return errors.New("enable/disable not specified") + } + return update.Run(cmd.Context(), flags.ProjectRef, dbEnforceSsl, afero.NewOsFs()) + }, + } + + sslEnforcementGetCmd = &cobra.Command{ + Use: "get", + Short: "Get the current SSL enforcement configuration", + RunE: func(cmd *cobra.Command, args []string) error { + return get.Run(cmd.Context(), flags.ProjectRef, afero.NewOsFs()) + }, + } +) + +func init() { + sslEnforcementCmd.PersistentFlags().StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + sslEnforcementUpdateCmd.Flags().BoolVar(&dbEnforceSsl, 
"enable-db-ssl-enforcement", false, "Whether the DB should enable SSL enforcement for all external connections.") + sslEnforcementUpdateCmd.Flags().BoolVar(&dbDisableSsl, "disable-db-ssl-enforcement", false, "Whether the DB should disable SSL enforcement for all external connections.") + sslEnforcementUpdateCmd.MarkFlagsMutuallyExclusive("enable-db-ssl-enforcement", "disable-db-ssl-enforcement") + sslEnforcementCmd.AddCommand(sslEnforcementUpdateCmd) + sslEnforcementCmd.AddCommand(sslEnforcementGetCmd) + + rootCmd.AddCommand(sslEnforcementCmd) +} diff --git a/cmd/sso.go b/cmd/sso.go new file mode 100644 index 0000000..a9b4db7 --- /dev/null +++ b/cmd/sso.go @@ -0,0 +1,179 @@ +package cmd + +import ( + "github.com/go-errors/errors" + "github.com/spf13/cobra" + + "github.com/supabase/cli/internal/sso/create" + "github.com/supabase/cli/internal/sso/get" + "github.com/supabase/cli/internal/sso/info" + "github.com/supabase/cli/internal/sso/list" + "github.com/supabase/cli/internal/sso/remove" + "github.com/supabase/cli/internal/sso/update" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + ssoCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "sso", + Short: "Manage Single Sign-On (SSO) authentication for projects", + } + + ssoProviderType = utils.EnumFlag{ + Allowed: []string{"saml"}, + // intentionally no default value so users have to specify --type saml explicitly + } + ssoMetadataFile string + ssoMetadataURL string + ssoSkipURLValidation bool + ssoMetadata bool + ssoAttributeMappingFile string + ssoDomains []string + ssoAddDomains []string + ssoRemoveDomains []string + + ssoAddCmd = &cobra.Command{ + Use: "add", + Short: "Add a new SSO identity provider", + Long: "Add and configure a new connection to a SSO identity provider to your Supabase project.", + Example: ` supabase sso add --type saml --project-ref mwjylndxudmiehsxhmmz --metadata-url 'https://...' --domains example.com`, + RunE: func(cmd *cobra.Command, args []string) error { + return create.Run(cmd.Context(), create.RunParams{ + ProjectRef: flags.ProjectRef, + Type: ssoProviderType.String(), + Format: utils.OutputFormat.Value, + MetadataFile: ssoMetadataFile, + MetadataURL: ssoMetadataURL, + SkipURLValidation: ssoSkipURLValidation, + AttributeMapping: ssoAttributeMappingFile, + Domains: ssoDomains, + }) + }, + } + + ssoRemoveCmd = &cobra.Command{ + Use: "remove ", + Short: "Remove an existing SSO identity provider", + Long: "Remove a connection to an already added SSO identity provider. Removing the provider will prevent existing users from logging in. 
Please treat this command with care.", + Args: cobra.ExactArgs(1), + Example: ` supabase sso remove b5ae62f9-ef1d-4f11-a02b-731c8bbb11e8 --project-ref mwjylndxudmiehsxhmmz`, + RunE: func(cmd *cobra.Command, args []string) error { + if !utils.UUIDPattern.MatchString(args[0]) { + return errors.Errorf("identity provider ID %q is not a UUID", args[0]) + } + + return remove.Run(cmd.Context(), flags.ProjectRef, args[0], utils.OutputFormat.Value) + }, + } + + ssoUpdateCmd = &cobra.Command{ + Use: "update <provider-id>", + Short: "Update information about an SSO identity provider", + Long: "Update the configuration settings of an already added SSO identity provider.", + Args: cobra.ExactArgs(1), + Example: ` supabase sso update b5ae62f9-ef1d-4f11-a02b-731c8bbb11e8 --project-ref mwjylndxudmiehsxhmmz --add-domains example.com`, + RunE: func(cmd *cobra.Command, args []string) error { + if !utils.UUIDPattern.MatchString(args[0]) { + return errors.Errorf("identity provider ID %q is not a UUID", args[0]) + } + + return update.Run(cmd.Context(), update.RunParams{ + ProjectRef: flags.ProjectRef, + ProviderID: args[0], + Format: utils.OutputFormat.Value, + + MetadataFile: ssoMetadataFile, + MetadataURL: ssoMetadataURL, + SkipURLValidation: ssoSkipURLValidation, + AttributeMapping: ssoAttributeMappingFile, + Domains: ssoDomains, + AddDomains: ssoAddDomains, + RemoveDomains: ssoRemoveDomains, + }) + }, + } + + ssoShowCmd = &cobra.Command{ + Use: "show <provider-id>", + Short: "Show information about an SSO identity provider", + Long: "Provides information about an established connection to an identity provider. You can use --metadata to obtain the raw SAML 2.0 Metadata XML document stored in your project's configuration.", + Args: cobra.ExactArgs(1), + Example: ` supabase sso show b5ae62f9-ef1d-4f11-a02b-731c8bbb11e8 --project-ref mwjylndxudmiehsxhmmz`, + RunE: func(cmd *cobra.Command, args []string) error { + if !utils.UUIDPattern.MatchString(args[0]) { + return errors.Errorf("identity provider ID %q is not a UUID", args[0]) + } + + format := utils.OutputFormat.Value + if ssoMetadata { + format = utils.OutputMetadata + } + + return get.Run(cmd.Context(), flags.ProjectRef, args[0], format) + }, + } + + ssoListCmd = &cobra.Command{ + Use: "list", + Short: "List all SSO identity providers for a project", + Long: "List all connections to SSO identity providers for your Supabase project.", + Example: ` supabase sso list --project-ref mwjylndxudmiehsxhmmz`, + RunE: func(cmd *cobra.Command, args []string) error { + return list.Run(cmd.Context(), flags.ProjectRef, utils.OutputFormat.Value) + }, + } + + ssoInfoCmd = &cobra.Command{ + Use: "info", + Short: "Returns the SAML SSO settings required for the identity provider", + Long: "Returns all of the important SSO information necessary for your project to be registered with a SAML 2.0 compatible identity provider.", + Example: ` supabase sso info --project-ref mwjylndxudmiehsxhmmz`, + RunE: func(cmd *cobra.Command, args []string) error { + return info.Run(cmd.Context(), flags.ProjectRef, utils.OutputFormat.Value) + }, + } +) + +func init() { + persistentFlags := ssoCmd.PersistentFlags() + persistentFlags.StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + ssoAddFlags := ssoAddCmd.Flags() + ssoAddFlags.VarP(&ssoProviderType, "type", "t", "Type of identity provider (according to supported protocol).") + ssoAddFlags.StringSliceVar(&ssoDomains, "domains", nil, "Comma separated list of email domains to associate with the added identity provider.") +
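+ // Provider metadata may come from either a local file or a URL; the two flags are marked mutually exclusive below.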
ssoAddFlags.StringVar(&ssoMetadataFile, "metadata-file", "", "File containing a SAML 2.0 Metadata XML document describing the identity provider.") + ssoAddFlags.StringVar(&ssoMetadataURL, "metadata-url", "", "URL pointing to a SAML 2.0 Metadata XML document describing the identity provider.") + ssoAddFlags.BoolVar(&ssoSkipURLValidation, "skip-url-validation", false, "Skip local validation of the SAML 2.0 Metadata URL.") + ssoAddFlags.StringVar(&ssoAttributeMappingFile, "attribute-mapping-file", "", "File containing a JSON mapping from SAML attributes to custom JWT claims.") + ssoAddCmd.MarkFlagsMutuallyExclusive("metadata-file", "metadata-url") + cobra.CheckErr(ssoAddCmd.MarkFlagRequired("type")) + cobra.CheckErr(ssoAddCmd.MarkFlagFilename("metadata-file", "xml")) + cobra.CheckErr(ssoAddCmd.MarkFlagFilename("attribute-mapping-file", "json")) + + ssoUpdateFlags := ssoUpdateCmd.Flags() + ssoUpdateFlags.StringSliceVar(&ssoDomains, "domains", []string{}, "Replace domains with this comma separated list of email domains.") + ssoUpdateFlags.StringSliceVar(&ssoAddDomains, "add-domains", []string{}, "Add this comma separated list of email domains to the identity provider.") + ssoUpdateFlags.StringSliceVar(&ssoRemoveDomains, "remove-domains", []string{}, "Remove this comma separated list of email domains from the identity provider.") + ssoUpdateFlags.StringVar(&ssoMetadataFile, "metadata-file", "", "File containing a SAML 2.0 Metadata XML document describing the identity provider.") + ssoUpdateFlags.StringVar(&ssoMetadataURL, "metadata-url", "", "URL pointing to a SAML 2.0 Metadata XML document describing the identity provider.") + ssoUpdateFlags.BoolVar(&ssoSkipURLValidation, "skip-url-validation", false, "Skip local validation of the SAML 2.0 Metadata URL.") + ssoUpdateFlags.StringVar(&ssoAttributeMappingFile, "attribute-mapping-file", "", "File containing a JSON mapping from SAML attributes to custom JWT claims.") + ssoUpdateCmd.MarkFlagsMutuallyExclusive("metadata-file", "metadata-url") + ssoUpdateCmd.MarkFlagsMutuallyExclusive("domains", "add-domains") + ssoUpdateCmd.MarkFlagsMutuallyExclusive("domains", "remove-domains") + cobra.CheckErr(ssoUpdateCmd.MarkFlagFilename("metadata-file", "xml")) + cobra.CheckErr(ssoUpdateCmd.MarkFlagFilename("attribute-mapping-file", "json")) + + ssoShowFlags := ssoShowCmd.Flags() + ssoShowFlags.BoolVar(&ssoMetadata, "metadata", false, "Show SAML 2.0 XML Metadata only") + + ssoCmd.AddCommand(ssoAddCmd) + ssoCmd.AddCommand(ssoRemoveCmd) + ssoCmd.AddCommand(ssoUpdateCmd) + ssoCmd.AddCommand(ssoShowCmd) + ssoCmd.AddCommand(ssoListCmd) + ssoCmd.AddCommand(ssoInfoCmd) + + rootCmd.AddCommand(ssoCmd) +} diff --git a/cmd/start.go b/cmd/start.go new file mode 100644 index 0000000..a7af80e --- /dev/null +++ b/cmd/start.go @@ -0,0 +1,62 @@ +package cmd + +import ( + "fmt" + "os" + "sort" + "strings" + + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/start" + "github.com/supabase/cli/internal/utils" +) + +func validateExcludedContainers(excludedContainers []string) { + // Validate excluded containers + validContainers := start.ExcludableContainers() + var invalidContainers []string + + for _, e := range excludedContainers { + if !utils.SliceContains(validContainers, e) { + invalidContainers = append(invalidContainers, e) + } + } + + if len(invalidContainers) > 0 { + // Sort the names list so it's easier to visually spot the one you're looking for + sort.Strings(validContainers) +
warning := fmt.Sprintf("%s The following container names are not valid to exclude: %s\nValid containers to exclude are: %s\n", + utils.Yellow("WARNING:"), + utils.Aqua(strings.Join(invalidContainers, ", ")), + utils.Aqua(strings.Join(validContainers, ", "))) + fmt.Fprint(os.Stderr, warning) + } +} + +var ( + allowedContainers = start.ExcludableContainers() + excludedContainers []string + ignoreHealthCheck bool + preview bool + + startCmd = &cobra.Command{ + GroupID: groupLocalDev, + Use: "start", + Short: "Start containers for Supabase local development", + RunE: func(cmd *cobra.Command, args []string) error { + validateExcludedContainers(excludedContainers) + return start.Run(cmd.Context(), afero.NewOsFs(), excludedContainers, ignoreHealthCheck) + }, + } +) + +func init() { + flags := startCmd.Flags() + names := strings.Join(allowedContainers, ",") + flags.StringSliceVarP(&excludedContainers, "exclude", "x", []string{}, "Names of containers to not start. ["+names+"]") + flags.BoolVar(&ignoreHealthCheck, "ignore-health-check", false, "Ignore unhealthy services and exit 0") + flags.BoolVar(&preview, "preview", false, "Connect to feature preview branch") + cobra.CheckErr(flags.MarkHidden("preview")) + rootCmd.AddCommand(startCmd) +} diff --git a/cmd/status.go b/cmd/status.go new file mode 100644 index 0000000..13540bf --- /dev/null +++ b/cmd/status.go @@ -0,0 +1,47 @@ +package cmd + +import ( + "os" + "os/signal" + + env "github.com/Netflix/go-env" + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/status" + "github.com/supabase/cli/internal/utils" +) + +var ( + override []string + names status.CustomName + output = utils.EnumFlag{ + Allowed: append([]string{utils.OutputEnv}, utils.OutputDefaultAllowed...), + Value: utils.OutputPretty, + } + + statusCmd = &cobra.Command{ + GroupID: groupLocalDev, + Use: "status", + Short: "Show status of local Supabase containers", + PreRunE: func(cmd *cobra.Command, args []string) error { + es, err := env.EnvironToEnvSet(override) + if err != nil { + return err + } + return env.Unmarshal(es, &names) + }, + RunE: func(cmd *cobra.Command, args []string) error { + ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt) + return status.Run(ctx, names, output.Value, afero.NewOsFs()) + }, + Example: ` supabase status -o env --override-name api.url=NEXT_PUBLIC_SUPABASE_URL + supabase status -o json`, + } +) + +func init() { + flags := statusCmd.Flags() + flags.VarP(&output, "output", "o", "Output format of status variables.") + flags.StringSliceVar(&override, "override-name", []string{}, "Override specific variable names.") + rootCmd.AddCommand(statusCmd) +} diff --git a/cmd/stop.go b/cmd/stop.go new file mode 100644 index 0000000..6a6f4aa --- /dev/null +++ b/cmd/stop.go @@ -0,0 +1,37 @@ +package cmd + +import ( + "os" + "os/signal" + + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/stop" +) + +var ( + noBackup bool + projectId string + all bool + + stopCmd = &cobra.Command{ + GroupID: groupLocalDev, + Use: "stop", + Short: "Stop all local Supabase containers", + RunE: func(cmd *cobra.Command, args []string) error { + ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt) + return stop.Run(ctx, !noBackup, projectId, all, afero.NewOsFs()) + }, + } +) + +func init() { + flags := stopCmd.Flags() + flags.Bool("backup", true, "Backs up the current database before stopping.") + flags.StringVar(&projectId, "project-id", "", "Local project ID to stop.") + 
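+ // The --backup flag is registered but hidden below; users opt out via --no-backup, which deletes all data volumes instead.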
cobra.CheckErr(flags.MarkHidden("backup")) + flags.BoolVar(&noBackup, "no-backup", false, "Deletes all data volumes after stopping.") + flags.BoolVar(&all, "all", false, "Stop all local Supabase instances from all projects across the machine.") + stopCmd.MarkFlagsMutuallyExclusive("project-id", "all") + rootCmd.AddCommand(stopCmd) +} diff --git a/cmd/storage.go b/cmd/storage.go new file mode 100644 index 0000000..3e6eb8f --- /dev/null +++ b/cmd/storage.go @@ -0,0 +1,99 @@ +package cmd + +import ( + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/storage/client" + "github.com/supabase/cli/internal/storage/cp" + "github.com/supabase/cli/internal/storage/ls" + "github.com/supabase/cli/internal/storage/mv" + "github.com/supabase/cli/internal/storage/rm" + "github.com/supabase/cli/pkg/storage" +) + +var ( + storageCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "storage", + Short: "Manage Supabase Storage objects", + } + + recursive bool + + lsCmd = &cobra.Command{ + Use: "ls [path]", + Example: "ls ss:///bucket/docs", + Short: "List objects by path prefix", + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + objectPath := client.STORAGE_SCHEME + ":///" + if len(args) > 0 { + objectPath = args[0] + } + return ls.Run(cmd.Context(), objectPath, recursive, afero.NewOsFs()) + }, + } + + options storage.FileOptions + maxJobs uint + + cpCmd = &cobra.Command{ + Use: "cp ", + Example: `cp readme.md ss:///bucket/readme.md +cp -r docs ss:///bucket/docs +cp -r ss:///bucket/docs . +`, + Short: "Copy objects from src to dst path", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts := func(fo *storage.FileOptions) { + fo.CacheControl = options.CacheControl + fo.ContentType = options.ContentType + } + return cp.Run(cmd.Context(), args[0], args[1], recursive, maxJobs, afero.NewOsFs(), opts) + }, + } + + mvCmd = &cobra.Command{ + Use: "mv ", + Short: "Move objects from src to dst path", + Example: "mv -r ss:///bucket/docs ss:///bucket/www/docs", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return mv.Run(cmd.Context(), args[0], args[1], recursive, afero.NewOsFs()) + }, + } + + rmCmd = &cobra.Command{ + Use: "rm ...", + Short: "Remove objects by file path", + Example: `rm -r ss:///bucket/docs +rm ss:///bucket/docs/example.md ss:///bucket/readme.md +`, + Args: cobra.MinimumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return rm.Run(cmd.Context(), args, recursive, afero.NewOsFs()) + }, + } +) + +func init() { + storageFlags := storageCmd.PersistentFlags() + storageFlags.Bool("linked", true, "Connects to Storage API of the linked project.") + storageFlags.Bool("local", false, "Connects to Storage API of the local database.") + storageCmd.MarkFlagsMutuallyExclusive("linked", "local") + lsCmd.Flags().BoolVarP(&recursive, "recursive", "r", false, "Recursively list a directory.") + storageCmd.AddCommand(lsCmd) + cpFlags := cpCmd.Flags() + cpFlags.BoolVarP(&recursive, "recursive", "r", false, "Recursively copy a directory.") + cpFlags.StringVar(&options.CacheControl, "cache-control", "max-age=3600", "Custom Cache-Control header for HTTP upload.") + cpFlags.StringVar(&options.ContentType, "content-type", "", "Custom Content-Type header for HTTP upload.") + cpFlags.Lookup("content-type").DefValue = "auto-detect" + cpFlags.UintVarP(&maxJobs, "jobs", "j", 1, "Maximum number of parallel jobs.") + storageCmd.AddCommand(cpCmd) + 
rmCmd.Flags().BoolVarP(&recursive, "recursive", "r", false, "Recursively remove a directory.") + storageCmd.AddCommand(rmCmd) + mvCmd.Flags().BoolVarP(&recursive, "recursive", "r", false, "Recursively move a directory.") + storageCmd.AddCommand(mvCmd) + rootCmd.AddCommand(storageCmd) +} diff --git a/cmd/test.go b/cmd/test.go new file mode 100644 index 0000000..06fb777 --- /dev/null +++ b/cmd/test.go @@ -0,0 +1,56 @@ +package cmd + +import ( + "os" + "os/signal" + + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/test/new" + "github.com/supabase/cli/internal/utils" +) + +var ( + testCmd = &cobra.Command{ + GroupID: groupLocalDev, + Use: "test", + Short: "Run tests on local Supabase containers", + } + + testDbCmd = &cobra.Command{ + Use: "db [path] ...", + Short: dbTestCmd.Short, + RunE: dbTestCmd.RunE, + } + + template = utils.EnumFlag{ + Allowed: []string{new.TemplatePgTAP}, + Value: new.TemplatePgTAP, + } + + testNewCmd = &cobra.Command{ + Use: "new <name>", + Short: "Create a new test file", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt) + return new.Run(ctx, args[0], template.Value, afero.NewOsFs()) + }, + } +) + +func init() { + // Build db command + dbFlags := testDbCmd.Flags() + dbFlags.String("db-url", "", "Tests the database specified by the connection string (must be percent-encoded).") + dbFlags.Bool("linked", false, "Runs pgTAP tests on the linked project.") + dbFlags.Bool("local", true, "Runs pgTAP tests on the local database.") + testDbCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") + testCmd.AddCommand(testDbCmd) + // Build new command + newFlags := testNewCmd.Flags() + newFlags.VarP(&template, "template", "t", "Template framework to generate.") + testCmd.AddCommand(testNewCmd) + // Build test command + rootCmd.AddCommand(testCmd) +} diff --git a/cmd/unlink.go b/cmd/unlink.go new file mode 100644 index 0000000..e017e75 --- /dev/null +++ b/cmd/unlink.go @@ -0,0 +1,26 @@ +package cmd + +import ( + "os" + "os/signal" + + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/unlink" +) + +var ( + unlinkCmd = &cobra.Command{ + GroupID: groupLocalDev, + Use: "unlink", + Short: "Unlink a Supabase project", + RunE: func(cmd *cobra.Command, args []string) error { + ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt) + return unlink.Run(ctx, afero.NewOsFs()) + }, + } +) + +func init() { + rootCmd.AddCommand(unlinkCmd) +} diff --git a/cmd/vanitySubdomains.go b/cmd/vanitySubdomains.go new file mode 100644 index 0000000..dd3608c --- /dev/null +++ b/cmd/vanitySubdomains.go @@ -0,0 +1,74 @@ +package cmd + +import ( + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/internal/vanity_subdomains/activate" + "github.com/supabase/cli/internal/vanity_subdomains/check" + "github.com/supabase/cli/internal/vanity_subdomains/delete" + "github.com/supabase/cli/internal/vanity_subdomains/get" +) + +var ( + vanityCmd = &cobra.Command{ + GroupID: groupManagementAPI, + Use: "vanity-subdomains", + Short: "Manage vanity subdomains for Supabase projects", + Long: `Manage vanity subdomains for Supabase projects.
+ +Usage of vanity subdomains and custom domains is mutually exclusive.`, + } + + desiredSubdomain string + + vanityActivateCmd = &cobra.Command{ + Use: "activate", + Short: "Activate a vanity subdomain", + Long: `Activate a vanity subdomain for your Supabase project. + +This reconfigures your Supabase project to respond to requests on your vanity subdomain. +After the vanity subdomain is activated, your project's auth services will no longer function on the {project-ref}.{supabase-domain} hostname. +`, + RunE: func(cmd *cobra.Command, args []string) error { + return activate.Run(cmd.Context(), flags.ProjectRef, desiredSubdomain, afero.NewOsFs()) + }, + } + + vanityGetCmd = &cobra.Command{ + Use: "get", + Short: "Get the current vanity subdomain", + RunE: func(cmd *cobra.Command, args []string) error { + return get.Run(cmd.Context(), flags.ProjectRef, afero.NewOsFs()) + }, + } + + vanityCheckCmd = &cobra.Command{ + Use: "check-availability", + Short: "Checks if a desired subdomain is available for use", + RunE: func(cmd *cobra.Command, args []string) error { + return check.Run(cmd.Context(), flags.ProjectRef, desiredSubdomain, afero.NewOsFs()) + }, + } + + vanityDeleteCmd = &cobra.Command{ + Use: "delete", + Short: "Deletes a project's vanity subdomain", + Long: `Deletes the vanity subdomain for a project, and reverts to using the project ref for routing.`, + RunE: func(cmd *cobra.Command, args []string) error { + return delete.Run(cmd.Context(), flags.ProjectRef, afero.NewOsFs()) + }, + } +) + +func init() { + vanityCmd.PersistentFlags().StringVar(&flags.ProjectRef, "project-ref", "", "Project ref of the Supabase project.") + vanityActivateCmd.Flags().StringVar(&desiredSubdomain, "desired-subdomain", "", "The desired vanity subdomain to use for your Supabase project.") + vanityCheckCmd.Flags().StringVar(&desiredSubdomain, "desired-subdomain", "", "The desired vanity subdomain to use for your Supabase project.") + vanityCmd.AddCommand(vanityGetCmd) + vanityCmd.AddCommand(vanityCheckCmd) + vanityCmd.AddCommand(vanityActivateCmd) + vanityCmd.AddCommand(vanityDeleteCmd) + + rootCmd.AddCommand(vanityCmd) +} diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..1838c76 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,21 @@ +## Extended man pages for CLI commands + +### Build + +Update [version string](https://github.com/supabase/cli/blob/main/docs/main.go#L33) to match the latest release. + +```bash +go run docs/main.go > cli_v1_commands.yaml +``` + +### Release + +1. Clone the [supabase/supabase](https://github.com/supabase/supabase) repo +2. Copy over the CLI reference and reformat using supabase config + +```bash +mv ../cli/cli_v1_commands.yaml specs/ +npx prettier -w specs/cli_v1_commands.yaml +``` + +3. 
If there are new commands added, update [common-cli-sections.json](https://github.com/supabase/supabase/blob/master/spec/common-cli-sections.json) manually diff --git a/docs/main.go b/docs/main.go new file mode 100644 index 0000000..bfacba3 --- /dev/null +++ b/docs/main.go @@ -0,0 +1,320 @@ +package main + +import ( + "bytes" + "embed" + "fmt" + "log" + "os" + "path/filepath" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + cli "github.com/supabase/cli/cmd" + "github.com/supabase/cli/internal/utils" + "gopkg.in/yaml.v3" +) + +const tagOthers = "other-commands" + +var ( + examples map[string][]ExampleDoc + //go:embed templates/examples.yaml + exampleSpec string + //go:embed supabase/* + docsDir embed.FS +) + +func main() { + semver := "latest" + if len(os.Args) > 1 { + semver = os.Args[1] + } + // Trim version tag + if semver[0] == 'v' { + semver = semver[1:] + } + + if err := generate(semver); err != nil { + log.Fatalln(err) + } +} + +func generate(version string) error { + dec := yaml.NewDecoder(strings.NewReader(exampleSpec)) + if err := dec.Decode(&examples); err != nil { + return err + } + root := cli.GetRootCmd() + root.InitDefaultCompletionCmd() + root.InitDefaultHelpFlag() + spec := SpecDoc{ + Clispec: "001", + Info: InfoDoc{ + Id: "cli", + Version: version, + Title: strings.TrimSpace(root.Short), + Description: forceMultiLine("Supabase CLI provides you with tools to develop your application locally, and deploy your application to the Supabase platform."), + Language: "sh", + Source: "https://github.com/supabase/cli", + Bugs: "https://github.com/supabase/cli/issues", + Spec: "https://github.com/supabase/spec/cli_v1_commands.yaml", + Tags: getTags(root), + }, + } + root.Flags().VisitAll(func(flag *pflag.Flag) { + if !flag.Hidden { + spec.Flags = append(spec.Flags, getFlags(flag)) + } + }) + cobra.CheckErr(root.MarkFlagRequired("experimental")) + // Generate, serialise, and print + yamlDoc := GenYamlDoc(root, &spec) + spec.Info.Options = yamlDoc.Options + // Reverse commands list + for i, j := 0, len(spec.Commands)-1; i < j; i, j = i+1, j-1 { + spec.Commands[i], spec.Commands[j] = spec.Commands[j], spec.Commands[i] + } + // Write to stdout + encoder := yaml.NewEncoder(os.Stdout) + encoder.SetIndent(2) + return encoder.Encode(spec) +} + +type TagDoc struct { + Id string `yaml:",omitempty"` + Title string `yaml:",omitempty"` + Description string `yaml:",omitempty"` +} + +type InfoDoc struct { + Id string `yaml:",omitempty"` + Version string `yaml:",omitempty"` + Title string `yaml:",omitempty"` + Language string `yaml:",omitempty"` + Source string `yaml:",omitempty"` + Bugs string `yaml:",omitempty"` + Spec string `yaml:",omitempty"` + Description string `yaml:",omitempty"` + Options string `yaml:",omitempty"` + Tags []TagDoc `yaml:",omitempty"` +} + +type ValueDoc struct { + Id string `yaml:",omitempty"` + Name string `yaml:",omitempty"` + Type string `yaml:",omitempty"` + Description string `yaml:",omitempty"` +} + +type FlagDoc struct { + Id string `yaml:",omitempty"` + Name string `yaml:",omitempty"` + Description string `yaml:",omitempty"` + Required bool `yaml:",omitempty"` + DefaultValue string `yaml:"default_value"` + AcceptedValues []ValueDoc `yaml:"accepted_values,omitempty"` +} + +type ExampleDoc struct { + Id string `yaml:",omitempty"` + Name string `yaml:",omitempty"` + Code string `yaml:",omitempty"` + Response string `yaml:",omitempty"` +} + +type CmdDoc struct { + Id string `yaml:",omitempty"` + Title string `yaml:",omitempty"` + Summary string 
`yaml:",omitempty"` + Source string `yaml:",omitempty"` + Description string `yaml:",omitempty"` + Examples []ExampleDoc `yaml:",omitempty"` + Tags []string `yaml:""` + Links []LinkDoc `yaml:""` + Usage string `yaml:",omitempty"` + Subcommands []string `yaml:""` + Options string `yaml:",omitempty"` + Flags []FlagDoc `yaml:""` +} + +type LinkDoc struct { + Name string `yaml:",omitempty"` + Link string `yaml:",omitempty"` +} + +type ParamDoc struct { + Id string `yaml:",omitempty"` + Title string `yaml:",omitempty"` + Description string `yaml:",omitempty"` + Required bool `yaml:",omitempty"` + Default string `yaml:",omitempty"` + Tags []string `yaml:",omitempty"` + Links []LinkDoc `yaml:""` +} + +type SpecDoc struct { + Clispec string `yaml:",omitempty"` + Info InfoDoc `yaml:",omitempty"` + Flags []FlagDoc `yaml:",omitempty"` + Commands []CmdDoc `yaml:",omitempty"` + Parameters []FlagDoc `yaml:",omitempty"` +} + +// DFS on command tree to generate documentation specs. +func GenYamlDoc(cmd *cobra.Command, root *SpecDoc) CmdDoc { + var subcommands []string + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { + continue + } + sub := GenYamlDoc(c, root) + if !cmd.HasParent() && len(sub.Tags) == 0 { + sub.Tags = append(sub.Tags, tagOthers) + } + root.Commands = append(root.Commands, sub) + subcommands = append(subcommands, sub.Id) + } + + yamlDoc := CmdDoc{ + Id: strings.ReplaceAll(cmd.CommandPath(), " ", "-"), + Title: cmd.CommandPath(), + Summary: forceMultiLine(cmd.Short), + Description: forceMultiLine(strings.ReplaceAll(cmd.Long, "\t", " ")), + Subcommands: subcommands, + } + + names := strings.Fields(cmd.CommandPath()) + if len(names) > 3 { + base := strings.Join(names[2:], "-") + names = append(names[:2], base) + } + path := filepath.Join(names...) + ".md" + if contents, err := docsDir.ReadFile(path); err == nil { + noHeader := bytes.TrimLeftFunc(contents, func(r rune) bool { + return r != '\n' + }) + yamlDoc.Description = forceMultiLine(string(noHeader)) + } + + if eg, ok := examples[yamlDoc.Id]; ok { + yamlDoc.Examples = eg + } + + if len(cmd.GroupID) > 0 { + yamlDoc.Tags = append(yamlDoc.Tags, cmd.GroupID) + } + + if cmd.Runnable() { + yamlDoc.Usage = forceMultiLine(cmd.UseLine()) + } + + // Only print flags for root and leaf commands + if !cmd.HasSubCommands() { + flags := cmd.LocalFlags() + flags.VisitAll(func(flag *pflag.Flag) { + if !flag.Hidden { + yamlDoc.Flags = append(yamlDoc.Flags, getFlags(flag)) + } + }) + // Print required flag for experimental commands + globalFlags := cmd.Root().Flags() + if cli.IsExperimental(cmd) { + flag := globalFlags.Lookup("experimental") + yamlDoc.Flags = append(yamlDoc.Flags, getFlags(flag)) + } + // Leaf commands should inherit parent flags except root + parentFlags := cmd.InheritedFlags() + parentFlags.VisitAll(func(flag *pflag.Flag) { + if !flag.Hidden && globalFlags.Lookup(flag.Name) == nil { + yamlDoc.Flags = append(yamlDoc.Flags, getFlags(flag)) + } + }) + } + + return yamlDoc +} + +func getFlags(flag *pflag.Flag) FlagDoc { + doc := FlagDoc{ + Id: flag.Name, + Name: getName(flag), + Description: forceMultiLine(getUsage(flag)), + DefaultValue: flag.DefValue, + Required: flag.Annotations[cobra.BashCompOneRequiredFlag] != nil, + } + if f, ok := flag.Value.(*utils.EnumFlag); ok { + for _, v := range f.Allowed { + doc.AcceptedValues = append(doc.AcceptedValues, ValueDoc{ + Id: v, + Name: v, + Type: flag.Value.Type(), + }) + } + } + return doc +} + +// Prints a human readable flag name. 
+// +// -f, --flag `string` +func getName(flag *pflag.Flag) (line string) { + // Prefix: shorthand + if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { + line += fmt.Sprintf("-%s, ", flag.Shorthand) + } + line += fmt.Sprintf("--%s", flag.Name) + // Suffix: type + if varname, _ := pflag.UnquoteUsage(flag); varname != "" { + line += fmt.Sprintf(" <%s>", varname) + } + // Not used by our cmd but kept here for consistency + if flag.NoOptDefVal != "" { + switch flag.Value.Type() { + case "string": + line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) + case "bool": + if flag.NoOptDefVal != "true" { + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + case "count": + if flag.NoOptDefVal != "+1" { + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + default: + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + } + return line +} + +// Prints flag usage and default value. +// +// Select a plan. (default "free") +func getUsage(flag *pflag.Flag) string { + _, usage := pflag.UnquoteUsage(flag) + return usage +} + +// Yaml lib generates incorrect yaml with long strings that do not contain \n. +// +// example: 'a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a +// a a a a a a ' +func forceMultiLine(s string) string { + if len(s) > 60 && !strings.Contains(s, "\n") { + s = s + "\n" + } + return s +} + +func getTags(cmd *cobra.Command) (tags []TagDoc) { + for _, group := range cmd.Groups() { + tags = append(tags, TagDoc{ + Id: group.ID, + Title: group.Title[:len(group.Title)-1], + }) + } + tags = append(tags, TagDoc{Id: tagOthers, Title: "Additional Commands"}) + return tags +} diff --git a/docs/supabase/db/diff.md b/docs/supabase/db/diff.md new file mode 100644 index 0000000..a046707 --- /dev/null +++ b/docs/supabase/db/diff.md @@ -0,0 +1,15 @@ +## supabase-db-diff + +Diffs schema changes made to the local or remote database. + +Requires the local development stack to be running when diffing against the local database. To diff against a remote or self-hosted database, specify the `--linked` or `--db-url` flag respectively. + +Runs [djrobstep/migra](https://github.com/djrobstep/migra) in a container to compare schema differences between the target database and a shadow database. The shadow database is created by applying migrations in the local `supabase/migrations` directory in a separate container. Output is written to stdout by default. For convenience, you can also save the schema diff as a new migration file by passing in the `-f` flag. + +By default, all schemas in the target database are diffed. Use the `--schema public,extensions` flag to restrict diffing to a subset of schemas. + +While the diff command is able to capture most schema changes, there are cases where it is known to fail. Currently, this could happen if your schema contains: + +- Changes to publication +- Changes to storage buckets +- Views with `security_invoker` attributes diff --git a/docs/supabase/db/dump.md b/docs/supabase/db/dump.md new file mode 100644 index 0000000..2ccfef0 --- /dev/null +++ b/docs/supabase/db/dump.md @@ -0,0 +1,9 @@ +## supabase-db-dump + +Dumps contents from a remote database. + +Requires your local project to be linked to a remote database by running `supabase link`. For self-hosted databases, you can pass in the connection parameters using the `--db-url` flag. + +Runs `pg_dump` in a container with additional flags to exclude Supabase managed schemas. The ignored schemas include auth, storage, and those created by extensions.
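For example, assuming a project that has already been linked with `supabase link`, a typical invocation might look like this (a sketch using only the flags named on this page; `$POSTGRES_URL` is a placeholder for your own connection string):

```bash
# Dump the linked project's schema (no table data, no custom roles by default)
supabase db dump

# Or target a self-hosted database directly via a connection string
supabase db dump --db-url "$POSTGRES_URL"
```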
+ +The default dump does not contain any data or custom roles. To dump those contents explicitly, specify the `--data-only` or `--role-only` flag respectively. diff --git a/docs/supabase/db/lint.md b/docs/supabase/db/lint.md new file mode 100644 index 0000000..3718d2d --- /dev/null +++ b/docs/supabase/db/lint.md @@ -0,0 +1,17 @@ +## supabase-db-lint + +Lints the local database for schema errors. + +Requires the local development stack to be running when linting against the local database. To lint against a remote or self-hosted database, specify the `--linked` or `--db-url` flag respectively. + +Runs the `plpgsql_check` extension in the local Postgres container to check for errors in all schemas. The default lint level is `warning` and can be raised to `error` via the `--level` flag. + +To lint against specific schemas only, pass in the `--schema` flag. + +The `--fail-on` flag can be used to control when the command should exit with a non-zero status code. The possible values are: + +- `none` (default): Always exit with a zero status code, regardless of lint results. +- `warning`: Exit with a non-zero status code if any warnings or errors are found. +- `error`: Exit with a non-zero status code only if errors are found. + +This flag is particularly useful in CI/CD pipelines where you want to fail the build based on certain lint conditions. \ No newline at end of file diff --git a/docs/supabase/db/pull.md b/docs/supabase/db/pull.md new file mode 100644 index 0000000..8f20f0b --- /dev/null +++ b/docs/supabase/db/pull.md @@ -0,0 +1,9 @@ +# supabase-db-pull + +Pulls schema changes from a remote database. A new migration file will be created under the `supabase/migrations` directory. + +Requires your local project to be linked to a remote database by running `supabase link`. For self-hosted databases, you can pass in the connection parameters using the `--db-url` flag. + +Optionally, a new row can be inserted into the migration history table to reflect the current state of the remote database. + +If no entries exist in the migration history table, `pg_dump` will be used to capture all contents of the remote schemas you have created. Otherwise, this command will only diff schema changes against the remote database, similar to running `db diff --linked`. diff --git a/docs/supabase/db/push.md b/docs/supabase/db/push.md new file mode 100644 index 0000000..fe0893f --- /dev/null +++ b/docs/supabase/db/push.md @@ -0,0 +1,11 @@ +## supabase-db-push + +Pushes all local migrations to a remote database. + +Requires your local project to be linked to a remote database by running `supabase link`. For self-hosted databases, you can pass in the connection parameters using the `--db-url` flag. + +The first time this command is run, a migration history table will be created under `supabase_migrations.schema_migrations`. After successfully applying a migration, a new row will be inserted into the migration history table with the timestamp as its unique ID. Subsequent pushes will skip migrations that have already been applied. + +If you need to mutate the migration history table, such as deleting existing entries or inserting new entries without actually running the migration, use the `migration repair` command. + +Use the `--dry-run` flag to view the list of changes before applying. diff --git a/docs/supabase/db/reset.md b/docs/supabase/db/reset.md new file mode 100644 index 0000000..acb9b98 --- /dev/null +++ b/docs/supabase/db/reset.md @@ -0,0 +1,9 @@ +## supabase-db-reset + +Resets the local database to a clean state.
+ +Requires the local development stack to be started by running `supabase start`. + +Recreates the local Postgres container and applies all local migrations found in the `supabase/migrations` directory. If test data is defined in `supabase/seed.sql`, it will be seeded after the migrations are run. Any other data or schema changes made during local development will be discarded. + +When running db reset with the `--linked` or `--db-url` flag, a SQL script is executed to identify and drop all user-created entities in the remote database. Since Postgres roles are cluster-level entities, any custom roles created through the dashboard or `supabase/roles.sql` will not be deleted by remote reset. diff --git a/docs/supabase/domains/activate.md b/docs/supabase/domains/activate.md new file mode 100644 index 0000000..6624839 --- /dev/null +++ b/docs/supabase/domains/activate.md @@ -0,0 +1,7 @@ +## supabase-domains-activate + +Activates the custom hostname configuration for a project. + +This reconfigures your Supabase project to respond to requests on your custom hostname. + +After the custom hostname is activated, your project's third-party auth providers will no longer function on the Supabase-provisioned subdomain. Please refer to the [Prepare to activate your domain](/docs/guides/platform/custom-domains#prepare-to-activate-your-domain) section in our documentation to learn more about the steps you need to follow. diff --git a/docs/supabase/functions/serve.md b/docs/supabase/functions/serve.md new file mode 100644 index 0000000..d31d9df --- /dev/null +++ b/docs/supabase/functions/serve.md @@ -0,0 +1,28 @@ +## supabase-functions-serve + +Serve all Functions locally. + +The `supabase functions serve` command includes additional flags to assist developers in debugging Edge Functions via the V8 inspector protocol, allowing for debugging via Chrome DevTools, VS Code, and IntelliJ IDEA, for example. Refer to the [docs guide](/docs/guides/functions/debugging-tools) for setup instructions. + +1. `--inspect` + * Alias of `--inspect-mode brk`. + +2. `--inspect-mode [ run | brk | wait ]` + * Activates the inspector capability. + * `run` mode simply allows a connection without additional behavior. It is not ideal for short scripts, but it can be useful for long-running scripts where you might occasionally want to set breakpoints. + * `brk` mode is the same as `run` mode, but additionally sets a breakpoint at the first line to pause script execution before any code runs. + * `wait` mode is similar to `brk` mode, but instead of setting a breakpoint at the first line, it pauses script execution until an inspector session is connected. + +3. `--inspect-main` + * Can only be used when one of the above two flags is enabled. + * By default, creating an inspector session for the main worker is not allowed, but this flag allows it. + * Other behaviors follow the `inspect-mode` flag mentioned above. + +Additionally, the following properties can be customized via `supabase/config.toml` under the `edge_runtime` section. + +1. `inspector_port` + * The port used to listen to the Inspector session, defaults to 8083. +2. `policy` + * A value that indicates how the edge-runtime should forward incoming HTTP requests to the worker. + * `per_worker` allows multiple HTTP requests to be forwarded to a worker that has already been created. + * `oneshot` will force the worker to process a single HTTP request and then exit. (Intended for debugging; this is especially useful if you want changes you've made to be reflected immediately.)
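As a rough sketch of how these flags combine in practice (assuming a project with functions already created; only the flags documented above are used):

```bash
# Serve functions and pause each one on its first line until an inspector attaches
supabase functions serve --inspect-mode brk

# Same, but also permit an inspector session for the main worker
supabase functions serve --inspect-mode brk --inspect-main
```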
diff --git a/docs/supabase/init.md b/docs/supabase/init.md new file mode 100644 index 0000000..1290561 --- /dev/null +++ b/docs/supabase/init.md @@ -0,0 +1,9 @@ +## supabase-init + +Initialize configurations for Supabase local development. + +A `supabase/config.toml` file is created in your current working directory. This configuration is specific to each local project. + +> You may override the directory path by specifying the `SUPABASE_WORKDIR` environment variable or `--workdir` flag. + +In addition to `config.toml`, the `supabase` directory may also contain other Supabase objects, such as `migrations`, `functions`, `tests`, etc. diff --git a/docs/supabase/inspect/db-bloat.md b/docs/supabase/inspect/db-bloat.md new file mode 100644 index 0000000..7c4f34e --- /dev/null +++ b/docs/supabase/inspect/db-bloat.md @@ -0,0 +1,14 @@ +## db-bloat + +This command displays an estimation of table "bloat". Due to Postgres' [MVCC](https://www.postgresql.org/docs/current/mvcc.html), when data is updated or deleted, new rows are created and old rows are made invisible and marked as "dead tuples". Usually the [autovacuum](https://supabase.com/docs/guides/platform/database-size#vacuum-operations) process will asynchronously clean the dead tuples. Sometimes the autovacuum is unable to work fast enough to reduce or prevent tables from becoming bloated. High bloat can slow down queries, cause excessive IOPS and waste space in your database. + +Tables with a high bloat ratio should be investigated to see if vacuuming is not quick enough or there are other issues. + +``` + TYPE │ SCHEMA NAME │ OBJECT NAME │ BLOAT │ WASTE + ────────┼─────────────┼────────────────────────────┼───────┼───────────── + table │ public │ very_bloated_table │ 41.0 │ 700 MB + table │ public │ my_table │ 4.0 │ 76 MB + table │ public │ happy_table │ 1.0 │ 1472 kB + index │ public │ happy_table::my_nice_index │ 0.7 │ 880 kB +``` diff --git a/docs/supabase/inspect/db-blocking.md b/docs/supabase/inspect/db-blocking.md new file mode 100644 index 0000000..b8fb5a6 --- /dev/null +++ b/docs/supabase/inspect/db-blocking.md @@ -0,0 +1,9 @@ +## db-blocking + +This command shows you statements that are currently holding locks and blocking, as well as the statement that is being blocked. This can be used in conjunction with `inspect db locks` to determine which statements need to be terminated in order to resolve lock contention. + +``` + BLOCKED PID │ BLOCKING STATEMENT │ BLOCKING DURATION │ BLOCKING PID │ BLOCKED STATEMENT │ BLOCKED DURATION + ──────────────┼──────────────────────────────┼───────────────────┼──────────────┼────────────────────────────────────────────────────────────────────────────────────────┼─────────────────── + 253 │ select count(*) from mytable │ 00:00:03.838314 │ 13495 │ UPDATE "mytable" SET "updated_at" = '2023─08─03 14:07:04.746688' WHERE "id" = 83719341 │ 00:00:03.821826 +``` diff --git a/docs/supabase/inspect/db-cache-hit.md b/docs/supabase/inspect/db-cache-hit.md new file mode 100644 index 0000000..7bbb928 --- /dev/null +++ b/docs/supabase/inspect/db-cache-hit.md @@ -0,0 +1,14 @@ +# db-cache-hit + +This command provides information on the efficiency of the buffer cache and how often your queries have to hit the disk rather than read from memory. Information on both index reads (`index hit rate`) and table reads (`table hit rate`) is shown. In general, databases with low cache hit rates perform worse as it is slower to go to disk than to retrieve data from memory.
+ +On smaller compute plans (free, small, medium), a ratio of below 99% can indicate a problem. On larger plans the hit rates may be lower, but performance will remain constant, as the data may be served from the OS cache rather than the Postgres buffer cache. + +``` + NAME │ RATIO + ─────────────────┼─────────── + index hit rate │ 0.996621 + table hit rate │ 0.999341 + ``` \ No newline at end of file diff --git a/docs/supabase/inspect/db-calls.md b/docs/supabase/inspect/db-calls.md new file mode 100644 index 0000000..5b76042 --- /dev/null +++ b/docs/supabase/inspect/db-calls.md @@ -0,0 +1,15 @@ +# db-calls + +This command is much like the `supabase inspect db outliers` command, but ordered by the number of times a statement has been called. + +You can use this information to see which queries are called most often, which can potentially be good candidates for optimisation. + +``` + + QUERY │ TOTAL EXECUTION TIME │ PROPORTION OF TOTAL EXEC TIME │ NUMBER CALLS │ SYNC IO TIME + ─────────────────────────────────────────────────┼──────────────────────┼───────────────────────────────┼──────────────┼────────────────── + SELECT * FROM users WHERE id = $1 │ 14:50:11.828939 │ 89.8% │ 183,389,757 │ 00:00:00.002018 + SELECT * FROM user_events │ 01:20:23.466633 │ 1.4% │ 78,325 │ 00:00:00 + INSERT INTO users (email, name) VALUES ($1, $2)│ 00:40:11.616882 │ 0.8% │ 54,003 │ 00:00:00.000322 + +``` diff --git a/docs/supabase/inspect/db-index-sizes.md b/docs/supabase/inspect/db-index-sizes.md new file mode 100644 index 0000000..025e6ac --- /dev/null +++ b/docs/supabase/inspect/db-index-sizes.md @@ -0,0 +1,14 @@ +# db-index-sizes + +This command displays the size of each index in the database. It is calculated by taking the number of pages (reported in `relpages`) and multiplying it by the page size (8192 bytes). + +``` + NAME │ SIZE + ──────────────────────────────┼───────────── + user_events_index │ 2082 MB + job_run_details_pkey │ 3856 kB + schema_migrations_pkey │ 16 kB + refresh_tokens_token_unique │ 8192 bytes + users_instance_id_idx │ 0 bytes + buckets_pkey │ 0 bytes +``` \ No newline at end of file diff --git a/docs/supabase/inspect/db-index-usage.md b/docs/supabase/inspect/db-index-usage.md new file mode 100644 index 0000000..527c86d --- /dev/null +++ b/docs/supabase/inspect/db-index-usage.md @@ -0,0 +1,14 @@ +# db-index-usage + +This command provides information on the efficiency of indexes, represented as what percentage of total scans were index scans. A low percentage can indicate under-indexing or the wrong data being indexed.
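+
+Under the hood, this percentage can be derived from the `pg_stat_user_tables` statistics view. A minimal sketch, not necessarily the CLI's exact query:
+
+```sql
+-- Share of scans on each table that used an index, with a live-row estimate.
+-- Tables with no recorded scans yield NULL here.
+SELECT relname AS table_name,
+       100 * idx_scan / nullif(idx_scan + seq_scan, 0) AS percent_of_times_index_used,
+       n_live_tup AS rows_in_table
+  FROM pg_stat_user_tables
+ ORDER BY n_live_tup DESC;
+```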
+ +``` + TABLE NAME │ PERCENTAGE OF TIMES INDEX USED │ ROWS IN TABLE + ────────────────────┼────────────────────────────────┼──────────────── + user_events │ 99 │ 4225318 + user_feed │ 99 │ 3581573 + unindexed_table │ 0 │ 322911 + job │ 100 │ 33242 + schema_migrations │ 97 │ 0 + migrations │ Insufficient data │ 0 +``` \ No newline at end of file diff --git a/docs/supabase/inspect/db-locks.md b/docs/supabase/inspect/db-locks.md new file mode 100644 index 0000000..2e7f56b --- /dev/null +++ b/docs/supabase/inspect/db-locks.md @@ -0,0 +1,11 @@ +# db-locks + +This command displays queries that have taken out an exclusive lock on a relation. Exclusive locks typically prevent other operations on that relation from taking place, and can be a cause of "hung" queries that are waiting for a lock to be granted. + +If you see a query that is hanging for a very long time or causing blocking issues, you may consider killing it by connecting to the database and running `SELECT pg_cancel_backend(PID);` to cancel the query. If the query still does not stop, you can force a hard stop by running `SELECT pg_terminate_backend(PID);`. + +``` + PID │ RELNAME │ TRANSACTION ID │ GRANTED │ QUERY │ AGE + ─────────┼─────────┼────────────────┼─────────┼─────────────────────────────────────────┼─────────── + 328112 │ null │ 0 │ t │ SELECT * FROM logs; │ 00:04:20 +``` diff --git a/docs/supabase/inspect/db-long-running-queries.md b/docs/supabase/inspect/db-long-running-queries.md new file mode 100644 index 0000000..49118fb --- /dev/null +++ b/docs/supabase/inspect/db-long-running-queries.md @@ -0,0 +1,11 @@ +# db-long-running-queries + +This command displays currently running queries that have been running for longer than 5 minutes, ordered descending by duration. Very long-running queries can be a source of multiple issues, such as preventing DDL statements from completing or vacuum being unable to update `relfrozenxid`. + +``` + PID │ DURATION │ QUERY +───────┼─────────────────┼─────────────────────────────────────────────────────────────────────────────────────── + 19578 | 02:29:11.200129 | EXPLAIN SELECT "students".* FROM "students" WHERE "students"."id" = 1450645 LIMIT 1 + 19465 | 02:26:05.542653 | EXPLAIN SELECT "students".* FROM "students" WHERE "students"."id" = 1889881 LIMIT 1 + 19632 | 02:24:46.962818 | EXPLAIN SELECT "students".* FROM "students" WHERE "students"."id" = 1581884 LIMIT 1 +``` diff --git a/docs/supabase/inspect/db-outliers.md b/docs/supabase/inspect/db-outliers.md new file mode 100644 index 0000000..830a7a4 --- /dev/null +++ b/docs/supabase/inspect/db-outliers.md @@ -0,0 +1,16 @@ +# db-outliers + +This command displays statements, obtained from `pg_stat_statements`, ordered by the amount of time to execute in aggregate. This includes the statement itself, the total execution time for that statement, the proportion of total execution time for all statements that statement has taken up, the number of times that statement has been called, and the amount of time that statement spent on synchronous I/O (reading/writing from the file system). + +Typically, an efficient query will have an appropriate ratio of calls to total execution time, with as little time spent on I/O as possible. Queries that have a high total execution time but low call count should be investigated to improve their performance. Queries that have a high proportion of execution time being spent on synchronous I/O should also be investigated.
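+
+A roughly equivalent listing can be pulled straight from `pg_stat_statements`. The sketch below assumes the extension is enabled and uses Postgres 13+ column names; it is for illustration and is not necessarily the CLI's exact query:
+
+```sql
+-- Top statements by aggregate execution time, with their share of the total
+-- and time spent on block I/O (blk_read_time/blk_write_time are in ms and
+-- require track_io_timing to be on).
+SELECT interval '1 millisecond' * total_exec_time AS total_exec_time,
+       round((100 * total_exec_time / nullif(sum(total_exec_time) OVER (), 0))::numeric, 1) AS prop_exec_time,
+       calls,
+       interval '1 millisecond' * (blk_read_time + blk_write_time) AS sync_io_time,
+       query
+  FROM pg_stat_statements
+ ORDER BY total_exec_time DESC
+ LIMIT 10;
+```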
+ +``` + + QUERY │ EXECUTION TIME │ PROPORTION OF EXEC TIME │ NUMBER CALLS │ SYNC IO TIME +─────────────────────────────────────────┼──────────────────┼─────────────────────────┼──────────────┼─────────────── + SELECT * FROM archivable_usage_events.. │ 154:39:26.431466 │ 72.2% │ 34,211,877 │ 00:00:00 + COPY public.archivable_usage_events (.. │ 50:38:33.198418 │ 23.6% │ 13 │ 13:34:21.00108 + COPY public.usage_events (id, reporte.. │ 02:32:16.335233 │ 1.2% │ 13 │ 00:34:19.784318 + INSERT INTO usage_events (id, retaine.. │ 01:42:59.436532 │ 0.8% │ 12,328,187 │ 00:00:00 + SELECT * FROM usage_events WHERE (alp.. │ 01:18:10.754354 │ 0.6% │ 102,114,301 │ 00:00:00 +``` diff --git a/docs/supabase/inspect/db-replication-slots.md b/docs/supabase/inspect/db-replication-slots.md new file mode 100644 index 0000000..df2e0e5 --- /dev/null +++ b/docs/supabase/inspect/db-replication-slots.md @@ -0,0 +1,13 @@ +# db-replication-slots + +This command shows information about [logical replication slots](https://www.postgresql.org/docs/current/logical-replication.html) that are set up on the database. It shows whether the slot is active, the state of the WAL sender process ('startup', 'catchup', 'streaming', 'backup', 'stopping'), the replication client address, and the replication lag in GB. + +This command is useful to check that the amount of replication lag is as low as possible. Replication lag can occur due to network latency issues, slow disk I/O, long-running transactions, or the subscriber being unable to consume WAL fast enough. + + +``` + NAME │ ACTIVE │ STATE │ REPLICATION CLIENT ADDRESS │ REPLICATION LAG GB + ─────────────────────────────────────────────┼────────┼─────────┼────────────────────────────┼───────────────────── + supabase_realtime_replication_slot │ t │ N/A │ N/A │ 0 + datastream │ t │ catchup │ 24.201.24.106 │ 45 +``` \ No newline at end of file diff --git a/docs/supabase/inspect/db-role-connections.md b/docs/supabase/inspect/db-role-connections.md new file mode 100644 index 0000000..4353933 --- /dev/null +++ b/docs/supabase/inspect/db-role-connections.md @@ -0,0 +1,33 @@ +# db-role-connections + +This command shows the number of active connections for each database role, so you can see which specific role might be consuming more connections than expected. + +This is a Supabase-specific command. You can see this breakdown on the dashboard as well: +https://app.supabase.com/project/_/database/roles + +The maximum number of active connections depends [on your instance size](https://supabase.com/docs/guides/platform/compute-add-ons). You can [manually override](https://supabase.com/docs/guides/platform/performance#allowing-higher-number-of-connections) the allowed number of connections, but it is not advised. + +``` + + + ROLE NAME │ ACTIVE CONNECTION + ────────────────────────────┼─────────────────── + authenticator │ 5 + postgres │ 5 + supabase_admin │ 1 + pgbouncer │ 1 + anon │ 0 + authenticated │ 0 + service_role │ 0 + dashboard_user │ 0 + supabase_auth_admin │ 0 + supabase_storage_admin │ 0 + supabase_functions_admin │ 0 + pgsodium_keyholder │ 0 + pg_read_all_data │ 0 + pg_write_all_data │ 0 + pg_monitor │ 0 + +Active connections 12/90 + +``` diff --git a/docs/supabase/inspect/db-seq-scans.md b/docs/supabase/inspect/db-seq-scans.md new file mode 100644 index 0000000..62a2fae --- /dev/null +++ b/docs/supabase/inspect/db-seq-scans.md @@ -0,0 +1,15 @@ +# db-seq-scans + +This command displays the number of sequential scans recorded against all tables, descending by count of sequential scans. Tables that have very high numbers of sequential scans may be under-indexed, and it may be worth investigating queries that read from these tables.
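+
+The counts come from the cumulative statistics system; a minimal sketch of the underlying query (not necessarily what the CLI runs):
+
+```sql
+-- Sequential scan counts per user table, highest first.
+SELECT relname AS name,
+       seq_scan AS count
+  FROM pg_stat_user_tables
+ ORDER BY seq_scan DESC;
+```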
+ + +``` + NAME │ COUNT + ───────────────────────────────────┼───────── + emails │ 182435 + users │ 25063 + job_run_details │ 60 + schema_migrations │ 0 + migrations │ 0 +``` + diff --git a/docs/supabase/inspect/db-table-index-sizes.md b/docs/supabase/inspect/db-table-index-sizes.md new file mode 100644 index 0000000..c62d5df --- /dev/null +++ b/docs/supabase/inspect/db-table-index-sizes.md @@ -0,0 +1,13 @@ +# db-table-index-sizes + +This command displays the total size of indexes for each table. It is calculated by using the system administration function `pg_indexes_size()`. + +``` + TABLE │ INDEX SIZE + ───────────────────────────────────┼───────────── + job_run_details │ 10104 kB + users │ 128 kB + job │ 32 kB + instances │ 8192 bytes + http_request_queue │ 0 bytes +``` diff --git a/docs/supabase/inspect/db-table-record-counts.md b/docs/supabase/inspect/db-table-record-counts.md new file mode 100644 index 0000000..f1a21b4 --- /dev/null +++ b/docs/supabase/inspect/db-table-record-counts.md @@ -0,0 +1,13 @@ +# db-table-record-counts + +This command displays an estimated count of rows per table, descending by estimated count. The estimated count is derived from `n_live_tup`, which is updated by vacuum operations. Due to the way `n_live_tup` is populated, sparse vs. dense pages can result in estimates that are significantly off from the real row count. + + +``` + NAME │ ESTIMATED COUNT + ─────────────┼────────────────── + logs │ 322943 + emails │ 1103 + job │ 1 + migrations │ 0 +``` \ No newline at end of file diff --git a/docs/supabase/inspect/db-table-sizes.md b/docs/supabase/inspect/db-table-sizes.md new file mode 100644 index 0000000..f29fd4e --- /dev/null +++ b/docs/supabase/inspect/db-table-sizes.md @@ -0,0 +1,14 @@ +# db-table-sizes + +This command displays the size of each table in the database. It is calculated by using the system administration function `pg_table_size()`, which includes the size of the main data fork, free space map, visibility map, and TOAST data. It does not include the size of the table's indexes. + + +``` + NAME │ SIZE + ───────────────────────────────────┼───────────── + job_run_details │ 385 MB + emails │ 584 kB + job │ 40 kB + sessions │ 0 bytes + prod_resource_notifications_meta │ 0 bytes +``` \ No newline at end of file diff --git a/docs/supabase/inspect/db-total-index-size.md b/docs/supabase/inspect/db-total-index-size.md new file mode 100644 index 0000000..2e7439d --- /dev/null +++ b/docs/supabase/inspect/db-total-index-size.md @@ -0,0 +1,9 @@ +# db-total-index-size + +This command displays the total size of all indexes on the database. It is calculated by taking the number of pages (reported in `relpages`) and multiplying it by the page size (8192 bytes). + +``` + SIZE + ───────── + 12 MB +``` \ No newline at end of file diff --git a/docs/supabase/inspect/db-total-table-sizes.md b/docs/supabase/inspect/db-total-table-sizes.md new file mode 100644 index 0000000..534ceb2 --- /dev/null +++ b/docs/supabase/inspect/db-total-table-sizes.md @@ -0,0 +1,11 @@ +# db-total-table-sizes + +This command displays the total size of each table in the database. It is the sum of the values that `pg_table_size()` and `pg_indexes_size()` give for each table. System tables inside `pg_catalog` and `information_schema` are not included.
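+
+A sketch of an equivalent query, using the same system administration functions named above (for illustration; the CLI's actual query may differ):
+
+```sql
+-- pg_table_size() plus pg_indexes_size() per ordinary table,
+-- skipping the system schemas.
+SELECT c.relname AS name,
+       pg_size_pretty(pg_table_size(c.oid) + pg_indexes_size(c.oid)) AS size
+  FROM pg_class c
+  JOIN pg_namespace n ON n.oid = c.relnamespace
+ WHERE c.relkind = 'r'
+   AND n.nspname NOT IN ('pg_catalog', 'information_schema')
+ ORDER BY pg_table_size(c.oid) + pg_indexes_size(c.oid) DESC;
+```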
+ +``` + NAME │ SIZE +───────────────────────────────────┼───────────── + job_run_details │ 395 MB + slack_msgs │ 648 kB + emails │ 640 kB +``` \ No newline at end of file diff --git a/docs/supabase/inspect/db-unused-indexes.md b/docs/supabase/inspect/db-unused-indexes.md new file mode 100644 index 0000000..d0b9aca --- /dev/null +++ b/docs/supabase/inspect/db-unused-indexes.md @@ -0,0 +1,9 @@ +# db-unused-indexes + +This command displays indexes that have < 50 scans recorded against them, and are greater than 5 pages in size, ordered by size relative to the number of index scans. It is generally useful for discovering unused indexes. Indexes can impact write performance, as well as read performance should they occupy space in memory, so it's a good idea to remove indexes that are not needed or used. + +``` + TABLE │ INDEX │ INDEX SIZE │ INDEX SCANS +─────────────────────┼────────────────────────────────────────────┼────────────┼────────────── + public.users │ user_id_created_at_idx │ 97 MB │ 0 +``` diff --git a/docs/supabase/inspect/db-vacuum-stats.md b/docs/supabase/inspect/db-vacuum-stats.md new file mode 100644 index 0000000..5989073 --- /dev/null +++ b/docs/supabase/inspect/db-vacuum-stats.md @@ -0,0 +1,18 @@ +# db-vacuum-stats + +This shows you statistics about vacuum activity for each table. Due to Postgres' [MVCC](https://www.postgresql.org/docs/current/mvcc.html), when data is updated or deleted, new rows are created and old rows are made invisible and marked as "dead tuples". Usually the [autovacuum](https://supabase.com/docs/guides/platform/database-size#vacuum-operations) process will asynchronously clean the dead tuples. + +The command lists when the last vacuum and the last autovacuum took place, the row count on the table, the count of dead rows, and whether autovacuum is expected to run. If the number of dead rows is much higher than the row count, or if an autovacuum is expected but has not been performed for some time, this can indicate that autovacuum is not able to keep up and that your vacuum settings need to be tweaked, or that you require more compute or disk IOPS to allow autovacuum to complete. + + +``` + SCHEMA │ TABLE │ LAST VACUUM │ LAST AUTO VACUUM │ ROW COUNT │ DEAD ROW COUNT │ EXPECT AUTOVACUUM? +──────────────────────┼──────────────────────────────────┼─────────────┼──────────────────┼──────────────────────┼────────────────┼───────────────────── + auth │ users │ │ 2023-06-26 12:34 │ 18,030 │ 0 │ no + public │ profiles │ │ 2023-06-26 23:45 │ 13,420 │ 28 │ no + public │ logs │ │ 2023-06-26 01:23 │ 1,313,033 │ 3,318,228 │ yes + storage │ objects │ │ │ No stats │ 0 │ no + storage │ buckets │ │ │ No stats │ 0 │ no + supabase_migrations │ schema_migrations │ │ │ No stats │ 0 │ no + +``` diff --git a/docs/supabase/link.md b/docs/supabase/link.md new file mode 100644 index 0000000..bba1291 --- /dev/null +++ b/docs/supabase/link.md @@ -0,0 +1,11 @@ +## supabase-link + +Link your local development project to a hosted Supabase project. + +PostgREST configurations are fetched from the Supabase platform and validated against your local configuration file. + +Optionally, database settings can be validated if you provide a password. Your database password is saved in native credentials storage if available. + +> If you do not want to be prompted for the database password, such as in a CI environment, you may specify it explicitly via the `SUPABASE_DB_PASSWORD` environment variable.
+ +Some commands like `db dump`, `db push`, and `db pull` require your project to be linked first. diff --git a/docs/supabase/login.md b/docs/supabase/login.md new file mode 100644 index 0000000..59841e7 --- /dev/null +++ b/docs/supabase/login.md @@ -0,0 +1,9 @@ +## supabase-login + +Connect the Supabase CLI to your Supabase account by logging in with your [personal access token](https://supabase.com/dashboard/account/tokens). + +Your access token is stored securely in [native credentials storage](https://github.com/zalando/go-keyring#dependencies). If native credentials storage is unavailable, it will be written to a plain text file at `~/.supabase/access-token`. + +> If this behavior is not desired, such as in a CI environment, you may skip login by specifying the `SUPABASE_ACCESS_TOKEN` environment variable in other commands. + +The Supabase CLI uses the stored token to access Management APIs for projects, functions, secrets, etc. diff --git a/docs/supabase/migration/list.md b/docs/supabase/migration/list.md new file mode 100644 index 0000000..343bbd4 --- /dev/null +++ b/docs/supabase/migration/list.md @@ -0,0 +1,11 @@ +## supabase-migration-list + +Lists migration history in both local and remote databases. + +Requires your local project to be linked to a remote database by running `supabase link`. For self-hosted databases, you can pass in the connection parameters using the `--db-url` flag. + +> Note that URL strings must be escaped according to [RFC 3986](https://www.rfc-editor.org/rfc/rfc3986). + +Local migrations are stored in the `supabase/migrations` directory while remote migrations are tracked in the `supabase_migrations.schema_migrations` table. Only the timestamps are compared to identify any differences. + +In case of discrepancies between the local and remote migration history, you can resolve them using the `migration repair` command. diff --git a/docs/supabase/migration/new.md b/docs/supabase/migration/new.md new file mode 100644 index 0000000..1348844 --- /dev/null +++ b/docs/supabase/migration/new.md @@ -0,0 +1,7 @@ +## supabase-migration-new + +Creates a new migration file locally. + +A `supabase/migrations` directory will be created if it does not already exist in your current `workdir`. All schema migration files must be created in this directory following the pattern `<timestamp>_<name>.sql`. + +Outputs from other commands like `db diff` may be piped to `migration new <name>` via stdin. diff --git a/docs/supabase/migration/repair.md b/docs/supabase/migration/repair.md new file mode 100644 index 0000000..981bb9c --- /dev/null +++ b/docs/supabase/migration/repair.md @@ -0,0 +1,57 @@ +## supabase-migration-repair + +Repairs the remote migration history table. + +Requires your local project to be linked to a remote database by running `supabase link`. + +If your local and remote migration history goes out of sync, you can repair the remote history by marking specific migrations as `--status applied` or `--status reverted`. Marking as `reverted` will delete an existing record from the migration history table while marking as `applied` will insert a new record. + +For example, your migration history may look like the table below, with missing entries in either local or remote. + +```bash +$ supabase migration list + LOCAL │ REMOTE │ TIME (UTC) + ─────────────────┼────────────────┼────────────────────── + │ 20230103054303 │ 2023-01-03 05:43:03 + 20230103054315 │ │ 2023-01-03 05:43:15 +``` + +To reset your migration history to a clean state, first delete your local migration file.
+ +```bash +$ rm supabase/migrations/20230103054315_remote_commit.sql + +$ supabase migration list + LOCAL │ REMOTE │ TIME (UTC) + ─────────────────┼────────────────┼────────────────────── + │ 20230103054303 │ 2023-01-03 05:43:03 +``` + +Then mark the remote migration `20230103054303` as reverted. + +```bash +$ supabase migration repair 20230103054303 --status reverted +Connecting to remote database... +Repaired migration history: [20230103054303] => reverted +Finished supabase migration repair. + +$ supabase migration list + LOCAL │ REMOTE │ TIME (UTC) + ─────────────────┼────────────────┼────────────────────── +``` + +Now you can run `db pull` again to dump the remote schema as a local migration file. + +```bash +$ supabase db pull +Connecting to remote database... +Schema written to supabase/migrations/20240414044403_remote_schema.sql +Update remote migration history table? [Y/n] +Repaired migration history: [20240414044403] => applied +Finished supabase db pull. + +$ supabase migration list + LOCAL │ REMOTE │ TIME (UTC) + ─────────────────┼────────────────┼────────────────────── + 20240414044403 │ 20240414044403 │ 2024-04-14 04:44:03 +``` diff --git a/docs/supabase/migration/squash.md b/docs/supabase/migration/squash.md new file mode 100644 index 0000000..150d510 --- /dev/null +++ b/docs/supabase/migration/squash.md @@ -0,0 +1,11 @@ +## supabase-migration-squash + +Squashes local schema migrations to a single migration file. + +The squashed migration is equivalent to a schema-only dump of the local database after applying existing migration files. This is especially useful when you want to remove repeated modifications of the same schema from your migration history. + +However, one limitation is that data manipulation statements, such as insert, update, or delete, are omitted from the squashed migration. You will have to add them back manually in a new migration file. This includes cron jobs, storage buckets, and any encrypted secrets in vault. + +By default, the latest `<timestamp>_<name>.sql` file will be updated to contain the squashed migration. You can override the target version using the `--version <timestamp>` flag. + +If your `supabase/migrations` directory is empty, running `supabase migration squash` will do nothing. diff --git a/docs/supabase/start.md b/docs/supabase/start.md new file mode 100644 index 0000000..a590610 --- /dev/null +++ b/docs/supabase/start.md @@ -0,0 +1,11 @@ +## supabase-start + +Starts the Supabase local development stack. + +Requires `supabase/config.toml` to be created in your current working directory by running `supabase init`. + +All service containers are started by default. You can exclude those not needed by passing in the `-x` flag. To exclude multiple containers, either pass in a comma-separated string, such as `-x gotrue,imgproxy`, or specify the `-x` flag multiple times. + +> It is recommended to have at least 7GB of RAM to start all services. + +Health checks are automatically added to verify the started containers. Use the `--ignore-health-check` flag to ignore these errors. diff --git a/docs/supabase/status.md b/docs/supabase/status.md new file mode 100644 index 0000000..5db5c7c --- /dev/null +++ b/docs/supabase/status.md @@ -0,0 +1,7 @@ +## supabase-status + +Shows status of the Supabase local development stack. + +Requires the local development stack to be started by running `supabase start` or `supabase db start`.
+ +You can export the connection parameters for [initializing supabase-js](https://supabase.com/docs/reference/javascript/initializing) locally by specifying the `-o env` flag. Supported parameters include `JWT_SECRET`, `ANON_KEY`, and `SERVICE_ROLE_KEY`. diff --git a/docs/supabase/stop.md b/docs/supabase/stop.md new file mode 100644 index 0000000..870fa86 --- /dev/null +++ b/docs/supabase/stop.md @@ -0,0 +1,9 @@ +## supabase-stop + +Stops the Supabase local development stack. + +Requires `supabase/config.toml` to be created in your current working directory by running `supabase init`. + +All Docker resources are maintained across restarts. Use the `--no-backup` flag to reset your local development data between restarts. + +Use the `--all` flag to stop all local Supabase project instances on the machine. Use it with caution together with `--no-backup`, as it will delete all local Supabase project data. \ No newline at end of file diff --git a/docs/supabase/test/db.md b/docs/supabase/test/db.md new file mode 100644 index 0000000..9978bb4 --- /dev/null +++ b/docs/supabase/test/db.md @@ -0,0 +1,9 @@ +# supabase-test-db + +Executes pgTAP tests against the local database. + +Requires the local development stack to be started by running `supabase start`. + +Runs `pg_prove` in a container with unit test files volume-mounted from the `supabase/tests` directory. Test files can have either the `.sql` or `.pg` extension. + +Since each test is wrapped in its own transaction, it will be individually rolled back regardless of success or failure. diff --git a/docs/templates/examples.yaml b/docs/templates/examples.yaml new file mode 100644 index 0000000..d0588b6 --- /dev/null +++ b/docs/templates/examples.yaml @@ -0,0 +1,381 @@ +supabase-init: + - id: basic-usage + name: Basic usage + code: supabase init + response: Finished supabase init. + - id: from-workdir + name: Initialize from an existing directory + code: supabase init --workdir . + response: Finished supabase init. +supabase-login: + - id: basic-usage + name: Basic usage + code: supabase login + response: | + You can generate an access token from https://supabase.com/dashboard/account/tokens + Enter your access token: sbp_**************************************** + Finished supabase login. +supabase-link: + - id: basic-usage + name: Basic usage + code: supabase link --project-ref ******************** + response: | + Enter your database password (or leave blank to skip): ******** + Finished supabase link. + - id: without-password + name: Link without database password + code: supabase link --project-ref ******************** <<< "" + response: | + Enter your database password (or leave blank to skip): + Finished supabase link. + - id: using-alternate-dns + name: Link using DNS-over-HTTPS resolver + code: supabase link --project-ref ******************** --dns-resolver https + response: | + Enter your database password (or leave blank to skip): + Finished supabase link. +supabase-start: + - id: basic-usage + name: Basic usage + code: supabase start + response: | + Creating custom roles supabase/roles.sql... + Applying migration 20220810154536_employee.sql... + Seeding data supabase/seed.sql... + Started supabase local development setup. + - id: without-studio + name: Start containers without studio and imgproxy + code: supabase start -x studio,imgproxy + response: | + Excluding container: supabase/studio:20221214-4eecc99 + Excluding container: darthsim/imgproxy:v3.8.0 + Started supabase local development setup.
+ - id: ignore-health-check + name: Ignore service health checks + code: supabase start --ignore-health-check + response: | + service not healthy: [supabase_storage_cli] + Started supabase local development setup. +supabase-stop: + - id: basic-usage + name: Basic usage + code: supabase stop + response: | + Stopped supabase local development setup. + Local data are backed up to docker volume. + - id: clean-up + name: Clean up local data after stopping + code: supabase stop --no-backup + response: | + Stopped supabase local development setup. +supabase-status: + - id: basic-usage + name: Basic usage + code: supabase status + response: |2 + supabase local development setup is running. + + API URL: http://127.0.0.1:54321 + GraphQL URL: http://127.0.0.1:54321/graphql/v1 + DB URL: postgresql://postgres:postgres@127.0.0.1:54322/postgres + Studio URL: http://127.0.0.1:54323 + Inbucket URL: http://127.0.0.1:54324 + JWT secret: super-secret-jwt-token-with-at-least-32-characters-long + anon key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6ImFub24iLCJleHAiOjE5ODM4MTI5OTZ9.CRXP1A7WOeoJeXxjNni43kdQwgnWNReilDMblYTn_I0 + service_role key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImV4cCI6MTk4MzgxMjk5Nn0.EGIM96RAZx35lJzdJsyH-qQwv8Hdp7fsn3W0YpN81IU + - id: output-env + name: Format status as environment variables + code: supabase status -o env + response: | + ANON_KEY="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6ImFub24iLCJleHAiOjE5ODM4MTI5OTZ9.CRXP1A7WOeoJeXxjNni43kdQwgnWNReilDMblYTn_I0" + API_URL="http://127.0.0.1:54321" + DB_URL="postgresql://postgres:postgres@127.0.0.1:54322/postgres" + GRAPHQL_URL="http://127.0.0.1:54321/graphql/v1" + INBUCKET_URL="http://127.0.0.1:54324" + JWT_SECRET="super-secret-jwt-token-with-at-least-32-characters-long" + SERVICE_ROLE_KEY="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImV4cCI6MTk4MzgxMjk5Nn0.EGIM96RAZx35lJzdJsyH-qQwv8Hdp7fsn3W0YpN81IU" + STUDIO_URL="http://127.0.0.1:54323" + - id: output-custom-name + name: Customize the names of exported variables + code: supabase status -o env --override-name auth.anon_key=SUPABASE_ANON_KEY --override-name auth.service_role_key=SUPABASE_SERVICE_KEY + response: | + Stopped services: [supabase_inbucket_cli supabase_rest_cli supabase_studio_cli] + SUPABASE_ANON_KEY="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6ImFub24iLCJleHAiOjE5ODM4MTI5OTZ9.CRXP1A7WOeoJeXxjNni43kdQwgnWNReilDMblYTn_I0" + DB_URL="postgresql://postgres:postgres@127.0.0.1:54322/postgres" + GRAPHQL_URL="http://127.0.0.1:54321/graphql/v1" + JWT_SECRET="super-secret-jwt-token-with-at-least-32-characters-long" + SUPABASE_SERVICE_KEY="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImV4cCI6MTk4MzgxMjk5Nn0.EGIM96RAZx35lJzdJsyH-qQwv8Hdp7fsn3W0YpN81IU" +supabase-migration-list: + - id: basic-usage + name: Basic usage + code: supabase migration list + response: |2 + LOCAL │ REMOTE │ TIME (UTC) + ─────────────────┼────────────────┼────────────────────── + │ 20230103054303 │ 2023-01-03 05:43:03 + │ 20230103093141 │ 2023-01-03 09:31:41 + 20230222032233 │ │ 2023-02-22 03:22:33 + - id: with-db-url + name: Connect to self-hosted database + code: supabase migration list --db-url 'postgres://postgres[:percent_encoded_password]@127.0.0.1[:port]/postgres' + response: |2 + LOCAL │ REMOTE │ TIME (UTC) + 
─────────────────┼────────────────┼────────────────────── + 20230103054303 │ 20230103054303 │ 2023-01-03 05:43:03 + 20230103093141 │ 20230103093141 │ 2023-01-03 09:31:41 +supabase-migration-new: + - id: basic-usage + name: Basic usage + code: supabase migration new schema_test + response: | + Created new migration at supabase/migrations/20230306095710_schema_test.sql. + - id: pipe-stdin + name: With statements piped from stdin + code: echo "create schema if not exists test;" | supabase migration new schema_test + response: | + Created new migration at supabase/migrations/20230306095710_schema_test.sql. +supabase-migration-repair: + - id: basic-usage + name: Mark a migration as reverted + code: supabase migration repair 20230103054303 --status reverted + response: | + Repaired migration history: 20230103054303 => reverted + - id: mark-applied + name: Mark a migration as applied + code: supabase migration repair 20230222032233 --status applied + response: | + Repaired migration history: 20230222032233 => applied +supabase-db-diff: + - id: basic-usage + name: Basic usage + code: supabase db diff -f my_table + response: | + Connecting to local database... + Creating shadow database... + Applying migration 20230425064254_remote_commit.sql... + Diffing schemas: auth,extensions,public,storage + Finished supabase db diff on branch main. + + No schema changes found + - id: linked-project + name: Against linked project + code: supabase db diff -f my_table --linked + response: | + Connecting to local database... + Creating shadow database... + Diffing schemas: auth,extensions,public,storage + Finished supabase db diff on branch main. + + WARNING: The diff tool is not foolproof, so you may need to manually rearrange and modify the generated migration. + Run supabase db reset to verify that the new migration does not generate errors. + - id: specific-schema + name: For a specific schema + code: supabase db diff -f my_table --schema auth + response: | + Connecting to local database... + Creating shadow database... + Diffing schemas: auth + Finished supabase db diff on branch main. + + No schema changes found +supabase-db-dump: + - id: basic-usage + name: Basic usage + code: supabase db dump -f supabase/schema.sql + response: | + Dumping schemas from remote database... + Dumped schema to supabase/schema.sql. + - id: role-only + name: Role only + code: supabase db dump -f supabase/roles.sql --role-only + response: | + Dumping roles from remote database... + Dumped schema to supabase/roles.sql. + - id: data-only + name: Data only + code: supabase db dump -f supabase/seed.sql --data-only + response: | + Dumping data from remote database... + Dumped schema to supabase/seed.sql. +supabase-db-lint: + - id: basic-usage + name: Basic usage + code: supabase db lint + response: | + Linting schema: public + + No schema errors found + - id: schema-warnings + name: Warnings for a specific schema + code: supabase db lint --level warning --schema storage + response: | + Linting schema: storage + [ + { + "function": "storage.search", + "issues": [ + { + "level": "warning", + "message": "unused variable \"_bucketid\"", + "sqlState": "00000" + } + ] + } + ] +supabase-db-pull: + - id: basic-usage + name: Basic usage + code: supabase db pull + response: | + Connecting to remote database... + Schema written to supabase/migrations/20240414044403_remote_schema.sql + Update remote migration history table? [Y/n] + Repaired migration history: [20240414044403] => applied + Finished supabase db pull. 
+ The auth and storage schemas are excluded. Run supabase db pull --schema auth,storage again to diff them. + - id: local-studio + name: Local studio + code: supabase db pull --local + response: | + Connecting to local database... + Setting up initial schema.... + Creating custom roles supabase/roles.sql... + Applying migration 20240414044403_remote_schema.sql... + No schema changes found + The auth and storage schemas are excluded. Run supabase db pull --schema auth,storage again to diff them. + exit status 1 + - id: custom-schemas + name: Custom schemas + code: supabase db pull --schema auth,storage + response: | + Connecting to remote database... + Setting up initial schema.... + Creating custom roles supabase/roles.sql... + Applying migration 20240414044403_remote_schema.sql... + No schema changes found + Try rerunning the command with --debug to troubleshoot the error. + exit status 1 +supabase-db-push: + - id: basic-usage + name: Basic usage + code: supabase db push + response: | + Linked project is up to date. + - id: self-hosted + name: Self hosted + code: supabase db push --db-url "postgres://user:pass@127.0.0.1:5432/postgres" + response: | + Pushing migration 20230410135622_create_employees_table.sql... + Finished supabase db push. + - id: dry-run + name: Dry run + code: supabase db push --dry-run + response: | + DRY RUN: migrations will *not* be pushed to the database. + Would push migration 20230410135622_create_employees_table.sql... + Would push migration 20230425064254_my_table.sql... + Finished supabase db push. +supabase-db-reset: + - id: basic-usage + name: Basic usage + code: supabase db reset + response: | + Resetting database... + Initializing schema... + Applying migration 20220810154537_create_employees_table.sql... + Seeding data supabase/seed.sql... + Finished supabase db reset on branch main. +supabase-test-db: + - id: basic-usage + name: Basic usage + code: supabase test db + response: | + /tmp/supabase/tests/nested/order_test.pg .. ok + /tmp/supabase/tests/pet_test.sql .......... ok + All tests successful. + Files=2, Tests=2, 6 wallclock secs ( 0.03 usr 0.01 sys + 0.05 cusr 0.02 csys = 0.11 CPU) + Result: PASS +# TODO: use actual cli response for sso commands +supabase-sso-show: + - id: basic-usage + name: Show information + code: |- + supabase sso show 6df4d73f-bf21-405f-a084-b11adf19fea5 \ + --project-ref abcdefghijklmnopqrst + response: |- + Information about the identity provider in pretty output. + - id: metadata-output + name: Get raw SAML 2.0 Metadata XML + code: |- + supabase sso show 6df4d73f-bf21-405f-a084-b11adf19fea5 \ + --project-ref abcdefghijklmnopqrst \ + --metadata + response: |- + Raw SAML 2.0 XML assigned to this identity provider. This is the + version used in the authentication project, and if using a SAML 2.0 + Metadata URL it may change depending on the caching information + contained within the metadata. +supabase-sso-update: + - id: basic-usage + name: Replace domains + code: |- + supabase sso update 6df4d73f-bf21-405f-a084-b11adf19fea5 \ + --project-ref abcdefghijklmnopqrst \ + --domains new-company.com,new-company.net + response: |- + Information about the updated provider. + - id: add-domains + name: Add an additional domain + code: |- + supabase sso update 6df4d73f-bf21-405f-a084-b11adf19fea5 \ + --project-ref abcdefghijklmnopqrst \ + --add-domains company.net + response: |- + Information about the updated provider. 
+ - id: remove-domains + name: Remove a domain + code: |- + supabase sso update 6df4d73f-bf21-405f-a084-b11adf19fea5 \ + --project-ref abcdefghijklmnopqrst \ + --remove-domains company.org + response: |- + Information about the updated provider. +supabase-sso-remove: + - id: basic-usage + name: Remove a provider + code: |- + supabase sso remove 6df4d73f-bf21-405f-a084-b11adf19fea5 \ + --project-ref abcdefghijklmnopqrst + response: |- + Information about the removed identity provider. It's a good idea to + save this in case you need it later on. +supabase-sso-add: + - id: basic-usage + name: Add with Metadata URL + code: |- + supabase sso add \ + --project-ref abcdefgijklmnopqrst \ + --type saml \ + --metadata-url 'https://...' \ + --domains company.com + response: |- + Information about the added identity provider. You can use + company.com as the domain name on the frontend side to initiate an SSO + request to the identity provider. + - id: with-xml + name: Add with Metadata File + code: |- + supabase sso add \ + --project-ref abcdefgijklmnopqrst \ + --type saml \ + --metadata-file /path/to/metadata/file.xml \ + --domains company.com + response: |- + Information about the added identity provider. You can use + company.com as the domain name on the frontend side to initiate an SSO + request to the identity provider. +supabase-sso-info: + - id: basic-usage + name: Show project information + code: supabase sso info --project-ref abcdefghijklmnopqrst + response: Information about your project's SAML 2.0 configuration. diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000..bf6a62c --- /dev/null +++ b/examples/README.md @@ -0,0 +1,34 @@ +## Code examples using CLI library + +The examples in this directory demonstrate the minimal code to get started with building your own tools for managing Supabase projects. If you are a 3rd party service provider looking for ways to integrate with Supabase user projects, you may want to use the building blocks provided by this library. + +All examples come with an entrypoint that you can build and run locally. + +### Deploy functions + +```bash +# Place your functions under supabase/functions +export SUPABASE_PROJECT_ID="zeoxvqpvpyrxygmmatng" +export SUPABASE_ACCESS_TOKEN="sbp_..." +go run examples/functions-deploy/main.go +``` + +### Migrate database + +```bash +# Place your schemas under supabase/migrations +export PGHOST="db.zeoxvqpvpyrxygmmatng.supabase.co" +export PGPORT="5432" +export PGUSER="postgres" +export PGPASS="" +export PGDATABASE="postgres" +go run examples/migrations-up/main.go +``` + +### Seed storage buckets + +```bash +export SUPABASE_PROJECT_ID="zeoxvqpvpyrxygmmatng" +export SUPABASE_SERVICE_ROLE_KEY="eyJh..."
+go run examples/seed-buckets/main.go +``` diff --git a/examples/functions-deploy/main.go b/examples/functions-deploy/main.go new file mode 100644 index 0000000..716ef14 --- /dev/null +++ b/examples/functions-deploy/main.go @@ -0,0 +1,49 @@ +package main + +import ( + "context" + "io/fs" + "log" + "net/http" + "os" + "time" + + "github.com/supabase/cli/pkg/api" + "github.com/supabase/cli/pkg/config" + "github.com/supabase/cli/pkg/function" +) + +func main() { + if err := deploy(context.Background(), os.DirFS(".")); err != nil { + log.Fatalln(err) + } +} + +// Requires edge runtime binary to be added to PATH +func deploy(ctx context.Context, fsys fs.FS) error { + project := os.Getenv("SUPABASE_PROJECT_ID") + apiClient := newAPIClient(os.Getenv("SUPABASE_ACCESS_TOKEN")) + eszipBundler := function.NewNativeBundler(".", fsys) + functionClient := function.NewEdgeRuntimeAPI(project, apiClient, eszipBundler) + fc := config.FunctionConfig{"my-slug": { + Entrypoint: "supabase/functions/my-slug/index.ts", + ImportMap: "supabase/functions/import_map.json", + }} + return functionClient.UpsertFunctions(ctx, fc) +} + +func newAPIClient(token string) api.ClientWithResponses { + header := func(ctx context.Context, req *http.Request) error { + req.Header.Set("Authorization", "Bearer "+token) + return nil + } + client := api.ClientWithResponses{ClientInterface: &api.Client{ + // Ensure the server URL always has a trailing slash + Server: "https://api.supabase.com/", + Client: &http.Client{ + Timeout: 10 * time.Second, + }, + RequestEditors: []api.RequestEditorFn{header}, + }} + return client +} diff --git a/examples/migrations-up/main.go b/examples/migrations-up/main.go new file mode 100644 index 0000000..3ef0324 --- /dev/null +++ b/examples/migrations-up/main.go @@ -0,0 +1,31 @@ +package main + +import ( + "context" + "io/fs" + "log" + "os" + + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/pgxv5" +) + +func main() { + if err := migrate(context.Background(), os.DirFS(".")); err != nil { + log.Fatalln(err) + } +} + +// Applies local migrations to a remote database, and tracks the history of executed statements.
+func migrate(ctx context.Context, fsys fs.FS) error { + conn, err := pgxv5.Connect(ctx, os.Getenv("SUPABASE_POSTGRES_URL")) + if err != nil { + return err + } + defer conn.Close(ctx) + files, err := migration.ListLocalMigrations("supabase/migrations", fsys) + if err != nil { + return err + } + return migration.ApplyMigrations(ctx, files, conn, fsys) +} diff --git a/examples/seed-buckets/main.go b/examples/seed-buckets/main.go new file mode 100644 index 0000000..2c62b10 --- /dev/null +++ b/examples/seed-buckets/main.go @@ -0,0 +1,42 @@ +package main + +import ( + "context" + "fmt" + "log" + "net/http" + "os" + "time" + + "github.com/supabase/cli/pkg/config" + "github.com/supabase/cli/pkg/fetcher" + "github.com/supabase/cli/pkg/storage" +) + +func main() { + if err := seed(context.Background()); err != nil { + log.Fatalln(err) + } +} + +func seed(ctx context.Context) error { + project := os.Getenv("SUPABASE_PROJECT_ID") + serviceRoleKey := os.Getenv("SUPABASE_SERVICE_ROLE_KEY") + storageClient := newStorageClient(project, serviceRoleKey) + public := false + sc := config.BucketConfig{"my-bucket": { + Public: &public, + }} + return storageClient.UpsertBuckets(ctx, sc) +} + +func newStorageClient(project, serviceRoleKey string) storage.StorageAPI { + return storage.StorageAPI{Fetcher: fetcher.NewFetcher( + fmt.Sprintf("https://db.%s.supabase.co", project), + fetcher.WithBearerToken(serviceRoleKey), + fetcher.WithHTTPClient(&http.Client{ + Timeout: time.Second * 10, + }), + fetcher.WithExpectedStatus(http.StatusOK), + )} +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..7d18dd2 --- /dev/null +++ b/go.mod @@ -0,0 +1,343 @@ +module github.com/supabase/cli + +go 1.23.2 + +require ( + github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c + github.com/Netflix/go-env v0.1.2 + github.com/andybalholm/brotli v1.1.1 + github.com/cenkalti/backoff/v4 v4.3.0 + github.com/charmbracelet/bubbles v0.18.0 + github.com/charmbracelet/bubbletea v0.25.0 + github.com/charmbracelet/glamour v0.7.0 + github.com/charmbracelet/lipgloss v0.12.1 + github.com/containers/common v0.62.0 + github.com/docker/cli v27.5.1+incompatible + github.com/docker/docker v27.5.1+incompatible + github.com/docker/go-connections v0.5.0 + github.com/docker/go-units v0.5.0 + github.com/ecies/go/v2 v2.0.10 + github.com/getsentry/sentry-go v0.31.1 + github.com/go-errors/errors v1.5.1 + github.com/go-git/go-git/v5 v5.13.2 + github.com/go-xmlfmt/xmlfmt v1.1.3 + github.com/golang-jwt/jwt/v5 v5.2.1 + github.com/golangci/golangci-lint v1.64.5 + github.com/google/go-github/v62 v62.0.0 + github.com/google/go-querystring v1.1.0 + github.com/google/uuid v1.6.0 + github.com/h2non/gock v1.2.0 + github.com/jackc/pgconn v1.14.3 + github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 + github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 + github.com/jackc/pgproto3/v2 v2.3.3 + github.com/jackc/pgtype v1.14.4 + github.com/jackc/pgx/v4 v4.18.3 + github.com/joho/godotenv v1.5.1 + github.com/mitchellh/mapstructure v1.5.0 + github.com/muesli/reflow v0.3.0 + github.com/oapi-codegen/oapi-codegen/v2 v2.4.1 + github.com/oapi-codegen/runtime v1.1.1 + github.com/slack-go/slack v0.16.0 + github.com/spf13/afero v1.12.0 + github.com/spf13/cobra v1.9.1 + github.com/spf13/pflag v1.0.6 + github.com/spf13/viper v1.19.0 + github.com/stretchr/testify v1.10.0 + github.com/stripe/pg-schema-diff v0.8.0 + github.com/tidwall/jsonc v0.3.2 + github.com/withfig/autocomplete-tools/packages/cobra v1.2.0 + github.com/zalando/go-keyring v0.2.6 + 
go.opentelemetry.io/otel v1.34.0 + golang.org/x/mod v0.23.0 + golang.org/x/oauth2 v0.26.0 + golang.org/x/term v0.29.0 + google.golang.org/grpc v1.70.0 + gopkg.in/yaml.v3 v3.0.1 + gotest.tools/gotestsum v1.12.0 +) + +require ( + 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect + 4d63.com/gochecknoglobals v0.2.2 // indirect + al.essio.dev/pkg/shellescape v1.5.1 // indirect + dario.cat/mergo v1.0.1 // indirect + github.com/4meepo/tagalign v1.4.1 // indirect + github.com/Abirdcfly/dupword v0.1.3 // indirect + github.com/Antonboom/errname v1.0.0 // indirect + github.com/Antonboom/nilnil v1.0.1 // indirect + github.com/Antonboom/testifylint v1.5.2 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/Crocmagnon/fatcontext v0.7.1 // indirect + github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 // indirect + github.com/Masterminds/semver/v3 v3.3.0 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect + github.com/ProtonMail/go-crypto v1.1.5 // indirect + github.com/alecthomas/chroma/v2 v2.8.0 // indirect + github.com/alecthomas/go-check-sumtype v0.3.1 // indirect + github.com/alexkohler/nakedret/v2 v2.0.5 // indirect + github.com/alexkohler/prealloc v1.0.0 // indirect + github.com/alingse/asasalint v0.0.11 // indirect + github.com/alingse/nilnesserr v0.1.2 // indirect + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect + github.com/ashanbrown/forbidigo v1.6.0 // indirect + github.com/ashanbrown/makezero v1.2.0 // indirect + github.com/atotto/clipboard v0.1.4 // indirect + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect + github.com/aymerick/douceur v0.2.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bitfield/gotestdox v0.2.2 // indirect + github.com/bkielbasa/cyclop v1.2.3 // indirect + github.com/blizzy78/varnamelen v0.8.0 // indirect + github.com/bombsimon/wsl/v4 v4.5.0 // indirect + github.com/breml/bidichk v0.3.2 // indirect + github.com/breml/errchkjson v0.4.0 // indirect + github.com/butuzov/ireturn v0.3.1 // indirect + github.com/butuzov/mirror v1.3.0 // indirect + github.com/catenacyber/perfsprint v0.8.1 // indirect + github.com/ccojocar/zxcvbn-go v1.0.2 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/charithe/durationcheck v0.0.10 // indirect + github.com/charmbracelet/harmonica v0.2.0 // indirect + github.com/charmbracelet/x/ansi v0.1.4 // indirect + github.com/chavacava/garif v0.1.0 // indirect + github.com/ckaznocha/intrange v0.3.0 // indirect + github.com/cloudflare/circl v1.3.7 // indirect + github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containers/storage v1.57.1 // indirect + github.com/curioswitch/go-reassign v0.3.0 // indirect + github.com/cyphar/filepath-securejoin v0.3.6 // indirect + github.com/daixiang0/gci v0.13.5 // indirect + github.com/danieljoos/wincred v1.2.2 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect + github.com/denis-tingaikin/go-header v0.5.0 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/dlclark/regexp2 v1.11.0 // indirect + github.com/dnephin/pflag v1.0.7 // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + 
github.com/docker/docker-credential-helpers v0.8.2 // indirect + github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect + github.com/docker/go-metrics v0.0.1 // indirect + github.com/dprotaso/go-yit v0.0.0-20220510233725-9ba8df137936 // indirect + github.com/emirpasic/gods v1.18.1 // indirect + github.com/ethereum/go-ethereum v1.14.13 // indirect + github.com/ettle/strcase v0.2.0 // indirect + github.com/fatih/color v1.18.0 // indirect + github.com/fatih/structtag v1.2.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/firefart/nonamedreturns v1.0.5 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/fvbommel/sortorder v1.1.0 // indirect + github.com/fzipp/gocyclo v0.6.0 // indirect + github.com/getkin/kin-openapi v0.127.0 // indirect + github.com/ghostiam/protogetter v0.3.9 // indirect + github.com/go-critic/go-critic v0.12.0 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.6.2 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-toolsmith/astcast v1.1.0 // indirect + github.com/go-toolsmith/astcopy v1.1.0 // indirect + github.com/go-toolsmith/astequal v1.2.0 // indirect + github.com/go-toolsmith/astfmt v1.1.0 // indirect + github.com/go-toolsmith/astp v1.1.0 // indirect + github.com/go-toolsmith/strparse v1.1.0 // indirect + github.com/go-toolsmith/typep v1.1.0 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/gofrs/flock v0.12.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect + github.com/golangci/go-printf-func-name v0.1.0 // indirect + github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect + github.com/golangci/misspell v0.6.0 // indirect + github.com/golangci/plugin-module-register v0.1.1 // indirect + github.com/golangci/revgrep v0.8.0 // indirect + github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/gordonklaus/ineffassign v0.1.0 // indirect + github.com/gorilla/css v1.0.0 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/gostaticanalysis/analysisutil v0.7.1 // indirect + github.com/gostaticanalysis/comment v1.4.2 // indirect + github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect + github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect + github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect + github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hexops/gotextdiff v1.0.3 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/invopop/yaml v0.3.1 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + 
github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect + github.com/jgautheron/goconst v1.7.1 // indirect + github.com/jingyugao/rowserrcheck v1.1.1 // indirect + github.com/jjti/go-spancheck v0.6.4 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/julz/importas v0.2.0 // indirect + github.com/karamaru-alpha/copyloopvar v1.2.1 // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect + github.com/kisielk/errcheck v1.8.0 // indirect + github.com/kkHAIKE/contextcheck v1.1.5 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/kulti/thelper v0.6.3 // indirect + github.com/kunwardeep/paralleltest v1.0.10 // indirect + github.com/lasiar/canonicalheader v1.1.2 // indirect + github.com/ldez/exptostd v0.4.1 // indirect + github.com/ldez/gomoddirectives v0.6.1 // indirect + github.com/ldez/grignotin v0.9.0 // indirect + github.com/ldez/tagliatelle v0.7.1 // indirect + github.com/ldez/usetesting v0.4.2 // indirect + github.com/leonklingele/grouper v1.1.2 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect + github.com/macabu/inamedparam v0.1.3 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/maratori/testableexamples v1.0.0 // indirect + github.com/maratori/testpackage v1.1.1 // indirect + github.com/matoous/godox v1.1.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-localereader v0.0.1 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mgechev/revive v1.6.1 // indirect + github.com/microcosm-cc/bluemonday v1.0.25 // indirect + github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect + github.com/moby/term v0.5.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/moricho/tparallel v0.3.2 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b // indirect + github.com/muesli/cancelreader v0.2.2 // indirect + github.com/muesli/termenv v0.15.2 // indirect + github.com/nakabonne/nestif v0.3.1 // indirect + github.com/nishanths/exhaustive v0.12.0 // indirect + github.com/nishanths/predeclared v0.2.2 // indirect + github.com/nunnatsa/ginkgolinter v0.19.0 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/perimeterx/marshmallow v1.1.5 // indirect + github.com/pjbgf/sha1cd v0.3.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/polyfloyd/go-errorlint v1.7.1 // indirect + github.com/prometheus/client_golang v1.12.1 // indirect + github.com/prometheus/client_model 
v0.2.1-0.20210607210712-147c58e9608a // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 // indirect + github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect + github.com/quasilyte/gogrep v0.5.0 // indirect + github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect + github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect + github.com/raeperd/recvcheck v0.2.0 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/ryancurrah/gomodguard v1.3.5 // indirect + github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sahilm/fuzzy v0.1.1-0.20230530133925-c48e322e2a8f // indirect + github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect + github.com/sashamelentyev/interfacebloat v1.1.0 // indirect + github.com/sashamelentyev/usestdlibvars v1.28.0 // indirect + github.com/securego/gosec/v2 v2.22.1 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sivchari/containedctx v1.0.3 // indirect + github.com/sivchari/tenv v1.12.1 // indirect + github.com/skeema/knownhosts v1.3.0 // indirect + github.com/sonatard/noctx v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/sourcegraph/go-diff v0.7.0 // indirect + github.com/speakeasy-api/openapi-overlay v0.9.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect + github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/tdakkota/asciicheck v0.4.0 // indirect + github.com/tetafro/godot v1.4.20 // indirect + github.com/theupdateframework/notary v0.7.0 // indirect + github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 // indirect + github.com/timonwong/loggercheck v0.10.1 // indirect + github.com/tomarrell/wrapcheck/v2 v2.10.0 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect + github.com/ultraware/funlen v0.2.0 // indirect + github.com/ultraware/whitespace v0.2.0 // indirect + github.com/uudashr/gocognit v1.2.0 // indirect + github.com/uudashr/iface v1.3.1 // indirect + github.com/vmware-labs/yaml-jsonpath v0.3.2 // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + github.com/xen0n/gosmopolitan v1.2.2 // indirect + github.com/yagipy/maintidx v1.0.0 // indirect + github.com/yeya24/promlinter v0.3.0 // indirect + github.com/ykadowak/zerologlint v0.1.5 // indirect + github.com/yuin/goldmark v1.5.4 // indirect + github.com/yuin/goldmark-emoji v1.0.2 // indirect + gitlab.com/bosi/decorder v0.4.2 // indirect + go-simpler.org/musttag v0.13.0 // indirect + go-simpler.org/sloglint v0.9.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 // 
indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/proto/otlp v1.5.0 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect + go.uber.org/multierr v1.9.0 // indirect + go.uber.org/zap v1.24.0 // indirect + golang.org/x/crypto v0.33.0 // indirect + golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 // indirect + golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac // indirect + golang.org/x/net v0.35.0 // indirect + golang.org/x/sync v0.11.0 // indirect + golang.org/x/sys v0.30.0 // indirect + golang.org/x/text v0.22.0 // indirect + golang.org/x/tools v0.30.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287 // indirect + google.golang.org/protobuf v1.36.4 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + honnef.co/go/tools v0.6.0 // indirect + mvdan.cc/gofumpt v0.7.0 // indirect + mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..e6bcbdd --- /dev/null +++ b/go.sum @@ -0,0 +1,1561 @@ +4d63.com/gocheckcompilerdirectives v1.2.1 h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA= +4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= +4d63.com/gochecknoglobals v0.2.2 h1:H1vdnwnMaZdQW/N+NrkT1SZMTBmcwHe9Vq8lJcYYTtU= +4d63.com/gochecknoglobals v0.2.2/go.mod h1:lLxwTQjL5eIesRbvnzIP3jZtG140FnTdz+AlMa+ogt0= +al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho= +al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod 
h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/4meepo/tagalign v1.4.1 h1:GYTu2FaPGOGb/xJalcqHeD4il5BiCywyEYZOA55P6J4= +github.com/4meepo/tagalign v1.4.1/go.mod h1:2H9Yu6sZ67hmuraFgfZkNcg5Py9Ch/Om9l2K/2W1qS4= +github.com/Abirdcfly/dupword v0.1.3 h1:9Pa1NuAsZvpFPi9Pqkd93I7LIYRURj+A//dFd5tgBeE= +github.com/Abirdcfly/dupword v0.1.3/go.mod h1:8VbB2t7e10KRNdwTVoxdBaxla6avbhGzb8sCTygUMhw= +github.com/Antonboom/errname v1.0.0 h1:oJOOWR07vS1kRusl6YRSlat7HFnb3mSfMl6sDMRoTBA= +github.com/Antonboom/errname v1.0.0/go.mod h1:gMOBFzK/vrTiXN9Oh+HFs+e6Ndl0eTFbtsRTSRdXyGI= +github.com/Antonboom/nilnil v1.0.1 h1:C3Tkm0KUxgfO4Duk3PM+ztPncTFlOf0b2qadmS0s4xs= +github.com/Antonboom/nilnil v1.0.1/go.mod h1:CH7pW2JsRNFgEh8B2UaPZTEPhCMuFowP/e8Udp9Nnb0= +github.com/Antonboom/testifylint v1.5.2 h1:4s3Xhuv5AvdIgbd8wOOEeo0uZG7PbDKQyKY5lGoQazk= +github.com/Antonboom/testifylint v1.5.2/go.mod h1:vxy8VJ0bc6NavlYqjZfmp6EfqXMtBgQ4+mhCojwC1P8= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= +github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Crocmagnon/fatcontext v0.7.1 h1:SC/VIbRRZQeQWj/TcQBS6JmrXcfA+BU4OGSVUt54PjM= +github.com/Crocmagnon/fatcontext v0.7.1/go.mod h1:1wMvv3NXEBJucFGfwOJBxSVWcoIO6emV215SMkW9MFU= 
+github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 h1:/fTUt5vmbkAcMBt4YQiuC23cV0kEsN1MVMNqeOW43cU= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0/go.mod h1:ONJg5sxcbsdQQ4pOW8TGdTidT2TMAUy/2Xhr8mrYaao= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Netflix/go-env v0.1.2 h1:0DRoLR9lECQ9Zqvkswuebm3jJ/2enaDX6Ei8/Z+EnK0= +github.com/Netflix/go-env v0.1.2/go.mod h1:WlIhYi++8FlKNJtrop1mjXYAJMzv1f43K4MqCoh0yGE= +github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA= +github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= +github.com/ProtonMail/go-crypto v1.1.5 h1:eoAQfK2dwL+tFSFpr7TbOaPNUbPiJj4fLYwwGE1FQO4= +github.com/ProtonMail/go-crypto v1.1.5/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= +github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d h1:hi6J4K6DKrR4/ljxn6SF6nURyu785wKMuQcjt7H3VCQ= +github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= +github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/chroma/v2 v2.8.0 h1:w9WJUjFFmHHB2e8mRpL9jjy3alYDlU0QLDezj1xE264= +github.com/alecthomas/chroma/v2 v2.8.0/go.mod h1:yrkMI9807G1ROx13fhe1v6PN2DDeaR73L3d+1nmYQtw= +github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU= +github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E= +github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= +github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexkohler/nakedret/v2 v2.0.5 h1:fP5qLgtwbx9EJE8dGEERT02YwS8En4r9nnZ71RK+EVU= +github.com/alexkohler/nakedret/v2 v2.0.5/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU= +github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= +github.com/alexkohler/prealloc v1.0.0/go.mod 
h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= +github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/alingse/nilnesserr v0.1.2 h1:Yf8Iwm3z2hUUrP4muWfW83DF4nE3r1xZ26fGWUKCZlo= +github.com/alingse/nilnesserr v0.1.2/go.mod h1:1xJPrXonEtX7wyTq8Dytns5P2hNzoWymVUIaKm4HNFg= +github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= +github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= +github.com/ashanbrown/makezero v1.2.0 h1:/2Lp1bypdmK9wDIq7uWBlDF1iMUpIIS4A+pF6C9IEUU= +github.com/ashanbrown/makezero v1.2.0/go.mod h1:dxlPhHbDMC6N6xICzFBSK+4njQDdK8euNO0qjQMtGY4= +github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= +github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= +github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20150223135152-b965b613227f/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bitfield/gotestdox v0.2.2 h1:x6RcPAbBbErKLnapz1QeAlf3ospg8efBsedU93CDsnE= +github.com/bitfield/gotestdox v0.2.2/go.mod h1:D+gwtS0urjBrzguAkTM2wodsTQYFHdpx8eqRJ3N+9pY= +github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bkielbasa/cyclop v1.2.3 h1:faIVMIGDIANuGPWH031CZJTi2ymOQBULs9H21HSMa5w= +github.com/bkielbasa/cyclop v1.2.3/go.mod h1:kHTwA9Q0uZqOADdupvcFJQtp/ksSnytRMe8ztxG8Fuo= +github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/bmatcuk/doublestar 
v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bombsimon/wsl/v4 v4.5.0 h1:iZRsEvDdyhd2La0FVi5k6tYehpOR/R7qIUjmKk7N74A= +github.com/bombsimon/wsl/v4 v4.5.0/go.mod h1:NOQ3aLF4nD7N5YPXMruR6ZXDOAqLoM0GEpLwTdvmOSc= +github.com/breml/bidichk v0.3.2 h1:xV4flJ9V5xWTqxL+/PMFF6dtJPvZLPsyixAoPe8BGJs= +github.com/breml/bidichk v0.3.2/go.mod h1:VzFLBxuYtT23z5+iVkamXO386OB+/sVwZOpIj6zXGos= +github.com/breml/errchkjson v0.4.0 h1:gftf6uWZMtIa/Is3XJgibewBm2ksAQSY/kABDNFTAdk= +github.com/breml/errchkjson v0.4.0/go.mod h1:AuBOSTHyLSaaAFlWsRSuRBIroCh3eh7ZHh5YeelDIk8= +github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0 h1:s7+5BfS4WFJoVF9pnB8kBk03S7pZXRdKamnV0FOl5Sc= +github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/butuzov/ireturn v0.3.1 h1:mFgbEI6m+9W8oP/oDdfA34dLisRFCj2G6o/yiI1yZrY= +github.com/butuzov/ireturn v0.3.1/go.mod h1:ZfRp+E7eJLC0NQmk1Nrm1LOrn/gQlOykv+cVPdiXH5M= +github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc= +github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI= +github.com/catenacyber/perfsprint v0.8.1 h1:bGOHuzHe0IkoGeY831RW4aSlt1lPRd3WRAScSWOaV7E= +github.com/catenacyber/perfsprint v0.8.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= +github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg= +github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= +github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= +github.com/charmbracelet/bubbles v0.18.0 h1:PYv1A036luoBGroX6VWjQIE9Syf2Wby2oOl/39KLfy0= +github.com/charmbracelet/bubbles v0.18.0/go.mod h1:08qhZhtIwzgrtBjAcJnij1t1H0ZRjwHyGsy6AL11PSw= +github.com/charmbracelet/bubbletea v0.25.0 h1:bAfwk7jRz7FKFl9RzlIULPkStffg5k6pNt5dywy4TcM= +github.com/charmbracelet/bubbletea v0.25.0/go.mod h1:EN3QDR1T5ZdWmdfDzYcqOCAps45+QIJbLOBxmVNWNNg= +github.com/charmbracelet/glamour v0.7.0 h1:2BtKGZ4iVJCDfMF229EzbeR1QRKLWztO9dMtjmqZSng= +github.com/charmbracelet/glamour v0.7.0/go.mod h1:jUMh5MeihljJPQbJ/wf4ldw2+yBP59+ctV36jASy7ps= +github.com/charmbracelet/harmonica 
v0.2.0 h1:8NxJWRWg/bzKqqEaaeFNipOu77YR5t8aSwG4pgaUBiQ= +github.com/charmbracelet/harmonica v0.2.0/go.mod h1:KSri/1RMQOZLbw7AHqgcBycp8pgJnQMYYT8QZRqZ1Ao= +github.com/charmbracelet/lipgloss v0.12.1 h1:/gmzszl+pedQpjCOH+wFkZr/N90Snz40J/NR7A0zQcs= +github.com/charmbracelet/lipgloss v0.12.1/go.mod h1:V2CiwIuhx9S1S1ZlADfOj9HmxeMAORuz5izHb0zGbB8= +github.com/charmbracelet/x/ansi v0.1.4 h1:IEU3D6+dWwPSgZ6HBH+v6oUuZ/nVawMiWj5831KfiLM= +github.com/charmbracelet/x/ansi v0.1.4/go.mod h1:dk73KoMTT5AX5BsX0KrqhsTqAnhZZoCBjs7dGWp4Ktw= +github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= +github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/ckaznocha/intrange v0.3.0 h1:VqnxtK32pxgkhJgYQEeOArVidIPg+ahLP7WBOXZd5ZY= +github.com/ckaznocha/intrange v0.3.0/go.mod h1:+I/o2d2A1FBHgGELbGxzIcyd3/9l9DuwjM8FsbSS3Lo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ= +github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 h1:q2hJAaP1k2wIvVRd/hEHD7lacgqrCPS+k8g1MndzfWY= +github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containers/common v0.62.0 h1:Sl9WE5h7Y/F3bejrMAA4teP1EcY9ygqJmW4iwSloZ10= +github.com/containers/common v0.62.0/go.mod h1:Yec+z8mrSq4rydHofrnDCBqAcNA/BGrSg1kfFUL6F6s= +github.com/containers/storage v1.57.1 h1:hKPoFsuBcB3qTzBxa4IFpZMRzUuL5Xhv/BE44W0XHx8= +github.com/containers/storage v1.57.1/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/curioswitch/go-reassign v0.3.0 h1:dh3kpQHuADL3cobV/sSGETA8DOv457dwl+fbBAhrQPs= +github.com/curioswitch/go-reassign 
v0.3.0/go.mod h1:nApPCCTtqLJN/s8HfItCcKV0jIPwluBOvZP+dsJGA88= +github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM= +github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/daixiang0/gci v0.13.5 h1:kThgmH1yBmZSBCh1EJVxQ7JsHpm5Oms0AMed/0LaH4c= +github.com/daixiang0/gci v0.13.5/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= +github.com/danieljoos/wincred v1.2.2 h1:774zMFJrqaeYCK2W57BgAem/MLi6mtSE47MB6BOJ0i0= +github.com/danieljoos/wincred v1.2.2/go.mod h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7nd/ogr0Uh8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= +github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= +github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk= +github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= +github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY= +github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= +github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= +github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0= +github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-metrics 
v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/dprotaso/go-yit v0.0.0-20191028211022-135eb7262960/go.mod h1:9HQzr9D/0PGwMEbC3d5AB7oi67+h4TsQqItC1GVYG58= +github.com/dprotaso/go-yit v0.0.0-20220510233725-9ba8df137936 h1:PRxIJD8XjimM5aTknUK9w6DHLDox2r2M3DI4i2pnd3w= +github.com/dprotaso/go-yit v0.0.0-20220510233725-9ba8df137936/go.mod h1:ttYvX5qlB+mlV1okblJqcSMtR4c52UKxDiX9GRBS8+Q= +github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= +github.com/ecies/go/v2 v2.0.10 h1:AaLxGio0MLLbvWur4rKnLzw+K9zI+wMScIDAtqCqOtU= +github.com/ecies/go/v2 v2.0.10/go.mod h1:N73OyuR6tuKznit2LhXjrZ0XAQ234uKbzYz8pEPYzlI= +github.com/elazarl/goproxy v1.4.0 h1:4GyuSbFa+s26+3rmYNSuUVsx+HgPrV1bk1jXI0l9wjM= +github.com/elazarl/goproxy v1.4.0/go.mod h1:X/5W/t+gzDyLfHW4DrMdpjqYjpXsURlBt9lpBDxZZZQ= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= +github.com/ethereum/go-ethereum v1.14.13 h1:L81Wmv0OUP6cf4CW6wtXsr23RUrDhKs2+Y9Qto+OgHU= +github.com/ethereum/go-ethereum v1.14.13/go.mod h1:RAC2gVMWJ6FkxSPESfbshrcKpIokgQKsVKmAuqdekDY= +github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= +github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/firefart/nonamedreturns v1.0.5 h1:tM+Me2ZaXs8tfdDw3X6DOX++wMCOqzYUho6tUTYIdRA= +github.com/firefart/nonamedreturns v1.0.5/go.mod h1:gHJjDqhGM4WyPt639SOZs+G89Ko7QKH5R5BhnO6xJhw= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= +github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/getkin/kin-openapi v0.127.0 h1:Mghqi3Dhryf3F8vR370nN67pAERW+3a95vomb3MAREY= +github.com/getkin/kin-openapi v0.127.0/go.mod h1:OZrfXzUfGrNbsKj+xmFBx6E5c6yH3At/tAKSc2UszXM= +github.com/getsentry/sentry-go v0.31.1 h1:ELVc0h7gwyhnXHDouXkhqTFSO5oslsRDk0++eyE0KJ4= +github.com/getsentry/sentry-go v0.31.1/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY= +github.com/ghostiam/protogetter v0.3.9 h1:j+zlLLWzqLay22Cz/aYwTHKQ88GE2DQ6GkWSYFOI4lQ= +github.com/ghostiam/protogetter v0.3.9/go.mod h1:WZ0nw9pfzsgxuRsPOFQomgDVSWtDLJRfQJEhsGbmQMA= +github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= +github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= +github.com/go-critic/go-critic v0.12.0 h1:iLosHZuye812wnkEz1Xu3aBwn5ocCPfc9yqmFG9pa6w= +github.com/go-critic/go-critic v0.12.0/go.mod h1:DpE0P6OVc6JzVYzmM5gq5jMU31zLr4am5mB/VfFK64w= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= +github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/go-git/go-git/v5 v5.13.2 h1:7O7xvsK7K+rZPKW6AQR1YyNhfywkv7B8/FsP3ki6Zv0= +github.com/go-git/go-git/v5 v5.13.2/go.mod h1:hWdW5P4YZRjmpGHwRH2v3zkWcNl6HeXaXQEMGb3NJ9A= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod 
h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= +github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= +github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= +github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= +github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= +github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= +github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= +github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= +github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= +github.com/go-toolsmith/astequal v1.2.0 h1:3Fs3CYZ1k9Vo4FzFhwwewC3CHISHDnVUPC4x0bI2+Cw= +github.com/go-toolsmith/astequal v1.2.0/go.mod h1:c8NZ3+kSFtFY/8lPso4v8LuJjdJiUFVnSuU3s0qrrDY= +github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= +github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= +github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= +github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= +github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk= +github.com/go-toolsmith/pkgload v1.2.2/go.mod h1:R2hxLNRKuAsiXCo2i5J6ZQPhnPMOVtU+f0arbFPWCus= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/strparse 
v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw= +github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= +github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= +github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY= +github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf 
v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/go-printf-func-name v0.1.0 h1:dVokQP+NMTO7jwO4bwsRwLWeudOVUPPyAKJuzv8pEJU= +github.com/golangci/go-printf-func-name v0.1.0/go.mod h1:wqhWFH5mUdJQhweRnldEywnR5021wTdZSNgwYceV14s= +github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d h1:viFft9sS/dxoYY0aiOTsLKO2aZQAPT4nlQCsimGcSGE= +github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d/go.mod h1:ivJ9QDg0XucIkmwhzCDsqcnxxlDStoTl89jDMIoNxKY= +github.com/golangci/golangci-lint v1.64.5 h1:5omC86XFBKXZgCrVdUWU+WNHKd+CWCxNx717KXnzKZY= +github.com/golangci/golangci-lint v1.64.5/go.mod h1:WZnwq8TF0z61h3jLQ7Sk5trcP7b3kUFxLD6l1ivtdvU= +github.com/golangci/misspell v0.6.0 h1:JCle2HUTNWirNlDIAUO44hUsKhOFqGPoC4LZxlaSXDs= +github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo= +github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c= +github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc= +github.com/golangci/revgrep v0.8.0 h1:EZBctwbVd0aMeRnNUsFogoyayvKHyxlV3CdUA46FX2s= +github.com/golangci/revgrep v0.8.0/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k= +github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed h1:IURFTjxeTfNFP0hTEi1YKjB/ub8zkpaOqFFMApi2EAs= +github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93 h1:jc2UWq7CbdszqeH6qu1ougXMIUBfSy8Pbh/anURYbGI= +github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod 
h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github/v62 v62.0.0 h1:/6mGCaRywZz9MuHyw9gD1CwsbmBX8GWsbFkwMmHdhl4= +github.com/google/go-github/v62 v62.0.0/go.mod h1:EMxeUqGJq2xRu9DYBMwel/mr7kZrzUOfQmmpYrZn2a4= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod 
h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s= +github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= +github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= +github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= +github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/forcetypeassert v0.2.0 h1:uSnWrrUEYDr86OCxWa4/Tp2jeYDlogZiZHzGkWFefTk= +github.com/gostaticanalysis/forcetypeassert v0.2.0/go.mod h1:M5iPavzE9pPqWyeiVXSFghQjljW1+l/Uke3PXHS6ILY= +github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= +github.com/gostaticanalysis/testutil v0.5.0 h1:Dq4wT1DdTwTGCQQv3rl3IvD5Ld0E6HiY+3Zh0sUGqw8= +github.com/gostaticanalysis/testutil v0.5.0/go.mod h1:OLQSbuM6zw2EvCcXTz1lVq5unyoNft372msDY0nY5Hs= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= +github.com/h2non/gock v1.2.0 h1:K6ol8rfrRkUOefooBC8elXoaNGYkpp7y2qcxGG6BzUE= +github.com/h2non/gock v1.2.0/go.mod h1:tNhoxHYW2W42cYkYb1WqzdbYIieALC99kpYr7rH/BQk= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw= +github.com/hashicorp/go-uuid 
v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/invopop/yaml v0.3.1 h1:f0+ZpmhfBSS4MhG+4HYseMdJhoeeopbSKbq5Rpeelso= +github.com/invopop/yaml v0.3.1/go.mod h1:PMOp3nn4/12yEZUFfmOuNHJsZToEEOwoWsT+D81KkeA= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= +github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 h1:Dj0L5fhJ9F82ZJyVOmBx6msDp/kfd1t9GRfny/mfJA0= +github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock 
v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.14.4 h1:fKuNiCumbKTAIxQwXfB/nsrnkEI6bPJrrSiMKgbJ2j8= +github.com/jackc/pgtype v1.14.4/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= +github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod 
h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jgautheron/goconst v1.7.1 h1:VpdAG7Ca7yvvJk5n8dMwQhfEZJh95kl/Hl9S1OI5Jkk= +github.com/jgautheron/goconst v1.7.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8 h1:CZkYfurY6KGhVtlalI4QwQ6T0Cu6iuY3e0x5RLu96WE= +github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo= +github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d h1:jRQLvyVGL+iVtDElaEIDdKwpPqUIZJfzkNLV34htpEc= +github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jjti/go-spancheck v0.6.4 h1:Tl7gQpYf4/TMU7AT84MN83/6PutY21Nb9fuQjFTpRRc= +github.com/jjti/go-spancheck v0.6.4/go.mod h1:yAEYdKJ2lRkDA8g7X+oKUHXOWVAXSBJRv04OhF+QUjk= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= +github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ= +github.com/julz/importas v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY= +github.com/karamaru-alpha/copyloopvar v1.2.1 h1:wmZaZYIjnJ0b5UoKDjUHrikcV0zuPyyxI4SVplLd2CI= +github.com/karamaru-alpha/copyloopvar v1.2.1/go.mod h1:nFmMlFNlClC2BPvNaHMdkirmTJxVCY0lhxBtlfOypMM= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= 
+github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.8.0 h1:ZX/URYa7ilESY19ik/vBmCn6zdGQLxACwjAcWbHlYlg= +github.com/kisielk/errcheck v1.8.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkHAIKE/contextcheck v1.1.5 h1:CdnJh63tcDe53vG+RebdpdXJTc9atMgGqdx8LXxiilg= +github.com/kkHAIKE/contextcheck v1.1.5/go.mod h1:O930cpht4xb1YQpK+1+AgoM3mFsvxr7uyFptcnWTYUA= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= +github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= +github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCTdvWJ/lDDs= +github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lasiar/canonicalheader v1.1.2 h1:vZ5uqwvDbyJCnMhmFYimgMZnJMjwljN5VGY0VKbMXb4= +github.com/lasiar/canonicalheader v1.1.2/go.mod h1:qJCeLFS0G/QlLQ506T+Fk/fWMa2VmBUiEI2cuMK4djI= +github.com/ldez/exptostd v0.4.1 h1:DIollgQ3LWZMp3HJbSXsdE2giJxMfjyHj3eX4oiD6JU= +github.com/ldez/exptostd v0.4.1/go.mod h1:iZBRYaUmcW5jwCR3KROEZ1KivQQp6PHXbDPk9hqJKCQ= +github.com/ldez/gomoddirectives v0.6.1 h1:Z+PxGAY+217f/bSGjNZr/b2KTXcyYLgiWI6geMBN2Qc= +github.com/ldez/gomoddirectives v0.6.1/go.mod h1:cVBiu3AHR9V31em9u2kwfMKD43ayN5/XDgr+cdaFaKs= +github.com/ldez/grignotin v0.9.0 h1:MgOEmjZIVNn6p5wPaGp/0OKWyvq42KnzAt/DAb8O4Ow= +github.com/ldez/grignotin v0.9.0/go.mod h1:uaVTr0SoZ1KBii33c47O1M8Jp3OP3YDwhZCmzT9GHEk= +github.com/ldez/tagliatelle v0.7.1 h1:bTgKjjc2sQcsgPiT902+aadvMjCeMHrY7ly2XKFORIk= +github.com/ldez/tagliatelle v0.7.1/go.mod h1:3zjxUpsNB2aEZScWiZTHrAXOl1x25t3cRmzfK1mlo2I= +github.com/ldez/usetesting v0.4.2 h1:J2WwbrFGk3wx4cZwSMiCQQ00kjGR0+tuuyW0Lqm4lwA= +github.com/ldez/usetesting v0.4.2/go.mod h1:eEs46T3PpQ+9RgN9VjpY6qWdiw2/QmfiDeWmdZdrjIQ= +github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY= +github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA= +github.com/lib/pq 
v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk= +github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I= +github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= +github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= +github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04= +github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= +github.com/matoous/godox v1.1.0 h1:W5mqwbyWrwZv6OQ5Z1a/DHGMOvXYCBP3+Ht7KMoJhq4= +github.com/matoous/godox v1.1.0/go.mod h1:jgE/3fUXiTurkdHOLT5WEkThTSuE7yxHv5iWPa80afs= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= +github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= +github.com/mattn/go-runewidth v0.0.9/go.mod 
h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mgechev/revive v1.6.1 h1:ncK0ZCMWtb8GXwVAmk+IeWF2ULIDsvRxSRfg5sTwQ2w= +github.com/mgechev/revive v1.6.1/go.mod h1:/2tfHWVO8UQi/hqJsIYNEKELi+DJy/e+PQpLgTB1v88= +github.com/microcosm-cc/bluemonday v1.0.25 h1:4NEwSfiJ+Wva0VxN5B8OwMicaJvD8r9tlJWm9rtloEg= +github.com/microcosm-cc/bluemonday v1.0.25/go.mod h1:ZIOjCQp1OrzBBPIJmfX4qDYFuhU02nx4bn030ixfHLE= +github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= +github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= +github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI= +github.com/moricho/tparallel v0.3.2/go.mod 
h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b h1:1XF24mVaiu7u+CFywTdcDo2ie1pzzhwjt6RHqzpMU34= +github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b/go.mod h1:fQuZ0gauxyBcmsdE3ZT4NasjaRdxmbCS0jRHsrWu3Ho= +github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= +github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= +github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= +github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= +github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= +github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4= +github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg= +github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= +github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= +github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= +github.com/nunnatsa/ginkgolinter v0.19.0 h1:CnHRFAeBS3LdLI9h+Jidbcc5KH71GKOmaBZQk8Srnto= +github.com/nunnatsa/ginkgolinter v0.19.0/go.mod h1:jkQ3naZDmxaZMXPWaS9rblH+i+GWXQCaS/JFIWcOH2s= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oapi-codegen/oapi-codegen/v2 v2.4.1 h1:ykgG34472DWey7TSjd8vIfNykXgjOgYJZoQbKfEeY/Q= +github.com/oapi-codegen/oapi-codegen/v2 v2.4.1/go.mod h1:N5+lY1tiTDV3V1BeHtOxeWXHoPVeApvsvjJqegfoaz8= +github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro= +github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod 
h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= +github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= +github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= +github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polyfloyd/go-errorlint v1.7.1 h1:RyLVXIbosq1gBdk/pChWA8zWYLsq9UEw7a1L5TVMCnA= +github.com/polyfloyd/go-errorlint v1.7.1/go.mod h1:aXjNb1x2TNhoLsk26iv1yl7a+zTnXPhwEMtEXukiLR8= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a h1:CmF68hwI0XsOQ5UwlBopMi2Ow4Pbg32akc4KIVCOm+Y= +github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.1.3/go.mod 
h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 h1:+Wl/0aFp0hpuHM3H//KMft64WQ1yX9LdJY64Qm/gFCo= +github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= +github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE= +github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= +github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/raeperd/recvcheck v0.2.0 h1:GnU+NsbiCqdC2XX5+vMZzP+jAJC5fht7rcVTAhX74UI= +github.com/raeperd/recvcheck v0.2.0/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.3.5 h1:cShyguSwUEeC0jS7ylOiG/idnd1TpJ1LfHGpV3oJmPU= +github.com/ryancurrah/gomodguard v1.3.5/go.mod h1:MXlEPQRxgfPQa62O8wzK3Ozbkv9Rkqr+wKjSxTdsNJE= +github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= +github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sahilm/fuzzy 
v0.1.1-0.20230530133925-c48e322e2a8f h1:MvTmaQdww/z0Q4wrYjDSCcZ78NoftLQyHBSLW/Cx79Y= +github.com/sahilm/fuzzy v0.1.1-0.20230530133925-c48e322e2a8f/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= +github.com/sanposhiho/wastedassign/v2 v2.1.0 h1:crurBF7fJKIORrV85u9UUpePDYGWnwvv3+A96WvwXT0= +github.com/sanposhiho/wastedassign/v2 v2.1.0/go.mod h1:+oSmSC+9bQ+VUAxA66nBb0Z7N8CK7mscKTDYC6aIek4= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= +github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.28.0 h1:jZnudE2zKCtYlGzLVreNp5pmCdOxXUzwsMDBkR21cyQ= +github.com/sashamelentyev/usestdlibvars v1.28.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/securego/gosec/v2 v2.22.1 h1:IcBt3TpI5Y9VN1YlwjSpM2cHu0i3Iw52QM+PQeg7jN8= +github.com/securego/gosec/v2 v2.22.1/go.mod h1:4bb95X4Jz7VSEPdVjC0hD7C/yR6kdeUBvCPOy9gDQ0g= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= +github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= +github.com/sivchari/tenv v1.12.1 h1:+E0QzjktdnExv/wwsnnyk4oqZBUfuh89YMQT1cyuvSY= +github.com/sivchari/tenv v1.12.1/go.mod h1:1LjSOUCc25snIr5n3DtGGrENhX3LuWefcplwVGC24mw= +github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= +github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= +github.com/slack-go/slack v0.16.0 h1:khp/WCFv+Hb/B/AJaAwvcxKun0hM6grN0bUZ8xG60P8= +github.com/slack-go/slack v0.16.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= 
+github.com/sonatard/noctx v0.1.0 h1:JjqOc2WN16ISWAjAk8M5ej0RfExEXtkEyExl2hLW+OM= +github.com/sonatard/noctx v0.1.0/go.mod h1:0RvBxqY8D4j9cTTTWE8ylt2vqj2EPI8fHmrxHdsaZ2c= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= +github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/speakeasy-api/openapi-overlay v0.9.0 h1:Wrz6NO02cNlLzx1fB093lBlYxSI54VRhy1aSutx0PQg= +github.com/speakeasy-api/openapi-overlay v0.9.0/go.mod h1:f5FloQrHA7MsxYg9djzMD5h6dxrHjVVByWKh7an8TRc= +github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= +github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= +github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= +github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stbenjam/no-sprintf-host-port v0.2.0 h1:i8pxvGrt1+4G0czLr/WnmyH7zbZ8Bg8etvARQ1rpyl4= +github.com/stbenjam/no-sprintf-host-port v0.2.0/go.mod h1:eL0bQ9PasS0hsyTyfTjjG+E80QIyPnBVQbYZyv20Jfk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stripe/pg-schema-diff v0.8.0 h1:Ggm4yDbPtaflYQLV3auEMTLxQPaentV/wmDEoCF5jxQ= +github.com/stripe/pg-schema-diff v0.8.0/go.mod h1:HuTBuWLuvnY9g9nptbSD58xugN19zSJNkF4w/sYRtdU= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/tdakkota/asciicheck v0.4.0 h1:VZ13Itw4k1i7d+dpDSNS8Op645XgGHpkCEh/WHicgWw= +github.com/tdakkota/asciicheck v0.4.0/go.mod h1:0k7M3rCfRXb0Z6bwgvkEIMleKH3kXNz9UqJ9Xuqopr8= +github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= +github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= +github.com/tetafro/godot v1.4.20 h1:z/p8Ek55UdNvzt4TFn2zx2KscpW4rWqcnUrdmvWJj7E= +github.com/tetafro/godot v1.4.20/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= +github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c= +github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw= +github.com/tidwall/jsonc v0.3.2 h1:ZTKrmejRlAJYdn0kcaFqRAKlxxFIC21pYq8vLa4p2Wc= +github.com/tidwall/jsonc v0.3.2/go.mod h1:dw+3CIxqHi+t8eFSpzzMlcVYxKp08UP5CD8/uSFCyJE= +github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 h1:y4mJRFlM6fUyPhoXuFg/Yu02fg/nIPFMOY8tOqppoFg= +github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460= +github.com/timonwong/loggercheck v0.10.1 h1:uVZYClxQFpw55eh+PIoqM7uAOHMrhVcDoWDery9R8Lg= +github.com/timonwong/loggercheck v0.10.1/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8= +github.com/tomarrell/wrapcheck/v2 v2.10.0 h1:SzRCryzy4IrAH7bVGG4cK40tNUhmVmMDuJujy4XwYDg= +github.com/tomarrell/wrapcheck/v2 v2.10.0/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= +github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI= +github.com/ultraware/funlen v0.2.0/go.mod h1:ZE0q4TsJ8T1SQcjmkhN/w+MceuatI6pBFSxxyteHIJA= +github.com/ultraware/whitespace v0.2.0 h1:TYowo2m9Nfj1baEQBjuHzvMRbp19i+RCcRYrSWoFa+g= +github.com/ultraware/whitespace v0.2.0/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= 
+github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYRA= +github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU= +github.com/uudashr/iface v1.3.1 h1:bA51vmVx1UIhiIsQFSNq6GZ6VPTk3WNMZgRiCe9R29U= +github.com/uudashr/iface v1.3.1/go.mod h1:4QvspiRd3JLPAEXBQ9AiZpLbJlrWWgRChOKDJEuQTdg= +github.com/vmware-labs/yaml-jsonpath v0.3.2 h1:/5QKeCBGdsInyDCyVNLbXyilb61MXGi9NP674f9Hobk= +github.com/vmware-labs/yaml-jsonpath v0.3.2/go.mod h1:U6whw1z03QyqgWdgXxvVnQ90zN1BWz5V+51Ewf8k+rQ= +github.com/withfig/autocomplete-tools/packages/cobra v1.2.0 h1:MzD3XeOOSO3mAjOPpF07jFteSKZxsRHvlIcAR9RQzKM= +github.com/withfig/autocomplete-tools/packages/cobra v1.2.0/go.mod h1:RoXh7+7qknOXL65uTzdzE1mPxqcPwS7FLCE9K5GfmKo= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU= +github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= +github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= +github.com/yeya24/promlinter v0.3.0 h1:JVDbMp08lVCP7Y6NP3qHroGAO6z2yGKQtS5JsjqtoFs= +github.com/yeya24/promlinter v0.3.0/go.mod h1:cDfJQQYv9uYciW60QT0eeHlFodotkYZlL+YcPQN+mW4= +github.com/ykadowak/zerologlint v0.1.5 h1:Gy/fMz1dFQN9JZTPjv1hxEk+sRWm05row04Yoolgdiw= +github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.3.7/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/goldmark v1.5.4 h1:2uY/xC0roWy8IBEGLgB1ywIoEJFGmRrX21YQcvGZzjU= +github.com/yuin/goldmark v1.5.4/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/goldmark-emoji v1.0.2 h1:c/RgTShNgHTtc6xdz2KKI74jJr6rWi7FPgnP9GAsO5s= +github.com/yuin/goldmark-emoji v1.0.2/go.mod 
h1:RhP/RWpexdp+KHs7ghKnifRoIs/Bq4nDS7tRbCkOwKY= +github.com/zalando/go-keyring v0.2.6 h1:r7Yc3+H+Ux0+M72zacZoItR3UDxeWfKTcabvkI8ua9s= +github.com/zalando/go-keyring v0.2.6/go.mod h1:2TCrxYrbUNYfNS/Kgy/LSrkSQzZ5UPVH85RwfczwvcI= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo= +gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8= +go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ= +go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= +go-simpler.org/musttag v0.13.0 h1:Q/YAW0AHvaoaIbsPj3bvEI5/QFP7w696IMUpnKXQfCE= +go-simpler.org/musttag v0.13.0/go.mod h1:FTzIGeK6OkKlUDVpj0iQUXZLUO1Js9+mvykDQy9C5yM= +go-simpler.org/sloglint v0.9.0 h1:/40NQtjRx9txvsB/RN022KsUJU+zaaSb/9q9BSefSrE= +go-simpler.org/sloglint v0.9.0/go.mod h1:G/OrAF6uxj48sHahCzrbarVMptL2kjWTaUeC8+fOGww= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 h1:ajl4QczuJVA2TU9W9AGw++86Xga/RKt//16z/yxPgdk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0/go.mod h1:Vn3/rlOJ3ntf/Q3zAI0V5lDnTbHGaUsNUeF6nZmm7pA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 h1:BEj3SPM81McUZHYjRS5pEgNgnmzGJ5tRpU5krWnV8Bs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0/go.mod h1:9cKLGBDzI/F3NoHLQGm4ZrYdIHsvGt6ej6hUowxY0J4= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.34.0 
h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= +golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 h1:9kj3STMvgqy3YA4VQXBrN7925ICMxD5wzMRcgA30588= +golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac h1:TSSpLIG4v+p0rPv1pNOQtl1I8knsO4S9trOxNMOLVP4= +golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= +golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net 
v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= +golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE= +golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= +golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= +golang.org/x/tools v0.30.0 
h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287 h1:J1H9f+LEdWAfHcez/4cvaVBox7cOYT+IU6rgqj5x++8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk= +google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= 
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= +google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM= +google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII= +gopkg.in/cenkalti/backoff.v2 v2.2.1/go.mod h1:S0QdOvT2AlerfSBkp0O+dk+bbIMaNbEmVk876gPCthU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod 
h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1 h1:d4KQkxAaAiRY2h5Zqis161Pv91A37uZyJOx73duwUwM= +gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20191026110619-0b21df46bc1d/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools/gotestsum v1.12.0 h1:CmwtaGDkHxrZm4Ib0Vob89MTfpc3GrEFMJKovliPwGk= +gotest.tools/gotestsum v1.12.0/go.mod h1:fAvqkSptospfSbQw26CTYzNwnsE/ztqLeyhP0h67ARY= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.6.0 h1:TAODvD3knlq75WCp2nyGJtT4LeRV/o7NN9nYPeVJXf8= +honnef.co/go/tools v0.6.0/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= +mvdan.cc/gofumpt v0.7.0 h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU= +mvdan.cc/gofumpt v0.7.0/go.mod h1:txVFJy/Sc/mvaycET54pV8SW8gWxTlUuGHVEcncmNUo= +mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8XzX0u72NttUGVFjO3U= +mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f/go.mod h1:RSLa7mKKCNeTTMHBw5Hsy2rfJmd6O2ivt9Dw9ZqCQpQ= +rsc.io/binaryregexp v0.2.0/go.mod 
h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/internal/bans/get/get.go b/internal/bans/get/get.go
new file mode 100644
index 0000000..a9e5290
--- /dev/null
+++ b/internal/bans/get/get.go
@@ -0,0 +1,26 @@
+package get
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/go-errors/errors"
+    "github.com/spf13/afero"
+    "github.com/supabase/cli/internal/utils"
+)
+
+func Run(ctx context.Context, projectRef string, fsys afero.Fs) error {
+    // 1. Sanity checks.
+    // 2. get network bans
+    {
+        resp, err := utils.GetSupabase().V1ListAllNetworkBansWithResponse(ctx, projectRef)
+        if err != nil {
+            return errors.Errorf("failed to retrieve network bans: %w", err)
+        }
+        if resp.JSON201 == nil {
+            return errors.New("Unexpected error retrieving network bans: " + string(resp.Body))
+        }
+        fmt.Printf("DB banned IPs: %+v\n", resp.JSON201.BannedIpv4Addresses)
+        return nil
+    }
+}
diff --git a/internal/bans/update/update.go b/internal/bans/update/update.go
new file mode 100644
index 0000000..dfb31d8
--- /dev/null
+++ b/internal/bans/update/update.go
@@ -0,0 +1,47 @@
+package update
+
+import (
+    "context"
+    "fmt"
+    "net"
+
+    "github.com/go-errors/errors"
+    "github.com/spf13/afero"
+    "github.com/supabase/cli/internal/utils"
+    "github.com/supabase/cli/pkg/api"
+)
+
+func validateIps(ips []string) error {
+    for _, ip := range ips {
+        ip := net.ParseIP(ip)
+        if ip.To4() == nil {
+            return errors.Errorf("only IPv4 supported at the moment: %s", ip)
+        }
+    }
+    return nil
+}
+
+func Run(ctx context.Context, projectRef string, dbIpsToUnban []string, fsys afero.Fs) error {
+    // 1. sanity checks
+    {
+        err := validateIps(dbIpsToUnban)
+        if err != nil {
+            return err
+        }
+    }
+
+    // 2. remove bans
+    {
+        resp, err := utils.GetSupabase().V1DeleteNetworkBansWithResponse(ctx, projectRef, api.RemoveNetworkBanRequest{
+            Ipv4Addresses: dbIpsToUnban,
+        })
+        if err != nil {
+            return errors.Errorf("failed to remove network bans: %w", err)
+        }
+        if resp.StatusCode() != 200 {
+            return errors.New("Unexpected error removing network bans: " + string(resp.Body))
+        }
+        fmt.Printf("Successfully removed bans for %+v.\n", dbIpsToUnban)
+        return nil
+    }
+}
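The validateIps guard above leans on two standard-library behaviours that are easy to miss: net.ParseIP returns nil for malformed input, and IP.To4() returns nil for any address that cannot be represented as IPv4, so both cases land in the same rejection branch. A minimal standalone sketch of that check, illustrative only and not part of this commit:

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        for _, s := range []string{"1.2.3.4", "2001:db8::1", "not-an-ip"} {
            ip := net.ParseIP(s)
            // ParseIP yields nil for malformed input; To4 yields nil for
            // anything not representable as IPv4. validateIps rejects both
            // through the single To4() == nil test.
            fmt.Printf("%-12q parsed=%v ipv4=%v\n", s, ip != nil, ip.To4() != nil)
        }
    }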
diff --git a/internal/bans/update/update_test.go b/internal/bans/update/update_test.go
new file mode 100644
index 0000000..8e488fe
--- /dev/null
+++ b/internal/bans/update/update_test.go
@@ -0,0 +1,17 @@
+package update
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+)
+
+func TestPrivateSubnet(t *testing.T) {
+    err := validateIps([]string{"12.3.4.5", "10.0.0.0", "1.2.3.1"})
+    assert.Nil(t, err)
+}
+
+func TestIpv4(t *testing.T) {
+    err := validateIps([]string{"12.3.4.5", "2001:db8:abcd:0012::0", "1.2.3.1"})
+    assert.ErrorContains(t, err, "only IPv4 supported at the moment: 2001:db8:abcd:12::")
+}
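The expected message in TestIpv4 is not a typo: formatting a net.IP with %s prints its canonical, zero-compressed form, so the input 2001:db8:abcd:0012::0 comes back as 2001:db8:abcd:12:: in the error string. A one-line demonstration (illustrative, not part of the commit):

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // net.IP implements fmt.Stringer; String() emits the canonical form,
        // which is what the error message produced by validateIps contains.
        fmt.Println(net.ParseIP("2001:db8:abcd:0012::0")) // 2001:db8:abcd:12::
    }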
diff --git a/internal/bootstrap/bootstrap.go b/internal/bootstrap/bootstrap.go
new file mode 100644
index 0000000..8310a82
--- /dev/null
+++ b/internal/bootstrap/bootstrap.go
@@ -0,0 +1,395 @@
+package bootstrap
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "net/url"
+    "os"
+    "path/filepath"
+    "strings"
+    "time"
+
+    "github.com/cenkalti/backoff/v4"
+    "github.com/go-errors/errors"
+    "github.com/google/go-github/v62/github"
+    "github.com/jackc/pgconn"
+    "github.com/jackc/pgx/v4"
+    "github.com/joho/godotenv"
+    "github.com/spf13/afero"
+    "github.com/spf13/viper"
+    "github.com/supabase/cli/internal/db/push"
+    initBlank "github.com/supabase/cli/internal/init"
+    "github.com/supabase/cli/internal/link"
+    "github.com/supabase/cli/internal/login"
+    "github.com/supabase/cli/internal/projects/apiKeys"
+    "github.com/supabase/cli/internal/projects/create"
+    "github.com/supabase/cli/internal/utils"
+    "github.com/supabase/cli/internal/utils/flags"
+    "github.com/supabase/cli/internal/utils/tenant"
+    "github.com/supabase/cli/pkg/api"
+    "github.com/supabase/cli/pkg/fetcher"
+    "github.com/supabase/cli/pkg/queue"
+    "golang.org/x/term"
+)
+
+func Run(ctx context.Context, starter StarterTemplate, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error {
+    workdir := viper.GetString("WORKDIR")
+    if !filepath.IsAbs(workdir) {
+        workdir = filepath.Join(utils.CurrentDirAbs, workdir)
+    }
+    if err := utils.MkdirIfNotExistFS(fsys, workdir); err != nil {
+        return err
+    }
+    if empty, err := afero.IsEmpty(fsys, workdir); err != nil {
+        return errors.Errorf("failed to read workdir: %w", err)
+    } else if !empty {
+        title := fmt.Sprintf("Do you want to overwrite existing files in %s directory?", utils.Bold(workdir))
+        if shouldOverwrite, err := utils.NewConsole().PromptYesNo(ctx, title, true); err != nil {
+            return err
+        } else if !shouldOverwrite {
+            return errors.New(context.Canceled)
+        }
+    }
+    if err := utils.ChangeWorkDir(fsys); err != nil {
+        return err
+    }
+    // 0. Download starter template
+    if len(starter.Url) > 0 {
+        client := utils.GetGitHubClient(ctx)
+        if err := downloadSample(ctx, client, starter.Url, fsys); err != nil {
+            return err
+        }
+    } else if err := initBlank.Run(ctx, fsys, nil, nil, utils.InitParams{Overwrite: true}); err != nil {
+        return err
+    }
+    // 1. Login
+    _, err := utils.LoadAccessTokenFS(fsys)
+    if errors.Is(err, utils.ErrMissingToken) {
+        if err := login.Run(ctx, os.Stdout, login.RunParams{
+            OpenBrowser: term.IsTerminal(int(os.Stdin.Fd())),
+            Fsys:        fsys,
+        }); err != nil {
+            return err
+        }
+    } else if err != nil {
+        return err
+    }
+    // 2. Create project
+    params := api.V1CreateProjectBodyDto{
+        Name:        filepath.Base(workdir),
+        TemplateUrl: &starter.Url,
+    }
+    if err := create.Run(ctx, params, fsys); err != nil {
+        return err
+    }
+    // 3. Get api keys
+    var keys []api.ApiKeyResponse
+    policy := newBackoffPolicy(ctx)
+    if err := backoff.RetryNotify(func() error {
+        fmt.Fprintln(os.Stderr, "Linking project...")
+        keys, err = apiKeys.RunGetApiKeys(ctx, flags.ProjectRef)
+        return err
+    }, policy, newErrorCallback()); err != nil {
+        return err
+    }
+    // 4. Link project
+    if err := flags.LoadConfig(fsys); err != nil {
+        return err
+    }
+    link.LinkServices(ctx, flags.ProjectRef, tenant.NewApiKey(keys).Anon, fsys)
+    if err := utils.WriteFile(utils.ProjectRefPath, []byte(flags.ProjectRef), fsys); err != nil {
+        return err
+    }
+    // 5. Wait for project healthy
+    policy.Reset()
+    if err := backoff.RetryNotify(func() error {
+        fmt.Fprintln(os.Stderr, "Checking project health...")
+        return checkProjectHealth(ctx)
+    }, policy, newErrorCallback()); err != nil {
+        return err
+    }
+    // 6. Push migrations
+    config := flags.NewDbConfigWithPassword(flags.ProjectRef)
+    if err := writeDotEnv(keys, config, fsys); err != nil {
+        fmt.Fprintln(os.Stderr, "Failed to create .env file:", err)
+    }
+    policy.Reset()
+    if err := backoff.RetryNotify(func() error {
+        return push.Run(ctx, false, false, true, true, config, fsys)
+    }, policy, newErrorCallback()); err != nil {
+        return err
+    }
+    // 7. TODO: deploy functions
+    utils.CmdSuggestion = suggestAppStart(utils.CurrentDirAbs, starter.Start)
+    return nil
+}
+
+func suggestAppStart(cwd, command string) string {
+    logger := utils.GetDebugLogger()
+    workdir, err := os.Getwd()
+    if err != nil {
+        fmt.Fprintln(logger, err)
+    }
+    workdir, err = filepath.Rel(cwd, workdir)
+    if err != nil {
+        fmt.Fprintln(logger, err)
+    }
+    var cmd []string
+    if len(workdir) > 0 && workdir != "." {
+        cmd = append(cmd, "cd "+workdir)
+    }
+    if len(command) > 0 {
+        cmd = append(cmd, command)
+    }
+    suggestion := "To start your app:"
+    for _, c := range cmd {
+        suggestion += fmt.Sprintf("\n %s", utils.Aqua(c))
+    }
+    return suggestion
+}
+
+func checkProjectHealth(ctx context.Context) error {
+    params := api.V1GetServicesHealthParams{
+        Services: []api.V1GetServicesHealthParamsServices{
+            api.V1GetServicesHealthParamsServicesDb,
+        },
+    }
+    resp, err := utils.GetSupabase().V1GetServicesHealthWithResponse(ctx, flags.ProjectRef, &params)
+    if err != nil {
+        return err
+    }
+    if resp.JSON200 == nil {
+        return errors.Errorf("Error status %d: %s", resp.StatusCode(), resp.Body)
+    }
+    for _, service := range *resp.JSON200 {
+        if !service.Healthy {
+            return errors.Errorf("Service not healthy: %s (%s)", service.Name, service.Status)
+        }
+    }
+    return nil
+}
+
+const maxRetries = 8
+
+func newBackoffPolicy(ctx context.Context) backoff.BackOffContext {
+    b := backoff.ExponentialBackOff{
+        InitialInterval:     3 * time.Second,
+        RandomizationFactor: backoff.DefaultRandomizationFactor,
+        Multiplier:          backoff.DefaultMultiplier,
+        MaxInterval:         backoff.DefaultMaxInterval,
+        MaxElapsedTime:      backoff.DefaultMaxElapsedTime,
+        Stop:                backoff.Stop,
+        Clock:               backoff.SystemClock,
+    }
+    b.Reset()
+    return backoff.WithContext(backoff.WithMaxRetries(&b, maxRetries), ctx)
+}
+
+func newErrorCallback() backoff.Notify {
+    failureCount := 0
+    logger := utils.GetDebugLogger()
+    return func(err error, d time.Duration) {
+        failureCount += 1
+        fmt.Fprintln(logger, err)
+        fmt.Fprintf(os.Stderr, "Retry (%d/%d): ", failureCount, maxRetries)
+    }
+}
+
+const (
+    SUPABASE_SERVICE_ROLE_KEY = "SUPABASE_SERVICE_ROLE_KEY"
+    SUPABASE_ANON_KEY         = "SUPABASE_ANON_KEY"
+    SUPABASE_URL              = "SUPABASE_URL"
+    POSTGRES_URL              = "POSTGRES_URL"
+    // Derived keys
+    POSTGRES_PRISMA_URL           = "POSTGRES_PRISMA_URL"
+    POSTGRES_URL_NON_POOLING      = "POSTGRES_URL_NON_POOLING"
+    POSTGRES_USER                 = "POSTGRES_USER"
+    POSTGRES_HOST                 = "POSTGRES_HOST"
+    POSTGRES_PASSWORD             = "POSTGRES_PASSWORD" //nolint:gosec
+    POSTGRES_DATABASE             = "POSTGRES_DATABASE"
+    NEXT_PUBLIC_SUPABASE_ANON_KEY = "NEXT_PUBLIC_SUPABASE_ANON_KEY"
+    NEXT_PUBLIC_SUPABASE_URL      = "NEXT_PUBLIC_SUPABASE_URL"
+    EXPO_PUBLIC_SUPABASE_ANON_KEY = "EXPO_PUBLIC_SUPABASE_ANON_KEY"
+    EXPO_PUBLIC_SUPABASE_URL      = "EXPO_PUBLIC_SUPABASE_URL"
+)
+
+func writeDotEnv(keys []api.ApiKeyResponse, config pgconn.Config, fsys afero.Fs) error {
+    // Initialise default envs
+    transactionMode := *config.Copy()
+    transactionMode.Port = 6543
+    initial := map[string]string{
+        SUPABASE_URL: "https://" + utils.GetSupabaseHost(flags.ProjectRef),
+        POSTGRES_URL: utils.ToPostgresURL(transactionMode),
+    }
+    for _, entry := range keys {
+        name := strings.ToUpper(entry.Name)
+        key := fmt.Sprintf("SUPABASE_%s_KEY", name)
+        initial[key] = entry.ApiKey
+    }
+    // Populate from .env.example if exists
+    envs, err := parseExampleEnv(fsys)
+    if err != nil {
+        return err
+    }
+    for k, v := range envs {
+        switch k {
+        case SUPABASE_SERVICE_ROLE_KEY:
+        case SUPABASE_ANON_KEY:
+        case SUPABASE_URL:
+        case POSTGRES_URL:
+        // Derived keys
+        case POSTGRES_PRISMA_URL:
+            initial[k] = initial[POSTGRES_URL]
+        case POSTGRES_URL_NON_POOLING:
+            initial[k] = utils.ToPostgresURL(config)
+        case POSTGRES_USER:
+            initial[k] = config.User
+        case POSTGRES_HOST:
+            initial[k] = config.Host
+        case POSTGRES_PASSWORD:
+            initial[k] = config.Password
+        case POSTGRES_DATABASE:
+            initial[k] = config.Database
+        case NEXT_PUBLIC_SUPABASE_ANON_KEY:
+            fallthrough
+        case EXPO_PUBLIC_SUPABASE_ANON_KEY:
+            initial[k] = initial[SUPABASE_ANON_KEY]
+        case NEXT_PUBLIC_SUPABASE_URL:
+            fallthrough
+        case EXPO_PUBLIC_SUPABASE_URL:
+            initial[k] = initial[SUPABASE_URL]
+        default:
+            initial[k] = v
+        }
+    }
+    // Write to .env file
+    out, err := godotenv.Marshal(initial)
+    if err != nil {
+        return errors.Errorf("failed to marshal env map: %w", err)
+    }
+    return utils.WriteFile(".env", []byte(out), fsys)
+}
+
+func parseExampleEnv(fsys afero.Fs) (map[string]string, error) {
+    path := ".env.example"
+    f, err := fsys.Open(path)
+    if errors.Is(err, os.ErrNotExist) {
+        return nil, nil
+    } else if err != nil {
+        return nil, errors.Errorf("failed to open %s: %w", path, err)
+    }
+    defer f.Close()
+    envs, err := godotenv.Parse(f)
+    if err != nil {
+        return nil, errors.Errorf("failed to parse %s: %w", path, err)
+    }
+    return envs, nil
+}
+
+type samplesRepo struct {
+    Samples []StarterTemplate `json:"samples"`
+}
+
+type StarterTemplate struct {
+    Name        string `json:"name"`
+    Description string `json:"description"`
+    Url         string `json:"url"`
+    Start       string `json:"start"`
+}
+
+func ListSamples(ctx context.Context, client *github.Client) ([]StarterTemplate, error) {
+    owner := "supabase-community"
+    repo := "supabase-samples"
+    path := "samples.json"
+    ref := "main"
+    opts := github.RepositoryContentGetOptions{Ref: ref}
+    file, _, _, err := client.Repositories.GetContents(ctx, owner, repo, path, &opts)
+    if err != nil {
+        return nil, errors.Errorf("failed to list samples: %w", err)
+    }
+    content, err := file.GetContent()
+    if err != nil {
+        return nil, errors.Errorf("failed to decode samples: %w", err)
+    }
+    var data samplesRepo
+    if err := json.Unmarshal([]byte(content), &data); err != nil {
+        return nil, errors.Errorf("failed to unmarshal samples: %w", err)
+    }
+    return data.Samples, nil
+}
+
+func downloadSample(ctx context.Context, client *github.Client, templateUrl string, fsys afero.Fs) error {
+    fmt.Println("Downloading:", templateUrl)
+    // https://github.com/supabase/supabase/tree/master/examples/user-management/nextjs-user-management
+    parsed, err := url.Parse(templateUrl)
+    if err != nil {
+        return errors.Errorf("failed to parse template url: %w", err)
+    }
+    parts := strings.Split(parsed.Path, "/")
+    owner := parts[1]
+    repo := parts[2]
+    ref := parts[4]
+    root := strings.Join(parts[5:], "/")
+    opts := github.RepositoryContentGetOptions{Ref: ref}
+    queue := make([]string, 0)
+    queue = append(queue, root)
+    download := NewDownloader(5, fsys)
+    for len(queue) > 0 {
+        contentPath := queue[0]
+        queue = queue[1:]
+        _, directory, _, err := client.Repositories.GetContents(ctx, owner, repo, contentPath, &opts)
+        if err != nil {
+            return errors.Errorf("failed to download template: %w", err)
+        }
+        for _, file := range directory {
+            switch file.GetType() {
+            case "file":
+                path := strings.TrimPrefix(file.GetPath(), root)
+                hostPath := filepath.Join(".", filepath.FromSlash(path))
+                if err := download.Start(ctx, hostPath, file.GetDownloadURL()); err != nil {
+                    return err
+                }
+            case "dir":
+                queue = append(queue, file.GetPath())
+            default:
+                fmt.Fprintf(os.Stderr, "Ignoring %s: %s\n", file.GetType(), file.GetPath())
+            }
+        }
+    }
+    return download.Wait()
+}
+
+type Downloader struct {
+    api   *fetcher.Fetcher
+    queue *queue.JobQueue
+    fsys  afero.Fs
+}
+
+func NewDownloader(concurrency uint, fsys afero.Fs) *Downloader {
+    return &Downloader{
+        api:   fetcher.NewFetcher("", fetcher.WithExpectedStatus(http.StatusOK)),
+        queue: queue.NewJobQueue(concurrency),
+        fsys:  fsys,
+    }
+}
+
+func (d *Downloader) Start(ctx context.Context, localPath, remotePath string) error {
+    job := func() error {
+        resp, err := d.api.Send(ctx, http.MethodGet, remotePath, nil)
+        if err != nil {
+            return err
+        }
+        defer resp.Body.Close()
+        if err := afero.WriteReader(d.fsys, localPath, resp.Body); err != nil {
+            return errors.Errorf("failed to write file: %w", err)
+        }
+        return nil
+    }
+    return d.queue.Put(job)
+}
+
+func (d *Downloader) Wait() error {
+    return d.queue.Collect()
+}
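Every remote step in Run above goes through the same backoff.RetryNotify(fn, policy, newErrorCallback()) shape, so transient API failures surface as "Retry (n/8):" lines instead of hard errors. A trimmed-down sketch of that pattern with cenkalti/backoff/v4 (illustrative only; the hypothetical fetchKeys stands in for the real API calls, and the interval is shortened from the 3s used by newBackoffPolicy):

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"

        "github.com/cenkalti/backoff/v4"
    )

    func main() {
        ctx := context.Background()
        // Same shape as newBackoffPolicy: exponential backoff, bounded by a
        // maximum retry count and cancelled together with the context.
        b := backoff.NewExponentialBackOff()
        b.InitialInterval = 10 * time.Millisecond
        policy := backoff.WithContext(backoff.WithMaxRetries(b, 8), ctx)

        attempts := 0
        fetchKeys := func() error { // hypothetical flaky operation
            attempts++
            if attempts < 3 {
                return errors.New("api not ready")
            }
            return nil
        }
        notify := func(err error, d time.Duration) {
            fmt.Printf("retrying in %s after error: %v\n", d, err)
        }
        if err := backoff.RetryNotify(fetchKeys, policy, notify); err != nil {
            fmt.Println("gave up:", err)
        }
    }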
diff --git a/internal/bootstrap/bootstrap_test.go b/internal/bootstrap/bootstrap_test.go
new file mode 100644
index 0000000..6cf6eb6
--- /dev/null
+++ b/internal/bootstrap/bootstrap_test.go
@@ -0,0 +1,140 @@
+package bootstrap
+
+import (
+    "os"
+    "path/filepath"
+    "testing"
+
+    "github.com/jackc/pgconn"
+    "github.com/joho/godotenv"
+    "github.com/spf13/afero"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    "github.com/supabase/cli/internal/utils/flags"
+    "github.com/supabase/cli/pkg/api"
+)
+
+func TestSuggestAppStart(t *testing.T) {
+    t.Run("suggest npm", func(t *testing.T) {
+        cwd, err := os.Getwd()
+        require.NoError(t, err)
+        // Run test
+        suggestion := suggestAppStart(cwd, "npm ci && npm run dev")
+        // Check error
+        assert.Equal(t, "To start your app:\n npm ci && npm run dev", suggestion)
+    })
+
+    t.Run("suggest cd", func(t *testing.T) {
+        cwd, err := os.Getwd()
+        require.NoError(t, err)
+        // Run test
+        suggestion := suggestAppStart(filepath.Dir(cwd), "npm ci && npm run dev")
+        // Check error
+        expected := "To start your app:"
+        expected += "\n cd " + filepath.Base(cwd)
+        expected += "\n npm ci && npm run dev"
+        assert.Equal(t, expected, suggestion)
+    })
+
+    t.Run("ignore relative path", func(t *testing.T) {
+        // Run test
+        suggestion := suggestAppStart(".", "supabase start")
+        // Check error
+        assert.Equal(t, "To start your app:\n supabase start", suggestion)
+    })
+}
+
+func TestWriteEnv(t *testing.T) {
+    var apiKeys = []api.ApiKeyResponse{{
+        ApiKey: "anonkey",
+        Name:   "anon",
+    }, {
+        ApiKey: "servicekey",
+        Name:   "service_role",
+    }}
+
+    var dbConfig = pgconn.Config{
+        Host:     "db.supabase.co",
+        Port:     5432,
+        User:     "admin",
+        Password: "password",
+        Database: "postgres",
+    }
+
+    t.Run("writes .env", func(t *testing.T) {
+        flags.ProjectRef = "testing"
+        // Setup in-memory fs
+        fsys := afero.NewMemMapFs()
+        // Run test
+        err := writeDotEnv(apiKeys, dbConfig, fsys)
+        // Check error
+        assert.NoError(t, err)
+        env, err := afero.ReadFile(fsys, ".env")
+        assert.NoError(t, err)
+        assert.Equal(t, `POSTGRES_URL="postgresql://admin:password@db.supabase.co:6543/postgres?connect_timeout=10"
+SUPABASE_ANON_KEY="anonkey"
+SUPABASE_SERVICE_ROLE_KEY="servicekey"
+SUPABASE_URL="https://testing.supabase.co"`, string(env))
+    })
+
+    t.Run("merges with .env.example", func(t *testing.T) {
+        flags.ProjectRef = "testing"
+        // Setup in-memory fs
+        fsys := afero.NewMemMapFs()
+        example, err := godotenv.Marshal(map[string]string{
+            POSTGRES_PRISMA_URL:           "example",
+            POSTGRES_URL_NON_POOLING:      "example",
+            POSTGRES_USER:                 "example",
+            POSTGRES_HOST:                 "example",
+            POSTGRES_PASSWORD:             "example",
+            POSTGRES_DATABASE:             "example",
+            NEXT_PUBLIC_SUPABASE_ANON_KEY: "example",
+            NEXT_PUBLIC_SUPABASE_URL:      "example",
+            "no_match":                    "example",
+            SUPABASE_SERVICE_ROLE_KEY:     "example",
+            SUPABASE_ANON_KEY:             "example",
+            SUPABASE_URL:                  "example",
+            POSTGRES_URL:                  "example",
+        })
+        require.NoError(t, err)
+        require.NoError(t, afero.WriteFile(fsys, ".env.example", []byte(example), 0644))
+        // Run test
+        err = writeDotEnv(apiKeys, dbConfig, fsys)
+        // Check error
+        assert.NoError(t, err)
+        env, err := afero.ReadFile(fsys, ".env")
+        assert.NoError(t, err)
+        assert.Equal(t, `NEXT_PUBLIC_SUPABASE_ANON_KEY="anonkey"
+NEXT_PUBLIC_SUPABASE_URL="https://testing.supabase.co"
+POSTGRES_DATABASE="postgres"
+POSTGRES_HOST="db.supabase.co"
+POSTGRES_PASSWORD="password"
+POSTGRES_PRISMA_URL="postgresql://admin:password@db.supabase.co:6543/postgres?connect_timeout=10"
+POSTGRES_URL="postgresql://admin:password@db.supabase.co:6543/postgres?connect_timeout=10"
+POSTGRES_URL_NON_POOLING="postgresql://admin:password@db.supabase.co:5432/postgres?connect_timeout=10"
+POSTGRES_USER="admin"
+SUPABASE_ANON_KEY="anonkey"
+SUPABASE_SERVICE_ROLE_KEY="servicekey"
+SUPABASE_URL="https://testing.supabase.co"
+no_match="example"`, string(env))
+    })
+
+    t.Run("throws error on malformed example", func(t *testing.T) {
+        // Setup in-memory fs
+        fsys := afero.NewMemMapFs()
+        require.NoError(t, afero.WriteFile(fsys, ".env.example", []byte("!="), 0644))
+        // Run test
+        err := writeDotEnv(nil, dbConfig, fsys)
+        // Check error
+        assert.ErrorContains(t, err, `unexpected character "!" in variable name near "!="`)
+    })
+
+    t.Run("throws error on permission denied", func(t *testing.T) {
+        // Setup in-memory fs
+        fsys := afero.NewMemMapFs()
+        // Run test
+        err := writeDotEnv(nil, dbConfig, afero.NewReadOnlyFs(fsys))
+        // Check error
+        assert.ErrorIs(t, err, os.ErrPermission)
+    })
+}
diff --git a/internal/branches/create/create.go b/internal/branches/create/create.go
new file mode 100644
index 0000000..bf2931b
--- /dev/null
+++ b/internal/branches/create/create.go
@@ -0,0 +1,39 @@
+package create
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/go-errors/errors"
+    "github.com/spf13/afero"
+    "github.com/supabase/cli/internal/gen/keys"
+    "github.com/supabase/cli/internal/utils"
+    "github.com/supabase/cli/internal/utils/flags"
+    "github.com/supabase/cli/pkg/api"
+)
+
+func Run(ctx context.Context, body api.CreateBranchBody, fsys afero.Fs) error {
+    gitBranch := keys.GetGitBranchOrDefault("", fsys)
+    if len(body.BranchName) == 0 && len(gitBranch) > 0 {
+        title := fmt.Sprintf("Do you want to create a branch named %s?", utils.Aqua(gitBranch))
+        if shouldCreate, err := utils.NewConsole().PromptYesNo(ctx, title, true); err != nil {
+            return err
+        } else if !shouldCreate {
+            return errors.New(context.Canceled)
+        }
+        body.BranchName = gitBranch
+    }
+    body.GitBranch = &gitBranch
+
+    resp, err := utils.GetSupabase().V1CreateABranchWithResponse(ctx, flags.ProjectRef, body)
+    if err != nil {
+        return errors.Errorf("failed to create preview branch: %w", err)
+    }
+
+    if resp.JSON201 == nil {
+        return errors.New("Unexpected error creating preview branch: " + string(resp.Body))
+    }
+
+    fmt.Println("Created preview branch:", resp.JSON201.Id)
+    return nil
+}
diff --git a/internal/branches/create/create_test.go b/internal/branches/create/create_test.go
new file mode 100644
index 0000000..e07e103
--- /dev/null
+++ b/internal/branches/create/create_test.go
@@ -0,0 +1,79 @@
+package create
+
+import (
+    "context"
+    "net"
+    "net/http"
+    "testing"
+
+    "github.com/h2non/gock"
+    "github.com/spf13/afero"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    "github.com/supabase/cli/internal/testing/apitest"
+    "github.com/supabase/cli/internal/utils"
+    "github.com/supabase/cli/internal/utils/flags"
+    "github.com/supabase/cli/pkg/api"
+    "github.com/supabase/cli/pkg/cast"
+)
+
+func TestCreateCommand(t *testing.T) {
+    // Setup valid project ref
+    flags.ProjectRef = apitest.RandomProjectRef()
+    t.Run("creates preview branch", func(t *testing.T) {
+        // Setup valid access token
+        token := apitest.RandomAccessToken(t)
+        t.Setenv("SUPABASE_ACCESS_TOKEN", string(token))
+        // Setup in-memory fs
+        fsys := afero.NewMemMapFs()
+        // Setup mock api
+        defer gock.OffAll()
+        gock.New(utils.DefaultApiHost).
+            Post("/v1/projects/" + flags.ProjectRef + "/branches").
+            Reply(http.StatusCreated).
+            JSON(api.BranchResponse{
+                Id: "test-uuid",
+            })
+        // Run test
+        err := Run(context.Background(), api.CreateBranchBody{
+            Region: cast.Ptr("sin"),
+        }, fsys)
+        // Check error
+        assert.NoError(t, err)
+    })
+
+    t.Run("throws error on network disconnected", func(t *testing.T) {
+        // Setup in-memory fs
+        fsys := afero.NewMemMapFs()
+        require.NoError(t, afero.WriteFile(fsys, utils.ProjectRefPath, []byte(flags.ProjectRef), 0644))
+        // Setup mock api
+        defer gock.OffAll()
+        gock.New(utils.DefaultApiHost).
+            Post("/v1/projects/" + flags.ProjectRef + "/branches").
+            ReplyError(net.ErrClosed)
+        // Run test
+        err := Run(context.Background(), api.CreateBranchBody{
+            Region: cast.Ptr("sin"),
+        }, fsys)
+        // Check error
+        assert.ErrorIs(t, err, net.ErrClosed)
+    })
+
+    t.Run("throws error on service unavailable", func(t *testing.T) {
+        // Setup in-memory fs
+        fsys := afero.NewMemMapFs()
+        require.NoError(t, afero.WriteFile(fsys, utils.ProjectRefPath, []byte(flags.ProjectRef), 0644))
+        // Setup mock api
+        defer gock.OffAll()
+        gock.New(utils.DefaultApiHost).
+            Post("/v1/projects/" + flags.ProjectRef + "/branches").
+            Reply(http.StatusServiceUnavailable)
+        // Run test
+        err := Run(context.Background(), api.CreateBranchBody{
+            Region: cast.Ptr("sin"),
+        }, fsys)
+        // Check error
+        assert.ErrorContains(t, err, "Unexpected error creating preview branch:")
+    })
+}
diff --git a/internal/branches/delete/delete.go b/internal/branches/delete/delete.go
new file mode 100644
index 0000000..b7fdd66
--- /dev/null
+++ b/internal/branches/delete/delete.go
@@ -0,0 +1,22 @@
+package delete
+
+import (
+    "context"
+    "fmt"
+    "net/http"
+
+    "github.com/go-errors/errors"
+    "github.com/supabase/cli/internal/utils"
+)
+
+func Run(ctx context.Context, branchId string) error {
+    resp, err := utils.GetSupabase().V1DeleteABranchWithResponse(ctx, branchId)
+    if err != nil {
+        return errors.Errorf("failed to delete preview branch: %w", err)
+    }
+    if resp.StatusCode() != http.StatusOK {
+        return errors.New("Unexpected error deleting preview branch: " + string(resp.Body))
+    }
+    fmt.Println("Deleted preview branch:", branchId)
+    return nil
+}
diff --git a/internal/branches/disable/disable.go b/internal/branches/disable/disable.go
new file mode 100644
index 0000000..d94c1e6
--- /dev/null
+++ b/internal/branches/disable/disable.go
@@ -0,0 +1,24 @@
+package disable
+
+import (
+    "context"
+    "fmt"
+    "net/http"
+
+    "github.com/go-errors/errors"
+    "github.com/spf13/afero"
+    "github.com/supabase/cli/internal/utils"
+    "github.com/supabase/cli/internal/utils/flags"
+)
+
+func Run(ctx context.Context, fsys afero.Fs) error {
+    resp, err := utils.GetSupabase().V1DisablePreviewBranchingWithResponse(ctx, flags.ProjectRef)
+    if err != nil {
+        return errors.Errorf("failed to disable preview branching: %w", err)
+    }
+    if resp.StatusCode() != http.StatusOK {
+        return errors.New("Unexpected error disabling preview branching: " + string(resp.Body))
+    }
+    fmt.Println("Disabled preview branching for project:", flags.ProjectRef)
+    return nil
+}
diff --git a/internal/branches/get/get.go b/internal/branches/get/get.go
new file mode 100644
index 0000000..387e4ab
--- /dev/null
+++ b/internal/branches/get/get.go @@ -0,0 +1,66 @@ +package get + +import ( + "context" + "fmt" + "os" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/cast" +) + +func Run(ctx context.Context, branchId string, fsys afero.Fs) error { + resp, err := utils.GetSupabase().V1GetABranchConfigWithResponse(ctx, branchId) + if err != nil { + return errors.Errorf("failed to retrieve preview branch: %w", err) + } + if resp.JSON200 == nil { + return errors.New("Unexpected error retrieving preview branch: " + string(resp.Body)) + } + + masked := "******" + if resp.JSON200.DbUser == nil { + resp.JSON200.DbUser = &masked + } + if resp.JSON200.DbPass == nil { + resp.JSON200.DbPass = &masked + } + if resp.JSON200.JwtSecret == nil { + resp.JSON200.JwtSecret = &masked + } + + config := pgconn.Config{ + Host: resp.JSON200.DbHost, + Port: cast.UIntToUInt16(cast.IntToUint(resp.JSON200.DbPort)), + User: *resp.JSON200.DbUser, + Password: *resp.JSON200.DbPass, + } + + postgresConnectionString := utils.ToPostgresURL(config) + if utils.OutputFormat.Value != utils.OutputPretty { + envs := map[string]string{ + "POSTGRES_URL": postgresConnectionString, + } + return utils.EncodeOutput(utils.OutputFormat.Value, os.Stdout, envs) + } + + table := `|HOST|PORT|USER|PASSWORD|JWT SECRET|POSTGRES VERSION|STATUS|POSTGRES URL| +|-|-|-|-|-|-|-|-| +` + fmt.Sprintf( + "|`%s`|`%d`|`%s`|`%s`|`%s`|`%s`|`%s`|`%s`|\n", + resp.JSON200.DbHost, + resp.JSON200.DbPort, + *resp.JSON200.DbUser, + *resp.JSON200.DbPass, + *resp.JSON200.JwtSecret, + resp.JSON200.PostgresVersion, + resp.JSON200.Status, + postgresConnectionString, + ) + + return list.RenderTable(table) +} diff --git a/internal/branches/list/list.go b/internal/branches/list/list.go new file mode 100644 index 0000000..79eb75e --- /dev/null +++ b/internal/branches/list/list.go @@ -0,0 +1,46 @@ +package list + +import ( + "context" + "fmt" + "strings" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" +) + +func Run(ctx context.Context, fsys afero.Fs) error { + resp, err := utils.GetSupabase().V1ListAllBranchesWithResponse(ctx, flags.ProjectRef) + if err != nil { + return errors.Errorf("failed to list preview branches: %w", err) + } + + if resp.JSON200 == nil { + return errors.New("Unexpected error listing preview branches: " + string(resp.Body)) + } + + table := `|ID|NAME|DEFAULT|GIT BRANCH|STATUS|CREATED AT (UTC)|UPDATED AT (UTC)| +|-|-|-|-|-|-|-| +` + for _, branch := range *resp.JSON200 { + gitBranch := " " + if branch.GitBranch != nil { + gitBranch = *branch.GitBranch + } + table += fmt.Sprintf( + "|`%s`|`%s`|`%t`|`%s`|`%s`|`%s`|`%s`|\n", + branch.Id, + strings.ReplaceAll(branch.Name, "|", "\\|"), + branch.IsDefault, + strings.ReplaceAll(gitBranch, "|", "\\|"), + branch.Status, + utils.FormatTimestamp(branch.CreatedAt), + utils.FormatTimestamp(branch.UpdatedAt), + ) + } + + return list.RenderTable(table) +} diff --git a/internal/branches/update/update.go b/internal/branches/update/update.go new file mode 100644 index 0000000..6c0f5fc --- /dev/null +++ b/internal/branches/update/update.go @@ -0,0 +1,23 @@ +package update + +import ( + "context" + "fmt" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + 
"github.com/supabase/cli/pkg/api" +) + +func Run(ctx context.Context, branchId string, body api.UpdateBranchBody, fsys afero.Fs) error { + resp, err := utils.GetSupabase().V1UpdateABranchConfigWithResponse(ctx, branchId, body) + if err != nil { + return errors.Errorf("failed to update preview branch: %w", err) + } + if resp.JSON200 == nil { + return errors.New("Unexpected error updating preview branch: " + string(resp.Body)) + } + fmt.Println("Updated preview branch:", resp.JSON200.Id) + return nil +} diff --git a/internal/config/push/push.go b/internal/config/push/push.go new file mode 100644 index 0000000..d2daddd --- /dev/null +++ b/internal/config/push/push.go @@ -0,0 +1,35 @@ +package push + +import ( + "context" + "fmt" + "os" + + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/config" +) + +func Run(ctx context.Context, ref string, fsys afero.Fs) error { + if err := flags.LoadConfig(fsys); err != nil { + return err + } + client := config.NewConfigUpdater(*utils.GetSupabase()) + remote, err := utils.Config.GetRemoteByProjectRef(ref) + if err != nil { + // Use base config when no remote is declared + remote.ProjectId = ref + } + fmt.Fprintln(os.Stderr, "Pushing config to project:", remote.ProjectId) + console := utils.NewConsole() + keep := func(name string) bool { + title := fmt.Sprintf("Do you want to push %s config to remote?", name) + shouldPush, err := console.PromptYesNo(ctx, title, true) + if err != nil { + fmt.Fprintln(os.Stderr, err) + } + return shouldPush + } + return client.UpdateRemoteConfig(ctx, remote, keep) +} diff --git a/internal/db/branch/create/create.go b/internal/db/branch/create/create.go new file mode 100644 index 0000000..5501283 --- /dev/null +++ b/internal/db/branch/create/create.go @@ -0,0 +1,103 @@ +package create + +import ( + "bytes" + "context" + _ "embed" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/stdcopy" + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + //go:embed templates/clone.sh + cloneScript string +) + +func Run(branch string, fsys afero.Fs) error { + if err := flags.LoadConfig(fsys); err != nil { + return err + } + if err := utils.AssertSupabaseDbIsRunning(); err != nil { + return err + } + + branchPath := filepath.Join(filepath.Dir(utils.CurrBranchPath), branch) + if err := assertNewBranchIsValid(branchPath, fsys); err != nil { + return nil + } + + var ctx = context.Background() + if err := createBranch(ctx, branch); err != nil { + return err + } + + if err := fsys.MkdirAll(branchPath, 0755); err != nil { + return err + } + + fmt.Println("Created branch " + utils.Aqua(branch) + ".") + return nil +} + +func assertNewBranchIsValid(branchPath string, fsys afero.Fs) error { + branch := filepath.Base(branchPath) + + if utils.IsBranchNameReserved(branch) { + return errors.New("Cannot create branch " + utils.Aqua(branch) + ": branch name is reserved.") + } + + if !utils.BranchNamePattern.MatchString(branch) { + return errors.New("Branch name " + utils.Aqua(branch) + " is invalid. 
Must match [0-9A-Za-z_-]+.") + } + + if _, err := afero.ReadDir(fsys, branchPath); errors.Is(err, os.ErrNotExist) { + // skip + } else if err != nil { + return err + } else { + return errors.New("Branch " + utils.Aqua(branch) + " already exists.") + } + + return nil +} + +func createBranch(ctx context.Context, branch string) error { + exec, err := utils.Docker.ContainerExecCreate(ctx, utils.DbId, container.ExecOptions{ + Cmd: []string{"/bin/bash", "-c", cloneScript}, + Env: []string{"DB_NAME=" + branch}, + AttachStderr: true, + AttachStdout: true, + }) + if err != nil { + return err + } + // Read exec output + resp, err := utils.Docker.ContainerExecAttach(ctx, exec.ID, container.ExecStartOptions{}) + if err != nil { + return err + } + defer resp.Close() + // Capture error details + var errBuf bytes.Buffer + if _, err := stdcopy.StdCopy(io.Discard, &errBuf, resp.Reader); err != nil { + return err + } + // Get the exit code + iresp, err := utils.Docker.ContainerExecInspect(ctx, exec.ID) + if err != nil { + return err + } + if iresp.ExitCode > 0 { + return errors.New("Error creating branch: " + errBuf.String()) + } + return nil +} diff --git a/internal/db/branch/create/create_test.go b/internal/db/branch/create/create_test.go new file mode 100644 index 0000000..8e38423 --- /dev/null +++ b/internal/db/branch/create/create_test.go @@ -0,0 +1,106 @@ +package create + +import ( + "context" + "net/http" + "testing" + + "github.com/docker/docker/api/types" + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" +) + +func TestBranchValidation(t *testing.T) { + t.Run("branch name is valid", func(t *testing.T) { + assert.NoError(t, assertNewBranchIsValid("test-branch", afero.NewMemMapFs())) + }) + + t.Run("branch name is reserved", func(t *testing.T) { + assert.Error(t, assertNewBranchIsValid("main", afero.NewMemMapFs())) + }) + + t.Run("branch name is invalid", func(t *testing.T) { + assert.Error(t, assertNewBranchIsValid("@", afero.NewMemMapFs())) + }) + + t.Run("branch not a directory", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := "/supabase/.branches/test-branch" + _, err := fsys.Create(path) + require.NoError(t, err) + // Run test + assert.Error(t, assertNewBranchIsValid(path, fsys)) + }) + + t.Run("branch already exists", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := "/supabase/.branches/test-branch" + require.NoError(t, fsys.MkdirAll(path, 0755)) + // Run test + assert.Error(t, assertNewBranchIsValid(path, fsys)) + }) +} + +func TestBranchCreation(t *testing.T) { + utils.DbId = "test-db" + + t.Run("docker exec failure", func(t *testing.T) { + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/exec"). + Reply(http.StatusServiceUnavailable) + // Run test + err := createBranch(context.Background(), "test-branch") + // Validate api + assert.ErrorContains(t, err, "request returned Service Unavailable for API route and version") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("docker attach failure", func(t *testing.T) { + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). 
+ Post("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/exec"). + Reply(http.StatusCreated). + JSON(types.ContainerJSON{}) + // Run test + err := createBranch(context.Background(), "test-branch") + // Validate api + assert.ErrorContains(t, err, "unable to upgrade to tcp, received 404") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestCreateCommand(t *testing.T) { + const branch = "test-branch" + + t.Run("throws error on missing config", func(t *testing.T) { + assert.Error(t, Run(branch, afero.NewMemMapFs())) + }) + + t.Run("throws error on stopped db", func(t *testing.T) { + // Setup in-memory fs + fsys := &afero.MemMapFs{} + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers"). + Reply(http.StatusNotFound) + // Run test + err := Run(branch, fsys) + // Validate api + assert.ErrorIs(t, err, utils.ErrNotRunning) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/db/branch/create/templates/clone.sh b/internal/db/branch/create/templates/clone.sh new file mode 100644 index 0000000..ac8d6cc --- /dev/null +++ b/internal/db/branch/create/templates/clone.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -euo pipefail + +createdb --username postgres --host 127.0.0.1 "$DB_NAME" +pg_dump --username postgres --host 127.0.0.1 postgres | psql --username postgres --host 127.0.0.1 "$DB_NAME" diff --git a/internal/db/branch/delete/delete.go b/internal/db/branch/delete/delete.go new file mode 100644 index 0000000..ea14cd3 --- /dev/null +++ b/internal/db/branch/delete/delete.go @@ -0,0 +1,90 @@ +package delete + +import ( + "bytes" + "context" + "fmt" + "io" + "path/filepath" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/stdcopy" + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" +) + +func Run(branch string, fsys afero.Fs) error { + if err := flags.LoadConfig(fsys); err != nil { + return err + } + if err := utils.AssertSupabaseDbIsRunning(); err != nil { + return err + } + + // TODO: update branch history atomically with database + if err := deleteBranchDir(branch, fsys); err != nil { + return err + } + + ctx := context.Background() + if err := deleteBranchPG(ctx, branch); err != nil { + return err + } + + fmt.Println("Deleted branch " + utils.Aqua(branch) + ".") + return nil +} + +func deleteBranchDir(branch string, fsys afero.Fs) error { + if currBranch, _ := utils.GetCurrentBranchFS(fsys); branch == currBranch { + return errors.New("Cannot delete current branch.") + } + + if utils.IsBranchNameReserved(branch) { + return errors.New("Cannot delete branch " + utils.Aqua(branch) + ": branch name is reserved.") + } + + branchPath := filepath.Join(filepath.Dir(utils.CurrBranchPath), branch) + if _, err := afero.ReadDir(fsys, branchPath); err != nil { + return errors.New("Branch " + utils.Aqua(branch) + " does not exist.") + } + + if err := fsys.RemoveAll(branchPath); err != nil { + return errors.Errorf("Failed deleting branch %s: %w", utils.Aqua(branch), err) + } + + return nil +} + +func deleteBranchPG(ctx context.Context, branch string) error { + exec, err := utils.Docker.ContainerExecCreate(ctx, utils.DbId, container.ExecOptions{ + Cmd: []string{"dropdb", "--username", "postgres", "--host", "127.0.0.1", branch}, + 
AttachStderr: true, + AttachStdout: true, + }) + if err != nil { + return err + } + // Read exec output + resp, err := utils.Docker.ContainerExecAttach(ctx, exec.ID, container.ExecStartOptions{}) + if err != nil { + return err + } + defer resp.Close() + // Capture error details + var errBuf bytes.Buffer + if _, err := stdcopy.StdCopy(io.Discard, &errBuf, resp.Reader); err != nil { + return err + } + // Get the exit code + iresp, err := utils.Docker.ContainerExecInspect(ctx, exec.ID) + if err != nil { + return err + } + if iresp.ExitCode > 0 { + return errors.New("Error deleting branch: " + errBuf.String()) + } + return nil +} diff --git a/internal/db/branch/delete/delete_test.go b/internal/db/branch/delete/delete_test.go new file mode 100644 index 0000000..76957e2 --- /dev/null +++ b/internal/db/branch/delete/delete_test.go @@ -0,0 +1,79 @@ +package delete + +import ( + "net/http" + "path/filepath" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" +) + +func TestBranchDir(t *testing.T) { + t.Run("removes a branch directory", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(filepath.Dir(utils.CurrBranchPath), "test-branch") + require.NoError(t, fsys.Mkdir(path, 0755)) + // Run test + assert.NoError(t, deleteBranchDir("test-branch", fsys)) + // Validate removal + exists, err := afero.Exists(fsys, path) + assert.NoError(t, err) + assert.False(t, exists) + }) + + t.Run("branch is current", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, utils.CurrBranchPath, []byte("main"), 0644)) + // Run test + assert.Error(t, deleteBranchDir("main", fsys)) + }) + + t.Run("branch is reserved", func(t *testing.T) { + assert.Error(t, deleteBranchDir("main", afero.NewMemMapFs())) + }) + + t.Run("branch does not exist", func(t *testing.T) { + assert.Error(t, deleteBranchDir("test-branch", afero.NewMemMapFs())) + }) + + t.Run("branch permission denied", func(t *testing.T) { + // Setup read-only fs + fsys := afero.NewMemMapFs() + path := filepath.Join(filepath.Dir(utils.CurrBranchPath), "test-branch") + require.NoError(t, fsys.Mkdir(path, 0755)) + // Run test + assert.Error(t, deleteBranchDir("test-branch", afero.NewReadOnlyFs(fsys))) + }) +} + +func TestDeleteCommand(t *testing.T) { + const branch = "test-branch" + + t.Run("throws error on missing config", func(t *testing.T) { + assert.Error(t, Run(branch, afero.NewMemMapFs())) + }) + + t.Run("throws error on stopped db", func(t *testing.T) { + // Setup in-memory fs + fsys := &afero.MemMapFs{} + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers"). 
+ Reply(http.StatusNotFound) + // Run test + err := Run(branch, fsys) + // Validate api + assert.ErrorIs(t, err, utils.ErrNotRunning) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/db/branch/list/list.go b/internal/db/branch/list/list.go new file mode 100644 index 0000000..b1037ac --- /dev/null +++ b/internal/db/branch/list/list.go @@ -0,0 +1,36 @@ +package list + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" +) + +func Run(fsys afero.Fs, out io.Writer) error { + branches, err := afero.ReadDir(fsys, filepath.Dir(utils.CurrBranchPath)) + if errors.Is(err, os.ErrNotExist) { + return nil + } else if err != nil { + return err + } + + currBranch, _ := utils.GetCurrentBranchFS(fsys) + for _, branch := range branches { + if branch.Name() == filepath.Base(utils.CurrBranchPath) { + continue + } + + if branch.Name() == currBranch { + fmt.Fprintln(out, "*", branch.Name()) + } else { + fmt.Fprintln(out, " ", branch.Name()) + } + } + + return nil +} diff --git a/internal/db/branch/list/list_test.go b/internal/db/branch/list/list_test.go new file mode 100644 index 0000000..24e34d5 --- /dev/null +++ b/internal/db/branch/list/list_test.go @@ -0,0 +1,66 @@ +package list + +import ( + "bytes" + "io" + "path/filepath" + "strings" + "testing" + + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/utils" +) + +func TestListCommand(t *testing.T) { + t.Run("lists all branches", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, utils.CurrBranchPath, []byte("main"), 0644)) + base := filepath.Dir(utils.CurrBranchPath) + require.NoError(t, fsys.Mkdir(filepath.Join(base, "main"), 0755)) + require.NoError(t, fsys.Mkdir(filepath.Join(base, "test"), 0755)) + // Run test + var out bytes.Buffer + require.NoError(t, Run(fsys, &out)) + // Validate output + lines := strings.Split(out.String(), "\n") + assert.ElementsMatch(t, []string{ + "* main", + " test", + "", + }, lines) + }) + + t.Run("lists without current branch", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + base := filepath.Dir(utils.CurrBranchPath) + require.NoError(t, fsys.Mkdir(filepath.Join(base, "main"), 0755)) + require.NoError(t, fsys.Mkdir(filepath.Join(base, "test"), 0755)) + // Run test + var out bytes.Buffer + require.NoError(t, Run(fsys, &out)) + // Validate output + lines := strings.Split(out.String(), "\n") + assert.ElementsMatch(t, []string{ + " main", + " test", + "", + }, lines) + }) + + t.Run("lists uninitialized branch", func(t *testing.T) { + require.NoError(t, Run(afero.NewMemMapFs(), io.Discard)) + }) + + t.Run("throws error on unreadable directory", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + _, err := fsys.Create(filepath.Dir(utils.CurrBranchPath)) + require.NoError(t, err) + // Run test + require.Error(t, Run(fsys, io.Discard)) + }) +} diff --git a/internal/db/branch/switch_/switch_.go b/internal/db/branch/switch_/switch_.go new file mode 100644 index 0000000..d11803e --- /dev/null +++ b/internal/db/branch/switch_/switch_.go @@ -0,0 +1,88 @@ +package switch_ + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/reset" + 
"github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" +) + +func Run(ctx context.Context, target string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + // 1. Sanity checks + { + if err := flags.LoadConfig(fsys); err != nil { + return err + } + if err := utils.AssertSupabaseDbIsRunning(); err != nil { + return err + } + if target != "main" && utils.IsBranchNameReserved(target) { + return errors.New("Cannot switch branch " + utils.Aqua(target) + ": branch name is reserved.") + } + branchPath := filepath.Join(filepath.Dir(utils.CurrBranchPath), target) + if _, err := fsys.Stat(branchPath); errors.Is(err, os.ErrNotExist) { + return errors.New("Branch " + utils.Aqua(target) + " does not exist.") + } else if err != nil { + return err + } + } + + // 2. Check current branch + currBranch, err := utils.GetCurrentBranchFS(fsys) + if err != nil { + // Assume we are on main branch + currBranch = "main" + } + + // 3. Switch Postgres database + if currBranch == target { + fmt.Println("Already on branch " + utils.Aqua(target) + ".") + } else if err := switchDatabase(ctx, currBranch, target, options...); err != nil { + return errors.New("Error switching to branch " + utils.Aqua(target) + ": " + err.Error()) + } else { + fmt.Println("Switched to branch " + utils.Aqua(target) + ".") + } + + // 4. Update current branch + if err := afero.WriteFile(fsys, utils.CurrBranchPath, []byte(target), 0644); err != nil { + return errors.New("Unable to update local branch file. Fix by running: echo '" + target + "' > " + utils.CurrBranchPath) + } + return nil +} + +func switchDatabase(ctx context.Context, source, target string, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectLocalPostgres(ctx, pgconn.Config{Database: "template1"}, options...) 
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + if err := reset.DisconnectClients(ctx, conn); err != nil { + return err + } + defer func() { + if err := reset.RestartDatabase(context.Background(), os.Stderr); err != nil { + fmt.Fprintln(os.Stderr, "Failed to restart database:", err) + } + }() + backup := "ALTER DATABASE postgres RENAME TO " + source + ";" + if _, err := conn.Exec(ctx, backup); err != nil { + return err + } + rename := "ALTER DATABASE " + target + " RENAME TO postgres;" + if _, err := conn.Exec(ctx, rename); err != nil { + rollback := "ALTER DATABASE " + source + " RENAME TO postgres;" + if _, err := conn.Exec(ctx, rollback); err != nil { + fmt.Fprintln(os.Stderr, "Failed to rollback database:", err) + } + return err + } + return nil +} diff --git a/internal/db/branch/switch_/switch__test.go b/internal/db/branch/switch_/switch__test.go new file mode 100644 index 0000000..4e2e105 --- /dev/null +++ b/internal/db/branch/switch_/switch__test.go @@ -0,0 +1,290 @@ +package switch_ + +import ( + "context" + "net/http" + "path/filepath" + "testing" + + "github.com/docker/docker/api/types" + "github.com/h2non/gock" + "github.com/jackc/pgerrcode" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgtest" +) + +func TestSwitchCommand(t *testing.T) { + t.Run("switches local branch", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup target branch + branch := "target" + branchPath := filepath.Join(filepath.Dir(utils.CurrBranchPath), branch) + require.NoError(t, fsys.Mkdir(branchPath, 0755)) + require.NoError(t, afero.WriteFile(fsys, utils.CurrBranchPath, []byte("main"), 0644)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{}) + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers"). + Reply(http.StatusServiceUnavailable) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("ALTER DATABASE postgres ALLOW_CONNECTIONS false"). + Reply("ALTER DATABASE"). + Query("ALTER DATABASE _supabase ALLOW_CONNECTIONS false"). + Reply("ALTER DATABASE"). + Query(reset.TERMINATE_BACKENDS). + Reply("SELECT 1"). + Query(reset.COUNT_REPLICATION_SLOTS). + Reply("SELECT 1", []interface{}{0}). + Query("ALTER DATABASE postgres RENAME TO main;"). + Reply("ALTER DATABASE"). + Query("ALTER DATABASE " + branch + " RENAME TO postgres;"). 
+ Reply("ALTER DATABASE") + // Run test + assert.NoError(t, Run(context.Background(), branch, fsys, conn.Intercept)) + // Validate output + assert.Empty(t, apitest.ListUnmatchedRequests()) + contents, err := afero.ReadFile(fsys, utils.CurrBranchPath) + assert.NoError(t, err) + assert.Equal(t, []byte(branch), contents) + }) + + t.Run("throws error on malformed config", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, utils.ConfigPath, []byte("malformed"), 0644)) + // Run test + err := Run(context.Background(), "target", fsys) + // Check error + assert.ErrorContains(t, err, "toml: expected = after a key, but the document ends there") + }) + + t.Run("throws error on missing database", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers"). + Reply(http.StatusNotFound) + // Run test + err := Run(context.Background(), "target", fsys) + // Check error + assert.ErrorIs(t, err, utils.ErrNotRunning) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on reserved branch", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{}) + // Run test + err := Run(context.Background(), "postgres", fsys) + // Check error + assert.ErrorContains(t, err, "branch name is reserved.") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on missing branch", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{}) + // Run test + err := Run(context.Background(), "main", fsys) + // Check error + assert.ErrorContains(t, err, "Branch main does not exist.") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("noop on current branch", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers"). + Reply(http.StatusOK). 
+ JSON(types.ContainerJSON{}) + // Setup target branch + branch := "main" + branchPath := filepath.Join(filepath.Dir(utils.CurrBranchPath), branch) + require.NoError(t, fsys.Mkdir(branchPath, 0755)) + // Run test + assert.NoError(t, Run(context.Background(), branch, fsys)) + // Check error + assert.Empty(t, apitest.ListUnmatchedRequests()) + contents, err := afero.ReadFile(fsys, utils.CurrBranchPath) + assert.NoError(t, err) + assert.Equal(t, []byte(branch), contents) + }) + + t.Run("throws error on failure to switch", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{}) + // Setup target branch + branch := "target" + branchPath := filepath.Join(filepath.Dir(utils.CurrBranchPath), branch) + require.NoError(t, fsys.Mkdir(branchPath, 0755)) + // Setup mock postgres + conn := pgtest.NewConn() + // Run test + err := Run(context.Background(), branch, fsys, conn.Intercept) + // Check error + assert.ErrorContains(t, err, "Error switching to branch target") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on failure to write", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{}) + // Setup target branch + branch := "main" + branchPath := filepath.Join(filepath.Dir(utils.CurrBranchPath), branch) + require.NoError(t, fsys.Mkdir(branchPath, 0755)) + // Run test + err := Run(context.Background(), branch, afero.NewReadOnlyFs(fsys)) + // Check error + assert.ErrorContains(t, err, "Unable to update local branch file.") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestSwitchDatabase(t *testing.T) { + t.Run("throws error on failure to connect", func(t *testing.T) { + // Setup invalid port + utils.Config.Db.Port = 0 + // Run test + err := switchDatabase(context.Background(), "main", "target") + // Check error + assert.ErrorContains(t, err, "invalid port") + }) + + t.Run("throws error on failure to disconnect", func(t *testing.T) { + // Setup valid config + utils.Config.Db.Port = 54322 + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("ALTER DATABASE postgres ALLOW_CONNECTIONS false"). + ReplyError(pgerrcode.InvalidParameterValue, `cannot disallow connections for current database`). + Query("ALTER DATABASE _supabase ALLOW_CONNECTIONS false"). + Query(reset.TERMINATE_BACKENDS) + // Run test + err := switchDatabase(context.Background(), "main", "target", conn.Intercept) + // Check error + assert.ErrorContains(t, err, pgerrcode.InvalidParameterValue) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on failure to backup", func(t *testing.T) { + // Setup valid config + utils.DbId = "test-switch" + utils.Config.Db.Port = 54322 + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("ALTER DATABASE postgres ALLOW_CONNECTIONS false"). + Reply("ALTER DATABASE"). + Query("ALTER DATABASE _supabase ALLOW_CONNECTIONS false"). 
+ Reply("ALTER DATABASE"). + Query(reset.TERMINATE_BACKENDS). + Reply("SELECT 1"). + Query(reset.COUNT_REPLICATION_SLOTS). + Reply("SELECT 1", []interface{}{0}). + Query("ALTER DATABASE postgres RENAME TO main;"). + ReplyError(pgerrcode.DuplicateDatabase, `database "main" already exists`) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/restart"). + Reply(http.StatusServiceUnavailable) + // Run test + err := switchDatabase(context.Background(), "main", "target", conn.Intercept) + // Check error + assert.ErrorContains(t, err, pgerrcode.DuplicateDatabase) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on failure to rename", func(t *testing.T) { + // Setup valid config + utils.DbId = "test-switch" + utils.Config.Db.Port = 54322 + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("ALTER DATABASE postgres ALLOW_CONNECTIONS false"). + Reply("ALTER DATABASE"). + Query("ALTER DATABASE _supabase ALLOW_CONNECTIONS false"). + Reply("ALTER DATABASE"). + Query(reset.TERMINATE_BACKENDS). + Reply("SELECT 1"). + Query(reset.COUNT_REPLICATION_SLOTS). + Reply("SELECT 1", []interface{}{0}). + Query("ALTER DATABASE postgres RENAME TO main;"). + Reply("ALTER DATABASE"). + Query("ALTER DATABASE target RENAME TO postgres;"). + ReplyError(pgerrcode.InvalidCatalogName, `database "target" does not exist`). + // Attempt to rollback + Query("ALTER DATABASE main RENAME TO postgres;"). + ReplyError(pgerrcode.DuplicateDatabase, `database "postgres" already exists`) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/restart"). + Reply(http.StatusServiceUnavailable) + // Run test + err := switchDatabase(context.Background(), "main", "target", conn.Intercept) + // Check error + assert.ErrorContains(t, err, pgerrcode.InvalidCatalogName) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/db/diff/diff.go b/internal/db/diff/diff.go new file mode 100644 index 0000000..b2d52a9 --- /dev/null +++ b/internal/db/diff/diff.go @@ -0,0 +1,212 @@ +package diff + +import ( + "context" + _ "embed" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/go-connections/nat" + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/start" + "github.com/supabase/cli/internal/gen/keys" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/parser" +) + +type DiffFunc func(context.Context, string, string, []string) (string, error) + +func Run(ctx context.Context, schema []string, file string, config pgconn.Config, differ DiffFunc, fsys afero.Fs, options ...func(*pgx.ConnConfig)) (err error) { + // Sanity checks. 
+ if utils.IsLocalDatabase(config) { + if container, err := createShadowIfNotExists(ctx, fsys); err != nil { + return err + } else if len(container) > 0 { + defer utils.DockerRemove(container) + if err := start.WaitForHealthyService(ctx, start.HealthTimeout, container); err != nil { + return err + } + if err := migrateBaseDatabase(ctx, container, fsys, options...); err != nil { + return err + } + } + } + // 1. Load all user defined schemas + if len(schema) == 0 { + schema, err = loadSchema(ctx, config, options...) + if err != nil { + return err + } + } + // 2. Run migra to diff schema + out, err := DiffDatabase(ctx, schema, config, os.Stderr, fsys, differ, options...) + if err != nil { + return err + } + branch := keys.GetGitBranch(fsys) + fmt.Fprintln(os.Stderr, "Finished "+utils.Aqua("supabase db diff")+" on branch "+utils.Aqua(branch)+".\n") + if err := SaveDiff(out, file, fsys); err != nil { + return err + } + drops := findDropStatements(out) + if len(drops) > 0 { + fmt.Fprintln(os.Stderr, "Found drop statements in schema diff. Please double check if these are expected:") + fmt.Fprintln(os.Stderr, utils.Yellow(strings.Join(drops, "\n"))) + } + return nil +} + +func createShadowIfNotExists(ctx context.Context, fsys afero.Fs) (string, error) { + if exists, err := afero.DirExists(fsys, utils.SchemasDir); err != nil { + return "", errors.Errorf("failed to check schemas: %w", err) + } else if !exists { + return "", nil + } + if err := utils.AssertSupabaseDbIsRunning(); !errors.Is(err, utils.ErrNotRunning) { + return "", err + } + fmt.Fprintf(os.Stderr, "Creating local database from %s...\n", utils.Bold(utils.SchemasDir)) + return CreateShadowDatabase(ctx, utils.Config.Db.Port) +} + +func loadDeclaredSchemas(fsys afero.Fs) ([]string, error) { + if schemas := utils.Config.Db.Migrations.SchemaPaths; len(schemas) > 0 { + return schemas.Files(afero.NewIOFS(fsys)) + } + var declared []string + if err := afero.Walk(fsys, utils.SchemasDir, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return err + } + if info.Mode().IsRegular() && filepath.Ext(info.Name()) == ".sql" { + declared = append(declared, path) + } + return nil + }); err != nil { + return nil, errors.Errorf("failed to walk dir: %w", err) + } + return declared, nil +} + +// https://github.com/djrobstep/migra/blob/master/migra/statements.py#L6 +var dropStatementPattern = regexp.MustCompile(`(?i)drop\s+`) + +func findDropStatements(out string) []string { + lines, err := parser.SplitAndTrim(strings.NewReader(out)) + if err != nil { + return nil + } + var drops []string + for _, line := range lines { + if dropStatementPattern.MatchString(line) { + drops = append(drops, line) + } + } + return drops +} + +func loadSchema(ctx context.Context, config pgconn.Config, options ...func(*pgx.ConnConfig)) ([]string, error) { + conn, err := utils.ConnectByConfig(ctx, config, options...) 
+ if err != nil { + return nil, err + } + defer conn.Close(context.Background()) + // RLS policies in auth and storage schemas can be included with -s flag + return migration.ListUserSchemas(ctx, conn) +} + +func CreateShadowDatabase(ctx context.Context, port uint16) (string, error) { + config := start.NewContainerConfig() + hostPort := strconv.FormatUint(uint64(port), 10) + hostConfig := container.HostConfig{ + PortBindings: nat.PortMap{"5432/tcp": []nat.PortBinding{{HostPort: hostPort}}}, + AutoRemove: true, + } + networkingConfig := network.NetworkingConfig{} + if utils.Config.Db.MajorVersion <= 14 { + config.Entrypoint = nil + hostConfig.Tmpfs = map[string]string{"/docker-entrypoint-initdb.d": ""} + } + return utils.DockerStart(ctx, config, hostConfig, networkingConfig, "") +} + +func ConnectShadowDatabase(ctx context.Context, timeout time.Duration, options ...func(*pgx.ConnConfig)) (conn *pgx.Conn, err error) { + // Retry until connected, cancelled, or timeout + policy := start.NewBackoffPolicy(ctx, timeout) + config := pgconn.Config{Port: utils.Config.Db.ShadowPort} + connect := func() (*pgx.Conn, error) { + return utils.ConnectLocalPostgres(ctx, config, options...) + } + return backoff.RetryWithData(connect, policy) +} + +func MigrateShadowDatabase(ctx context.Context, container string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + migrations, err := migration.ListLocalMigrations(utils.MigrationsDir, afero.NewIOFS(fsys)) + if err != nil { + return err + } + conn, err := ConnectShadowDatabase(ctx, 10*time.Second, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + if err := start.SetupDatabase(ctx, conn, container[:12], os.Stderr, fsys); err != nil { + return err + } + return migration.ApplyMigrations(ctx, migrations, conn, afero.NewIOFS(fsys)) +} + +func migrateBaseDatabase(ctx context.Context, container string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + migrations, err := loadDeclaredSchemas(fsys) + if err != nil { + return err + } + conn, err := utils.ConnectLocalPostgres(ctx, pgconn.Config{}, options...) 
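+ // An empty pgconn.Config targets the default local database; the declared
+ // schema files collected above are then applied via migration.SeedGlobals.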
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + if err := start.SetupDatabase(ctx, conn, container[:12], os.Stderr, fsys); err != nil { + return err + } + return migration.SeedGlobals(ctx, migrations, conn, afero.NewIOFS(fsys)) +} + +func DiffDatabase(ctx context.Context, schema []string, config pgconn.Config, w io.Writer, fsys afero.Fs, differ func(context.Context, string, string, []string) (string, error), options ...func(*pgx.ConnConfig)) (string, error) { + fmt.Fprintln(w, "Creating shadow database...") + shadow, err := CreateShadowDatabase(ctx, utils.Config.Db.ShadowPort) + if err != nil { + return "", err + } + defer utils.DockerRemove(shadow) + if err := start.WaitForHealthyService(ctx, start.HealthTimeout, shadow); err != nil { + return "", err + } + if err := MigrateShadowDatabase(ctx, shadow, fsys, options...); err != nil { + return "", err + } + fmt.Fprintln(w, "Diffing schemas:", strings.Join(schema, ",")) + source := utils.ToPostgresURL(pgconn.Config{ + Host: utils.Config.Hostname, + Port: utils.Config.Db.ShadowPort, + User: "postgres", + Password: utils.Config.Db.Password, + Database: "postgres", + }) + target := utils.ToPostgresURL(config) + return differ(ctx, source, target, schema) +} diff --git a/internal/db/diff/diff_test.go b/internal/db/diff/diff_test.go new file mode 100644 index 0000000..20094d9 --- /dev/null +++ b/internal/db/diff/diff_test.go @@ -0,0 +1,346 @@ +package diff + +import ( + "context" + "errors" + "io" + "net/http" + "os" + "path/filepath" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/h2non/gock" + "github.com/jackc/pgconn" + "github.com/jackc/pgerrcode" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/db/start" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/testing/fstest" + "github.com/supabase/cli/internal/testing/helper" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/config" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "db.supabase.co", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestRun(t *testing.T) { + t.Run("runs migra diff", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, flags.LoadConfig(fsys)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), "test-shadow-db") + gock.New(utils.Docker.DaemonHost()). + Delete("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db"). + Reply(http.StatusOK) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db/json"). + Reply(http.StatusOK). 
+ JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Realtime.Image), "test-shadow-realtime") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-shadow-realtime", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Storage.Image), "test-shadow-storage") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-shadow-storage", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Auth.Image), "test-shadow-auth") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-shadow-auth", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(config.Images.Migra), "test-migra") + diff := "create table test();" + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-migra", diff)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + // Run test + err := Run(context.Background(), []string{"public"}, "file", dbConfig, DiffSchemaMigra, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + // Check diff file + files, err := afero.ReadDir(fsys, utils.MigrationsDir) + assert.NoError(t, err) + assert.Equal(t, 1, len(files)) + diffPath := filepath.Join(utils.MigrationsDir, files[0].Name()) + contents, err := afero.ReadFile(fsys, diffPath) + assert.NoError(t, err) + assert.Equal(t, []byte(diff), contents) + }) + + t.Run("throws error on failure to load user schemas", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.ListSchemas, migration.ManagedSchemas). + ReplyError(pgerrcode.DuplicateTable, `relation "test" already exists`) + // Run test + err := Run(context.Background(), []string{}, "", dbConfig, DiffSchemaMigra, fsys, conn.Intercept) + // Check error + assert.ErrorContains(t, err, `ERROR: relation "test" already exists (SQLSTATE 42P07)`) + }) + + t.Run("throws error on failure to diff target", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/images/" + utils.GetRegistryImageUrl(utils.Config.Db.Image) + "/json"). + ReplyError(errors.New("network error")) + // Run test + err := Run(context.Background(), []string{"public"}, "file", dbConfig, DiffSchemaMigra, fsys) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestMigrateShadow(t *testing.T) { + utils.Config.Db.MajorVersion = 14 + + t.Run("migrates shadow database", func(t *testing.T) { + utils.Config.Db.ShadowPort = 54320 + utils.GlobalsSql = "create schema public" + utils.InitialSchemaPg14Sql = "create schema private" + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + sql := "create schema test" + require.NoError(t, afero.WriteFile(fsys, path, []byte(sql), 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(utils.GlobalsSql). + Reply("CREATE SCHEMA"). + Query(utils.InitialSchemaPg14Sql). 
+ Reply("CREATE SCHEMA") + helper.MockMigrationHistory(conn). + Query(sql). + Reply("CREATE SCHEMA"). + Query(migration.INSERT_MIGRATION_VERSION, "0", "test", []string{sql}). + Reply("INSERT 0 1") + // Run test + err := MigrateShadowDatabase(context.Background(), "test-shadow-db", fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) + + t.Run("throws error on timeout", func(t *testing.T) { + utils.Config.Db.ShadowPort = 54320 + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup cancelled context + ctx, cancel := context.WithCancel(context.Background()) + cancel() + // Run test + err := MigrateShadowDatabase(ctx, "", fsys) + // Check error + assert.ErrorIs(t, err, context.Canceled) + }) + + t.Run("throws error on permission denied", func(t *testing.T) { + // Setup in-memory fs + fsys := &fstest.OpenErrorFs{DenyPath: utils.MigrationsDir} + // Run test + err := MigrateShadowDatabase(context.Background(), "", fsys) + // Check error + assert.ErrorIs(t, err, os.ErrPermission) + }) + + t.Run("throws error on globals schema", func(t *testing.T) { + utils.Config.Db.ShadowPort = 54320 + utils.GlobalsSql = "create schema public" + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(utils.GlobalsSql). + ReplyError(pgerrcode.DuplicateSchema, `schema "public" already exists`) + // Run test + err := MigrateShadowDatabase(context.Background(), "test-shadow-db", fsys, conn.Intercept) + // Check error + assert.ErrorContains(t, err, `ERROR: schema "public" already exists (SQLSTATE 42P06)`) + }) +} + +func TestDiffDatabase(t *testing.T) { + utils.Config.Db.MajorVersion = 14 + utils.Config.Db.ShadowPort = 54320 + utils.GlobalsSql = "create schema public" + utils.InitialSchemaPg14Sql = "create schema private" + + t.Run("throws error on failure to create shadow", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/images/" + utils.GetRegistryImageUrl(utils.Config.Db.Image) + "/json"). + ReplyError(errors.New("network error")) + // Run test + diff, err := DiffDatabase(context.Background(), []string{"public"}, dbConfig, io.Discard, fsys, DiffSchemaMigra) + // Check error + assert.Empty(t, diff) + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on health check failure", func(t *testing.T) { + start.HealthTimeout = time.Millisecond + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), "test-shadow-db") + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: false, + Status: "exited", + }, + }}) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db/logs"). + Reply(http.StatusServiceUnavailable) + gock.New(utils.Docker.DaemonHost()). + Delete("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db"). 
+ Reply(http.StatusOK) + // Run test + diff, err := DiffDatabase(context.Background(), []string{"public"}, dbConfig, io.Discard, fsys, DiffSchemaMigra) + // Check error + assert.Empty(t, diff) + assert.ErrorContains(t, err, "test-shadow-db container is not running: exited") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on failure to migrate shadow", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), "test-shadow-db") + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + gock.New(utils.Docker.DaemonHost()). + Delete("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db"). + Reply(http.StatusOK) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(utils.GlobalsSql). + ReplyError(pgerrcode.DuplicateSchema, `schema "public" already exists`) + // Run test + diff, err := DiffDatabase(context.Background(), []string{"public"}, dbConfig, io.Discard, fsys, DiffSchemaMigra, conn.Intercept) + // Check error + assert.Empty(t, diff) + assert.ErrorContains(t, err, `ERROR: schema "public" already exists (SQLSTATE 42P06) +At statement 0: +create schema public`) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on failure to diff target", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + sql := "create schema test" + require.NoError(t, afero.WriteFile(fsys, path, []byte(sql), 0644)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), "test-shadow-db") + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + gock.New(utils.Docker.DaemonHost()). + Delete("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db"). + Reply(http.StatusOK) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(config.Images.Migra), "test-migra") + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/test-migra/logs"). + ReplyError(errors.New("network error")) + gock.New(utils.Docker.DaemonHost()). + Delete("/v" + utils.Docker.ClientVersion() + "/containers/test-migra"). + Reply(http.StatusOK) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(utils.GlobalsSql). + Reply("CREATE SCHEMA"). + Query(utils.InitialSchemaPg14Sql). + Reply("CREATE SCHEMA") + helper.MockMigrationHistory(conn). + Query(sql). + Reply("CREATE SCHEMA"). + Query(migration.INSERT_MIGRATION_VERSION, "0", "test", []string{sql}). 
+ Reply("INSERT 0 1") + // Run test + diff, err := DiffDatabase(context.Background(), []string{"public"}, dbConfig, io.Discard, fsys, DiffSchemaMigra, conn.Intercept) + // Check error + assert.Empty(t, diff) + assert.ErrorContains(t, err, "error diffing schema") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestDropStatements(t *testing.T) { + drops := findDropStatements("create table t(); drop table t; alter table t drop column c") + assert.Equal(t, []string{"drop table t", "alter table t drop column c"}, drops) +} + +func TestLoadSchemas(t *testing.T) { + expected := []string{ + filepath.Join(utils.SchemasDir, "comment", "model.sql"), + filepath.Join(utils.SchemasDir, "model.sql"), + filepath.Join(utils.SchemasDir, "reaction", "dislike", "model.sql"), + filepath.Join(utils.SchemasDir, "reaction", "like", "model.sql"), + } + fsys := afero.NewMemMapFs() + for _, fp := range expected { + require.NoError(t, afero.WriteFile(fsys, fp, nil, 0644)) + } + // Run test + schemas, err := loadDeclaredSchemas(fsys) + // Check error + assert.NoError(t, err) + assert.ElementsMatch(t, expected, schemas) +} diff --git a/internal/db/diff/migra.go b/internal/db/diff/migra.go new file mode 100644 index 0000000..6c5cc41 --- /dev/null +++ b/internal/db/diff/migra.go @@ -0,0 +1,44 @@ +package diff + +import ( + "bytes" + "context" + _ "embed" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/go-errors/errors" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/config" +) + +//go:embed templates/migra.sh +var diffSchemaScript string + +// Diffs local database schema against shadow, dumps output to stdout. +func DiffSchemaMigra(ctx context.Context, source, target string, schema []string) (string, error) { + env := []string{"SOURCE=" + source, "TARGET=" + target} + // Passing in script string means command line args must be set manually, ie. "$@" + args := "set -- " + strings.Join(schema, " ") + ";" + cmd := []string{"/bin/sh", "-c", args + diffSchemaScript} + var out, stderr bytes.Buffer + if err := utils.DockerRunOnceWithConfig( + ctx, + container.Config{ + Image: config.Images.Migra, + Env: env, + Cmd: cmd, + }, + container.HostConfig{ + NetworkMode: network.NetworkHost, + }, + network.NetworkingConfig{}, + "", + &out, + &stderr, + ); err != nil { + return "", errors.Errorf("error diffing schema: %w:\n%s", err, stderr.String()) + } + return out.String(), nil +} diff --git a/internal/db/diff/pgadmin.go b/internal/db/diff/pgadmin.go new file mode 100644 index 0000000..cb983eb --- /dev/null +++ b/internal/db/diff/pgadmin.go @@ -0,0 +1,107 @@ +package diff + +import ( + "context" + _ "embed" + "fmt" + "os" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/start" + "github.com/supabase/cli/internal/migration/new" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/config" +) + +var warnDiff = `WARNING: The diff tool is not foolproof, so you may need to manually rearrange and modify the generated migration. 
+Run ` + utils.Aqua("supabase db reset") + ` to verify that the new migration does not generate errors.` + +func SaveDiff(out, file string, fsys afero.Fs) error { + if len(out) < 2 { + fmt.Fprintln(os.Stderr, "No schema changes found") + } else if len(file) > 0 { + path := new.GetMigrationPath(utils.GetCurrentTimestamp(), file) + if err := utils.WriteFile(path, []byte(out), fsys); err != nil { + return err + } + fmt.Fprintln(os.Stderr, warnDiff) + } else { + fmt.Println(out) + } + return nil +} + +func RunPgAdmin(ctx context.Context, schema []string, file string, config pgconn.Config, fsys afero.Fs) error { + // Sanity checks. + if err := utils.AssertSupabaseDbIsRunning(); err != nil { + return err + } + + if err := utils.RunProgram(ctx, func(p utils.Program, ctx context.Context) error { + return run(p, ctx, schema, config, fsys) + }); err != nil { + return err + } + + return SaveDiff(output, file, fsys) +} + +var output string + +func run(p utils.Program, ctx context.Context, schema []string, config pgconn.Config, fsys afero.Fs) error { + p.Send(utils.StatusMsg("Creating shadow database...")) + + // 1. Create shadow db and run migrations + shadow, err := CreateShadowDatabase(ctx, utils.Config.Db.ShadowPort) + if err != nil { + return err + } + defer utils.DockerRemove(shadow) + if err := start.WaitForHealthyService(ctx, start.HealthTimeout, shadow); err != nil { + return err + } + if err := MigrateShadowDatabase(ctx, shadow, fsys); err != nil { + return err + } + + p.Send(utils.StatusMsg("Diffing local database with current migrations...")) + + // 2. Diff local db (source) with shadow db (target), print it. + source := utils.ToPostgresURL(config) + target := fmt.Sprintf("postgresql://postgres:postgres@127.0.0.1:%d/postgres", utils.Config.Db.ShadowPort) + output, err = DiffSchemaPgAdmin(ctx, source, target, schema, p) + return err +} + +func DiffSchemaPgAdmin(ctx context.Context, source, target string, schema []string, p utils.Program) (string, error) { + stream := utils.NewDiffStream(p) + args := []string{"--json-diff", source, target} + if len(schema) == 0 { + if err := utils.DockerRunOnceWithStream( + ctx, + config.Images.Differ, + nil, + args, + stream.Stdout(), + stream.Stderr(), + ); err != nil { + return "", err + } + } + for _, s := range schema { + p.Send(utils.StatusMsg("Diffing schema: " + s)) + if err := utils.DockerRunOnceWithStream( + ctx, + config.Images.Differ, + nil, + append([]string{"--schema", s}, args...), + stream.Stdout(), + stream.Stderr(), + ); err != nil { + return "", err + } + } + diffBytes, err := stream.Collect() + return string(diffBytes), err +} diff --git a/internal/db/diff/pgschema.go b/internal/db/diff/pgschema.go new file mode 100644 index 0000000..a72a3c3 --- /dev/null +++ b/internal/db/diff/pgschema.go @@ -0,0 +1,43 @@ +package diff + +import ( + "context" + "database/sql" + "fmt" + "strings" + + "github.com/go-errors/errors" + pgschema "github.com/stripe/pg-schema-diff/pkg/diff" +) + +func DiffPgSchema(ctx context.Context, source, target string, schema []string) (string, error) { + dbSrc, err := sql.Open("pgx", source) + if err != nil { + return "", errors.Errorf("failed to open source database: %w", err) + } + defer dbSrc.Close() + dbDst, err := sql.Open("pgx", target) + if err != nil { + return "", errors.Errorf("failed to open target database: %w", err) + } + defer dbDst.Close() + // Generate DDL based on schema plan + plan, err := pgschema.Generate( + ctx, + dbSrc, + pgschema.DBSchemaSource(dbDst), + pgschema.WithDoNotValidatePlan(), + 
pgschema.WithIncludeSchemas(schema...), + ) + if err != nil { + return "", errors.Errorf("failed to generate plan: %w", err) + } + var lines []string + for _, stat := range plan.Statements { + for _, hazard := range stat.Hazards { + lines = append(lines, fmt.Sprintf("-- %s", hazard)) + } + lines = append(lines, fmt.Sprintf("%s;\n", stat.DDL)) + } + return fmt.Sprintln(strings.Join(lines, "\n")), nil +} diff --git a/internal/db/diff/templates/migra.sh b/internal/db/diff/templates/migra.sh new file mode 100644 index 0000000..f4a8145 --- /dev/null +++ b/internal/db/diff/templates/migra.sh @@ -0,0 +1,20 @@ +#!/bin/sh +set -eu + +# migra doesn't shut down gracefully, so kill it ourselves +trap 'kill -9 %1' TERM + +run_migra() { + # additional flags for diffing extensions + [ "$schema" = "extensions" ] && set -- --create-extensions-only --ignore-extension-versions "$@" + migra --with-privileges --unsafe --schema="$schema" "$@" +} + +# accepts command line args as a list of schemas to diff +for schema in "$@"; do + # migra exits 2 when differences are found + run_migra "$SOURCE" "$TARGET" || status=$? + if [ ${status:-2} -ne 2 ]; then + exit $status + fi +done diff --git a/internal/db/dump/dump.go b/internal/db/dump/dump.go new file mode 100644 index 0000000..5336e62 --- /dev/null +++ b/internal/db/dump/dump.go @@ -0,0 +1,188 @@ +package dump + +import ( + "context" + _ "embed" + "fmt" + "io" + "os" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + cliConfig "github.com/supabase/cli/pkg/config" +) + +var ( + //go:embed templates/dump_schema.sh + dumpSchemaScript string + //go:embed templates/dump_data.sh + dumpDataScript string + //go:embed templates/dump_role.sh + dumpRoleScript string +) + +func Run(ctx context.Context, path string, config pgconn.Config, schema, excludeTable []string, dataOnly, roleOnly, keepComments, useCopy, dryRun bool, fsys afero.Fs) error { + // Initialize output stream + var outStream afero.File + if len(path) > 0 { + f, err := fsys.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return errors.Errorf("failed to open dump file: %w", err) + } + defer f.Close() + outStream = f + } else { + outStream = os.Stdout + } + // Load the requested script + if dryRun { + fmt.Fprintln(os.Stderr, "DRY RUN: *only* printing the pg_dump script to console.") + } + db := "remote" + if utils.IsLocalDatabase(config) { + db = "local" + } + if dataOnly { + fmt.Fprintf(os.Stderr, "Dumping data from %s database...\n", db) + return dumpData(ctx, config, schema, excludeTable, useCopy, dryRun, outStream) + } else if roleOnly { + fmt.Fprintf(os.Stderr, "Dumping roles from %s database...\n", db) + return dumpRole(ctx, config, keepComments, dryRun, outStream) + } + fmt.Fprintf(os.Stderr, "Dumping schemas from %s database...\n", db) + return DumpSchema(ctx, config, schema, keepComments, dryRun, outStream) +} + +func DumpSchema(ctx context.Context, config pgconn.Config, schema []string, keepComments, dryRun bool, stdout io.Writer) error { + var env []string + if len(schema) > 0 { + // Must append flag because empty string results in error + env = append(env, "EXTRA_FLAGS=--schema="+strings.Join(schema, "|")) + } else { + env = append(env, "EXCLUDED_SCHEMAS="+strings.Join(utils.InternalSchemas, "|")) + } + if !keepComments { + env = append(env, "EXTRA_SED=/^--/d") + } + return
dump(ctx, config, dumpSchemaScript, env, dryRun, stdout) +} + +func dumpData(ctx context.Context, config pgconn.Config, schema, excludeTable []string, useCopy, dryRun bool, stdout io.Writer) error { + // We want to dump user data in auth, storage, etc. for migrating to a new project + excludedSchemas := []string{ + "information_schema", + "pg_*", // Wildcard pattern follows pg_dump + // Owned by extensions + // "cron", + "graphql", + "graphql_public", + // "net", + // "pgmq", + // "pgsodium", + // "pgsodium_masks", + "pgtle", + "repack", + "tiger", + "tiger_data", + "timescaledb_*", + "_timescaledb_*", + "topology", + // "vault", + // Managed by Supabase + // "auth", + "extensions", + "pgbouncer", + "realtime", + // "storage", + // "supabase_functions", + "supabase_migrations", + // TODO: Remove in a few versions in favor of _supabase internal db + "_analytics", + "_realtime", + "_supavisor", + } + var env []string + if len(schema) > 0 { + env = append(env, "INCLUDED_SCHEMAS="+strings.Join(schema, "|")) + } else { + env = append(env, "INCLUDED_SCHEMAS=*", "EXCLUDED_SCHEMAS="+strings.Join(excludedSchemas, "|")) + } + var extraFlags []string + if !useCopy { + extraFlags = append(extraFlags, "--column-inserts", "--rows-per-insert 100000") + } + for _, table := range excludeTable { + escaped := quoteUpperCase(table) + // Use separate flags to avoid error: too many dotted names + extraFlags = append(extraFlags, "--exclude-table "+escaped) + } + if len(extraFlags) > 0 { + env = append(env, "EXTRA_FLAGS="+strings.Join(extraFlags, " ")) + } + return dump(ctx, config, dumpDataScript, env, dryRun, stdout) +} + +func quoteUpperCase(table string) string { + escaped := strings.ReplaceAll(table, ".", `"."`) + return fmt.Sprintf(`"%s"`, escaped) +} + +func dumpRole(ctx context.Context, config pgconn.Config, keepComments, dryRun bool, stdout io.Writer) error { + env := []string{} + if !keepComments { + env = append(env, "EXTRA_SED=/^--/d") + } + return dump(ctx, config, dumpRoleScript, env, dryRun, stdout) +} + +func dump(ctx context.Context, config pgconn.Config, script string, env []string, dryRun bool, stdout io.Writer) error { + allEnvs := append(env, + "PGHOST="+config.Host, + fmt.Sprintf("PGPORT=%d", config.Port), + "PGUSER="+config.User, + "PGPASSWORD="+config.Password, + "PGDATABASE="+config.Database, + "RESERVED_ROLES="+strings.Join(utils.ReservedRoles, "|"), + "ALLOWED_CONFIGS="+strings.Join(utils.AllowedConfigs, "|"), + ) + if dryRun { + envMap := make(map[string]string, len(allEnvs)) + for _, e := range allEnvs { + index := strings.IndexByte(e, '=') + if index < 0 { + continue + } + envMap[e[:index]] = e[index+1:] + } + expanded := os.Expand(script, func(key string) string { + // Bash variable expansion is unsupported: + // https://github.com/golang/go/issues/47187 + parts := strings.Split(key, ":") + value := envMap[parts[0]] + // Escape double quotes in env vars + return strings.ReplaceAll(value, `"`, `\"`) + }) + fmt.Println(expanded) + return nil + } + return utils.DockerRunOnceWithConfig( + ctx, + container.Config{ + Image: cliConfig.Images.Pg15, + Env: allEnvs, + Cmd: []string{"bash", "-c", script, "--"}, + }, + container.HostConfig{ + NetworkMode: network.NetworkHost, + }, + network.NetworkingConfig{}, + "", + stdout, + os.Stderr, + ) +} diff --git a/internal/db/dump/dump_test.go b/internal/db/dump/dump_test.go new file mode 100644 index 0000000..3a7c3cc --- /dev/null +++ b/internal/db/dump/dump_test.go @@ -0,0 +1,93 @@ +package dump + +import ( + "context" + "net/http" + "testing" + +
"github.com/h2non/gock" + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestPullCommand(t *testing.T) { + imageUrl := utils.GetRegistryImageUrl(utils.Config.Db.Image) + const containerId = "test-container" + + t.Run("pulls from remote", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + apitest.MockDockerStart(utils.Docker, imageUrl, containerId) + require.NoError(t, apitest.MockDockerLogs(utils.Docker, containerId, "hello world")) + // Run test + err := Run(context.Background(), "schema.sql", dbConfig, nil, nil, false, false, false, false, false, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + // Validate migration + contents, err := afero.ReadFile(fsys, "schema.sql") + assert.NoError(t, err) + assert.Equal(t, []byte("hello world"), contents) + }) + + t.Run("writes to stdout", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + apitest.MockDockerStart(utils.Docker, imageUrl, containerId) + require.NoError(t, apitest.MockDockerLogs(utils.Docker, containerId, "hello world\n")) + // Run test + err := Run(context.Background(), "", dbConfig, []string{"public"}, nil, false, false, false, false, false, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on missing docker", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/images"). 
+ Reply(http.StatusServiceUnavailable) + // Run test + err := Run(context.Background(), "", dbConfig, nil, nil, false, false, false, false, false, fsys) + // Check error + assert.ErrorContains(t, err, "request returned Service Unavailable for API route and version") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on permission denied", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewReadOnlyFs(afero.NewMemMapFs()) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + apitest.MockDockerStart(utils.Docker, imageUrl, containerId) + require.NoError(t, apitest.MockDockerLogs(utils.Docker, containerId, "hello world\n")) + // Run test + err := Run(context.Background(), "schema.sql", dbConfig, nil, nil, false, false, false, false, false, fsys) + // Check error + assert.ErrorContains(t, err, "operation not permitted") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/db/dump/templates/dump_data.sh b/internal/db/dump/templates/dump_data.sh new file mode 100644 index 0000000..7647665 --- /dev/null +++ b/internal/db/dump/templates/dump_data.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +set -euo pipefail + +export PGHOST="$PGHOST" +export PGPORT="$PGPORT" +export PGUSER="$PGUSER" +export PGPASSWORD="$PGPASSWORD" +export PGDATABASE="$PGDATABASE" + +# Disable triggers so that data dump can be restored exactly as it is +echo "SET session_replication_role = replica; +" + +# Explanation of pg_dump flags: +# +# --exclude-schema omit data from internal schemas as they are maintained by platform +# --exclude-table omit data from migration history tables as they are managed by platform +# --column-inserts only column insert syntax is supported, ie. no copy from stdin +# --schema '*' include all other schemas by default +# +# Never delete SQL comments because multiline records may begin with them. +pg_dump \ + --data-only \ + --quote-all-identifier \ + --exclude-schema "${EXCLUDED_SCHEMAS:-}" \ + --exclude-table "auth.schema_migrations" \ + --exclude-table "storage.migrations" \ + --exclude-table "supabase_functions.migrations" \ + --schema "$INCLUDED_SCHEMAS" \ + ${EXTRA_FLAGS:-} + +# Reset session config generated by pg_dump +echo "RESET ALL;" diff --git a/internal/db/dump/templates/dump_role.sh b/internal/db/dump/templates/dump_role.sh new file mode 100644 index 0000000..e5c157b --- /dev/null +++ b/internal/db/dump/templates/dump_role.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -euo pipefail + +export PGHOST="$PGHOST" +export PGPORT="$PGPORT" +export PGUSER="$PGUSER" +export PGPASSWORD="$PGPASSWORD" +export PGDATABASE="$PGDATABASE" + +# Explanation of pg_dumpall flags: +# +# --roles-only only include create, alter, and grant role statements +# +# Explanation of sed substitutions: +# +# - do not create or alter reserved roles as they are blocked by supautils +# - explicitly allow altering safe attributes, ie. statement_timeout, pgrst.* +# - discard role attributes that require superuser, ie. 
nosuperuser, noreplication +# - do not alter membership grants by supabase_admin role +pg_dumpall \ + --roles-only \ + --quote-all-identifier \ + --no-role-passwords \ + --no-comments \ +| sed -E "s/^CREATE ROLE \"($RESERVED_ROLES)\"/-- &/" \ +| sed -E "s/^ALTER ROLE \"($RESERVED_ROLES)\"/-- &/" \ +| sed -E "s/ (NOSUPERUSER|NOREPLICATION)//g" \ +| sed -E "s/^-- (.* SET \"($ALLOWED_CONFIGS)\" .*)/\1/" \ +| sed -E "s/GRANT \".*\" TO \"($RESERVED_ROLES)\"/-- &/" \ +| sed -E "${EXTRA_SED:-}" \ +| uniq + +# Reset session config generated by pg_dump +echo "RESET ALL;" diff --git a/internal/db/dump/templates/dump_schema.sh b/internal/db/dump/templates/dump_schema.sh new file mode 100644 index 0000000..71aae48 --- /dev/null +++ b/internal/db/dump/templates/dump_schema.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +set -euo pipefail + +export PGHOST="$PGHOST" +export PGPORT="$PGPORT" +export PGUSER="$PGUSER" +export PGPASSWORD="$PGPASSWORD" +export PGDATABASE="$PGDATABASE" + +# Explanation of pg_dump flags: +# +# --schema-only omit data like migration history, pgsodium key, etc. +# --exclude-schema omit internal schemas as they are maintained by platform +# +# Explanation of sed substitutions: +# +# - do not alter superuser role "supabase_admin" +# - do not alter foreign data wrappers owner +# - do not include ACL changes on internal schemas +# - do not include RLS policies on cron extension schema +# - do not include event triggers +# - do not create pgtle schema and extension comments +# - do not create publication "supabase_realtime" +pg_dump \ + --schema-only \ + --quote-all-identifier \ + --exclude-schema "${EXCLUDED_SCHEMAS:-}" \ + ${EXTRA_FLAGS:-} \ +| sed -E 's/^CREATE SCHEMA "/CREATE SCHEMA IF NOT EXISTS "/' \ +| sed -E 's/^CREATE TABLE "/CREATE TABLE IF NOT EXISTS "/' \ +| sed -E 's/^CREATE SEQUENCE "/CREATE SEQUENCE IF NOT EXISTS "/' \ +| sed -E 's/^CREATE VIEW "/CREATE OR REPLACE VIEW "/' \ +| sed -E 's/^CREATE FUNCTION "/CREATE OR REPLACE FUNCTION "/' \ +| sed -E 's/^CREATE TRIGGER "/CREATE OR REPLACE TRIGGER "/' \ +| sed -E 's/^CREATE PUBLICATION "supabase_realtime/-- &/' \ +| sed -E 's/^CREATE EVENT TRIGGER /-- &/' \ +| sed -E 's/^ WHEN TAG IN /-- &/' \ +| sed -E 's/^ EXECUTE FUNCTION /-- &/' \ +| sed -E 's/^ALTER EVENT TRIGGER /-- &/' \ +| sed -E 's/^ALTER PUBLICATION "supabase_realtime_/-- &/' \ +| sed -E 's/^ALTER FOREIGN DATA WRAPPER (.+) OWNER TO /-- &/' \ +| sed -E 's/^ALTER DEFAULT PRIVILEGES FOR ROLE "supabase_admin"/-- &/' \ +| sed -E "s/^GRANT (.+) ON (.+) \"(${EXCLUDED_SCHEMAS:-})\"/-- &/" \ +| sed -E "s/^REVOKE (.+) ON (.+) \"(${EXCLUDED_SCHEMAS:-})\"/-- &/" \ +| sed -E 's/^(CREATE EXTENSION IF NOT EXISTS "pg_tle").+/\1;/' \ +| sed -E 's/^(CREATE EXTENSION IF NOT EXISTS "pgsodium").+/\1;/' \ +| sed -E 's/^COMMENT ON EXTENSION (.+)/-- &/' \ +| sed -E 's/^CREATE POLICY "cron_job_/-- &/' \ +| sed -E 's/^ALTER TABLE "cron"/-- &/' \ +| sed -E "${EXTRA_SED:-}" + +# Reset session config generated by pg_dump +echo "RESET ALL;" diff --git a/internal/db/lint/lint.go b/internal/db/lint/lint.go new file mode 100644 index 0000000..5701d89 --- /dev/null +++ b/internal/db/lint/lint.go @@ -0,0 +1,189 @@ +package lint + +import ( + "context" + _ "embed" + "encoding/json" + "fmt" + "io" + "os" + "strings" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" +) + +const ENABLE_PGSQL_CHECK = "CREATE EXTENSION IF NOT EXISTS plpgsql_check" + +var ( + 
AllowedLevels = []string{ + "warning", + "error", + } + //go:embed templates/check.sql + checkSchemaScript string +) + +type LintLevel int + +func toEnum(level string) LintLevel { + for i, curr := range AllowedLevels { + if strings.HasPrefix(level, curr) { + return LintLevel(i) + } + } + return -1 +} + +func Run(ctx context.Context, schema []string, level string, failOn string, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + // Sanity checks. + conn, err := utils.ConnectByConfig(ctx, config, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + // Run lint script + result, err := LintDatabase(ctx, conn, schema) + if err != nil { + return err + } + if len(result) == 0 { + fmt.Fprintln(os.Stderr, "\nNo schema errors found") + return nil + } + + // Apply filtering based on the minimum level + minLevel := toEnum(level) + filtered := filterResult(result, minLevel) + err = printResultJSON(filtered, os.Stdout) + if err != nil { + return err + } + // Check for fail-on condition + failOnLevel := toEnum(failOn) + if failOnLevel != -1 { + for _, r := range filtered { + for _, issue := range r.Issues { + if toEnum(issue.Level) >= failOnLevel { + return fmt.Errorf("fail-on is set to %s, non-zero exit", AllowedLevels[failOnLevel]) + } + } + } + } + return nil +} + +func filterResult(result []Result, minLevel LintLevel) (filtered []Result) { + for _, r := range result { + out := Result{Function: r.Function} + for _, issue := range r.Issues { + if toEnum(issue.Level) >= minLevel { + out.Issues = append(out.Issues, issue) + } + } + if len(out.Issues) > 0 { + filtered = append(filtered, out) + } + } + return filtered +} + +func printResultJSON(result []Result, stdout io.Writer) error { + if len(result) == 0 { + return nil + } + // Pretty print output + enc := json.NewEncoder(stdout) + enc.SetIndent("", " ") + if err := enc.Encode(result); err != nil { + return errors.Errorf("failed to print result json: %w", err) + } + return nil +} + +func LintDatabase(ctx context.Context, conn *pgx.Conn, schema []string) ([]Result, error) { + tx, err := conn.Begin(ctx) + if err != nil { + return nil, errors.Errorf("failed to begin transaction: %w", err) + } + if len(schema) == 0 { + schema, err = migration.ListUserSchemas(ctx, conn) + if err != nil { + return nil, err + } + } + // Always rollback since lint should not have side effects + defer func() { + if err := tx.Rollback(context.Background()); err != nil { + fmt.Fprintln(os.Stderr, err) + } + }() + if _, err := conn.Exec(ctx, ENABLE_PGSQL_CHECK); err != nil { + return nil, errors.Errorf("failed to enable pgsql_check: %w", err) + } + // Queue one lint check per schema in a single batch + batch := pgx.Batch{} + for _, s := range schema { + batch.Queue(checkSchemaScript, s) + } + br := conn.SendBatch(ctx, &batch) + defer br.Close() + var result []Result + for _, s := range schema { + fmt.Fprintln(os.Stderr, "Linting schema:", s) + rows, err := br.Query() + if err != nil { + return nil, errors.Errorf("failed to query rows: %w", err) + } + // Parse result rows + for rows.Next() { + var name string + var data []byte + if err := rows.Scan(&name, &data); err != nil { + return nil, errors.Errorf("failed to scan rows: %w", err) + } + var r Result + if err := json.Unmarshal(data, &r); err != nil { + return nil, errors.Errorf("failed to unmarshal json: %w", err) + } + // Update function name + r.Function = s + "."
+ name + result = append(result, r) + } + err = rows.Err() + if err != nil { + return nil, errors.Errorf("failed to parse rows: %w", err) + } + } + return result, nil +} + +type Query struct { + Position string `json:"position"` + Text string `json:"text"` +} + +type Statement struct { + LineNumber string `json:"lineNumber"` + Text string `json:"text"` +} + +type Issue struct { + Level string `json:"level"` + Message string `json:"message"` + Statement *Statement `json:"statement,omitempty"` + Query *Query `json:"query,omitempty"` + Hint string `json:"hint,omitempty"` + Detail string `json:"detail,omitempty"` + Context string `json:"context,omitempty"` + SQLState string `json:"sqlState,omitempty"` +} + +type Result struct { + Function string `json:"function"` + Issues []Issue `json:"issues"` +} diff --git a/internal/db/lint/lint_test.go b/internal/db/lint/lint_test.go new file mode 100644 index 0000000..8d7abef --- /dev/null +++ b/internal/db/lint/lint_test.go @@ -0,0 +1,299 @@ +package lint + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "testing" + + "github.com/docker/docker/api/types" + "github.com/h2non/gock" + "github.com/jackc/pgconn" + "github.com/jackc/pgerrcode" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestLintCommand(t *testing.T) { + utils.Config.Hostname = "127.0.0.1" + utils.Config.Db.Port = 5432 + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{}) + // Setup db response + expected := Result{ + Function: "22751", + Issues: []Issue{{ + Level: AllowedLevels[1], + Message: `record "r" has no field "c"`, + Statement: &Statement{ + LineNumber: "6", + Text: "RAISE", + }, + Context: `SQL expression "r.c"`, + SQLState: pgerrcode.UndefinedColumn, + }}, + } + data, err := json.Marshal(expected) + require.NoError(t, err) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("begin").Reply("BEGIN"). + Query(ENABLE_PGSQL_CHECK). + Reply("CREATE EXTENSION"). + Query(checkSchemaScript, "public"). + Reply("SELECT 1", []interface{}{"f1", string(data)}). 
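+ // Each mocked row mirrors a plpgsql_check result: the function name plus a JSON report that unmarshals into Result. + // A sketch of the payload shape, with hypothetical values: + // + //   {"function":"public.f1","issues":[{"level":"error","message":"..."}]}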
+ Query("rollback").Reply("ROLLBACK") + // Run test + err = Run(context.Background(), []string{"public"}, "warning", "none", dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) +} + +func TestLintDatabase(t *testing.T) { + t.Run("parses lint results", func(t *testing.T) { + expected := []Result{{ + Function: "public.f1", + Issues: []Issue{{ + Level: AllowedLevels[1], + Message: `record "r" has no field "c"`, + Statement: &Statement{ + LineNumber: "6", + Text: "RAISE", + }, + Context: `SQL expression "r.c"`, + SQLState: pgerrcode.UndefinedColumn, + }, { + Level: "warning extra", + Message: `never read variable "entity"`, + SQLState: pgerrcode.SuccessfulCompletion, + }}, + }, { + Function: "public.f2", + Issues: []Issue{{ + Level: AllowedLevels[1], + Message: `relation "t2" does not exist`, + Statement: &Statement{ + LineNumber: "4", + Text: "FOR over SELECT rows", + }, + Query: &Query{ + Position: "15", + Text: "SELECT * FROM t2", + }, + SQLState: pgerrcode.UndefinedTable, + }}, + }} + r1, err := json.Marshal(expected[0]) + require.NoError(t, err) + r2, err := json.Marshal(expected[1]) + require.NoError(t, err) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("begin").Reply("BEGIN"). + Query(ENABLE_PGSQL_CHECK). + Reply("CREATE EXTENSION"). + Query(checkSchemaScript, "public"). + Reply("SELECT 2", + []interface{}{"f1", string(r1)}, + []interface{}{"f2", string(r2)}, + ). + Query("rollback").Reply("ROLLBACK") + // Run test + result, err := LintDatabase(context.Background(), conn.MockClient(t), []string{"public"}) + assert.NoError(t, err) + // Validate result + assert.ElementsMatch(t, expected, result) + }) + + t.Run("supports multiple schema", func(t *testing.T) { + expected := []Result{{ + Function: "public.where_clause", + Issues: []Issue{{ + Level: AllowedLevels[0], + Message: "target type is different type than source type", + Statement: &Statement{ + LineNumber: "32", + Text: "statement block", + }, + Hint: "The input expression type does not have an assignment cast to the target type.", + Detail: `cast "text" value to "text[]" type`, + Context: `during statement block local variable "clause_arr" initialization on line 3`, + SQLState: pgerrcode.DatatypeMismatch, + }}, + }, { + Function: "private.f2", + Issues: []Issue{}, + }} + r1, err := json.Marshal(expected[0]) + require.NoError(t, err) + r2, err := json.Marshal(expected[1]) + require.NoError(t, err) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("begin").Reply("BEGIN"). + Query(ENABLE_PGSQL_CHECK). + Reply("CREATE EXTENSION"). + Query(checkSchemaScript, "public"). + Reply("SELECT 1", []interface{}{"where_clause", string(r1)}). + Query(checkSchemaScript, "private"). + Reply("SELECT 1", []interface{}{"f2", string(r2)}). + Query("rollback").Reply("ROLLBACK") + // Run test + result, err := LintDatabase(context.Background(), conn.MockClient(t), []string{"public", "private"}) + // Check error + assert.NoError(t, err) + assert.ElementsMatch(t, expected, result) + }) + + t.Run("throws error on missing extension", func(t *testing.T) { + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("begin").Reply("BEGIN"). + Query(ENABLE_PGSQL_CHECK). + ReplyError(pgerrcode.UndefinedFile, `could not open extension control file "/usr/share/postgresql/14/extension/plpgsql_check.control": No such file or directory"`). 
+ Query("rollback").Reply("ROLLBACK") + // Run test + _, err := LintDatabase(context.Background(), conn.MockClient(t), []string{"public"}) + // Check error + assert.Error(t, err) + }) + + t.Run("throws error on malformed json", func(t *testing.T) { + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("begin").Reply("BEGIN"). + Query(ENABLE_PGSQL_CHECK). + Reply("CREATE EXTENSION"). + Query(checkSchemaScript, "public"). + Reply("SELECT 1", []interface{}{"f1", "malformed"}). + Query("rollback").Reply("ROLLBACK") + // Run test + _, err := LintDatabase(context.Background(), conn.MockClient(t), []string{"public"}) + // Check error + assert.Error(t, err) + }) +} + +func TestPrintResult(t *testing.T) { + result := []Result{{ + Function: "public.f1", + Issues: []Issue{{ + Level: "warning", + Message: "test 1a", + }, { + Level: "error", + Message: "test 1b", + }}, + }, { + Function: "private.f2", + Issues: []Issue{{ + Level: "warning extra", + Message: "test 2", + }}, + }} + + t.Run("filters warning level", func(t *testing.T) { + // Run test + var out bytes.Buffer + filtered := filterResult(result, toEnum("warning")) + assert.NoError(t, printResultJSON(filtered, &out)) + // Validate output + var actual []Result + assert.NoError(t, json.Unmarshal(out.Bytes(), &actual)) + assert.ElementsMatch(t, result, actual) + }) + + t.Run("filters error level", func(t *testing.T) { + // Run test + var out bytes.Buffer + filtered := filterResult(result, toEnum("error")) + assert.NoError(t, printResultJSON(filtered, &out)) + // Validate output + var actual []Result + assert.NoError(t, json.Unmarshal(out.Bytes(), &actual)) + assert.ElementsMatch(t, []Result{{ + Function: result[0].Function, + Issues: []Issue{result[0].Issues[1]}, + }}, actual) + }) + + t.Run("exits with non-zero status on warning", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("begin").Reply("BEGIN"). + Query(ENABLE_PGSQL_CHECK). + Reply("CREATE EXTENSION"). + Query(checkSchemaScript, "public"). + Reply("SELECT 1", []interface{}{"f1", `{"function":"22751","issues":[{"level":"warning","message":"test warning"}]}`}). + Query("rollback").Reply("ROLLBACK") + // Run test + err := Run(context.Background(), []string{"public"}, "warning", "warning", dbConfig, fsys, conn.Intercept) + // Check error + assert.ErrorContains(t, err, "fail-on is set to warning, non-zero exit") + }) + + t.Run("exits with non-zero status on error", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("begin").Reply("BEGIN"). + Query(ENABLE_PGSQL_CHECK). + Reply("CREATE EXTENSION"). + Query(checkSchemaScript, "public"). + Reply("SELECT 1", []interface{}{"f1", `{"function":"22751","issues":[{"level":"error","message":"test error"}]}`}). + Query("rollback").Reply("ROLLBACK") + // Run test + err := Run(context.Background(), []string{"public"}, "warning", "error", dbConfig, fsys, conn.Intercept) + // Check error + assert.ErrorContains(t, err, "fail-on is set to error, non-zero exit") + }) + + t.Run("does not exit with non-zero status when fail-on is none", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("begin").Reply("BEGIN"). + Query(ENABLE_PGSQL_CHECK). + Reply("CREATE EXTENSION"). 
+ Query(checkSchemaScript, "public"). + Reply("SELECT 1", []interface{}{"f1", `{"function":"22751","issues":[{"level":"error","message":"test error"}]}`}). + Query("rollback").Reply("ROLLBACK") + // Run test + err := Run(context.Background(), []string{"public"}, "warning", "none", dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/db/lint/templates/check.sql b/internal/db/lint/templates/check.sql new file mode 100644 index 0000000..aa382ff --- /dev/null +++ b/internal/db/lint/templates/check.sql @@ -0,0 +1,6 @@ +-- Ref: https://github.com/okbob/plpgsql_check#mass-check +SELECT p.proname, plpgsql_check_function(p.oid, format:='json') +FROM pg_catalog.pg_namespace n +JOIN pg_catalog.pg_proc p ON pronamespace = n.oid +JOIN pg_catalog.pg_language l ON p.prolang = l.oid +WHERE l.lanname = 'plpgsql' AND p.prorettype <> 2279 AND n.nspname = $1::text; diff --git a/internal/db/pull/pull.go b/internal/db/pull/pull.go new file mode 100644 index 0000000..3f99f97 --- /dev/null +++ b/internal/db/pull/pull.go @@ -0,0 +1,177 @@ +package pull + +import ( + "context" + _ "embed" + "fmt" + "math" + "os" + "strconv" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/diff" + "github.com/supabase/cli/internal/db/dump" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/migration/new" + "github.com/supabase/cli/internal/migration/repair" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" +) + +var ( + errMissing = errors.New("No migrations found") + errInSync = errors.New("No schema changes found") + errConflict = errors.Errorf("The remote database's migration history does not match local files in %s directory.", utils.MigrationsDir) + suggestExtraPull = fmt.Sprintf( + "The %s and %s schemas are excluded. Run %s again to diff them.", + utils.Bold("auth"), + utils.Bold("storage"), + utils.Aqua("supabase db pull --schema auth,storage"), + ) +) + +func Run(ctx context.Context, schema []string, config pgconn.Config, name string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + // 1. Check postgres connection + conn, err := utils.ConnectByConfig(ctx, config, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + // 2. Pull schema + timestamp := utils.GetCurrentTimestamp() + path := new.GetMigrationPath(timestamp, name) + if err := utils.RunProgram(ctx, func(p utils.Program, ctx context.Context) error { + return run(p, ctx, schema, path, conn, fsys) + }); err != nil { + return err + } + // 3. Insert a row to `schema_migrations` + fmt.Fprintln(os.Stderr, "Schema written to "+utils.Bold(path)) + if shouldUpdate, err := utils.NewConsole().PromptYesNo(ctx, "Update remote migration history table?", true); err != nil { + return err + } else if shouldUpdate { + return repair.UpdateMigrationTable(ctx, conn, []string{timestamp}, repair.Applied, false, fsys) + } + return nil +} + +func run(p utils.Program, ctx context.Context, schema []string, path string, conn *pgx.Conn, fsys afero.Fs) error { + config := conn.Config().Config + // 1. Assert `supabase/migrations` and `schema_migrations` are in sync. 
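+ // errMissing (no local migrations) falls back to dumping the remote schema wholesale instead of diffing; any other mismatch aborts with the repair commands suggested by suggestMigrationRepair.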
+ if err := assertRemoteInSync(ctx, conn, fsys); errors.Is(err, errMissing) { + // Not passing down schemas to avoid pulling in managed schemas + if err = dumpRemoteSchema(p, ctx, path, config, fsys); err == nil { + utils.CmdSuggestion = suggestExtraPull + } + return err + } else if err != nil { + return err + } + // 2. Fetch remote schema changes + defaultSchema := len(schema) == 0 + if defaultSchema { + var err error + schema, err = migration.ListUserSchemas(ctx, conn) + if err != nil { + return err + } + } + err := diffRemoteSchema(p, ctx, schema, path, config, fsys) + if defaultSchema && (err == nil || errors.Is(err, errInSync)) { + utils.CmdSuggestion = suggestExtraPull + } + return err +} + +func dumpRemoteSchema(p utils.Program, ctx context.Context, path string, config pgconn.Config, fsys afero.Fs) error { + // Special case if this is the first migration + p.Send(utils.StatusMsg("Dumping schema from remote database...")) + if err := utils.MkdirIfNotExistFS(fsys, utils.MigrationsDir); err != nil { + return err + } + f, err := fsys.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return errors.Errorf("failed to open dump file: %w", err) + } + defer f.Close() + return dump.DumpSchema(ctx, config, nil, false, false, f) +} + +func diffRemoteSchema(p utils.Program, ctx context.Context, schema []string, path string, config pgconn.Config, fsys afero.Fs) error { + w := utils.StatusWriter{Program: p} + // Diff remote db (source) & shadow db (target) and write it as a new migration. + output, err := diff.DiffDatabase(ctx, schema, config, w, fsys, diff.DiffSchemaMigra) + if err != nil { + return err + } + if len(output) == 0 { + return errors.New(errInSync) + } + if err := utils.WriteFile(path, []byte(output), fsys); err != nil { + return errors.Errorf("failed to write dump file: %w", err) + } + return nil +} + +func assertRemoteInSync(ctx context.Context, conn *pgx.Conn, fsys afero.Fs) error { + remoteMigrations, err := migration.ListRemoteMigrations(ctx, conn) + if err != nil { + return err + } + localMigrations, err := list.LoadLocalVersions(fsys) + if err != nil { + return err + } + // Find any mismatch between local and remote migrations + var extraRemote, extraLocal []string + for i, j := 0, 0; i < len(remoteMigrations) || j < len(localMigrations); { + remoteTimestamp := math.MaxInt + if i < len(remoteMigrations) { + if remoteTimestamp, err = strconv.Atoi(remoteMigrations[i]); err != nil { + i++ + continue + } + } + localTimestamp := math.MaxInt + if j < len(localMigrations) { + if localTimestamp, err = strconv.Atoi(localMigrations[j]); err != nil { + j++ + continue + } + } + // Top to bottom chronological order + if localTimestamp < remoteTimestamp { + extraLocal = append(extraLocal, localMigrations[j]) + j++ + } else if remoteTimestamp < localTimestamp { + extraRemote = append(extraRemote, remoteMigrations[i]) + i++ + } else { + i++ + j++ + } + } + // Suggest delete local migrations / reset migration history + if len(extraRemote)+len(extraLocal) > 0 { + utils.CmdSuggestion = suggestMigrationRepair(extraRemote, extraLocal) + return errors.New(errConflict) + } + if len(localMigrations) == 0 { + return errors.New(errMissing) + } + return nil +} + +func suggestMigrationRepair(extraRemote, extraLocal []string) string { + result := fmt.Sprintln("\nMake sure your local git repo is up-to-date. 
If the error persists, try repairing the migration history table:") + for _, version := range extraRemote { + result += fmt.Sprintln(utils.Bold("supabase migration repair --status reverted " + version)) + } + for _, version := range extraLocal { + result += fmt.Sprintln(utils.Bold("supabase migration repair --status applied " + version)) + } + return result +} diff --git a/internal/db/pull/pull_test.go b/internal/db/pull/pull_test.go new file mode 100644 index 0000000..d156db9 --- /dev/null +++ b/internal/db/pull/pull_test.go @@ -0,0 +1,194 @@ +package pull + +import ( + "context" + "errors" + "os" + "path/filepath" + "testing" + + "github.com/h2non/gock" + "github.com/jackc/pgconn" + "github.com/jackc/pgerrcode" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/testing/fstest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "db.supabase.co", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestPullCommand(t *testing.T) { + t.Run("throws error on connect failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), nil, pgconn.Config{}, "", fsys) + // Check error + assert.ErrorContains(t, err, "invalid port (outside range)") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on sync failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + ReplyError(pgerrcode.InvalidCatalogName, `database "postgres" does not exist`) + // Run test + err := Run(context.Background(), nil, dbConfig, "", fsys, conn.Intercept) + // Check error + assert.ErrorContains(t, err, `ERROR: database "postgres" does not exist (SQLSTATE 3D000)`) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestPullSchema(t *testing.T) { + t.Run("dumps remote schema", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), "test-db") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-db", "test")) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 0") + // Run test + err := utils.RunProgram(context.Background(), func(p utils.Program, ctx context.Context) error { + return run(p, ctx, nil, "0_test.sql", conn.MockClient(t), fsys) + }) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + contents, err := afero.ReadFile(fsys, "0_test.sql") + assert.NoError(t, err) + assert.Equal(t, []byte("test"), contents) + }) + + t.Run("throws error on load user schema failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte(""), 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 1", []interface{}{"0"}). 
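+ // With the single remote version "0" matching the local file, run() proceeds to list user schemas; failing that query exercises the error path before any shadow database is started.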
+ Query(migration.ListSchemas, migration.ManagedSchemas). + ReplyError(pgerrcode.DuplicateTable, `relation "test" already exists`) + // Run test + err := utils.RunProgram(context.Background(), func(p utils.Program, ctx context.Context) error { + return run(p, ctx, nil, "", conn.MockClient(t), fsys) + }) + // Check error + assert.ErrorContains(t, err, `ERROR: relation "test" already exists (SQLSTATE 42P07)`) + }) + + t.Run("throws error on diff failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte(""), 0644)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/images/" + utils.GetRegistryImageUrl(utils.Config.Db.Image) + "/json"). + ReplyError(errors.New("network error")) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 1", []interface{}{"0"}) + // Run test + err := utils.RunProgram(context.Background(), func(p utils.Program, ctx context.Context) error { + return run(p, ctx, []string{"public"}, "", conn.MockClient(t), fsys) + }) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestSyncRemote(t *testing.T) { + t.Run("throws error on permission denied", func(t *testing.T) { + // Setup in-memory fs + fsys := &fstest.OpenErrorFs{DenyPath: utils.MigrationsDir} + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 0") + // Run test + err := assertRemoteInSync(context.Background(), conn.MockClient(t), fsys) + // Check error + assert.ErrorIs(t, err, os.ErrPermission) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on mismatched length", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte(""), 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 0") + // Run test + err := assertRemoteInSync(context.Background(), conn.MockClient(t), fsys) + // Check error + assert.ErrorIs(t, err, errConflict) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on mismatched migration", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte(""), 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 1", []interface{}{"20220727064247"}) + // Run test + err := assertRemoteInSync(context.Background(), conn.MockClient(t), fsys) + // Check error + assert.ErrorIs(t, err, errConflict) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on missing migration", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). 
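+ // OpenErrorFs denies reads under the migrations directory, so loading local versions surfaces os.ErrPermission even though the remote query itself succeeds. A sketch of the setup: + // + //   fsys := &fstest.OpenErrorFs{DenyPath: utils.MigrationsDir}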
+ Reply("SELECT 0") + // Run test + err := assertRemoteInSync(context.Background(), conn.MockClient(t), fsys) + // Check error + assert.ErrorIs(t, err, errMissing) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/db/push/push.go b/internal/db/push/push.go new file mode 100644 index 0000000..4a1fdf6 --- /dev/null +++ b/internal/db/push/push.go @@ -0,0 +1,130 @@ +package push + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/up" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/vault" +) + +func Run(ctx context.Context, dryRun, ignoreVersionMismatch bool, includeRoles, includeSeed bool, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + if dryRun { + fmt.Fprintln(os.Stderr, "DRY RUN: migrations will *not* be pushed to the database.") + } + conn, err := utils.ConnectByConfig(ctx, config, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + pending, err := up.GetPendingMigrations(ctx, ignoreVersionMismatch, conn, fsys) + if err != nil { + return err + } + var seeds []migration.SeedFile + if includeSeed { + // TODO: flag should override config but we don't resolve glob paths when seed is disabled. + if !utils.Config.Db.Seed.Enabled { + fmt.Fprintln(os.Stderr, "Skipping seed because it is disabled in config.toml for project:", flags.ProjectRef) + } else if seeds, err = migration.GetPendingSeeds(ctx, utils.Config.Db.Seed.SqlPaths, conn, afero.NewIOFS(fsys)); err != nil { + return err + } + } + var globals []string + if includeRoles { + if exists, err := afero.Exists(fsys, utils.CustomRolesPath); err != nil { + return errors.Errorf("failed to find custom roles: %w", err) + } else if exists { + globals = append(globals, utils.CustomRolesPath) + } + } + if len(pending) == 0 && len(seeds) == 0 && len(globals) == 0 { + fmt.Println("Remote database is up to date.") + return nil + } + // Push pending migrations + if dryRun { + if len(globals) > 0 { + fmt.Fprintln(os.Stderr, "Would create custom roles "+utils.Bold(globals[0])+"...") + } + if len(pending) > 0 { + fmt.Fprintln(os.Stderr, "Would push these migrations:") + fmt.Fprint(os.Stderr, confirmPushAll(pending)) + } + if len(seeds) > 0 { + fmt.Fprintln(os.Stderr, "Would seed these files:") + fmt.Fprint(os.Stderr, confirmSeedAll(seeds)) + } + } else { + if len(globals) > 0 { + msg := "Do you want to create custom roles in the database cluster?" 
+ if shouldPush, err := utils.NewConsole().PromptYesNo(ctx, msg, true); err != nil { + return err + } else if !shouldPush { + return errors.New(context.Canceled) + } + if err := migration.SeedGlobals(ctx, globals, conn, afero.NewIOFS(fsys)); err != nil { + return err + } + } + if len(pending) > 0 { + msg := fmt.Sprintf("Do you want to push these migrations to the remote database?\n%s\n", confirmPushAll(pending)) + if shouldPush, err := utils.NewConsole().PromptYesNo(ctx, msg, true); err != nil { + return err + } else if !shouldPush { + return errors.New(context.Canceled) + } + if err := vault.UpsertVaultSecrets(ctx, utils.Config.Db.Vault, conn); err != nil { + return err + } + if err := migration.ApplyMigrations(ctx, pending, conn, afero.NewIOFS(fsys)); err != nil { + return err + } + } else { + fmt.Fprintln(os.Stderr, "Schema migrations are up to date.") + } + if len(seeds) > 0 { + msg := fmt.Sprintf("Do you want to seed the remote database with these files?\n%s\n", confirmSeedAll(seeds)) + if shouldPush, err := utils.NewConsole().PromptYesNo(ctx, msg, true); err != nil { + return err + } else if !shouldPush { + return errors.New(context.Canceled) + } + if err := migration.SeedData(ctx, seeds, conn, afero.NewIOFS(fsys)); err != nil { + return err + } + } else if includeSeed { + fmt.Fprintln(os.Stderr, "Seed files are up to date.") + } + } + fmt.Println("Finished " + utils.Aqua("supabase db push") + ".") + return nil +} + +func confirmPushAll(pending []string) (msg string) { + for _, path := range pending { + filename := filepath.Base(path) + msg += fmt.Sprintf(" • %s\n", utils.Bold(filename)) + } + return msg +} + +func confirmSeedAll(pending []migration.SeedFile) (msg string) { + for _, seed := range pending { + notice := seed.Path + if seed.Dirty { + notice += " (hash update)" + } + msg += fmt.Sprintf(" • %s\n", utils.Bold(notice)) + } + return msg +} diff --git a/internal/db/push/push_test.go b/internal/db/push/push_test.go new file mode 100644 index 0000000..f25ba20 --- /dev/null +++ b/internal/db/push/push_test.go @@ -0,0 +1,192 @@ +package push + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "os" + "path/filepath" + "testing" + + "github.com/jackc/pgconn" + "github.com/jackc/pgerrcode" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/fstest" + "github.com/supabase/cli/internal/testing/helper" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestMigrationPush(t *testing.T) { + t.Run("dry run", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte(""), 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 0") + // Run test + err := Run(context.Background(), true, false, true, true, dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) + + t.Run("ignores up to date", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). 
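+ // "SELECT 0" denotes zero rows, i.e. an empty remote history; with no local migrations, roles, or seeds to apply either, Run reports the remote database as up to date without prompting.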
+ Reply("SELECT 0") + // Run test + err := Run(context.Background(), false, false, false, false, dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) + + t.Run("throws error on connect failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), false, false, false, false, pgconn.Config{}, fsys) + // Check error + assert.ErrorContains(t, err, "invalid port (outside range)") + }) + + t.Run("throws error on remote load failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + ReplyError(pgerrcode.InvalidCatalogName, `database "target" does not exist`) + // Run test + err := Run(context.Background(), false, false, false, false, pgconn.Config{ + Host: "db.supabase.co", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", + }, fsys, conn.Intercept) + // Check error + assert.ErrorContains(t, err, `ERROR: database "target" does not exist (SQLSTATE 3D000)`) + }) + + t.Run("throws error on push failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte(""), 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 0") + helper.MockMigrationHistory(conn). + Query(migration.INSERT_MIGRATION_VERSION, "0", "test", nil). + ReplyError(pgerrcode.NotNullViolation, `null value in column "version" of relation "schema_migrations"`) + // Run test + err := Run(context.Background(), false, false, false, false, dbConfig, fsys, conn.Intercept) + // Check error + assert.ErrorContains(t, err, `ERROR: null value in column "version" of relation "schema_migrations" (SQLSTATE 23502)`) + assert.ErrorContains(t, err, "At statement 0:\n"+migration.INSERT_MIGRATION_VERSION) + }) +} + +func TestPushAll(t *testing.T) { + t.Run("ignores missing roles and seed", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte{}, 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 0") + helper.MockMigrationHistory(conn). + Query(migration.INSERT_MIGRATION_VERSION, "0", "test", nil). + Reply("INSERT 0 1") + // Run test + err := Run(context.Background(), false, false, true, true, dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) + + t.Run("throws error on cancel", func(t *testing.T) { + t.Cleanup(fstest.MockStdin(t, "n")) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte{}, 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). 
+ Reply("SELECT 0") + // Run test + err := Run(context.Background(), false, false, true, true, dbConfig, fsys, conn.Intercept) + // Check error + assert.ErrorIs(t, err, context.Canceled) + }) + + t.Run("throws error on roles failure", func(t *testing.T) { + // Setup in-memory fs + fsys := &fstest.StatErrorFs{DenyPath: utils.CustomRolesPath} + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte{}, 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 0") + // Run test + err := Run(context.Background(), false, false, true, false, dbConfig, fsys, conn.Intercept) + // Check error + assert.ErrorIs(t, err, os.ErrPermission) + }) + + t.Run("throws error on seed failure", func(t *testing.T) { + digest := hex.EncodeToString(sha256.New().Sum(nil)) + seedPath := filepath.Join(utils.SupabaseDirPath, "seed.sql") + utils.Config.Db.Seed.SqlPaths = []string{seedPath} + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, seedPath, []byte{}, 0644)) + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte{}, 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 0"). + Query(migration.SELECT_SEED_TABLE). + Reply("SELECT 0") + helper.MockMigrationHistory(conn). + Query(migration.INSERT_MIGRATION_VERSION, "0", "test", nil). + Reply("INSERT 0 1") + helper.MockSeedHistory(conn). + Query(migration.UPSERT_SEED_FILE, seedPath, digest). + ReplyError(pgerrcode.NotNullViolation, `null value in column "hash" of relation "seed_files"`) + // Run test + err := Run(context.Background(), false, false, false, true, dbConfig, fsys, conn.Intercept) + // Check error + assert.ErrorContains(t, err, `ERROR: null value in column "hash" of relation "seed_files" (SQLSTATE 23502)`) + }) +} diff --git a/internal/db/remote/changes/changes.go b/internal/db/remote/changes/changes.go new file mode 100644 index 0000000..12c3928 --- /dev/null +++ b/internal/db/remote/changes/changes.go @@ -0,0 +1,47 @@ +package changes + +import ( + "context" + "io" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/diff" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" +) + +var output string + +func Run(ctx context.Context, schema []string, config pgconn.Config, fsys afero.Fs) error { + if err := utils.RunProgram(ctx, func(p utils.Program, ctx context.Context) error { + return run(p, ctx, schema, config, fsys) + }); err != nil { + return err + } + return diff.SaveDiff(output, "", fsys) +} + +func run(p utils.Program, ctx context.Context, schema []string, config pgconn.Config, fsys afero.Fs) (err error) { + // 1. Assert `supabase/migrations` and `schema_migrations` are in sync. + w := utils.StatusWriter{Program: p} + if len(schema) == 0 { + schema, err = loadSchema(ctx, config, w) + if err != nil { + return err + } + } + + // 2. Diff remote db (source) & shadow db (target) and print it. 
+ output, err = diff.DiffDatabase(ctx, schema, config, w, fsys, diff.DiffSchemaMigra) + return err +} + +func loadSchema(ctx context.Context, config pgconn.Config, w io.Writer) ([]string, error) { + conn, err := utils.ConnectByConfigStream(ctx, config, w) + if err != nil { + return nil, err + } + defer conn.Close(context.Background()) + return migration.ListUserSchemas(ctx, conn) +} diff --git a/internal/db/remote/commit/commit.go b/internal/db/remote/commit/commit.go new file mode 100644 index 0000000..3e49346 --- /dev/null +++ b/internal/db/remote/commit/commit.go @@ -0,0 +1,106 @@ +package commit + +import ( + "context" + _ "embed" + "fmt" + "path/filepath" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/diff" + "github.com/supabase/cli/internal/db/dump" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/migration/repair" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" +) + +func Run(ctx context.Context, schema []string, config pgconn.Config, fsys afero.Fs) error { + if err := utils.RunProgram(ctx, func(p utils.Program, ctx context.Context) error { + return run(p, ctx, schema, config, fsys) + }); err != nil { + return err + } + fmt.Println("Finished " + utils.Aqua("supabase db remote commit") + ".") + return nil +} + +func run(p utils.Program, ctx context.Context, schema []string, config pgconn.Config, fsys afero.Fs) error { + // 1. Assert `supabase/migrations` and `schema_migrations` are in sync. + w := utils.StatusWriter{Program: p} + conn, err := utils.ConnectByConfigStream(ctx, config, w) + if err != nil { + return err + } + defer conn.Close(context.Background()) + if err := assertRemoteInSync(ctx, conn, fsys); err != nil { + return err + } + + // 2. Fetch remote schema changes + if len(schema) == 0 { + schema, err = migration.ListUserSchemas(ctx, conn) + if err != nil { + return err + } + } + timestamp := utils.GetCurrentTimestamp() + if err := fetchRemote(p, ctx, schema, timestamp, config, fsys); err != nil { + return err + } + + // 3. Insert a row to `schema_migrations` + return repair.UpdateMigrationTable(ctx, conn, []string{timestamp}, repair.Applied, false, fsys) +} + +func fetchRemote(p utils.Program, ctx context.Context, schema []string, timestamp string, config pgconn.Config, fsys afero.Fs) error { + path := filepath.Join(utils.MigrationsDir, timestamp+"_remote_commit.sql") + // Special case if this is the first migration + if migrations, err := migration.ListLocalMigrations(utils.MigrationsDir, afero.NewIOFS(fsys)); err != nil { + return err + } else if len(migrations) == 0 { + p.Send(utils.StatusMsg("Committing initial migration on remote database...")) + return dump.Run(ctx, path, config, nil, nil, false, false, false, false, false, fsys) + } + + w := utils.StatusWriter{Program: p} + // Diff remote db (source) & shadow db (target) and write it as a new migration. 
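+ // The new migration is written to
+ //   filepath.Join(utils.MigrationsDir, timestamp+"_remote_commit.sql")
+ // assembled at the top of this function; an empty diff is rejected below
+ // with "No schema changes found" instead of committing a no-op file.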
+ output, err := diff.DiffDatabase(ctx, schema, config, w, fsys, diff.DiffSchemaMigra) + if err != nil { + return err + } + if len(output) == 0 { + return errors.New("No schema changes found") + } + return utils.WriteFile(path, []byte(output), fsys) +} + +func assertRemoteInSync(ctx context.Context, conn *pgx.Conn, fsys afero.Fs) error { + remoteMigrations, err := migration.ListRemoteMigrations(ctx, conn) + if err != nil { + return err + } + localMigrations, err := list.LoadLocalVersions(fsys) + if err != nil { + return err + } + + conflictErr := errors.New("The remote database's migration history is not in sync with the contents of " + utils.Bold(utils.MigrationsDir) + `. Resolve this by: +- Updating the project from version control to get the latest ` + utils.Bold(utils.MigrationsDir) + `, +- Pushing unapplied migrations with ` + utils.Aqua("supabase db push") + `, +- Or failing that, manually editing supabase_migrations.schema_migrations table with ` + utils.Aqua("supabase migration repair") + ".") + if len(remoteMigrations) != len(localMigrations) { + return conflictErr + } + + for i, remoteTimestamp := range remoteMigrations { + if localMigrations[i] != remoteTimestamp { + return conflictErr + } + } + + return nil +} diff --git a/internal/db/reset/reset.go b/internal/db/reset/reset.go new file mode 100644 index 0000000..d3e33e5 --- /dev/null +++ b/internal/db/reset/reset.go @@ -0,0 +1,258 @@ +package reset + +import ( + "context" + _ "embed" + "fmt" + "io" + "os" + "strconv" + "strings" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/errdefs" + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/start" + "github.com/supabase/cli/internal/gen/keys" + "github.com/supabase/cli/internal/migration/apply" + "github.com/supabase/cli/internal/migration/repair" + "github.com/supabase/cli/internal/seed/buckets" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/vault" +) + +func Run(ctx context.Context, version string, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + if len(version) > 0 { + if _, err := strconv.Atoi(version); err != nil { + return errors.New(repair.ErrInvalidVersion) + } + if _, err := repair.GetMigrationFile(version, fsys); err != nil { + return err + } + } + if !utils.IsLocalDatabase(config) { + msg := "Do you want to reset the remote database?" + if shouldReset, err := utils.NewConsole().PromptYesNo(ctx, msg, false); err != nil { + return err + } else if !shouldReset { + return errors.New(context.Canceled) + } + return resetRemote(ctx, version, config, fsys, options...) 
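+ // Note: PromptYesNo defaults to "no" above, so a non-interactive reset
+ // against a remote database aborts with context.Canceled unless the user
+ // explicitly confirms (covered by the "throws error on context canceled" test).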
+ } + // Config file is loaded before parsing --linked or --local flags + if err := utils.AssertSupabaseDbIsRunning(); err != nil { + return err + } + // Reset postgres database because extensions (pg_cron, pg_net) require postgres + if err := resetDatabase(ctx, version, fsys, options...); err != nil { + return err + } + // Seed objects from supabase/buckets directory + if resp, err := utils.Docker.ContainerInspect(ctx, utils.StorageId); err == nil { + if resp.State.Health == nil || resp.State.Health.Status != types.Healthy { + if err := start.WaitForHealthyService(ctx, 30*time.Second, utils.StorageId); err != nil { + return err + } + } + if err := buckets.Run(ctx, "", false, fsys); err != nil { + return err + } + } + branch := keys.GetGitBranch(fsys) + fmt.Fprintln(os.Stderr, "Finished "+utils.Aqua("supabase db reset")+" on branch "+utils.Aqua(branch)+".") + return nil +} + +func resetDatabase(ctx context.Context, version string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + fmt.Fprintln(os.Stderr, "Resetting local database"+toLogMessage(version)) + if utils.Config.Db.MajorVersion <= 14 { + return resetDatabase14(ctx, version, fsys, options...) + } + return resetDatabase15(ctx, version, fsys, options...) +} + +func toLogMessage(version string) string { + if len(version) > 0 { + return " to version: " + version + } + return "..." +} + +func resetDatabase14(ctx context.Context, version string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + if err := recreateDatabase(ctx, options...); err != nil { + return err + } + if err := initDatabase(ctx, options...); err != nil { + return err + } + if err := RestartDatabase(ctx, os.Stderr); err != nil { + return err + } + conn, err := utils.ConnectLocalPostgres(ctx, pgconn.Config{}, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + return apply.MigrateAndSeed(ctx, version, conn, fsys) +} + +func resetDatabase15(ctx context.Context, version string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + if err := utils.Docker.ContainerRemove(ctx, utils.DbId, container.RemoveOptions{Force: true}); err != nil { + return errors.Errorf("failed to remove container: %w", err) + } + if err := utils.Docker.VolumeRemove(ctx, utils.DbId, true); err != nil { + return errors.Errorf("failed to remove volume: %w", err) + } + config := start.NewContainerConfig() + hostConfig := start.NewHostConfig() + networkingConfig := network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + utils.NetId: { + Aliases: utils.DbAliases, + }, + }, + } + fmt.Fprintln(os.Stderr, "Recreating database...") + if _, err := utils.DockerStart(ctx, config, hostConfig, networkingConfig, utils.DbId); err != nil { + return err + } + if err := start.WaitForHealthyService(ctx, start.HealthTimeout, utils.DbId); err != nil { + return err + } + if err := start.SetupLocalDatabase(ctx, version, fsys, os.Stderr, options...); err != nil { + return err + } + fmt.Fprintln(os.Stderr, "Restarting containers...") + return restartServices(ctx) +} + +func initDatabase(ctx context.Context, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectLocalPostgres(ctx, pgconn.Config{User: "supabase_admin"}, options...) 
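+ // Connects as supabase_admin so InitSchema14 below runs with elevated
+ // privileges; the port is presumably taken from utils.Config.Db.Port when
+ // the config is otherwise empty (port 0 fails with "invalid port").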
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + return start.InitSchema14(ctx, conn) +} + +// Recreate postgres database by connecting to template1 +func recreateDatabase(ctx context.Context, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectLocalPostgres(ctx, pgconn.Config{User: "supabase_admin", Database: "template1"}, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + if err := DisconnectClients(ctx, conn); err != nil { + return err + } + // We are not dropping roles here because they are cluster level entities. Use stop && start instead. + sql := migration.MigrationFile{ + Statements: []string{ + "DROP DATABASE IF EXISTS postgres WITH (FORCE)", + "CREATE DATABASE postgres WITH OWNER postgres", + "DROP DATABASE IF EXISTS _supabase WITH (FORCE)", + "CREATE DATABASE _supabase WITH OWNER postgres", + }, + } + return sql.ExecBatch(ctx, conn) +} + +const ( + TERMINATE_BACKENDS = "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname IN ('postgres', '_supabase')" + COUNT_REPLICATION_SLOTS = "SELECT COUNT(*) FROM pg_replication_slots WHERE database IN ('postgres', '_supabase')" +) + +func DisconnectClients(ctx context.Context, conn *pgx.Conn) error { + // Must be executed separately because looping in transaction is unsupported + // https://dba.stackexchange.com/a/11895 + disconn := migration.MigrationFile{ + Statements: []string{ + "ALTER DATABASE postgres ALLOW_CONNECTIONS false", + "ALTER DATABASE _supabase ALLOW_CONNECTIONS false", + TERMINATE_BACKENDS, + }, + } + if err := disconn.ExecBatch(ctx, conn); err != nil { + var pgErr *pgconn.PgError + if errors.As(err, &pgErr) && pgErr.Code != pgerrcode.InvalidCatalogName { + return errors.Errorf("failed to disconnect clients: %w", err) + } + } + // Wait for WAL senders to drop their replication slots + policy := start.NewBackoffPolicy(ctx, 10*time.Second) + waitForDrop := func() error { + var count int + if err := conn.QueryRow(ctx, COUNT_REPLICATION_SLOTS).Scan(&count); err != nil { + err = errors.Errorf("failed to count replication slots: %w", err) + return &backoff.PermanentError{Err: err} + } else if count > 0 { + return errors.Errorf("replication slots still active: %d", count) + } + return nil + } + return backoff.Retry(waitForDrop, policy) +} + +func RestartDatabase(ctx context.Context, w io.Writer) error { + fmt.Fprintln(w, "Restarting containers...") + // Some extensions must be manually restarted after pg_terminate_backend + // Ref: https://github.com/citusdata/pg_cron/issues/99 + if err := utils.Docker.ContainerRestart(ctx, utils.DbId, container.StopOptions{}); err != nil { + return errors.Errorf("failed to restart container: %w", err) + } + if err := start.WaitForHealthyService(ctx, start.HealthTimeout, utils.DbId); err != nil { + return err + } + return restartServices(ctx) +} + +func restartServices(ctx context.Context) error { + // No need to restart PostgREST because it automatically reconnects and listens for schema changes + services := listServicesToRestart() + result := utils.WaitAll(services, func(id string) error { + if err := utils.Docker.ContainerRestart(ctx, id, container.StopOptions{}); err != nil && !errdefs.IsNotFound(err) { + return errors.Errorf("failed to restart %s: %w", id, err) + } + return nil + }) + // Do not wait for service healthy as those services may be excluded from starting + return errors.Join(result...) 
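+ // Containers that were never created report not-found and are skipped by
+ // the errdefs.IsNotFound check above, which is why the tests may answer a
+ // restart with 404 for the pooler without failing the reset.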
+} + +func listServicesToRestart() []string { + return []string{utils.StorageId, utils.GotrueId, utils.RealtimeId, utils.PoolerId} +} + +func resetRemote(ctx context.Context, version string, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + fmt.Fprintln(os.Stderr, "Resetting remote database"+toLogMessage(version)) + conn, err := utils.ConnectByConfigStream(ctx, config, io.Discard, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + if err := migration.DropUserSchemas(ctx, conn); err != nil { + return err + } + if err := vault.UpsertVaultSecrets(ctx, utils.Config.Db.Vault, conn); err != nil { + return err + } + return apply.MigrateAndSeed(ctx, version, conn, fsys) +} + +func LikeEscapeSchema(schemas []string) (result []string) { + // Treat _ as literal, * as any character + replacer := strings.NewReplacer("_", `\_`, "*", "%") + for _, sch := range schemas { + result = append(result, replacer.Replace(sch)) + } + return result +} diff --git a/internal/db/reset/reset_test.go b/internal/db/reset/reset_test.go new file mode 100644 index 0000000..4e3558b --- /dev/null +++ b/internal/db/reset/reset_test.go @@ -0,0 +1,494 @@ +package reset + +import ( + "context" + "errors" + "io" + "net/http" + "path/filepath" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/h2non/gock" + "github.com/jackc/pgconn" + "github.com/jackc/pgerrcode" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/db/start" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/testing/fstest" + "github.com/supabase/cli/internal/testing/helper" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/pgtest" + "github.com/supabase/cli/pkg/storage" +) + +func TestResetCommand(t *testing.T) { + utils.Config.Hostname = "127.0.0.1" + utils.Config.Db.Port = 5432 + + var dbConfig = pgconn.Config{ + Host: utils.Config.Hostname, + Port: utils.Config.Db.Port, + User: "admin", + Password: "password", + Database: "postgres", + } + + t.Run("seeds storage after reset", func(t *testing.T) { + utils.DbId = "test-reset" + utils.ConfigId = "test-config" + utils.Config.Db.MajorVersion = 15 + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId). + Reply(http.StatusOK). + JSON(types.ContainerJSON{}) + gock.New(utils.Docker.DaemonHost()). + Delete("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId). + Reply(http.StatusOK) + gock.New(utils.Docker.DaemonHost()). + Delete("/v" + utils.Docker.ClientVersion() + "/volumes/" + utils.DbId). + Reply(http.StatusOK) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), utils.DbId) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/json"). + Reply(http.StatusOK). 
+ JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + // Restarts services + utils.StorageId = "test-storage" + utils.GotrueId = "test-auth" + utils.RealtimeId = "test-realtime" + utils.PoolerId = "test-pooler" + for _, container := range listServicesToRestart() { + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers/" + container + "/restart"). + Reply(http.StatusOK) + } + // Seeds storage + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.StorageId + "/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + gock.New(utils.Config.Api.ExternalUrl). + Get("/storage/v1/bucket"). + Reply(http.StatusOK). + JSON([]storage.BucketResponse{}) + // Run test + err := Run(context.Background(), "", dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on context canceled", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), "", pgconn.Config{Host: "db.supabase.co"}, fsys) + // Check error + assert.ErrorIs(t, err, context.Canceled) + }) + + t.Run("throws error on invalid port", func(t *testing.T) { + t.Cleanup(fstest.MockStdin(t, "y")) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), "", pgconn.Config{Host: "db.supabase.co"}, fsys) + // Check error + assert.ErrorContains(t, err, "invalid port (outside range)") + }) + + t.Run("throws error on db is not started", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers"). + Reply(http.StatusNotFound) + // Run test + err := Run(context.Background(), "", dbConfig, fsys) + // Check error + assert.ErrorIs(t, err, utils.ErrNotRunning) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on failure to recreate", func(t *testing.T) { + utils.DbId = "test-reset" + utils.Config.Db.MajorVersion = 15 + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId). + Reply(http.StatusOK). + JSON(types.ContainerJSON{}) + gock.New(utils.Docker.DaemonHost()). + Delete("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId). + ReplyError(errors.New("network error")) + // Run test + err := Run(context.Background(), "", dbConfig, fsys) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestInitDatabase(t *testing.T) { + t.Run("initializes postgres database", func(t *testing.T) { + utils.Config.Db.Port = 54322 + utils.InitialSchemaPg14Sql = "create schema private" + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(utils.InitialSchemaPg14Sql). 
+ Reply("CREATE SCHEMA") + // Run test + assert.NoError(t, initDatabase(context.Background(), conn.Intercept)) + }) + + t.Run("throws error on connect failure", func(t *testing.T) { + utils.Config.Db.Port = 0 + // Run test + err := initDatabase(context.Background()) + // Check error + assert.ErrorContains(t, err, "invalid port (outside range)") + }) + + t.Run("throws error on duplicate schema", func(t *testing.T) { + utils.Config.Db.Port = 54322 + utils.InitialSchemaPg14Sql = "create schema private" + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(utils.InitialSchemaPg14Sql). + ReplyError(pgerrcode.DuplicateSchema, `schema "public" already exists`) + // Run test + err := initDatabase(context.Background(), conn.Intercept) + // Check error + assert.ErrorContains(t, err, `ERROR: schema "public" already exists (SQLSTATE 42P06)`) + }) +} + +func TestRecreateDatabase(t *testing.T) { + t.Run("resets postgres database", func(t *testing.T) { + utils.Config.Db.Port = 54322 + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("ALTER DATABASE postgres ALLOW_CONNECTIONS false"). + Reply("ALTER DATABASE"). + Query("ALTER DATABASE _supabase ALLOW_CONNECTIONS false"). + Reply("ALTER DATABASE"). + Query(TERMINATE_BACKENDS). + Reply("SELECT 1"). + Query(COUNT_REPLICATION_SLOTS). + Reply("SELECT 1", []interface{}{0}). + Query("DROP DATABASE IF EXISTS postgres WITH (FORCE)"). + Reply("DROP DATABASE"). + Query("CREATE DATABASE postgres WITH OWNER postgres"). + Reply("CREATE DATABASE"). + Query("DROP DATABASE IF EXISTS _supabase WITH (FORCE)"). + Reply("DROP DATABASE"). + Query("CREATE DATABASE _supabase WITH OWNER postgres"). + Reply("CREATE DATABASE") + // Run test + assert.NoError(t, recreateDatabase(context.Background(), conn.Intercept)) + }) + + t.Run("throws error on invalid port", func(t *testing.T) { + utils.Config.Db.Port = 0 + assert.ErrorContains(t, recreateDatabase(context.Background()), "invalid port") + }) + + t.Run("continues on disconnecting missing database", func(t *testing.T) { + utils.Config.Db.Port = 54322 + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("ALTER DATABASE postgres ALLOW_CONNECTIONS false"). + Reply("ALTER DATABASE"). + Query("ALTER DATABASE _supabase ALLOW_CONNECTIONS false"). + ReplyError(pgerrcode.InvalidCatalogName, `database "_supabase" does not exist`). + Query(TERMINATE_BACKENDS). + Query(COUNT_REPLICATION_SLOTS). + ReplyError(pgerrcode.UndefinedTable, `relation "pg_replication_slots" does not exist`) + // Run test + err := recreateDatabase(context.Background(), conn.Intercept) + // Check error + assert.ErrorContains(t, err, `ERROR: relation "pg_replication_slots" does not exist (SQLSTATE 42P01)`) + }) + + t.Run("throws error on failure to disconnect", func(t *testing.T) { + utils.Config.Db.Port = 54322 + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("ALTER DATABASE postgres ALLOW_CONNECTIONS false"). + ReplyError(pgerrcode.InvalidParameterValue, `cannot disallow connections for current database`). + Query("ALTER DATABASE _supabase ALLOW_CONNECTIONS false"). 
+ Query(TERMINATE_BACKENDS) + // Run test + err := recreateDatabase(context.Background(), conn.Intercept) + // Check error + assert.ErrorContains(t, err, "ERROR: cannot disallow connections for current database (SQLSTATE 22023)") + }) + + t.Run("throws error on failure to drop", func(t *testing.T) { + utils.Config.Db.Port = 54322 + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query("ALTER DATABASE postgres ALLOW_CONNECTIONS false"). + Reply("ALTER DATABASE"). + Query("ALTER DATABASE _supabase ALLOW_CONNECTIONS false"). + Reply("ALTER DATABASE"). + Query(TERMINATE_BACKENDS). + Reply("SELECT 1"). + Query(COUNT_REPLICATION_SLOTS). + Reply("SELECT 1", []interface{}{0}). + Query("DROP DATABASE IF EXISTS postgres WITH (FORCE)"). + ReplyError(pgerrcode.ObjectInUse, `database "postgres" is used by an active logical replication slot`). + Query("CREATE DATABASE postgres WITH OWNER postgres"). + Query("DROP DATABASE IF EXISTS _supabase WITH (FORCE)"). + Reply("DROP DATABASE"). + Query("CREATE DATABASE _supabase WITH OWNER postgres"). + Reply("CREATE DATABASE") + err := recreateDatabase(context.Background(), conn.Intercept) + // Check error + assert.ErrorContains(t, err, `ERROR: database "postgres" is used by an active logical replication slot (SQLSTATE 55006)`) + }) +} + +func TestRestartDatabase(t *testing.T) { + t.Run("restarts affected services", func(t *testing.T) { + utils.DbId = "test-reset" + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + // Restarts postgres + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/restart"). + Reply(http.StatusOK) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + // Restarts services + utils.StorageId = "test-storage" + utils.GotrueId = "test-auth" + utils.RealtimeId = "test-realtime" + utils.PoolerId = "test-pooler" + for _, container := range listServicesToRestart() { + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers/" + container + "/restart"). + Reply(http.StatusOK) + } + // Run test + err := RestartDatabase(context.Background(), io.Discard) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on service restart failure", func(t *testing.T) { + utils.DbId = "test-reset" + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + // Restarts postgres + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/restart"). + Reply(http.StatusOK) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/json"). + Reply(http.StatusOK). 
+ JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + // Restarts services + utils.StorageId = "test-storage" + utils.GotrueId = "test-auth" + utils.RealtimeId = "test-realtime" + utils.PoolerId = "test-pooler" + for _, container := range []string{utils.StorageId, utils.GotrueId, utils.RealtimeId} { + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers/" + container + "/restart"). + Reply(http.StatusServiceUnavailable) + } + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.PoolerId + "/restart"). + Reply(http.StatusNotFound) + // Run test + err := RestartDatabase(context.Background(), io.Discard) + // Check error + assert.ErrorContains(t, err, "failed to restart "+utils.StorageId) + assert.ErrorContains(t, err, "failed to restart "+utils.GotrueId) + assert.ErrorContains(t, err, "failed to restart "+utils.RealtimeId) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on db restart failure", func(t *testing.T) { + utils.DbId = "test-reset" + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + // Restarts postgres + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/restart"). + Reply(http.StatusServiceUnavailable) + // Run test + err := RestartDatabase(context.Background(), io.Discard) + // Check error + assert.ErrorContains(t, err, "failed to restart container") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on health check timeout", func(t *testing.T) { + utils.DbId = "test-reset" + start.HealthTimeout = 0 * time.Second + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers/test-reset/restart"). + Reply(http.StatusOK) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/test-reset/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: false, + Status: "exited", + }, + }}) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/test-reset/logs"). + Reply(http.StatusServiceUnavailable) + // Run test + err := RestartDatabase(context.Background(), io.Discard) + // Check error + assert.ErrorContains(t, err, "test-reset container is not running: exited") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +var escapedSchemas = append(migration.ManagedSchemas, "extensions", "public") + +func TestResetRemote(t *testing.T) { + dbConfig := pgconn.Config{ + Host: "db.supabase.co", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", + } + + t.Run("resets remote database", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_schema.sql") + require.NoError(t, afero.WriteFile(fsys, path, nil, 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.ListSchemas, escapedSchemas). + Reply("SELECT 1", []interface{}{"private"}). + Query("DROP SCHEMA IF EXISTS private CASCADE"). + Reply("DROP SCHEMA"). + Query(migration.DropObjects). 
+ Reply("INSERT 0") + helper.MockMigrationHistory(conn). + Query(migration.INSERT_MIGRATION_VERSION, "0", "schema", nil). + Reply("INSERT 0 1") + // Run test + err := resetRemote(context.Background(), "", dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) + + t.Run("resets remote database with seed config disabled", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_schema.sql") + require.NoError(t, afero.WriteFile(fsys, path, nil, 0644)) + seedPath := filepath.Join(utils.SupabaseDirPath, "seed.sql") + // Will raise an error when seeding + require.NoError(t, afero.WriteFile(fsys, seedPath, []byte("INSERT INTO test_table;"), 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.ListSchemas, escapedSchemas). + Reply("SELECT 1", []interface{}{"private"}). + Query("DROP SCHEMA IF EXISTS private CASCADE"). + Reply("DROP SCHEMA"). + Query(migration.DropObjects). + Reply("INSERT 0") + helper.MockMigrationHistory(conn). + Query(migration.INSERT_MIGRATION_VERSION, "0", "schema", nil). + Reply("INSERT 0 1") + utils.Config.Db.Seed.Enabled = false + // Run test + err := resetRemote(context.Background(), "", dbConfig, fsys, conn.Intercept) + // No error should be raised since we're skipping the seed + assert.NoError(t, err) + }) + + t.Run("throws error on connect failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := resetRemote(context.Background(), "", pgconn.Config{}, fsys) + // Check error + assert.ErrorContains(t, err, "invalid port (outside range)") + }) + + t.Run("throws error on drop schema failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.ListSchemas, escapedSchemas). + Reply("SELECT 0"). + Query(migration.DropObjects). 
+ ReplyError(pgerrcode.InsufficientPrivilege, "permission denied for relation supabase_migrations") + // Run test + err := resetRemote(context.Background(), "", dbConfig, fsys, conn.Intercept) + // Check error + assert.ErrorContains(t, err, "ERROR: permission denied for relation supabase_migrations (SQLSTATE 42501)") + }) +} diff --git a/internal/db/start/start.go b/internal/db/start/start.go new file mode 100644 index 0000000..bb26b71 --- /dev/null +++ b/internal/db/start/start.go @@ -0,0 +1,385 @@ +package start + +import ( + "context" + _ "embed" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" + "github.com/docker/go-connections/nat" + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/apply" + "github.com/supabase/cli/internal/status" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/vault" +) + +var ( + HealthTimeout = 120 * time.Second + //go:embed templates/schema.sql + initialSchema string + //go:embed templates/webhook.sql + webhookSchema string + //go:embed templates/_supabase.sql + _supabaseSchema string + //go:embed templates/restore.sh + restoreScript string +) + +func Run(ctx context.Context, fromBackup string, fsys afero.Fs) error { + if err := flags.LoadConfig(fsys); err != nil { + return err + } + if err := utils.AssertSupabaseDbIsRunning(); err == nil { + fmt.Fprintln(os.Stderr, "Postgres database is already running.") + return nil + } else if !errors.Is(err, utils.ErrNotRunning) { + return err + } + err := StartDatabase(ctx, fromBackup, fsys, os.Stderr) + if err != nil { + if err := utils.DockerRemoveAll(context.Background(), os.Stderr, utils.Config.ProjectId); err != nil { + fmt.Fprintln(os.Stderr, err) + } + } + return err +} + +func NewContainerConfig() container.Config { + env := []string{ + "POSTGRES_PASSWORD=" + utils.Config.Db.Password, + "POSTGRES_HOST=/var/run/postgresql", + "JWT_SECRET=" + utils.Config.Auth.JwtSecret, + fmt.Sprintf("JWT_EXP=%d", utils.Config.Auth.JwtExpiry), + } + if len(utils.Config.Experimental.OrioleDBVersion) > 0 { + env = append(env, + "POSTGRES_INITDB_ARGS=--lc-collate=C", + fmt.Sprintf("S3_ENABLED=%t", true), + "S3_HOST="+utils.Config.Experimental.S3Host, + "S3_REGION="+utils.Config.Experimental.S3Region, + "S3_ACCESS_KEY="+utils.Config.Experimental.S3AccessKey, + "S3_SECRET_KEY="+utils.Config.Experimental.S3SecretKey, + ) + } else { + env = append(env, "POSTGRES_INITDB_ARGS=--lc-collate=C.UTF-8") + } + config := container.Config{ + Image: utils.Config.Db.Image, + Env: env, + Healthcheck: &container.HealthConfig{ + Test: []string{"CMD", "pg_isready", "-U", "postgres", "-h", "127.0.0.1", "-p", "5432"}, + Interval: 10 * time.Second, + Timeout: 2 * time.Second, + Retries: 3, + }, + Entrypoint: []string{"sh", "-c", ` +cat <<'EOF' > /etc/postgresql.schema.sql && \ +cat <<'EOF' > /etc/postgresql-custom/pgsodium_root.key && \ +cat <<'EOF' >> /etc/postgresql/postgresql.conf && \ +docker-entrypoint.sh postgres -D /etc/postgresql +` + initialSchema + ` +` + webhookSchema + ` +` + _supabaseSchema + ` +EOF +` + utils.Config.Db.RootKey + ` +EOF +` + utils.Config.Db.Settings.ToPostgresConfig() + ` +EOF`}, + } + if utils.Config.Db.MajorVersion 
>= 14 { + config.Cmd = []string{"postgres", + "-c", "config_file=/etc/postgresql/postgresql.conf", + // Ref: https://postgrespro.com/list/thread-id/2448092 + "-c", `search_path="$user",public,extensions`, + } + } + return config +} + +func NewHostConfig() container.HostConfig { + hostPort := strconv.FormatUint(uint64(utils.Config.Db.Port), 10) + hostConfig := container.HostConfig{ + PortBindings: nat.PortMap{"5432/tcp": []nat.PortBinding{{HostPort: hostPort}}}, + RestartPolicy: container.RestartPolicy{Name: "always"}, + Binds: []string{ + utils.DbId + ":/var/lib/postgresql/data", + utils.ConfigId + ":/etc/postgresql-custom", + }, + } + return hostConfig +} + +func StartDatabase(ctx context.Context, fromBackup string, fsys afero.Fs, w io.Writer, options ...func(*pgx.ConnConfig)) error { + config := NewContainerConfig() + hostConfig := NewHostConfig() + networkingConfig := network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + utils.NetId: { + Aliases: utils.DbAliases, + }, + }, + } + if utils.Config.Db.MajorVersion <= 14 { + config.Entrypoint = []string{"sh", "-c", ` +cat <<'EOF' > /docker-entrypoint-initdb.d/supabase_schema.sql && \ +cat <<'EOF' >> /etc/postgresql/postgresql.conf && \ +docker-entrypoint.sh postgres -D /etc/postgresql +` + _supabaseSchema + ` +EOF +` + utils.Config.Db.Settings.ToPostgresConfig() + ` +EOF`} + hostConfig.Tmpfs = map[string]string{"/docker-entrypoint-initdb.d": ""} + } + if len(fromBackup) > 0 { + config.Entrypoint = []string{"sh", "-c", ` +cat <<'EOF' > /etc/postgresql.schema.sql && \ +cat <<'EOF' > /docker-entrypoint-initdb.d/migrate.sh && \ +cat <<'EOF' > /etc/postgresql-custom/pgsodium_root.key && \ +cat <<'EOF' >> /etc/postgresql/postgresql.conf && \ +docker-entrypoint.sh postgres -D /etc/postgresql +` + initialSchema + ` +` + _supabaseSchema + ` +EOF +` + restoreScript + ` +EOF +` + utils.Config.Db.RootKey + ` +EOF +` + utils.Config.Db.Settings.ToPostgresConfig() + ` +EOF`} + if !filepath.IsAbs(fromBackup) { + fromBackup = filepath.Join(utils.CurrentDirAbs, fromBackup) + } + hostConfig.Binds = append(hostConfig.Binds, utils.ToDockerPath(fromBackup)+":/etc/backup.sql:ro") + } + // Creating volume will not override existing volume, so we must inspect explicitly + _, err := utils.Docker.VolumeInspect(ctx, utils.DbId) + utils.NoBackupVolume = client.IsErrNotFound(err) + if utils.NoBackupVolume { + fmt.Fprintln(w, "Starting database...") + } else if len(fromBackup) > 0 { + utils.CmdSuggestion = fmt.Sprintf("Run %s to remove existing docker volumes.", utils.Aqua("supabase stop --no-backup")) + return errors.Errorf("backup volume already exists") + } else { + fmt.Fprintln(w, "Starting database from backup...") + } + if _, err := utils.DockerStart(ctx, config, hostConfig, networkingConfig, utils.DbId); err != nil { + return err + } + if err := WaitForHealthyService(ctx, HealthTimeout, utils.DbId); err != nil { + return err + } + // Initialize if we are on PG14 and there's no existing db volume + if len(fromBackup) > 0 { + if err := initSchema15(ctx, utils.DbId); err != nil { + return err + } + } else if utils.NoBackupVolume { + if err := SetupLocalDatabase(ctx, "", fsys, w, options...); err != nil { + return err + } + } + return initCurrentBranch(fsys) +} + +func NewBackoffPolicy(ctx context.Context, timeout time.Duration) backoff.BackOff { + policy := backoff.WithMaxRetries( + backoff.NewConstantBackOff(time.Second), + uint64(timeout.Seconds()), + ) + return backoff.WithContext(policy, ctx) +} + +func WaitForHealthyService(ctx 
context.Context, timeout time.Duration, started ...string) error { + probe := func() error { + var errHealth []error + var unhealthy []string + for _, container := range started { + if err := status.IsServiceReady(ctx, container); err != nil { + unhealthy = append(unhealthy, container) + errHealth = append(errHealth, err) + } + } + started = unhealthy + return errors.Join(errHealth...) + } + policy := NewBackoffPolicy(ctx, timeout) + err := backoff.Retry(probe, policy) + if err != nil && !errors.Is(err, context.Canceled) { + // Print container logs for easier debugging + for _, containerId := range started { + fmt.Fprintln(os.Stderr, containerId, "container logs:") + if err := utils.DockerStreamLogsOnce(context.Background(), containerId, os.Stderr, os.Stderr); err != nil { + fmt.Fprintln(os.Stderr, err) + } + } + } + return err +} + +func IsUnhealthyError(err error) bool { + // Health check always returns a joinError + _, ok := err.(interface{ Unwrap() []error }) + return ok +} + +func initCurrentBranch(fsys afero.Fs) error { + // Create _current_branch file to avoid breaking db branch commands + if _, err := fsys.Stat(utils.CurrBranchPath); err == nil { + return nil + } else if !errors.Is(err, os.ErrNotExist) { + return errors.Errorf("failed init current branch: %w", err) + } + return utils.WriteFile(utils.CurrBranchPath, []byte("main"), fsys) +} + +func initSchema(ctx context.Context, conn *pgx.Conn, host string, w io.Writer) error { + fmt.Fprintln(w, "Initialising schema...") + if utils.Config.Db.MajorVersion <= 14 { + if file, err := migration.NewMigrationFromReader(strings.NewReader(utils.GlobalsSql)); err != nil { + return err + } else if err := file.ExecBatch(ctx, conn); err != nil { + return err + } + return InitSchema14(ctx, conn) + } + return initSchema15(ctx, host) +} + +func InitSchema14(ctx context.Context, conn *pgx.Conn) error { + sql := utils.InitialSchemaPg14Sql + if utils.Config.Db.MajorVersion == 13 { + sql = utils.InitialSchemaPg13Sql + } + file, err := migration.NewMigrationFromReader(strings.NewReader(sql)) + if err != nil { + return err + } + return file.ExecBatch(ctx, conn) +} + +func initRealtimeJob(host string) utils.DockerJob { + return utils.DockerJob{ + Image: utils.Config.Realtime.Image, + Env: []string{ + "PORT=4000", + "DB_HOST=" + host, + "DB_PORT=5432", + "DB_USER=supabase_admin", + "DB_PASSWORD=" + utils.Config.Db.Password, + "DB_NAME=postgres", + "DB_AFTER_CONNECT_QUERY=SET search_path TO _realtime", + "DB_ENC_KEY=" + utils.Config.Realtime.EncryptionKey, + "API_JWT_SECRET=" + utils.Config.Auth.JwtSecret, + "METRICS_JWT_SECRET=" + utils.Config.Auth.JwtSecret, + "APP_NAME=realtime", + "SECRET_KEY_BASE=" + utils.Config.Realtime.SecretKeyBase, + "ERL_AFLAGS=" + utils.ToRealtimeEnv(utils.Config.Realtime.IpVersion), + "DNS_NODES=''", + "RLIMIT_NOFILE=", + "SEED_SELF_HOST=true", + "RUN_JANITOR=true", + fmt.Sprintf("MAX_HEADER_LENGTH=%d", utils.Config.Realtime.MaxHeaderLength), + }, + Cmd: []string{"/app/bin/realtime", "eval", fmt.Sprintf(`{:ok, _} = Application.ensure_all_started(:realtime) +{:ok, _} = Realtime.Tenants.health_check("%s")`, utils.Config.Realtime.TenantId)}, + } +} + +func initStorageJob(host string) utils.DockerJob { + return utils.DockerJob{ + Image: utils.Config.Storage.Image, + Env: []string{ + "DB_INSTALL_ROLES=false", + "ANON_KEY=" + utils.Config.Auth.AnonKey, + "SERVICE_KEY=" + utils.Config.Auth.ServiceRoleKey, + "PGRST_JWT_SECRET=" + utils.Config.Auth.JwtSecret, + 
fmt.Sprintf("DATABASE_URL=postgresql://supabase_storage_admin:%s@%s:5432/postgres", utils.Config.Db.Password, host), + fmt.Sprintf("FILE_SIZE_LIMIT=%v", utils.Config.Storage.FileSizeLimit), + "STORAGE_BACKEND=file", + "STORAGE_FILE_BACKEND_PATH=/mnt", + "TENANT_ID=stub", + // TODO: https://github.com/supabase/storage-api/issues/55 + "REGION=stub", + "GLOBAL_S3_BUCKET=stub", + }, + Cmd: []string{"node", "dist/scripts/migrate-call.js"}, + } +} + +func initAuthJob(host string) utils.DockerJob { + return utils.DockerJob{ + Image: utils.Config.Auth.Image, + Env: []string{ + "API_EXTERNAL_URL=" + utils.Config.Api.ExternalUrl, + "GOTRUE_LOG_LEVEL=error", + "GOTRUE_DB_DRIVER=postgres", + fmt.Sprintf("GOTRUE_DB_DATABASE_URL=postgresql://supabase_auth_admin:%s@%s:5432/postgres", utils.Config.Db.Password, host), + "GOTRUE_SITE_URL=" + utils.Config.Auth.SiteUrl, + "GOTRUE_JWT_SECRET=" + utils.Config.Auth.JwtSecret, + }, + Cmd: []string{"gotrue", "migrate"}, + } +} + +func initSchema15(ctx context.Context, host string) error { + // Apply service migrations + var initJobs []utils.DockerJob + if utils.Config.Realtime.Enabled { + initJobs = append(initJobs, initRealtimeJob(host)) + } + if utils.Config.Storage.Enabled { + initJobs = append(initJobs, initStorageJob(host)) + } + if utils.Config.Auth.Enabled { + initJobs = append(initJobs, initAuthJob(host)) + } + logger := utils.GetDebugLogger() + for _, job := range initJobs { + if err := utils.DockerRunJob(ctx, job, io.Discard, logger); err != nil { + return err + } + } + return nil +} + +func SetupLocalDatabase(ctx context.Context, version string, fsys afero.Fs, w io.Writer, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectLocalPostgres(ctx, pgconn.Config{}, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + if err := SetupDatabase(ctx, conn, utils.DbId, w, fsys); err != nil { + return err + } + return apply.MigrateAndSeed(ctx, version, conn, fsys) +} + +func SetupDatabase(ctx context.Context, conn *pgx.Conn, host string, w io.Writer, fsys afero.Fs) error { + if err := initSchema(ctx, conn, host, w); err != nil { + return err + } + // Create vault secrets first so roles.sql can reference them + if err := vault.UpsertVaultSecrets(ctx, utils.Config.Db.Vault, conn); err != nil { + return err + } + err := migration.SeedGlobals(ctx, []string{utils.CustomRolesPath}, conn, afero.NewIOFS(fsys)) + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err +} diff --git a/internal/db/start/start_test.go b/internal/db/start/start_test.go new file mode 100644 index 0000000..fdc76c6 --- /dev/null +++ b/internal/db/start/start_test.go @@ -0,0 +1,364 @@ +package start + +import ( + "context" + "errors" + "io" + "net/http" + "os" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/volume" + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/testing/fstest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/cast" + "github.com/supabase/cli/pkg/pgtest" +) + +func TestInitBranch(t *testing.T) { + t.Run("throws error on permission denied", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewReadOnlyFs(afero.NewMemMapFs()) + // Run test + err := initCurrentBranch(fsys) + // Check error + assert.ErrorContains(t, err, "operation not permitted") + }) + + t.Run("throws error 
on stat failure", func(t *testing.T) { + // Setup in-memory fs + fsys := &fstest.StatErrorFs{DenyPath: utils.CurrBranchPath} + // Run test + err := initCurrentBranch(fsys) + // Check error + assert.ErrorContains(t, err, "permission denied") + }) + + t.Run("throws error on write failure", func(t *testing.T) { + // Setup in-memory fs + fsys := &fstest.OpenErrorFs{DenyPath: utils.CurrBranchPath} + // Run test + err := initCurrentBranch(fsys) + // Check error + assert.ErrorContains(t, err, "permission denied") + }) +} + +func TestStartDatabase(t *testing.T) { + t.Run("initialize main branch", func(t *testing.T) { + utils.Config.Db.MajorVersion = 15 + utils.DbId = "supabase_db_test" + utils.ConfigId = "supabase_config_test" + utils.Config.Db.Port = 5432 + // Setup in-memory fs + fsys := afero.NewMemMapFs() + roles := "create role test" + require.NoError(t, afero.WriteFile(fsys, utils.CustomRolesPath, []byte(roles), 0644)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/volumes/" + utils.DbId). + Reply(http.StatusNotFound). + JSON(volume.Volume{}) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), utils.DbId) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Realtime.Image), "test-realtime") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-realtime", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Storage.Image), "test-storage") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-storage", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Auth.Image), "test-auth") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-auth", "")) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(roles). + Reply("CREATE ROLE") + // Run test + err := StartDatabase(context.Background(), "", fsys, io.Discard, conn.Intercept) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + // Check current branch + contents, err := afero.ReadFile(fsys, utils.CurrBranchPath) + assert.NoError(t, err) + assert.Equal(t, []byte("main"), contents) + }) + + t.Run("recover from backup volume", func(t *testing.T) { + utils.Config.Db.MajorVersion = 14 + utils.DbId = "supabase_db_test" + utils.ConfigId = "supabase_config_test" + utils.Config.Db.Port = 5432 + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/volumes/" + utils.DbId). + Reply(http.StatusOK). + JSON(volume.Volume{}) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), utils.DbId) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/json"). + Reply(http.StatusOK). 
+ JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + // Run test + err := StartDatabase(context.Background(), "", fsys, io.Discard) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + // Check current branch + contents, err := afero.ReadFile(fsys, utils.CurrBranchPath) + assert.NoError(t, err) + assert.Equal(t, []byte("main"), contents) + }) + + t.Run("throws error on start failure", func(t *testing.T) { + utils.Config.Db.MajorVersion = 15 + utils.DbId = "supabase_db_test" + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/volumes/" + utils.DbId). + ReplyError(errors.New("network error")) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/images/" + utils.GetRegistryImageUrl(utils.Config.Db.Image) + "/json"). + Reply(http.StatusInternalServerError) + // Run test + err := StartDatabase(context.Background(), "", fsys, io.Discard) + // Check error + assert.ErrorContains(t, err, "request returned Internal Server Error for API route and version") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestStartCommand(t *testing.T) { + t.Run("throws error on malformed config", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, utils.ConfigPath, []byte("malformed"), 0644)) + // Run test + err := Run(context.Background(), "", fsys) + // Check error + assert.ErrorContains(t, err, "toml: expected = after a key, but the document ends there") + }) + + t.Run("throws error on missing docker", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers"). + ReplyError(errors.New("network error")) + // Run test + err := Run(context.Background(), "", fsys) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("exits if already started", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{}) + // Run test + err := Run(context.Background(), "", fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on start failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/"). + Reply(http.StatusNotFound) + // Fail to start + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/volumes/"). 
+ ReplyError(errors.New("network error")) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/images/" + utils.GetRegistryImageUrl(utils.Config.Db.Image) + "/json"). + ReplyError(errors.New("network error")) + // Cleanup resources + apitest.MockDockerStop(utils.Docker) + // Run test + err := Run(context.Background(), "", fsys) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestSetupDatabase(t *testing.T) { + utils.Config.Db.MajorVersion = 15 + + t.Run("initializes database 14", func(t *testing.T) { + utils.Config.Db.MajorVersion = 14 + defer func() { + utils.Config.Db.MajorVersion = 15 + }() + utils.Config.Db.Port = 5432 + utils.GlobalsSql = "create schema public" + utils.InitialSchemaPg14Sql = "create schema private" + // Setup in-memory fs + fsys := afero.NewMemMapFs() + roles := "create role postgres" + require.NoError(t, afero.WriteFile(fsys, utils.CustomRolesPath, []byte(roles), 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(utils.GlobalsSql). + Reply("CREATE SCHEMA"). + Query(utils.InitialSchemaPg14Sql). + Reply("CREATE SCHEMA"). + Query(roles). + Reply("CREATE ROLE") + // Run test + err := SetupLocalDatabase(context.Background(), "", fsys, io.Discard, conn.Intercept) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on connect failure", func(t *testing.T) { + utils.Config.Db.Port = 0 + // Run test + err := SetupLocalDatabase(context.Background(), "", nil, io.Discard) + // Check error + assert.ErrorContains(t, err, "invalid port (outside range)") + }) + + t.Run("throws error on init failure", func(t *testing.T) { + utils.Config.Realtime.Enabled = true + utils.Config.Db.Port = 5432 + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/images/" + utils.GetRegistryImageUrl(utils.Config.Realtime.Image) + "/json"). 
+ ReplyError(errors.New("network error")) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + // Run test + err := SetupLocalDatabase(context.Background(), "", nil, io.Discard, conn.Intercept) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on read failure", func(t *testing.T) { + utils.Config.Db.Port = 5432 + // Setup in-memory fs + fsys := &fstest.OpenErrorFs{DenyPath: utils.CustomRolesPath} + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Realtime.Image), "test-realtime") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-realtime", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Storage.Image), "test-storage") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-storage", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Auth.Image), "test-auth") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-auth", "")) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + // Run test + err := SetupLocalDatabase(context.Background(), "", fsys, io.Discard, conn.Intercept) + // Check error + assert.ErrorIs(t, err, os.ErrPermission) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} +func TestStartDatabaseWithCustomSettings(t *testing.T) { + t.Run("starts database with custom MaxConnections", func(t *testing.T) { + // Setup + utils.Config.Db.MajorVersion = 15 + utils.DbId = "supabase_db_test" + utils.ConfigId = "supabase_config_test" + utils.Config.Db.Port = 5432 + utils.Config.Db.Settings.MaxConnections = cast.Ptr(uint(50)) + + // Setup in-memory fs + fsys := afero.NewMemMapFs() + + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/volumes/" + utils.DbId). + Reply(http.StatusNotFound). + JSON(volume.Volume{}) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), utils.DbId) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/json"). + Reply(http.StatusOK). 
+ JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Realtime.Image), "test-realtime") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-realtime", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Storage.Image), "test-storage") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-storage", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Auth.Image), "test-auth") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-auth", "")) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + + // Run test + err := StartDatabase(context.Background(), "", fsys, io.Discard, conn.Intercept) + + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + + // Check if the custom MaxConnections setting was applied + config := NewContainerConfig() + assert.Contains(t, config.Entrypoint[2], "max_connections = 50") + }) +} diff --git a/internal/db/start/templates/_supabase.sql b/internal/db/start/templates/_supabase.sql new file mode 100644 index 0000000..6e5d848 --- /dev/null +++ b/internal/db/start/templates/_supabase.sql @@ -0,0 +1,13 @@ +CREATE DATABASE _supabase WITH OWNER postgres; + +-- Switch to the newly created _supabase database +\c _supabase +-- Create schemas in _supabase database for +-- internals tools and reports to not overload user database +-- with non-user activity +CREATE SCHEMA IF NOT EXISTS _analytics; +ALTER SCHEMA _analytics OWNER TO postgres; + +CREATE SCHEMA IF NOT EXISTS _supavisor; +ALTER SCHEMA _supavisor OWNER TO postgres; +\c postgres diff --git a/internal/db/start/templates/restore.sh b/internal/db/start/templates/restore.sh new file mode 100644 index 0000000..d13d608 --- /dev/null +++ b/internal/db/start/templates/restore.sh @@ -0,0 +1,43 @@ +#!/bin/sh +set -eu + +####################################### +# Used by both ami and docker builds to initialise database schema. +# Env vars: +# POSTGRES_DB defaults to postgres +# POSTGRES_HOST defaults to localhost +# POSTGRES_PORT defaults to 5432 +# POSTGRES_PASSWORD defaults to "" +# USE_DBMATE defaults to "" +# Exit code: +# 0 if migration succeeds, non-zero on error. 
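+# Example invocation (hypothetical values, for illustration only):
+#   POSTGRES_HOST=localhost POSTGRES_PORT=5432 POSTGRES_PASSWORD=postgres sh restore.sh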
+####################################### + +export PGDATABASE="${POSTGRES_DB:-postgres}" +export PGHOST="${POSTGRES_HOST:-localhost}" +export PGPORT="${POSTGRES_PORT:-5432}" +export PGPASSWORD="${POSTGRES_PASSWORD:-}" + +echo "$0: restoring roles" +cat "/etc/backup.sql" \ +| grep 'CREATE ROLE' \ +| grep -v 'supabase_admin' \ +| sed -E 's/^(CREATE ROLE postgres);/\1 WITH SUPERUSER;/' \ +| psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U supabase_admin + +echo "$0: restoring schema" +cat "/etc/backup.sql" \ +| sed -E 's/^CREATE VIEW /CREATE OR REPLACE VIEW /' \ +| sed -E 's/^CREATE FUNCTION /CREATE OR REPLACE FUNCTION /' \ +| sed -E 's/^CREATE TRIGGER /CREATE OR REPLACE TRIGGER /' \ +| sed -E 's/^GRANT ALL ON FUNCTION graphql_public\./-- &/' \ +| sed -E 's/^CREATE ROLE /-- &/' \ +| sed -e '/ALTER ROLE postgres WITH / { h; $p; d; }' -e '$G' \ +| psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U supabase_admin + +# run any post migration script to update role passwords +postinit="/etc/postgresql.schema.sql" +if [ -e "$postinit" ]; then + echo "$0: running $postinit" + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U supabase_admin -f "$postinit" +fi diff --git a/internal/db/start/templates/schema.sql b/internal/db/start/templates/schema.sql new file mode 100644 index 0000000..dee166a --- /dev/null +++ b/internal/db/start/templates/schema.sql @@ -0,0 +1,17 @@ +\set pgpass `echo "$PGPASSWORD"` +\set jwt_secret `echo "$JWT_SECRET"` +\set jwt_exp `echo "$JWT_EXP"` + +ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret'; +ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp'; + +ALTER USER postgres WITH PASSWORD :'pgpass'; +ALTER USER authenticator WITH PASSWORD :'pgpass'; +ALTER USER pgbouncer WITH PASSWORD :'pgpass'; +ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass'; +ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass'; +ALTER USER supabase_replication_admin WITH PASSWORD :'pgpass'; +ALTER USER supabase_read_only_user WITH PASSWORD :'pgpass'; + +create schema if not exists _realtime; +alter schema _realtime owner to postgres; diff --git a/internal/db/start/templates/webhook.sql b/internal/db/start/templates/webhook.sql new file mode 100644 index 0000000..52cd097 --- /dev/null +++ b/internal/db/start/templates/webhook.sql @@ -0,0 +1,232 @@ +BEGIN; + +-- Create pg_net extension +CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions; + +-- Create supabase_functions schema +CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin; + +GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role; +ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role; +ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role; +ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role; + +-- supabase_functions.migrations definition +CREATE TABLE supabase_functions.migrations ( + version text PRIMARY KEY, + inserted_at timestamptz NOT NULL DEFAULT NOW() +); + +-- Initial supabase_functions migration +INSERT INTO supabase_functions.migrations (version) VALUES ('initial'); + +-- supabase_functions.hooks definition +CREATE TABLE supabase_functions.hooks ( + id bigserial PRIMARY KEY, + hook_table_id integer NOT NULL, + hook_name text NOT NULL, + created_at timestamptz NOT NULL DEFAULT NOW(), + request_id bigint +); +CREATE INDEX 
supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id); +CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name); +COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.'; + +CREATE FUNCTION supabase_functions.http_request() + RETURNS trigger + LANGUAGE plpgsql + AS $function$ + DECLARE + request_id bigint; + payload jsonb; + url text := TG_ARGV[0]::text; + method text := TG_ARGV[1]::text; + headers jsonb DEFAULT '{}'::jsonb; + params jsonb DEFAULT '{}'::jsonb; + timeout_ms integer DEFAULT 1000; + BEGIN + IF url IS NULL OR url = 'null' THEN + RAISE EXCEPTION 'url argument is missing'; + END IF; + + IF method IS NULL OR method = 'null' THEN + RAISE EXCEPTION 'method argument is missing'; + END IF; + + IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN + headers = '{"Content-Type": "application/json"}'::jsonb; + ELSE + headers = TG_ARGV[2]::jsonb; + END IF; + + IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN + params = '{}'::jsonb; + ELSE + params = TG_ARGV[3]::jsonb; + END IF; + + IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN + timeout_ms = 1000; + ELSE + timeout_ms = TG_ARGV[4]::integer; + END IF; + + CASE + WHEN method = 'GET' THEN + SELECT http_get INTO request_id FROM net.http_get( + url, + params, + headers, + timeout_ms + ); + WHEN method = 'POST' THEN + payload = jsonb_build_object( + 'old_record', OLD, + 'record', NEW, + 'type', TG_OP, + 'table', TG_TABLE_NAME, + 'schema', TG_TABLE_SCHEMA + ); + + SELECT http_post INTO request_id FROM net.http_post( + url, + payload, + params, + headers, + timeout_ms + ); + ELSE + RAISE EXCEPTION 'method argument % is invalid', method; + END CASE; + + INSERT INTO supabase_functions.hooks + (hook_table_id, hook_name, request_id) + VALUES + (TG_RELID, TG_NAME, request_id); + + RETURN NEW; + END +$function$; + +-- Supabase super admin +DO +$$ +BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_functions_admin' + ) + THEN + CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; +END +$$; + +GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin; +GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin; +ALTER USER supabase_functions_admin SET search_path = "supabase_functions"; +ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin; +ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin; +ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin; +GRANT supabase_functions_admin TO postgres; + +-- Remove unused supabase_pg_net_admin role +DO +$$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_pg_net_admin' + ) + THEN + REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin; + DROP OWNED BY supabase_pg_net_admin; + DROP ROLE supabase_pg_net_admin; + END IF; +END +$$; + +-- pg_net grants when extension is already enabled +DO +$$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_extension + WHERE extname = 'pg_net' + ) + THEN + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body 
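+-- Illustrative usage, not part of this template: a webhook trigger on a user
+-- table would invoke supabase_functions.http_request above with text
+-- arguments, e.g.
+--   CREATE TRIGGER my_webhook
+--     AFTER INSERT ON public.orders
+--     FOR EACH ROW EXECUTE FUNCTION supabase_functions.http_request(
+--       'https://example.com/hooks/orders', 'POST',
+--       '{"Content-Type":"application/json"}', '{}', '1000'
+--     );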
jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; +END +$$; + +-- Event trigger for pg_net +CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access() +RETURNS event_trigger +LANGUAGE plpgsql +AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; +END; +$$; +COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net'; + +DO +$$ +BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM pg_event_trigger + WHERE evtname = 'issue_pg_net_access' + ) THEN + CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION') + EXECUTE PROCEDURE extensions.grant_pg_net_access(); + END IF; +END +$$; + +INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants'); + +ALTER function supabase_functions.http_request() SECURITY DEFINER; +ALTER function supabase_functions.http_request() SET search_path = supabase_functions; +REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC; +GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role; + +COMMIT; diff --git a/internal/db/test/test.go b/internal/db/test/test.go new file mode 100644 index 0000000..428fc61 --- /dev/null +++ b/internal/db/test/test.go @@ 
-0,0 +1,99 @@
+package test
+
+import (
+ "context"
+ _ "embed"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+ "github.com/go-errors/errors"
+ "github.com/jackc/pgconn"
+ "github.com/jackc/pgerrcode"
+ "github.com/jackc/pgx/v4"
+ "github.com/spf13/afero"
+ "github.com/spf13/viper"
+ "github.com/supabase/cli/internal/utils"
+ cliConfig "github.com/supabase/cli/pkg/config"
+)
+
+const (
+ ENABLE_PGTAP = "create extension if not exists pgtap with schema extensions"
+ DISABLE_PGTAP = "drop extension if exists pgtap"
+)
+
+func Run(ctx context.Context, testFiles []string, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error {
+ // Build test command
+ cmd := []string{"pg_prove", "--ext", ".pg", "--ext", ".sql", "-r"}
+ for _, fp := range testFiles {
+ relPath, err := filepath.Rel(utils.DbTestsDir, fp)
+ if err != nil {
+ return errors.Errorf("failed to resolve relative path: %w", err)
+ }
+ cmd = append(cmd, relPath)
+ }
+ if viper.GetBool("DEBUG") {
+ cmd = append(cmd, "--verbose")
+ }
+ // Mount tests directory into container as working directory
+ srcPath, err := filepath.Abs(utils.DbTestsDir)
+ if err != nil {
+ return errors.Errorf("failed to resolve absolute path: %w", err)
+ }
+ dstPath := "/tmp"
+ binds := []string{fmt.Sprintf("%s:%s:ro", srcPath, dstPath)}
+ // Enable pgTAP if it is not already installed
+ alreadyExists := false
+ options = append(options, func(cc *pgx.ConnConfig) {
+ cc.OnNotice = func(pc *pgconn.PgConn, n *pgconn.Notice) {
+ alreadyExists = n.Code == pgerrcode.DuplicateObject
+ }
+ })
+ conn, err := utils.ConnectByConfig(ctx, config, options...)
+ if err != nil {
+ return err
+ }
+ defer conn.Close(context.Background())
+ if _, err := conn.Exec(ctx, ENABLE_PGTAP); err != nil {
+ return errors.Errorf("failed to enable pgTAP: %w", err)
+ }
+ if !alreadyExists {
+ defer func() {
+ if _, err := conn.Exec(ctx, DISABLE_PGTAP); err != nil {
+ fmt.Fprintln(os.Stderr, "failed to disable pgTAP:", err)
+ }
+ }()
+ }
+ // Use custom network when connecting to local database.
+ // Disable SELinux via security-opt to allow pgTAP to work properly.
+ hostConfig := container.HostConfig{Binds: binds, SecurityOpt: []string{"label:disable"}}
+ if utils.IsLocalDatabase(config) {
+ config.Host = utils.DbAliases[0]
+ config.Port = 5432
+ } else {
+ hostConfig.NetworkMode = network.NetworkHost
+ }
+ // Run pg_prove on volume mount
+ return utils.DockerRunOnceWithConfig(
+ ctx,
+ container.Config{
+ Image: cliConfig.Images.PgProve,
+ Env: []string{
+ "PGHOST=" + config.Host,
+ fmt.Sprintf("PGPORT=%d", config.Port),
+ "PGUSER=" + config.User,
+ "PGPASSWORD=" + config.Password,
+ "PGDATABASE=" + config.Database,
+ },
+ Cmd: cmd,
+ WorkingDir: dstPath,
+ },
+ hostConfig,
+ network.NetworkingConfig{},
+ "",
+ os.Stdout,
+ os.Stderr,
+ )
+}
diff --git a/internal/db/test/test_test.go b/internal/db/test/test_test.go
new file mode 100644
index 0000000..063f9bc
--- /dev/null
+++ b/internal/db/test/test_test.go
@@ -0,0 +1,101 @@
+package test
+
+import (
+ "context"
+ "errors"
+ "testing"
+
+ "github.com/h2non/gock"
+ "github.com/jackc/pgconn"
+ "github.com/jackc/pgerrcode"
+ "github.com/spf13/afero"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/supabase/cli/internal/testing/apitest"
+ "github.com/supabase/cli/internal/utils"
+ "github.com/supabase/cli/pkg/config"
+ "github.com/supabase/cli/pkg/pgtest"
+)
+
+var dbConfig = pgconn.Config{
+ Host: 
"db.supabase.co", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestRunCommand(t *testing.T) { + t.Run("runs tests with pg_prove", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(ENABLE_PGTAP). + Reply("CREATE EXTENSION"). + Query(DISABLE_PGTAP). + Reply("DROP EXTENSION") + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + containerId := "test-pg-prove" + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(config.Images.PgProve), containerId) + require.NoError(t, apitest.MockDockerLogs(utils.Docker, containerId, "Result: SUCCESS")) + // Run test + err := Run(context.Background(), []string{"nested"}, dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) + + t.Run("throws error on connect failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Run test + err := Run(context.Background(), nil, dbConfig, fsys) + // Check error + assert.ErrorContains(t, err, "failed to connect to postgres") + }) + + t.Run("throws error on pgtap failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(ENABLE_PGTAP). + ReplyError(pgerrcode.DuplicateObject, `extension "pgtap" already exists, skipping`) + // Run test + err := Run(context.Background(), nil, dbConfig, fsys, conn.Intercept) + // Check error + assert.ErrorContains(t, err, "failed to enable pgTAP") + }) + + t.Run("throws error on network failure", func(t *testing.T) { + errNetwork := errors.New("network error") + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(ENABLE_PGTAP). + Reply("CREATE EXTENSION"). + Query(DISABLE_PGTAP). + Reply("DROP EXTENSION") + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/images/" + utils.GetRegistryImageUrl(config.Images.PgProve) + "/json"). 
+ ReplyError(errNetwork) + // Run test + err := Run(context.Background(), nil, dbConfig, fsys, conn.Intercept) + // Check error + assert.ErrorIs(t, err, errNetwork) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/debug/postgres.go b/internal/debug/postgres.go new file mode 100644 index 0000000..9bc275b --- /dev/null +++ b/internal/debug/postgres.go @@ -0,0 +1,165 @@ +package debug + +import ( + "context" + "encoding/json" + "errors" + "io" + "log" + "net" + "os" + + "github.com/jackc/pgproto3/v2" + "github.com/jackc/pgx/v4" + "google.golang.org/grpc/test/bufconn" +) + +type Proxy struct { + dialContext func(ctx context.Context, network, addr string) (net.Conn, error) + errChan chan error +} + +func NewProxy() Proxy { + dialer := net.Dialer{} + return Proxy{ + dialContext: dialer.DialContext, + errChan: make(chan error, 1), + } +} + +func SetupPGX(config *pgx.ConnConfig) { + proxy := Proxy{ + dialContext: config.DialFunc, + errChan: make(chan error, 1), + } + config.DialFunc = proxy.DialFunc + config.TLSConfig = nil +} + +func (p *Proxy) DialFunc(ctx context.Context, network, addr string) (net.Conn, error) { + serverConn, err := p.dialContext(ctx, network, addr) + if err != nil { + return nil, err + } + + const bufSize = 1024 * 1024 + ln := bufconn.Listen(bufSize) + go func() { + defer serverConn.Close() + clientConn, err := ln.Accept() + if err != nil { + // Unreachable code as bufconn never throws, but just in case + panic(err) + } + defer clientConn.Close() + + backend := NewBackend(clientConn) + frontend := NewFrontend(serverConn) + go backend.forward(frontend, p.errChan) + go frontend.forward(backend, p.errChan) + + for { + // Since pgx closes connection first, every EOF is seen as unexpected + if err := <-p.errChan; err != nil && !errors.Is(err, io.ErrUnexpectedEOF) { + panic(err) + } + } + }() + + return ln.DialContext(ctx) +} + +type Backend struct { + *pgproto3.Backend + logger *log.Logger +} + +func NewBackend(clientConn net.Conn) Backend { + return Backend{ + pgproto3.NewBackend(pgproto3.NewChunkReader(clientConn), clientConn), + log.New(os.Stderr, "PG Recv: ", log.LstdFlags|log.Lmsgprefix), + } +} + +func (b *Backend) forward(frontend Frontend, errChan chan error) { + startupMessage, err := b.ReceiveStartupMessage() + if err != nil { + errChan <- err + return + } + + buf, err := json.Marshal(startupMessage) + if err != nil { + errChan <- err + return + } + frontend.logger.Println(string(buf)) + + if err = frontend.Send(startupMessage); err != nil { + errChan <- err + return + } + + for { + msg, err := b.Receive() + if err != nil { + errChan <- err + return + } + + buf, err := json.Marshal(msg) + if err != nil { + errChan <- err + return + } + frontend.logger.Println(string(buf)) + + if err = frontend.Send(msg); err != nil { + errChan <- err + return + } + } +} + +type Frontend struct { + *pgproto3.Frontend + logger *log.Logger +} + +func NewFrontend(serverConn net.Conn) Frontend { + return Frontend{ + pgproto3.NewFrontend(pgproto3.NewChunkReader(serverConn), serverConn), + log.New(os.Stderr, "PG Send: ", log.LstdFlags|log.Lmsgprefix), + } +} + +func (f *Frontend) forward(backend Backend, errChan chan error) { + for { + msg, err := f.Receive() + if err != nil { + errChan <- err + return + } + + buf, err := json.Marshal(msg) + if err != nil { + errChan <- err + return + } + backend.logger.Println(string(buf)) + + if _, ok := msg.(pgproto3.AuthenticationResponseMessage); ok { + // Set the authentication type so the next backend.Receive() will + // 
properly decode the appropriate 'p' message. + if err := backend.SetAuthType(f.GetAuthType()); err != nil { + errChan <- err + return + } + } + + if err := backend.Send(msg); err != nil { + errChan <- err + return + } + } +} diff --git a/internal/debug/postgres_test.go b/internal/debug/postgres_test.go new file mode 100644 index 0000000..9f46fd6 --- /dev/null +++ b/internal/debug/postgres_test.go @@ -0,0 +1,31 @@ +package debug + +import ( + "context" + "testing" + + "github.com/jackc/pgx/v4" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/pkg/pgtest" +) + +func TestPostgresProxy(t *testing.T) { + const postgresUrl = "postgresql://postgres:password@127.0.0.1:5432/postgres" + + t.Run("forwards messages between frontend and backend", func(t *testing.T) { + // Parse connection url + config, err := pgx.ParseConfig(postgresUrl) + require.NoError(t, err) + // Setup postgres mock + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Intercept(config) + // Run test + SetupPGX(config) + ctx := context.Background() + proxy, err := pgx.ConnectConfig(ctx, config) + assert.NoError(t, err) + assert.NoError(t, proxy.Close(ctx)) + }) +} diff --git a/internal/encryption/get/get.go b/internal/encryption/get/get.go new file mode 100644 index 0000000..93c6986 --- /dev/null +++ b/internal/encryption/get/get.go @@ -0,0 +1,23 @@ +package get + +import ( + "context" + "fmt" + + "github.com/go-errors/errors" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context, projectRef string) error { + resp, err := utils.GetSupabase().V1GetPgsodiumConfigWithResponse(ctx, projectRef) + if err != nil { + return errors.Errorf("failed to retrieve pgsodium config: %w", err) + } + + if resp.JSON200 == nil { + return errors.New("Unexpected error retrieving project root key: " + string(resp.Body)) + } + + fmt.Println(resp.JSON200.RootKey) + return nil +} diff --git a/internal/encryption/get/get_test.go b/internal/encryption/get/get_test.go new file mode 100644 index 0000000..8f28018 --- /dev/null +++ b/internal/encryption/get/get_test.go @@ -0,0 +1,52 @@ +package get + +import ( + "context" + "net/http" + "testing" + + "github.com/h2non/gock" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func TestGetRootKey(t *testing.T) { + t.Run("fetches project encryption key", func(t *testing.T) { + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/pgsodium"). + Reply(http.StatusOK). + JSON(api.PgsodiumConfigResponse{RootKey: "test-key"}) + // Run test + err := Run(context.Background(), project) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws on invalid credentials", func(t *testing.T) { + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/pgsodium"). 
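+ // Replying 403 below drives Run into its resp.JSON200 == nil branch; the
+ // same guard pattern appears across the CLI's api wrappers (sketch):
+ //   if resp.JSON200 == nil {
+ //     return errors.New("Unexpected error retrieving project root key: " + string(resp.Body))
+ //   }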
+ Reply(http.StatusForbidden) + // Run test + err := Run(context.Background(), project) + // Check error + assert.ErrorContains(t, err, "Unexpected error retrieving project root key:") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/encryption/update/update.go b/internal/encryption/update/update.go new file mode 100644 index 0000000..ed57682 --- /dev/null +++ b/internal/encryption/update/update.go @@ -0,0 +1,31 @@ +package update + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/go-errors/errors" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/credentials" + "github.com/supabase/cli/pkg/api" +) + +func Run(ctx context.Context, projectRef string, stdin *os.File) error { + fmt.Fprintf(os.Stderr, "Enter a new root key: ") + input := credentials.PromptMasked(stdin) + resp, err := utils.GetSupabase().V1UpdatePgsodiumConfigWithResponse(ctx, projectRef, api.UpdatePgsodiumConfigBody{ + RootKey: strings.TrimSpace(input), + }) + if err != nil { + return errors.Errorf("failed to update pgsodium config: %w", err) + } + + if resp.JSON200 == nil { + return errors.New("Unexpected error updating project root key: " + string(resp.Body)) + } + + fmt.Println("Finished " + utils.Aqua("supabase root-key update") + ".") + return nil +} diff --git a/internal/encryption/update/update_test.go b/internal/encryption/update/update_test.go new file mode 100644 index 0000000..8a6f609 --- /dev/null +++ b/internal/encryption/update/update_test.go @@ -0,0 +1,61 @@ +package update + +import ( + "context" + "net/http" + "os" + "testing" + + "github.com/h2non/gock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func TestUpdateRootKey(t *testing.T) { + t.Run("updates project encryption key", func(t *testing.T) { + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Setup root key + r, w, err := os.Pipe() + require.NoError(t, err) + _, err = w.WriteString("test-key") + require.NoError(t, err) + require.NoError(t, w.Close()) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Put("/v1/projects/" + project + "/pgsodium"). + JSON(api.UpdatePgsodiumConfigBody{RootKey: "test-key"}). + Reply(http.StatusOK). + JSON(api.PgsodiumConfigResponse{RootKey: "test-key"}) + // Run test + err = Run(context.Background(), project, r) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws on invalid credentials", func(t *testing.T) { + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Put("/v1/projects/" + project + "/pgsodium"). 
+ Reply(http.StatusForbidden) + // Run test + err := Run(context.Background(), project, nil) + // Check error + assert.ErrorContains(t, err, "Unexpected error updating project root key:") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/functions/delete/delete.go b/internal/functions/delete/delete.go new file mode 100644 index 0000000..47d5957 --- /dev/null +++ b/internal/functions/delete/delete.go @@ -0,0 +1,37 @@ +package delete + +import ( + "context" + "fmt" + "net/http" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context, slug string, projectRef string, fsys afero.Fs) error { + // 1. Sanity checks. + { + if err := utils.ValidateFunctionSlug(slug); err != nil { + return err + } + } + + // 2. Delete Function. + resp, err := utils.GetSupabase().V1DeleteAFunctionWithResponse(ctx, projectRef, slug) + if err != nil { + return errors.Errorf("failed to delete function: %w", err) + } + switch resp.StatusCode() { + case http.StatusNotFound: + return errors.New("Function " + utils.Aqua(slug) + " does not exist on the Supabase project.") + case http.StatusOK: + break + default: + return errors.New("Failed to delete Function " + utils.Aqua(slug) + " on the Supabase project: " + string(resp.Body)) + } + + fmt.Println("Deleted Function " + utils.Aqua(slug) + " from project " + utils.Aqua(projectRef) + ".") + return nil +} diff --git a/internal/functions/delete/delete_test.go b/internal/functions/delete/delete_test.go new file mode 100644 index 0000000..110208e --- /dev/null +++ b/internal/functions/delete/delete_test.go @@ -0,0 +1,94 @@ +package delete + +import ( + "context" + "errors" + "net/http" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" +) + +func TestDeleteCommand(t *testing.T) { + const slug = "test-func" + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + t.Run("deletes function from project", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Delete("/v1/projects/" + project + "/functions/" + slug). + Reply(http.StatusOK) + // Run test + assert.NoError(t, Run(context.Background(), slug, project, fsys)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on malformed slug", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Run test + err := Run(context.Background(), "@", project, fsys) + // Check error + assert.ErrorContains(t, err, "Invalid Function name.") + }) + + t.Run("throws error on network failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Delete("/v1/projects/" + project + "/functions/" + slug). 
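+ // gock's ReplyError simulates a transport-level failure rather than an HTTP
+ // status, so the client call itself errors out. In isolation (hypothetical
+ // endpoint):
+ //   defer gock.OffAll()
+ //   gock.New("https://api.example.com").
+ //     Get("/v1/things").
+ //     ReplyError(errors.New("network error"))
+ //   _, err := http.DefaultClient.Get("https://api.example.com/v1/things")
+ //   // err is a *url.Error wrapping "network error"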
+ ReplyError(errors.New("network error")) + // Run test + err := Run(context.Background(), slug, project, fsys) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on missing function", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Delete("/v1/projects/" + project + "/functions/" + slug). + Reply(http.StatusNotFound). + JSON(map[string]string{"message": "Function not found"}) + // Run test + err := Run(context.Background(), slug, project, fsys) + // Check error + assert.ErrorContains(t, err, "Function test-func does not exist on the Supabase project.") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on service unavailable", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Delete("/v1/projects/" + project + "/functions/" + slug). + Reply(http.StatusServiceUnavailable) + // Run test + err := Run(context.Background(), slug, project, fsys) + // Check error + assert.ErrorContains(t, err, "Failed to delete Function test-func on the Supabase project:") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/functions/deploy/bundle.go b/internal/functions/deploy/bundle.go new file mode 100644 index 0000000..c82826b --- /dev/null +++ b/internal/functions/deploy/bundle.go @@ -0,0 +1,163 @@ +package deploy + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/spf13/viper" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/function" +) + +type dockerBundler struct { + fsys afero.Fs +} + +func NewDockerBundler(fsys afero.Fs) function.EszipBundler { + return &dockerBundler{fsys: fsys} +} + +func (b *dockerBundler) Bundle(ctx context.Context, entrypoint string, importMap string, staticFiles []string, output io.Writer) error { + // Create temp directory to store generated eszip + slug := filepath.Base(filepath.Dir(entrypoint)) + fmt.Fprintln(os.Stderr, "Bundling Function:", utils.Bold(slug)) + cwd, err := os.Getwd() + if err != nil { + return errors.Errorf("failed to get working directory: %w", err) + } + // BitBucket pipelines require docker bind mounts to be world writable + hostOutputDir := filepath.Join(utils.TempDir, fmt.Sprintf(".output_%s", slug)) + if err := b.fsys.MkdirAll(hostOutputDir, 0777); err != nil { + return errors.Errorf("failed to mkdir: %w", err) + } + defer func() { + if err := b.fsys.RemoveAll(hostOutputDir); err != nil { + fmt.Fprintln(os.Stderr, err) + } + }() + // Create bind mounts + binds, err := GetBindMounts(cwd, utils.FunctionsDir, hostOutputDir, entrypoint, importMap, b.fsys) + if err != nil { + return err + } + hostOutputPath := filepath.Join(hostOutputDir, "output.eszip") + // Create exec command + cmd := []string{"bundle", "--entrypoint", utils.ToDockerPath(entrypoint), "--output", utils.ToDockerPath(hostOutputPath)} + if len(importMap) > 0 { + cmd = append(cmd, "--import-map", utils.ToDockerPath(importMap)) + } + for _, staticFile := range staticFiles { + cmd = append(cmd, "--static", utils.ToDockerPath(staticFile)) + } + if viper.GetBool("DEBUG") { + cmd = append(cmd, "--verbose") + } + + env := 
[]string{} + if custom_registry := os.Getenv("NPM_CONFIG_REGISTRY"); custom_registry != "" { + env = append(env, "NPM_CONFIG_REGISTRY="+custom_registry) + } + // Run bundle + if err := utils.DockerRunOnceWithConfig( + ctx, + container.Config{ + Image: utils.Config.EdgeRuntime.Image, + Env: env, + Cmd: cmd, + WorkingDir: utils.ToDockerPath(cwd), + }, + container.HostConfig{ + Binds: binds, + }, + network.NetworkingConfig{}, + "", + os.Stdout, + os.Stderr, + ); err != nil { + return err + } + // Read and compress + eszipBytes, err := b.fsys.Open(hostOutputPath) + if err != nil { + return errors.Errorf("failed to open eszip: %w", err) + } + defer eszipBytes.Close() + return function.Compress(eszipBytes, output) +} + +func GetBindMounts(cwd, hostFuncDir, hostOutputDir, hostEntrypointPath, hostImportMapPath string, fsys afero.Fs) ([]string, error) { + sep := string(filepath.Separator) + // Docker requires all host paths to be absolute + if !filepath.IsAbs(hostFuncDir) { + hostFuncDir = filepath.Join(cwd, hostFuncDir) + } + if !strings.HasSuffix(hostFuncDir, sep) { + hostFuncDir += sep + } + dockerFuncDir := utils.ToDockerPath(hostFuncDir) + // TODO: bind ./supabase/functions:/home/deno/functions to hide PII? + binds := []string{ + // Reuse deno cache directory, ie. DENO_DIR, between container restarts + // https://denolib.gitbook.io/guide/advanced/deno_dir-code-fetch-and-cache + utils.EdgeRuntimeId + ":/root/.cache/deno:rw", + hostFuncDir + ":" + dockerFuncDir + ":ro", + } + if len(hostOutputDir) > 0 { + if !filepath.IsAbs(hostOutputDir) { + hostOutputDir = filepath.Join(cwd, hostOutputDir) + } + if !strings.HasSuffix(hostOutputDir, sep) { + hostOutputDir += sep + } + if !strings.HasPrefix(hostOutputDir, hostFuncDir) { + dockerOutputDir := utils.ToDockerPath(hostOutputDir) + binds = append(binds, hostOutputDir+":"+dockerOutputDir+":rw") + } + } + // Allow entrypoints outside the functions directory + hostEntrypointDir := filepath.Dir(hostEntrypointPath) + if len(hostEntrypointDir) > 0 { + if !filepath.IsAbs(hostEntrypointDir) { + hostEntrypointDir = filepath.Join(cwd, hostEntrypointDir) + } + if !strings.HasSuffix(hostEntrypointDir, sep) { + hostEntrypointDir += sep + } + if !strings.HasPrefix(hostEntrypointDir, hostFuncDir) && + !strings.HasPrefix(hostEntrypointDir, hostOutputDir) { + dockerEntrypointDir := utils.ToDockerPath(hostEntrypointDir) + binds = append(binds, hostEntrypointDir+":"+dockerEntrypointDir+":ro") + } + } + // Imports outside of ./supabase/functions will be bound by absolute path + if len(hostImportMapPath) > 0 { + if !filepath.IsAbs(hostImportMapPath) { + hostImportMapPath = filepath.Join(cwd, hostImportMapPath) + } + importMap, err := utils.NewImportMap(hostImportMapPath, fsys) + if err != nil { + return nil, err + } + modules := importMap.BindHostModules() + dockerImportMapPath := utils.ToDockerPath(hostImportMapPath) + modules = append(modules, hostImportMapPath+":"+dockerImportMapPath+":ro") + // Remove any duplicate mount points + for _, mod := range modules { + hostPath := strings.Split(mod, ":")[0] + if !strings.HasPrefix(hostPath, hostFuncDir) && + (len(hostOutputDir) == 0 || !strings.HasPrefix(hostPath, hostOutputDir)) && + (len(hostEntrypointDir) == 0 || !strings.HasPrefix(hostPath, hostEntrypointDir)) { + binds = append(binds, mod) + } + } + } + return binds, nil +} diff --git a/internal/functions/deploy/bundle_test.go b/internal/functions/deploy/bundle_test.go new file mode 100644 index 0000000..45b5aed --- /dev/null +++ 
b/internal/functions/deploy/bundle_test.go @@ -0,0 +1,51 @@ +package deploy + +import ( + "archive/zip" + "bytes" + "context" + "net/http" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" +) + +func TestDockerBundle(t *testing.T) { + imageUrl := utils.GetRegistryImageUrl(utils.Config.EdgeRuntime.Image) + utils.EdgeRuntimeId = "test-edge-runtime" + const containerId = "test-container" + + t.Run("throws error on bundle failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup deno error + t.Setenv("TEST_DENO_ERROR", "bundle failed") + var body bytes.Buffer + archive := zip.NewWriter(&body) + w, err := archive.Create("deno") + require.NoError(t, err) + _, err = w.Write([]byte("binary")) + require.NoError(t, err) + require.NoError(t, archive.Close()) + // Setup mock api + defer gock.OffAll() + gock.New("https://github.com"). + Get("/denoland/deno/releases/download/v" + utils.DenoVersion). + Reply(http.StatusOK). + Body(&body) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + apitest.MockDockerStart(utils.Docker, imageUrl, containerId) + require.NoError(t, apitest.MockDockerLogsExitCode(utils.Docker, containerId, 1)) + // Run test + err = NewDockerBundler(fsys).Bundle(context.Background(), "", "", []string{}, &body) + // Check error + assert.ErrorContains(t, err, "error running container: exit 1") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/functions/deploy/deploy.go b/internal/functions/deploy/deploy.go new file mode 100644 index 0000000..33ba5c2 --- /dev/null +++ b/internal/functions/deploy/deploy.go @@ -0,0 +1,144 @@ +package deploy + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/config" + "github.com/supabase/cli/pkg/function" +) + +func Run(ctx context.Context, slugs []string, useDocker bool, noVerifyJWT *bool, importMapPath string, maxJobs uint, fsys afero.Fs) error { + // Load function config and project id + if err := flags.LoadConfig(fsys); err != nil { + return err + } else if len(slugs) > 0 { + for _, s := range slugs { + if err := utils.ValidateFunctionSlug(s); err != nil { + return err + } + } + } else if slugs, err = GetFunctionSlugs(fsys); err != nil { + return err + } + // TODO: require all functions to be deployed from config for v2 + if len(slugs) == 0 { + return errors.Errorf("No Functions specified or found in %s", utils.Bold(utils.FunctionsDir)) + } + functionConfig, err := GetFunctionConfig(slugs, importMapPath, noVerifyJWT, fsys) + if err != nil { + return err + } + if useDocker { + api := function.NewEdgeRuntimeAPI(flags.ProjectRef, *utils.GetSupabase(), NewDockerBundler(fsys)) + if err := api.UpsertFunctions(ctx, functionConfig); err != nil { + return err + } + } else if err := deploy(ctx, functionConfig, maxJobs, fsys); errors.Is(err, errNoDeploy) { + fmt.Fprintln(os.Stderr, err) + return nil + } else if err != nil { + return err + } + fmt.Printf("Deployed Functions on project %s: %s\n", utils.Aqua(flags.ProjectRef), strings.Join(slugs, ", ")) + url := fmt.Sprintf("%s/project/%v/functions", utils.GetSupabaseDashboardURL(), flags.ProjectRef) + fmt.Println("You can inspect your 
deployment in the Dashboard: " + url)
+ return nil
+}
+
+func GetFunctionSlugs(fsys afero.Fs) (slugs []string, err error) {
+ pattern := filepath.Join(utils.FunctionsDir, "*", "index.ts")
+ paths, err := afero.Glob(fsys, pattern)
+ if err != nil {
+ return nil, errors.Errorf("failed to glob function slugs: %w", err)
+ }
+ for _, path := range paths {
+ slug := filepath.Base(filepath.Dir(path))
+ if utils.FuncSlugPattern.MatchString(slug) {
+ slugs = append(slugs, slug)
+ }
+ }
+ // Add all function slugs declared in config file
+ for slug := range utils.Config.Functions {
+ slugs = append(slugs, slug)
+ }
+ return slugs, nil
+}
+
+func GetFunctionConfig(slugs []string, importMapPath string, noVerifyJWT *bool, fsys afero.Fs) (config.FunctionConfig, error) {
+ // Although some functions do not require an import map, it's more convenient to set up
+ // the vscode deno extension with a single import map for all functions.
+ fallbackExists := true
+ functionsUsingDeprecatedGlobalFallback := []string{}
+ functionsUsingDeprecatedImportMap := []string{}
+ if _, err := fsys.Stat(utils.FallbackImportMapPath); errors.Is(err, os.ErrNotExist) {
+ fallbackExists = false
+ } else if err != nil {
+ return nil, errors.Errorf("failed to stat fallback import map: %w", err)
+ }
+ // An import map passed via flag is resolved relative to the current directory instead of the workdir
+ if len(importMapPath) > 0 && !filepath.IsAbs(importMapPath) {
+ importMapPath = filepath.Join(utils.CurrentDirAbs, importMapPath)
+ }
+ functionConfig := make(config.FunctionConfig, len(slugs))
+ for _, name := range slugs {
+ function, ok := utils.Config.Functions[name]
+ if !ok {
+ function.Enabled = true
+ function.VerifyJWT = true
+ }
+ // Precedence order: flag > config > fallback
+ functionDir := filepath.Join(utils.FunctionsDir, name)
+ if len(function.Entrypoint) == 0 {
+ function.Entrypoint = filepath.Join(functionDir, "index.ts")
+ }
+ if len(importMapPath) > 0 {
+ function.ImportMap = importMapPath
+ } else if len(function.ImportMap) == 0 {
+ denoJsonPath := filepath.Join(functionDir, "deno.json")
+ denoJsoncPath := filepath.Join(functionDir, "deno.jsonc")
+ importMapPath := filepath.Join(functionDir, "import_map.json")
+ if _, err := fsys.Stat(denoJsonPath); err == nil {
+ function.ImportMap = denoJsonPath
+ } else if _, err := fsys.Stat(denoJsoncPath); err == nil {
+ function.ImportMap = denoJsoncPath
+ } else if _, err := fsys.Stat(importMapPath); err == nil {
+ function.ImportMap = importMapPath
+ functionsUsingDeprecatedImportMap = append(functionsUsingDeprecatedImportMap, name)
+ } else if fallbackExists {
+ function.ImportMap = utils.FallbackImportMapPath
+ functionsUsingDeprecatedGlobalFallback = append(functionsUsingDeprecatedGlobalFallback, name)
+ }
+ }
+ if noVerifyJWT != nil {
+ function.VerifyJWT = !*noVerifyJWT
+ }
+ functionConfig[name] = function
+ }
+ if len(functionsUsingDeprecatedImportMap) > 0 {
+ fmt.Fprintln(os.Stderr,
+ utils.Yellow("WARNING:"),
+ "Functions using deprecated import_map.json (please migrate to deno.json):",
+ utils.Aqua(strings.Join(functionsUsingDeprecatedImportMap, ", ")),
+ )
+ }
+ if len(functionsUsingDeprecatedGlobalFallback) > 0 {
+ fmt.Fprintln(os.Stderr,
+ utils.Yellow("WARNING:"),
+ "Functions using fallback import map:",
+ utils.Aqua(strings.Join(functionsUsingDeprecatedGlobalFallback, ", ")),
+ )
+ fmt.Fprintln(os.Stderr,
+ "Please use the recommended per-function dependency declaration:",
+ utils.Aqua("https://supabase.com/docs/guides/functions/import-maps"),
+ )
+ }
+ return functionConfig, nil
+} diff 
--git a/internal/functions/deploy/deploy_test.go b/internal/functions/deploy/deploy_test.go new file mode 100644 index 0000000..9a5b997 --- /dev/null +++ b/internal/functions/deploy/deploy_test.go @@ -0,0 +1,350 @@ +package deploy + +import ( + "context" + "fmt" + "net/http" + "os" + "path/filepath" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/api" + "github.com/supabase/cli/pkg/cast" + "github.com/supabase/cli/pkg/config" +) + +func TestDeployCommand(t *testing.T) { + flags.ProjectRef = apitest.RandomProjectRef() + const slug = "test-func" + const containerId = "test-container" + imageUrl := utils.GetRegistryImageUrl(utils.Config.EdgeRuntime.Image) + + t.Run("deploys multiple functions", func(t *testing.T) { + functions := []string{slug, slug + "-2"} + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Setup valid deno path + _, err := fsys.Create(utils.DenoPathOverride) + require.NoError(t, err) + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + flags.ProjectRef + "/functions"). + Reply(http.StatusOK). + JSON([]api.FunctionResponse{}) + for i := range functions { + // Do not match slug to avoid flakey tests + gock.New(utils.DefaultApiHost). + Post("/v1/projects/" + flags.ProjectRef + "/functions"). + Reply(http.StatusCreated). + JSON(api.FunctionResponse{Id: fmt.Sprintf("%d", i)}) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + apitest.MockDockerStart(utils.Docker, imageUrl, containerId) + require.NoError(t, apitest.MockDockerLogs(utils.Docker, containerId, "bundled")) + } + // Setup output file + for _, v := range functions { + outputDir := filepath.Join(utils.TempDir, fmt.Sprintf(".output_%s", v)) + require.NoError(t, afero.WriteFile(fsys, filepath.Join(outputDir, "output.eszip"), []byte(""), 0644)) + } + // Run test + noVerifyJWT := true + err = Run(context.Background(), functions, true, &noVerifyJWT, "", 1, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("deploys functions from config", func(t *testing.T) { + t.Cleanup(func() { clear(utils.Config.Functions) }) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + f, err := fsys.OpenFile(utils.ConfigPath, os.O_APPEND|os.O_WRONLY, 0600) + require.NoError(t, err) + _, err = f.WriteString(` +[functions.` + slug + `] +import_map = "./import_map.json" +`) + require.NoError(t, err) + require.NoError(t, f.Close()) + importMapPath, err := filepath.Abs(filepath.Join(utils.SupabaseDirPath, "import_map.json")) + require.NoError(t, err) + require.NoError(t, afero.WriteFile(fsys, importMapPath, []byte("{}"), 0644)) + // Setup function entrypoint + entrypointPath := filepath.Join(utils.FunctionsDir, slug, "index.ts") + require.NoError(t, afero.WriteFile(fsys, entrypointPath, []byte{}, 0644)) + ignorePath := filepath.Join(utils.FunctionsDir, "_ignore", "index.ts") + require.NoError(t, afero.WriteFile(fsys, ignorePath, []byte{}, 0644)) + // Setup valid access token + token := apitest.RandomAccessToken(t) 
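+ // The config fragment appended above maps to the per-function keys accepted
+ // in supabase/config.toml (values illustrative):
+ //   [functions.my-func]
+ //   enabled = true
+ //   verify_jwt = false
+ //   import_map = "./functions/my-func/deno.json"
+ // Keys left unset fall back to the defaults applied in GetFunctionConfig:
+ // enabled and verify_jwt are true, and the entrypoint is the function's
+ // index.ts.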
+ t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Setup valid deno path + _, err = fsys.Create(utils.DenoPathOverride) + require.NoError(t, err) + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + flags.ProjectRef + "/functions"). + Reply(http.StatusOK). + JSON([]api.FunctionResponse{}) + gock.New(utils.DefaultApiHost). + Post("/v1/projects/"+flags.ProjectRef+"/functions"). + MatchParam("slug", slug). + ParamPresent("import_map_path"). + Reply(http.StatusCreated). + JSON(api.FunctionResponse{Id: "1"}) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + apitest.MockDockerStart(utils.Docker, imageUrl, containerId) + require.NoError(t, apitest.MockDockerLogs(utils.Docker, containerId, "bundled")) + // Setup output file + outputDir := filepath.Join(utils.TempDir, fmt.Sprintf(".output_%s", slug)) + require.NoError(t, afero.WriteFile(fsys, filepath.Join(outputDir, "output.eszip"), []byte(""), 0644)) + // Run test + err = Run(context.Background(), nil, true, nil, "", 1, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("skip disabled functions from config", func(t *testing.T) { + t.Cleanup(func() { clear(utils.Config.Functions) }) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + f, err := fsys.OpenFile(utils.ConfigPath, os.O_APPEND|os.O_WRONLY, 0600) + require.NoError(t, err) + _, err = f.WriteString(` +[functions.disabled-func] +enabled = false +import_map = "./import_map.json" +`) + require.NoError(t, err) + require.NoError(t, f.Close()) + importMapPath, err := filepath.Abs(filepath.Join(utils.SupabaseDirPath, "import_map.json")) + require.NoError(t, err) + require.NoError(t, afero.WriteFile(fsys, importMapPath, []byte("{}"), 0644)) + // Setup function entrypoints + require.NoError(t, afero.WriteFile(fsys, filepath.Join(utils.FunctionsDir, "enabled-func", "index.ts"), []byte{}, 0644)) + require.NoError(t, afero.WriteFile(fsys, filepath.Join(utils.FunctionsDir, "disabled-func", "index.ts"), []byte{}, 0644)) + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Setup valid deno path + _, err = fsys.Create(utils.DenoPathOverride) + require.NoError(t, err) + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + flags.ProjectRef + "/functions"). + Reply(http.StatusOK). + JSON([]api.FunctionResponse{}) + gock.New(utils.DefaultApiHost). + Post("/v1/projects/"+flags.ProjectRef+"/functions"). + MatchParam("slug", "enabled-func"). + Reply(http.StatusCreated). 
+ JSON(api.FunctionResponse{Id: "1"}) + require.NoError(t, apitest.MockDocker(utils.Docker)) + apitest.MockDockerStart(utils.Docker, imageUrl, containerId) + require.NoError(t, apitest.MockDockerLogs(utils.Docker, containerId, "bundled")) + // Setup output file + outputDir := filepath.Join(utils.TempDir, ".output_enabled-func") + require.NoError(t, afero.WriteFile(fsys, filepath.Join(outputDir, "output.eszip"), []byte(""), 0644)) + // Run test + err = Run(context.Background(), nil, true, nil, "", 1, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on malformed slug", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Run test + err := Run(context.Background(), []string{"_invalid"}, true, nil, "", 1, fsys) + // Check error + assert.ErrorContains(t, err, "Invalid Function name.") + }) + + t.Run("throws error on empty functions", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Run test + err := Run(context.Background(), nil, true, nil, "", 1, fsys) + // Check error + assert.ErrorContains(t, err, "No Functions specified or found in supabase/functions") + }) + + t.Run("verify_jwt param falls back to config", func(t *testing.T) { + t.Cleanup(func() { clear(utils.Config.Functions) }) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + f, err := fsys.OpenFile(utils.ConfigPath, os.O_APPEND|os.O_WRONLY, 0600) + require.NoError(t, err) + _, err = f.WriteString(` +[functions.` + slug + `] +verify_jwt = false +`) + require.NoError(t, err) + require.NoError(t, f.Close()) + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Setup valid deno path + _, err = fsys.Create(utils.DenoPathOverride) + require.NoError(t, err) + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + flags.ProjectRef + "/functions"). + Reply(http.StatusOK). + JSON([]api.FunctionResponse{}) + gock.New(utils.DefaultApiHost). + Post("/v1/projects/"+flags.ProjectRef+"/functions"). + MatchParam("verify_jwt", "false"). + Reply(http.StatusCreated). 
+ JSON(api.FunctionResponse{Id: "1"}) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + apitest.MockDockerStart(utils.Docker, imageUrl, containerId) + require.NoError(t, apitest.MockDockerLogs(utils.Docker, containerId, "bundled")) + // Setup output file + outputDir := filepath.Join(utils.TempDir, fmt.Sprintf(".output_%s", slug)) + require.NoError(t, afero.WriteFile(fsys, filepath.Join(outputDir, "output.eszip"), []byte(""), 0644)) + // Run test + assert.NoError(t, Run(context.Background(), []string{slug}, true, nil, "", 1, fsys)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("verify_jwt flag overrides config", func(t *testing.T) { + t.Cleanup(func() { clear(utils.Config.Functions) }) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + f, err := fsys.OpenFile(utils.ConfigPath, os.O_APPEND|os.O_WRONLY, 0600) + require.NoError(t, err) + _, err = f.WriteString(` +[functions.` + slug + `] +verify_jwt = false +`) + require.NoError(t, err) + require.NoError(t, f.Close()) + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Setup valid deno path + _, err = fsys.Create(utils.DenoPathOverride) + require.NoError(t, err) + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + flags.ProjectRef + "/functions"). + Reply(http.StatusOK). + JSON([]api.FunctionResponse{}) + gock.New(utils.DefaultApiHost). + Post("/v1/projects/"+flags.ProjectRef+"/functions"). + MatchParam("verify_jwt", "true"). + Reply(http.StatusCreated). + JSON(api.FunctionResponse{Id: "1"}) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + apitest.MockDockerStart(utils.Docker, imageUrl, containerId) + require.NoError(t, apitest.MockDockerLogs(utils.Docker, containerId, "bundled")) + // Setup output file + outputDir := filepath.Join(utils.TempDir, fmt.Sprintf(".output_%s", slug)) + require.NoError(t, afero.WriteFile(fsys, filepath.Join(outputDir, "output.eszip"), []byte(""), 0644)) + // Run test + noVerifyJwt := false + assert.NoError(t, Run(context.Background(), []string{slug}, true, &noVerifyJwt, "", 1, fsys)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestImportMapPath(t *testing.T) { + t.Run("loads import map from default location", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, utils.FallbackImportMapPath, []byte("{}"), 0644)) + // Run test + fc, err := GetFunctionConfig([]string{"test"}, "", nil, fsys) + // Check error + assert.NoError(t, err) + assert.Equal(t, utils.FallbackImportMapPath, fc["test"].ImportMap) + }) + + t.Run("per function config takes precedence", func(t *testing.T) { + t.Cleanup(func() { clear(utils.Config.Functions) }) + slug := "hello" + utils.Config.Functions = config.FunctionConfig{ + slug: {ImportMap: "import_map.json"}, + } + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, utils.FallbackImportMapPath, []byte("{}"), 0644)) + // Run test + fc, err := GetFunctionConfig([]string{slug}, "", nil, fsys) + // Check error + assert.NoError(t, err) + assert.Equal(t, "import_map.json", fc[slug].ImportMap) + }) + + t.Run("overrides with cli flag", func(t *testing.T) { + t.Cleanup(func() { clear(utils.Config.Functions) }) + slug := "hello" + utils.Config.Functions = config.FunctionConfig{ + slug: 
{ImportMap: "import_map.json"}, + } + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Custom global import map loaded via cli flag + customImportMapPath := filepath.Join(utils.FunctionsDir, "custom_import_map.json") + require.NoError(t, afero.WriteFile(fsys, customImportMapPath, []byte("{}"), 0644)) + // Create fallback import map to test precedence order + require.NoError(t, afero.WriteFile(fsys, utils.FallbackImportMapPath, []byte("{}"), 0644)) + // Run test + fc, err := GetFunctionConfig([]string{slug}, customImportMapPath, cast.Ptr(false), fsys) + // Check error + assert.NoError(t, err) + assert.Equal(t, customImportMapPath, fc[slug].ImportMap) + }) + + t.Run("returns empty string if no fallback", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + fc, err := GetFunctionConfig([]string{"test"}, "", nil, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, fc["test"].ImportMap) + }) + + t.Run("preserves absolute path", func(t *testing.T) { + path := "/tmp/import_map.json" + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, utils.FallbackImportMapPath, []byte("{}"), 0644)) + // Run test + fc, err := GetFunctionConfig([]string{"test"}, path, nil, fsys) + // Check error + assert.NoError(t, err) + assert.Equal(t, path, fc["test"].ImportMap) + }) +} diff --git a/internal/functions/deploy/testdata/geometries/Geometries.js b/internal/functions/deploy/testdata/geometries/Geometries.js new file mode 100644 index 0000000..e69de29 diff --git a/internal/functions/deploy/testdata/modules/imports.ts b/internal/functions/deploy/testdata/modules/imports.ts new file mode 100644 index 0000000..1b415c3 --- /dev/null +++ b/internal/functions/deploy/testdata/modules/imports.ts @@ -0,0 +1,90 @@ +import { + Component +} from '@angular2/core'; +import defaultMember from "module-name"; +import * as name from "module-name "; +import { member } from " module-name"; +import { member as alias } from "module-name"; +import { member1 , member2 } from "module-name"; +import { member1 , member2 as alias2 , member3 as alias3 } from "module-name"; +import { + Component +} from '@angular2/core'; +import defaultMember from "$module-name"; +import defaultMember, { member, member } from "module-name"; +import defaultMember, * as name from "module-name"; + +import * as name from "module-name " +import { member } from " module-name" +import { member as alias } from "module-name" +import { member1 , member2 } from "module-name" +import { member1 , member2 as alias2 , member3 as alias3 } from "module-name" +import { + Component +} from '@angular2/core' +import defaultMember from "$module-name" +import defaultMember, { member, member } from "module-name" +import defaultMember, * as name from "module-name" + +import "module-name"; +import React from "react" +import { Field } from "redux-form" +import "module-name"; + +import { + PlaneBufferGeometry, + OctahedronGeometry, + TorusBufferGeometry +} from '../geometries/Geometries.js'; + +import { + PlaneBufferGeometry, + OctahedronGeometry, + TorusBufferGeometry +} from '../geometries/Geometries.js' + +import("module-name/whatever.ts"); +import("module-name/whatever.ts") + +import { Field } from "redux-form"; +import MultiContentListView from "./views/ListView"; +import MultiContentAddView from "./views/AddView"; +import MultiContentEditView from "./views/EditView"; + +import { Field } from "redux-form" +import MultiContentListView from "./views/ListView" +import 
MultiContentAddView from "./views/AddView" +import MultiContentEditView from "./views/EditView" + + + + + +// *Add all needed dependency to this module +// *app requires those import modules to function + + +/** +* + *Add all needed dependency to this module + *app requires those import modules to function + * +**/ + +let modules = []; + + +import defaultExport from "module-name"; +import * as name from "module-name"; +import { export1 } from "module-name"; +import { export1 as alias1 } from "module-name"; +import { default as alias } from "module-name"; +import { export1, export2 } from "module-name"; +import { export1, export2 as alias2, /* … */ } from "module-name"; +import { "string name" as alias } from "module-name"; +import defaultExport, { export1, /* … */ } from "module-name"; +import defaultExport, * as name from "module-name"; +import "module-name"; +import { "a-b" as a } from "/modules/my-module.ts"; +import myDefault from "/modules/my-module.ts"; +import myDefault, * as myModule from "/modules/my-module.ts"; diff --git a/internal/functions/deploy/testdata/nested/deno.json b/internal/functions/deploy/testdata/nested/deno.json new file mode 100644 index 0000000..0967ef4 --- /dev/null +++ b/internal/functions/deploy/testdata/nested/deno.json @@ -0,0 +1 @@ +{} diff --git a/internal/functions/deploy/testdata/nested/index.ts b/internal/functions/deploy/testdata/nested/index.ts new file mode 100644 index 0000000..e69de29 diff --git a/internal/functions/deploy/testdata/shared/whatever.ts b/internal/functions/deploy/testdata/shared/whatever.ts new file mode 100644 index 0000000..879e79c --- /dev/null +++ b/internal/functions/deploy/testdata/shared/whatever.ts @@ -0,0 +1,3 @@ +import * as _ from "../nested/index.ts"; + +export * from "./mod.ts"; diff --git a/internal/functions/deploy/testdata/writes_import_map.form b/internal/functions/deploy/testdata/writes_import_map.form new file mode 100644 index 0000000..fee95d8 --- /dev/null +++ b/internal/functions/deploy/testdata/writes_import_map.form @@ -0,0 +1,20 @@ +--test +Content-Disposition: form-data; name="metadata" + +{"entrypoint_path":"testdata/nested/index.ts","import_map_path":"testdata/nested/deno.json","name":"nested","static_patterns":["testdata/*/*.js"],"verify_jwt":true} + +--test +Content-Disposition: form-data; name="file"; filename="testdata/nested/deno.json" +Content-Type: application/octet-stream + +{} + +--test +Content-Disposition: form-data; name="file"; filename="testdata/geometries/Geometries.js" +Content-Type: application/octet-stream + + +--test +Content-Disposition: form-data; name="file"; filename="testdata/nested/index.ts" +Content-Type: application/octet-stream + diff --git a/internal/functions/deploy/upload.go b/internal/functions/deploy/upload.go new file mode 100644 index 0000000..06accca --- /dev/null +++ b/internal/functions/deploy/upload.go @@ -0,0 +1,242 @@ +package deploy + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "mime/multipart" + "os" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/api" + "github.com/supabase/cli/pkg/cast" + "github.com/supabase/cli/pkg/config" + "github.com/supabase/cli/pkg/queue" +) + +var errNoDeploy = errors.New("All Functions are up to date.") + +func deploy(ctx context.Context, functionConfig config.FunctionConfig, maxJobs uint, fsys afero.Fs) error { + var toDeploy 
[]api.FunctionDeployMetadata + for slug, fc := range functionConfig { + if !fc.Enabled { + fmt.Fprintln(os.Stderr, "Skipped deploying Function:", slug) + continue + } + meta := api.FunctionDeployMetadata{ + Name: &slug, + EntrypointPath: filepath.ToSlash(fc.Entrypoint), + ImportMapPath: cast.Ptr(filepath.ToSlash(fc.ImportMap)), + VerifyJwt: &fc.VerifyJWT, + } + files := make([]string, len(fc.StaticFiles)) + for i, sf := range fc.StaticFiles { + files[i] = filepath.ToSlash(sf) + } + meta.StaticPatterns = &files + toDeploy = append(toDeploy, meta) + } + if len(toDeploy) == 0 { + return errors.New(errNoDeploy) + } else if len(toDeploy) == 1 { + param := api.V1DeployAFunctionParams{Slug: toDeploy[0].Name} + _, err := upload(ctx, param, toDeploy[0], fsys) + return err + } + return bulkUpload(ctx, toDeploy, maxJobs, fsys) +} + +func bulkUpload(ctx context.Context, toDeploy []api.FunctionDeployMetadata, maxJobs uint, fsys afero.Fs) error { + jq := queue.NewJobQueue(maxJobs) + toUpdate := make([]api.BulkUpdateFunctionBody, len(toDeploy)) + for i, meta := range toDeploy { + fmt.Fprintln(os.Stderr, "Deploying Function:", *meta.Name) + param := api.V1DeployAFunctionParams{ + Slug: meta.Name, + BundleOnly: cast.Ptr(true), + } + bundle := func() error { + resp, err := upload(ctx, param, meta, fsys) + if err != nil { + return err + } + toUpdate[i].Id = resp.Id + toUpdate[i].Name = resp.Name + toUpdate[i].Slug = resp.Slug + toUpdate[i].Version = resp.Version + toUpdate[i].EntrypointPath = resp.EntrypointPath + toUpdate[i].ImportMap = resp.ImportMap + toUpdate[i].ImportMapPath = resp.ImportMapPath + toUpdate[i].VerifyJwt = resp.VerifyJwt + toUpdate[i].Status = api.BulkUpdateFunctionBodyStatus(resp.Status) + toUpdate[i].CreatedAt = resp.CreatedAt + return nil + } + if err := jq.Put(bundle); err != nil { + return err + } + } + if err := jq.Collect(); err != nil { + return err + } + if resp, err := utils.GetSupabase().V1BulkUpdateFunctionsWithResponse(ctx, flags.ProjectRef, toUpdate); err != nil { + return errors.Errorf("failed to bulk update: %w", err) + } else if resp.JSON200 == nil { + return errors.Errorf("unexpected bulk update status %d: %s", resp.StatusCode(), string(resp.Body)) + } + return nil +} + +func upload(ctx context.Context, param api.V1DeployAFunctionParams, meta api.FunctionDeployMetadata, fsys afero.Fs) (*api.DeployFunctionResponse, error) { + body, w := io.Pipe() + form := multipart.NewWriter(w) + ctx, cancel := context.WithCancelCause(ctx) + go func() { + defer w.Close() + defer form.Close() + if err := writeForm(form, meta, fsys); err != nil { + // Since we are streaming files to the POST request body, any errors + // should be propagated to the request context to cancel the upload. 
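// A minimal sketch of this io.Pipe pattern, assuming a hypothetical
// produce helper that writes the request body:
//
//	body, w := io.Pipe()
//	ctx, cancel := context.WithCancelCause(ctx)
//	go func() {
//		defer w.Close()
//		if err := produce(w); err != nil {
//			cancel(err) // surfaced to the reader via context.Cause(ctx)
//		}
//	}()
//	// The HTTP client consumes body while produce is still writing,
//	// so the form is streamed instead of buffered in memory as a whole.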
+ cancel(err) + } + }() + resp, err := utils.GetSupabase().V1DeployAFunctionWithBodyWithResponse(ctx, flags.ProjectRef, &param, form.FormDataContentType(), body) + if cause := context.Cause(ctx); cause != ctx.Err() { + return nil, cause + } else if err != nil { + return nil, errors.Errorf("failed to deploy function: %w", err) + } else if resp.JSON201 == nil { + return nil, errors.Errorf("unexpected deploy status %d: %s", resp.StatusCode(), string(resp.Body)) + } + return resp.JSON201, nil +} + +func writeForm(form *multipart.Writer, meta api.FunctionDeployMetadata, fsys afero.Fs) error { + m, err := form.CreateFormField("metadata") + if err != nil { + return errors.Errorf("failed to create metadata: %w", err) + } + enc := json.NewEncoder(m) + if err := enc.Encode(meta); err != nil { + return errors.Errorf("failed to encode metadata: %w", err) + } + addFile := func(srcPath string, w io.Writer) error { + f, err := fsys.Open(filepath.FromSlash(srcPath)) + if err != nil { + return errors.Errorf("failed to read file: %w", err) + } + defer f.Close() + if fi, err := f.Stat(); err != nil { + return errors.Errorf("failed to stat file: %w", err) + } else if fi.IsDir() { + return errors.New("file path is a directory: " + srcPath) + } + fmt.Fprintf(os.Stderr, "Uploading asset (%s): %s\n", *meta.Name, srcPath) + r := io.TeeReader(f, w) + dst, err := form.CreateFormFile("file", srcPath) + if err != nil { + return errors.Errorf("failed to create form: %w", err) + } + if _, err := io.Copy(dst, r); err != nil { + return errors.Errorf("failed to write form: %w", err) + } + return nil + } + // Add import map + importMap := utils.ImportMap{} + if imPath := cast.Val(meta.ImportMapPath, ""); len(imPath) > 0 { + data, err := afero.ReadFile(fsys, filepath.FromSlash(imPath)) + if err != nil { + return errors.Errorf("failed to load import map: %w", err) + } + if err := importMap.Parse(data); err != nil { + return err + } + // TODO: replace with addFile once edge runtime supports jsonc + fmt.Fprintf(os.Stderr, "Uploading asset (%s): %s\n", *meta.Name, imPath) + f, err := form.CreateFormFile("file", imPath) + if err != nil { + return errors.Errorf("failed to create import map: %w", err) + } + if _, err := f.Write(data); err != nil { + return errors.Errorf("failed to write import map: %w", err) + } + } + // Add static files + patterns := config.Glob(cast.Val(meta.StaticPatterns, []string{})) + files, err := patterns.Files(afero.NewIOFS(fsys)) + if err != nil { + fmt.Fprintln(os.Stderr, utils.Yellow("WARNING:"), err) + } + for _, sfPath := range files { + if err := addFile(sfPath, io.Discard); err != nil { + return err + } + } + return walkImportPaths(meta.EntrypointPath, importMap, addFile) +} + +// Matches both static and dynamic import/export statements; the module specifier is captured in group 1 (from clause) or group 2 (dynamic import). +// Ref: https://regex101.com/r/DfBdJA/1 +var importPathPattern = regexp.MustCompile(`(?i)(?:import|export)\s+(?:{[^{}]+}|.*?)\s*(?:from)?\s*['"](.*?)['"]|import\(\s*['"](.*?)['"]\)`) + +func walkImportPaths(srcPath string, importMap utils.ImportMap, readFile func(curr string, w io.Writer) error) error { + seen := map[string]struct{}{} + // DFS because it's more efficient to pop from end of array + q := make([]string, 1) + q[0] = srcPath + for len(q) > 0 { + curr := q[len(q)-1] + q = q[:len(q)-1] + // Assume no file is symlinked + if _, ok := seen[curr]; ok { + continue + } + seen[curr] = struct{}{} + // Read into memory for regex match later + var buf bytes.Buffer + if err := readFile(curr, &buf); errors.Is(err, os.ErrNotExist) { + fmt.Fprintln(os.Stderr, utils.Yellow("WARNING:"), err) + continue + } else if err != nil { + return
err + } + // Traverse all modules imported by the current source file + for _, matches := range importPathPattern.FindAllStringSubmatch(buf.String(), -1) { + if len(matches) < 3 { + continue + } + // Matches 'from' clause if present, else fallback to 'import' + mod := matches[1] + if len(mod) == 0 { + mod = matches[2] + } + mod = strings.TrimSpace(mod) + // Substitute kv from import map + for k, v := range importMap.Imports { + if strings.HasPrefix(mod, k) { + mod = v + mod[len(k):] + } + } + // Deno import path must begin with these prefixes + if strings.HasPrefix(mod, "./") || strings.HasPrefix(mod, "../") { + mod = path.Join(path.Dir(curr), mod) + } else if !strings.HasPrefix(mod, "/") { + continue + } + if len(path.Ext(mod)) > 0 { + // Cleans import path to help detect duplicates + q = append(q, path.Clean(mod)) + } + } + } + return nil +} diff --git a/internal/functions/deploy/upload_test.go b/internal/functions/deploy/upload_test.go new file mode 100644 index 0000000..12c00c1 --- /dev/null +++ b/internal/functions/deploy/upload_test.go @@ -0,0 +1,217 @@ +package deploy + +import ( + "bytes" + "context" + "embed" + "errors" + "io" + "mime/multipart" + "net/http" + "os" + "path" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/api" + "github.com/supabase/cli/pkg/cast" + "github.com/supabase/cli/pkg/config" +) + +//go:embed testdata +var testImports embed.FS + +type MockFS struct { + mock.Mock +} + +func (m *MockFS) ReadFile(srcPath string, w io.Writer) error { + _ = m.Called(srcPath) + data, err := testImports.ReadFile(srcPath) + if err != nil { + return err + } + if _, err := w.Write(data); err != nil { + return err + } + return nil +} + +func TestImportPaths(t *testing.T) { + t.Run("iterates all import paths", func(t *testing.T) { + // Setup in-memory fs + fsys := MockFS{} + fsys.On("ReadFile", "/modules/my-module.ts").Once() + fsys.On("ReadFile", "testdata/modules/imports.ts").Once() + fsys.On("ReadFile", "testdata/geometries/Geometries.js").Once() + // Run test + im := utils.ImportMap{} + err := walkImportPaths("testdata/modules/imports.ts", im, fsys.ReadFile) + // Check error + assert.NoError(t, err) + fsys.AssertExpectations(t) + }) + + t.Run("iterates with import map", func(t *testing.T) { + // Setup in-memory fs + fsys := MockFS{} + fsys.On("ReadFile", "/modules/my-module.ts").Once() + fsys.On("ReadFile", "testdata/modules/imports.ts").Once() + fsys.On("ReadFile", "testdata/geometries/Geometries.js").Once() + fsys.On("ReadFile", "testdata/shared/whatever.ts").Once() + fsys.On("ReadFile", "testdata/shared/mod.ts").Once() + fsys.On("ReadFile", "testdata/nested/index.ts").Once() + // Run test + im := utils.ImportMap{Imports: map[string]string{ + "module-name/": "../shared/", + }} + err := walkImportPaths("testdata/modules/imports.ts", im, fsys.ReadFile) + // Check error + assert.NoError(t, err) + fsys.AssertExpectations(t) + }) +} + +func assertFormEqual(t *testing.T, actual []byte) { + snapshot := path.Join("testdata", path.Base(t.Name())+".form") + expected, err := testImports.ReadFile(snapshot) + if errors.Is(err, os.ErrNotExist) { + assert.NoError(t, os.WriteFile(snapshot, actual, 0600)) + } + assert.Equal(t, string(expected), string(actual)) +} + +func TestWriteForm(t 
*testing.T) { + t.Run("writes import map", func(t *testing.T) { + var buf bytes.Buffer + form := multipart.NewWriter(&buf) + require.NoError(t, form.SetBoundary("test")) + // Setup in-memory fs + fsys := afero.FromIOFS{FS: testImports} + // Run test + err := writeForm(form, api.FunctionDeployMetadata{ + Name: cast.Ptr("nested"), + VerifyJwt: cast.Ptr(true), + EntrypointPath: "testdata/nested/index.ts", + ImportMapPath: cast.Ptr("testdata/nested/deno.json"), + StaticPatterns: cast.Ptr([]string{"testdata/*/*.js"}), + }, fsys) + // Check error + assert.NoError(t, err) + assertFormEqual(t, buf.Bytes()) + }) + + t.Run("throws error on missing file", func(t *testing.T) { + var buf bytes.Buffer + form := multipart.NewWriter(&buf) + require.NoError(t, form.SetBoundary("test")) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := writeForm(form, api.FunctionDeployMetadata{ + ImportMapPath: cast.Ptr("testdata/import_map.json"), + }, fsys) + // Check error + assert.ErrorIs(t, err, os.ErrNotExist) + }) + + t.Run("throws error on directory path", func(t *testing.T) { + var buf bytes.Buffer + form := multipart.NewWriter(&buf) + require.NoError(t, form.SetBoundary("test")) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := writeForm(form, api.FunctionDeployMetadata{ + StaticPatterns: cast.Ptr([]string{"testdata"}), + }, fsys) + // Check error + assert.ErrorContains(t, err, "file path is a directory:") + }) +} + +func TestDeployAll(t *testing.T) { + flags.ProjectRef = apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + t.Run("deploys single slug", func(t *testing.T) { + c := config.FunctionConfig{"demo": { + Enabled: true, + Entrypoint: "testdata/shared/whatever.ts", + }} + // Setup in-memory fs + fsys := afero.FromIOFS{FS: testImports} + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/projects/"+flags.ProjectRef+"/functions/deploy"). + MatchParam("slug", "demo"). + Reply(http.StatusCreated). + JSON(api.DeployFunctionResponse{}) + // Run test + err := deploy(context.Background(), c, 1, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("deploys multiple slugs", func(t *testing.T) { + c := config.FunctionConfig{ + "test-ts": { + Enabled: true, + Entrypoint: "testdata/shared/whatever.ts", + }, + "test-js": { + Enabled: true, + Entrypoint: "testdata/geometries/Geometries.js", + }, + } + // Setup in-memory fs + fsys := afero.FromIOFS{FS: testImports} + // Setup mock api + defer gock.OffAll() + for slug := range c { + gock.New(utils.DefaultApiHost). + Post("/v1/projects/"+flags.ProjectRef+"/functions/deploy"). + MatchParam("slug", slug). + Reply(http.StatusCreated). + JSON(api.DeployFunctionResponse{Id: slug}) + } + gock.New(utils.DefaultApiHost). + Put("/v1/projects/" + flags.ProjectRef + "/functions"). + Reply(http.StatusOK). + JSON(api.BulkUpdateFunctionResponse{}) + // Run test + err := deploy(context.Background(), c, 1, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on network failure", func(t *testing.T) { + errNetwork := errors.New("network") + c := config.FunctionConfig{"demo": {Enabled: true}} + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). 
+ Post("/v1/projects/"+flags.ProjectRef+"/functions/deploy"). + MatchParam("slug", "demo"). + ReplyError(errNetwork) + // Run test + err := deploy(context.Background(), c, 1, fsys) + // Check error + assert.ErrorIs(t, err, errNetwork) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/functions/download/download.go b/internal/functions/download/download.go new file mode 100644 index 0000000..ca99c1b --- /dev/null +++ b/internal/functions/download/download.go @@ -0,0 +1,200 @@ +package download + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path" + "path/filepath" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/api" +) + +var ( + legacyEntrypointPath = "file:///src/index.ts" + legacyImportMapPath = "file:///src/import_map.json" +) + +func RunLegacy(ctx context.Context, slug string, projectRef string, fsys afero.Fs) error { + // 1. Sanity checks. + { + if err := utils.ValidateFunctionSlug(slug); err != nil { + return err + } + } + if err := utils.InstallOrUpgradeDeno(ctx, fsys); err != nil { + return err + } + + scriptDir, err := utils.CopyDenoScripts(ctx, fsys) + if err != nil { + return err + } + + // 2. Download Function. + if err := downloadFunction(ctx, projectRef, slug, scriptDir.ExtractPath); err != nil { + return err + } + + fmt.Println("Downloaded Function " + utils.Aqua(slug) + " from project " + utils.Aqua(projectRef) + ".") + return nil +} + +func getFunctionMetadata(ctx context.Context, projectRef, slug string) (*api.FunctionSlugResponse, error) { + resp, err := utils.GetSupabase().V1GetAFunctionWithResponse(ctx, projectRef, slug) + if err != nil { + return nil, errors.Errorf("failed to get function metadata: %w", err) + } + + switch resp.StatusCode() { + case http.StatusNotFound: + return nil, errors.Errorf("Function %s does not exist on the Supabase project.", utils.Aqua(slug)) + case http.StatusOK: + break + default: + return nil, errors.Errorf("Failed to download Function %s on the Supabase project: %s", utils.Aqua(slug), string(resp.Body)) + } + + if resp.JSON200.EntrypointPath == nil { + resp.JSON200.EntrypointPath = &legacyEntrypointPath + } + if resp.JSON200.ImportMapPath == nil { + resp.JSON200.ImportMapPath = &legacyImportMapPath + } + return resp.JSON200, nil +} + +func downloadFunction(ctx context.Context, projectRef, slug, extractScriptPath string) error { + fmt.Println("Downloading " + utils.Bold(slug)) + denoPath, err := utils.GetDenoPath() + if err != nil { + return err + } + + meta, err := getFunctionMetadata(ctx, projectRef, slug) + if err != nil { + return err + } + + resp, err := utils.GetSupabase().V1GetAFunctionBodyWithResponse(ctx, projectRef, slug) + if err != nil { + return errors.Errorf("failed to get function body: %w", err) + } + if resp.StatusCode() != http.StatusOK { + return errors.New("Unexpected error downloading Function: " + string(resp.Body)) + } + + resBuf := bytes.NewReader(resp.Body) + funcDir := filepath.Join(utils.FunctionsDir, slug) + args := []string{"run", "-A", extractScriptPath, funcDir, *meta.EntrypointPath} + cmd := exec.CommandContext(ctx, denoPath, args...) 
+ var errBuf bytes.Buffer + cmd.Stdin = resBuf + cmd.Stdout = os.Stdout + cmd.Stderr = &errBuf + if err := cmd.Run(); err != nil { + return errors.Errorf("Error downloading function: %w\n%v", err, errBuf.String()) + } + return nil +} + +func Run(ctx context.Context, slug string, projectRef string, useLegacyBundle bool, fsys afero.Fs) error { + if useLegacyBundle { + return RunLegacy(ctx, slug, projectRef, fsys) + } + // 1. Sanity check + if err := flags.LoadConfig(fsys); err != nil { + return err + } + // 2. Download eszip to temp file + eszipPath, err := downloadOne(ctx, slug, projectRef, fsys) + if err != nil { + return err + } + defer func() { + if err := fsys.Remove(eszipPath); err != nil { + fmt.Fprintln(os.Stderr, err) + } + }() + // Extract eszip to functions directory + err = extractOne(ctx, slug, eszipPath) + if err != nil { + utils.CmdSuggestion += suggestLegacyBundle(slug) + } + return err +} + +func downloadOne(ctx context.Context, slug, projectRef string, fsys afero.Fs) (string, error) { + fmt.Println("Downloading " + utils.Bold(slug)) + resp, err := utils.GetSupabase().V1GetAFunctionBody(ctx, projectRef, slug) + if err != nil { + return "", errors.Errorf("failed to get function body: %w", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", errors.Errorf("Error status %d: unexpected error downloading Function", resp.StatusCode) + } + return "", errors.Errorf("Error status %d: %s", resp.StatusCode, string(body)) + } + // Create temp file to store downloaded eszip + eszipPath := filepath.Join(utils.TempDir, fmt.Sprintf("output_%s.eszip", slug)) + if err := utils.MkdirIfNotExistFS(fsys, utils.TempDir); err != nil { + return "", err + } + if err := afero.WriteReader(fsys, eszipPath, resp.Body); err != nil { + return "", errors.Errorf("failed to download file: %w", err) + } + return eszipPath, nil +} + +func extractOne(ctx context.Context, slug, eszipPath string) error { + hostFuncDirPath, err := filepath.Abs(filepath.Join(utils.FunctionsDir, slug)) + if err != nil { + return errors.Errorf("failed to resolve absolute path: %w", err) + } + + hostEszipPath, err := filepath.Abs(eszipPath) + if err != nil { + return errors.Errorf("failed to resolve eszip path: %w", err) + } + dockerEszipPath := path.Join(utils.DockerEszipDir, filepath.Base(hostEszipPath)) + + binds := []string{ + // Reuse deno cache directory, ie. 
DENO_DIR, between container restarts + // https://denolib.gitbook.io/guide/advanced/deno_dir-code-fetch-and-cache + utils.EdgeRuntimeId + ":/root/.cache/deno:rw", + hostEszipPath + ":" + dockerEszipPath + ":ro", + hostFuncDirPath + ":" + utils.DockerDenoDir + ":rw", + } + + return utils.DockerRunOnceWithConfig( + ctx, + container.Config{ + Image: utils.Config.EdgeRuntime.Image, + Cmd: []string{"unbundle", "--eszip", dockerEszipPath, "--output", utils.DockerDenoDir}, + }, + container.HostConfig{ + Binds: binds, + }, + network.NetworkingConfig{}, + "", + os.Stdout, + os.Stderr, + ) +} + +func suggestLegacyBundle(slug string) string { + return fmt.Sprintf("\nIf your function is deployed using CLI < 1.120.0, try running %s instead.", utils.Aqua("supabase functions download --legacy-bundle "+slug)) +} diff --git a/internal/functions/download/download_test.go b/internal/functions/download/download_test.go new file mode 100644 index 0000000..b727f95 --- /dev/null +++ b/internal/functions/download/download_test.go @@ -0,0 +1,237 @@ +package download + +import ( + "context" + "errors" + "fmt" + "log" + "net/http" + "os" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func TestMain(m *testing.M) { + // Setup fake deno binary + if len(os.Args) > 1 && (os.Args[1] == "bundle" || os.Args[1] == "upgrade" || os.Args[1] == "run") { + msg := os.Getenv("TEST_DENO_ERROR") + if msg != "" { + fmt.Fprintln(os.Stderr, msg) + os.Exit(1) + } + os.Exit(0) + } + denoPath, err := os.Executable() + if err != nil { + log.Fatalln(err) + } + utils.DenoPathOverride = denoPath + // Run test suite + os.Exit(m.Run()) +} + +func TestDownloadCommand(t *testing.T) { + const slug = "test-func" + + t.Run("downloads eszip bundle", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Setup valid deno path + _, err := fsys.Create(utils.DenoPathOverride) + require.NoError(t, err) + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/functions/" + slug). + Reply(http.StatusOK). + JSON(api.FunctionResponse{Id: "1"}) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/functions/" + slug + "/body").
+ Reply(http.StatusOK) + // Run test + err = Run(context.Background(), slug, project, true, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on malformed slug", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Run test + err := Run(context.Background(), "@", project, true, fsys) + // Check error + assert.ErrorContains(t, err, "Invalid Function name.") + }) + + t.Run("throws error on failure to install deno", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewReadOnlyFs(afero.NewMemMapFs()) + // Setup valid project ref + project := apitest.RandomProjectRef() + // Run test + err := Run(context.Background(), slug, project, true, fsys) + // Check error + assert.ErrorContains(t, err, "operation not permitted") + }) + + t.Run("throws error on copy failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid deno path + _, err := fsys.Create(utils.DenoPathOverride) + require.NoError(t, err) + // Run test + err = Run(context.Background(), slug, project, true, afero.NewReadOnlyFs(fsys)) + // Check error + assert.ErrorContains(t, err, "operation not permitted") + }) + + t.Run("throws error on missing function", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Setup valid deno path + _, err := fsys.Create(utils.DenoPathOverride) + require.NoError(t, err) + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/functions/" + slug). + Reply(http.StatusNotFound). + JSON(map[string]string{"message": "Function not found"}) + // Run test + err = Run(context.Background(), slug, project, true, fsys) + // Check error + assert.ErrorContains(t, err, "Function test-func does not exist on the Supabase project.") + }) +} + +func TestDownloadFunction(t *testing.T) { + const slug = "test-func" + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + t.Run("throws error on network error", func(t *testing.T) { + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/functions/" + slug). + Reply(http.StatusOK). + JSON(api.FunctionResponse{Id: "1"}) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/functions/" + slug + "/body"). + ReplyError(errors.New("network error")) + // Run test + err := downloadFunction(context.Background(), project, slug, "") + // Check error + assert.ErrorContains(t, err, "network error") + }) + + t.Run("throws error on service unavailable", func(t *testing.T) { + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/functions/" + slug). + Reply(http.StatusOK). + JSON(api.FunctionResponse{Id: "1"}) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/functions/" + slug + "/body"). 
+ Reply(http.StatusServiceUnavailable) + // Run test + err := downloadFunction(context.Background(), project, slug, "") + // Check error + assert.ErrorContains(t, err, "Unexpected error downloading Function:") + }) + + t.Run("throws error on extract failure", func(t *testing.T) { + // Setup deno error + t.Setenv("TEST_DENO_ERROR", "extract failed") + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/functions/" + slug). + Reply(http.StatusOK). + JSON(api.FunctionResponse{Id: "1"}) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/functions/" + slug + "/body"). + Reply(http.StatusOK) + // Run test + err := downloadFunction(context.Background(), project, slug, "") + // Check error + assert.ErrorContains(t, err, "Error downloading function: exit status 1\nextract failed\n") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestGetMetadata(t *testing.T) { + const slug = "test-func" + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + t.Run("fallback to default paths", func(t *testing.T) { + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/functions/" + slug). + Reply(http.StatusOK). + JSON(api.FunctionResponse{Id: "1"}) + // Run test + meta, err := getFunctionMetadata(context.Background(), project, slug) + // Check error + assert.NoError(t, err) + assert.Equal(t, legacyEntrypointPath, *meta.EntrypointPath) + assert.Equal(t, legacyImportMapPath, *meta.ImportMapPath) + }) + + t.Run("throws error on network error", func(t *testing.T) { + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/functions/" + slug). + ReplyError(errors.New("network error")) + // Run test + meta, err := getFunctionMetadata(context.Background(), project, slug) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Nil(t, meta) + }) + + t.Run("throws error on service unavailable", func(t *testing.T) { + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/functions/" + slug). 
+ Reply(http.StatusServiceUnavailable) + // Run test + meta, err := getFunctionMetadata(context.Background(), project, slug) + // Check error + assert.ErrorContains(t, err, "Failed to download Function test-func on the Supabase project:") + assert.Nil(t, meta) + }) +} diff --git a/internal/functions/list/list.go b/internal/functions/list/list.go new file mode 100644 index 0000000..d0d2d9e --- /dev/null +++ b/internal/functions/list/list.go @@ -0,0 +1,41 @@ +package list + +import ( + "context" + "fmt" + "time" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context, projectRef string, fsys afero.Fs) error { + resp, err := utils.GetSupabase().V1ListAllFunctionsWithResponse(ctx, projectRef) + if err != nil { + return errors.Errorf("failed to list functions: %w", err) + } + + if resp.JSON200 == nil { + return errors.New("Unexpected error retrieving functions: " + string(resp.Body)) + } + + table := `|ID|NAME|SLUG|STATUS|VERSION|UPDATED_AT (UTC)| +|-|-|-|-|-|-| +` + for _, function := range *resp.JSON200 { + t := time.UnixMilli(function.UpdatedAt) + table += fmt.Sprintf( + "|`%s`|`%s`|`%s`|`%s`|`%d`|`%s`|\n", + function.Id, + function.Name, + function.Slug, + function.Status, + function.Version, + t.UTC().Format("2006-01-02 15:04:05"), + ) + } + + return list.RenderTable(table) +} diff --git a/internal/functions/list/list_test.go b/internal/functions/list/list_test.go new file mode 100644 index 0000000..e9c7f26 --- /dev/null +++ b/internal/functions/list/list_test.go @@ -0,0 +1,84 @@ +package list + +import ( + "context" + "errors" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func TestFunctionsListCommand(t *testing.T) { + t.Run("lists all functions", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + + testEntrypointPath := "test-entrypoint-path" + testImportMapPath := "test-import-map-path" + testImportMap := false + testVerifyJwt := true + + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/functions"). + Reply(200). 
+ JSON([]api.FunctionResponse{{ + Id: "test-id", + Name: "Test Function", + Slug: "test-function", + Status: api.FunctionResponseStatusACTIVE, + UpdatedAt: 1687423025152.000000, + CreatedAt: 1687423025152.000000, + Version: 1.000000, + VerifyJwt: &testVerifyJwt, + EntrypointPath: &testEntrypointPath, + ImportMap: &testImportMap, + ImportMapPath: &testImportMapPath, + }}) + // Run test + err := Run(context.Background(), project, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on missing access token", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), "", fsys) + // Check error + assert.ErrorContains(t, err, "Unexpected error retrieving functions") + }) + + t.Run("throws error on network error", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/functions"). + ReplyError(errors.New("network error")) + // Run test + err := Run(context.Background(), project, fsys) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/functions/new/new.go b/internal/functions/new/new.go new file mode 100644 index 0000000..67a9893 --- /dev/null +++ b/internal/functions/new/new.go @@ -0,0 +1,80 @@ +package new + +import ( + "context" + _ "embed" + "fmt" + "text/template" + "os" + "path/filepath" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" +) + +var ( + //go:embed templates/index.ts + indexEmbed string + //go:embed templates/deno.json + denoEmbed string + //go:embed templates/.npmrc + npmrcEmbed string + + indexTemplate = template.Must(template.New("index").Parse(indexEmbed)) +) + +type indexConfig struct { + URL string + Token string +} + +func Run(ctx context.Context, slug string, fsys afero.Fs) error { + // 1. Sanity checks. + funcDir := filepath.Join(utils.FunctionsDir, slug) + { + if err := utils.ValidateFunctionSlug(slug); err != nil { + return err + } + } + + // 2. Create new function.
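// Scaffolding is template driven: index.ts is rendered from the embedded
// template, while deno.json and .npmrc are written verbatim. A minimal
// sketch of the rendering step, assuming a hypothetical out writer:
//
//	tmpl := template.Must(template.New("index").Parse("URL={{ .URL }}"))
//	err := tmpl.Option("missingkey=error").Execute(out, indexConfig{URL: "http://127.0.0.1:54321"})
//
// missingkey=error makes execution fail on a missing map key instead of
// silently rendering "<no value>".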
+ { + if err := utils.MkdirIfNotExistFS(fsys, funcDir); err != nil { + return err + } + + // Load config if available + if err := flags.LoadConfig(fsys); err != nil { + utils.CmdSuggestion = "" + } + + if err := createTemplateFile(fsys, filepath.Join(funcDir, "index.ts"), indexTemplate, indexConfig{ + URL: utils.GetApiUrl("/functions/v1/" + slug), + Token: utils.Config.Auth.AnonKey, + }); err != nil { + return errors.Errorf("failed to create function entrypoint: %w", err) + } + + if err := afero.WriteFile(fsys, filepath.Join(funcDir, "deno.json"), []byte(denoEmbed), 0644); err != nil { + return errors.Errorf("failed to create deno.json config: %w", err) + } + + if err := afero.WriteFile(fsys, filepath.Join(funcDir, ".npmrc"), []byte(npmrcEmbed), 0644); err != nil { + return errors.Errorf("failed to create .npmrc config: %w", err) + } + } + + fmt.Println("Created new Function at " + utils.Bold(funcDir)) + return nil +} + +func createTemplateFile(fsys afero.Fs, path string, tmpl *template.Template, data interface{}) error { + f, err := fsys.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644) + if err != nil { + return err + } + defer f.Close() + return tmpl.Option("missingkey=error").Execute(f, data) +} diff --git a/internal/functions/new/new_test.go b/internal/functions/new/new_test.go new file mode 100644 index 0000000..8e00fa6 --- /dev/null +++ b/internal/functions/new/new_test.go @@ -0,0 +1,58 @@ +package new + +import ( + "context" + "path/filepath" + "testing" + + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/utils" +) + +func TestNewCommand(t *testing.T) { + t.Run("creates new function", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + assert.NoError(t, Run(context.Background(), "test-func", fsys)) + // Validate output + funcPath := filepath.Join(utils.FunctionsDir, "test-func", "index.ts") + content, err := afero.ReadFile(fsys, funcPath) + assert.NoError(t, err) + assert.Contains(t, string(content), + "curl -i --location --request POST 'http://127.0.0.1:54321/functions/v1/test-func'", + ) + + // Verify deno.json exists + denoPath := filepath.Join(utils.FunctionsDir, "test-func", "deno.json") + _, err = afero.ReadFile(fsys, denoPath) + assert.NoError(t, err, "deno.json should be created") + + // Verify .npmrc exists + npmrcPath := filepath.Join(utils.FunctionsDir, "test-func", ".npmrc") + _, err = afero.ReadFile(fsys, npmrcPath) + assert.NoError(t, err, ".npmrc should be created") + }) + + t.Run("throws error on malformed slug", func(t *testing.T) { + assert.Error(t, Run(context.Background(), "@", afero.NewMemMapFs())) + }) + + t.Run("throws error on duplicate slug", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + funcPath := filepath.Join(utils.FunctionsDir, "test-func", "index.ts") + require.NoError(t, afero.WriteFile(fsys, funcPath, []byte{}, 0644)) + // Run test + assert.Error(t, Run(context.Background(), "test-func", fsys)) + }) + + t.Run("throws error on permission denied", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewReadOnlyFs(afero.NewMemMapFs()) + // Run test + assert.Error(t, Run(context.Background(), "test-func", fsys)) + }) +} diff --git a/internal/functions/new/templates/.npmrc b/internal/functions/new/templates/.npmrc new file mode 100644 index 0000000..48c6388 --- /dev/null +++ b/internal/functions/new/templates/.npmrc @@ -0,0 +1,3 @@ +# Configuration for private npm package 
dependencies +# For more information on using private registries with Edge Functions, see: +# https://supabase.com/docs/guides/functions/import-maps#importing-from-private-registries diff --git a/internal/functions/new/templates/deno.json b/internal/functions/new/templates/deno.json new file mode 100644 index 0000000..f6ca845 --- /dev/null +++ b/internal/functions/new/templates/deno.json @@ -0,0 +1,3 @@ +{ + "imports": {} +} diff --git a/internal/functions/new/templates/index.ts b/internal/functions/new/templates/index.ts new file mode 100644 index 0000000..c7f64ff --- /dev/null +++ b/internal/functions/new/templates/index.ts @@ -0,0 +1,32 @@ +// Follow this setup guide to integrate the Deno language server with your editor: +// https://deno.land/manual/getting_started/setup_your_environment +// This enables autocomplete, go to definition, etc. + +// Setup type definitions for built-in Supabase Runtime APIs +import "jsr:@supabase/functions-js/edge-runtime.d.ts" + +console.log("Hello from Functions!") + +Deno.serve(async (req) => { + const { name } = await req.json() + const data = { + message: `Hello ${name}!`, + } + + return new Response( + JSON.stringify(data), + { headers: { "Content-Type": "application/json" } }, + ) +}) + +/* To invoke locally: + + 1. Run `supabase start` (see: https://supabase.com/docs/reference/cli/supabase-start) + 2. Make an HTTP request: + + curl -i --location --request POST '{{ .URL }}' \ + --header 'Authorization: Bearer {{ .Token }}' \ + --header 'Content-Type: application/json' \ + --data '{"name":"Functions"}' + +*/ diff --git a/internal/functions/serve/serve.go b/internal/functions/serve/serve.go new file mode 100644 index 0000000..978cb9e --- /dev/null +++ b/internal/functions/serve/serve.go @@ -0,0 +1,242 @@ +package serve + +import ( + "context" + _ "embed" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/go-connections/nat" + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/spf13/viper" + "github.com/supabase/cli/internal/functions/deploy" + "github.com/supabase/cli/internal/secrets/set" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" +) + +type InspectMode string + +const ( + InspectModeRun InspectMode = "run" + InspectModeBrk InspectMode = "brk" + InspectModeWait InspectMode = "wait" +) + +func (mode InspectMode) toFlag() string { + switch mode { + case InspectModeBrk: + return "inspect-brk" + case InspectModeWait: + return "inspect-wait" + case InspectModeRun: + fallthrough + default: + return "inspect" + } +} + +type RuntimeOption struct { + InspectMode *InspectMode + InspectMain bool +} + +func (i *RuntimeOption) toArgs() []string { + flags := []string{} + if i.InspectMode != nil { + flags = append(flags, fmt.Sprintf("--%s=0.0.0.0:%d", i.InspectMode.toFlag(), dockerRuntimeInspectorPort)) + if i.InspectMain { + flags = append(flags, "--inspect-main") + } + } + return flags +} + +const ( + dockerRuntimeServerPort = 8081 + dockerRuntimeInspectorPort = 8083 +) + +var ( + //go:embed templates/main.ts + mainFuncEmbed string +) + +func Run(ctx context.Context, envFilePath string, noVerifyJWT *bool, importMapPath string, runtimeOption RuntimeOption, fsys afero.Fs) error { + // 1. Sanity checks. + if err := flags.LoadConfig(fsys); err != nil { + return err + } + if err := utils.AssertSupabaseDbIsRunning(); err != nil { + return err + } + // 2. 
Remove existing container. + _ = utils.Docker.ContainerRemove(ctx, utils.EdgeRuntimeId, container.RemoveOptions{ + RemoveVolumes: true, + Force: true, + }) + // Use network alias because Deno cannot resolve `_` in hostname + dbUrl := fmt.Sprintf("postgresql://postgres:postgres@%s:5432/postgres", utils.DbAliases[0]) + // 3. Serve and log to console + fmt.Fprintln(os.Stderr, "Setting up Edge Functions runtime...") + if err := ServeFunctions(ctx, envFilePath, noVerifyJWT, importMapPath, dbUrl, runtimeOption, fsys); err != nil { + return err + } + if err := utils.DockerStreamLogs(ctx, utils.EdgeRuntimeId, os.Stdout, os.Stderr); err != nil { + return err + } + fmt.Println("Stopped serving " + utils.Bold(utils.FunctionsDir)) + return nil +} + +func ServeFunctions(ctx context.Context, envFilePath string, noVerifyJWT *bool, importMapPath string, dbUrl string, runtimeOption RuntimeOption, fsys afero.Fs) error { + // 1. Load default values + if envFilePath == "" { + if f, err := fsys.Stat(utils.FallbackEnvFilePath); err == nil && !f.IsDir() { + envFilePath = utils.FallbackEnvFilePath + } + } else if !filepath.IsAbs(envFilePath) { + envFilePath = filepath.Join(utils.CurrentDirAbs, envFilePath) + } + // 2. Parse user defined env + env, err := parseEnvFile(envFilePath, fsys) + if err != nil { + return err + } + env = append(env, + fmt.Sprintf("SUPABASE_URL=http://%s:8000", utils.KongAliases[0]), + "SUPABASE_ANON_KEY="+utils.Config.Auth.AnonKey, + "SUPABASE_SERVICE_ROLE_KEY="+utils.Config.Auth.ServiceRoleKey, + "SUPABASE_DB_URL="+dbUrl, + "SUPABASE_INTERNAL_JWT_SECRET="+utils.Config.Auth.JwtSecret, + fmt.Sprintf("SUPABASE_INTERNAL_HOST_PORT=%d", utils.Config.Api.Port), + ) + if viper.GetBool("DEBUG") { + env = append(env, "SUPABASE_INTERNAL_DEBUG=true") + } + if runtimeOption.InspectMode != nil { + env = append(env, "SUPABASE_INTERNAL_WALLCLOCK_LIMIT_SEC=0") + } + // 3. Parse custom import map + cwd, err := os.Getwd() + if err != nil { + return errors.Errorf("failed to get working directory: %w", err) + } + binds, functionsConfigString, err := populatePerFunctionConfigs(cwd, importMapPath, noVerifyJWT, fsys) + if err != nil { + return err + } + env = append(env, "SUPABASE_INTERNAL_FUNCTIONS_CONFIG="+functionsConfigString) + // 4. Parse entrypoint script + cmd := append([]string{ + "edge-runtime", + "start", + "--main-service=/root", + fmt.Sprintf("--port=%d", dockerRuntimeServerPort), + fmt.Sprintf("--policy=%s", utils.Config.EdgeRuntime.Policy), + }, runtimeOption.toArgs()...) + if viper.GetBool("DEBUG") { + cmd = append(cmd, "--verbose") + } + cmdString := strings.Join(cmd, " ") + entrypoint := []string{"sh", "-c", `cat <<'EOF' > /root/index.ts && ` + cmdString + ` +` + mainFuncEmbed + ` +EOF +`} + // 5. Parse exposed ports + dockerRuntimePort := nat.Port(fmt.Sprintf("%d/tcp", dockerRuntimeServerPort)) + exposedPorts := nat.PortSet{dockerRuntimePort: struct{}{}} + portBindings := nat.PortMap{} + if runtimeOption.InspectMode != nil { + dockerInspectorPort := nat.Port(fmt.Sprintf("%d/tcp", dockerRuntimeInspectorPort)) + exposedPorts[dockerInspectorPort] = struct{}{} + portBindings[dockerInspectorPort] = []nat.PortBinding{{ + HostPort: strconv.FormatUint(uint64(utils.Config.EdgeRuntime.InspectorPort), 10), + }} + } + // 6. 
Start container + _, err = utils.DockerStart( + ctx, + container.Config{ + Image: utils.Config.EdgeRuntime.Image, + Env: env, + Entrypoint: entrypoint, + ExposedPorts: exposedPorts, + WorkingDir: utils.ToDockerPath(cwd), + // No tcp health check because edge runtime logs them as client connection error + }, + container.HostConfig{ + Binds: binds, + PortBindings: portBindings, + }, + network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + utils.NetId: { + Aliases: utils.EdgeRuntimeAliases, + }, + }, + }, + utils.EdgeRuntimeId, + ) + return err +} + +func parseEnvFile(envFilePath string, fsys afero.Fs) ([]string, error) { + env := []string{} + if len(envFilePath) == 0 { + return env, nil + } + envMap, err := set.ParseEnvFile(envFilePath, fsys) + if err != nil { + return env, err + } + for name, value := range envMap { + if strings.HasPrefix(name, "SUPABASE_") { + fmt.Fprintln(os.Stderr, "Env name cannot start with SUPABASE_, skipping: "+name) + continue + } + env = append(env, name+"="+value) + } + return env, nil +} + +func populatePerFunctionConfigs(cwd, importMapPath string, noVerifyJWT *bool, fsys afero.Fs) ([]string, string, error) { + slugs, err := deploy.GetFunctionSlugs(fsys) + if err != nil { + return nil, "", err + } + functionsConfig, err := deploy.GetFunctionConfig(slugs, importMapPath, noVerifyJWT, fsys) + if err != nil { + return nil, "", err + } + binds := []string{} + for slug, fc := range functionsConfig { + if !fc.Enabled { + fmt.Fprintln(os.Stderr, "Skipped serving Function:", slug) + continue + } + modules, err := deploy.GetBindMounts(cwd, utils.FunctionsDir, "", fc.Entrypoint, fc.ImportMap, fsys) + if err != nil { + return nil, "", err + } + binds = append(binds, modules...) + fc.ImportMap = utils.ToDockerPath(fc.ImportMap) + fc.Entrypoint = utils.ToDockerPath(fc.Entrypoint) + functionsConfig[slug] = fc + for i, val := range fc.StaticFiles { + fc.StaticFiles[i] = utils.ToDockerPath(val) + } + } + functionsConfigBytes, err := json.Marshal(functionsConfig) + if err != nil { + return nil, "", errors.Errorf("failed to marshal config json: %w", err) + } + return utils.RemoveDuplicates(binds), string(functionsConfigBytes), nil +} diff --git a/internal/functions/serve/serve_test.go b/internal/functions/serve/serve_test.go new file mode 100644 index 0000000..81c385b --- /dev/null +++ b/internal/functions/serve/serve_test.go @@ -0,0 +1,109 @@ +package serve + +import ( + "context" + "net/http" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/api/types" + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/cast" +) + +func TestServeCommand(t *testing.T) { + t.Run("serves all functions", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.InitConfig(utils.InitParams{ProjectId: "test"}, fsys)) + require.NoError(t, afero.WriteFile(fsys, utils.FallbackEnvFilePath, []byte{}, 0644)) + require.NoError(t, afero.WriteFile(fsys, utils.FallbackImportMapPath, []byte{}, 0644)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/supabase_db_test/json"). + Reply(http.StatusOK). 
+ JSON(types.ContainerJSON{}) + containerId := "supabase_edge_runtime_test" + gock.New(utils.Docker.DaemonHost()). + Delete("/v" + utils.Docker.ClientVersion() + "/containers/" + containerId). + Reply(http.StatusOK) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.EdgeRuntime.Image), containerId) + require.NoError(t, apitest.MockDockerLogs(utils.Docker, containerId, "success")) + // Run test + err := Run(context.Background(), "", nil, "", RuntimeOption{}, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on malformed config", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, utils.ConfigPath, []byte("malformed"), 0644)) + // Run test + err := Run(context.Background(), "", nil, "", RuntimeOption{}, fsys) + // Check error + assert.ErrorContains(t, err, "toml: expected = after a key, but the document ends there") + }) + + t.Run("throws error on missing db", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.InitConfig(utils.InitParams{ProjectId: "test"}, fsys)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/supabase_db_test/json"). + Reply(http.StatusNotFound) + // Run test + err := Run(context.Background(), "", nil, "", RuntimeOption{}, fsys) + // Check error + assert.ErrorIs(t, err, utils.ErrNotRunning) + }) + + t.Run("throws error on missing env file", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.InitConfig(utils.InitParams{ProjectId: "test"}, fsys)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/supabase_db_test/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{}) + // Run test + err := Run(context.Background(), ".env", nil, "", RuntimeOption{}, fsys) + // Check error + assert.ErrorContains(t, err, "open .env: file does not exist") + }) + + t.Run("throws error on missing import map", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.InitConfig(utils.InitParams{ProjectId: "test"}, fsys)) + require.NoError(t, afero.WriteFile(fsys, ".env", []byte{}, 0644)) + entrypoint := filepath.Join(utils.FunctionsDir, "hello", "index.ts") + require.NoError(t, afero.WriteFile(fsys, entrypoint, []byte{}, 0644)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/supabase_db_test/json"). + Reply(http.StatusOK). 
+ JSON(types.ContainerJSON{}) + // Run test + err := Run(context.Background(), ".env", cast.Ptr(true), "import_map.json", RuntimeOption{}, fsys) + // Check error + assert.ErrorIs(t, err, os.ErrNotExist) + }) +} diff --git a/internal/functions/serve/templates/main.ts b/internal/functions/serve/templates/main.ts new file mode 100644 index 0000000..534409a --- /dev/null +++ b/internal/functions/serve/templates/main.ts @@ -0,0 +1,246 @@ +import { STATUS_CODE, STATUS_TEXT } from "https://deno.land/std/http/status.ts"; +import * as posix from "https://deno.land/std/path/posix/mod.ts"; + +import * as jose from "https://deno.land/x/jose@v4.13.1/index.ts"; + +const SB_SPECIFIC_ERROR_CODE = { + BootError: + STATUS_CODE.ServiceUnavailable, /** Service Unavailable (RFC 7231, 6.6.4) */ + InvalidWorkerResponse: + STATUS_CODE.InternalServerError, /** Internal Server Error (RFC 7231, 6.6.1) */ + WorkerLimit: 546, /** Extended */ +}; + +const SB_SPECIFIC_ERROR_TEXT = { + [SB_SPECIFIC_ERROR_CODE.BootError]: "BOOT_ERROR", + [SB_SPECIFIC_ERROR_CODE.InvalidWorkerResponse]: "WORKER_ERROR", + [SB_SPECIFIC_ERROR_CODE.WorkerLimit]: "WORKER_LIMIT", +}; + +const SB_SPECIFIC_ERROR_REASON = { + [SB_SPECIFIC_ERROR_CODE.BootError]: + "Worker failed to boot (please check logs)", + [SB_SPECIFIC_ERROR_CODE.InvalidWorkerResponse]: + "Function exited due to an error (please check logs)", + [SB_SPECIFIC_ERROR_CODE.WorkerLimit]: + "Worker failed to respond due to a resource limit (please check logs)", +}; + +// OS stuff - we don't want to expose these to the functions. +const EXCLUDED_ENVS = ["HOME", "HOSTNAME", "PATH", "PWD"]; + +const JWT_SECRET = Deno.env.get("SUPABASE_INTERNAL_JWT_SECRET")!; +const HOST_PORT = Deno.env.get("SUPABASE_INTERNAL_HOST_PORT")!; +const DEBUG = Deno.env.get("SUPABASE_INTERNAL_DEBUG") === "true"; +const FUNCTIONS_CONFIG_STRING = Deno.env.get( + "SUPABASE_INTERNAL_FUNCTIONS_CONFIG", +)!; + +const WALLCLOCK_LIMIT_SEC = parseInt( + Deno.env.get("SUPABASE_INTERNAL_WALLCLOCK_LIMIT_SEC")!, +); + +const DENO_SB_ERROR_MAP = new Map([ + [Deno.errors.InvalidWorkerCreation, SB_SPECIFIC_ERROR_CODE.BootError], + [Deno.errors.InvalidWorkerResponse, SB_SPECIFIC_ERROR_CODE.InvalidWorkerResponse], + [ + Deno.errors.WorkerRequestCancelled, + SB_SPECIFIC_ERROR_CODE.WorkerLimit, + ], +]); + +interface FunctionConfig { + entrypointPath: string; + importMapPath: string; + verifyJWT: boolean; + staticFiles?: string[]; +} + +function getResponse(payload: any, status: number, customHeaders = {}) { + const headers = { ...customHeaders }; + let body: string | null = null; + + if (payload) { + if (typeof payload === "object") { + headers["Content-Type"] = "application/json"; + body = JSON.stringify(payload); + } else if (typeof payload === "string") { + headers["Content-Type"] = "text/plain"; + body = payload; + } else { + body = null; + } + } + + return new Response(body, { status, headers }); +} + +const functionsConfig: Record<string, FunctionConfig> = (() => { + try { + const functionsConfig = JSON.parse(FUNCTIONS_CONFIG_STRING); + + if (DEBUG) { + console.log( + "Functions config:", + JSON.stringify(functionsConfig, null, 2), + ); + } + + return functionsConfig; + } catch (cause) { + throw new Error("Failed to parse functions config", { cause }); + } +})(); + +function getAuthToken(req: Request) { + const authHeader = req.headers.get("authorization"); + if (!authHeader) { + throw new Error("Missing authorization header"); + } + const [bearer, token] = authHeader.split(" "); + if (bearer !== "Bearer") { + throw new Error(`Auth header is not 'Bearer {token}'`); + } +
return token; +} + +async function verifyJWT(jwt: string): Promise<boolean> { + const encoder = new TextEncoder(); + const secretKey = encoder.encode(JWT_SECRET); + try { + await jose.jwtVerify(jwt, secretKey); + } catch (e) { + console.error(e); + return false; + } + return true; +} + +Deno.serve({ + handler: async (req: Request) => { + const url = new URL(req.url); + const { pathname } = url; + + // handle health checks + if (pathname === "/_internal/health") { + return getResponse({ message: "ok" }, STATUS_CODE.OK); + } + + // handle metrics + if (pathname === '/_internal/metric') { + const metric = await EdgeRuntime.getRuntimeMetrics(); + return Response.json(metric); + } + + const pathParts = pathname.split("/"); + const functionName = pathParts[1]; + + if (!functionName || !(functionName in functionsConfig)) { + return getResponse("Function not found", STATUS_CODE.NotFound); + } + + if (req.method !== "OPTIONS" && functionsConfig[functionName].verifyJWT) { + try { + const token = getAuthToken(req); + const isValidJWT = await verifyJWT(token); + + if (!isValidJWT) { + return getResponse({ msg: "Invalid JWT" }, STATUS_CODE.Unauthorized); + } + } catch (e) { + console.error(e); + return getResponse({ msg: e.toString() }, STATUS_CODE.Unauthorized); + } + } + + const servicePath = posix.dirname(functionsConfig[functionName].entrypointPath); + console.error(`serving the request with ${servicePath}`); + + // Ref: https://supabase.com/docs/guides/functions/limits + const memoryLimitMb = 256; + const workerTimeoutMs = isFinite(WALLCLOCK_LIMIT_SEC) ? WALLCLOCK_LIMIT_SEC * 1000 : 400 * 1000; + const noModuleCache = false; + const envVarsObj = Deno.env.toObject(); + const envVars = Object.entries(envVarsObj) + .filter(([name, _]) => + !EXCLUDED_ENVS.includes(name) && !name.startsWith("SUPABASE_INTERNAL_") + ); + + const forceCreate = false; + const customModuleRoot = ""; // empty string to allow any local path + const cpuTimeSoftLimitMs = 1000; + const cpuTimeHardLimitMs = 2000; + + // NOTE(Nyannyacha): Decorator type has been set to tc39 by Lakshan's request, + // but in my opinion, we should probably expose this to customers at some + // point, as their migration process will not be easy.
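// Each request below is served by a sandboxed user worker created with
// explicit resource limits; breaching them surfaces through
// DENO_SB_ERROR_MAP as a BOOT_ERROR / WORKER_ERROR / WORKER_LIMIT response.
// A minimal sketch of the creation call, assuming the same
// EdgeRuntime.userWorkers API used below:
//
//   const worker = await EdgeRuntime.userWorkers.create({
//     servicePath,              // directory containing the function entrypoint
//     memoryLimitMb: 256,       // heap cap per worker
//     cpuTimeSoftLimitMs: 1000, // soft CPU budget...
//     cpuTimeHardLimitMs: 2000, // ...hard cap before the worker is killed
//   });
//   return await worker.fetch(req);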
+ const decoratorType = "tc39"; + + const absEntrypoint = posix.join(Deno.cwd(), functionsConfig[functionName].entrypointPath); + const maybeEntrypoint = posix.toFileUrl(absEntrypoint).href; + + const staticPatterns = functionsConfig[functionName].staticFiles; + + try { + const worker = await EdgeRuntime.userWorkers.create({ + servicePath, + memoryLimitMb, + workerTimeoutMs, + noModuleCache, + importMapPath: functionsConfig[functionName].importMapPath, + envVars, + forceCreate, + customModuleRoot, + cpuTimeSoftLimitMs, + cpuTimeHardLimitMs, + decoratorType, + maybeEntrypoint, + context: { + useReadSyncFileAPI: true, + }, + staticPatterns, + }); + + return await worker.fetch(req); + } catch (e) { + console.error(e); + + for (const [denoError, sbCode] of DENO_SB_ERROR_MAP.entries()) { + if (denoError !== void 0 && e instanceof denoError) { + return getResponse( + { + code: SB_SPECIFIC_ERROR_TEXT[sbCode], + message: SB_SPECIFIC_ERROR_REASON[sbCode], + }, + sbCode + ); + } + } + + return getResponse( + { + code: STATUS_TEXT[STATUS_CODE.InternalServerError], + message: "Request failed due to an internal server error", + trace: JSON.stringify(e.stack) + }, + STATUS_CODE.InternalServerError, + ); + } + }, + + onListen: () => { + console.log( + `Serving functions on http://127.0.0.1:${HOST_PORT}/functions/v1/\nUsing ${Deno.version.deno}`, + ); + }, + + onError: e => { + return getResponse( + { + code: STATUS_TEXT[STATUS_CODE.InternalServerError], + message: "Request failed due to an internal server error", + trace: JSON.stringify(e.stack) + }, + STATUS_CODE.InternalServerError + ) + } +}); diff --git a/internal/gen/keys/keys.go b/internal/gen/keys/keys.go new file mode 100644 index 0000000..81ed529 --- /dev/null +++ b/internal/gen/keys/keys.go @@ -0,0 +1,94 @@ +package keys + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "os" + "strings" + + "github.com/go-errors/errors" + "github.com/go-git/go-git/v5" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/config" +) + +type CustomName struct { + DbHost string `env:"db.host,default=NEXT_PUBLIC_SUPABASE_URL"` + DbPassword string `env:"db.password,default=SUPABASE_DB_PASSWORD"` + JWTSecret string `env:"db.password,default=SUPABASE_AUTH_JWT_SECRET"` + AnonKey string `env:"auth.anon_key,default=SUPABASE_AUTH_ANON_KEY"` + ServiceRoleKey string `env:"auth.service_role_key,default=SUPABASE_AUTH_SERVICE_ROLE_KEY"` +} + +func Run(ctx context.Context, projectRef, format string, names CustomName, fsys afero.Fs) error { + branch := GetGitBranch(fsys) + if err := GenerateSecrets(ctx, projectRef, branch, fsys); err != nil { + return err + } + return utils.EncodeOutput(format, os.Stdout, map[string]string{ + names.DbHost: fmt.Sprintf("%s-%s.fly.dev", projectRef, branch), + names.DbPassword: utils.Config.Db.Password, + names.JWTSecret: utils.Config.Auth.JwtSecret, + names.AnonKey: utils.Config.Auth.AnonKey, + names.ServiceRoleKey: utils.Config.Auth.ServiceRoleKey, + }) +} + +func GenerateSecrets(ctx context.Context, projectRef, branch string, fsys afero.Fs) error { + // Load JWT secret from api + resp, err := utils.GetSupabase().V1GetPostgrestServiceConfigWithResponse(ctx, projectRef) + if err != nil { + return errors.Errorf("failed to get postgrest config: %w", err) + } + if resp.JSON200 == nil { + return errors.New("Unexpected error retrieving JWT secret: " + string(resp.Body)) + } + utils.Config.Auth.JwtSecret = *resp.JSON200.JwtSecret + // Generate database password + key := 
strings.Join([]string{ + projectRef, + utils.Config.Auth.JwtSecret, + branch, + }, ":") + hash := sha256.Sum256([]byte(key)) + utils.Config.Db.Password = hex.EncodeToString(hash[:]) + // Generate JWT tokens + anonToken := config.CustomClaims{ + Issuer: "supabase", + Ref: projectRef, + Role: "anon", + }.NewToken() + if utils.Config.Auth.AnonKey, err = anonToken.SignedString([]byte(utils.Config.Auth.JwtSecret)); err != nil { + return errors.Errorf("failed to sign anon key: %w", err) + } + serviceToken := config.CustomClaims{ + Issuer: "supabase", + Ref: projectRef, + Role: "service_role", + }.NewToken() + if utils.Config.Auth.ServiceRoleKey, err = serviceToken.SignedString([]byte(utils.Config.Auth.JwtSecret)); err != nil { + return errors.Errorf("failed to sign service_role key: %w", err) + } + return nil +} + +func GetGitBranch(fsys afero.Fs) string { + return GetGitBranchOrDefault("main", fsys) +} + +func GetGitBranchOrDefault(def string, fsys afero.Fs) string { + head := os.Getenv("GITHUB_HEAD_REF") + if len(head) > 0 { + return head + } + opts := &git.PlainOpenOptions{DetectDotGit: true} + if repo, err := git.PlainOpenWithOptions(".", opts); err == nil { + if ref, err := repo.Head(); err == nil { + return ref.Name().Short() + } + } + return def +} diff --git a/internal/gen/types/types.go b/internal/gen/types/types.go new file mode 100644 index 0000000..9ffc7dc --- /dev/null +++ b/internal/gen/types/types.go @@ -0,0 +1,117 @@ +package types + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +const ( + LangTypescript = "typescript" + LangGo = "go" + LangSwift = "swift" +) + +const ( + SwiftPublicAccessControl = "public" + SwiftInternalAccessControl = "internal" +) + +func Run(ctx context.Context, projectId string, dbConfig pgconn.Config, lang string, schemas []string, postgrestV9Compat bool, swiftAccessControl string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + originalURL := utils.ToPostgresURL(dbConfig) + // Add default schemas if --schema flag is not specified + if len(schemas) == 0 { + schemas = utils.RemoveDuplicates(append([]string{"public"}, utils.Config.Api.Schemas...)) + } + included := strings.Join(schemas, ",") + + if projectId != "" { + if lang != LangTypescript { + return errors.Errorf("Unable to generate %s types for selected project. Try using --db-url flag instead.", lang) + } + resp, err := utils.GetSupabase().V1GenerateTypescriptTypesWithResponse(ctx, projectId, &api.V1GenerateTypescriptTypesParams{ + IncludedSchemas: &included, + }) + if err != nil { + return errors.Errorf("failed to get typescript types: %w", err) + } + + if resp.JSON200 == nil { + return errors.New("failed to retrieve generated types: " + string(resp.Body)) + } + + fmt.Print(resp.JSON200.Types) + return nil + } + + hostConfig := container.HostConfig{} + if utils.IsLocalDatabase(dbConfig) { + if err := utils.AssertSupabaseDbIsRunning(); err != nil { + return err + } + + if strings.Contains(utils.Config.Api.Image, "v9") { + postgrestV9Compat = true + } + + // Use custom network when connecting to local database + dbConfig.Host = utils.DbAliases[0] + dbConfig.Port = 5432 + } else { + hostConfig.NetworkMode = network.NetworkHost + } + // pg-meta does not set username as the default database, ie. 
postgres + if len(dbConfig.Database) == 0 { + dbConfig.Database = "postgres" + } + + fmt.Fprintln(os.Stderr, "Connecting to", dbConfig.Host, dbConfig.Port) + escaped := utils.ToPostgresURL(dbConfig) + if require, err := isRequireSSL(ctx, originalURL, options...); err != nil { + return err + } else if require { + // node-postgres does not support sslmode=prefer + escaped += "&sslmode=require" + } + + return utils.DockerRunOnceWithConfig( + ctx, + container.Config{ + Image: utils.Config.Studio.PgmetaImage, + Env: []string{ + "PG_META_DB_URL=" + escaped, + "PG_META_GENERATE_TYPES=" + lang, + "PG_META_GENERATE_TYPES_INCLUDED_SCHEMAS=" + included, + "PG_META_GENERATE_TYPES_SWIFT_ACCESS_CONTROL=" + swiftAccessControl, + fmt.Sprintf("PG_META_GENERATE_TYPES_DETECT_ONE_TO_ONE_RELATIONSHIPS=%v", !postgrestV9Compat), + }, + Cmd: []string{"node", "dist/server/server.js"}, + }, + hostConfig, + network.NetworkingConfig{}, + "", + os.Stdout, + os.Stderr, + ) +} + +func isRequireSSL(ctx context.Context, dbUrl string, options ...func(*pgx.ConnConfig)) (bool, error) { + conn, err := utils.ConnectByUrl(ctx, dbUrl+"&sslmode=require", options...) + if err != nil { + if strings.HasSuffix(err.Error(), "(server refused TLS connection)") { + return false, nil + } + return false, err + } + return true, conn.Close(ctx) +} diff --git a/internal/gen/types/types_test.go b/internal/gen/types/types_test.go new file mode 100644 index 0000000..811ae06 --- /dev/null +++ b/internal/gen/types/types_test.go @@ -0,0 +1,191 @@ +package types + +import ( + "context" + "errors" + "net/http" + "testing" + + "github.com/docker/docker/api/types" + "github.com/h2non/gock" + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" + "github.com/supabase/cli/pkg/pgtest" +) + +func TestGenLocalCommand(t *testing.T) { + utils.DbId = "test-db" + utils.Config.Hostname = "localhost" + utils.Config.Db.Port = 5432 + + dbConfig := pgconn.Config{ + Host: utils.Config.Hostname, + Port: utils.Config.Db.Port, + User: "admin", + Password: "password", + } + + t.Run("generates typescript types", func(t *testing.T) { + const containerId = "test-pgmeta" + imageUrl := utils.GetRegistryImageUrl(utils.Config.Studio.PgmetaImage) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId). + Reply(http.StatusOK). + JSON(types.ContainerJSON{}) + apitest.MockDockerStart(utils.Docker, imageUrl, containerId) + require.NoError(t, apitest.MockDockerLogs(utils.Docker, containerId, "hello world\n")) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + // Run test + assert.NoError(t, Run(context.Background(), "", dbConfig, LangTypescript, []string{}, true, "", fsys, conn.Intercept)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error when db is not started", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId). 
+ Reply(http.StatusServiceUnavailable) + // Run test + assert.Error(t, Run(context.Background(), "", dbConfig, LangTypescript, []string{}, true, "", fsys)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on image fetch failure", func(t *testing.T) { + utils.Config.Api.Image = "v9" + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId). + Reply(http.StatusOK). + JSON(types.ContainerJSON{}) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/images"). + Reply(http.StatusServiceUnavailable) + // Run test + assert.Error(t, Run(context.Background(), "", dbConfig, LangTypescript, []string{}, true, "", fsys)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("generates swift types", func(t *testing.T) { + const containerId = "test-pgmeta" + imageUrl := utils.GetRegistryImageUrl(utils.Config.Studio.PgmetaImage) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId). + Reply(http.StatusOK). + JSON(types.ContainerJSON{}) + apitest.MockDockerStart(utils.Docker, imageUrl, containerId) + require.NoError(t, apitest.MockDockerLogs(utils.Docker, containerId, "hello world\n")) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + // Run test + assert.NoError(t, Run(context.Background(), "", dbConfig, LangSwift, []string{}, true, SwiftInternalAccessControl, fsys, conn.Intercept)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestGenLinkedCommand(t *testing.T) { + // Setup valid projectId id + projectId := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + t.Run("generates typescript types", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + projectId + "/types/typescript"). + Reply(200). + JSON(api.TypescriptResponse{Types: ""}) + // Run test + assert.NoError(t, Run(context.Background(), projectId, pgconn.Config{}, LangTypescript, []string{}, true, "", fsys)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on network failure", func(t *testing.T) { + errNetwork := errors.New("network error") + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + projectId + "/types/typescript"). + ReplyError(errNetwork) + // Run test + err := Run(context.Background(), projectId, pgconn.Config{}, LangTypescript, []string{}, true, "", fsys) + // Validate api + assert.ErrorIs(t, err, errNetwork) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on service unavailable", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). 
+ Get("/v1/projects/" + projectId + "/types/typescript"). + Reply(http.StatusServiceUnavailable) + // Run test + assert.Error(t, Run(context.Background(), projectId, pgconn.Config{}, LangTypescript, []string{}, true, "", fsys)) + }) +} + +func TestGenRemoteCommand(t *testing.T) { + dbConfig := pgconn.Config{ + Host: "db.supabase.co", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", + } + + t.Run("generates type from remote db", func(t *testing.T) { + const containerId = "test-pgmeta" + imageUrl := utils.GetRegistryImageUrl(utils.Config.Studio.PgmetaImage) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + apitest.MockDockerStart(utils.Docker, imageUrl, containerId) + require.NoError(t, apitest.MockDockerLogs(utils.Docker, containerId, "hello world\n")) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + // Run test + assert.NoError(t, Run(context.Background(), "", dbConfig, LangTypescript, []string{"public"}, true, "", afero.NewMemMapFs(), conn.Intercept)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/hostnames/activate/activate.go b/internal/hostnames/activate/activate.go new file mode 100644 index 0000000..3cc01c2 --- /dev/null +++ b/internal/hostnames/activate/activate.go @@ -0,0 +1,42 @@ +package activate + +import ( + "context" + "fmt" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/hostnames" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context, projectRef string, includeRawOutput bool, fsys afero.Fs) error { + // 1. Sanity checks. + { + resp, err := hostnames.GetCustomHostnameConfig(ctx, projectRef) + if err != nil { + return err + } + err = hostnames.VerifyCNAME(ctx, projectRef, resp.JSON200.CustomHostname) + if err != nil { + return err + } + } + + // 2. 
activate custom hostname config
+	{
+		resp, err := utils.GetSupabase().V1ActivateCustomHostnameWithResponse(ctx, projectRef)
+		if err != nil {
+			return errors.Errorf("failed to activate custom hostname: %w", err)
+		}
+		if resp.JSON201 == nil {
+			return errors.New("failed to activate custom hostname config: " + string(resp.Body))
+		}
+		status, err := hostnames.TranslateStatus(resp.JSON201, includeRawOutput)
+		if err != nil {
+			return err
+		}
+		fmt.Println(status)
+		return nil
+	}
+}
diff --git a/internal/hostnames/common.go b/internal/hostnames/common.go
new file mode 100644
index 0000000..c01e6a0
--- /dev/null
+++ b/internal/hostnames/common.go
@@ -0,0 +1,135 @@
+package hostnames
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/go-errors/errors"
+	"github.com/supabase/cli/internal/utils"
+	"github.com/supabase/cli/pkg/api"
+)
+
+func GetCustomHostnameConfig(ctx context.Context, projectRef string) (*api.V1GetHostnameConfigResponse, error) {
+	resp, err := utils.GetSupabase().V1GetHostnameConfigWithResponse(ctx, projectRef)
+	if err != nil {
+		return nil, errors.Errorf("failed to get custom hostname: %w", err)
+	}
+	if resp.JSON200 == nil {
+		return nil, errors.New("failed to get custom hostname config; received: " + string(resp.Body))
+	}
+	return resp, nil
+}
+
+func VerifyCNAME(ctx context.Context, projectRef string, customHostname string) error {
+	expectedEndpoint := fmt.Sprintf("%s.", utils.GetSupabaseHost(projectRef))
+	cname, err := utils.ResolveCNAME(ctx, customHostname)
+	if err != nil {
+		return errors.Errorf("expected custom hostname '%s' to have a CNAME record pointing to your project at '%s', but it failed to resolve: %w", customHostname, expectedEndpoint, err)
+	}
+	if cname != expectedEndpoint {
+		return errors.Errorf("expected custom hostname '%s' to have a CNAME record pointing to your project at '%s', but it is currently set to '%s'", customHostname, expectedEndpoint, cname)
+	}
+	return nil
+}
+
+type RawResponse struct {
+	Result struct {
+		CustomOriginServer    string `json:"custom_origin_server"`
+		OwnershipVerification struct {
+			Name  string
+			Type  string
+			Value string
+		} `json:"ownership_verification"`
+		Ssl struct {
+			ValidationRecords []struct {
+				Status   string `json:"status"`
+				TxtName  string `json:"txt_name"`
+				TxtValue string `json:"txt_value"`
+			} `json:"validation_records"`
+			ValidationErrors []struct {
+				Message string `json:"message"`
+			} `json:"validation_errors"`
+			Status string
+		}
+	} `json:"result"`
+}
+
+func serializeRawOutput(response *api.UpdateCustomHostnameResponse) (string, error) {
+	output, err := json.MarshalIndent(response, "", " ")
+	if err != nil {
+		return "", errors.Errorf("failed to serialize json: %w", err)
+	}
+	return string(output), nil
+}
+
+func appendRawOutputIfNeeded(status string, response *api.UpdateCustomHostnameResponse, includeRawOutput bool) string {
+	if !includeRawOutput {
+		return status
+	}
+	rawOutput, err := serializeRawOutput(response)
+	if err != nil {
+		return fmt.Sprintf("%s\nFailed to serialize raw output: %+v\n", status, err)
+	}
+	return fmt.Sprintf("%s\nRaw output follows:\n%s\n", status, rawOutput)
+}
+
+func TranslateStatus(response *api.UpdateCustomHostnameResponse, includeRawOutput bool) (string, error) {
+	if response.Status == api.N5ServicesReconfigured {
+		return appendRawOutputIfNeeded(fmt.Sprintf("Custom hostname setup completed. Project is now accessible at %s.", response.CustomHostname), response, includeRawOutput), nil
+	}
+	if response.Status == api.N4OriginSetupCompleted {
+		var res RawResponse
+		rawBody, err := json.Marshal(response.Data)
+		if err != nil {
+			return "", errors.Errorf("failed to serialize body: %w", err)
+		}
+		err = json.Unmarshal(rawBody, &res)
+		if err != nil {
+			return "", errors.Errorf("failed to deserialize body: %w", err)
+		}
+		return appendRawOutputIfNeeded(fmt.Sprintf(`Custom hostname configuration complete, and ready for activation.
+
+Please ensure that your custom domain is set up as a CNAME record to your Supabase subdomain:
+  %s CNAME -> %s`, response.CustomHostname, res.Result.CustomOriginServer), response, includeRawOutput), nil
+	}
+	if response.Status == api.N2Initiated {
+		var res RawResponse
+		rawBody, err := json.Marshal(response.Data)
+		if err != nil {
+			return "", errors.Errorf("failed to serialize body: %w", err)
+		}
+		err = json.Unmarshal(rawBody, &res)
+		if err != nil {
+			return "", errors.Errorf("failed to deserialize body: %w", err)
+		}
+		ssl := res.Result.Ssl.ValidationRecords
+		if res.Result.Ssl.Status == "initializing" {
+			return appendRawOutputIfNeeded("Custom hostname setup is being initialized; please request re-verification in a few seconds.\n", response, includeRawOutput), nil
+		}
+		if len(res.Result.Ssl.ValidationErrors) > 0 {
+			var errorMessages []string
+			for _, valError := range res.Result.Ssl.ValidationErrors {
+				if strings.Contains(valError.Message, "caa_error") {
+					return appendRawOutputIfNeeded("CAA mismatch; please remove any existing CAA records on your domain, or add one for \"digicert.com\"\n", response, includeRawOutput), nil
+				}
+				errorMessages = append(errorMessages, valError.Message)
+			}
+			valErrors := strings.Join(errorMessages, "\n\t- ")
+			return appendRawOutputIfNeeded(fmt.Sprintf("SSL validation errors: \n\t- %s\n", valErrors), response, includeRawOutput), nil
+		}
+		if len(ssl) != 1 {
+			return "", errors.Errorf("expected a single SSL verification record, received: %+v", ssl)
+		}
+		records := ""
+		if ssl[0].TxtName != "" {
+			records = fmt.Sprintf("%s\n\t%s TXT -> %s", records, ssl[0].TxtName, ssl[0].TxtValue)
+		}
+		status := fmt.Sprintf("Custom hostname verification in-progress; please configure the appropriate DNS entries and request re-verification.\n"+
+			"Required outstanding validation records: %s\n",
+			records)
+		return appendRawOutputIfNeeded(status, response, includeRawOutput), nil
+	}
+	return appendRawOutputIfNeeded("Custom hostname configuration not started.", response, includeRawOutput), nil
+}
diff --git a/internal/hostnames/common_test.go b/internal/hostnames/common_test.go
new file mode 100644
index 0000000..c085afa
--- /dev/null
+++ b/internal/hostnames/common_test.go
@@ -0,0 +1,44 @@
+package hostnames
+
+import (
+	"context"
+	"net/http"
+	"testing"
+
+	"github.com/h2non/gock"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestVerifyCNAME(t *testing.T) {
+	defer gock.OffAll()
+	gock.New("https://1.1.1.1").
+ Get("/dns-query"). + MatchParam("name", "hello.custom-domain.com"). + MatchParam("type", "5"). + MatchHeader("accept", "application/dns-json"). + Reply(http.StatusOK). + JSON(&map[string]interface{}{"Answer": []map[string]interface{}{ + { + "Type": 28, "Data": "127.0.0.1", + }, + }}) + err := VerifyCNAME(context.Background(), "foobarbaz", "hello.custom-domain.com") + assert.ErrorContains(t, err, "expected custom hostname 'hello.custom-domain.com' to have a CNAME record pointing to your project at 'foobarbaz.supabase.co.', but it failed to resolve: failed to locate appropriate CNAME record for hello.custom-domain.com") +} diff --git a/internal/hostnames/create/create.go b/internal/hostnames/create/create.go new file mode 100644 index 0000000..563b524 --- /dev/null +++ b/internal/hostnames/create/create.go @@ -0,0 +1,47 @@ +package create + +import ( + "context" + "fmt" + "strings" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/hostnames" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func Run(ctx context.Context, projectRef string, customHostname string, includeRawOutput bool, fsys afero.Fs) error { + // 1. Sanity checks. + hostname := strings.TrimSpace(customHostname) + { + if len(hostname) == 0 { + return errors.New("non-empty custom hostname expected") + } + // we verify that a CNAME is set as it simplifies the checks used for verifying ownership + err := hostnames.VerifyCNAME(ctx, projectRef, hostname) + if err != nil { + return err + } + } + + // 2. create custom hostname + { + resp, err := utils.GetSupabase().V1UpdateHostnameConfigWithResponse(ctx, projectRef, api.V1UpdateHostnameConfigJSONRequestBody{ + CustomHostname: hostname, + }) + if err != nil { + return errors.Errorf("failed to create custom hostname: %w", err) + } + if resp.JSON201 == nil { + return errors.New("failed to create custom hostname config: " + string(resp.Body)) + } + status, err := hostnames.TranslateStatus(resp.JSON201, includeRawOutput) + if err != nil { + return err + } + fmt.Println(status) + return nil + } +} diff --git a/internal/hostnames/delete/delete.go b/internal/hostnames/delete/delete.go new file mode 100644 index 0000000..3b9f365 --- /dev/null +++ b/internal/hostnames/delete/delete.go @@ -0,0 +1,26 @@ +package delete + +import ( + "context" + "fmt" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context, projectRef string, fsys afero.Fs) error { + // 1. Sanity checks. + // 2. delete config + { + resp, err := utils.GetSupabase().V1DeleteHostnameConfigWithResponse(ctx, projectRef) + if err != nil { + return errors.Errorf("failed to delete custom hostname: %w", err) + } + if resp.StatusCode() != 200 { + return errors.New("failed to delete custom hostname config; received: " + resp.Status()) + } + fmt.Println("Deleted custom hostname config successfully.") + return nil + } +} diff --git a/internal/hostnames/get/get.go b/internal/hostnames/get/get.go new file mode 100644 index 0000000..9e17c19 --- /dev/null +++ b/internal/hostnames/get/get.go @@ -0,0 +1,26 @@ +package get + +import ( + "context" + "fmt" + + "github.com/spf13/afero" + "github.com/supabase/cli/internal/hostnames" +) + +func Run(ctx context.Context, projectRef string, includeRawOutput bool, fsys afero.Fs) error { + // 1. Sanity checks. + // 2. 
get custom hostname config
+	{
+		resp, err := hostnames.GetCustomHostnameConfig(ctx, projectRef)
+		if err != nil {
+			return err
+		}
+		status, err := hostnames.TranslateStatus(resp.JSON200, includeRawOutput)
+		if err != nil {
+			return err
+		}
+		fmt.Println(status)
+		return nil
+	}
+}
diff --git a/internal/hostnames/reverify/reverify.go b/internal/hostnames/reverify/reverify.go
new file mode 100644
index 0000000..7f43299
--- /dev/null
+++ b/internal/hostnames/reverify/reverify.go
@@ -0,0 +1,31 @@
+package reverify
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/go-errors/errors"
+	"github.com/spf13/afero"
+	"github.com/supabase/cli/internal/hostnames"
+	"github.com/supabase/cli/internal/utils"
+)
+
+func Run(ctx context.Context, projectRef string, includeRawOutput bool, fsys afero.Fs) error {
+	// 1. Sanity checks.
+	// 2. attempt to re-verify custom hostname config
+	{
+		resp, err := utils.GetSupabase().V1VerifyDnsConfigWithResponse(ctx, projectRef)
+		if err != nil {
+			return errors.Errorf("failed to re-verify custom hostname: %w", err)
+		}
+		if resp.JSON201 == nil {
+			return errors.New("failed to re-verify custom hostname config: " + string(resp.Body))
+		}
+		status, err := hostnames.TranslateStatus(resp.JSON201, includeRawOutput)
+		if err != nil {
+			return err
+		}
+		fmt.Println(status)
+		return nil
+	}
+}
diff --git a/internal/init/init.go b/internal/init/init.go
new file mode 100644
index 0000000..72241a0
--- /dev/null
+++ b/internal/init/init.go
@@ -0,0 +1,170 @@
+package init
+
+import (
+	"bytes"
+	"context"
+	_ "embed"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/go-errors/errors"
+	"github.com/spf13/afero"
+	"github.com/supabase/cli/internal/utils"
+	"github.com/tidwall/jsonc"
+)
+
+var (
+	vscodeDir      = ".vscode"
+	extensionsPath = filepath.Join(vscodeDir, "extensions.json")
+	settingsPath   = filepath.Join(vscodeDir, "settings.json")
+	intellijDir    = ".idea"
+	denoPath       = filepath.Join(intellijDir, "deno.xml")
+
+	//go:embed templates/.gitignore
+	initGitignore []byte
+	//go:embed templates/.vscode/extensions.json
+	vscodeExtensions string
+	//go:embed templates/.vscode/settings.json
+	vscodeSettings string
+	//go:embed templates/.idea/deno.xml
+	intelliJDeno string
+)
+
+func Run(ctx context.Context, fsys afero.Fs, createVscodeSettings, createIntellijSettings *bool, params utils.InitParams) error {
+	// 1. Write `config.toml`.
+	if err := utils.InitConfig(params, fsys); err != nil {
+		if errors.Is(err, os.ErrExist) {
+			utils.CmdSuggestion = fmt.Sprintf("Run %s to overwrite existing config file.", utils.Aqua("supabase init --force"))
+		}
+		return err
+	}
+
+	// 2. Append to `.gitignore`.
+	if utils.IsGitRepo() {
+		if err := updateGitIgnore(utils.GitIgnorePath, fsys); err != nil {
+			return err
+		}
+	}
+
+	// 3. Generate VS Code settings.
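+	// A nil pointer means the corresponding flag was not given on the command
+	// line; only in that case do we fall back to the interactive prompts below.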
+ if createVscodeSettings != nil { + if *createVscodeSettings { + return writeVscodeConfig(fsys) + } + } else if createIntellijSettings != nil { + if *createIntellijSettings { + return writeIntelliJConfig(fsys) + } + } else { + console := utils.NewConsole() + if isVscode, err := console.PromptYesNo(ctx, "Generate VS Code settings for Deno?", false); err != nil { + return err + } else if isVscode { + return writeVscodeConfig(fsys) + } + if isIntelliJ, err := console.PromptYesNo(ctx, "Generate IntelliJ Settings for Deno?", false); err != nil { + return err + } else if isIntelliJ { + return writeIntelliJConfig(fsys) + } + } + return nil +} + +func updateGitIgnore(ignorePath string, fsys afero.Fs) error { + var contents []byte + + if contained, err := afero.FileContainsBytes(fsys, ignorePath, initGitignore); contained { + return nil + } else if err == nil { + // Add a line break when appending + contents = append(contents, '\n') + } else if !errors.Is(err, os.ErrNotExist) { + return errors.Errorf("failed to read git ignore file: %w", err) + } + + f, err := fsys.OpenFile(ignorePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return errors.Errorf("failed to create git ignore file: %w", err) + } + defer f.Close() + + if _, err := f.Write(append(contents, initGitignore...)); err != nil { + return errors.Errorf("failed to write git ignore file: %w", err) + } + + return nil +} + +type VSCodeSettings map[string]interface{} + +func loadUserSettings(path string, fsys afero.Fs) (VSCodeSettings, error) { + data, err := afero.ReadFile(fsys, path) + if err != nil { + return nil, errors.Errorf("failed to load settings file: %w", err) + } + data = jsonc.ToJSONInPlace(data) + // Parse and unmarshal JSON file. + var userSettings VSCodeSettings + dec := json.NewDecoder(bytes.NewReader(data)) + if err := dec.Decode(&userSettings); err != nil { + return nil, errors.Errorf("failed to parse settings: %w", err) + } + return userSettings, nil +} + +func saveUserSettings(path string, settings VSCodeSettings, fsys afero.Fs) error { + // Open our jsonFile + jsonFile, err := fsys.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return errors.Errorf("failed to create settings file: %w", err) + } + defer jsonFile.Close() + // Marshal JSON to file. + enc := json.NewEncoder(jsonFile) + enc.SetIndent("", " ") + if err := enc.Encode(settings); err != nil { + return errors.Errorf("failed to save settings: %w", err) + } + return nil +} + +func updateJsonFile(path string, template string, fsys afero.Fs) error { + userSettings, err := loadUserSettings(path, fsys) + if errors.Is(err, os.ErrNotExist) || errors.Is(err, io.EOF) { + return afero.WriteFile(fsys, path, []byte(template), 0644) + } else if err != nil { + return err + } + // Merge template into user settings. + if err := json.Unmarshal([]byte(template), &userSettings); err != nil { + return errors.Errorf("failed to copy template: %w", err) + } + return saveUserSettings(path, userSettings, fsys) +} + +func writeVscodeConfig(fsys afero.Fs) error { + // Create VS Code settings for Deno. + if err := utils.MkdirIfNotExistFS(fsys, vscodeDir); err != nil { + return err + } + if err := updateJsonFile(extensionsPath, vscodeExtensions, fsys); err != nil { + return err + } + if err := updateJsonFile(settingsPath, vscodeSettings, fsys); err != nil { + return err + } + fmt.Println("Generated VS Code settings in " + utils.Bold(settingsPath) + ". 
Please install the recommended extension!") + return nil +} + +func writeIntelliJConfig(fsys afero.Fs) error { + if err := utils.WriteFile(denoPath, []byte(intelliJDeno), fsys); err != nil { + return err + } + fmt.Println("Generated IntelliJ settings in " + utils.Bold(denoPath) + ". Please install the Deno plugin!") + return nil +} diff --git a/internal/init/init_test.go b/internal/init/init_test.go new file mode 100644 index 0000000..99a96dc --- /dev/null +++ b/internal/init/init_test.go @@ -0,0 +1,250 @@ +package init + +import ( + "context" + "encoding/json" + "os" + "testing" + + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/fstest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/cast" +) + +func TestInitCommand(t *testing.T) { + t.Run("creates config file", func(t *testing.T) { + // Setup in-memory fs + fsys := &afero.MemMapFs{} + require.NoError(t, fsys.Mkdir(".git", 0755)) + // Run test + assert.NoError(t, Run(context.Background(), fsys, nil, nil, utils.InitParams{})) + // Validate generated config.toml + exists, err := afero.Exists(fsys, utils.ConfigPath) + assert.NoError(t, err) + assert.True(t, exists) + // Validate generated .gitignore + exists, err = afero.Exists(fsys, utils.GitIgnorePath) + assert.NoError(t, err) + assert.True(t, exists) + // Validate vscode settings file isn't generated + exists, err = afero.Exists(fsys, settingsPath) + assert.NoError(t, err) + assert.False(t, exists) + exists, err = afero.Exists(fsys, extensionsPath) + assert.NoError(t, err) + assert.False(t, exists) + // Validate intellij settings file isn't generated + exists, err = afero.Exists(fsys, denoPath) + assert.NoError(t, err) + assert.False(t, exists) + }) + + t.Run("throws error when config file exists", func(t *testing.T) { + // Setup in-memory fs + fsys := &afero.MemMapFs{} + _, err := fsys.Create(utils.ConfigPath) + require.NoError(t, err) + // Run test + assert.Error(t, Run(context.Background(), fsys, nil, nil, utils.InitParams{})) + }) + + t.Run("throws error on permission denied", func(t *testing.T) { + // Setup in-memory fs + fsys := &fstest.OpenErrorFs{DenyPath: utils.ConfigPath} + // Run test + err := Run(context.Background(), fsys, nil, nil, utils.InitParams{}) + // Check error + assert.ErrorIs(t, err, os.ErrPermission) + }) + + t.Run("throws error on failure to write config", func(t *testing.T) { + // Setup read-only fs + fsys := afero.NewReadOnlyFs(afero.NewMemMapFs()) + // Run test + assert.Error(t, Run(context.Background(), fsys, nil, nil, utils.InitParams{})) + }) + + t.Run("creates vscode settings file", func(t *testing.T) { + // Setup in-memory fs + fsys := &afero.MemMapFs{} + // Run test + assert.NoError(t, Run(context.Background(), fsys, cast.Ptr(true), nil, utils.InitParams{})) + // Validate generated vscode settings + exists, err := afero.Exists(fsys, settingsPath) + assert.NoError(t, err) + assert.True(t, exists) + exists, err = afero.Exists(fsys, extensionsPath) + assert.NoError(t, err) + assert.True(t, exists) + }) + + t.Run("does not create vscode settings file", func(t *testing.T) { + // Setup in-memory fs + fsys := &afero.MemMapFs{} + // Run test + assert.NoError(t, Run(context.Background(), fsys, cast.Ptr(false), nil, utils.InitParams{})) + // Validate vscode settings file isn't generated + exists, err := afero.Exists(fsys, settingsPath) + assert.NoError(t, err) + assert.False(t, exists) + exists, err = afero.Exists(fsys, extensionsPath) + 
assert.NoError(t, err) + assert.False(t, exists) + }) + + t.Run("creates intellij deno file", func(t *testing.T) { + // Setup in-memory fs + fsys := &afero.MemMapFs{} + // Run test + assert.NoError(t, Run(context.Background(), fsys, nil, cast.Ptr(true), utils.InitParams{})) + // Validate generated intellij deno config + exists, err := afero.Exists(fsys, denoPath) + assert.NoError(t, err) + assert.True(t, exists) + }) + + t.Run("does not create intellij deno file", func(t *testing.T) { + // Setup in-memory fs + fsys := &afero.MemMapFs{} + // Run test + assert.NoError(t, Run(context.Background(), fsys, nil, cast.Ptr(false), utils.InitParams{})) + // Validate intellij deno config file isn't generated + exists, err := afero.Exists(fsys, denoPath) + assert.NoError(t, err) + assert.False(t, exists) + }) +} + +func TestUpdateGitIgnore(t *testing.T) { + const ignorePath = "/home/supabase/.gitignore" + + t.Run("appends to git ignore", func(t *testing.T) { + // Setup in-memory fs + fsys := &afero.MemMapFs{} + _, err := fsys.Create(ignorePath) + require.NoError(t, err) + // Run test + assert.NoError(t, updateGitIgnore(ignorePath, fsys)) + // Validate file contents + content, err := afero.ReadFile(fsys, ignorePath) + assert.NoError(t, err) + assert.Equal(t, append([]byte("\n"), initGitignore...), content) + }) + + t.Run("noop if already ignored", func(t *testing.T) { + // Setup read-only fs + fsys := &afero.MemMapFs{} + require.NoError(t, afero.WriteFile(fsys, ignorePath, initGitignore, 0644)) + // Run test + assert.NoError(t, updateGitIgnore(ignorePath, fsys)) + // Validate file contents + content, err := afero.ReadFile(fsys, ignorePath) + assert.NoError(t, err) + assert.Equal(t, initGitignore, content) + }) + + t.Run("throws error on failure to open", func(t *testing.T) { + // Setup read-only fs + fsys := &fstest.OpenErrorFs{DenyPath: ignorePath} + // Run test + err := updateGitIgnore(ignorePath, fsys) + // Check error + assert.Error(t, err, os.ErrPermission) + }) + + t.Run("throws error on failure to create", func(t *testing.T) { + // Setup read-only fs + fsys := afero.NewReadOnlyFs(afero.NewMemMapFs()) + // Run test + err := updateGitIgnore(ignorePath, fsys) + // Check error + assert.ErrorContains(t, err, "operation not permitted") + }) +} + +func TestWriteVSCodeConfig(t *testing.T) { + t.Run("throws error on permission denied", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := writeVscodeConfig(afero.NewReadOnlyFs(fsys)) + // Check error + assert.ErrorIs(t, err, os.ErrPermission) + }) + + t.Run("throws error on extensions failure", func(t *testing.T) { + // Setup in-memory fs + fsys := &fstest.OpenErrorFs{DenyPath: extensionsPath} + // Run test + err := writeVscodeConfig(fsys) + // Check error + assert.ErrorIs(t, err, os.ErrPermission) + }) + + t.Run("throws error on settings failure", func(t *testing.T) { + // Setup in-memory fs + fsys := &fstest.OpenErrorFs{DenyPath: settingsPath} + // Run test + err := writeVscodeConfig(fsys) + // Check error + assert.ErrorIs(t, err, os.ErrPermission) + }) +} + +func TestUpdateJsonFile(t *testing.T) { + t.Run("overwrites empty settings with template", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, settingsPath, []byte{}, 0644)) + // Run test + err := updateJsonFile(settingsPath, "{}", fsys) + // Check error + assert.NoError(t, err) + contents, err := afero.ReadFile(fsys, settingsPath) + assert.NoError(t, err) + assert.Equal(t, []byte("{}"), 
contents) + }) + + t.Run("merges template into user settings", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, settingsPath, []byte(`{"a": true, "b": 123}`), 0644)) + // Run test + err := updateJsonFile(settingsPath, `{"b": 456, "c": false}`, fsys) + // Check error + assert.NoError(t, err) + f, err := fsys.Open(settingsPath) + assert.NoError(t, err) + var settings VSCodeSettings + dec := json.NewDecoder(f) + assert.NoError(t, dec.Decode(&settings)) + assert.Equal(t, VSCodeSettings{ + "a": true, + "b": float64(456), + "c": false, + }, settings) + }) + + t.Run("throws error on merge failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, settingsPath, []byte("{}"), 0644)) + // Run test + err := updateJsonFile(settingsPath, "", fsys) + // Check error + assert.ErrorContains(t, err, "failed to copy template:") + }) + + t.Run("throws error on save failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, settingsPath, []byte("{}"), 0644)) + // Run test + err := updateJsonFile(settingsPath, "{}", afero.NewReadOnlyFs(fsys)) + // Check error + assert.ErrorContains(t, err, "operation not permitted") + }) +} diff --git a/internal/init/templates/.gitignore b/internal/init/templates/.gitignore new file mode 100644 index 0000000..ad9264f --- /dev/null +++ b/internal/init/templates/.gitignore @@ -0,0 +1,8 @@ +# Supabase +.branches +.temp + +# dotenvx +.env.keys +.env.local +.env.*.local diff --git a/internal/init/templates/.idea/deno.xml b/internal/init/templates/.idea/deno.xml new file mode 100644 index 0000000..70e99b9 --- /dev/null +++ b/internal/init/templates/.idea/deno.xml @@ -0,0 +1,6 @@ + + + + + diff --git a/internal/init/templates/.vscode/extensions.json b/internal/init/templates/.vscode/extensions.json new file mode 100644 index 0000000..74baffc --- /dev/null +++ b/internal/init/templates/.vscode/extensions.json @@ -0,0 +1,3 @@ +{ + "recommendations": ["denoland.vscode-deno"] +} diff --git a/internal/init/templates/.vscode/settings.json b/internal/init/templates/.vscode/settings.json new file mode 100644 index 0000000..af62c23 --- /dev/null +++ b/internal/init/templates/.vscode/settings.json @@ -0,0 +1,24 @@ +{ + "deno.enablePaths": [ + "supabase/functions" + ], + "deno.lint": true, + "deno.unstable": [ + "bare-node-builtins", + "byonm", + "sloppy-imports", + "unsafe-proto", + "webgpu", + "broadcast-channel", + "worker-options", + "cron", + "kv", + "ffi", + "fs", + "http", + "net" + ], + "[typescript]": { + "editor.defaultFormatter": "denoland.vscode-deno" + } +} diff --git a/internal/inspect/bloat/bloat.go b/internal/inspect/bloat/bloat.go new file mode 100644 index 0000000..6a97e41 --- /dev/null +++ b/internal/inspect/bloat/bloat.go @@ -0,0 +1,49 @@ +package bloat + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed bloat.sql +var BloatQuery string + +type Result struct { + Type string + Schemaname string + Object_name string + Bloat string + Waste string +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := 
utils.ConnectByConfig(ctx, config, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, BloatQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + + table := "|Type|Schema name|Object name|Bloat|Waste\n|-|-|-|-|-|\n" + for _, r := range result { + table += fmt.Sprintf("|`%s`|`%s`|`%s`|`%s`|`%s`|\n", r.Type, r.Schemaname, r.Object_name, r.Bloat, r.Waste) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/bloat/bloat.sql b/internal/inspect/bloat/bloat.sql new file mode 100644 index 0000000..25917d8 --- /dev/null +++ b/internal/inspect/bloat/bloat.sql @@ -0,0 +1,61 @@ +WITH constants AS ( + SELECT current_setting('block_size')::numeric AS bs, 23 AS hdr, 4 AS ma +), bloat_info AS ( + SELECT + ma,bs,schemaname,tablename, + (datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr, + (maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2 + FROM ( + SELECT + schemaname, tablename, hdr, ma, bs, + SUM((1-null_frac)*avg_width) AS datawidth, + MAX(null_frac) AS maxfracsum, + hdr+( + SELECT 1+count(*)/8 + FROM pg_stats s2 + WHERE null_frac<>0 AND s2.schemaname = s.schemaname AND s2.tablename = s.tablename + ) AS nullhdr + FROM pg_stats s, constants + GROUP BY 1,2,3,4,5 + ) AS foo +), table_bloat AS ( + SELECT + schemaname, tablename, cc.relpages, bs, + CEIL((cc.reltuples*((datahdr+ma- + (CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::float)) AS otta + FROM bloat_info + JOIN pg_class cc ON cc.relname = bloat_info.tablename + JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname = bloat_info.schemaname AND nn.nspname <> 'information_schema' +), index_bloat AS ( + SELECT + schemaname, tablename, bs, + COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages, + COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::float)),0) AS iotta -- very rough approximation, assumes all cols + FROM bloat_info + JOIN pg_class cc ON cc.relname = bloat_info.tablename + JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname = bloat_info.schemaname AND nn.nspname <> 'information_schema' + JOIN pg_index i ON indrelid = cc.oid + JOIN pg_class c2 ON c2.oid = i.indexrelid +) +SELECT + type, schemaname, object_name, bloat, pg_size_pretty(raw_waste) as waste +FROM +(SELECT + 'table' as type, + schemaname, + tablename as object_name, + ROUND(CASE WHEN otta=0 THEN 0.0 ELSE table_bloat.relpages/otta::numeric END,1) AS bloat, + CASE WHEN relpages < otta THEN '0' ELSE (bs*(table_bloat.relpages-otta)::bigint)::bigint END AS raw_waste +FROM + table_bloat + UNION +SELECT + 'index' as type, + schemaname, + tablename || '::' || iname as object_name, + ROUND(CASE WHEN iotta=0 OR ipages=0 THEN 0.0 ELSE ipages/iotta::numeric END,1) AS bloat, + CASE WHEN ipages < iotta THEN '0' ELSE (bs*(ipages-iotta))::bigint END AS raw_waste +FROM + index_bloat) bloat_summary +WHERE NOT schemaname LIKE ANY($1) +ORDER BY raw_waste DESC, bloat DESC diff --git a/internal/inspect/bloat/bloat_test.go b/internal/inspect/bloat/bloat_test.go new file mode 100644 index 0000000..8646565 --- /dev/null +++ b/internal/inspect/bloat/bloat_test.go @@ -0,0 +1,43 @@ +package bloat + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + 
"github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestBloat(t *testing.T) { + t.Run("inspects bloat", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(BloatQuery, reset.LikeEscapeSchema(utils.InternalSchemas)). + Reply("SELECT 1", Result{ + Type: "index hit rate", + Schemaname: "public", + Object_name: "table", + Bloat: "0.9", + Waste: "0.1", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/blocking/blocking.go b/internal/inspect/blocking/blocking.go new file mode 100644 index 0000000..37baed1 --- /dev/null +++ b/internal/inspect/blocking/blocking.go @@ -0,0 +1,60 @@ +package blocking + +import ( + "context" + _ "embed" + "fmt" + "regexp" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed blocking.sql +var BlockingQuery string + +type Result struct { + Blocked_pid int + Blocking_statement string + Blocking_duration string + Blocking_pid int + Blocked_statement string + Blocked_duration string +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) 
+	if err != nil {
+		return err
+	}
+	defer conn.Close(context.Background())
+	// Ref: https://github.com/heroku/heroku-pg-extras/blob/main/commands/blocking.js#L7
+	rows, err := conn.Query(ctx, BlockingQuery)
+	if err != nil {
+		return errors.Errorf("failed to query rows: %w", err)
+	}
+	result, err := pgxv5.CollectRows[Result](rows)
+	if err != nil {
+		return err
+	}
+
+	table := "|blocked pid|blocking statement|blocking duration|blocking pid|blocked statement|blocked duration|\n|-|-|-|-|-|-|\n"
+	for _, r := range result {
+		// remove whitespace from query
+		re := regexp.MustCompile(`\s+|\r+|\n+|\t+|\v`)
+		blocking_statement := re.ReplaceAllString(r.Blocking_statement, " ")
+		blocked_statement := re.ReplaceAllString(r.Blocked_statement, " ")
+
+		// escape pipes in query
+		re = regexp.MustCompile(`\|`)
+		blocking_statement = re.ReplaceAllString(blocking_statement, `\|`)
+		blocked_statement = re.ReplaceAllString(blocked_statement, `\|`)
+		table += fmt.Sprintf("|`%d`|`%s`|`%s`|`%d`|`%s`|`%s`|\n", r.Blocked_pid, blocking_statement, r.Blocking_duration, r.Blocking_pid, blocked_statement, r.Blocked_duration)
+	}
+	return list.RenderTable(table)
+}
diff --git a/internal/inspect/blocking/blocking.sql b/internal/inspect/blocking/blocking.sql
new file mode 100644
index 0000000..ea7f767
--- /dev/null
+++ b/internal/inspect/blocking/blocking.sql
@@ -0,0 +1,15 @@
+SELECT
+  bl.pid AS blocked_pid,
+  ka.query AS blocking_statement,
+  age(now(), ka.query_start)::text AS blocking_duration,
+  kl.pid AS blocking_pid,
+  a.query AS blocked_statement,
+  age(now(), a.query_start)::text AS blocked_duration
+FROM pg_catalog.pg_locks bl
+JOIN pg_catalog.pg_stat_activity a
+  ON bl.pid = a.pid
+JOIN pg_catalog.pg_locks kl
+JOIN pg_catalog.pg_stat_activity ka
+  ON kl.pid = ka.pid
+  ON bl.transactionid = kl.transactionid AND bl.pid != kl.pid
+WHERE NOT bl.granted
diff --git a/internal/inspect/blocking/blocking_test.go b/internal/inspect/blocking/blocking_test.go
new file mode 100644
index 0000000..46b6344
--- /dev/null
+++ b/internal/inspect/blocking/blocking_test.go
@@ -0,0 +1,42 @@
+package blocking
+
+import (
+	"context"
+	"testing"
+
+	"github.com/jackc/pgconn"
+	"github.com/spf13/afero"
+	"github.com/stretchr/testify/assert"
+	"github.com/supabase/cli/pkg/pgtest"
+)
+
+var dbConfig = pgconn.Config{
+	Host:     "127.0.0.1",
+	Port:     5432,
+	User:     "admin",
+	Password: "password",
+	Database: "postgres",
+}
+
+func TestBlockingCommand(t *testing.T) {
+	t.Run("inspects blocking", func(t *testing.T) {
+		// Setup in-memory fs
+		fsys := afero.NewMemMapFs()
+		// Setup mock postgres
+		conn := pgtest.NewConn()
+		defer conn.Close(t)
+		conn.Query(BlockingQuery).
+ Reply("SELECT 1", Result{ + Blocked_pid: 1, + Blocking_statement: "select 1", + Blocking_duration: "2s", + Blocking_pid: 1, + Blocked_statement: "select 1", + Blocked_duration: "2s", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/cache/cache.go b/internal/inspect/cache/cache.go new file mode 100644 index 0000000..ce30c1f --- /dev/null +++ b/internal/inspect/cache/cache.go @@ -0,0 +1,56 @@ +package cache + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed cache.sql +var CacheQuery string + +type Result struct { + Name string + Ratio float64 +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + // Ref: https://github.com/heroku/heroku-pg-extras/blob/main/commands/cache_hit.js#L7 + conn, err := utils.ConnectByConfig(ctx, config, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, CacheQuery) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + // TODO: implement a markdown table marshaller + table := "|Name|Ratio|OK?|Explanation|\n|-|-|-|-|\n" + for _, r := range result { + ok := "Yup!" + if r.Ratio < 0.94 { + ok = "Maybe not..." + } + var explanation string + if r.Name == "index hit rate" { + explanation = "This is the ratio of index hits to index scans. If this ratio is low, it means that the database is not using indexes effectively. Check the `index-usage` command for more info." + } else if r.Name == "table hit rate" { + explanation = "This is the ratio of table hits to table scans. If this ratio is low, it means that your queries are not finding the data effectively. Check your query performance and it might be worth increasing your compute." 
+ } + table += fmt.Sprintf("|`%s`|`%.6f`|`%s`|`%s`|\n", r.Name, r.Ratio, ok, explanation) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/cache/cache.sql b/internal/inspect/cache/cache.sql new file mode 100644 index 0000000..fc14b28 --- /dev/null +++ b/internal/inspect/cache/cache.sql @@ -0,0 +1,9 @@ +SELECT + 'index hit rate' AS name, + (sum(idx_blks_hit)) / nullif(sum(idx_blks_hit + idx_blks_read),0) AS ratio +FROM pg_statio_user_indexes +UNION ALL +SELECT + 'table hit rate' AS name, + sum(heap_blks_hit) / nullif(sum(heap_blks_hit) + sum(heap_blks_read),0) AS ratio +FROM pg_statio_user_tables diff --git a/internal/inspect/cache/cache_test.go b/internal/inspect/cache/cache_test.go new file mode 100644 index 0000000..28fa1cb --- /dev/null +++ b/internal/inspect/cache/cache_test.go @@ -0,0 +1,52 @@ +package cache + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestCacheCommand(t *testing.T) { + t.Run("inspects cache hit rate", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(CacheQuery). + Reply("SELECT 1", Result{ + Name: "index hit rate", + Ratio: 0.9, + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) + + t.Run("throws error on empty result", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(CacheQuery). + Reply("SELECT 1", []interface{}{}) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.ErrorContains(t, err, "cannot find field Name in returned row") + }) +} diff --git a/internal/inspect/calls/calls.go b/internal/inspect/calls/calls.go new file mode 100644 index 0000000..2fd0c8d --- /dev/null +++ b/internal/inspect/calls/calls.go @@ -0,0 +1,56 @@ +package calls + +import ( + "context" + _ "embed" + "fmt" + "regexp" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed calls.sql +var CallsQuery string + +type Result struct { + Total_exec_time string + Prop_exec_time string + Ncalls string + Sync_io_time string + Query string +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) 
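+	// calls.sql reads pg_stat_statements (the extension must be enabled) and
+	// returns the ten most frequently called statements.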
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, CallsQuery) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + // TODO: implement a markdown table marshaller + table := "|Query|Total Execution Time|Proportion of total exec time|Number Calls|Sync IO time|\n|-|-|-|-|-|\n" + for _, r := range result { + // remove whitespace from query + re := regexp.MustCompile(`\s+|\r+|\n+|\t+|\v`) + query := re.ReplaceAllString(r.Query, " ") + + // escape pipes in query + re = regexp.MustCompile(`\|`) + query = re.ReplaceAllString(query, `\|`) + table += fmt.Sprintf("|`%s`|`%s`|`%s`|`%s`|`%s`|\n", query, r.Total_exec_time, r.Prop_exec_time, r.Ncalls, r.Sync_io_time) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/calls/calls.sql b/internal/inspect/calls/calls.sql new file mode 100644 index 0000000..57c3ece --- /dev/null +++ b/internal/inspect/calls/calls.sql @@ -0,0 +1,9 @@ +SELECT + query, + (interval '1 millisecond' * total_exec_time)::text AS total_exec_time, + to_char((total_exec_time/sum(total_exec_time) OVER()) * 100, 'FM90D0') || '%' AS prop_exec_time, + to_char(calls, 'FM999G999G990') AS ncalls, + (interval '1 millisecond' * (blk_read_time + blk_write_time))::text AS sync_io_time +FROM pg_stat_statements +ORDER BY calls DESC +LIMIT 10 diff --git a/internal/inspect/calls/calls_test.go b/internal/inspect/calls/calls_test.go new file mode 100644 index 0000000..ded2710 --- /dev/null +++ b/internal/inspect/calls/calls_test.go @@ -0,0 +1,41 @@ +package calls + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestCallsCommand(t *testing.T) { + t.Run("inspects calls", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(CallsQuery). + Reply("SELECT 1", Result{ + Total_exec_time: "0.9", + Prop_exec_time: "0.9", + Ncalls: "0.9", + Sync_io_time: "0.9", + Query: "SELECT 1", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/index_sizes/index_sizes.go b/internal/inspect/index_sizes/index_sizes.go new file mode 100644 index 0000000..2cfc06e --- /dev/null +++ b/internal/inspect/index_sizes/index_sizes.go @@ -0,0 +1,46 @@ +package index_sizes + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed index_sizes.sql +var IndexSizesQuery string + +type Result struct { + Name string + Size string +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) 
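+	// index_sizes.sql sums relpages at the 8192-byte default block size per
+	// index, skipping Supabase-managed schemas via the LIKE patterns in $1.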
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, IndexSizesQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + + table := "|Name|size|\n|-|-|\n" + for _, r := range result { + table += fmt.Sprintf("|`%s`|`%s`|\n", r.Name, r.Size) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/index_sizes/index_sizes.sql b/internal/inspect/index_sizes/index_sizes.sql new file mode 100644 index 0000000..25b6f96 --- /dev/null +++ b/internal/inspect/index_sizes/index_sizes.sql @@ -0,0 +1,9 @@ +SELECT + n.nspname || '.' || c.relname AS name, + pg_size_pretty(sum(c.relpages::bigint*8192)::bigint) AS size +FROM pg_class c +LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) +WHERE NOT n.nspname LIKE ANY($1) +AND c.relkind = 'i' +GROUP BY n.nspname, c.relname +ORDER BY sum(c.relpages) DESC diff --git a/internal/inspect/index_sizes/index_sizes_test.go b/internal/inspect/index_sizes/index_sizes_test.go new file mode 100644 index 0000000..9071c57 --- /dev/null +++ b/internal/inspect/index_sizes/index_sizes_test.go @@ -0,0 +1,40 @@ +package index_sizes + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestIndexSizes(t *testing.T) { + t.Run("inspects index sizes", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(IndexSizesQuery, reset.LikeEscapeSchema(utils.InternalSchemas)). + Reply("SELECT 1", Result{ + Name: "test_table_idx", + Size: "100GB", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/index_usage/index_usage.go b/internal/inspect/index_usage/index_usage.go new file mode 100644 index 0000000..cd8875f --- /dev/null +++ b/internal/inspect/index_usage/index_usage.go @@ -0,0 +1,47 @@ +package index_usage + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed index_usage.sql +var IndexUsageQuery string + +type Result struct { + Name string + Percent_of_times_index_used string + Rows_in_table int64 +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) 
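+ // index_usage.sql reports, per user table, the percentage of scans served by an index, or 'Insufficient data' when no index scans are recorded.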
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, IndexUsageQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + // TODO: implement a markdown table marshaller + table := "|Table name|Percentage of times index used|Rows in table|\n|-|-|-|\n" + for _, r := range result { + table += fmt.Sprintf("|`%s`|`%s`|`%d`|\n", r.Name, r.Percent_of_times_index_used, r.Rows_in_table) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/index_usage/index_usage.sql b/internal/inspect/index_usage/index_usage.sql new file mode 100644 index 0000000..fa83e97 --- /dev/null +++ b/internal/inspect/index_usage/index_usage.sql @@ -0,0 +1,17 @@ +SELECT + schemaname || '.' || relname AS name, + CASE + WHEN idx_scan IS NULL THEN 'Insufficient data' + WHEN idx_scan = 0 THEN 'Insufficient data' + ELSE ROUND(100.0 * idx_scan / (seq_scan + idx_scan), 1) || '%' + END percent_of_times_index_used, + n_live_tup rows_in_table +FROM pg_stat_user_tables +WHERE NOT schemaname LIKE ANY($1) +ORDER BY + CASE + WHEN idx_scan is null then 1 + WHEN idx_scan = 0 then 1 + ELSE 0 + END, + n_live_tup DESC diff --git a/internal/inspect/index_usage/index_usage_test.go b/internal/inspect/index_usage/index_usage_test.go new file mode 100644 index 0000000..5b735bb --- /dev/null +++ b/internal/inspect/index_usage/index_usage_test.go @@ -0,0 +1,41 @@ +package index_usage + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestIndexUsage(t *testing.T) { + t.Run("inspects index usage", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(IndexUsageQuery, reset.LikeEscapeSchema(utils.InternalSchemas)). + Reply("SELECT 1", Result{ + Name: "test_table_idx", + Percent_of_times_index_used: "0.9", + Rows_in_table: 300, + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/locks/locks.go b/internal/inspect/locks/locks.go new file mode 100644 index 0000000..0b2db71 --- /dev/null +++ b/internal/inspect/locks/locks.go @@ -0,0 +1,57 @@ +package locks + +import ( + "context" + _ "embed" + "fmt" + "regexp" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed locks.sql +var LocksQuery string + +type Result struct { + Pid int + Relname string + Transactionid string + Granted bool + Query string + Age string +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) 
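+ // locks.sql lists ExclusiveLock entries in pg_locks joined to pg_stat_activity, with the holding query and its age.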
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, LocksQuery) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + + table := "|pid|relname|transaction id|granted|query|age|\n|-|-|-|-|-|-|\n" + for _, r := range result { + // remove whitespace from query + re := regexp.MustCompile(`\s+|\r+|\n+|\t+|\v`) + query := re.ReplaceAllString(r.Query, " ") + + // escape pipes in query + re = regexp.MustCompile(`\|`) + query = re.ReplaceAllString(query, `\|`) + table += fmt.Sprintf("|`%d`|`%s`|`%s`|`%t`|%s|`%s`|\n", r.Pid, r.Relname, r.Transactionid, r.Granted, query, r.Age) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/locks/locks.sql b/internal/inspect/locks/locks.sql new file mode 100644 index 0000000..e140f47 --- /dev/null +++ b/internal/inspect/locks/locks.sql @@ -0,0 +1,12 @@ +SELECT + pg_stat_activity.pid, + COALESCE(pg_class.relname, 'null') AS relname, + COALESCE(pg_locks.transactionid, 'null') AS transactionid, + pg_locks.granted, + pg_stat_activity.query, + age(now(), pg_stat_activity.query_start)::text AS age +FROM pg_stat_activity, pg_locks LEFT OUTER JOIN pg_class ON (pg_locks.relation = pg_class.oid) +WHERE pg_stat_activity.query <> '' +AND pg_locks.pid = pg_stat_activity.pid +AND pg_locks.mode = 'ExclusiveLock' +ORDER BY query_start diff --git a/internal/inspect/locks/locks_test.go b/internal/inspect/locks/locks_test.go new file mode 100644 index 0000000..e4c55c6 --- /dev/null +++ b/internal/inspect/locks/locks_test.go @@ -0,0 +1,42 @@ +package locks + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestLocksCommand(t *testing.T) { + t.Run("inspects locks", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(LocksQuery). + Reply("SELECT 1", Result{ + Pid: 1, + Relname: "rel", + Transactionid: "9301", + Granted: true, + Query: "select 1", + Age: "300ms", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/long_running_queries/long_running_queries.go b/internal/inspect/long_running_queries/long_running_queries.go new file mode 100644 index 0000000..acf4be4 --- /dev/null +++ b/internal/inspect/long_running_queries/long_running_queries.go @@ -0,0 +1,46 @@ +package long_running_queries + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed long_running_queries.sql +var LongRunningQueriesQuery string + +type Result struct { + Pid int + Duration string + Query string +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) 
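+ // long_running_queries.sql returns non-idle queries that have been running for longer than 5 minutes.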
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, LongRunningQueriesQuery) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + + table := "|pid|Duration|Query|\n|-|-|-|\n" + for _, r := range result { + table += fmt.Sprintf("|`%d`|`%s`|`%s`|\n", r.Pid, r.Duration, r.Query) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/long_running_queries/long_running_queries.sql b/internal/inspect/long_running_queries/long_running_queries.sql new file mode 100644 index 0000000..68c20ba --- /dev/null +++ b/internal/inspect/long_running_queries/long_running_queries.sql @@ -0,0 +1,12 @@ +SELECT + pid, + age(now(), pg_stat_activity.query_start)::text AS duration, + query AS query +FROM + pg_stat_activity +WHERE + pg_stat_activity.query <> ''::text + AND state <> 'idle' + AND age(now(), pg_stat_activity.query_start) > interval '5 minutes' +ORDER BY + age(now(), pg_stat_activity.query_start) DESC diff --git a/internal/inspect/long_running_queries/long_running_queries_test.go b/internal/inspect/long_running_queries/long_running_queries_test.go new file mode 100644 index 0000000..d936c61 --- /dev/null +++ b/internal/inspect/long_running_queries/long_running_queries_test.go @@ -0,0 +1,39 @@ +package long_running_queries + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestLongQueriesCommand(t *testing.T) { + t.Run("inspects long running queries", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(LongRunningQueriesQuery). + Reply("SELECT 1", Result{ + Pid: 1, + Duration: "300ms", + Query: "select 1", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/outliers/outliers.go b/internal/inspect/outliers/outliers.go new file mode 100644 index 0000000..06f015c --- /dev/null +++ b/internal/inspect/outliers/outliers.go @@ -0,0 +1,54 @@ +package outliers + +import ( + "context" + _ "embed" + "fmt" + "regexp" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed outliers.sql +var OutliersQuery string + +type Result struct { + Total_exec_time string + Prop_exec_time string + Ncalls string + Sync_io_time string + Query string +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) 
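+ // outliers.sql ranks the current user's statements in pg_stat_statements by total execution time and keeps the top 10.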
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, OutliersQuery) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + // TODO: implement a markdown table marshaller + table := "|Query|Execution Time|Proportion of exec time|Number Calls|Sync IO time|\n|-|-|-|-|-|\n" + for _, r := range result { + re := regexp.MustCompile(`\s+|\r+|\n+|\t+|\v`) + query := re.ReplaceAllString(r.Query, " ") + + re = regexp.MustCompile(`\|`) + query = re.ReplaceAllString(query, `\|`) + table += fmt.Sprintf("|`%s`|`%s`|`%s`|`%s`|`%s`|\n", query, r.Total_exec_time, r.Prop_exec_time, r.Ncalls, r.Sync_io_time) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/outliers/outliers.sql b/internal/inspect/outliers/outliers.sql new file mode 100644 index 0000000..d222cc8 --- /dev/null +++ b/internal/inspect/outliers/outliers.sql @@ -0,0 +1,9 @@ +SELECT + (interval '1 millisecond' * total_exec_time)::text AS total_exec_time, + to_char((total_exec_time/sum(total_exec_time) OVER()) * 100, 'FM90D0') || '%' AS prop_exec_time, + to_char(calls, 'FM999G999G999G990') AS ncalls, + (interval '1 millisecond' * (blk_read_time + blk_write_time))::text AS sync_io_time, + query +FROM pg_stat_statements WHERE userid = (SELECT usesysid FROM pg_user WHERE usename = current_user LIMIT 1) +ORDER BY total_exec_time DESC +LIMIT 10 diff --git a/internal/inspect/outliers/outliers_test.go b/internal/inspect/outliers/outliers_test.go new file mode 100644 index 0000000..e5d4643 --- /dev/null +++ b/internal/inspect/outliers/outliers_test.go @@ -0,0 +1,41 @@ +package outliers + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestOutliersCommand(t *testing.T) { + t.Run("inspects outliers", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(OutliersQuery). + Reply("SELECT 1", Result{ + Total_exec_time: "0.9", + Prop_exec_time: "0.9", + Ncalls: "0.9", + Sync_io_time: "0.9", + Query: "SELECT 1", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/replication_slots/replication_slots.go b/internal/inspect/replication_slots/replication_slots.go new file mode 100644 index 0000000..927ad05 --- /dev/null +++ b/internal/inspect/replication_slots/replication_slots.go @@ -0,0 +1,48 @@ +package replication_slots + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed replication_slots.sql +var ReplicationSlotsQuery string + +type Result struct { + Slot_name string + Active bool + State string + Replication_client_address string + Replication_lag_gb string +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) 
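+ // replication_slots.sql shows each slot's active flag, replication state, client address, and lag in GB derived from redo_lsn - restart_lsn.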
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, ReplicationSlotsQuery) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + // TODO: implement a markdown table marshaller + table := "|Name|Active|State|Replication Client Address|Replication Lag GB|\n|-|-|-|-|-|\n" + for _, r := range result { + table += fmt.Sprintf("|`%s`|`%t`|`%s`|`%s`|`%s`|\n", r.Slot_name, r.Active, r.State, r.Replication_client_address, r.Replication_lag_gb) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/replication_slots/replication_slots.sql b/internal/inspect/replication_slots/replication_slots.sql new file mode 100644 index 0000000..c45add9 --- /dev/null +++ b/internal/inspect/replication_slots/replication_slots.sql @@ -0,0 +1,11 @@ +SELECT + s.slot_name, + s.active, + COALESCE(r.state, 'N/A') as state, + CASE WHEN r.client_addr IS NULL + THEN 'N/A' + ELSE r.client_addr::text + END replication_client_address, + GREATEST(0, ROUND((redo_lsn-restart_lsn)/1024/1024/1024, 2)) as replication_lag_gb +FROM pg_control_checkpoint(), pg_replication_slots s +LEFT JOIN pg_stat_replication r ON (r.pid = s.active_pid) diff --git a/internal/inspect/replication_slots/replication_slots_test.go b/internal/inspect/replication_slots/replication_slots_test.go new file mode 100644 index 0000000..d9e9b32 --- /dev/null +++ b/internal/inspect/replication_slots/replication_slots_test.go @@ -0,0 +1,41 @@ +package replication_slots + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestReplicationCommand(t *testing.T) { + t.Run("inspects replication slots", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(ReplicationSlotsQuery). + Reply("SELECT 1", Result{ + Slot_name: "test", + Active: true, + State: "active", + Replication_client_address: "127.0.0.1", + Replication_lag_gb: "0.9", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/report.go b/internal/inspect/report.go new file mode 100644 index 0000000..de721d9 --- /dev/null +++ b/internal/inspect/report.go @@ -0,0 +1,79 @@ +package inspect + +import ( + "context" + "embed" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + "time" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/utils" +) + +//go:embed **/*.sql +var queries embed.FS + +func Report(ctx context.Context, out string, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + date := time.Now().Format("2006-01-02") + if err := utils.MkdirIfNotExistFS(fsys, out); err != nil { + return err + } + conn, err := utils.ConnectByConfig(ctx, config, options...) 
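+ // A single connection is reused to run every embedded *.sql query and COPY each result to a date-stamped CSV under the output directory.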
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + fmt.Fprintln(os.Stderr, "Running queries...") + if err := fs.WalkDir(queries, ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return errors.Errorf("failed to walk queries: %w", err) + } + if d.IsDir() { + return nil + } + query, err := queries.ReadFile(path) + if err != nil { + return errors.Errorf("failed to read query: %w", err) + } + name := strings.Split(d.Name(), ".")[0] + outPath := filepath.Join(out, fmt.Sprintf("%s_%s.csv", name, date)) + return copyToCSV(ctx, string(query), outPath, conn.PgConn(), fsys) + }); err != nil { + return err + } + if !filepath.IsAbs(out) { + out, _ = filepath.Abs(out) + } + fmt.Fprintln(os.Stderr, "Reports saved to "+utils.Bold(out)) + return nil +} + +func copyToCSV(ctx context.Context, query, outPath string, conn *pgconn.PgConn, fsys afero.Fs) error { + // Create output file + f, err := fsys.OpenFile(outPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return errors.Errorf("failed to create output file: %w", err) + } + defer f.Close() + // Execute query + csvQuery := wrapQuery(query) + if _, err = conn.CopyTo(ctx, f, csvQuery); err != nil { + return errors.Errorf("failed to copy output: %w", err) + } + return nil +} + +var ignoreSchemas = fmt.Sprintf("'{%s}'::text[]", strings.Join(reset.LikeEscapeSchema(utils.InternalSchemas), ",")) + +func wrapQuery(query string) string { + fullQuery := strings.ReplaceAll(query, "$1", ignoreSchemas) + return fmt.Sprintf("COPY (%s) TO STDOUT WITH CSV HEADER", fullQuery) +} diff --git a/internal/inspect/report_test.go b/internal/inspect/report_test.go new file mode 100644 index 0000000..6b42204 --- /dev/null +++ b/internal/inspect/report_test.go @@ -0,0 +1,113 @@ +package inspect + +import ( + "context" + "fmt" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/inspect/bloat" + "github.com/supabase/cli/internal/inspect/blocking" + "github.com/supabase/cli/internal/inspect/cache" + "github.com/supabase/cli/internal/inspect/calls" + "github.com/supabase/cli/internal/inspect/index_sizes" + "github.com/supabase/cli/internal/inspect/index_usage" + "github.com/supabase/cli/internal/inspect/locks" + "github.com/supabase/cli/internal/inspect/long_running_queries" + "github.com/supabase/cli/internal/inspect/outliers" + "github.com/supabase/cli/internal/inspect/replication_slots" + "github.com/supabase/cli/internal/inspect/role_configs" + "github.com/supabase/cli/internal/inspect/role_connections" + "github.com/supabase/cli/internal/inspect/seq_scans" + "github.com/supabase/cli/internal/inspect/table_index_sizes" + "github.com/supabase/cli/internal/inspect/table_record_counts" + "github.com/supabase/cli/internal/inspect/table_sizes" + "github.com/supabase/cli/internal/inspect/total_index_size" + "github.com/supabase/cli/internal/inspect/total_table_sizes" + "github.com/supabase/cli/internal/inspect/unused_indexes" + "github.com/supabase/cli/internal/inspect/vacuum_stats" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestReportCommand(t *testing.T) { + t.Run("runs all queries", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(wrapQuery(bloat.BloatQuery)). 
+ Reply("COPY 0"). + Query(wrapQuery(blocking.BlockingQuery)). + Reply("COPY 0"). + Query(wrapQuery(cache.CacheQuery)). + Reply("COPY 0"). + Query(wrapQuery(calls.CallsQuery)). + Reply("COPY 0"). + Query(wrapQuery(index_sizes.IndexSizesQuery)). + Reply("COPY 0"). + Query(wrapQuery(index_usage.IndexUsageQuery)). + Reply("COPY 0"). + Query(wrapQuery(locks.LocksQuery)). + Reply("COPY 0"). + Query(wrapQuery(long_running_queries.LongRunningQueriesQuery)). + Reply("COPY 0"). + Query(wrapQuery(outliers.OutliersQuery)). + Reply("COPY 0"). + Query(wrapQuery(replication_slots.ReplicationSlotsQuery)). + Reply("COPY 0"). + Query(wrapQuery(role_configs.RoleConfigsQuery)). + Reply("COPY 0"). + Query(wrapQuery(role_connections.RoleConnectionsQuery)). + Reply("COPY 0"). + Query(wrapQuery(seq_scans.SeqScansQuery)). + Reply("COPY 0"). + Query(wrapQuery(table_index_sizes.TableIndexSizesQuery)). + Reply("COPY 0"). + Query(wrapQuery(table_record_counts.TableRecordCountsQuery)). + Reply("COPY 0"). + Query(wrapQuery(table_sizes.TableSizesQuery)). + Reply("COPY 0"). + Query(wrapQuery(total_index_size.TotalIndexSizeQuery)). + Reply("COPY 0"). + Query(wrapQuery(total_table_sizes.TotalTableSizesQuery)). + Reply("COPY 0"). + Query(wrapQuery(unused_indexes.UnusedIndexesQuery)). + Reply("COPY 0"). + Query(wrapQuery(vacuum_stats.VacuumStatsQuery)). + Reply("COPY 0") + // Run test + err := Report(context.Background(), ".", dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + matches, err := afero.Glob(fsys, "*.csv") + assert.NoError(t, err) + assert.Len(t, matches, 20) + }) +} + +func TestWrapQuery(t *testing.T) { + t.Run("wraps query in csv", func(t *testing.T) { + assert.Equal(t, + "COPY (SELECT 1) TO STDOUT WITH CSV HEADER", + wrapQuery("SELECT 1"), + ) + }) + + t.Run("replaces placeholder value", func(t *testing.T) { + assert.Equal(t, + fmt.Sprintf("COPY (SELECT 'a' LIKE ANY(%s)) TO STDOUT WITH CSV HEADER", ignoreSchemas), + wrapQuery("SELECT 'a' LIKE ANY($1)"), + ) + }) +} diff --git a/internal/inspect/role_configs/role_configs.go b/internal/inspect/role_configs/role_configs.go new file mode 100644 index 0000000..f4fb793 --- /dev/null +++ b/internal/inspect/role_configs/role_configs.go @@ -0,0 +1,46 @@ +package role_configs + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed role_configs.sql +var RoleConfigsQuery string + +type Result struct { + Role_name string + Custom_config string +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) 
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, RoleConfigsQuery) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + + table := "|Role name|Custom config|\n|-|-|\n" + for _, r := range result { + table += fmt.Sprintf("|`%s`|`%s`|\n", r.Role_name, r.Custom_config) + } + + return list.RenderTable(table) +} diff --git a/internal/inspect/role_configs/role_configs.sql b/internal/inspect/role_configs/role_configs.sql new file mode 100644 index 0000000..fd7f964 --- /dev/null +++ b/internal/inspect/role_configs/role_configs.sql @@ -0,0 +1,5 @@ +select + rolname as role_name, + array_to_string(rolconfig, ',', '*') as custom_config +from + pg_roles where rolconfig is not null diff --git a/internal/inspect/role_configs/role_configs_test.go b/internal/inspect/role_configs/role_configs_test.go new file mode 100644 index 0000000..554a125 --- /dev/null +++ b/internal/inspect/role_configs/role_configs_test.go @@ -0,0 +1,38 @@ +package role_configs + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestRoleCommand(t *testing.T) { + t.Run("inspects role configs", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(RoleConfigsQuery). + Reply("SELECT 1", Result{ + Role_name: "postgres", + Custom_config: "statement_timeout=3s", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/role_connections/role_connections.go b/internal/inspect/role_connections/role_connections.go new file mode 100644 index 0000000..5b0a565 --- /dev/null +++ b/internal/inspect/role_connections/role_connections.go @@ -0,0 +1,62 @@ +package role_connections + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed role_connections.sql +var RoleConnectionsQuery string + +type Result struct { + Rolname string + Active_connections int + Connection_limit int +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...)
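+ // role_connections.sql counts active connections per role from pg_stat_activity and resolves each role's effective limit (rolconnlimit, or max_connections when unlimited).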
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, RoleConnectionsQuery) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + + table := "|Role Name|Active connection|\n|-|-|\n" + sum := 0 + for _, r := range result { + table += fmt.Sprintf("|`%s`|`%d`|\n", r.Rolname, r.Active_connections) + sum += r.Active_connections + } + + if err := list.RenderTable(table); err != nil { + return err + } + + if len(result) > 0 { + fmt.Printf("\nActive connections %d/%d\n\n", sum, result[0].Connection_limit) + } + + if matches := utils.ProjectHostPattern.FindStringSubmatch(config.Host); len(matches) == 4 { + fmt.Println("Go to the dashboard for more details:") + fmt.Printf("https://app.supabase.com/project/%s/database/roles\n", matches[2]) + } + + return nil +} diff --git a/internal/inspect/role_connections/role_connections.sql b/internal/inspect/role_connections/role_connections.sql new file mode 100644 index 0000000..40b1036 --- /dev/null +++ b/internal/inspect/role_connections/role_connections.sql @@ -0,0 +1,16 @@ +SELECT + rolname, + ( + SELECT + count(*) + FROM + pg_stat_activity + WHERE + pg_roles.rolname = pg_stat_activity.usename + ) AS active_connections, + CASE WHEN rolconnlimit = -1 + THEN current_setting('max_connections')::int8 + ELSE rolconnlimit + END AS connection_limit +FROM pg_roles +ORDER BY 2 DESC diff --git a/internal/inspect/role_connections/role_connections_test.go b/internal/inspect/role_connections/role_connections_test.go new file mode 100644 index 0000000..32ecab7 --- /dev/null +++ b/internal/inspect/role_connections/role_connections_test.go @@ -0,0 +1,39 @@ +package role_connections + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestRoleCommand(t *testing.T) { + t.Run("inspects role connections", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(RoleConnectionsQuery). + Reply("SELECT 1", Result{ + Rolname: "postgres", + Active_connections: 1, + Connection_limit: 10, + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/seq_scans/seq_scans.go b/internal/inspect/seq_scans/seq_scans.go new file mode 100644 index 0000000..6b52538 --- /dev/null +++ b/internal/inspect/seq_scans/seq_scans.go @@ -0,0 +1,46 @@ +package seq_scans + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed seq_scans.sql +var SeqScansQuery string + +type Result struct { + Name string + Count int64 +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...)
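+ // seq_scans.sql reports the sequential scan count for every user table, most scanned first.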
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, SeqScansQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + + table := "|Name|Count|\n|-|-|\n" + for _, r := range result { + table += fmt.Sprintf("|`%s`|`%d`|\n", r.Name, r.Count) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/seq_scans/seq_scans.sql b/internal/inspect/seq_scans/seq_scans.sql new file mode 100644 index 0000000..c8edfc8 --- /dev/null +++ b/internal/inspect/seq_scans/seq_scans.sql @@ -0,0 +1,6 @@ +SELECT + schemaname || '.' || relname AS name, + seq_scan as count +FROM pg_stat_user_tables +WHERE NOT schemaname LIKE ANY($1) +ORDER BY seq_scan DESC diff --git a/internal/inspect/seq_scans/seq_scans_test.go b/internal/inspect/seq_scans/seq_scans_test.go new file mode 100644 index 0000000..3db6cae --- /dev/null +++ b/internal/inspect/seq_scans/seq_scans_test.go @@ -0,0 +1,40 @@ +package seq_scans + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestSequentialScansCommand(t *testing.T) { + t.Run("inspects sequential scans", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(SeqScansQuery, reset.LikeEscapeSchema(utils.InternalSchemas)). + Reply("SELECT 1", Result{ + Name: "test_table", + Count: 99999, + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/table_index_sizes/table_index_sizes.go b/internal/inspect/table_index_sizes/table_index_sizes.go new file mode 100644 index 0000000..e61f233 --- /dev/null +++ b/internal/inspect/table_index_sizes/table_index_sizes.go @@ -0,0 +1,46 @@ +package table_index_sizes + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed table_index_sizes.sql +var TableIndexSizesQuery string + +type Result struct { + Table string + Index_size string +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) 
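+ // table_index_sizes.sql sums index size per ordinary table (relkind = 'r') using pg_indexes_size.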
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, TableIndexSizesQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + + table := "|Table|Index size|\n|-|-|\n" + for _, r := range result { + table += fmt.Sprintf("|`%s`|`%s`|\n", r.Table, r.Index_size) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/table_index_sizes/table_index_sizes.sql b/internal/inspect/table_index_sizes/table_index_sizes.sql new file mode 100644 index 0000000..0c9bc6b --- /dev/null +++ b/internal/inspect/table_index_sizes/table_index_sizes.sql @@ -0,0 +1,8 @@ +SELECT + n.nspname || '.' || c.relname AS table, + pg_size_pretty(pg_indexes_size(c.oid)) AS index_size +FROM pg_class c +LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) +WHERE NOT n.nspname LIKE ANY($1) +AND c.relkind = 'r' +ORDER BY pg_indexes_size(c.oid) DESC diff --git a/internal/inspect/table_index_sizes/table_index_sizes_test.go b/internal/inspect/table_index_sizes/table_index_sizes_test.go new file mode 100644 index 0000000..20ad80f --- /dev/null +++ b/internal/inspect/table_index_sizes/table_index_sizes_test.go @@ -0,0 +1,40 @@ +package table_index_sizes + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestTableIndexSizesCommand(t *testing.T) { + t.Run("inspects table index sizes", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(TableIndexSizesQuery, reset.LikeEscapeSchema(utils.InternalSchemas)). + Reply("SELECT 1", Result{ + Table: "public.test_table", + Index_size: "3GB", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/table_record_counts/table_record_counts.go b/internal/inspect/table_record_counts/table_record_counts.go new file mode 100644 index 0000000..e0b3943 --- /dev/null +++ b/internal/inspect/table_record_counts/table_record_counts.go @@ -0,0 +1,47 @@ +package table_record_counts + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed table_record_counts.sql +var TableRecordCountsQuery string + +type Result struct { + Schema string + Name string + Estimated_count int64 +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) 
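+ // table_record_counts.sql returns n_live_tup per user table; this is a statistics-based estimate, not an exact count.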
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, TableRecordCountsQuery, reset.LikeEscapeSchema(utils.PgSchemas)) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + + table := "|Schema|Table|Estimated count|\n|-|-|-|\n" + for _, r := range result { + table += fmt.Sprintf("|`%s`|`%s`|`%d`|\n", r.Schema, r.Name, r.Estimated_count) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/table_record_counts/table_record_counts.sql b/internal/inspect/table_record_counts/table_record_counts.sql new file mode 100644 index 0000000..1b24f04 --- /dev/null +++ b/internal/inspect/table_record_counts/table_record_counts.sql @@ -0,0 +1,7 @@ +SELECT + schemaname AS schema, + relname AS name, + n_live_tup AS estimated_count +FROM pg_stat_user_tables +WHERE NOT schemaname LIKE ANY($1) +ORDER BY n_live_tup DESC diff --git a/internal/inspect/table_record_counts/table_record_counts_test.go b/internal/inspect/table_record_counts/table_record_counts_test.go new file mode 100644 index 0000000..a03714d --- /dev/null +++ b/internal/inspect/table_record_counts/table_record_counts_test.go @@ -0,0 +1,41 @@ +package table_record_counts + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestTableRecordCountsCommand(t *testing.T) { + t.Run("inspects table record counts", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(TableRecordCountsQuery, reset.LikeEscapeSchema(utils.PgSchemas)). + Reply("SELECT 1", Result{ + Schema: "public", + Name: "test_table", + Estimated_count: 100, + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/table_sizes/table_sizes.go b/internal/inspect/table_sizes/table_sizes.go new file mode 100644 index 0000000..7741f01 --- /dev/null +++ b/internal/inspect/table_sizes/table_sizes.go @@ -0,0 +1,47 @@ +package table_sizes + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed table_sizes.sql +var TableSizesQuery string + +type Result struct { + Schema string + Name string + Size string +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...)
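+ // table_sizes.sql reports pg_table_size per table, which includes TOAST data but excludes indexes.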
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, TableSizesQuery, reset.LikeEscapeSchema(utils.PgSchemas)) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + + table := "|Schema|Table|Size|\n|-|-|-|\n" + for _, r := range result { + table += fmt.Sprintf("|`%s`|`%s`|`%s`|\n", r.Schema, r.Name, r.Size) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/table_sizes/table_sizes.sql b/internal/inspect/table_sizes/table_sizes.sql new file mode 100644 index 0000000..2c8fb30 --- /dev/null +++ b/internal/inspect/table_sizes/table_sizes.sql @@ -0,0 +1,9 @@ +SELECT + n.nspname AS schema, + c.relname AS name, + pg_size_pretty(pg_table_size(c.oid)) AS size +FROM pg_class c +LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) +WHERE NOT n.nspname LIKE ANY($1) +AND c.relkind = 'r' +ORDER BY pg_table_size(c.oid) DESC diff --git a/internal/inspect/table_sizes/table_sizes_test.go b/internal/inspect/table_sizes/table_sizes_test.go new file mode 100644 index 0000000..5cc6426 --- /dev/null +++ b/internal/inspect/table_sizes/table_sizes_test.go @@ -0,0 +1,41 @@ +package table_sizes + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestTableSizesCommand(t *testing.T) { + t.Run("inspects table sizes", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(TableSizesQuery, reset.LikeEscapeSchema(utils.PgSchemas)). + Reply("SELECT 1", Result{ + Schema: "schema", + Name: "test_table", + Size: "3GB", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/total_index_size/total_index_size.go b/internal/inspect/total_index_size/total_index_size.go new file mode 100644 index 0000000..fbc66b2 --- /dev/null +++ b/internal/inspect/total_index_size/total_index_size.go @@ -0,0 +1,45 @@ +package total_index_size + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed total_index_size.sql +var TotalIndexSizeQuery string + +type Result struct { + Size string +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...)
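+ // total_index_size.sql aggregates relpages across all user indexes into a single pretty-printed total.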
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, TotalIndexSizeQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + + table := "|Size|\n|-|\n" + for _, r := range result { + table += fmt.Sprintf("|`%s`|\n", r.Size) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/total_index_size/total_index_size.sql b/internal/inspect/total_index_size/total_index_size.sql new file mode 100644 index 0000000..d1e8ab3 --- /dev/null +++ b/internal/inspect/total_index_size/total_index_size.sql @@ -0,0 +1,6 @@ +SELECT + pg_size_pretty(sum(c.relpages::bigint*8192)::bigint) AS size +FROM pg_class c +LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) +WHERE NOT n.nspname LIKE ANY($1) +AND c.relkind = 'i' diff --git a/internal/inspect/total_index_size/total_index_size_test.go b/internal/inspect/total_index_size/total_index_size_test.go new file mode 100644 index 0000000..8eb0e0a --- /dev/null +++ b/internal/inspect/total_index_size/total_index_size_test.go @@ -0,0 +1,39 @@ +package total_index_size + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestTotalIndexSizeCommand(t *testing.T) { + t.Run("inspects size of all indexes", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(TotalIndexSizeQuery, reset.LikeEscapeSchema(utils.InternalSchemas)). + Reply("SELECT 1", Result{ + Size: "8GB", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/total_table_sizes/total_table_sizes.go b/internal/inspect/total_table_sizes/total_table_sizes.go new file mode 100644 index 0000000..80b1c89 --- /dev/null +++ b/internal/inspect/total_table_sizes/total_table_sizes.go @@ -0,0 +1,47 @@ +package total_table_sizes + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed total_table_sizes.sql +var TotalTableSizesQuery string + +type Result struct { + Schema string + Name string + Size string +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) 
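+ // total_table_sizes.sql reports pg_total_relation_size per table, which includes indexes and TOAST data.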
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, TotalTableSizesQuery, reset.LikeEscapeSchema(utils.PgSchemas)) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + + table := "|Schema|Table|Size|\n|-|-|-|\n" + for _, r := range result { + table += fmt.Sprintf("|`%s`|`%s`|`%s`|\n", r.Schema, r.Name, r.Size) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/total_table_sizes/total_table_sizes.sql b/internal/inspect/total_table_sizes/total_table_sizes.sql new file mode 100644 index 0000000..471d065 --- /dev/null +++ b/internal/inspect/total_table_sizes/total_table_sizes.sql @@ -0,0 +1,9 @@ +SELECT + n.nspname AS schema, + c.relname AS name, + pg_size_pretty(pg_total_relation_size(c.oid)) AS size +FROM pg_class c +LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) +WHERE NOT n.nspname LIKE ANY($1) +AND c.relkind = 'r' +ORDER BY pg_total_relation_size(c.oid) DESC diff --git a/internal/inspect/total_table_sizes/total_table_sizes_test.go b/internal/inspect/total_table_sizes/total_table_sizes_test.go new file mode 100644 index 0000000..bc548af --- /dev/null +++ b/internal/inspect/total_table_sizes/total_table_sizes_test.go @@ -0,0 +1,41 @@ +package total_table_sizes + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestTotalTableSizesCommand(t *testing.T) { + t.Run("inspects total table sizes", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(TotalTableSizesQuery, reset.LikeEscapeSchema(utils.PgSchemas)). + Reply("SELECT 1", Result{ + Schema: "public", + Name: "test_table", + Size: "3GB", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/unused_indexes/unused_indexes.go b/internal/inspect/unused_indexes/unused_indexes.go new file mode 100644 index 0000000..2a30a46 --- /dev/null +++ b/internal/inspect/unused_indexes/unused_indexes.go @@ -0,0 +1,48 @@ +package unused_indexes + +import ( + "context" + _ "embed" + "fmt" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed unused_indexes.sql +var UnusedIndexesQuery string + +type Result struct { + Table string + Index string + Index_size string + Index_scans int64 +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...)
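+ // unused_indexes.sql flags non-unique indexes scanned fewer than 50 times on relations larger than 5 pages, ordered by size per scan.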
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, UnusedIndexesQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + + table := "|Table|Index|Index Size|Index Scans|\n|-|-|-|-|\n" + for _, r := range result { + table += fmt.Sprintf("|`%s`|`%s`|`%s`|`%d`|\n", r.Table, r.Index, r.Index_size, r.Index_scans) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/unused_indexes/unused_indexes.sql b/internal/inspect/unused_indexes/unused_indexes.sql new file mode 100644 index 0000000..6e77596 --- /dev/null +++ b/internal/inspect/unused_indexes/unused_indexes.sql @@ -0,0 +1,13 @@ +SELECT + schemaname || '.' || relname AS table, + indexrelname AS index, + pg_size_pretty(pg_relation_size(i.indexrelid)) AS index_size, + idx_scan as index_scans +FROM pg_stat_user_indexes ui +JOIN pg_index i ON ui.indexrelid = i.indexrelid +WHERE + NOT indisunique AND idx_scan < 50 AND pg_relation_size(relid) > 5 * 8192 + AND NOT schemaname LIKE ANY($1) +ORDER BY + pg_relation_size(i.indexrelid) / nullif(idx_scan, 0) DESC NULLS FIRST, + pg_relation_size(i.indexrelid) DESC diff --git a/internal/inspect/unused_indexes/unused_indexes_test.go b/internal/inspect/unused_indexes/unused_indexes_test.go new file mode 100644 index 0000000..ee41820 --- /dev/null +++ b/internal/inspect/unused_indexes/unused_indexes_test.go @@ -0,0 +1,42 @@ +package unused_indexes + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestUnusedIndexesCommand(t *testing.T) { + t.Run("inspects unused indexes", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(UnusedIndexesQuery, reset.LikeEscapeSchema(utils.InternalSchemas)).
+ Reply("SELECT 1", Result{ + Table: "test_table", + Index: "test_table_idx", + Index_size: "3GB", + Index_scans: 2, + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/vacuum_stats/vacuum_stats.go b/internal/inspect/vacuum_stats/vacuum_stats.go new file mode 100644 index 0000000..dc9326d --- /dev/null +++ b/internal/inspect/vacuum_stats/vacuum_stats.go @@ -0,0 +1,54 @@ +package vacuum_stats + +import ( + "context" + _ "embed" + "fmt" + "strings" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgxv5" +) + +//go:embed vacuum_stats.sql +var VacuumStatsQuery string + +type Result struct { + Schema string + Table string + Last_vacuum string + Last_autovacuum string + Rowcount string + Dead_rowcount string + Autovacuum_threshold string + Expect_autovacuum string +} + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, VacuumStatsQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) + if err != nil { + return errors.Errorf("failed to query rows: %w", err) + } + result, err := pgxv5.CollectRows[Result](rows) + if err != nil { + return err + } + + table := "|Schema|Table|Last Vacuum|Last Auto Vacuum|Row count|Dead row count|Expect autovacuum?\n|-|-|-|-|-|-|-|\n" + for _, r := range result { + rowcount := strings.Replace(r.Rowcount, "-1", "No stats", 1) + table += fmt.Sprintf("|`%s`|`%s`|%s|%s|`%s`|`%s`|`%s`|\n", r.Schema, r.Table, r.Last_vacuum, r.Last_autovacuum, rowcount, r.Dead_rowcount, r.Expect_autovacuum) + } + return list.RenderTable(table) +} diff --git a/internal/inspect/vacuum_stats/vacuum_stats.sql b/internal/inspect/vacuum_stats/vacuum_stats.sql new file mode 100644 index 0000000..7078474 --- /dev/null +++ b/internal/inspect/vacuum_stats/vacuum_stats.sql @@ -0,0 +1,45 @@ +WITH table_opts AS ( + SELECT + pg_class.oid, relname, nspname, array_to_string(reloptions, '') AS relopts + FROM + pg_class INNER JOIN pg_namespace ns ON relnamespace = ns.oid +), vacuum_settings AS ( + SELECT + oid, relname, nspname, + CASE + WHEN relopts LIKE '%autovacuum_vacuum_threshold%' + THEN substring(relopts, '.*autovacuum_vacuum_threshold=([0-9.]+).*')::integer + ELSE current_setting('autovacuum_vacuum_threshold')::integer + END AS autovacuum_vacuum_threshold, + CASE + WHEN relopts LIKE '%autovacuum_vacuum_scale_factor%' + THEN substring(relopts, '.*autovacuum_vacuum_scale_factor=([0-9.]+).*')::real + ELSE current_setting('autovacuum_vacuum_scale_factor')::real + END AS autovacuum_vacuum_scale_factor + FROM + table_opts +) +SELECT + vacuum_settings.nspname AS schema, + vacuum_settings.relname AS table, + coalesce(to_char(psut.last_vacuum, 'YYYY-MM-DD HH24:MI'), '') AS last_vacuum, + coalesce(to_char(psut.last_autovacuum, 'YYYY-MM-DD HH24:MI'), '') AS last_autovacuum, + to_char(pg_class.reltuples, '9G999G999G999') AS rowcount, + to_char(psut.n_dead_tup, '9G999G999G999') AS dead_rowcount, + to_char(autovacuum_vacuum_threshold + + (autovacuum_vacuum_scale_factor::numeric * pg_class.reltuples), '9G999G999G999') AS autovacuum_threshold, + 
CASE + WHEN autovacuum_vacuum_threshold + (autovacuum_vacuum_scale_factor::numeric * pg_class.reltuples) < psut.n_dead_tup + THEN 'yes' + ELSE 'no' + END AS expect_autovacuum +FROM + pg_stat_user_tables psut INNER JOIN pg_class ON psut.relid = pg_class.oid +INNER JOIN vacuum_settings ON pg_class.oid = vacuum_settings.oid +WHERE NOT vacuum_settings.nspname LIKE ANY($1) +ORDER BY + case + when pg_class.reltuples = -1 then 1 + else 0 + end, + 1 diff --git a/internal/inspect/vacuum_stats/vacuum_stats_test.go b/internal/inspect/vacuum_stats/vacuum_stats_test.go new file mode 100644 index 0000000..0d3cbec --- /dev/null +++ b/internal/inspect/vacuum_stats/vacuum_stats_test.go @@ -0,0 +1,46 @@ +package vacuum_stats + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestVacuumCommand(t *testing.T) { + t.Run("inspects vacuum stats", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(VacuumStatsQuery, reset.LikeEscapeSchema(utils.InternalSchemas)). + Reply("SELECT 1", Result{ + Schema: "test_schema", + Table: "test_table", + Last_vacuum: "2021-01-01 00:00:00", + Last_autovacuum: "2021-01-01 00:00:00", + Rowcount: "1000", + Dead_rowcount: "100", + Autovacuum_threshold: "100", + Expect_autovacuum: "yes", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/link/link.go b/internal/link/link.go new file mode 100644 index 0000000..d35321e --- /dev/null +++ b/internal/link/link.go @@ -0,0 +1,286 @@ +package link + +import ( + "context" + "fmt" + "net/http" + "os" + "strconv" + "sync" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/spf13/viper" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/credentials" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/internal/utils/tenant" + "github.com/supabase/cli/pkg/api" + "github.com/supabase/cli/pkg/cast" + cliConfig "github.com/supabase/cli/pkg/config" + "github.com/supabase/cli/pkg/diff" + "github.com/supabase/cli/pkg/migration" +) + +func Run(ctx context.Context, projectRef string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + copy := utils.Config.Clone() + original, err := cliConfig.ToTomlBytes(copy) + if err != nil { + fmt.Fprintln(utils.GetDebugLogger(), err) + } + + if err := checkRemoteProjectStatus(ctx, projectRef, fsys); err != nil { + return err + } + + // 1. Check service config + keys, err := tenant.GetApiKeys(ctx, projectRef) + if err != nil { + return err + } + LinkServices(ctx, projectRef, keys.Anon, fsys) + + // 2. Check database connection + config := flags.GetDbConfigOptionalPassword(projectRef) + if len(config.Password) > 0 { + if err := linkDatabase(ctx, config, options...); err != nil { + return err + } + // Save database password + if err := credentials.StoreProvider.Set(projectRef, config.Password); err != nil { + fmt.Fprintln(os.Stderr, "Failed to save database password:", err) + } + } + + // 3. 
Save project ref + if err := utils.WriteFile(utils.ProjectRefPath, []byte(projectRef), fsys); err != nil { + return err + } + fmt.Fprintln(os.Stdout, "Finished "+utils.Aqua("supabase link")+".") + + // 4. Suggest config update + updated, err := cliConfig.ToTomlBytes(utils.Config.Clone()) + if err != nil { + fmt.Fprintln(utils.GetDebugLogger(), err) + } + + if lineDiff := diff.Diff(utils.ConfigPath, original, projectRef, updated); len(lineDiff) > 0 { + fmt.Fprintln(os.Stderr, utils.Yellow("WARNING:"), "Local config differs from linked project. Try updating", utils.Bold(utils.ConfigPath)) + fmt.Println(string(lineDiff)) + } + return nil +} + +func LinkServices(ctx context.Context, projectRef, anonKey string, fsys afero.Fs) { + // Ignore non-fatal errors linking services + var wg sync.WaitGroup + wg.Add(8) + go func() { + defer wg.Done() + if err := linkDatabaseSettings(ctx, projectRef); err != nil && viper.GetBool("DEBUG") { + fmt.Fprintln(os.Stderr, err) + } + }() + go func() { + defer wg.Done() + if err := linkPostgrest(ctx, projectRef); err != nil && viper.GetBool("DEBUG") { + fmt.Fprintln(os.Stderr, err) + } + }() + go func() { + defer wg.Done() + if err := linkGotrue(ctx, projectRef); err != nil && viper.GetBool("DEBUG") { + fmt.Fprintln(os.Stderr, err) + } + }() + go func() { + defer wg.Done() + if err := linkStorage(ctx, projectRef); err != nil && viper.GetBool("DEBUG") { + fmt.Fprintln(os.Stderr, err) + } + }() + go func() { + defer wg.Done() + if err := linkPooler(ctx, projectRef, fsys); err != nil && viper.GetBool("DEBUG") { + fmt.Fprintln(os.Stderr, err) + } + }() + api := tenant.NewTenantAPI(ctx, projectRef, anonKey) + go func() { + defer wg.Done() + if err := linkPostgrestVersion(ctx, api, fsys); err != nil && viper.GetBool("DEBUG") { + fmt.Fprintln(os.Stderr, err) + } + }() + go func() { + defer wg.Done() + if err := linkGotrueVersion(ctx, api, fsys); err != nil && viper.GetBool("DEBUG") { + fmt.Fprintln(os.Stderr, err) + } + }() + go func() { + defer wg.Done() + if err := linkStorageVersion(ctx, api, fsys); err != nil && viper.GetBool("DEBUG") { + fmt.Fprintln(os.Stderr, err) + } + }() + wg.Wait() +} + +func linkPostgrest(ctx context.Context, projectRef string) error { + resp, err := utils.GetSupabase().V1GetPostgrestServiceConfigWithResponse(ctx, projectRef) + if err != nil { + return errors.Errorf("failed to read API config: %w", err) + } else if resp.JSON200 == nil { + return errors.Errorf("unexpected API config status %d: %s", resp.StatusCode(), string(resp.Body)) + } + utils.Config.Api.FromRemoteApiConfig(*resp.JSON200) + return nil +} + +func linkPostgrestVersion(ctx context.Context, api tenant.TenantAPI, fsys afero.Fs) error { + version, err := api.GetPostgrestVersion(ctx) + if err != nil { + return err + } + return utils.WriteFile(utils.RestVersionPath, []byte(version), fsys) +} + +func linkGotrue(ctx context.Context, projectRef string) error { + resp, err := utils.GetSupabase().V1GetAuthServiceConfigWithResponse(ctx, projectRef) + if err != nil { + return errors.Errorf("failed to read Auth config: %w", err) + } else if resp.JSON200 == nil { + return errors.Errorf("unexpected Auth config status %d: %s", resp.StatusCode(), string(resp.Body)) + } + utils.Config.Auth.FromRemoteAuthConfig(*resp.JSON200) + return nil +} + +func linkGotrueVersion(ctx context.Context, api tenant.TenantAPI, fsys afero.Fs) error { + version, err := api.GetGotrueVersion(ctx) + if err != nil { + return err + } + return utils.WriteFile(utils.GotrueVersionPath, []byte(version), fsys) +} + +func 
linkStorage(ctx context.Context, projectRef string) error { + resp, err := utils.GetSupabase().V1GetStorageConfigWithResponse(ctx, projectRef) + if err != nil { + return errors.Errorf("failed to read Storage config: %w", err) + } else if resp.JSON200 == nil { + return errors.Errorf("unexpected Storage config status %d: %s", resp.StatusCode(), string(resp.Body)) + } + utils.Config.Storage.FromRemoteStorageConfig(*resp.JSON200) + return nil +} + +func linkStorageVersion(ctx context.Context, api tenant.TenantAPI, fsys afero.Fs) error { + version, err := api.GetStorageVersion(ctx) + if err != nil { + return err + } + return utils.WriteFile(utils.StorageVersionPath, []byte(version), fsys) +} + +func linkDatabaseSettings(ctx context.Context, projectRef string) error { + resp, err := utils.GetSupabase().V1GetPostgresConfigWithResponse(ctx, projectRef) + if err != nil { + return errors.Errorf("failed to read DB config: %w", err) + } else if resp.JSON200 == nil { + return errors.Errorf("unexpected DB config status %d: %s", resp.StatusCode(), string(resp.Body)) + } + utils.Config.Db.Settings.FromRemotePostgresConfig(*resp.JSON200) + return nil +} + +func linkDatabase(ctx context.Context, config pgconn.Config, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + updatePostgresConfig(conn) + // If `schema_migrations` doesn't exist on the remote database, create it. + if err := migration.CreateMigrationTable(ctx, conn); err != nil { + return err + } + return migration.CreateSeedTable(ctx, conn) +} + +func updatePostgresConfig(conn *pgx.Conn) { + serverVersion := conn.PgConn().ParameterStatus("server_version") + // Safe to assume that supported Postgres version is 10.0 <= n < 100.0 + majorDigits := len(serverVersion) + if majorDigits > 2 { + majorDigits = 2 + } + // Treat error as unchanged + if dbMajorVersion, err := strconv.ParseUint(serverVersion[:majorDigits], 10, 7); err == nil { + utils.Config.Db.MajorVersion = uint(dbMajorVersion) + } +} + +func linkPooler(ctx context.Context, projectRef string, fsys afero.Fs) error { + resp, err := utils.GetSupabase().V1GetSupavisorConfigWithResponse(ctx, projectRef) + if err != nil { + return errors.Errorf("failed to get pooler config: %w", err) + } + if resp.JSON200 == nil { + return errors.Errorf("%w: %s", tenant.ErrAuthToken, string(resp.Body)) + } + for _, config := range *resp.JSON200 { + if config.DatabaseType == api.PRIMARY { + updatePoolerConfig(config) + } + } + return utils.WriteFile(utils.PoolerUrlPath, []byte(utils.Config.Db.Pooler.ConnectionString), fsys) +} + +func updatePoolerConfig(config api.SupavisorConfigResponse) { + utils.Config.Db.Pooler.ConnectionString = config.ConnectionString + utils.Config.Db.Pooler.PoolMode = cliConfig.PoolMode(config.PoolMode) + if config.DefaultPoolSize != nil { + utils.Config.Db.Pooler.DefaultPoolSize = cast.IntToUint(*config.DefaultPoolSize) + } + if config.MaxClientConn != nil { + utils.Config.Db.Pooler.MaxClientConn = cast.IntToUint(*config.MaxClientConn) + } +} + +var errProjectPaused = errors.New("project is paused") + +func checkRemoteProjectStatus(ctx context.Context, projectRef string, fsys afero.Fs) error { + resp, err := utils.GetSupabase().V1GetProjectWithResponse(ctx, projectRef) + if err != nil { + return errors.Errorf("failed to retrieve remote project status: %w", err) + } + switch resp.StatusCode() { + case http.StatusNotFound: + // Ignore not found error to support 
linking branch projects + return nil + case http.StatusOK: + // resp.JSON200 is not nil, proceed + default: + return errors.New("Unexpected error retrieving remote project status: " + string(resp.Body)) + } + + switch resp.JSON200.Status { + case api.V1ProjectWithDatabaseResponseStatusINACTIVE: + utils.CmdSuggestion = fmt.Sprintf("An admin must unpause it from the Supabase dashboard at %s", utils.Aqua(fmt.Sprintf("%s/project/%s", utils.GetSupabaseDashboardURL(), projectRef))) + return errors.New(errProjectPaused) + case api.V1ProjectWithDatabaseResponseStatusACTIVEHEALTHY: + // Project is in the desired state, do nothing + default: + fmt.Fprintf(os.Stderr, "%s: Project status is %s instead of Active Healthy. Some operations might fail.\n", utils.Yellow("WARNING"), resp.JSON200.Status) + } + + // Update postgres image version to match the remote project + if version := resp.JSON200.Database.Version; len(version) > 0 { + return utils.WriteFile(utils.PostgresVersionPath, []byte(version), fsys) + } + return nil +} diff --git a/internal/link/link_test.go b/internal/link/link_test.go new file mode 100644 index 0000000..18c090a --- /dev/null +++ b/internal/link/link_test.go @@ -0,0 +1,430 @@ +package link + +import ( + "context" + "errors" + "net/http" + "testing" + + "github.com/h2non/gock" + "github.com/jackc/pgconn" + "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/testing/fstest" + "github.com/supabase/cli/internal/testing/helper" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/tenant" + "github.com/supabase/cli/pkg/api" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/pgtest" + "github.com/zalando/go-keyring" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestLinkCommand(t *testing.T) { + project := "test-project" + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Mock credentials store + keyring.MockInit() + + t.Run("link valid project", func(t *testing.T) { + t.Cleanup(fstest.MockStdin(t, "\n")) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + helper.MockMigrationHistory(conn) + helper.MockSeedHistory(conn) + // Flush pending mocks after test execution + defer gock.OffAll() + // Mock project status + postgres := api.V1DatabaseResponse{ + Host: utils.GetSupabaseDbHost(project), + Version: "15.1.0.117", + } + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project). + Reply(200). + JSON(api.V1ProjectWithDatabaseResponse{ + Status: api.V1ProjectWithDatabaseResponseStatusACTIVEHEALTHY, + Database: postgres, + }) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/api-keys"). + Reply(200). + JSON([]api.ApiKeyResponse{{Name: "anon", ApiKey: "anon-key"}}) + // Link configs + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/config/database/postgres"). + Reply(200). + JSON(api.PostgresConfigResponse{}) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/postgrest"). + Reply(200). + JSON(api.V1PostgrestConfigResponse{}) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/config/auth"). + Reply(200). 
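+ // Each config endpoint mocked below feeds the corresponding FromRemote*
+ // helper in link.go, so the happy path exercises the full service sync.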
+ JSON(api.AuthConfigResponse{}) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/config/storage"). + Reply(200). + JSON(api.StorageConfigResponse{}) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/config/database/pooler"). + Reply(200). + JSON(api.V1PgbouncerConfigResponse{}) + // Link versions + auth := tenant.HealthResponse{Version: "v2.74.2"} + gock.New("https://" + utils.GetSupabaseHost(project)). + Get("/auth/v1/health"). + Reply(200). + JSON(auth) + rest := tenant.SwaggerResponse{Info: tenant.SwaggerInfo{Version: "11.1.0"}} + gock.New("https://" + utils.GetSupabaseHost(project)). + Get("/rest/v1/"). + Reply(200). + JSON(rest) + gock.New("https://" + utils.GetSupabaseHost(project)). + Get("/storage/v1/version"). + Reply(200). + BodyString("0.40.4") + // Run test + err := Run(context.Background(), project, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + // Validate file contents + content, err := afero.ReadFile(fsys, utils.ProjectRefPath) + assert.NoError(t, err) + assert.Equal(t, []byte(project), content) + restVersion, err := afero.ReadFile(fsys, utils.RestVersionPath) + assert.NoError(t, err) + assert.Equal(t, []byte("v"+rest.Info.Version), restVersion) + authVersion, err := afero.ReadFile(fsys, utils.GotrueVersionPath) + assert.NoError(t, err) + assert.Equal(t, []byte(auth.Version), authVersion) + postgresVersion, err := afero.ReadFile(fsys, utils.PostgresVersionPath) + assert.NoError(t, err) + assert.Equal(t, []byte(postgres.Version), postgresVersion) + }) + + t.Run("ignores error linking services", func(t *testing.T) { + t.Cleanup(fstest.MockStdin(t, "\n")) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Flush pending mocks after test execution + defer gock.OffAll() + // Mock project status + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project). + Reply(200). + JSON(api.V1ProjectWithDatabaseResponse{ + Status: api.V1ProjectWithDatabaseResponseStatusACTIVEHEALTHY, + Database: api.V1DatabaseResponse{}, + }) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/api-keys"). + Reply(200). + JSON([]api.ApiKeyResponse{{Name: "anon", ApiKey: "anon-key"}}) + // Link configs + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/config/database/postgres"). + ReplyError(errors.New("network error")) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/postgrest"). + ReplyError(errors.New("network error")) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/config/auth"). + ReplyError(errors.New("network error")) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/config/storage"). + ReplyError(errors.New("network error")) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/config/database/pooler"). + ReplyError(errors.New("network error")) + // Link versions + gock.New("https://" + utils.GetSupabaseHost(project)). + Get("/auth/v1/health"). + ReplyError(errors.New("network error")) + gock.New("https://" + utils.GetSupabaseHost(project)). + Get("/rest/v1/"). + ReplyError(errors.New("network error")) + gock.New("https://" + utils.GetSupabaseHost(project)). + Get("/storage/v1/version"). 
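+ // LinkServices only logs these failures in debug mode, so none of the
+ // mocked network errors should abort the link flow.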
+ ReplyError(errors.New("network error")) + // Run test + err := Run(context.Background(), project, fsys, func(cc *pgx.ConnConfig) { + cc.LookupFunc = func(ctx context.Context, host string) (addrs []string, err error) { + return nil, errors.New("hostname resolving error") + } + }) + // Check error + assert.ErrorContains(t, err, "hostname resolving error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on write failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewReadOnlyFs(afero.NewMemMapFs()) + // Flush pending mocks after test execution + defer gock.OffAll() + // Mock project status + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project). + Reply(200). + JSON(api.V1ProjectWithDatabaseResponse{ + Status: api.V1ProjectWithDatabaseResponseStatusACTIVEHEALTHY, + Database: api.V1DatabaseResponse{}, + }) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/api-keys"). + Reply(200). + JSON([]api.ApiKeyResponse{{Name: "anon", ApiKey: "anon-key"}}) + // Link configs + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/config/database/postgres"). + ReplyError(errors.New("network error")) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/postgrest"). + ReplyError(errors.New("network error")) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/config/auth"). + ReplyError(errors.New("network error")) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/config/storage"). + ReplyError(errors.New("network error")) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/config/database/pooler"). + ReplyError(errors.New("network error")) + // Link versions + gock.New("https://" + utils.GetSupabaseHost(project)). + Get("/auth/v1/health"). + ReplyError(errors.New("network error")) + gock.New("https://" + utils.GetSupabaseHost(project)). + Get("/rest/v1/"). + ReplyError(errors.New("network error")) + gock.New("https://" + utils.GetSupabaseHost(project)). + Get("/storage/v1/version"). + ReplyError(errors.New("network error")) + gock.New(utils.DefaultApiHost). + Get("/v1/projects"). + ReplyError(errors.New("network error")) + // Run test + err := Run(context.Background(), project, fsys) + // Check error + assert.ErrorContains(t, err, "operation not permitted") + assert.Empty(t, apitest.ListUnmatchedRequests()) + // Validate file contents + exists, err := afero.Exists(fsys, utils.ProjectRefPath) + assert.NoError(t, err) + assert.False(t, exists) + }) +} + +func TestStatusCheck(t *testing.T) { + project := "test-project" + + t.Run("updates postgres version when healthy", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Flush pending mocks after test execution + defer gock.OffAll() + // Mock project status + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project). + Reply(http.StatusOK). 
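+ // The healthy response carries the remote Postgres version, which should
+ // end up written to utils.PostgresVersionPath below.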
+ JSON(api.V1ProjectWithDatabaseResponse{ + Status: api.V1ProjectWithDatabaseResponseStatusACTIVEHEALTHY, + Database: api.V1DatabaseResponse{Version: "15.6.1.139"}, + }) + // Run test + err := checkRemoteProjectStatus(context.Background(), project, fsys) + // Check error + assert.NoError(t, err) + version, err := afero.ReadFile(fsys, utils.PostgresVersionPath) + assert.NoError(t, err) + assert.Equal(t, "15.6.1.139", string(version)) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("ignores project not found", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Flush pending mocks after test execution + defer gock.OffAll() + // Mock project status + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project). + Reply(http.StatusNotFound) + // Run test + err := checkRemoteProjectStatus(context.Background(), project, fsys) + // Check error + assert.NoError(t, err) + exists, err := afero.Exists(fsys, utils.PostgresVersionPath) + assert.NoError(t, err) + assert.False(t, exists) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on project inactive", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Flush pending mocks after test execution + defer gock.OffAll() + // Mock project status + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project). + Reply(http.StatusOK). + JSON(api.V1ProjectWithDatabaseResponse{Status: api.V1ProjectWithDatabaseResponseStatusINACTIVE}) + // Run test + err := checkRemoteProjectStatus(context.Background(), project, fsys) + // Check error + assert.ErrorIs(t, err, errProjectPaused) + exists, err := afero.Exists(fsys, utils.PostgresVersionPath) + assert.NoError(t, err) + assert.False(t, exists) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestLinkPostgrest(t *testing.T) { + project := "test-project" + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + t.Run("ignores matching config", func(t *testing.T) { + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/postgrest"). + Reply(200). + JSON(api.V1PostgrestConfigResponse{}) + // Run test + err := linkPostgrest(context.Background(), project) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("updates api on newer config", func(t *testing.T) { + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/postgrest"). + Reply(200). + JSON(api.V1PostgrestConfigResponse{ + DbSchema: "public, graphql_public", + DbExtraSearchPath: "public, extensions", + MaxRows: 1000, + }) + // Run test + err := linkPostgrest(context.Background(), project) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + assert.ElementsMatch(t, []string{"public", "graphql_public"}, utils.Config.Api.Schemas) + assert.ElementsMatch(t, []string{"public", "extensions"}, utils.Config.Api.ExtraSearchPath) + assert.Equal(t, uint(1000), utils.Config.Api.MaxRows) + }) + + t.Run("throws error on network failure", func(t *testing.T) { + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/postgrest"). 
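+ // A transport-level failure should surface as a wrapped error rather
+ // than the unexpected-status message asserted in the next subtest.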
+      ReplyError(errors.New("network error"))
+    // Run test
+    err := linkPostgrest(context.Background(), project)
+    // Validate api
+    assert.ErrorContains(t, err, "network error")
+    assert.Empty(t, apitest.ListUnmatchedRequests())
+  })
+
+  t.Run("throws error on server unavailable", func(t *testing.T) {
+    // Flush pending mocks after test execution
+    defer gock.OffAll()
+    gock.New(utils.DefaultApiHost).
+      Get("/v1/projects/" + project + "/postgrest").
+      Reply(500).
+      JSON(map[string]string{"message": "unavailable"})
+    // Run test
+    err := linkPostgrest(context.Background(), project)
+    // Validate api
+    assert.ErrorContains(t, err, `unexpected API config status 500: {"message":"unavailable"}`)
+    assert.Empty(t, apitest.ListUnmatchedRequests())
+  })
+}
+
+func TestLinkDatabase(t *testing.T) {
+  t.Run("throws error on connect failure", func(t *testing.T) {
+    // Run test
+    err := linkDatabase(context.Background(), pgconn.Config{})
+    // Check error
+    assert.ErrorContains(t, err, "invalid port (outside range)")
+  })
+
+  t.Run("ignores missing server version", func(t *testing.T) {
+    // Setup mock postgres
+    conn := pgtest.NewWithStatus(map[string]string{
+      "standard_conforming_strings": "on",
+    })
+    defer conn.Close(t)
+    helper.MockMigrationHistory(conn)
+    helper.MockSeedHistory(conn)
+    // Run test
+    err := linkDatabase(context.Background(), dbConfig, conn.Intercept)
+    // Check error
+    assert.NoError(t, err)
+  })
+
+  t.Run("updates config to newer db version", func(t *testing.T) {
+    utils.Config.Db.MajorVersion = 14
+    // Setup mock postgres
+    conn := pgtest.NewWithStatus(map[string]string{
+      "standard_conforming_strings": "on",
+      "server_version":              "15.0",
+    })
+    defer conn.Close(t)
+    helper.MockMigrationHistory(conn)
+    helper.MockSeedHistory(conn)
+    // Run test
+    err := linkDatabase(context.Background(), dbConfig, conn.Intercept)
+    // Check error
+    assert.NoError(t, err)
+    assert.Equal(t, uint(15), utils.Config.Db.MajorVersion)
+  })
+
+  t.Run("throws error on query failure", func(t *testing.T) {
+    utils.Config.Db.MajorVersion = 14
+    // Setup mock postgres
+    conn := pgtest.NewConn()
+    defer conn.Close(t)
+    conn.Query(migration.SET_LOCK_TIMEOUT).
+      Query(migration.CREATE_VERSION_SCHEMA).
+      Reply("CREATE SCHEMA").
+      Query(migration.CREATE_VERSION_TABLE).
+      ReplyError(pgerrcode.InsufficientPrivilege, "permission denied for relation supabase_migrations").
+      Query(migration.ADD_STATEMENTS_COLUMN).
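+      // The trailing column patches are still queued by CreateMigrationTable,
+      // so they are registered here even though the batch errors early.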
+ Query(migration.ADD_NAME_COLUMN) + // Run test + err := linkDatabase(context.Background(), dbConfig, conn.Intercept) + // Check error + assert.ErrorContains(t, err, "ERROR: permission denied for relation supabase_migrations (SQLSTATE 42501)") + }) +} diff --git a/internal/login/login.go b/internal/login/login.go new file mode 100644 index 0000000..6239b50 --- /dev/null +++ b/internal/login/login.go @@ -0,0 +1,261 @@ +package login + +import ( + "bytes" + "context" + "crypto/aes" + "crypto/cipher" + "crypto/ecdh" + "crypto/rand" + "encoding/hex" + "fmt" + "io" + "net/http" + "os" + "os/user" + "strings" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/go-errors/errors" + "github.com/google/uuid" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/new" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/fetcher" +) + +type RunParams struct { + Token string + TokenName string + OpenBrowser bool + SessionId string + Encryption LoginEncryptor + Fsys afero.Fs +} + +type AccessTokenResponse struct { + SessionId string `json:"id"` + CreatedAt string `json:"created_at"` + AccessToken string `json:"access_token"` + PublicKey string `json:"public_key"` + Nonce string `json:"nonce"` +} + +const decryptionErrorMsg = "cannot decrypt access token" + +var loggedInMsg = "You are now logged in. " + utils.Aqua("Happy coding!") + +type LoginEncryptor interface { + encodedPublicKey() string + decryptAccessToken(accessToken string, publicKey string, nonce string) (string, error) +} + +type LoginEncryption struct { + curve ecdh.Curve + privateKey *ecdh.PrivateKey + publicKey *ecdh.PublicKey +} + +func NewLoginEncryption() (LoginEncryption, error) { + enc := LoginEncryption{} + err := enc.generateKeys() + if err != nil { + return enc, errors.Errorf("cannot generate crypto keys: %w", err) + } + return enc, nil +} + +func (enc *LoginEncryption) generateKeys() error { + enc.curve = ecdh.P256() + privateKey, err := enc.curve.GenerateKey(rand.Reader) + if err != nil { + return errors.Errorf("cannot generate encryption key: %w", err) + } + enc.privateKey = privateKey + enc.publicKey = privateKey.PublicKey() + return nil +} + +func (enc LoginEncryption) encodedPublicKey() string { + return hex.EncodeToString(enc.publicKey.Bytes()) +} + +func (enc LoginEncryption) decryptAccessToken(accessToken string, publicKey string, nonce string) (string, error) { + decodedAccessToken, err := hex.DecodeString(accessToken) + if err != nil { + return "", errors.Errorf("%s: %w", decryptionErrorMsg, err) + } + + decodedNonce, err := hex.DecodeString(nonce) + if err != nil { + return "", errors.Errorf("%s: %w", decryptionErrorMsg, err) + } + + decodedPublicKey, err := hex.DecodeString(publicKey) + if err != nil { + return "", errors.Errorf("%s: %w", decryptionErrorMsg, err) + } + + remotePublicKey, err := enc.curve.NewPublicKey(decodedPublicKey) + if err != nil { + return "", errors.Errorf("%s: %w", decryptionErrorMsg, err) + } + + secret, err := enc.privateKey.ECDH(remotePublicKey) + if err != nil { + return "", errors.Errorf("%s: %w", decryptionErrorMsg, err) + } + + block, err := aes.NewCipher(secret) + if err != nil { + return "", errors.Errorf("%s: %w", decryptionErrorMsg, err) + } + + aesgcm, err := cipher.NewGCM(block) + if err != nil { + return "", errors.Errorf("%s: %w", decryptionErrorMsg, err) + } + + decryptedAccessToken, err := aesgcm.Open(nil, decodedNonce, decodedAccessToken, nil) + if err != nil { + return "", errors.Errorf("%s: %w", decryptionErrorMsg, err) + } + + 
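// Both sides have now derived the same ECDH P-256 shared secret, and
+ // AES-GCM has authenticated the ciphertext against the dashboard-supplied nonce.
+ 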
return string(decryptedAccessToken), nil
+}
+
+const maxRetries = 2
+
+func pollForAccessToken(ctx context.Context, url string) (AccessTokenResponse, error) {
+  // TODO: Move to OpenAPI-generated http client once we reach v1 on API schema.
+  client := fetcher.NewFetcher(
+    utils.GetSupabaseAPIHost(),
+    fetcher.WithHTTPClient(&http.Client{
+      Timeout: 10 * time.Second,
+    }),
+    fetcher.WithExpectedStatus(http.StatusOK),
+  )
+  console := utils.NewConsole()
+  probe := func() (AccessTokenResponse, error) {
+    // TODO: support automatic login flow
+    deviceCode, err := console.PromptText(ctx, "Enter your verification code: ")
+    if err != nil {
+      return AccessTokenResponse{}, err
+    }
+    urlWithQuery := fmt.Sprintf("%s?device_code=%s", url, deviceCode)
+    resp, err := client.Send(ctx, http.MethodGet, urlWithQuery, nil)
+    if err != nil {
+      return AccessTokenResponse{}, err
+    }
+    return fetcher.ParseJSON[AccessTokenResponse](resp.Body)
+  }
+  policy := backoff.WithContext(backoff.WithMaxRetries(&backoff.ZeroBackOff{}, maxRetries), ctx)
+  return backoff.RetryNotifyWithData(probe, policy, newErrorCallback())
+}
+
+func newErrorCallback() backoff.Notify {
+  failureCount := 0
+  return func(err error, d time.Duration) {
+    failureCount += 1
+    fmt.Fprintln(os.Stderr, err)
+    fmt.Fprintf(os.Stderr, "Retry (%d/%d): ", failureCount, maxRetries)
+  }
+}
+
+func Run(ctx context.Context, stdout io.Writer, params RunParams) error {
+  if params.Token != "" {
+    if err := utils.SaveAccessToken(params.Token, params.Fsys); err != nil {
+      return errors.Errorf("cannot save provided token: %w", err)
+    }
+    fmt.Fprintln(stdout, loggedInMsg)
+    return nil
+  }
+
+  // Initialise login encryption and Session ID for end-to-end communication.
+  if params.Encryption == nil {
+    var err error
+    if params.Encryption, err = NewLoginEncryption(); err != nil {
+      return err
+    }
+    params.SessionId = uuid.New().String()
+  }
+
+  // Initialise default token name
+  if params.TokenName == "" {
+    params.TokenName = generateTokenNameWithFallback()
+  }
+
+  encodedPublicKey := params.Encryption.encodedPublicKey()
+  createLoginSessionPath := "/cli/login"
+  createLoginSessionQuery := "?session_id=" + params.SessionId + "&token_name=" + params.TokenName + "&public_key=" + encodedPublicKey
+  createLoginSessionUrl := utils.GetSupabaseDashboardURL() + createLoginSessionPath + createLoginSessionQuery
+
+  if params.OpenBrowser {
+    fmt.Fprintf(stdout, "Hello from %s! 
Press %s to open browser and login automatically.\n", utils.Aqua("Supabase"), utils.Aqua("Enter")) + if _, err := fmt.Scanln(); err != nil { + return errors.Errorf("failed to scan line: %w", err) + } + fmt.Fprintf(stdout, "Here is your login link in case browser did not open %s\n\n", utils.Bold(createLoginSessionUrl)) + if err := RunOpenCmd(ctx, createLoginSessionUrl); err != nil { + fmt.Fprintln(os.Stderr, err) + } + } else { + fmt.Fprintf(stdout, "Here is your login link, open it in the browser %s\n\n", utils.Bold(createLoginSessionUrl)) + } + + sessionPollingUrl := "/platform/cli/login/" + params.SessionId + accessTokenResponse, err := pollForAccessToken(ctx, sessionPollingUrl) + if err != nil { + return err + } + decryptedAccessToken, err := params.Encryption.decryptAccessToken(accessTokenResponse.AccessToken, accessTokenResponse.PublicKey, accessTokenResponse.Nonce) + if err != nil { + return err + } + if err := utils.SaveAccessToken(decryptedAccessToken, params.Fsys); err != nil { + return err + } + + fmt.Fprintf(stdout, "Token %s created successfully.\n\n", utils.Bold(params.TokenName)) + fmt.Fprintln(stdout, loggedInMsg) + + return nil +} + +func ParseAccessToken(stdin afero.File) string { + // Not using viper so we can reset env easily in tests + token := os.Getenv("SUPABASE_ACCESS_TOKEN") + if len(token) == 0 { + var buf bytes.Buffer + if err := new.CopyStdinIfExists(stdin, &buf); err != nil { + fmt.Fprintln(os.Stderr, err) + } + token = strings.TrimSpace(buf.String()) + } + return token +} + +func generateTokenName() (string, error) { + user, err := user.Current() + if err != nil { + return "", errors.Errorf("cannot retrieve username: %w", err) + } + + hostname, err := os.Hostname() + if err != nil { + return "", errors.Errorf("cannot retrieve hostname: %w", err) + } + + return fmt.Sprintf("cli_%s@%s_%d", user.Username, hostname, time.Now().Unix()), nil +} + +func generateTokenNameWithFallback() string { + name, err := generateTokenName() + if err != nil { + logger := utils.GetDebugLogger() + fmt.Fprintln(logger, err) + name = fmt.Sprintf("cli_%d", time.Now().Unix()) + } + return name +} diff --git a/internal/login/login_darwin.go b/internal/login/login_darwin.go new file mode 100644 index 0000000..4b7b72e --- /dev/null +++ b/internal/login/login_darwin.go @@ -0,0 +1,13 @@ +//go:build darwin + +package login + +import ( + "context" + "os/exec" +) + +func RunOpenCmd(ctx context.Context, input string) error { + cmd := exec.CommandContext(ctx, "open", input) + return cmd.Run() +} diff --git a/internal/login/login_linux.go b/internal/login/login_linux.go new file mode 100644 index 0000000..a8a182c --- /dev/null +++ b/internal/login/login_linux.go @@ -0,0 +1,17 @@ +//go:build linux + +package login + +import ( + "bytes" + "context" + "os" + "os/exec" +) + +func RunOpenCmd(ctx context.Context, input string) error { + if f, err := os.ReadFile("/proc/sys/kernel/osrelease"); err == nil && bytes.Contains(f, []byte("WSL")) { + return exec.CommandContext(ctx, "wslview", input).Run() + } + return exec.CommandContext(ctx, "xdg-open", input).Run() +} diff --git a/internal/login/login_test.go b/internal/login/login_test.go new file mode 100644 index 0000000..758fbc5 --- /dev/null +++ b/internal/login/login_test.go @@ -0,0 +1,91 @@ +package login + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + 
"github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/credentials" + "github.com/zalando/go-keyring" +) + +type MockEncryption struct { + token string + publicKey string +} + +func (enc *MockEncryption) encodedPublicKey() string { + return enc.publicKey +} + +func (enc *MockEncryption) decryptAccessToken(accessToken string, publicKey string, nonce string) (string, error) { + return enc.token, nil +} + +func TestLoginCommand(t *testing.T) { + keyring.MockInit() + + t.Run("accepts --token flag and validates provided value", func(t *testing.T) { + token := string(apitest.RandomAccessToken(t)) + assert.NoError(t, Run(context.Background(), os.Stdout, RunParams{ + Token: token, + Fsys: afero.NewMemMapFs(), + })) + saved, err := credentials.StoreProvider.Get(utils.AccessTokenKey) + assert.NoError(t, err) + assert.Equal(t, token, saved) + }) + + t.Run("goes through automated flow successfully", func(t *testing.T) { + r, w, err := os.Pipe() + require.NoError(t, err) + + sessionId := "random_session_id" + token := string(apitest.RandomAccessToken(t)) + tokenName := "random_token_name" + publicKey := "random_public_key" + + defer gock.OffAll() + + gock.New(utils.GetSupabaseAPIHost()). + Get("/platform/cli/login/" + sessionId). + Reply(200). + JSON(map[string]any{ + "id": "0b0d48f6-878b-4190-88d7-2ca33ed800bc", + "created_at": "2023-03-28T13:50:14.464Z", + "access_token": "picklerick", + "public_key": "iddqd", + "nonce": "idkfa", + }) + + enc := &MockEncryption{publicKey: publicKey, token: token} + runParams := RunParams{ + TokenName: tokenName, + SessionId: sessionId, + Fsys: afero.NewMemMapFs(), + Encryption: enc, + } + assert.NoError(t, Run(context.Background(), w, runParams)) + w.Close() + + var out bytes.Buffer + _, _ = io.Copy(&out, r) + + expectedBrowserUrl := fmt.Sprintf("%s/cli/login?session_id=%s&token_name=%s&public_key=%s", utils.GetSupabaseDashboardURL(), sessionId, tokenName, publicKey) + assert.Contains(t, out.String(), expectedBrowserUrl) + + saved, err := credentials.StoreProvider.Get(utils.AccessTokenKey) + assert.NoError(t, err) + assert.Equal(t, token, saved) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/login/login_windows.go b/internal/login/login_windows.go new file mode 100644 index 0000000..833bbe7 --- /dev/null +++ b/internal/login/login_windows.go @@ -0,0 +1,15 @@ +//go:build windows + +package login + +import ( + "context" + "os" + "os/exec" + "path/filepath" +) + +func RunOpenCmd(ctx context.Context, input string) error { + cmd := exec.CommandContext(ctx, filepath.Join(os.Getenv("SYSTEMROOT"), "System32", "rundll32.exe"), "url.dll,FileProtocolHandler", input) + return cmd.Run() +} diff --git a/internal/logout/logout.go b/internal/logout/logout.go new file mode 100644 index 0000000..abbd191 --- /dev/null +++ b/internal/logout/logout.go @@ -0,0 +1,35 @@ +package logout + +import ( + "context" + "fmt" + "os" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/credentials" +) + +func Run(ctx context.Context, stdout *os.File, fsys afero.Fs) error { + if shouldLogout, err := utils.NewConsole().PromptYesNo(ctx, "Do you want to log out? 
This will remove the access token from your system.", false); err != nil { + return err + } else if !shouldLogout { + return errors.New(context.Canceled) + } + + if err := utils.DeleteAccessToken(fsys); errors.Is(err, utils.ErrNotLoggedIn) { + fmt.Fprintln(os.Stderr, err) + return nil + } else if err != nil { + return err + } + + // Delete all possible stored project credentials + if err := credentials.StoreProvider.DeleteAll(); err != nil { + fmt.Fprintln(utils.GetDebugLogger(), err) + } + + fmt.Fprintln(stdout, "Access token deleted successfully. You are now logged out.") + return nil +} diff --git a/internal/logout/logout_test.go b/internal/logout/logout_test.go new file mode 100644 index 0000000..42f9f8e --- /dev/null +++ b/internal/logout/logout_test.go @@ -0,0 +1,94 @@ +package logout + +import ( + "context" + "os" + "testing" + + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/testing/fstest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/credentials" + "github.com/zalando/go-keyring" +) + +func TestLogoutCommand(t *testing.T) { + token := string(apitest.RandomAccessToken(t)) + + t.Run("login with token and logout", func(t *testing.T) { + keyring.MockInitWithError(keyring.ErrUnsupportedPlatform) + t.Cleanup(fstest.MockStdin(t, "y")) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.SaveAccessToken(token, fsys)) + // Run test + err := Run(context.Background(), os.Stdout, fsys) + // Check error + assert.NoError(t, err) + saved, err := utils.LoadAccessTokenFS(fsys) + assert.ErrorIs(t, err, utils.ErrMissingToken) + assert.Empty(t, saved) + }) + + t.Run("removes all Supabase CLI credentials", func(t *testing.T) { + t.Cleanup(credentials.MockInit()) + require.NoError(t, credentials.StoreProvider.Set(utils.AccessTokenKey, token)) + require.NoError(t, credentials.StoreProvider.Set("project1", "password1")) + require.NoError(t, credentials.StoreProvider.Set("project2", "password2")) + t.Cleanup(fstest.MockStdin(t, "y")) + // Run test + err := Run(context.Background(), os.Stdout, afero.NewMemMapFs()) + // Check error + assert.NoError(t, err) + // Check that access token has been removed + saved, _ := credentials.StoreProvider.Get(utils.AccessTokenKey) + assert.Empty(t, saved) + // check that project 1 has been removed + saved, _ = credentials.StoreProvider.Get("project1") + assert.Empty(t, saved) + // check that project 2 has been removed + saved, _ = credentials.StoreProvider.Get("project2") + assert.Empty(t, saved) + }) + + t.Run("skips logout by default", func(t *testing.T) { + keyring.MockInit() + require.NoError(t, credentials.StoreProvider.Set(utils.AccessTokenKey, token)) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), os.Stdout, fsys) + // Check error + assert.ErrorIs(t, err, context.Canceled) + saved, err := credentials.StoreProvider.Get(utils.AccessTokenKey) + assert.NoError(t, err) + assert.Equal(t, token, saved) + }) + + t.Run("exits 0 if not logged in", func(t *testing.T) { + keyring.MockInit() + t.Cleanup(fstest.MockStdin(t, "y")) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), os.Stdout, fsys) + // Check error + assert.NoError(t, err) + }) + + t.Run("throws error on failure to delete", func(t *testing.T) { + keyring.MockInitWithError(keyring.ErrNotFound) + 
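// With the keyring mocked to fail, deletion falls back to the token file
+ // under $HOME, so clearing HOME below should trigger the path error.
+ 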
t.Cleanup(fstest.MockStdin(t, "y")) + // Setup empty home directory + t.Setenv("HOME", "") + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), os.Stdout, fsys) + // Check error + assert.ErrorContains(t, err, "$HOME is not defined") + }) +} diff --git a/internal/migration/apply/apply.go b/internal/migration/apply/apply.go new file mode 100644 index 0000000..224b342 --- /dev/null +++ b/internal/migration/apply/apply.go @@ -0,0 +1,33 @@ +package apply + +import ( + "context" + + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" +) + +func MigrateAndSeed(ctx context.Context, version string, conn *pgx.Conn, fsys afero.Fs) error { + migrations, err := list.LoadPartialMigrations(version, fsys) + if err != nil { + return err + } + if err := migration.ApplyMigrations(ctx, migrations, conn, afero.NewIOFS(fsys)); err != nil { + return err + } + return applySeedFiles(ctx, conn, fsys) +} + +func applySeedFiles(ctx context.Context, conn *pgx.Conn, fsys afero.Fs) error { + if !utils.Config.Db.Seed.Enabled { + return nil + } + seeds, err := migration.GetPendingSeeds(ctx, utils.Config.Db.Seed.SqlPaths, conn, afero.NewIOFS(fsys)) + if err != nil { + return err + } + return migration.SeedData(ctx, seeds, conn, afero.NewIOFS(fsys)) +} diff --git a/internal/migration/apply/apply_test.go b/internal/migration/apply/apply_test.go new file mode 100644 index 0000000..2860937 --- /dev/null +++ b/internal/migration/apply/apply_test.go @@ -0,0 +1,76 @@ +package apply + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/fstest" + "github.com/supabase/cli/internal/testing/helper" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/pgtest" +) + +func TestMigrateDatabase(t *testing.T) { + t.Run("applies local migration", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + sql := "create schema public" + require.NoError(t, afero.WriteFile(fsys, path, []byte(sql), 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + helper.MockMigrationHistory(conn). + Query(sql). + Reply("CREATE SCHEMA"). + Query(migration.INSERT_MIGRATION_VERSION, "0", "test", []string{sql}). + Reply("INSERT 0 1") + // Run test + err := MigrateAndSeed(context.Background(), "", conn.MockClient(t), fsys) + // Check error + assert.NoError(t, err) + }) + + t.Run("skip seeding when seed config is disabled", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + sql := "create schema public" + require.NoError(t, afero.WriteFile(fsys, path, []byte(sql), 0644)) + seedPath := filepath.Join(utils.SupabaseDirPath, "seed.sql") + // This will raise an error when seeding + require.NoError(t, afero.WriteFile(fsys, seedPath, []byte("INSERT INTO test_table;"), 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + helper.MockMigrationHistory(conn). + Query(sql). + Reply("CREATE SCHEMA"). + Query(migration.INSERT_MIGRATION_VERSION, "0", "test", []string{sql}). 
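+      // Inserting the version row is what marks the migration as applied
+      // in the history table.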
+ Reply("INSERT 0 1") + utils.Config.Db.Seed.Enabled = false + // Run test + err := MigrateAndSeed(context.Background(), "", conn.MockClient(t), fsys) + // No error should be returned since seeding is skipped + assert.NoError(t, err) + }) + + t.Run("ignores empty local directory", func(t *testing.T) { + assert.NoError(t, MigrateAndSeed(context.Background(), "", nil, afero.NewMemMapFs())) + }) + + t.Run("throws error on open failure", func(t *testing.T) { + // Setup in-memory fs + fsys := &fstest.OpenErrorFs{DenyPath: utils.MigrationsDir} + // Run test + err := MigrateAndSeed(context.Background(), "", nil, fsys) + // Check error + assert.ErrorIs(t, err, os.ErrPermission) + }) +} diff --git a/internal/migration/fetch/fetch.go b/internal/migration/fetch/fetch.go new file mode 100644 index 0000000..cb78c70 --- /dev/null +++ b/internal/migration/fetch/fetch.go @@ -0,0 +1,53 @@ +package fetch + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" +) + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + if err := utils.MkdirIfNotExistFS(fsys, utils.MigrationsDir); err != nil { + return err + } + if empty, err := afero.IsEmpty(fsys, utils.MigrationsDir); err != nil { + return errors.Errorf("failed to read migrations: %w", err) + } else if !empty { + title := fmt.Sprintf("Do you want to overwrite existing files in %s directory?", utils.Bold(utils.MigrationsDir)) + if shouldOverwrite, err := utils.NewConsole().PromptYesNo(ctx, title, true); err != nil { + return err + } else if !shouldOverwrite { + return errors.New(context.Canceled) + } + } + result, err := fetchMigrationHistory(ctx, config, options...) + if err != nil { + return err + } + for _, r := range result { + name := fmt.Sprintf("%s_%s.sql", r.Version, r.Name) + path := filepath.Join(utils.MigrationsDir, name) + contents := strings.Join(r.Statements, ";\n") + ";\n" + if err := afero.WriteFile(fsys, path, []byte(contents), 0644); err != nil { + return errors.Errorf("failed to write migration: %w", err) + } + } + return nil +} + +func fetchMigrationHistory(ctx context.Context, config pgconn.Config, options ...func(*pgx.ConnConfig)) ([]migration.MigrationFile, error) { + conn, err := utils.ConnectByConfig(ctx, config, options...) + if err != nil { + return nil, err + } + defer conn.Close(context.Background()) + return migration.ReadMigrationTable(ctx, conn) +} diff --git a/internal/migration/list/list.go b/internal/migration/list/list.go new file mode 100644 index 0000000..3107d4e --- /dev/null +++ b/internal/migration/list/list.go @@ -0,0 +1,105 @@ +package list + +import ( + "context" + "fmt" + "math" + "strconv" + + "github.com/charmbracelet/glamour" + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" +) + +func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + remoteVersions, err := loadRemoteVersions(ctx, config, options...) 
+ if err != nil { + return err + } + localVersions, err := LoadLocalVersions(fsys) + if err != nil { + return err + } + table := makeTable(remoteVersions, localVersions) + return RenderTable(table) +} + +func loadRemoteVersions(ctx context.Context, config pgconn.Config, options ...func(*pgx.ConnConfig)) ([]string, error) { + conn, err := utils.ConnectByConfig(ctx, config, options...) + if err != nil { + return nil, err + } + defer conn.Close(context.Background()) + return migration.ListRemoteMigrations(ctx, conn) +} + +func makeTable(remoteMigrations, localMigrations []string) string { + var err error + table := "|Local|Remote|Time (UTC)|\n|-|-|-|\n" + for i, j := 0, 0; i < len(remoteMigrations) || j < len(localMigrations); { + remoteTimestamp := math.MaxInt + if i < len(remoteMigrations) { + if remoteTimestamp, err = strconv.Atoi(remoteMigrations[i]); err != nil { + i++ + continue + } + } + localTimestamp := math.MaxInt + if j < len(localMigrations) { + if localTimestamp, err = strconv.Atoi(localMigrations[j]); err != nil { + j++ + continue + } + } + // Top to bottom chronological order + if localTimestamp < remoteTimestamp { + table += fmt.Sprintf("|`%s`|` `|`%s`|\n", localMigrations[j], utils.FormatTimestampVersion(localMigrations[j])) + j++ + } else if remoteTimestamp < localTimestamp { + table += fmt.Sprintf("|` `|`%s`|`%s`|\n", remoteMigrations[i], utils.FormatTimestampVersion(remoteMigrations[i])) + i++ + } else { + table += fmt.Sprintf("|`%s`|`%s`|`%s`|\n", localMigrations[j], remoteMigrations[i], utils.FormatTimestampVersion(remoteMigrations[i])) + i++ + j++ + } + } + return table +} + +func RenderTable(markdown string) error { + r, err := glamour.NewTermRenderer( + glamour.WithAutoStyle(), + glamour.WithWordWrap(-1), + ) + if err != nil { + return errors.Errorf("failed to initialise terminal renderer: %w", err) + } + out, err := r.Render(markdown) + if err != nil { + return errors.Errorf("failed to render markdown: %w", err) + } + fmt.Print(out) + return nil +} + +func LoadLocalVersions(fsys afero.Fs) ([]string, error) { + var versions []string + filter := func(v string) bool { + versions = append(versions, v) + return true + } + _, err := migration.ListLocalMigrations(utils.MigrationsDir, afero.NewIOFS(fsys), filter) + return versions, err +} + +func LoadPartialMigrations(version string, fsys afero.Fs) ([]string, error) { + filter := func(v string) bool { + return version == "" || v <= version + } + return migration.ListLocalMigrations(utils.MigrationsDir, afero.NewIOFS(fsys), filter) +} diff --git a/internal/migration/list/list_test.go b/internal/migration/list/list_test.go new file mode 100644 index 0000000..b23fa4a --- /dev/null +++ b/internal/migration/list/list_test.go @@ -0,0 +1,193 @@ +package list + +import ( + "context" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/jackc/pgconn" + "github.com/jackc/pgerrcode" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/fstest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestMigrationList(t *testing.T) { + t.Run("lists remote migrations", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + 
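// An empty result set should still render the table header without error.
+ 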
conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 0") + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) + + t.Run("throws error on remote failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), pgconn.Config{}, fsys) + // Check error + assert.ErrorContains(t, err, "invalid port (outside range)") + }) + + t.Run("throws error on open failure", func(t *testing.T) { + // Setup in-memory fs + fsys := &fstest.OpenErrorFs{DenyPath: utils.MigrationsDir} + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 0") + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.ErrorIs(t, err, os.ErrPermission) + }) +} + +func TestRemoteMigrations(t *testing.T) { + t.Run("loads migration versions", func(t *testing.T) { + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 1", []interface{}{"20220727064247"}) + // Run test + versions, err := loadRemoteVersions(context.Background(), dbConfig, conn.Intercept) + // Check error + assert.NoError(t, err) + assert.ElementsMatch(t, []string{"20220727064247"}, versions) + }) + + t.Run("throws error on connect failure", func(t *testing.T) { + // Run test + _, err := loadRemoteVersions(context.Background(), pgconn.Config{}) + // Check error + assert.ErrorContains(t, err, "invalid port (outside range)") + }) + + t.Run("loads empty migrations on missing table", func(t *testing.T) { + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + ReplyError(pgerrcode.UndefinedTable, "relation \"supabase_migrations.schema_migrations\" does not exist") + // Run test + versions, err := loadRemoteVersions(context.Background(), dbConfig, conn.Intercept) + // Check error + assert.NoError(t, err) + assert.Empty(t, versions) + }) + + t.Run("throws error on invalid row", func(t *testing.T) { + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). 
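+ // A row tuple with zero fields forces the scan mismatch asserted below.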
+ Reply("SELECT 1", []interface{}{}) + // Run test + _, err := loadRemoteVersions(context.Background(), dbConfig, conn.Intercept) + // Check error + assert.ErrorContains(t, err, "number of field descriptions must equal number of destinations, got 0 and 1") + }) +} + +func TestLocalMigrations(t *testing.T) { + t.Run("loads migration versions", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "20220727064246_test.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte{}, 0644)) + path = filepath.Join(utils.MigrationsDir, "20220727064248_test.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte{}, 0644)) + // Run test + versions, err := LoadLocalVersions(fsys) + // Check error + assert.NoError(t, err) + assert.ElementsMatch(t, []string{"20220727064246", "20220727064248"}, versions) + }) + + t.Run("ignores outdated and invalid files", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "20211208000000_init.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte{}, 0644)) + path = filepath.Join(utils.MigrationsDir, "20211208000001_invalid.ts") + require.NoError(t, afero.WriteFile(fsys, path, []byte{}, 0644)) + // Run test + versions, err := LoadLocalVersions(fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, versions) + }) + + t.Run("throws error on open failure", func(t *testing.T) { + // Setup in-memory fs + fsys := &fstest.OpenErrorFs{DenyPath: utils.MigrationsDir} + // Run test + _, err := LoadLocalVersions(fsys) + // Check error + assert.ErrorIs(t, err, os.ErrPermission) + }) +} + +func TestMakeTable(t *testing.T) { + t.Run("tabulate version", func(t *testing.T) { + // Run test + table := makeTable([]string{"0", "2"}, []string{"0", "1"}) + // Check error + lines := strings.Split(strings.TrimSpace(table), "\n") + assert.ElementsMatch(t, []string{ + "|Local|Remote|Time (UTC)|", + "|-|-|-|", + "|`0`|`0`|`0`|", + "|`1`|` `|`1`|", + "|` `|`2`|`2`|", + }, lines) + }) + + t.Run("tabulate timestamp", func(t *testing.T) { + // Run test + table := makeTable([]string{"20220727064246", "20220727064248"}, []string{"20220727064246", "20220727064247"}) + // Check error + lines := strings.Split(strings.TrimSpace(table), "\n") + assert.ElementsMatch(t, []string{ + "|Local|Remote|Time (UTC)|", + "|-|-|-|", + "|`20220727064246`|`20220727064246`|`2022-07-27 06:42:46`|", + "|`20220727064247`|` `|`2022-07-27 06:42:47`|", + "|` `|`20220727064248`|`2022-07-27 06:42:48`|", + }, lines) + }) + + t.Run("ignores string values", func(t *testing.T) { + // Run test + table := makeTable([]string{"a", "c"}, []string{"a", "b"}) + // Check error + lines := strings.Split(strings.TrimSpace(table), "\n") + assert.ElementsMatch(t, []string{ + "|Local|Remote|Time (UTC)|", + "|-|-|-|", + }, lines) + }) +} diff --git a/internal/migration/new/new.go b/internal/migration/new/new.go new file mode 100644 index 0000000..be232b5 --- /dev/null +++ b/internal/migration/new/new.go @@ -0,0 +1,46 @@ +package new + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" +) + +func Run(migrationName string, stdin afero.File, fsys afero.Fs) error { + path := GetMigrationPath(utils.GetCurrentTimestamp(), migrationName) + if err := utils.MkdirIfNotExistFS(fsys, filepath.Dir(path)); err != nil { + return err + } + f, err := fsys.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 
0644) + if err != nil { + return errors.Errorf("failed to open migration file: %w", err) + } + defer func() { + fmt.Println("Created new migration at " + utils.Bold(path)) + // File descriptor will always be closed when process quits + _ = f.Close() + }() + return CopyStdinIfExists(stdin, f) +} + +func GetMigrationPath(timestamp, name string) string { + fullName := fmt.Sprintf("%s_%s.sql", timestamp, name) + return filepath.Join(utils.MigrationsDir, fullName) +} + +func CopyStdinIfExists(stdin afero.File, dst io.Writer) error { + if fi, err := stdin.Stat(); err != nil { + return errors.Errorf("failed to initialise stdin: %w", err) + } else if (fi.Mode() & os.ModeCharDevice) == 0 { + // Ref: https://stackoverflow.com/a/26567513 + if _, err := io.Copy(dst, stdin); err != nil { + return errors.Errorf("failed to copy from stdin: %w", err) + } + } + return nil +} diff --git a/internal/migration/new/new_test.go b/internal/migration/new/new_test.go new file mode 100644 index 0000000..39e2fe1 --- /dev/null +++ b/internal/migration/new/new_test.go @@ -0,0 +1,72 @@ +package new + +import ( + "os" + "path/filepath" + "testing" + + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/utils" +) + +func TestNewCommand(t *testing.T) { + t.Run("creates new migration file", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup empty stdin + stdin, err := fsys.Create("/dev/stdin") + require.NoError(t, err) + // Run test + assert.NoError(t, Run("test_migrate", stdin, fsys)) + // Validate output + files, err := afero.ReadDir(fsys, utils.MigrationsDir) + assert.NoError(t, err) + assert.Equal(t, 1, len(files)) + assert.Regexp(t, `([0-9]{14})_test_migrate\.sql`, files[0].Name()) + }) + + t.Run("streams content from pipe", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup stdin + r, w, err := os.Pipe() + require.NoError(t, err) + script := "create table pet;\ndrop table pet;\n" + _, err = w.WriteString(script) + require.NoError(t, err) + require.NoError(t, w.Close()) + // Run test + assert.NoError(t, Run("test_migrate", r, fsys)) + // Validate output + files, err := afero.ReadDir(fsys, utils.MigrationsDir) + assert.NoError(t, err) + assert.Equal(t, 1, len(files)) + path := filepath.Join(utils.MigrationsDir, files[0].Name()) + contents, err := afero.ReadFile(fsys, path) + assert.NoError(t, err) + assert.Equal(t, []byte(script), contents) + }) + + t.Run("throws error on failure to create directory", func(t *testing.T) { + // Setup read-only fs + fsys := afero.NewMemMapFs() + // Setup empty stdin + stdin, err := fsys.Create("/dev/stdin") + require.NoError(t, err) + // Run test + assert.Error(t, Run("test_migrate", stdin, afero.NewReadOnlyFs(fsys))) + }) + + t.Run("throws error on closed pipe", func(t *testing.T) { + // Setup read-only fs + fsys := afero.NewMemMapFs() + // Setup empty stdin + r, _, err := os.Pipe() + require.NoError(t, err) + require.NoError(t, r.Close()) + // Run test + assert.Error(t, Run("test_migrate", r, fsys)) + }) +} diff --git a/internal/migration/repair/repair.go b/internal/migration/repair/repair.go new file mode 100644 index 0000000..c067000 --- /dev/null +++ b/internal/migration/repair/repair.go @@ -0,0 +1,108 @@ +package repair + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strconv" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + 
"github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" +) + +const ( + Applied = "applied" + Reverted = "reverted" +) + +var ErrInvalidVersion = errors.New("invalid version number") + +func Run(ctx context.Context, config pgconn.Config, version []string, status string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + for _, v := range version { + if _, err := strconv.Atoi(v); err != nil { + return errors.Errorf("failed to parse %s: %w", v, ErrInvalidVersion) + } + } + repairAll := len(version) == 0 + if repairAll { + msg := "Do you want to repair the entire migration history table to match local migration files?" + if shouldRepair, err := utils.NewConsole().PromptYesNo(ctx, msg, false); err != nil { + return err + } else if !shouldRepair { + return errors.New(context.Canceled) + } + local, err := list.LoadLocalVersions(fsys) + if err != nil { + return err + } + version = append(version, local...) + } + conn, err := utils.ConnectByConfig(ctx, config, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + // Update migration history + if err = UpdateMigrationTable(ctx, conn, version, status, repairAll, fsys); err == nil { + utils.CmdSuggestion = fmt.Sprintf("Run %s to show the updated migration history.", utils.Aqua("supabase migration list")) + } + return err +} + +func UpdateMigrationTable(ctx context.Context, conn *pgx.Conn, version []string, status string, repairAll bool, fsys afero.Fs) error { + if err := migration.CreateMigrationTable(ctx, conn); err != nil { + return err + } + // Data statements don't mutate schemas, safe to use statement cache + batch := &pgx.Batch{} + if repairAll { + batch.Queue(migration.TRUNCATE_VERSION_TABLE) + } + switch status { + case Applied: + for _, v := range version { + f, err := NewMigrationFromVersion(v, fsys) + if err != nil { + return err + } + batch.Queue(migration.INSERT_MIGRATION_VERSION, f.Version, f.Name, f.Statements) + } + case Reverted: + if !repairAll { + batch.Queue(migration.DELETE_MIGRATION_VERSION, version) + } + } + if err := conn.SendBatch(ctx, batch).Close(); err != nil { + return errors.Errorf("failed to update migration table: %w", err) + } + if !repairAll { + fmt.Fprintf(os.Stderr, "Repaired migration history: %v => %s\n", version, status) + } + return nil +} + +func GetMigrationFile(version string, fsys afero.Fs) (string, error) { + path := filepath.Join(utils.MigrationsDir, version+"_*.sql") + matches, err := afero.Glob(fsys, path) + if err != nil { + return "", errors.Errorf("failed to glob migration files: %w", err) + } + if len(matches) == 0 { + return "", errors.Errorf("glob %s: %w", path, os.ErrNotExist) + } + return matches[0], nil +} + +func NewMigrationFromVersion(version string, fsys afero.Fs) (*migration.MigrationFile, error) { + name, err := GetMigrationFile(version, fsys) + if err != nil { + return nil, err + } + return migration.NewMigrationFromFile(name, afero.NewIOFS(fsys)) +} diff --git a/internal/migration/repair/repair_test.go b/internal/migration/repair/repair_test.go new file mode 100644 index 0000000..4dea632 --- /dev/null +++ b/internal/migration/repair/repair_test.go @@ -0,0 +1,155 @@ +package repair + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/jackc/pgconn" + "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + 
"github.com/supabase/cli/internal/testing/fstest" + "github.com/supabase/cli/internal/testing/helper" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "db.supabase.com", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestRepairCommand(t *testing.T) { + t.Run("applies new version", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte("select 1"), 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + helper.MockMigrationHistory(conn). + Query(migration.INSERT_MIGRATION_VERSION, "0", "test", []string{"select 1"}). + Reply("INSERT 0 1") + // Run test + err := Run(context.Background(), dbConfig, []string{"0"}, Applied, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) + + t.Run("reverts old version", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + helper.MockMigrationHistory(conn). + Query(migration.DELETE_MIGRATION_VERSION, []string{"0"}). + Reply("DELETE 1") + // Run test + err := Run(context.Background(), dbConfig, []string{"0"}, Reverted, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) + + t.Run("throws error on invalid version", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), pgconn.Config{}, []string{"invalid"}, Applied, fsys) + // Check error + assert.ErrorIs(t, err, ErrInvalidVersion) + }) + + t.Run("throws error on connect failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), pgconn.Config{}, []string{"0"}, Applied, fsys) + // Check error + assert.ErrorContains(t, err, "invalid port (outside range)") + }) + + t.Run("throws error on insert failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte(""), 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + helper.MockMigrationHistory(conn). + Query(migration.INSERT_MIGRATION_VERSION, "0", "test", nil). + ReplyError(pgerrcode.DuplicateObject, `relation "supabase_migrations.schema_migrations" does not exist`) + // Run test + err := Run(context.Background(), dbConfig, []string{"0"}, Applied, fsys, conn.Intercept) + // Check error + assert.ErrorContains(t, err, `ERROR: relation "supabase_migrations.schema_migrations" does not exist (SQLSTATE 42710)`) + }) +} + +func TestRepairAll(t *testing.T) { + t.Run("repairs whole history", func(t *testing.T) { + t.Cleanup(fstest.MockStdin(t, "y")) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_test.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte("select 1"), 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + helper.MockMigrationHistory(conn). + Query(migration.TRUNCATE_VERSION_TABLE + `;INSERT INTO supabase_migrations.schema_migrations(version, name, statements) VALUES( '0' , 'test' , '{select 1}' )`). + Reply("TRUNCATE TABLE"). 
+ Reply("INSERT 0 1") + // Run test + err := Run(context.Background(), dbConfig, nil, Applied, fsys, conn.Intercept, func(cc *pgx.ConnConfig) { + cc.PreferSimpleProtocol = true + }) + // Check error + assert.NoError(t, err) + }) + + t.Run("reverts whole history", func(t *testing.T) { + t.Cleanup(fstest.MockStdin(t, "y")) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + helper.MockMigrationHistory(conn). + Query(migration.TRUNCATE_VERSION_TABLE). + Reply("TRUNCATE TABLE") + // Run test + err := Run(context.Background(), dbConfig, nil, Reverted, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) + + t.Run("throws error on cancel", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), dbConfig, nil, Applied, fsys) + // Check error + assert.ErrorIs(t, err, context.Canceled) + }) + + t.Run("throws error on permission denied", func(t *testing.T) { + t.Cleanup(fstest.MockStdin(t, "y")) + // Setup in-memory fs + fsys := &fstest.OpenErrorFs{DenyPath: utils.MigrationsDir} + // Run test + err := Run(context.Background(), dbConfig, nil, Applied, fsys) + // Check error + assert.ErrorIs(t, err, os.ErrPermission) + }) +} diff --git a/internal/migration/squash/squash.go b/internal/migration/squash/squash.go new file mode 100644 index 0000000..22f3278 --- /dev/null +++ b/internal/migration/squash/squash.go @@ -0,0 +1,189 @@ +package squash + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "os" + "strconv" + "time" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/diff" + "github.com/supabase/cli/internal/db/dump" + "github.com/supabase/cli/internal/db/start" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/migration/repair" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" +) + +var ErrMissingVersion = errors.New("version not found") + +func Run(ctx context.Context, version string, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + if len(version) > 0 { + if _, err := strconv.Atoi(version); err != nil { + return errors.New(repair.ErrInvalidVersion) + } + if _, err := repair.GetMigrationFile(version, fsys); err != nil { + return err + } + } + // 1. Squash local migrations + if err := squashToVersion(ctx, version, fsys, options...); err != nil { + return err + } + // 2. Update migration history + if utils.IsLocalDatabase(config) { + return nil + } + if shouldUpdate, err := utils.NewConsole().PromptYesNo(ctx, "Update remote migration history table?", true); err != nil { + return err + } else if !shouldUpdate { + return nil + } + return baselineMigrations(ctx, config, version, fsys, options...) 
+} + +func squashToVersion(ctx context.Context, version string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + migrations, err := list.LoadPartialMigrations(version, fsys) + if err != nil { + return err + } + if len(migrations) == 0 { + return errors.New(ErrMissingVersion) + } + // Migrate to target version and dump + local := migrations[len(migrations)-1] + if len(migrations) == 1 { + fmt.Fprintln(os.Stderr, utils.Bold(local), "is already the earliest migration.") + return nil + } + if err := squashMigrations(ctx, migrations, fsys, options...); err != nil { + return err + } + fmt.Fprintln(os.Stderr, "Squashed local migrations to", utils.Bold(local)) + // Remove merged files + for _, path := range migrations[:len(migrations)-1] { + if err := fsys.Remove(path); err != nil { + fmt.Fprintln(os.Stderr, err) + } + } + return nil +} + +func squashMigrations(ctx context.Context, migrations []string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + // 1. Start shadow database + shadow, err := diff.CreateShadowDatabase(ctx, utils.Config.Db.ShadowPort) + if err != nil { + return err + } + defer utils.DockerRemove(shadow) + if err := start.WaitForHealthyService(ctx, start.HealthTimeout, shadow); err != nil { + return err + } + conn, err := diff.ConnectShadowDatabase(ctx, 10*time.Second, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + if err := start.SetupDatabase(ctx, conn, shadow[:12], os.Stderr, fsys); err != nil { + return err + } + // Assuming entities in managed schemas are not altered, we can simply diff the dumps before and after migrations. + schemas := []string{"auth", "storage"} + config := pgconn.Config{ + Host: utils.Config.Hostname, + Port: utils.Config.Db.ShadowPort, + User: "postgres", + Password: utils.Config.Db.Password, + Database: "postgres", + } + var before, after bytes.Buffer + if err := dump.DumpSchema(ctx, config, schemas, false, false, &before); err != nil { + return err + } + // 2. Migrate to target version + if err := migration.ApplyMigrations(ctx, migrations, conn, afero.NewIOFS(fsys)); err != nil { + return err + } + if err := dump.DumpSchema(ctx, config, schemas, false, false, &after); err != nil { + return err + } + // 3. Dump migrated schema + path := migrations[len(migrations)-1] + f, err := fsys.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return errors.Errorf("failed to open migration file: %w", err) + } + defer f.Close() + if err := dump.DumpSchema(ctx, config, nil, false, false, f); err != nil { + return err + } + // 4. 
Append managed schema diffs + fmt.Fprint(f, separatorComment) + return lineByLineDiff(&before, &after, f) +} + +const separatorComment = ` +-- +-- Dumped schema changes for auth and storage +-- + +` + +func lineByLineDiff(before, after io.Reader, f io.Writer) error { + anchor := bufio.NewScanner(before) + anchor.Scan() + // Assuming before is always a subset of after + scanner := bufio.NewScanner(after) + for scanner.Scan() { + line := scanner.Text() + if line == anchor.Text() { + anchor.Scan() + continue + } + if _, err := fmt.Fprintln(f, line); err != nil { + return errors.Errorf("failed to write line: %w", err) + } + } + return nil +} + +func baselineMigrations(ctx context.Context, config pgconn.Config, version string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + if len(version) == 0 { + // Expecting no errors here because the caller should have handled them + if localVersions, err := list.LoadLocalVersions(fsys); len(localVersions) > 0 { + version = localVersions[0] + } else if err != nil { + logger := utils.GetDebugLogger() + fmt.Fprintln(logger, err) + } + } + fmt.Fprintln(os.Stderr, "Baselining migration history to", version) + conn, err := utils.ConnectByConfig(ctx, config, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + if err := migration.CreateMigrationTable(ctx, conn); err != nil { + return err + } + m, err := repair.NewMigrationFromVersion(version, fsys) + if err != nil { + return err + } + // Data statements don't mutate schemas, safe to use statement cache + batch := pgx.Batch{} + batch.Queue(migration.DELETE_MIGRATION_BEFORE, m.Version) + batch.Queue(migration.INSERT_MIGRATION_VERSION, m.Version, m.Name, m.Statements) + if err := conn.SendBatch(ctx, &batch).Close(); err != nil { + return errors.Errorf("failed to update migration history: %w", err) + } + return nil +} diff --git a/internal/migration/squash/squash_test.go b/internal/migration/squash/squash_test.go new file mode 100644 index 0000000..b9092d9 --- /dev/null +++ b/internal/migration/squash/squash_test.go @@ -0,0 +1,440 @@ +package squash + +import ( + "bytes" + "context" + "embed" + "errors" + "fmt" + "net/http" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/h2non/gock" + "github.com/jackc/pgconn" + "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/db/start" + "github.com/supabase/cli/internal/migration/repair" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/testing/fstest" + "github.com/supabase/cli/internal/testing/helper" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "db.supabase.co", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestSquashCommand(t *testing.T) { + t.Run("squashes local migrations", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, flags.LoadConfig(fsys)) + paths := []string{ + filepath.Join(utils.MigrationsDir, "0_init.sql"), + filepath.Join(utils.MigrationsDir, "1_target.sql"), + } + sql := "create schema test" + require.NoError(t, afero.WriteFile(fsys, paths[0], []byte(sql), 0644)) + require.NoError(t, afero.WriteFile(fsys, 
paths[1], []byte{}, 0644)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), "test-shadow-db") + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + gock.New(utils.Docker.DaemonHost()). + Delete("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db"). + Reply(http.StatusOK) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Realtime.Image), "test-realtime") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-realtime", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Storage.Image), "test-storage") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-storage", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Auth.Image), "test-auth") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-auth", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), "test-db") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-db", sql)) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), "test-db") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-db", sql)) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), "test-db") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-db", sql)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + helper.MockMigrationHistory(conn). + Query(sql). + Reply("CREATE SCHEMA"). + Query(migration.INSERT_MIGRATION_VERSION, "0", "init", []string{sql}). + Reply("INSERT 0 1"). + Query(migration.INSERT_MIGRATION_VERSION, "1", "target", nil). + Reply("INSERT 0 1") + // Run test + err := Run(context.Background(), "", pgconn.Config{ + Host: "127.0.0.1", + Port: 54322, + }, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + exists, err := afero.Exists(fsys, paths[0]) + assert.NoError(t, err) + assert.False(t, exists) + match, err := afero.FileContainsBytes(fsys, paths[1], []byte(sql)) + assert.NoError(t, err) + assert.True(t, match) + }) + + t.Run("baselines migration history", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_init.sql") + sql := "create schema test" + require.NoError(t, afero.WriteFile(fsys, path, []byte(sql), 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + helper.MockMigrationHistory(conn). + Query(fmt.Sprintf("DELETE FROM supabase_migrations.schema_migrations WHERE version <= '0' ;INSERT INTO supabase_migrations.schema_migrations(version, name, statements) VALUES( '0' , 'init' , '{%s}' )", sql)). 
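+			// The batched DELETE and INSERT arrive as the single simple-protocol
+			// query string mocked above.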
+ Reply("INSERT 0 1") + // Run test + err := Run(context.Background(), "0", dbConfig, fsys, conn.Intercept, func(cc *pgx.ConnConfig) { + cc.PreferSimpleProtocol = true + }) + // Check error + assert.NoError(t, err) + match, err := afero.FileContainsBytes(fsys, path, []byte(sql)) + assert.NoError(t, err) + assert.True(t, match) + }) + + t.Run("throws error on invalid version", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), "0_init", pgconn.Config{}, fsys) + // Check error + assert.ErrorIs(t, err, repair.ErrInvalidVersion) + }) + + t.Run("throws error on missing migration", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), "0", pgconn.Config{}, fsys) + // Check error + assert.ErrorIs(t, err, os.ErrNotExist) + }) +} + +func TestSquashVersion(t *testing.T) { + t.Run("throws error on permission denied", func(t *testing.T) { + // Setup in-memory fs + fsys := &fstest.OpenErrorFs{DenyPath: utils.MigrationsDir} + // Run test + err := squashToVersion(context.Background(), "0", fsys) + // Check error + assert.ErrorIs(t, err, os.ErrPermission) + }) + + t.Run("throws error on missing version", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := squashToVersion(context.Background(), "0", fsys) + // Check error + assert.ErrorIs(t, err, ErrMissingVersion) + }) + + t.Run("throws error on shadow create failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_init.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte{}, 0644)) + path = filepath.Join(utils.MigrationsDir, "1_target.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte{}, 0644)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/images/" + utils.GetRegistryImageUrl(utils.Config.Db.Image) + "/json"). + ReplyError(errors.New("network error")) + // Run test + err := squashToVersion(context.Background(), "1", fsys) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestSquashMigrations(t *testing.T) { + utils.Config.Db.MajorVersion = 15 + utils.Config.Db.ShadowPort = 54320 + + t.Run("throws error on shadow create failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/images/" + utils.GetRegistryImageUrl(utils.Config.Db.Image) + "/json"). + ReplyError(errors.New("network error")) + // Run test + err := squashMigrations(context.Background(), nil, fsys) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on health check failure", func(t *testing.T) { + start.HealthTimeout = time.Millisecond + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), "test-shadow-db") + gock.New(utils.Docker.DaemonHost()). 
+ Get("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: false, + Status: "exited", + }, + }}) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db/logs"). + Reply(http.StatusServiceUnavailable) + gock.New(utils.Docker.DaemonHost()). + Delete("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db"). + Reply(http.StatusOK) + // Run test + err := squashMigrations(context.Background(), nil, fsys) + // Check error + assert.ErrorContains(t, err, "test-shadow-db container is not running: exited") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on shadow migrate failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, flags.LoadConfig(fsys)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), "test-shadow-db") + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + gock.New(utils.Docker.DaemonHost()). + Delete("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db"). + Reply(http.StatusOK) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/images/" + utils.GetRegistryImageUrl(utils.Config.Realtime.Image) + "/json"). + ReplyError(errors.New("network error")) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + // Run test + err := squashMigrations(context.Background(), nil, fsys, conn.Intercept) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on permission denied", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_init.sql") + sql := "create schema test" + require.NoError(t, afero.WriteFile(fsys, path, []byte(sql), 0644)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), "test-shadow-db") + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + gock.New(utils.Docker.DaemonHost()). + Delete("/v" + utils.Docker.ClientVersion() + "/containers/test-shadow-db"). 
+ Reply(http.StatusOK) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Realtime.Image), "test-realtime") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-realtime", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Storage.Image), "test-storage") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-storage", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Auth.Image), "test-auth") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-auth", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), "test-db") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-db", sql)) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), "test-db") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-db", sql)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + helper.MockMigrationHistory(conn). + Query(sql). + Reply("CREATE SCHEMA"). + Query(migration.INSERT_MIGRATION_VERSION, "0", "init", []string{sql}). + Reply("INSERT 0 1") + // Run test + err := squashMigrations(context.Background(), []string{path}, afero.NewReadOnlyFs(fsys), conn.Intercept) + // Check error + assert.ErrorIs(t, err, os.ErrPermission) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestBaselineMigration(t *testing.T) { + t.Run("baselines earliest version", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + paths := []string{ + filepath.Join(utils.MigrationsDir, "0_init.sql"), + filepath.Join(utils.MigrationsDir, "1_target.sql"), + } + sql := "create schema test" + require.NoError(t, afero.WriteFile(fsys, paths[0], []byte(sql), 0644)) + require.NoError(t, afero.WriteFile(fsys, paths[1], []byte{}, 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + helper.MockMigrationHistory(conn). + Query(fmt.Sprintf("DELETE FROM supabase_migrations.schema_migrations WHERE version <= '0' ;INSERT INTO supabase_migrations.schema_migrations(version, name, statements) VALUES( '0' , 'init' , '{%s}' )", sql)). + Reply("INSERT 0 1") + // Run test + err := baselineMigrations(context.Background(), dbConfig, "", fsys, conn.Intercept, func(cc *pgx.ConnConfig) { + cc.PreferSimpleProtocol = true + }) + // Check error + assert.NoError(t, err) + }) + + t.Run("throws error on connect failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := baselineMigrations(context.Background(), pgconn.Config{}, "0", fsys) + // Check error + assert.ErrorContains(t, err, "invalid port (outside range)") + }) + + t.Run("throws error on query failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + path := filepath.Join(utils.MigrationsDir, "0_init.sql") + require.NoError(t, afero.WriteFile(fsys, path, []byte(""), 0644)) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + helper.MockMigrationHistory(conn). + Query(fmt.Sprintf("DELETE FROM supabase_migrations.schema_migrations WHERE version <= '%[1]s' ;INSERT INTO supabase_migrations.schema_migrations(version, name, statements) VALUES( '%[1]s' , 'init' , null )", "0")). 
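+			// The history update is rejected with an insufficient-privilege
+			// error (SQLSTATE 42501).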
+ ReplyError(pgerrcode.InsufficientPrivilege, "permission denied for relation supabase_migrations") + // Run test + err := baselineMigrations(context.Background(), dbConfig, "0", fsys, conn.Intercept, func(cc *pgx.ConnConfig) { + cc.PreferSimpleProtocol = true + }) + // Check error + assert.ErrorContains(t, err, `ERROR: permission denied for relation supabase_migrations (SQLSTATE 42501)`) + }) + + t.Run("throws error on missing file", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + helper.MockMigrationHistory(conn) + // Run test + err := baselineMigrations(context.Background(), dbConfig, "0", fsys, conn.Intercept) + // Check error + assert.ErrorIs(t, err, os.ErrNotExist) + }) +} + +//go:embed testdata/*.sql +var testdata embed.FS + +func TestLineByLine(t *testing.T) { + t.Run("diffs output from pg_dump", func(t *testing.T) { + before, err := testdata.Open("testdata/before.sql") + require.NoError(t, err) + after, err := testdata.Open("testdata/after.sql") + require.NoError(t, err) + expected, err := testdata.ReadFile("testdata/diff.sql") + require.NoError(t, err) + // Run test + var out bytes.Buffer + err = lineByLineDiff(before, after, &out) + // Check error + assert.NoError(t, err) + assert.Equal(t, expected, out.Bytes()) + }) + + t.Run("diffs shorter before", func(t *testing.T) { + before := strings.NewReader("select 1;") + after := strings.NewReader("select 0;\nselect 1;\nselect 2;") + // Run test + var out bytes.Buffer + err := lineByLineDiff(before, after, &out) + // Check error + assert.NoError(t, err) + assert.Equal(t, "select 0;\nselect 2;\n", out.String()) + }) + + t.Run("diffs shorter after", func(t *testing.T) { + before := strings.NewReader("select 1;\nselect 2;") + after := strings.NewReader("select 1;") + // Run test + var out bytes.Buffer + err := lineByLineDiff(before, after, &out) + // Check error + assert.NoError(t, err) + assert.Equal(t, "", out.String()) + }) + + t.Run("diffs no match", func(t *testing.T) { + before := strings.NewReader("select 0;\nselect 1;") + after := strings.NewReader("select 1;") + // Run test + var out bytes.Buffer + err := lineByLineDiff(before, after, &out) + // Check error + assert.NoError(t, err) + assert.Equal(t, "select 1;\n", out.String()) + }) +} diff --git a/internal/migration/squash/testdata/after.sql b/internal/migration/squash/testdata/after.sql new file mode 100644 index 0000000..a8422bb --- /dev/null +++ b/internal/migration/squash/testdata/after.sql @@ -0,0 +1,109 @@ + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +CREATE SCHEMA IF NOT EXISTS "storage"; + +ALTER SCHEMA "storage" OWNER TO "supabase_admin"; + +CREATE OR REPLACE FUNCTION "storage"."can_insert_object"("bucketid" "text", "name" "text", "owner" "uuid", "metadata" "jsonb") RETURNS "void" + LANGUAGE "plpgsql" + AS $$ +BEGIN + INSERT INTO "storage"."objects" ("bucket_id", "name", "owner", "metadata") VALUES (bucketid, name, owner, metadata); + -- hack to rollback the successful insert + RAISE sqlstate 'PT200' using + message = 'ROLLBACK', + detail = 'rollback successful insert'; +END +$$; + +ALTER FUNCTION "storage"."can_insert_object"("bucketid" "text", "name" "text", "owner" 
"uuid", "metadata" "jsonb") OWNER TO "supabase_storage_admin"; + +CREATE TABLE IF NOT EXISTS "storage"."objects" ( + "id" "uuid" DEFAULT "gen_random_uuid"() NOT NULL, + "bucket_id" "text", + "name" "text", + "owner" "uuid", + "created_at" timestamp with time zone DEFAULT "now"(), + "updated_at" timestamp with time zone DEFAULT "now"(), + "last_accessed_at" timestamp with time zone DEFAULT "now"(), + "metadata" "jsonb", + "path_tokens" "text"[] GENERATED ALWAYS AS ("string_to_array"("name", '/'::"text")) STORED, + "version" "text", + "owner_id" "text" +); + +ALTER TABLE "storage"."objects" OWNER TO "supabase_storage_admin"; + +COMMENT ON COLUMN "storage"."objects"."owner" IS 'Field is deprecated, use owner_id instead'; + +ALTER TABLE ONLY "storage"."buckets" + ADD CONSTRAINT "buckets_pkey" PRIMARY KEY ("id"); + +ALTER TABLE ONLY "storage"."migrations" + ADD CONSTRAINT "migrations_name_key" UNIQUE ("name"); + +ALTER TABLE ONLY "storage"."migrations" + ADD CONSTRAINT "migrations_pkey" PRIMARY KEY ("id"); + +ALTER TABLE ONLY "storage"."objects" + ADD CONSTRAINT "objects_pkey" PRIMARY KEY ("id"); + +CREATE UNIQUE INDEX "bname" ON "storage"."buckets" USING "btree" ("name"); + +CREATE UNIQUE INDEX "bucketid_objname" ON "storage"."objects" USING "btree" ("bucket_id", "name"); + +CREATE INDEX "name_prefix_search" ON "storage"."objects" USING "btree" ("name" "text_pattern_ops"); + +CREATE OR REPLACE TRIGGER "delete_images" AFTER DELETE ON "storage"."objects" FOR EACH ROW EXECUTE FUNCTION "public"."check_can_upload"(); + +CREATE OR REPLACE TRIGGER "insert_images" AFTER INSERT ON "storage"."objects" FOR EACH ROW EXECUTE FUNCTION "public"."check_can_upload"(); + +CREATE OR REPLACE TRIGGER "update_objects_updated_at" BEFORE UPDATE ON "storage"."objects" FOR EACH ROW EXECUTE FUNCTION "storage"."update_updated_at_column"(); + +ALTER TABLE ONLY "storage"."objects" + ADD CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY ("bucket_id") REFERENCES "storage"."buckets"("id"); + +ALTER TABLE ONLY "storage"."objects" + ADD CONSTRAINT "objects_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id"); + +CREATE POLICY "Anyone can read owner" ON "storage"."objects" FOR SELECT USING ((("bucket_id" = 'public-images'::"text") AND ("owner" IS NULL))); + +CREATE POLICY "Authenticated users can delete images" ON "storage"."objects" FOR DELETE TO "authenticated" USING ((("bucket_id" = 'public-images'::"text") AND ("auth"."uid"() = "owner"))); + +CREATE POLICY "Authenticated users can insert images" ON "storage"."objects" FOR INSERT TO "authenticated" WITH CHECK ((("bucket_id" = 'public-images'::"text") AND ("auth"."uid"() = "owner"))); + +CREATE POLICY "Authenticated users can read images" ON "storage"."objects" FOR SELECT TO "authenticated" USING ((("bucket_id" = 'public-images'::"text") AND ("auth"."uid"() = "owner"))); + +CREATE POLICY "Authenticated users can update images" ON "storage"."objects" FOR UPDATE TO "authenticated" USING (("bucket_id" = 'public-images'::"text")) WITH CHECK (("auth"."uid"() = "owner")); + +ALTER TABLE "storage"."buckets" ENABLE ROW LEVEL SECURITY; + +ALTER TABLE "storage"."migrations" ENABLE ROW LEVEL SECURITY; + +ALTER TABLE "storage"."objects" ENABLE ROW LEVEL SECURITY; + +CREATE POLICY "objects_auth_select" ON "storage"."objects" FOR SELECT TO "authenticated" USING (("owner" = "auth"."uid"())); + +GRANT ALL ON SCHEMA "storage" TO "postgres"; +GRANT USAGE ON SCHEMA "storage" TO "anon"; +GRANT USAGE ON SCHEMA "storage" TO "authenticated"; +GRANT USAGE ON SCHEMA "storage" TO 
"service_role"; +GRANT ALL ON SCHEMA "storage" TO "supabase_storage_admin"; +GRANT ALL ON SCHEMA "storage" TO "dashboard_user"; + +ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "storage" GRANT ALL ON SEQUENCES TO "postgres"; +ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "storage" GRANT ALL ON SEQUENCES TO "anon"; +ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "storage" GRANT ALL ON SEQUENCES TO "authenticated"; +ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "storage" GRANT ALL ON SEQUENCES TO "service_role"; + +RESET ALL; diff --git a/internal/migration/squash/testdata/before.sql b/internal/migration/squash/testdata/before.sql new file mode 100644 index 0000000..7f3e624 --- /dev/null +++ b/internal/migration/squash/testdata/before.sql @@ -0,0 +1,90 @@ + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +CREATE SCHEMA IF NOT EXISTS "storage"; + +ALTER SCHEMA "storage" OWNER TO "supabase_admin"; + +CREATE OR REPLACE FUNCTION "storage"."can_insert_object"("bucketid" "text", "name" "text", "owner" "uuid", "metadata" "jsonb") RETURNS "void" + LANGUAGE "plpgsql" + AS $$ +BEGIN + INSERT INTO "storage"."objects" ("bucket_id", "name", "owner", "metadata") VALUES (bucketid, name, owner, metadata); + -- hack to rollback the successful insert + RAISE sqlstate 'PT200' using + message = 'ROLLBACK', + detail = 'rollback successful insert'; +END +$$; + +ALTER FUNCTION "storage"."can_insert_object"("bucketid" "text", "name" "text", "owner" "uuid", "metadata" "jsonb") OWNER TO "supabase_storage_admin"; + +CREATE TABLE IF NOT EXISTS "storage"."objects" ( + "id" "uuid" DEFAULT "gen_random_uuid"() NOT NULL, + "bucket_id" "text", + "name" "text", + "owner" "uuid", + "created_at" timestamp with time zone DEFAULT "now"(), + "updated_at" timestamp with time zone DEFAULT "now"(), + "last_accessed_at" timestamp with time zone DEFAULT "now"(), + "metadata" "jsonb", + "path_tokens" "text"[] GENERATED ALWAYS AS ("string_to_array"("name", '/'::"text")) STORED, + "version" "text", + "owner_id" "text" +); + +ALTER TABLE "storage"."objects" OWNER TO "supabase_storage_admin"; + +COMMENT ON COLUMN "storage"."objects"."owner" IS 'Field is deprecated, use owner_id instead'; + +ALTER TABLE ONLY "storage"."buckets" + ADD CONSTRAINT "buckets_pkey" PRIMARY KEY ("id"); + +ALTER TABLE ONLY "storage"."migrations" + ADD CONSTRAINT "migrations_name_key" UNIQUE ("name"); + +ALTER TABLE ONLY "storage"."migrations" + ADD CONSTRAINT "migrations_pkey" PRIMARY KEY ("id"); + +ALTER TABLE ONLY "storage"."objects" + ADD CONSTRAINT "objects_pkey" PRIMARY KEY ("id"); + +CREATE UNIQUE INDEX "bname" ON "storage"."buckets" USING "btree" ("name"); + +CREATE UNIQUE INDEX "bucketid_objname" ON "storage"."objects" USING "btree" ("bucket_id", "name"); + +CREATE INDEX "name_prefix_search" ON "storage"."objects" USING "btree" ("name" "text_pattern_ops"); + +CREATE OR REPLACE TRIGGER "update_objects_updated_at" BEFORE UPDATE ON "storage"."objects" FOR EACH ROW EXECUTE FUNCTION "storage"."update_updated_at_column"(); + +ALTER TABLE ONLY "storage"."objects" + ADD CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY ("bucket_id") REFERENCES "storage"."buckets"("id"); + +ALTER TABLE "storage"."buckets" ENABLE ROW LEVEL SECURITY; + +ALTER 
TABLE "storage"."migrations" ENABLE ROW LEVEL SECURITY; + +ALTER TABLE "storage"."objects" ENABLE ROW LEVEL SECURITY; + +GRANT ALL ON SCHEMA "storage" TO "postgres"; +GRANT USAGE ON SCHEMA "storage" TO "anon"; +GRANT USAGE ON SCHEMA "storage" TO "authenticated"; +GRANT USAGE ON SCHEMA "storage" TO "service_role"; +GRANT ALL ON SCHEMA "storage" TO "supabase_storage_admin"; +GRANT ALL ON SCHEMA "storage" TO "dashboard_user"; + +ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "storage" GRANT ALL ON SEQUENCES TO "postgres"; +ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "storage" GRANT ALL ON SEQUENCES TO "anon"; +ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "storage" GRANT ALL ON SEQUENCES TO "authenticated"; +ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "storage" GRANT ALL ON SEQUENCES TO "service_role"; + +RESET ALL; diff --git a/internal/migration/squash/testdata/diff.sql b/internal/migration/squash/testdata/diff.sql new file mode 100644 index 0000000..b0d6d4c --- /dev/null +++ b/internal/migration/squash/testdata/diff.sql @@ -0,0 +1,19 @@ +CREATE OR REPLACE TRIGGER "delete_images" AFTER DELETE ON "storage"."objects" FOR EACH ROW EXECUTE FUNCTION "public"."check_can_upload"(); + +CREATE OR REPLACE TRIGGER "insert_images" AFTER INSERT ON "storage"."objects" FOR EACH ROW EXECUTE FUNCTION "public"."check_can_upload"(); + +ALTER TABLE ONLY "storage"."objects" + ADD CONSTRAINT "objects_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id"); + +CREATE POLICY "Anyone can read owner" ON "storage"."objects" FOR SELECT USING ((("bucket_id" = 'public-images'::"text") AND ("owner" IS NULL))); + +CREATE POLICY "Authenticated users can delete images" ON "storage"."objects" FOR DELETE TO "authenticated" USING ((("bucket_id" = 'public-images'::"text") AND ("auth"."uid"() = "owner"))); + +CREATE POLICY "Authenticated users can insert images" ON "storage"."objects" FOR INSERT TO "authenticated" WITH CHECK ((("bucket_id" = 'public-images'::"text") AND ("auth"."uid"() = "owner"))); + +CREATE POLICY "Authenticated users can read images" ON "storage"."objects" FOR SELECT TO "authenticated" USING ((("bucket_id" = 'public-images'::"text") AND ("auth"."uid"() = "owner"))); + +CREATE POLICY "Authenticated users can update images" ON "storage"."objects" FOR UPDATE TO "authenticated" USING (("bucket_id" = 'public-images'::"text")) WITH CHECK (("auth"."uid"() = "owner")); + +CREATE POLICY "objects_auth_select" ON "storage"."objects" FOR SELECT TO "authenticated" USING (("owner" = "auth"."uid"())); + diff --git a/internal/migration/up/up.go b/internal/migration/up/up.go new file mode 100644 index 0000000..331abce --- /dev/null +++ b/internal/migration/up/up.go @@ -0,0 +1,67 @@ +package up + +import ( + "context" + "fmt" + "strings" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/vault" +) + +func Run(ctx context.Context, includeAll bool, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + conn, err := utils.ConnectByConfig(ctx, config, options...) 
+ if err != nil { + return err + } + defer conn.Close(context.Background()) + pending, err := GetPendingMigrations(ctx, includeAll, conn, fsys) + if err != nil { + return err + } + if err := vault.UpsertVaultSecrets(ctx, utils.Config.Db.Vault, conn); err != nil { + return err + } + return migration.ApplyMigrations(ctx, pending, conn, afero.NewIOFS(fsys)) +} + +func GetPendingMigrations(ctx context.Context, includeAll bool, conn *pgx.Conn, fsys afero.Fs) ([]string, error) { + remoteMigrations, err := migration.ListRemoteMigrations(ctx, conn) + if err != nil { + return nil, err + } + localMigrations, err := migration.ListLocalMigrations(utils.MigrationsDir, afero.NewIOFS(fsys)) + if err != nil { + return nil, err + } + diff, err := migration.FindPendingMigrations(localMigrations, remoteMigrations) + if errors.Is(err, migration.ErrMissingLocal) { + utils.CmdSuggestion = suggestRevertHistory(diff) + } else if errors.Is(err, migration.ErrMissingRemote) { + if includeAll { + pending := localMigrations[len(remoteMigrations)+len(diff):] + return append(diff, pending...), nil + } + utils.CmdSuggestion = suggestIgnoreFlag(diff) + } + return diff, err +} + +func suggestRevertHistory(versions []string) string { + result := fmt.Sprintln("\nMake sure your local git repo is up-to-date. If the error persists, try repairing the migration history table:") + result += fmt.Sprintln(utils.Bold("supabase migration repair --status reverted " + strings.Join(versions, " "))) + result += fmt.Sprintln("\nAnd update local migrations to match remote database:") + result += fmt.Sprintln(utils.Bold("supabase db pull")) + return result +} + +func suggestIgnoreFlag(paths []string) string { + result := "\nRerun the command with --include-all flag to apply these migrations:\n" + result += fmt.Sprintln(utils.Bold(strings.Join(paths, "\n"))) + return result +} diff --git a/internal/migration/up/up_test.go b/internal/migration/up/up_test.go new file mode 100644 index 0000000..ea41680 --- /dev/null +++ b/internal/migration/up/up_test.go @@ -0,0 +1,168 @@ +package up + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/fstest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/pgtest" +) + +func TestPendingMigrations(t *testing.T) { + t.Run("finds pending migrations", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + files := []string{ + filepath.Join(utils.MigrationsDir, "20221201000000_test.sql"), + filepath.Join(utils.MigrationsDir, "20221201000001_test.sql"), + filepath.Join(utils.MigrationsDir, "20221201000002_test.sql"), + filepath.Join(utils.MigrationsDir, "20221201000003_test.sql"), + } + for _, path := range files { + require.NoError(t, afero.WriteFile(fsys, path, []byte(""), 0644)) + } + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). 
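+			// Remote reports the first two versions as applied, leaving the last
+			// two local files pending.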
+ Reply("SELECT 2", []interface{}{"20221201000000"}, []interface{}{"20221201000001"}) + // Run test + pending, err := GetPendingMigrations(context.Background(), false, conn.MockClient(t), fsys) + // Check error + assert.NoError(t, err) + assert.ElementsMatch(t, files[2:], pending) + }) + + t.Run("throws error on local load failure", func(t *testing.T) { + // Setup in-memory fs + fsys := &fstest.OpenErrorFs{DenyPath: utils.MigrationsDir} + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 0") + // Run test + _, err := GetPendingMigrations(context.Background(), false, conn.MockClient(t), fsys) + // Check error + assert.ErrorIs(t, err, os.ErrPermission) + }) + + t.Run("throws error on missing local migration", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 1", []interface{}{"0"}) + // Run test + _, err := GetPendingMigrations(context.Background(), false, conn.MockClient(t), fsys) + // Check error + assert.ErrorIs(t, err, migration.ErrMissingLocal) + assert.Contains(t, utils.CmdSuggestion, "supabase migration repair --status reverted 0") + }) + + t.Run("throws error on missing remote version", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + files := []string{"0_test.sql", "1_test.sql"} + for _, name := range files { + path := filepath.Join(utils.MigrationsDir, name) + require.NoError(t, afero.WriteFile(fsys, path, []byte(""), 0644)) + } + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 1", []interface{}{"1"}) + // Run test + _, err := GetPendingMigrations(context.Background(), false, conn.MockClient(t), fsys) + // Check error + assert.ErrorIs(t, err, migration.ErrMissingRemote) + }) +} + +func TestIgnoreVersionMismatch(t *testing.T) { + t.Run("applies out-of-order local migrations", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + files := []string{ + filepath.Join(utils.MigrationsDir, "20221201000000_test.sql"), + filepath.Join(utils.MigrationsDir, "20221201000001_test.sql"), + filepath.Join(utils.MigrationsDir, "20221201000002_test.sql"), + filepath.Join(utils.MigrationsDir, "20221201000003_test.sql"), + } + for _, path := range files { + require.NoError(t, afero.WriteFile(fsys, path, []byte(""), 0644)) + } + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). 
+ Reply("SELECT 2", []interface{}{"20221201000000"}, []interface{}{"20221201000002"}) + // Run test + pending, err := GetPendingMigrations(context.Background(), true, conn.MockClient(t), fsys) + // Check error + assert.NoError(t, err) + assert.ElementsMatch(t, []string{files[1], files[3]}, pending) + }) + + t.Run("throws error on missing local and remote migration", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + files := []string{ + filepath.Join(utils.MigrationsDir, "20221201000000_test.sql"), + filepath.Join(utils.MigrationsDir, "20221201000001_test.sql"), + filepath.Join(utils.MigrationsDir, "20221201000002_test.sql"), + filepath.Join(utils.MigrationsDir, "20221201000003_test.sql"), + } + for _, path := range files { + require.NoError(t, afero.WriteFile(fsys, path, []byte(""), 0644)) + } + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 2", []interface{}{"20221201000002"}, []interface{}{"20221201000004"}) + // Run test + _, err := GetPendingMigrations(context.Background(), true, conn.MockClient(t), fsys) + // Check error + assert.ErrorIs(t, err, migration.ErrMissingLocal) + assert.Contains(t, utils.CmdSuggestion, "supabase migration repair --status reverted 20221201000004") + }) + + t.Run("throws error on missing local migration", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + files := []string{ + filepath.Join(utils.MigrationsDir, "20221201000000_test.sql"), + filepath.Join(utils.MigrationsDir, "20221201000002_test.sql"), + } + for _, path := range files { + require.NoError(t, afero.WriteFile(fsys, path, []byte(""), 0644)) + } + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(migration.LIST_MIGRATION_VERSION). 
+ Reply("SELECT 5", + []interface{}{"20221201000000"}, + []interface{}{"20221201000001"}, + []interface{}{"20221201000002"}, + []interface{}{"20221201000003"}, + []interface{}{"20221201000004"}, + ) + // Run test + _, err := GetPendingMigrations(context.Background(), true, conn.MockClient(t), fsys) + // Check error + assert.ErrorIs(t, err, migration.ErrMissingLocal) + assert.Contains(t, utils.CmdSuggestion, "supabase migration repair --status reverted 20221201000001 20221201000003 20221201000004") + }) +} diff --git a/internal/orgs/create/create.go b/internal/orgs/create/create.go new file mode 100644 index 0000000..ed6008a --- /dev/null +++ b/internal/orgs/create/create.go @@ -0,0 +1,24 @@ +package create + +import ( + "context" + "fmt" + + "github.com/go-errors/errors" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func Run(ctx context.Context, name string) error { + resp, err := utils.GetSupabase().V1CreateAnOrganizationWithResponse(ctx, api.V1CreateAnOrganizationJSONRequestBody{Name: name}) + if err != nil { + return errors.Errorf("failed to create organization: %w", err) + } + + if resp.JSON201 == nil { + return errors.New("Unexpected error creating organization: " + string(resp.Body)) + } + + fmt.Println("Created organization:", resp.JSON201.Id) + return nil +} diff --git a/internal/orgs/create/create_test.go b/internal/orgs/create/create_test.go new file mode 100644 index 0000000..f491bdc --- /dev/null +++ b/internal/orgs/create/create_test.go @@ -0,0 +1,68 @@ +package create + +import ( + "context" + "errors" + "net/http" + "testing" + + "github.com/h2non/gock" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func TestOrganizationCreateCommand(t *testing.T) { + orgName := "Test Organization" + + t.Run("create an organization", func(t *testing.T) { + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/organizations"). + Reply(http.StatusCreated). + JSON(api.OrganizationResponseV1{ + Id: "combined-fuchsia-lion", + Name: orgName, + }) + // Run test + assert.NoError(t, Run(context.Background(), orgName)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on network error", func(t *testing.T) { + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/organizations"). + ReplyError(errors.New("network error")) + // Run test + assert.Error(t, Run(context.Background(), orgName)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on server unavailable", func(t *testing.T) { + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/organizations"). + Reply(http.StatusServiceUnavailable). 
+ JSON(map[string]string{"message": "unavailable"}) + // Run test + assert.Error(t, Run(context.Background(), orgName)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/orgs/list/list.go b/internal/orgs/list/list.go new file mode 100644 index 0000000..abd6c08 --- /dev/null +++ b/internal/orgs/list/list.go @@ -0,0 +1,31 @@ +package list + +import ( + "context" + "fmt" + "strings" + + "github.com/go-errors/errors" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context) error { + resp, err := utils.GetSupabase().V1ListAllOrganizationsWithResponse(ctx) + if err != nil { + return errors.Errorf("failed to list organizations: %w", err) + } + + if resp.JSON200 == nil { + return errors.New("Unexpected error retrieving organizations: " + string(resp.Body)) + } + + table := `|ID|NAME| +|-|-| +` + for _, org := range *resp.JSON200 { + table += fmt.Sprintf("|`%s`|`%s`|\n", org.Id, strings.ReplaceAll(org.Name, "|", "\\|")) + } + + return list.RenderTable(table) +} diff --git a/internal/orgs/list/list_test.go b/internal/orgs/list/list_test.go new file mode 100644 index 0000000..00030db --- /dev/null +++ b/internal/orgs/list/list_test.go @@ -0,0 +1,68 @@ +package list + +import ( + "context" + "errors" + "net/http" + "testing" + + "github.com/h2non/gock" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func TestOrganizationListCommand(t *testing.T) { + t.Run("lists all organizations", func(t *testing.T) { + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/organizations"). + Reply(http.StatusOK). + JSON([]api.OrganizationResponseV1{ + { + Id: "combined-fuchsia-lion", + Name: "Test Organization", + }, + }) + // Run test + assert.NoError(t, Run(context.Background())) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on network error", func(t *testing.T) { + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/organizations"). + ReplyError(errors.New("network error")) + // Run test + assert.Error(t, Run(context.Background())) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on server unavailable", func(t *testing.T) { + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/organizations"). + Reply(http.StatusServiceUnavailable). 
+ JSON(map[string]string{"message": "unavailable"}) + // Run test + assert.Error(t, Run(context.Background())) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/postgresConfig/delete/delete.go b/internal/postgresConfig/delete/delete.go new file mode 100644 index 0000000..20b7b97 --- /dev/null +++ b/internal/postgresConfig/delete/delete.go @@ -0,0 +1,48 @@ +package delete + +import ( + "bytes" + "context" + "encoding/json" + "strings" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/postgresConfig/get" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context, projectRef string, configKeys []string, noRestart bool, fsys afero.Fs) error { + // 1. Get current config + currentConfig, err := get.GetCurrentPostgresConfig(ctx, projectRef) + if err != nil { + return err + } + + // 2. Remove specified keys + for _, key := range configKeys { + delete(currentConfig, strings.TrimSpace(key)) + } + + // 3. Update config with removed keys + if noRestart { + currentConfig["restart_database"] = false + } + bts, err := json.Marshal(currentConfig) + if err != nil { + return errors.Errorf("failed to serialize config overrides: %w", err) + } + + resp, err := utils.GetSupabase().V1UpdatePostgresConfigWithBodyWithResponse(ctx, projectRef, "application/json", bytes.NewReader(bts)) + if err != nil { + return errors.Errorf("failed to update config overrides: %w", err) + } + if resp.JSON200 == nil { + if resp.StatusCode() == 400 { + return errors.Errorf("failed to update config overrides: %s (%s). This usually indicates that an unsupported or invalid config override was attempted. Please refer to https://supabase.com/docs/guides/platform/custom-postgres-config", resp.Status(), string(resp.Body)) + } + return errors.Errorf("failed to update config overrides: %s (%s)", resp.Status(), string(resp.Body)) + } + + return get.Run(ctx, projectRef, fsys) +} diff --git a/internal/postgresConfig/get/get.go b/internal/postgresConfig/get/get.go new file mode 100644 index 0000000..ad311b3 --- /dev/null +++ b/internal/postgresConfig/get/get.go @@ -0,0 +1,70 @@ +package get + +import ( + "context" + "encoding/json" + "fmt" + "io" + "strings" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context, projectRef string, fsys afero.Fs) error { + // 1. 
get current config + { + config, err := GetCurrentPostgresConfig(ctx, projectRef) + if err != nil { + return err + } + err = PrintOutPostgresConfigOverrides(config) + if err != nil { + return err + } + return nil + } +} + +func PrintOutPostgresConfigOverrides(config map[string]interface{}) error { + fmt.Println("- Custom Postgres Config -") + markdownTable := []string{ + "|Parameter|Value|\n|-|-|\n", + } + + for k, v := range config { + markdownTable = append(markdownTable, fmt.Sprintf( + "|`%s`|`%+v`|\n", + k, v, + )) + } + + if err := list.RenderTable(strings.Join(markdownTable, "")); err != nil { + return err + } + fmt.Println("- End of Custom Postgres Config -") + return nil +} + +func GetCurrentPostgresConfig(ctx context.Context, projectRef string) (map[string]interface{}, error) { + resp, err := utils.GetSupabase().V1GetPostgresConfig(ctx, projectRef) + if err != nil { + return nil, errors.Errorf("failed to retrieve Postgres config overrides: %w", err) + } + if resp.StatusCode != 200 { + return nil, errors.Errorf("error in retrieving Postgres config overrides: %s", resp.Status) + } + contents, err := io.ReadAll(resp.Body) + if err != nil { + return nil, errors.Errorf("failed to read response body: %w", err) + } + + var config map[string]interface{} + err = json.Unmarshal(contents, &config) + if err != nil { + return nil, errors.Errorf("failed to unmarshal response body: %w. Contents were %s", err, contents) + } + return config, nil +} diff --git a/internal/postgresConfig/update/update.go b/internal/postgresConfig/update/update.go new file mode 100644 index 0000000..94632d4 --- /dev/null +++ b/internal/postgresConfig/update/update.go @@ -0,0 +1,72 @@ +package update + +import ( + "bytes" + "context" + "encoding/json" + "strconv" + "strings" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/postgresConfig/get" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context, projectRef string, values []string, replaceOverrides, noRestart bool, fsys afero.Fs) error { + // 1. Prepare config overrides + newConfigOverrides := make(map[string]string) + for _, config := range values { + splits := strings.Split(config, "=") + if len(splits) != 2 { + return errors.Errorf("expected config value in key=value format, received: '%s'", config) + } + newConfigOverrides[splits[0]] = splits[1] + } + // 2. If not in replace mode, retrieve current overrides + finalOverrides := make(map[string]interface{}) + { + if !replaceOverrides { + config, err := get.GetCurrentPostgresConfig(ctx, projectRef) + if err != nil { + return err + } + finalOverrides = config + } + } + // 3. Create the list of final overrides + { + for k, v := range newConfigOverrides { + // This is hacky - if we're able to convert the value to an integer, we do so. + // If we start supporting config fields with e.g. floating point overrides, this will need to be updated. + if vInt, err := strconv.Atoi(v); err == nil { + finalOverrides[k] = vInt + } else if vBool, err := strconv.ParseBool(v); err == nil { + finalOverrides[k] = vBool + } else { + finalOverrides[k] = v + } + } + } + // 4.
update config overrides and print out final result + { + if noRestart { + finalOverrides["restart_database"] = false + } + bts, err := json.Marshal(finalOverrides) + if err != nil { + return errors.Errorf("failed to serialize config overrides: %w", err) + } + resp, err := utils.GetSupabase().V1UpdatePostgresConfigWithBodyWithResponse(ctx, projectRef, "application/json", bytes.NewReader(bts)) + if err != nil { + return errors.Errorf("failed to update config overrides: %w", err) + } + if resp.JSON200 == nil { + if resp.StatusCode() == 400 { + return errors.Errorf("failed to update config overrides: %s (%s). This usually indicates that an unsupported or invalid config override was attempted. Please refer to https://supabase.com/docs/guides/platform/custom-postgres-config", resp.Status(), string(resp.Body)) + } + return errors.Errorf("failed to update config overrides: %s (%s)", resp.Status(), string(resp.Body)) + } + } + return get.Run(ctx, projectRef, fsys) +} diff --git a/internal/projects/apiKeys/api_keys.go b/internal/projects/apiKeys/api_keys.go new file mode 100644 index 0000000..7daddce --- /dev/null +++ b/internal/projects/apiKeys/api_keys.go @@ -0,0 +1,45 @@ +package apiKeys + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func Run(ctx context.Context, projectRef string, fsys afero.Fs) error { + keys, err := RunGetApiKeys(ctx, projectRef) + if err != nil { + return err + } + + if utils.OutputFormat.Value == utils.OutputPretty { + table := `|NAME|KEY VALUE| +|-|-| +` + for _, entry := range keys { + table += fmt.Sprintf("|`%s`|`%s`|\n", strings.ReplaceAll(entry.Name, "|", "\\|"), entry.ApiKey) + } + + return list.RenderTable(table) + } + + return utils.EncodeOutput(utils.OutputFormat.Value, os.Stdout, keys) +} + +func RunGetApiKeys(ctx context.Context, projectRef string) ([]api.ApiKeyResponse, error) { + resp, err := utils.GetSupabase().V1GetProjectApiKeysWithResponse(ctx, projectRef, &api.V1GetProjectApiKeysParams{}) + if err != nil { + return nil, errors.Errorf("failed to get api keys: %w", err) + } + if resp.JSON200 == nil { + return nil, errors.New("Unexpected error retrieving project api-keys: " + string(resp.Body)) + } + return *resp.JSON200, nil +} diff --git a/internal/projects/apiKeys/api_keys_test.go b/internal/projects/apiKeys/api_keys_test.go new file mode 100644 index 0000000..57130f1 --- /dev/null +++ b/internal/projects/apiKeys/api_keys_test.go @@ -0,0 +1,69 @@ +package apiKeys + +import ( + "context" + "errors" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func TestProjectApiKeysCommand(t *testing.T) { + t.Run("lists all api-keys", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/api-keys"). + Reply(200). 
+ JSON([]api.ApiKeyResponse{{ + Name: "Test ApiKey", + ApiKey: "dummy-api-key-value", + }}) + // Run test + err := Run(context.Background(), project, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on missing access token", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), "", fsys) + // Check error + assert.ErrorContains(t, err, "Unexpected error retrieving project api-keys") + }) + + t.Run("throws error on network error", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/api-keys"). + ReplyError(errors.New("network error")) + // Run test + err := Run(context.Background(), project, fsys) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/projects/create/create.go b/internal/projects/create/create.go new file mode 100644 index 0000000..ddbd46b --- /dev/null +++ b/internal/projects/create/create.go @@ -0,0 +1,122 @@ +package create + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/spf13/viper" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/credentials" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/api" +) + +func Run(ctx context.Context, params api.V1CreateProjectBodyDto, fsys afero.Fs) error { + if err := promptMissingParams(ctx, ¶ms); err != nil { + return err + } + + resp, err := utils.GetSupabase().V1CreateAProjectWithResponse(ctx, params) + if err != nil { + return errors.Errorf("failed to create project: %w", err) + } + if resp.JSON201 == nil { + return errors.New("Unexpected error creating project: " + string(resp.Body)) + } + + flags.ProjectRef = resp.JSON201.Id + viper.Set("DB_PASSWORD", params.DbPass) + if err := credentials.StoreProvider.Set(flags.ProjectRef, params.DbPass); err != nil { + fmt.Fprintln(os.Stderr, "Failed to save database password:", err) + } + + projectUrl := fmt.Sprintf("%s/project/%s", utils.GetSupabaseDashboardURL(), resp.JSON201.Id) + fmt.Fprintf(os.Stderr, "Created a new project %s at %s\n", utils.Aqua(resp.JSON201.Name), utils.Bold(projectUrl)) + if utils.OutputFormat.Value == utils.OutputPretty { + return nil + } + + return utils.EncodeOutput(utils.OutputFormat.Value, os.Stdout, resp.JSON201) +} + +func printKeyValue(key, value string) string { + indent := 20 - len(key) + spaces := strings.Repeat(" ", indent) + return key + ":" + spaces + value +} + +func promptMissingParams(ctx context.Context, body *api.V1CreateProjectBodyDto) error { + var err error + if len(body.Name) == 0 { + if body.Name, err = promptProjectName(ctx); err != nil { + return err + } + } else { + fmt.Fprintln(os.Stderr, printKeyValue("Creating project", body.Name)) + } + if len(body.OrganizationId) == 0 { + if body.OrganizationId, err = promptOrgId(ctx); err != nil { + return err + } + fmt.Fprintln(os.Stderr, printKeyValue("Selected org-id", body.OrganizationId)) + } + if len(body.Region) == 0 { + if body.Region, err = promptProjectRegion(ctx); err != 
nil { + return err + } + fmt.Fprintln(os.Stderr, printKeyValue("Selected region", string(body.Region))) + } + if len(body.DbPass) == 0 { + body.DbPass = flags.PromptPassword(os.Stdin) + } + return nil +} + +func promptProjectName(ctx context.Context) (string, error) { + title := "Enter your project name: " + if name, err := utils.NewConsole().PromptText(ctx, title); err != nil { + return "", err + } else if len(name) > 0 { + return name, nil + } + return "", errors.New("project name cannot be empty") +} + +func promptOrgId(ctx context.Context) (string, error) { + title := "Which organisation do you want to create the project for?" + resp, err := utils.GetSupabase().V1ListAllOrganizationsWithResponse(ctx) + if err != nil { + return "", err + } + if resp.JSON200 == nil { + return "", errors.New("Unexpected error retrieving organizations: " + string(resp.Body)) + } + items := make([]utils.PromptItem, len(*resp.JSON200)) + for i, org := range *resp.JSON200 { + items[i] = utils.PromptItem{Summary: org.Name, Details: org.Id} + } + choice, err := utils.PromptChoice(ctx, title, items) + if err != nil { + return "", err + } + return choice.Details, nil +} + +func promptProjectRegion(ctx context.Context) (api.V1CreateProjectBodyDtoRegion, error) { + title := "Which region do you want to host the project in?" + items := make([]utils.PromptItem, len(utils.RegionMap)) + i := 0 + for k, v := range utils.RegionMap { + items[i] = utils.PromptItem{Summary: k, Details: v} + i++ + } + choice, err := utils.PromptChoice(ctx, title, items) + if err != nil { + return "", err + } + return api.V1CreateProjectBodyDtoRegion(choice.Summary), nil +} diff --git a/internal/projects/create/create_test.go b/internal/projects/create/create_test.go new file mode 100644 index 0000000..879421d --- /dev/null +++ b/internal/projects/create/create_test.go @@ -0,0 +1,112 @@ +package create + +import ( + "context" + "errors" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func TestProjectCreateCommand(t *testing.T) { + var params = api.V1CreateProjectBodyDto{ + Name: "Test Project", + OrganizationId: "combined-fuchsia-lion", + DbPass: "redacted", + Region: api.V1CreateProjectBodyDtoRegionUsWest1, + } + + t.Run("creates a new project", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/projects"). + MatchType("json"). + JSON(params). + Reply(201). 
+ JSON(api.V1ProjectResponse{ + Id: apitest.RandomProjectRef(), + OrganizationId: params.OrganizationId, + Name: params.Name, + Region: string(params.Region), + CreatedAt: "2022-04-25T02:14:55.906498Z", + }) + // Run test + assert.NoError(t, Run(context.Background(), params, fsys)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on failure to load token", func(t *testing.T) { + assert.Error(t, Run(context.Background(), params, afero.NewMemMapFs())) + }) + + t.Run("throws error on network error", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/projects"). + MatchType("json"). + JSON(params). + ReplyError(errors.New("network error")) + // Run test + assert.Error(t, Run(context.Background(), params, fsys)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on server unavailable", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/projects"). + MatchType("json"). + JSON(params). + Reply(500). + JSON(map[string]string{"message": "unavailable"}) + // Run test + assert.Error(t, Run(context.Background(), params, fsys)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on malformed json", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/projects"). + MatchType("json"). + JSON(params). + Reply(200). + JSON([]string{}) + // Run test + assert.Error(t, Run(context.Background(), params, fsys)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/projects/delete/delete.go b/internal/projects/delete/delete.go new file mode 100644 index 0000000..f042f62 --- /dev/null +++ b/internal/projects/delete/delete.go @@ -0,0 +1,60 @@ +package delete + +import ( + "context" + "fmt" + "net/http" + "os" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/unlink" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/credentials" + "github.com/zalando/go-keyring" +) + +func PreRun(ctx context.Context, ref string) error { + if err := utils.AssertProjectRefIsValid(ref); err != nil { + return err + } + title := fmt.Sprintf("Do you want to delete project %s? 
This action is irreversible.", utils.Aqua(ref)) + if shouldDelete, err := utils.NewConsole().PromptYesNo(ctx, title, false); err != nil { + return err + } else if !shouldDelete { + return errors.New(context.Canceled) + } + return nil +} + +func Run(ctx context.Context, ref string, fsys afero.Fs) error { + resp, err := utils.GetSupabase().V1DeleteAProjectWithResponse(ctx, ref) + if err != nil { + return errors.Errorf("failed to delete project: %w", err) + } + + switch resp.StatusCode() { + case http.StatusNotFound: + return errors.New("Project does not exist: " + utils.Aqua(ref)) + case http.StatusOK: + break + default: + return errors.Errorf("Failed to delete project %s: %s", utils.Aqua(ref), string(resp.Body)) + } + + // Unlink project + if err := credentials.StoreProvider.Delete(ref); err != nil && !errors.Is(err, keyring.ErrNotFound) { + fmt.Fprintln(os.Stderr, err) + } + if match, err := afero.FileContainsBytes(fsys, utils.ProjectRefPath, []byte(ref)); match { + if err := unlink.Unlink(ref, fsys); err != nil { + fmt.Fprintln(os.Stderr, err) + } + } else if err != nil { + logger := utils.GetDebugLogger() + fmt.Fprintln(logger, err) + } + + fmt.Println("Deleted project: " + utils.Aqua(resp.JSON200.Name)) + return nil +} diff --git a/internal/projects/delete/delete_test.go b/internal/projects/delete/delete_test.go new file mode 100644 index 0000000..b83d4cf --- /dev/null +++ b/internal/projects/delete/delete_test.go @@ -0,0 +1,87 @@ +package delete + +import ( + "context" + "errors" + "net/http" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" + "github.com/zalando/go-keyring" +) + +func TestDeleteCommand(t *testing.T) { + ref := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Mock credentials store + keyring.MockInit() + + t.Run("deletes project", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, utils.ProjectRefPath, []byte(ref), 0644)) + // Setup api mock + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Delete("/v1/projects/" + ref). + Reply(http.StatusOK). + JSON(api.V1ProjectRefResponse{ + Ref: ref, + Name: "test-project", + }) + // Run test + err := Run(context.Background(), ref, afero.NewReadOnlyFs(fsys)) + // Check error + assert.NoError(t, err) + }) + + t.Run("throws error on network failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup api mock + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Delete("/v1/projects/" + ref). + ReplyError(errors.New("network error")) + // Run test + err := Run(context.Background(), ref, fsys) + // Check error + assert.ErrorContains(t, err, "network error") + }) + + t.Run("throws error on project not found", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup api mock + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Delete("/v1/projects/" + ref).
+ Reply(http.StatusNotFound) + // Run test + err := Run(context.Background(), ref, fsys) + // Check error + assert.ErrorContains(t, err, "Project does not exist:") + }) + + t.Run("throws error on service unavailable", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup api mock + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Delete("/v1/projects/" + ref). + Reply(http.StatusServiceUnavailable) + // Run test + err := Run(context.Background(), ref, fsys) + // Check error + assert.ErrorContains(t, err, "Failed to delete project") + }) +} diff --git a/internal/projects/list/list.go b/internal/projects/list/list.go new file mode 100644 index 0000000..ef356df --- /dev/null +++ b/internal/projects/list/list.go @@ -0,0 +1,83 @@ +package list + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/api" +) + +type linkedProject struct { + api.V1ProjectWithDatabaseResponse `yaml:",inline"` + Linked bool `json:"linked"` +} + +func Run(ctx context.Context, fsys afero.Fs) error { + resp, err := utils.GetSupabase().V1ListAllProjectsWithResponse(ctx) + if err != nil { + return errors.Errorf("failed to list projects: %w", err) + } + + if resp.JSON200 == nil { + return errors.New("Unexpected error retrieving projects: " + string(resp.Body)) + } + + if err := flags.LoadProjectRef(fsys); err != nil && err != utils.ErrNotLinked { + fmt.Fprintln(os.Stderr, err) + } + + var projects []linkedProject + for _, project := range *resp.JSON200 { + projects = append(projects, linkedProject{ + V1ProjectWithDatabaseResponse: project, + Linked: project.Id == flags.ProjectRef, + }) + } + + if utils.OutputFormat.Value == utils.OutputPretty { + table := `LINKED|ORG ID|REFERENCE ID|NAME|REGION|CREATED AT (UTC) +|-|-|-|-|-|-| +` + for _, project := range projects { + table += fmt.Sprintf( + "|`%s`|`%s`|`%s`|`%s`|`%s`|`%s`|\n", + formatBullet(project.Linked), + project.OrganizationId, + project.Id, + strings.ReplaceAll(project.Name, "|", "\\|"), + formatRegion(project.Region), + utils.FormatTimestamp(project.CreatedAt), + ) + } + return list.RenderTable(table) + } else if utils.OutputFormat.Value == utils.OutputToml { + return utils.EncodeOutput(utils.OutputFormat.Value, os.Stdout, struct { + Projects []linkedProject `toml:"projects"` + }{ + Projects: projects, + }) + } + + return utils.EncodeOutput(utils.OutputFormat.Value, os.Stdout, projects) +} + +func formatBullet(value bool) string { + if value { + return " ●" + } + return " " +} + +func formatRegion(region string) string { + if readable, ok := utils.RegionMap[region]; ok { + return readable + } + return region +} diff --git a/internal/projects/list/list_test.go b/internal/projects/list/list_test.go new file mode 100644 index 0000000..d5450bf --- /dev/null +++ b/internal/projects/list/list_test.go @@ -0,0 +1,99 @@ +package list + +import ( + "context" + "errors" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func TestProjectListCommand(t *testing.T) { + t.Run("lists all projects", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid access token + token := 
apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects"). + Reply(200). + JSON([]api.V1ProjectResponse{ + { + Id: apitest.RandomProjectRef(), + OrganizationId: "combined-fuchsia-lion", + Name: "Test Project", + Region: "us-west-1", + CreatedAt: "2022-04-25T02:14:55.906498Z", + }, + }) + // Run test + assert.NoError(t, Run(context.Background(), fsys)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on failure to load token", func(t *testing.T) { + assert.Error(t, Run(context.Background(), afero.NewMemMapFs())) + }) + + t.Run("throws error on network error", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects"). + ReplyError(errors.New("network error")) + // Run test + assert.Error(t, Run(context.Background(), fsys)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on server unavailable", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects"). + Reply(500). + JSON(map[string]string{"message": "unavailable"}) + // Run test + assert.Error(t, Run(context.Background(), fsys)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on malformed json", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects"). + Reply(200). 
+ JSON(map[string]string{}) + // Run test + assert.Error(t, Run(context.Background(), fsys)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/restrictions/get/get.go b/internal/restrictions/get/get.go new file mode 100644 index 0000000..f8e2734 --- /dev/null +++ b/internal/restrictions/get/get.go @@ -0,0 +1,24 @@ +package get + +import ( + "context" + "fmt" + + "github.com/go-errors/errors" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context, projectRef string) error { + resp, err := utils.GetSupabase().V1GetNetworkRestrictionsWithResponse(ctx, projectRef) + if err != nil { + return errors.Errorf("failed to retrieve network restrictions: %w", err) + } + if resp.JSON200 == nil { + return errors.New("failed to retrieve network restrictions; received: " + string(resp.Body)) + } + + fmt.Printf("DB Allowed IPv4 CIDRs: %+v\n", resp.JSON200.Config.DbAllowedCidrs) + fmt.Printf("DB Allowed IPv6 CIDRs: %+v\n", resp.JSON200.Config.DbAllowedCidrsV6) + fmt.Printf("Restrictions applied successfully: %+v\n", resp.JSON200.Status == "applied") + return nil +} diff --git a/internal/restrictions/update/update.go b/internal/restrictions/update/update.go new file mode 100644 index 0000000..98a70da --- /dev/null +++ b/internal/restrictions/update/update.go @@ -0,0 +1,47 @@ +package update + +import ( + "context" + "fmt" + "net" + + "github.com/go-errors/errors" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func Run(ctx context.Context, projectRef string, dbCidrsToAllow []string, bypassCidrChecks bool) error { + // 1. separate CIDR to v4 and v6 + body := api.V1UpdateNetworkRestrictionsJSONRequestBody{ + DbAllowedCidrs: &[]string{}, + DbAllowedCidrsV6: &[]string{}, + } + for _, cidr := range dbCidrsToAllow { + ip, _, err := net.ParseCIDR(cidr) + if err != nil { + return errors.Errorf("failed to parse IP: %s", cidr) + } + if ip.IsPrivate() && !bypassCidrChecks { + return errors.Errorf("private IP provided: %s", cidr) + } + if ip.To4() != nil { + *body.DbAllowedCidrs = append(*body.DbAllowedCidrs, cidr) + } else { + *body.DbAllowedCidrsV6 = append(*body.DbAllowedCidrsV6, cidr) + } + } + + // 2. 
update restrictions + resp, err := utils.GetSupabase().V1UpdateNetworkRestrictionsWithResponse(ctx, projectRef, body) + if err != nil { + return errors.Errorf("failed to apply network restrictions: %w", err) + } + if resp.JSON201 == nil { + return errors.New("failed to apply network restrictions: " + string(resp.Body)) + } + + fmt.Printf("DB Allowed IPv4 CIDRs: %+v\n", resp.JSON201.Config.DbAllowedCidrs) + fmt.Printf("DB Allowed IPv6 CIDRs: %+v\n", resp.JSON201.Config.DbAllowedCidrsV6) + fmt.Printf("Restrictions applied successfully: %+v\n", resp.JSON201.Status == "applied") + return nil +} diff --git a/internal/restrictions/update/update_test.go b/internal/restrictions/update/update_test.go new file mode 100644 index 0000000..5a6583e --- /dev/null +++ b/internal/restrictions/update/update_test.go @@ -0,0 +1,121 @@ +package update + +import ( + "context" + "net/http" + "testing" + + "github.com/go-errors/errors" + "github.com/h2non/gock" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func TestUpdateRestrictionsCommand(t *testing.T) { + projectRef := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + t.Run("updates v4 and v6 CIDR", func(t *testing.T) { + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/projects/" + projectRef + "/network-restrictions/apply"). + MatchType("json"). + JSON(api.NetworkRestrictionsRequest{ + DbAllowedCidrs: &[]string{"12.3.4.5/32", "1.2.3.1/24"}, + DbAllowedCidrsV6: &[]string{"2001:db8:abcd:0012::0/64"}, + }). + Reply(http.StatusCreated). + JSON(api.NetworkRestrictionsResponse{ + Status: api.NetworkRestrictionsResponseStatus("applied"), + }) + // Run test + err := Run(context.Background(), projectRef, []string{"12.3.4.5/32", "2001:db8:abcd:0012::0/64", "1.2.3.1/24"}, false) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on network failure", func(t *testing.T) { + errNetwork := errors.New("network error") + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/projects/" + projectRef + "/network-restrictions/apply"). + MatchType("json"). + JSON(api.NetworkRestrictionsRequest{ + DbAllowedCidrs: &[]string{}, + DbAllowedCidrsV6: &[]string{}, + }). + ReplyError(errNetwork) + // Run test + err := Run(context.Background(), projectRef, []string{}, true) + // Check error + assert.ErrorIs(t, err, errNetwork) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on server unavailable", func(t *testing.T) { + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/projects/" + projectRef + "/network-restrictions/apply"). + MatchType("json"). + JSON(api.NetworkRestrictionsRequest{ + DbAllowedCidrs: &[]string{}, + DbAllowedCidrsV6: &[]string{}, + }). 
+ Reply(http.StatusServiceUnavailable) + // Run test + err := Run(context.Background(), projectRef, []string{}, true) + // Check error + assert.ErrorContains(t, err, "failed to apply network restrictions:") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestValidateCIDR(t *testing.T) { + projectRef := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + t.Run("bypasses private subnet checks", func(t *testing.T) { + // Setup mock api + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/projects/" + projectRef + "/network-restrictions/apply"). + MatchType("json"). + JSON(api.NetworkRestrictionsRequest{ + DbAllowedCidrs: &[]string{"10.0.0.0/8"}, + DbAllowedCidrsV6: &[]string{}, + }). + Reply(http.StatusCreated). + JSON(api.NetworkRestrictionsResponse{ + Status: api.NetworkRestrictionsResponseStatus("applied"), + }) + // Run test + err := Run(context.Background(), projectRef, []string{"10.0.0.0/8"}, true) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on private subnet", func(t *testing.T) { + // Run test + err := Run(context.Background(), projectRef, []string{"12.3.4.5/32", "10.0.0.0/8", "1.2.3.1/24"}, false) + // Check error + assert.ErrorContains(t, err, "private IP provided: 10.0.0.0/8") + }) + + t.Run("throws error on invalid subnet", func(t *testing.T) { + // Run test + err := Run(context.Background(), projectRef, []string{"12.3.4.5", "10.0.0.0/8", "1.2.3.1/24"}, false) + // Check error + assert.ErrorContains(t, err, "failed to parse IP: 12.3.4.5") + }) +} diff --git a/internal/secrets/list/list.go b/internal/secrets/list/list.go new file mode 100644 index 0000000..e5b4d45 --- /dev/null +++ b/internal/secrets/list/list.go @@ -0,0 +1,45 @@ +package list + +import ( + "context" + "fmt" + "sort" + "strings" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func Run(ctx context.Context, projectRef string, fsys afero.Fs) error { + secrets, err := GetSecretDigests(ctx, projectRef) + if err != nil { + return err + } + + table := `|NAME|DIGEST| +|-|-| +` + for _, secret := range secrets { + table += fmt.Sprintf("|`%s`|`%s`|\n", strings.ReplaceAll(secret.Name, "|", "\\|"), secret.Value) + } + + return list.RenderTable(table) +} + +func GetSecretDigests(ctx context.Context, projectRef string) ([]api.SecretResponse, error) { + resp, err := utils.GetSupabase().V1ListAllSecretsWithResponse(ctx, projectRef) + if err != nil { + return nil, errors.Errorf("failed to list secrets: %w", err) + } + if resp.JSON200 == nil { + return nil, errors.New("Unexpected error retrieving project secrets: " + string(resp.Body)) + } + secrets := *resp.JSON200 + sort.Slice(secrets, func(i, j int) bool { + return secrets[i].Name < secrets[j].Name + }) + return secrets, nil +} diff --git a/internal/secrets/list/list_test.go b/internal/secrets/list/list_test.go new file mode 100644 index 0000000..4e0da98 --- /dev/null +++ b/internal/secrets/list/list_test.go @@ -0,0 +1,114 @@ +package list + +import ( + "context" + "errors" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func 
TestSecretListCommand(t *testing.T) { + t.Run("lists all secrets", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/secrets"). + Reply(200). + JSON([]api.SecretResponse{ + { + Name: "Test Secret", + Value: "dummy-secret-value", + }, + }) + // Run test + err := Run(context.Background(), project, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on missing access token", func(t *testing.T) { + t.Skip() + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), "", fsys) + // Check error + assert.ErrorContains(t, err, "Unexpected error retrieving project secrets") + }) + + t.Run("throws error on network error", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/secrets"). + ReplyError(errors.New("network error")) + // Run test + err := Run(context.Background(), project, fsys) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on server unavailable", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/secrets"). + Reply(500). + JSON(map[string]string{"message": "unavailable"}) + // Run test + err := Run(context.Background(), project, fsys) + // Check error + assert.ErrorContains(t, err, `Unexpected error retrieving project secrets: {"message":"unavailable"}`) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on malformed json", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + project + "/secrets"). + Reply(200). 
+ JSON(map[string]string{}) + // Run test + err := Run(context.Background(), project, fsys) + // Check error + assert.ErrorContains(t, err, "json: cannot unmarshal object into Go value of type []api.SecretResponse") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/secrets/set/set.go b/internal/secrets/set/set.go new file mode 100644 index 0000000..d2877c3 --- /dev/null +++ b/internal/secrets/set/set.go @@ -0,0 +1,81 @@ +package set + +import ( + "context" + "fmt" + "maps" + "net/http" + "os" + "path/filepath" + "strings" + + "github.com/go-errors/errors" + "github.com/joho/godotenv" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func Run(ctx context.Context, projectRef, envFilePath string, args []string, fsys afero.Fs) error { + // 1. Sanity checks. + envMap := make(map[string]string, len(args)) + if len(envFilePath) > 0 { + if !filepath.IsAbs(envFilePath) { + envFilePath = filepath.Join(utils.CurrentDirAbs, envFilePath) + } + parsed, err := ParseEnvFile(envFilePath, fsys) + if err != nil { + return err + } + maps.Copy(envMap, parsed) + } + for _, pair := range args { + name, value, found := strings.Cut(pair, "=") + if !found { + return errors.Errorf("Invalid secret pair: %s. Must be NAME=VALUE.", pair) + } + envMap[name] = value + } + if len(envMap) == 0 { + return errors.New("No arguments found. Use --env-file to read from a .env file.") + } + // 2. Set secret(s). + var secrets api.V1BulkCreateSecretsJSONBody + for name, value := range envMap { + // Lower case prefix is accepted by API + if strings.HasPrefix(name, "SUPABASE_") { + fmt.Fprintln(os.Stderr, "Env name cannot start with SUPABASE_, skipping: "+name) + continue + } + secret := api.CreateSecretBody{ + Name: name, + Value: value, + } + secrets = append(secrets, secret) + } + + resp, err := utils.GetSupabase().V1BulkCreateSecretsWithResponse(ctx, projectRef, secrets) + if err != nil { + return errors.Errorf("failed to set secrets: %w", err) + } + + if resp.StatusCode() != http.StatusCreated { + return errors.New("Unexpected error setting project secrets: " + string(resp.Body)) + } + + fmt.Println("Finished " + utils.Aqua("supabase secrets set") + ".") + return nil +} + +func ParseEnvFile(envFilePath string, fsys afero.Fs) (map[string]string, error) { + f, err := fsys.Open(envFilePath) + if err != nil { + return nil, errors.Errorf("failed to open env file: %w", err) + } + defer f.Close() + envMap, err := godotenv.Parse(f) + if err != nil { + return nil, errors.Errorf("failed to parse env file: %w", err) + } + return envMap, nil +} diff --git a/internal/secrets/set/set_test.go b/internal/secrets/set/set_test.go new file mode 100644 index 0000000..6021804 --- /dev/null +++ b/internal/secrets/set/set_test.go @@ -0,0 +1,139 @@ +package set + +import ( + "context" + "errors" + "net/http" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func TestSecretSetCommand(t *testing.T) { + dummy := api.CreateSecretBody{Name: "my_name", Value: "my_value"} + dummyEnv := dummy.Name + "=" + dummy.Value + + t.Run("Sets secret via cli args", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := 
apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/projects/" + project + "/secrets"). + MatchType("json"). + JSON(api.V1BulkCreateSecretsJSONRequestBody{dummy}). + Reply(http.StatusCreated) + // Run test + err := Run(context.Background(), project, "", []string{dummyEnv}, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("Sets secret value via env file", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, "/tmp/.env", []byte(dummyEnv), 0644)) + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/projects/" + project + "/secrets"). + MatchType("json"). + JSON(api.V1BulkCreateSecretsJSONRequestBody{dummy}). + Reply(http.StatusCreated) + // Run test + err := Run(context.Background(), project, "/tmp/.env", []string{}, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on empty secret", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Run test + err := Run(context.Background(), project, "", []string{}, fsys) + // Check error + assert.ErrorContains(t, err, "No arguments found. Use --env-file to read from a .env file.") + }) + + t.Run("throws error on malformed secret", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Run test + err := Run(context.Background(), project, "", []string{"malformed"}, fsys) + // Check error + assert.ErrorContains(t, err, "Invalid secret pair: malformed. Must be NAME=VALUE.") + }) + + t.Run("throws error on network error", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/projects/" + project + "/secrets"). + MatchType("json"). + JSON(api.V1BulkCreateSecretsJSONRequestBody{dummy}). 
+ ReplyError(errors.New("network error")) + // Run test + err := Run(context.Background(), project, "", []string{dummyEnv}, fsys) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on server unavailable", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Post("/v1/projects/" + project + "/secrets"). + MatchType("json"). + JSON(api.V1BulkCreateSecretsJSONRequestBody{dummy}). + Reply(500). + JSON(map[string]string{"message": "unavailable"}) + // Run test + err := Run(context.Background(), project, "", []string{dummyEnv}, fsys) + // Check error + assert.ErrorContains(t, err, `Unexpected error setting project secrets: {"message":"unavailable"}`) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/secrets/unset/unset.go b/internal/secrets/unset/unset.go new file mode 100644 index 0000000..f6bbca8 --- /dev/null +++ b/internal/secrets/unset/unset.go @@ -0,0 +1,49 @@ +package unset + +import ( + "context" + "fmt" + "net/http" + "os" + "strings" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/secrets/list" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context, projectRef string, args []string, fsys afero.Fs) error { + if len(args) == 0 { + secrets, err := list.GetSecretDigests(ctx, projectRef) + if err != nil { + return err + } + for _, secret := range secrets { + if !strings.HasPrefix(secret.Name, "SUPABASE_") { + args = append(args, secret.Name) + } + } + } + // 1. Sanity checks. + if len(args) == 0 { + fmt.Fprintln(os.Stderr, "You have not set any function secrets, nothing to do.") + return nil + } + msg := fmt.Sprintf("Do you want to unset these function secrets?\n • %s\n\n", strings.Join(args, "\n • ")) + if shouldUnset, err := utils.NewConsole().PromptYesNo(ctx, msg, true); err != nil { + return err + } else if !shouldUnset { + return errors.New(context.Canceled) + } + // 2. Unset secret(s). 
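+ // When no names are passed, reserved SUPABASE_* secrets were filtered out above; whatever remains is removed in one bulk call: + // V1BulkDeleteSecretsWithResponse issues a DELETE to /v1/projects/{ref}/secrets with the secret names as a JSON array body (e.g. ["my-secret"]), as exercised in unset_test.go below.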
+ resp, err := utils.GetSupabase().V1BulkDeleteSecretsWithResponse(ctx, projectRef, args) + if err != nil { + return errors.Errorf("failed to delete secrets: %w", err) + } + if resp.StatusCode() != http.StatusOK { + return errors.New("Unexpected error unsetting project secrets: " + string(resp.Body)) + } + fmt.Println("Finished " + utils.Aqua("supabase secrets unset") + ".") + return nil +} diff --git a/internal/secrets/unset/unset_test.go b/internal/secrets/unset/unset_test.go new file mode 100644 index 0000000..3207f8d --- /dev/null +++ b/internal/secrets/unset/unset_test.go @@ -0,0 +1,83 @@ +package unset + +import ( + "context" + "errors" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func TestSecretUnsetCommand(t *testing.T) { + t.Run("Unsets secret via cli args", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Delete("/v1/projects/" + project + "/secrets"). + MatchType("json"). + JSON(api.V1BulkDeleteSecretsJSONRequestBody{"my-secret"}). + Reply(200) + // Run test + err := Run(context.Background(), project, []string{"my-secret"}, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on network error", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Delete("/v1/projects/" + project + "/secrets"). + MatchType("json"). + JSON(api.V1BulkDeleteSecretsJSONRequestBody{"my-secret"}). + ReplyError(errors.New("network error")) + // Run test + err := Run(context.Background(), project, []string{"my-secret"}, fsys) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on server unavailable", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup valid project ref + project := apitest.RandomProjectRef() + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + // Flush pending mocks after test execution + defer gock.OffAll() + gock.New(utils.DefaultApiHost). + Delete("/v1/projects/" + project + "/secrets"). + MatchType("json"). + JSON(api.V1BulkDeleteSecretsJSONRequestBody{"my-secret"}). + Reply(500). 
+ JSON(map[string]string{"message": "unavailable"}) + // Run test + err := Run(context.Background(), project, []string{"my-secret"}, fsys) + // Check error + assert.ErrorContains(t, err, `Unexpected error unsetting project secrets: {"message":"unavailable"}`) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/seed/buckets/buckets.go b/internal/seed/buckets/buckets.go new file mode 100644 index 0000000..365c3a3 --- /dev/null +++ b/internal/seed/buckets/buckets.go @@ -0,0 +1,33 @@ +package buckets + +import ( + "context" + "fmt" + + "github.com/spf13/afero" + "github.com/supabase/cli/internal/storage/client" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context, projectRef string, interactive bool, fsys afero.Fs) error { + api, err := client.NewStorageAPI(ctx, projectRef) + if err != nil { + return err + } + console := utils.NewConsole() + if !interactive { + console.IsTTY = false + } + filter := func(bucketId string) bool { + label := fmt.Sprintf("Bucket %s already exists. Do you want to overwrite its properties?", utils.Bold(bucketId)) + shouldOverwrite, err := console.PromptYesNo(ctx, label, true) + if err != nil { + fmt.Fprintln(utils.GetDebugLogger(), err) + } + return shouldOverwrite + } + if err := api.UpsertBuckets(ctx, utils.Config.Storage.Buckets, filter); err != nil { + return err + } + return api.UpsertObjects(ctx, utils.Config.Storage.Buckets, utils.NewRootFS(fsys)) +} diff --git a/internal/seed/buckets/buckets_test.go b/internal/seed/buckets/buckets_test.go new file mode 100644 index 0000000..e14a40e --- /dev/null +++ b/internal/seed/buckets/buckets_test.go @@ -0,0 +1,67 @@ +package buckets + +import ( + "context" + "net/http" + "path/filepath" + "testing" + + "github.com/BurntSushi/toml" + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/storage" +) + +func TestSeedBuckets(t *testing.T) { + t.Run("seeds buckets", func(t *testing.T) { + t.Cleanup(func() { clear(utils.Config.Storage.Buckets) }) + config := ` +[test] +public = true +[private] +public = false` + require.NoError(t, toml.Unmarshal([]byte(config), &utils.Config.Storage.Buckets)) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + bucketPath := filepath.Join(utils.SupabaseDirPath, "images") + require.NoError(t, fsys.Mkdir(bucketPath, 0755)) + // Setup mock api + gock.New(utils.Config.Api.ExternalUrl). + Get("/storage/v1/bucket"). + Reply(http.StatusOK). + JSON([]storage.BucketResponse{{ + Name: "test", + Id: "test", + }}) + gock.New(utils.Config.Api.ExternalUrl). + Put("/storage/v1/bucket/test"). + Reply(http.StatusOK). + JSON(storage.UpdateBucketResponse{}) + gock.New(utils.Config.Api.ExternalUrl). + Post("/storage/v1/bucket"). + Reply(http.StatusOK). + JSON(storage.CreateBucketResponse{Name: "private"}) + // Run test + err := Run(context.Background(), "", false, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("ignores unconfigured buckets", func(t *testing.T) { + // Setup mock api + gock.New(utils.Config.Api.ExternalUrl). + Get("/storage/v1/bucket"). + Reply(http.StatusOK). 
+ JSON([]storage.BucketResponse{}) + // Run test + err := Run(context.Background(), "", false, afero.NewMemMapFs()) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/services/services.go b/internal/services/services.go new file mode 100644 index 0000000..ebc08ef --- /dev/null +++ b/internal/services/services.go @@ -0,0 +1,113 @@ +package services + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + "sync" + + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/internal/utils/tenant" +) + +func Run(ctx context.Context, fsys afero.Fs) error { + if err := flags.LoadProjectRef(fsys); err != nil && !errors.Is(err, utils.ErrNotLinked) { + fmt.Fprintln(os.Stderr, err) + } + if err := utils.Config.Load("", utils.NewRootFS(fsys)); err != nil && !errors.Is(err, os.ErrNotExist) { + fmt.Fprintln(os.Stderr, err) + } + + serviceImages := CheckVersions(ctx, fsys) + table := `|SERVICE IMAGE|LOCAL|LINKED| +|-|-|-| +` + for _, image := range serviceImages { + remote := image.Remote + if len(remote) == 0 { + remote = "-" + } + table += fmt.Sprintf("|`%s`|`%s`|`%s`|\n", image.Name, image.Local, remote) + } + + return list.RenderTable(table) +} + +type imageVersion struct { + Name string `json:"name"` + Local string `json:"local"` + Remote string `json:"remote"` +} + +func CheckVersions(ctx context.Context, fsys afero.Fs) []imageVersion { + var remote map[string]string + if _, err := utils.LoadAccessTokenFS(fsys); err == nil && len(flags.ProjectRef) > 0 { + remote = listRemoteImages(ctx, flags.ProjectRef) + } + var result []imageVersion + for _, image := range utils.Config.GetServiceImages() { + parts := strings.Split(image, ":") + v := imageVersion{Name: parts[0], Local: parts[1]} + if v.Remote = remote[image]; v.Remote == v.Local { + delete(remote, image) + } + result = append(result, v) + } + if len(remote) > 0 { + fmt.Fprintln(os.Stderr, suggestUpdateCmd(remote)) + } + return result +} + +func listRemoteImages(ctx context.Context, projectRef string) map[string]string { + linked := make(map[string]string, 4) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + if version, err := tenant.GetDatabaseVersion(ctx, projectRef); err == nil { + linked[utils.Config.Db.Image] = version + } + }() + keys, err := tenant.GetApiKeys(ctx, projectRef) + if err != nil { + wg.Wait() + return linked + } + api := tenant.NewTenantAPI(ctx, projectRef, keys.Anon) + wg.Add(3) + go func() { + defer wg.Done() + if version, err := api.GetGotrueVersion(ctx); err == nil { + linked[utils.Config.Auth.Image] = version + } + }() + go func() { + defer wg.Done() + if version, err := api.GetPostgrestVersion(ctx); err == nil { + linked[utils.Config.Api.Image] = version + } + }() + go func() { + defer wg.Done() + if version, err := api.GetStorageVersion(ctx); err == nil { + linked[utils.Config.Storage.Image] = version + } + }() + wg.Wait() + return linked +} + +func suggestUpdateCmd(serviceImages map[string]string) string { + cmd := fmt.Sprintln(utils.Yellow("WARNING:"), "You are running different service versions locally than your linked project:") + for k, v := range serviceImages { + cmd += fmt.Sprintf("%s => %s\n", k, v) + } + cmd += fmt.Sprintf("Run %s to update them.", utils.Aqua("supabase link")) + return cmd +} diff --git a/internal/snippets/download/download.go 
b/internal/snippets/download/download.go new file mode 100644 index 0000000..22b45dd --- /dev/null +++ b/internal/snippets/download/download.go @@ -0,0 +1,30 @@ +package download + +import ( + "context" + "fmt" + + "github.com/go-errors/errors" + "github.com/google/uuid" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context, snippetId string, fsys afero.Fs) error { + // Convert string to UUID + id, err := uuid.Parse(snippetId) + if err != nil { + return fmt.Errorf("invalid snippet ID: %w", err) + } + resp, err := utils.GetSupabase().V1GetASnippetWithResponse(ctx, id) + if err != nil { + return errors.Errorf("failed to download snippet: %w", err) + } + + if resp.JSON200 == nil { + return errors.New("Unexpected error downloading SQL snippet: " + string(resp.Body)) + } + + fmt.Println(resp.JSON200.Content.Sql) + return nil +} diff --git a/internal/snippets/list/list.go b/internal/snippets/list/list.go new file mode 100644 index 0000000..f54325e --- /dev/null +++ b/internal/snippets/list/list.go @@ -0,0 +1,43 @@ +package list + +import ( + "context" + "fmt" + "strings" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/api" +) + +func Run(ctx context.Context, fsys afero.Fs) error { + opts := api.V1ListAllSnippetsParams{ProjectRef: &flags.ProjectRef} + resp, err := utils.GetSupabase().V1ListAllSnippetsWithResponse(ctx, &opts) + if err != nil { + return errors.Errorf("failed to list snippets: %w", err) + } + + if resp.JSON200 == nil { + return errors.New("Unexpected error listing SQL snippets: " + string(resp.Body)) + } + + table := `|ID|NAME|VISIBILITY|OWNER|CREATED AT (UTC)|UPDATED AT (UTC)| +|-|-|-|-|-|-| +` + for _, snippet := range resp.JSON200.Data { + table += fmt.Sprintf( + "|`%s`|`%s`|`%s`|`%s`|`%s`|`%s`|\n", + snippet.Id, + strings.ReplaceAll(snippet.Name, "|", "\\|"), + strings.ReplaceAll(string(snippet.Visibility), "|", "\\|"), + strings.ReplaceAll(snippet.Owner.Username, "|", "\\|"), + utils.FormatTimestamp(snippet.InsertedAt), + utils.FormatTimestamp(snippet.UpdatedAt), + ) + } + + return list.RenderTable(table) +} diff --git a/internal/ssl_enforcement/get/get.go b/internal/ssl_enforcement/get/get.go new file mode 100644 index 0000000..e6d5869 --- /dev/null +++ b/internal/ssl_enforcement/get/get.go @@ -0,0 +1,31 @@ +package get + +import ( + "context" + "fmt" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context, projectRef string, fsys afero.Fs) error { + // 1. Sanity checks. + // 2. 
get ssl enforcement config
+	{
+		resp, err := utils.GetSupabase().V1GetSslEnforcementConfigWithResponse(ctx, projectRef)
+		if err != nil {
+			return errors.Errorf("failed to retrieve SSL enforcement config: %w", err)
+		}
+		if resp.JSON200 == nil {
+			return errors.New("failed to retrieve SSL enforcement config; received: " + string(resp.Body))
+		}
+
+		if resp.JSON200.CurrentConfig.Database && resp.JSON200.AppliedSuccessfully {
+			fmt.Println("SSL is being enforced.")
+		} else {
+			fmt.Println("SSL is *NOT* being enforced.")
+		}
+		return nil
+	}
+}
diff --git a/internal/ssl_enforcement/update/update.go b/internal/ssl_enforcement/update/update.go
new file mode 100644
index 0000000..51b0b67
--- /dev/null
+++ b/internal/ssl_enforcement/update/update.go
@@ -0,0 +1,35 @@
+package update
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/go-errors/errors"
+	"github.com/spf13/afero"
+	"github.com/supabase/cli/internal/utils"
+	"github.com/supabase/cli/pkg/api"
+)
+
+func Run(ctx context.Context, projectRef string, enforceDbSsl bool, fsys afero.Fs) error {
+	// 1. sanity checks
+	// 2. update ssl enforcement config
+	{
+		resp, err := utils.GetSupabase().V1UpdateSslEnforcementConfigWithResponse(ctx, projectRef, api.V1UpdateSslEnforcementConfigJSONRequestBody{
+			RequestedConfig: api.SslEnforcements{
+				Database: enforceDbSsl,
+			},
+		})
+		if err != nil {
+			return errors.Errorf("failed to update ssl enforcement: %w", err)
+		}
+		if resp.JSON200 == nil {
+			return errors.New("failed to update SSL enforcement config: " + string(resp.Body))
+		}
+		if resp.JSON200.CurrentConfig.Database && resp.JSON200.AppliedSuccessfully {
+			fmt.Println("SSL is now being enforced.")
+		} else {
+			fmt.Println("SSL is *NOT* being enforced.")
+		}
+		return nil
+	}
+}
diff --git a/internal/sso/create/create.go b/internal/sso/create/create.go
new file mode 100644
index 0000000..a33b6e7
--- /dev/null
+++ b/internal/sso/create/create.go
@@ -0,0 +1,90 @@
+package create
+
+import (
+	"context"
+	"net/http"
+	"os"
+
+	"github.com/go-errors/errors"
+	"github.com/spf13/afero"
+	"github.com/supabase/cli/internal/sso/internal/render"
+	"github.com/supabase/cli/internal/sso/internal/saml"
+	"github.com/supabase/cli/internal/utils"
+	"github.com/supabase/cli/pkg/api"
+)
+
+var Fs = afero.NewOsFs()
+
+type RunParams struct {
+	ProjectRef string
+	Format     string
+
+	Type              string
+	Domains           []string
+	MetadataFile      string
+	MetadataURL       string
+	SkipURLValidation bool
+	AttributeMapping  string
+}
+
+func Run(ctx context.Context, params RunParams) error {
+	var body api.V1CreateASsoProviderJSONRequestBody
+	body.Type = api.CreateProviderBodyType(params.Type)
+
+	if params.MetadataFile != "" {
+		data, err := saml.ReadMetadataFile(Fs, params.MetadataFile)
+		if err != nil {
+			return err
+		}
+
+		body.MetadataXml = &data
+	} else if params.MetadataURL != "" {
+		if !params.SkipURLValidation {
+			if err := saml.ValidateMetadataURL(ctx, params.MetadataURL); err != nil {
+				return errors.Errorf("%w Use --skip-url-validation to suppress this error.", err)
+			}
+		}
+
+		body.MetadataUrl = &params.MetadataURL
+	}
+
+	if params.AttributeMapping != "" {
+		data, err := saml.ReadAttributeMappingFile(Fs, params.AttributeMapping)
+		if err != nil {
+			return err
+		}
+
+		body.AttributeMapping = data
+	}
+
+	if params.Domains != nil {
+		body.Domains = &params.Domains
+	}
+
+	resp, err := utils.GetSupabase().V1CreateASsoProviderWithResponse(ctx, params.ProjectRef, body)
+	if err != nil {
+		return errors.Errorf("failed to create sso provider: %w", err)
+	}
+
+	if resp.JSON201 == nil {
+		if
resp.StatusCode() == http.StatusNotFound { + return errors.New("SAML 2.0 support is not enabled for this project. Please enable it through the dashboard") + } + + return errors.New("Unexpected error adding identity provider: " + string(resp.Body)) + } + + switch params.Format { + case utils.OutputPretty: + return render.SingleMarkdown(api.Provider{ + Id: resp.JSON201.Id, + Saml: resp.JSON201.Saml, + Domains: resp.JSON201.Domains, + CreatedAt: resp.JSON201.CreatedAt, + UpdatedAt: resp.JSON201.UpdatedAt, + }) + + default: + return utils.EncodeOutput(params.Format, os.Stdout, resp.JSON201) + } +} diff --git a/internal/sso/get/get.go b/internal/sso/get/get.go new file mode 100644 index 0000000..3437052 --- /dev/null +++ b/internal/sso/get/get.go @@ -0,0 +1,46 @@ +package get + +import ( + "context" + "fmt" + "net/http" + "os" + + "github.com/go-errors/errors" + "github.com/supabase/cli/internal/sso/internal/render" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func Run(ctx context.Context, ref, providerId, format string) error { + resp, err := utils.GetSupabase().V1GetASsoProviderWithResponse(ctx, ref, providerId) + if err != nil { + return err + } + + if resp.JSON200 == nil { + if resp.StatusCode() == http.StatusNotFound { + return errors.Errorf("An identity provider with ID %q could not be found.", providerId) + } + + return errors.New("Unexpected error fetching identity provider: " + string(resp.Body)) + } + + switch format { + case utils.OutputMetadata: + _, err := fmt.Println(*resp.JSON200.Saml.MetadataXml) + return err + + case utils.OutputPretty: + return render.SingleMarkdown(api.Provider{ + Id: resp.JSON200.Id, + Saml: resp.JSON200.Saml, + Domains: resp.JSON200.Domains, + CreatedAt: resp.JSON200.CreatedAt, + UpdatedAt: resp.JSON200.UpdatedAt, + }) + + default: + return utils.EncodeOutput(format, os.Stdout, resp.JSON200) + } +} diff --git a/internal/sso/get/get_test.go b/internal/sso/get/get_test.go new file mode 100644 index 0000000..bd6fd0e --- /dev/null +++ b/internal/sso/get/get_test.go @@ -0,0 +1,95 @@ +package get + +import ( + "context" + "fmt" + "testing" + + "github.com/h2non/gock" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" +) + +func TestSSOProvidersShowCommand(t *testing.T) { + t.Run("show provider", func(t *testing.T) { + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + // Flush pending mocks after test execution + defer gock.OffAll() + + projectRef := "abcdefghijklmnopqrst" + providerId := "0b0d48f6-878b-4190-88d7-2ca33ed800bc" + + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + projectRef + "/config/auth/sso/providers/" + providerId). + Reply(200). 
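+			// The fixture below is a complete provider payload (SAML descriptor,
+			// attribute_mapping, domains); the subtest only asserts that
+			// render.SingleMarkdown accepts it and that every mock is consumed.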
+ JSON(map[string]any{ + "id": providerId, + "created_at": "2023-03-28T13:50:14.464Z", + "updated_at": "2023-03-28T13:50:14.464Z", + "saml": map[string]any{ + "id": "8682fcf4-4056-455c-bd93-f33295604929", + "metadata_url": "https://example.com", + "metadata_xml": "", + "entity_id": "https://example.com", + "attribute_mapping": map[string]any{ + "keys": map[string]any{ + "a": map[string]any{ + "name": "xyz", + "names": []string{ + "x", + "y", + "z", + }, + "default": 3, + }, + }, + }, + "created_at": "2023-03-28T13:50:14.464Z", + "updated_at": "2023-03-28T13:50:14.464Z", + }, + "domains": []map[string]any{ + { + "id": "9484591c-a203-4500-bea7-d0aaa845e2f5", + "domain": "example.com", + "created_at": "2023-03-28T13:50:14.464Z", + "updated_at": "2023-03-28T13:50:14.464Z", + }, + }, + }) + + // Run test + assert.NoError(t, Run(context.Background(), projectRef, providerId, utils.OutputPretty)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("show provider that does not exist", func(t *testing.T) { + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + // Flush pending mocks after test execution + defer gock.OffAll() + + projectRef := "abcdefghijklmnopqrst" + providerId := "0b0d48f6-878b-4190-88d7-2ca33ed800bc" + + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + projectRef + "/config/auth/sso/providers/" + providerId). + Reply(404). + JSON(map[string]string{}) + + err := Run(context.Background(), projectRef, providerId, utils.OutputPretty) + + // Run test + assert.Error(t, err) + assert.Equal(t, err.Error(), fmt.Sprintf("An identity provider with ID %q could not be found.", providerId)) + + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/sso/info/info.go b/internal/sso/info/info.go new file mode 100644 index 0000000..c8a2580 --- /dev/null +++ b/internal/sso/info/info.go @@ -0,0 +1,24 @@ +package info + +import ( + "context" + "fmt" + "os" + + "github.com/supabase/cli/internal/sso/internal/render" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context, ref string, format string) error { + switch format { + case utils.OutputPretty: + return render.InfoMarkdown(ref) + + default: + return utils.EncodeOutput(format, os.Stdout, map[string]interface{}{ + "acs_url": fmt.Sprintf("https://%s.supabase.co/auth/v1/sso/saml/acs", ref), + "entity_id": fmt.Sprintf("https://%s.supabase.co/auth/v1/sso/saml/metadata", ref), + "relay_state": fmt.Sprintf("https://%s.supabase.co", ref), + }) + } +} diff --git a/internal/sso/internal/render/render.go b/internal/sso/internal/render/render.go new file mode 100644 index 0000000..3f3a03d --- /dev/null +++ b/internal/sso/internal/render/render.go @@ -0,0 +1,180 @@ +package render + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-errors/errors" + "github.com/go-xmlfmt/xmlfmt" + "github.com/supabase/cli/internal/migration/list" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func formatProtocol(provider api.Provider) string { + protocol := "SAML 2.0" + if provider.Saml == nil || *provider.Saml == (api.SamlDescriptor{}) { + protocol = "unknown" + } + + return protocol +} + +func formatMetadataSource(provider api.Provider) string { + source := "FILE" + if provider.Saml != nil && provider.Saml.MetadataUrl != nil && *provider.Saml.MetadataUrl != "" { + source = *provider.Saml.MetadataUrl + } + + return source +} + +func 
formatAttributeMapping(attributeMapping *api.AttributeMapping) (string, error) {
+	data, err := json.MarshalIndent(attributeMapping, "", " ")
+	if err != nil {
+		return "", errors.Errorf("failed to marshal attribute mapping: %w", err)
+	}
+
+	return string(data), nil
+}
+
+func formatTimestamp(timestamp *string) string {
+	if timestamp == nil {
+		return ""
+	}
+
+	return utils.FormatTimestamp(*timestamp)
+}
+
+func formatDomains(provider api.Provider) string {
+	var domains []string
+
+	if provider.Domains != nil {
+		for _, domain := range *provider.Domains {
+			if domain.Domain != nil {
+				domains = append(domains, *domain.Domain)
+			}
+		}
+	}
+
+	domainsString := "-"
+	if len(domains) > 0 {
+		domainsString = strings.Join(domains, ", ")
+	}
+
+	return domainsString
+}
+
+func formatEntityID(provider api.Provider) string {
+	entityID := "-"
+	if provider.Saml != nil && provider.Saml.EntityId != "" {
+		entityID = provider.Saml.EntityId
+	}
+
+	return entityID
+}
+
+func ListMarkdown(providers []api.Provider) error {
+	markdownTable := []string{
+		"|TYPE|IDENTITY PROVIDER ID|DOMAINS|SAML 2.0 `EntityID`|CREATED AT (UTC)|UPDATED AT (UTC)|\n|-|-|-|-|-|-|\n",
+	}
+
+	for _, item := range providers {
+		markdownTable = append(markdownTable, fmt.Sprintf(
+			"|`%s`|`%s`|`%s`|`%s`|`%s`|`%s`|\n",
+			formatProtocol(item),
+			item.Id,
+			formatDomains(item),
+			formatEntityID(item),
+			formatTimestamp(item.CreatedAt),
+			formatTimestamp(item.UpdatedAt),
+		))
+	}
+
+	return list.RenderTable(strings.Join(markdownTable, ""))
+}
+
+func SingleMarkdown(provider api.Provider) error {
+	markdownTable := []string{
+		"|PROPERTY|VALUE|",
+		"|-|-|",
+	}
+
+	markdownTable = append(markdownTable, fmt.Sprintf(
+		"|IDENTITY PROVIDER ID|`%s`|",
+		provider.Id,
+	))
+
+	markdownTable = append(markdownTable, fmt.Sprintf(
+		"|TYPE|`%s`|",
+		formatProtocol(provider),
+	))
+
+	markdownTable = append(markdownTable, fmt.Sprintf(
+		"|DOMAINS|`%s`|",
+		formatDomains(provider),
+	))
+
+	if provider.Saml != nil {
+		markdownTable = append(markdownTable, fmt.Sprintf(
+			"|SAML 2.0 METADATA|`%s`|",
+			formatMetadataSource(provider),
+		))
+
+		markdownTable = append(markdownTable, fmt.Sprintf(
+			"|SAML 2.0 `EntityID`|`%s`|",
+			formatEntityID(provider),
+		))
+	}
+
+	markdownTable = append(markdownTable, fmt.Sprintf(
+		"|CREATED AT (UTC)|`%s`|",
+		formatTimestamp(provider.CreatedAt),
+	))
+
+	markdownTable = append(markdownTable, fmt.Sprintf(
+		"|UPDATED AT (UTC)|`%s`|",
+		formatTimestamp(provider.UpdatedAt),
+	))
+
+	if provider.Saml != nil && provider.Saml.AttributeMapping != nil && len(provider.Saml.AttributeMapping.Keys) > 0 {
+		attributeMapping, err := formatAttributeMapping(provider.Saml.AttributeMapping)
+		if err != nil {
+			return err
+		}
+
+		markdownTable = append(markdownTable, "", "## Attribute Mapping", "```json", attributeMapping, "```")
+	}
+
+	if provider.Saml != nil && provider.Saml.MetadataXml != nil && *provider.Saml.MetadataXml != "" {
+		prettyXML := xmlfmt.FormatXML(*provider.Saml.MetadataXml, " ", " ")
+		markdownTable = append(markdownTable, "", "## SAML 2.0 Metadata XML", "```xml", prettyXML, "```")
+	}
+
+	return list.RenderTable(strings.Join(markdownTable, "\n"))
+}
+
+func InfoMarkdown(ref string) error {
+	markdownTable := []string{
+		"|PROPERTY|VALUE|",
+		"|-|-|",
+	}
+
+	markdownTable = append(markdownTable, fmt.Sprintf(
+		"|Single sign-on URL (ACS URL) |`%s`|",
+		fmt.Sprintf("https://%s.supabase.co/auth/v1/sso/saml/acs", ref),
+	))
+
+	markdownTable = append(markdownTable, fmt.Sprintf(
+		"|Audience URI (SP Entity
ID)|`%s`|", + fmt.Sprintf("https://%s.supabase.co/auth/v1/sso/saml/metadata", ref), + )) + + markdownTable = append(markdownTable, fmt.Sprintf( + "|Default Relay State|`%s`|", + fmt.Sprintf("https://%s.supabase.co", ref), + )) + + return list.RenderTable(strings.Join(markdownTable, "\n")) +} diff --git a/internal/sso/internal/saml/files.go b/internal/sso/internal/saml/files.go new file mode 100644 index 0000000..62e0846 --- /dev/null +++ b/internal/sso/internal/saml/files.go @@ -0,0 +1,93 @@ +package saml + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/url" + "strings" + "time" + "unicode/utf8" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/pkg/api" + "github.com/supabase/cli/pkg/fetcher" +) + +var DefaultClient = http.DefaultClient + +func ReadMetadataFile(fsys afero.Fs, path string) (string, error) { + file, err := fsys.Open(path) + if err != nil { + return "", errors.Errorf("failed to open metadata file: %w", err) + } + + data, err := io.ReadAll(file) + if err != nil { + return "", errors.Errorf("failed to read metadata file: %w", err) + } + + if err := ValidateMetadata(data, path); err != nil { + return "", err + } + + return string(data), nil +} + +func ReadAttributeMappingFile(fsys afero.Fs, path string) (*api.AttributeMapping, error) { + file, err := fsys.Open(path) + if err != nil { + return nil, errors.Errorf("failed to open attribute mapping: %w", err) + } + + var mapping api.AttributeMapping + dec := json.NewDecoder(file) + if err := dec.Decode(&mapping); err != nil { + return nil, errors.Errorf("failed to parse attribute mapping: %w", err) + } + + return &mapping, nil +} + +func ValidateMetadata(data []byte, source string) error { + if !utf8.Valid(data) { + return errors.Errorf("SAML Metadata XML at %q is not UTF-8 encoded", source) + } + + return nil +} + +func ValidateMetadataURL(ctx context.Context, metadataURL string) error { + parsed, err := url.ParseRequestURI(metadataURL) + if err != nil { + return errors.Errorf("failed to parse metadata uri: %w", err) + } + + if !strings.EqualFold(parsed.Scheme, "https") { + return errors.New("only HTTPS Metadata URLs are supported") + } + + client := fetcher.NewFetcher("", + fetcher.WithHTTPClient(&http.Client{ + Timeout: 10 * time.Second, + }), + fetcher.WithRequestEditor(func(req *http.Request) { + req.Header.Add("Accept", "application/xml") + }), + fetcher.WithExpectedStatus(http.StatusOK), + ) + resp, err := client.Send(ctx, http.MethodGet, metadataURL, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + data, err := io.ReadAll(resp.Body) + if err != nil { + return errors.Errorf("failed to read http response: %w", err) + } + + return ValidateMetadata(data, metadataURL) +} diff --git a/internal/sso/internal/saml/files_test.go b/internal/sso/internal/saml/files_test.go new file mode 100644 index 0000000..ec7afe6 --- /dev/null +++ b/internal/sso/internal/saml/files_test.go @@ -0,0 +1,53 @@ +package saml + +import ( + "context" + "os" + "testing" + + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestReadAttributeMappingFile(t *testing.T) { + t.Run("open file that does not exist", func(t *testing.T) { + _, err := ReadAttributeMappingFile(afero.NewMemMapFs(), "/does-not-exist") + assert.ErrorIs(t, err, os.ErrNotExist) + }) + + t.Run("open file that is not valid JSON", func(t *testing.T) { + fs := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fs, "/not-valid-json", 
[]byte("not-valid-JSON"), 0755)) + + _, err := ReadAttributeMappingFile(fs, "/not-valid-json") + assert.ErrorContains(t, err, "failed to parse attribute mapping") + }) + + t.Run("open valid file", func(t *testing.T) { + fs := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fs, "/valid-json", []byte(`{"keys":{"abc":{"names":["x","y","z"],"default":2,"name":"k"}}}`), 0755)) + + _, err := ReadAttributeMappingFile(fs, "/valid-json") + assert.NoError(t, err) + }) +} + +func TestValidateMetadata(t *testing.T) { + t.Run("with invalid UTF-8", func(t *testing.T) { + err := ValidateMetadata([]byte{0xFF, 0xFF, 0xFF}, "/invalid-utf-8") + assert.ErrorContains(t, err, `SAML Metadata XML at "/invalid-utf-8" is not UTF-8 encoded`) + }) +} + +func TestValidateMetadataURL(t *testing.T) { + t.Run("with relative URL", func(t *testing.T) { + err := ValidateMetadataURL(context.TODO(), "./relative-url") + assert.ErrorContains(t, err, "invalid URI for request") + }) + + t.Run("with HTTP URL", func(t *testing.T) { + err := ValidateMetadataURL(context.TODO(), "http://example.com") + assert.ErrorContains(t, err, "only HTTPS Metadata URLs are supported") + }) +} diff --git a/internal/sso/list/list.go b/internal/sso/list/list.go new file mode 100644 index 0000000..efacb1a --- /dev/null +++ b/internal/sso/list/list.go @@ -0,0 +1,36 @@ +package list + +import ( + "context" + "net/http" + "os" + + "github.com/go-errors/errors" + "github.com/supabase/cli/internal/sso/internal/render" + "github.com/supabase/cli/internal/utils" +) + +func Run(ctx context.Context, ref, format string) error { + resp, err := utils.GetSupabase().V1ListAllSsoProviderWithResponse(ctx, ref) + if err != nil { + return errors.Errorf("failed to list sso providers: %w", err) + } + + if resp.JSON200 == nil { + if resp.StatusCode() == http.StatusNotFound { + return errors.New("Looks like SAML 2.0 support is not enabled for this project. Please use the dashboard to enable it.") + } + + return errors.New("unexpected error listing identity providers: " + string(resp.Body)) + } + + switch format { + case utils.OutputPretty: + return render.ListMarkdown(resp.JSON200.Items) + + default: + return utils.EncodeOutput(format, os.Stdout, map[string]any{ + "providers": resp.JSON200.Items, + }) + } +} diff --git a/internal/sso/list/list_test.go b/internal/sso/list/list_test.go new file mode 100644 index 0000000..333ffd4 --- /dev/null +++ b/internal/sso/list/list_test.go @@ -0,0 +1,96 @@ +package list + +import ( + "context" + "testing" + + "github.com/h2non/gock" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" +) + +func TestSSOProvidersListCommand(t *testing.T) { + t.Run("lists all providers", func(t *testing.T) { + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + // Flush pending mocks after test execution + defer gock.OffAll() + + projectRef := "abcdefghijklmnopqrst" + + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + projectRef + "/config/auth/sso/providers"). + Reply(200). 
+ JSON(map[string]any{ + "items": []map[string]any{ + { + "id": "0b0d48f6-878b-4190-88d7-2ca33ed800bc", + "created_at": "2023-03-28T13:50:14.464Z", + "updated_at": "2023-03-28T13:50:14.464Z", + "saml": map[string]any{ + "id": "8682fcf4-4056-455c-bd93-f33295604929", + "metadata_url": "https://example.com", + "metadata_xml": "", + "entity_id": "https://example.com", + "attribute_mapping": map[string]any{ + "keys": map[string]any{ + "a": map[string]any{ + "name": "xyz", + "names": []string{ + "x", + "y", + "z", + }, + "default": 3, + }, + }, + }, + "created_at": "2023-03-28T13:50:14.464Z", + "updated_at": "2023-03-28T13:50:14.464Z", + }, + "domains": []map[string]any{ + { + "id": "9484591c-a203-4500-bea7-d0aaa845e2f5", + "domain": "example.com", + "created_at": "2023-03-28T13:50:14.464Z", + "updated_at": "2023-03-28T13:50:14.464Z", + }, + }, + }, + }, + }) + + // Run test + assert.NoError(t, Run(context.Background(), projectRef, utils.OutputPretty)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("list providers with disabled SAML", func(t *testing.T) { + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + // Flush pending mocks after test execution + defer gock.OffAll() + + projectRef := "abcdefghijklmnopqrst" + + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + projectRef + "/config/auth/sso/providers"). + Reply(404). + JSON(map[string]string{}) + + err := Run(context.Background(), projectRef, utils.OutputPretty) + + // Run test + assert.Error(t, err) + assert.Equal(t, err.Error(), "Looks like SAML 2.0 support is not enabled for this project. Please use the dashboard to enable it.") + + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/sso/remove/remove.go b/internal/sso/remove/remove.go new file mode 100644 index 0000000..1514314 --- /dev/null +++ b/internal/sso/remove/remove.go @@ -0,0 +1,41 @@ +package remove + +import ( + "context" + "net/http" + "os" + + "github.com/go-errors/errors" + "github.com/supabase/cli/internal/sso/internal/render" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func Run(ctx context.Context, ref, providerId, format string) error { + resp, err := utils.GetSupabase().V1DeleteASsoProviderWithResponse(ctx, ref, providerId) + if err != nil { + return errors.Errorf("failed to remove sso provider: %w", err) + } + + if resp.JSON200 == nil { + if resp.StatusCode() == http.StatusNotFound { + return errors.Errorf("An identity provider with ID %q could not be found.", providerId) + } + + return errors.New("Unexpected error removing identity provider: " + string(resp.Body)) + } + + switch format { + case utils.OutputPretty: + return render.SingleMarkdown(api.Provider{ + Id: resp.JSON200.Id, + Saml: resp.JSON200.Saml, + Domains: resp.JSON200.Domains, + CreatedAt: resp.JSON200.CreatedAt, + UpdatedAt: resp.JSON200.UpdatedAt, + }) + + default: + return utils.EncodeOutput(format, os.Stdout, resp.JSON200) + } +} diff --git a/internal/sso/remove/remove_test.go b/internal/sso/remove/remove_test.go new file mode 100644 index 0000000..f284f7e --- /dev/null +++ b/internal/sso/remove/remove_test.go @@ -0,0 +1,95 @@ +package remove + +import ( + "context" + "fmt" + "testing" + + "github.com/h2non/gock" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" +) + +func TestSSOProvidersRemoveCommand(t *testing.T) { + 
t.Run("remove provider", func(t *testing.T) { + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + // Flush pending mocks after test execution + defer gock.OffAll() + + projectRef := "abcdefghijklmnopqrst" + providerId := "0b0d48f6-878b-4190-88d7-2ca33ed800bc" + + gock.New(utils.DefaultApiHost). + Delete("/v1/projects/" + projectRef + "/config/auth/sso/providers/" + providerId). + Reply(200). + JSON(map[string]any{ + "id": providerId, + "created_at": "2023-03-28T13:50:14.464Z", + "updated_at": "2023-03-28T13:50:14.464Z", + "saml": map[string]any{ + "id": "8682fcf4-4056-455c-bd93-f33295604929", + "metadata_url": "https://example.com", + "metadata_xml": "", + "entity_id": "https://example.com", + "attribute_mapping": map[string]any{ + "keys": map[string]any{ + "a": map[string]any{ + "name": "xyz", + "names": []string{ + "x", + "y", + "z", + }, + "default": 3, + }, + }, + }, + "created_at": "2023-03-28T13:50:14.464Z", + "updated_at": "2023-03-28T13:50:14.464Z", + }, + "domains": []map[string]any{ + { + "id": "9484591c-a203-4500-bea7-d0aaa845e2f5", + "domain": "example.com", + "created_at": "2023-03-28T13:50:14.464Z", + "updated_at": "2023-03-28T13:50:14.464Z", + }, + }, + }) + + // Run test + assert.NoError(t, Run(context.Background(), projectRef, providerId, utils.OutputPretty)) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("remove provider that does not exist", func(t *testing.T) { + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + // Flush pending mocks after test execution + defer gock.OffAll() + + projectRef := "abcdefghijklmnopqrst" + providerId := "0b0d48f6-878b-4190-88d7-2ca33ed800bc" + + gock.New(utils.DefaultApiHost). + Delete("/v1/projects/" + projectRef + "/config/auth/sso/providers/" + providerId). + Reply(404). 
+			JSON(map[string]string{})
+
+		err := Run(context.Background(), projectRef, providerId, utils.OutputPretty)
+
+		// Run test
+		assert.Error(t, err)
+		assert.Equal(t, err.Error(), fmt.Sprintf("An identity provider with ID %q could not be found.", providerId))
+
+		// Validate api
+		assert.Empty(t, apitest.ListUnmatchedRequests())
+	})
+}
diff --git a/internal/sso/update/update.go b/internal/sso/update/update.go
new file mode 100644
index 0000000..e8a745d
--- /dev/null
+++ b/internal/sso/update/update.go
@@ -0,0 +1,126 @@
+package update
+
+import (
+	"context"
+	"net/http"
+	"os"
+
+	"github.com/go-errors/errors"
+	"github.com/spf13/afero"
+	"github.com/supabase/cli/internal/sso/internal/render"
+	"github.com/supabase/cli/internal/sso/internal/saml"
+	"github.com/supabase/cli/internal/utils"
+	"github.com/supabase/cli/pkg/api"
+)
+
+var Fs = afero.NewOsFs()
+
+type RunParams struct {
+	ProjectRef string
+	ProviderID string
+	Format     string
+
+	MetadataFile      string
+	MetadataURL       string
+	SkipURLValidation bool
+	AttributeMapping  string
+
+	Domains       []string
+	AddDomains    []string
+	RemoveDomains []string
+}
+
+func Run(ctx context.Context, params RunParams) error {
+	getResp, err := utils.GetSupabase().V1GetASsoProviderWithResponse(ctx, params.ProjectRef, params.ProviderID)
+	if err != nil {
+		return errors.Errorf("failed to get sso provider: %w", err)
+	}
+
+	if getResp.JSON200 == nil {
+		if getResp.StatusCode() == http.StatusNotFound {
+			return errors.Errorf("An identity provider with ID %q could not be found.", params.ProviderID)
+		}
+
+		return errors.New("Unexpected error fetching identity provider: " + string(getResp.Body))
+	}
+
+	var body api.V1UpdateASsoProviderJSONRequestBody
+
+	if params.MetadataFile != "" {
+		data, err := saml.ReadMetadataFile(Fs, params.MetadataFile)
+		if err != nil {
+			return err
+		}
+
+		body.MetadataXml = &data
+	} else if params.MetadataURL != "" {
+		if !params.SkipURLValidation {
+			if err := saml.ValidateMetadataURL(ctx, params.MetadataURL); err != nil {
+				return errors.Errorf("%w Use --skip-url-validation to suppress this error.", err)
+			}
+		}
+
+		body.MetadataUrl = &params.MetadataURL
+	}
+
+	if params.AttributeMapping != "" {
+		data, err := saml.ReadAttributeMappingFile(Fs, params.AttributeMapping)
+		if err != nil {
+			return err
+		}
+
+		body.AttributeMapping = data
+	}
+
+	if len(params.Domains) != 0 {
+		body.Domains = &params.Domains
+	} else if params.AddDomains != nil || params.RemoveDomains != nil {
+		domainsSet := make(map[string]bool)
+
+		if getResp.JSON200.Domains != nil {
+			for _, domain := range *getResp.JSON200.Domains {
+				if domain.Domain != nil {
+					domainsSet[*domain.Domain] = true
+				}
+			}
+		}
+
+		for _, rmDomain := range params.RemoveDomains {
+			delete(domainsSet, rmDomain)
+		}
+
+		for _, addDomain := range params.AddDomains {
+			domainsSet[addDomain] = true
+		}
+
+		domains := make([]string, 0)
+		for domain := range domainsSet {
+			domains = append(domains, domain)
+		}
+
+		body.Domains = &domains
+	}
+
+	putResp, err := utils.GetSupabase().V1UpdateASsoProviderWithResponse(ctx, params.ProjectRef, params.ProviderID, body)
+	if err != nil {
+		return errors.Errorf("failed to update sso provider: %w", err)
+	}
+
+	if putResp.JSON200 == nil {
+		return errors.New("Unexpected error updating identity provider: " + string(putResp.Body))
+	}
+
+	switch params.Format {
+	case utils.OutputPretty:
+		return render.SingleMarkdown(api.Provider{
+			Id:        putResp.JSON200.Id,
+			Saml:      putResp.JSON200.Saml,
+			Domains:   putResp.JSON200.Domains,
+			CreatedAt: putResp.JSON200.CreatedAt,
+ UpdatedAt: putResp.JSON200.UpdatedAt, + }) + + default: + return utils.EncodeOutput(params.Format, os.Stdout, putResp.JSON200) + } +} diff --git a/internal/sso/update/update_test.go b/internal/sso/update/update_test.go new file mode 100644 index 0000000..6754fd4 --- /dev/null +++ b/internal/sso/update/update_test.go @@ -0,0 +1,195 @@ +package update + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "testing" + + "github.com/google/uuid" + "github.com/h2non/gock" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func response(providerId string, domains []string) map[string]any { + resp := map[string]any{ + "id": providerId, + "created_at": "2023-03-28T13:50:14.464Z", + "updated_at": "2023-03-28T13:50:14.464Z", + "saml": map[string]any{ + "id": "8682fcf4-4056-455c-bd93-f33295604929", + "metadata_url": "https://example.com", + "metadata_xml": "", + "entity_id": "https://example.com", + "attribute_mapping": map[string]any{ + "keys": map[string]any{ + "a": map[string]any{ + "name": "xyz", + "names": []string{ + "x", + "y", + "z", + }, + "default": 3, + }, + }, + }, + "created_at": "2023-03-28T13:50:14.464Z", + "updated_at": "2023-03-28T13:50:14.464Z", + }, + "domains": []map[string]any{}, + } + + for _, domain := range domains { + respDomains := resp["domains"].([]map[string]any) + resp["domains"] = append(respDomains, map[string]any{ + "id": "9484591c-a203-4500-bea7-d0aaa845e2f5", + "domain": domain, + "created_at": "2023-03-28T13:50:14.464Z", + "updated_at": "2023-03-28T13:50:14.464Z", + }) + } + + return resp +} + +func TestSSOProvidersUpdateCommand(t *testing.T) { + t.Run("update provider", func(t *testing.T) { + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + // Flush pending mocks after test execution + defer gock.OffAll() + + projectRef := apitest.RandomProjectRef() + providerId := uuid.New().String() + + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + projectRef + "/config/auth/sso/providers/" + providerId). + Reply(200). + JSON(response(providerId, []string{"example.com"})) + + gock.New(utils.DefaultApiHost). + Put("/v1/projects/" + projectRef + "/config/auth/sso/providers/" + providerId). + Reply(200). + JSON(response(providerId, []string{"new-domain.com"})) + + observed := 0 + gock.Observe(func(r *http.Request, mock gock.Mock) { + if r.Method != http.MethodPut { + return + } + observed += 1 + + var body api.V1UpdateASsoProviderJSONRequestBody + assert.NoError(t, json.NewDecoder(r.Body).Decode(&body)) + + assert.NotNil(t, body.Domains) + assert.Equal(t, 1, len(*body.Domains)) + assert.Equal(t, "new-domain.com", (*body.Domains)[0]) + }) + + // Run test + assert.NoError(t, Run(context.Background(), RunParams{ + ProjectRef: projectRef, + ProviderID: providerId, + Format: utils.OutputPretty, + + Domains: []string{ + "new-domain.com", + }, + })) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + assert.Equal(t, 1, observed) + }) + + t.Run("update provider with --add-domains and --remove-domains", func(t *testing.T) { + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + // Flush pending mocks after test execution + defer gock.OffAll() + + projectRef := apitest.RandomProjectRef() + providerId := uuid.New().String() + + gock.New(utils.DefaultApiHost). 
+ Get("/v1/projects/" + projectRef + "/config/auth/sso/providers/" + providerId). + Reply(200). + JSON(response(providerId, []string{"example.com"})) + + gock.New(utils.DefaultApiHost). + Put("/v1/projects/" + projectRef + "/config/auth/sso/providers/" + providerId). + Reply(200). + JSON(response(providerId, []string{"new-domain.com"})) + + observed := 0 + gock.Observe(func(r *http.Request, mock gock.Mock) { + if r.Method != http.MethodPut { + return + } + observed += 1 + + var body api.V1UpdateASsoProviderJSONRequestBody + assert.NoError(t, json.NewDecoder(r.Body).Decode(&body)) + + assert.NotNil(t, body.Domains) + assert.Equal(t, 1, len(*body.Domains)) + assert.Equal(t, "new-domain.com", (*body.Domains)[0]) + }) + + // Run test + assert.NoError(t, Run(context.Background(), RunParams{ + ProjectRef: projectRef, + ProviderID: providerId, + Format: utils.OutputPretty, + + AddDomains: []string{ + "new-domain.com", + }, + RemoveDomains: []string{ + "example.com", + }, + })) + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + assert.Equal(t, 1, observed) + }) + + t.Run("update provider that does not exist", func(t *testing.T) { + // Setup valid access token + token := apitest.RandomAccessToken(t) + t.Setenv("SUPABASE_ACCESS_TOKEN", string(token)) + + // Flush pending mocks after test execution + defer gock.OffAll() + + projectRef := apitest.RandomProjectRef() + providerId := uuid.New().String() + + gock.New(utils.DefaultApiHost). + Get("/v1/projects/" + projectRef + "/config/auth/sso/providers/" + providerId). + Reply(404). + JSON(map[string]string{}) + + err := Run(context.Background(), RunParams{ + ProjectRef: projectRef, + ProviderID: providerId, + Format: utils.OutputPretty, + }) + + // Run test + assert.Error(t, err) + assert.Equal(t, err.Error(), fmt.Sprintf("An identity provider with ID %q could not be found.", providerId)) + + // Validate api + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} diff --git a/internal/start/start.go b/internal/start/start.go new file mode 100644 index 0000000..5402283 --- /dev/null +++ b/internal/start/start.go @@ -0,0 +1,1138 @@ +package start + +import ( + "bytes" + "context" + _ "embed" + "fmt" + "net" + "net/url" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "text/template" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" + "github.com/docker/go-connections/nat" + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/start" + "github.com/supabase/cli/internal/functions/serve" + "github.com/supabase/cli/internal/seed/buckets" + "github.com/supabase/cli/internal/services" + "github.com/supabase/cli/internal/status" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/pkg/config" + "golang.org/x/mod/semver" +) + +func Run(ctx context.Context, fsys afero.Fs, excludedContainers []string, ignoreHealthCheck bool) error { + // Sanity checks. 
+ { + if err := flags.LoadConfig(fsys); err != nil { + return err + } + if err := utils.AssertSupabaseDbIsRunning(); err == nil { + fmt.Fprintln(os.Stderr, utils.Aqua("supabase start")+" is already running.") + names := status.CustomName{} + return status.Run(ctx, names, utils.OutputPretty, fsys) + } else if !errors.Is(err, utils.ErrNotRunning) { + return err + } + if err := flags.LoadProjectRef(fsys); err == nil { + _ = services.CheckVersions(ctx, fsys) + } + } + + if err := utils.RunProgram(ctx, func(p utils.Program, ctx context.Context) error { + dbConfig := pgconn.Config{ + Host: utils.DbId, + Port: 5432, + User: "postgres", + Password: utils.Config.Db.Password, + Database: "postgres", + } + return run(p, ctx, fsys, excludedContainers, dbConfig) + }); err != nil { + if ignoreHealthCheck && start.IsUnhealthyError(err) { + fmt.Fprintln(os.Stderr, err) + } else { + if err := utils.DockerRemoveAll(context.Background(), os.Stderr, utils.Config.ProjectId); err != nil { + fmt.Fprintln(os.Stderr, err) + } + return err + } + } + + fmt.Fprintf(os.Stderr, "Started %s local development setup.\n\n", utils.Aqua("supabase")) + status.PrettyPrint(os.Stdout, excludedContainers...) + return nil +} + +type kongConfig struct { + GotrueId string + RestId string + RealtimeId string + StorageId string + PgmetaId string + EdgeRuntimeId string + LogflareId string + PoolerId string + ApiHost string + ApiPort uint16 +} + +// TODO: deprecate after removing storage headers from kong +func StorageVersionBelow(target string) bool { + parts := strings.Split(utils.Config.Storage.Image, ":v") + return semver.Compare(parts[len(parts)-1], target) < 0 +} + +var ( + //go:embed templates/kong.yml + kongConfigEmbed string + kongConfigTemplate = template.Must(template.New("kongConfig").Funcs(template.FuncMap{ + "StorageVersionBelow": StorageVersionBelow, + }).Parse(kongConfigEmbed)) + + //go:embed templates/custom_nginx.template + nginxConfigEmbed string + // Hardcoded configs which match nginxConfigEmbed + nginxEmailTemplateDir = "/home/kong/templates/email" + nginxTemplateServerPort = 8088 +) + +type vectorConfig struct { + ApiKey string + VectorId string + LogflareId string + KongId string + GotrueId string + RestId string + RealtimeId string + StorageId string + EdgeRuntimeId string + DbId string +} + +var ( + //go:embed templates/vector.yaml + vectorConfigEmbed string + vectorConfigTemplate = template.Must(template.New("vectorConfig").Parse(vectorConfigEmbed)) +) + +type poolerTenant struct { + DbHost string + DbPort uint16 + DbDatabase string + DbPassword string + ExternalId string + ModeType config.PoolMode + DefaultMaxClients uint + DefaultPoolSize uint +} + +var ( + //go:embed templates/pooler.exs + poolerTenantEmbed string + poolerTenantTemplate = template.Must(template.New("poolerTenant").Parse(poolerTenantEmbed)) +) + +var serviceTimeout = 30 * time.Second + +func run(p utils.Program, ctx context.Context, fsys afero.Fs, excludedContainers []string, dbConfig pgconn.Config, options ...func(*pgx.ConnConfig)) error { + excluded := make(map[string]bool) + for _, name := range excludedContainers { + excluded[name] = true + } + + jwks, err := utils.Config.Auth.ResolveJWKS(ctx) + if err != nil { + return err + } + + // Start Postgres. 
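+	// The guard below starts Postgres only while dbConfig still points at the
+	// local container, so run can also be driven against an external database
+	// (illustrative only):
+	//
+	//	cfg := pgconn.Config{Host: "localhost", Port: 5432, User: "postgres"}
+	//	_ = run(p, ctx, fsys, nil, cfg) // skips start.StartDatabase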
+ w := utils.StatusWriter{Program: p} + if dbConfig.Host == utils.DbId { + if err := start.StartDatabase(ctx, "", fsys, w, options...); err != nil { + return err + } + } + + var started []string + var isStorageEnabled = utils.Config.Storage.Enabled && !isContainerExcluded(utils.Config.Storage.Image, excluded) + var isImgProxyEnabled = utils.Config.Storage.ImageTransformation != nil && + utils.Config.Storage.ImageTransformation.Enabled && !isContainerExcluded(utils.Config.Storage.ImgProxyImage, excluded) + p.Send(utils.StatusMsg("Starting containers...")) + + // Start Logflare + if utils.Config.Analytics.Enabled && !isContainerExcluded(utils.Config.Analytics.Image, excluded) { + env := []string{ + "DB_DATABASE=_supabase", + "DB_HOSTNAME=" + dbConfig.Host, + fmt.Sprintf("DB_PORT=%d", dbConfig.Port), + "DB_SCHEMA=_analytics", + "DB_USERNAME=supabase_admin", + "DB_PASSWORD=" + dbConfig.Password, + "LOGFLARE_MIN_CLUSTER_SIZE=1", + "LOGFLARE_SINGLE_TENANT=true", + "LOGFLARE_SUPABASE_MODE=true", + "LOGFLARE_API_KEY=" + utils.Config.Analytics.ApiKey, + "LOGFLARE_LOG_LEVEL=warn", + "LOGFLARE_NODE_HOST=127.0.0.1", + "LOGFLARE_FEATURE_FLAG_OVERRIDE='multibackend=true'", + "RELEASE_COOKIE=cookie", + } + bind := []string{} + + switch utils.Config.Analytics.Backend { + case config.LogflareBigQuery: + workdir, err := os.Getwd() + if err != nil { + return errors.Errorf("failed to get working directory: %w", err) + } + hostJwtPath := filepath.Join(workdir, utils.Config.Analytics.GcpJwtPath) + bind = append(bind, hostJwtPath+":/opt/app/rel/logflare/bin/gcloud.json") + // This is hardcoded in studio frontend + env = append(env, + "GOOGLE_DATASET_ID_APPEND=_prod", + "GOOGLE_PROJECT_ID="+utils.Config.Analytics.GcpProjectId, + "GOOGLE_PROJECT_NUMBER="+utils.Config.Analytics.GcpProjectNumber, + ) + case config.LogflarePostgres: + env = append(env, + fmt.Sprintf("POSTGRES_BACKEND_URL=postgresql://%s:%s@%s:%d/%s", dbConfig.User, dbConfig.Password, dbConfig.Host, dbConfig.Port, "_supabase"), + "POSTGRES_BACKEND_SCHEMA=_analytics", + ) + } + + if _, err := utils.DockerStart( + ctx, + container.Config{ + Hostname: "127.0.0.1", + Image: utils.Config.Analytics.Image, + Env: env, + // Original entrypoint conflicts with healthcheck due to 15 seconds sleep: + // https://github.com/Logflare/logflare/blob/staging/run.sh#L35 + Entrypoint: []string{"sh", "-c", `cat <<'EOF' > run.sh && sh run.sh +./logflare eval Logflare.Release.migrate +./logflare start --sname logflare +EOF +`}, + Healthcheck: &container.HealthConfig{ + Test: []string{"CMD", "curl", "-sSfL", "--head", "-o", "/dev/null", + "http://127.0.0.1:4000/health", + }, + Interval: 10 * time.Second, + Timeout: 2 * time.Second, + Retries: 3, + StartPeriod: 10 * time.Second, + }, + ExposedPorts: nat.PortSet{"4000/tcp": {}}, + }, + container.HostConfig{ + Binds: bind, + PortBindings: nat.PortMap{"4000/tcp": []nat.PortBinding{{HostPort: strconv.FormatUint(uint64(utils.Config.Analytics.Port), 10)}}}, + RestartPolicy: container.RestartPolicy{Name: "always"}, + }, + network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + utils.NetId: { + Aliases: utils.LogflareAliases, + }, + }, + }, + utils.LogflareId, + ); err != nil { + return err + } + started = append(started, utils.LogflareId) + } + + // Start vector + if utils.Config.Analytics.Enabled && !isContainerExcluded(utils.Config.Analytics.VectorImage, excluded) { + var vectorConfigBuf bytes.Buffer + if err := vectorConfigTemplate.Option("missingkey=error").Execute(&vectorConfigBuf, vectorConfig{ + 
ApiKey: utils.Config.Analytics.ApiKey, + VectorId: utils.VectorId, + LogflareId: utils.LogflareId, + KongId: utils.KongId, + GotrueId: utils.GotrueId, + RestId: utils.RestId, + RealtimeId: utils.RealtimeId, + StorageId: utils.StorageId, + EdgeRuntimeId: utils.EdgeRuntimeId, + DbId: utils.DbId, + }); err != nil { + return errors.Errorf("failed to exec template: %w", err) + } + var binds, env, securityOpts []string + // Special case for GitLab pipeline + parsed, err := client.ParseHostURL(utils.Docker.DaemonHost()) + if err != nil { + return errors.Errorf("failed to parse docker host: %w", err) + } + // Ref: https://vector.dev/docs/reference/configuration/sources/docker_logs/#docker_host + dindHost := &url.URL{Scheme: "http", Host: net.JoinHostPort(utils.DinDHost, "2375")} + switch parsed.Scheme { + case "tcp": + if _, port, err := net.SplitHostPort(parsed.Host); err == nil { + dindHost.Host = net.JoinHostPort(utils.DinDHost, port) + } + env = append(env, "DOCKER_HOST="+dindHost.String()) + case "npipe": + fmt.Fprintln(os.Stderr, utils.Yellow("WARNING:"), "analytics requires docker daemon exposed on tcp://localhost:2375") + env = append(env, "DOCKER_HOST="+dindHost.String()) + case "unix": + if dindHost, err = client.ParseHostURL(client.DefaultDockerHost); err != nil { + return errors.Errorf("failed to parse default host: %w", err) + } else if strings.HasSuffix(parsed.Host, "/.docker/run/docker.sock") { + fmt.Fprintln(os.Stderr, utils.Yellow("WARNING:"), "analytics requires mounting default docker socket:", dindHost.Host) + binds = append(binds, fmt.Sprintf("%[1]s:%[1]s:ro", dindHost.Host)) + } else { + // Podman and OrbStack can mount root-less socket without issue + binds = append(binds, fmt.Sprintf("%s:%s:ro", parsed.Host, dindHost.Host)) + securityOpts = append(securityOpts, "label:disable") + } + } + if _, err := utils.DockerStart( + ctx, + container.Config{ + Image: utils.Config.Analytics.VectorImage, + Env: env, + Entrypoint: []string{"sh", "-c", `cat <<'EOF' > /etc/vector/vector.yaml && vector --config /etc/vector/vector.yaml +` + vectorConfigBuf.String() + ` +EOF +`}, + Healthcheck: &container.HealthConfig{ + Test: []string{"CMD", "wget", "--no-verbose", "--tries=1", "--spider", + "http://127.0.0.1:9001/health", + }, + Interval: 10 * time.Second, + Timeout: 2 * time.Second, + Retries: 3, + }, + }, + container.HostConfig{ + Binds: binds, + RestartPolicy: container.RestartPolicy{Name: "always"}, + SecurityOpt: securityOpts, + }, + network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + utils.NetId: { + Aliases: utils.VectorAliases, + }, + }, + }, + utils.VectorId, + ); err != nil { + return err + } + started = append(started, utils.VectorId) + } + + // Start Kong. 
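+	// As with the vector template above, Option("missingkey=error") makes the
+	// render fail fast if kongConfig ever drops a field the template still
+	// references. Minimal text/template sketch of that behaviour:
+	//
+	//	t := template.Must(template.New("t").Parse("{{.Missing}}"))
+	//	err := t.Option("missingkey=error").Execute(io.Discard, map[string]any{})
+	//	// err != nil: map has no entry for key "Missing"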
+ if !isContainerExcluded(utils.Config.Api.KongImage, excluded) { + var kongConfigBuf bytes.Buffer + if err := kongConfigTemplate.Option("missingkey=error").Execute(&kongConfigBuf, kongConfig{ + GotrueId: utils.GotrueId, + RestId: utils.RestId, + RealtimeId: utils.Config.Realtime.TenantId, + StorageId: utils.StorageId, + PgmetaId: utils.PgmetaId, + EdgeRuntimeId: utils.EdgeRuntimeId, + LogflareId: utils.LogflareId, + PoolerId: utils.PoolerId, + ApiHost: utils.Config.Hostname, + ApiPort: utils.Config.Api.Port, + }); err != nil { + return errors.Errorf("failed to exec template: %w", err) + } + + binds := []string{} + for id, tmpl := range utils.Config.Auth.Email.Template { + if len(tmpl.ContentPath) == 0 { + continue + } + hostPath := tmpl.ContentPath + if !filepath.IsAbs(tmpl.ContentPath) { + var err error + hostPath, err = filepath.Abs(hostPath) + if err != nil { + return errors.Errorf("failed to resolve absolute path: %w", err) + } + } + dockerPath := path.Join(nginxEmailTemplateDir, id+filepath.Ext(hostPath)) + binds = append(binds, fmt.Sprintf("%s:%s:rw", hostPath, dockerPath)) + } + + dockerPort := uint16(8000) + if utils.Config.Api.Tls.Enabled { + dockerPort = 8443 + } + if _, err := utils.DockerStart( + ctx, + container.Config{ + Image: utils.Config.Api.KongImage, + Env: []string{ + "KONG_DATABASE=off", + "KONG_DECLARATIVE_CONFIG=/home/kong/kong.yml", + "KONG_DNS_ORDER=LAST,A,CNAME", // https://github.com/supabase/cli/issues/14 + "KONG_PLUGINS=request-transformer,cors", + fmt.Sprintf("KONG_PORT_MAPS=%d:8000", utils.Config.Api.Port), + // Need to increase the nginx buffers in kong to avoid it rejecting the rather + // sizeable response headers azure can generate + // Ref: https://github.com/Kong/kong/issues/3974#issuecomment-482105126 + "KONG_NGINX_PROXY_PROXY_BUFFER_SIZE=160k", + "KONG_NGINX_PROXY_PROXY_BUFFERS=64 160k", + "KONG_NGINX_WORKER_PROCESSES=1", + // Use modern TLS certificate + "KONG_SSL_CERT=/home/kong/localhost.crt", + "KONG_SSL_CERT_KEY=/home/kong/localhost.key", + }, + Entrypoint: []string{"sh", "-c", `cat <<'EOF' > /home/kong/kong.yml && \ +cat <<'EOF' > /home/kong/custom_nginx.template && \ +cat <<'EOF' > /home/kong/localhost.crt && \ +cat <<'EOF' > /home/kong/localhost.key && \ +./docker-entrypoint.sh kong docker-start --nginx-conf /home/kong/custom_nginx.template +` + kongConfigBuf.String() + ` +EOF +` + nginxConfigEmbed + ` +EOF +` + status.KongCert + ` +EOF +` + status.KongKey + ` +EOF +`}, + ExposedPorts: nat.PortSet{ + "8000/tcp": {}, + "8443/tcp": {}, + nat.Port(fmt.Sprintf("%d/tcp", nginxTemplateServerPort)): {}, + }, + }, + container.HostConfig{ + Binds: binds, + PortBindings: nat.PortMap{nat.Port(fmt.Sprintf("%d/tcp", dockerPort)): []nat.PortBinding{{ + HostPort: strconv.FormatUint(uint64(utils.Config.Api.Port), 10)}, + }}, + RestartPolicy: container.RestartPolicy{Name: "always"}, + }, + network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + utils.NetId: { + Aliases: utils.KongAliases, + }, + }, + }, + utils.KongId, + ); err != nil { + return err + } + started = append(started, utils.KongId) + } + + // Start GoTrue. 
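+	// With the dbConfig assembled in Run above, the GOTRUE_DB_DATABASE_URL
+	// built below renders roughly as (password elided):
+	//
+	//	postgresql://supabase_auth_admin:<pw>@<utils.DbId>:5432/postgres
+	//
+	// i.e. GoTrue reaches Postgres over the shared docker network rather than
+	// a published host port.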
+ if utils.Config.Auth.Enabled && !isContainerExcluded(utils.Config.Auth.Image, excluded) { + var testOTP bytes.Buffer + if len(utils.Config.Auth.Sms.TestOTP) > 0 { + formatMapForEnvConfig(utils.Config.Auth.Sms.TestOTP, &testOTP) + } + + env := []string{ + "API_EXTERNAL_URL=" + utils.Config.Api.ExternalUrl, + + "GOTRUE_API_HOST=0.0.0.0", + "GOTRUE_API_PORT=9999", + + "GOTRUE_DB_DRIVER=postgres", + fmt.Sprintf("GOTRUE_DB_DATABASE_URL=postgresql://supabase_auth_admin:%s@%s:%d/%s", dbConfig.Password, dbConfig.Host, dbConfig.Port, dbConfig.Database), + + "GOTRUE_SITE_URL=" + utils.Config.Auth.SiteUrl, + "GOTRUE_URI_ALLOW_LIST=" + strings.Join(utils.Config.Auth.AdditionalRedirectUrls, ","), + fmt.Sprintf("GOTRUE_DISABLE_SIGNUP=%v", !utils.Config.Auth.EnableSignup), + + "GOTRUE_JWT_ADMIN_ROLES=service_role", + "GOTRUE_JWT_AUD=authenticated", + "GOTRUE_JWT_DEFAULT_GROUP_NAME=authenticated", + fmt.Sprintf("GOTRUE_JWT_EXP=%v", utils.Config.Auth.JwtExpiry), + "GOTRUE_JWT_SECRET=" + utils.Config.Auth.JwtSecret, + "GOTRUE_JWT_ISSUER=" + utils.GetApiUrl("/auth/v1"), + + fmt.Sprintf("GOTRUE_EXTERNAL_EMAIL_ENABLED=%v", utils.Config.Auth.Email.EnableSignup), + fmt.Sprintf("GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED=%v", utils.Config.Auth.Email.DoubleConfirmChanges), + fmt.Sprintf("GOTRUE_MAILER_AUTOCONFIRM=%v", !utils.Config.Auth.Email.EnableConfirmations), + fmt.Sprintf("GOTRUE_MAILER_OTP_LENGTH=%v", utils.Config.Auth.Email.OtpLength), + fmt.Sprintf("GOTRUE_MAILER_OTP_EXP=%v", utils.Config.Auth.Email.OtpExpiry), + + fmt.Sprintf("GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED=%v", utils.Config.Auth.EnableAnonymousSignIns), + + fmt.Sprintf("GOTRUE_SMTP_MAX_FREQUENCY=%v", utils.Config.Auth.Email.MaxFrequency), + + "GOTRUE_MAILER_URLPATHS_INVITE=" + utils.GetApiUrl("/auth/v1/verify"), + "GOTRUE_MAILER_URLPATHS_CONFIRMATION=" + utils.GetApiUrl("/auth/v1/verify"), + "GOTRUE_MAILER_URLPATHS_RECOVERY=" + utils.GetApiUrl("/auth/v1/verify"), + "GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE=" + utils.GetApiUrl("/auth/v1/verify"), + "GOTRUE_RATE_LIMIT_EMAIL_SENT=360000", + + fmt.Sprintf("GOTRUE_EXTERNAL_PHONE_ENABLED=%v", utils.Config.Auth.Sms.EnableSignup), + fmt.Sprintf("GOTRUE_SMS_AUTOCONFIRM=%v", !utils.Config.Auth.Sms.EnableConfirmations), + fmt.Sprintf("GOTRUE_SMS_MAX_FREQUENCY=%v", utils.Config.Auth.Sms.MaxFrequency), + "GOTRUE_SMS_OTP_EXP=6000", + "GOTRUE_SMS_OTP_LENGTH=6", + fmt.Sprintf("GOTRUE_SMS_TEMPLATE=%v", utils.Config.Auth.Sms.Template), + "GOTRUE_SMS_TEST_OTP=" + testOTP.String(), + + fmt.Sprintf("GOTRUE_PASSWORD_MIN_LENGTH=%v", utils.Config.Auth.MinimumPasswordLength), + fmt.Sprintf("GOTRUE_PASSWORD_REQUIRED_CHARACTERS=%v", utils.Config.Auth.PasswordRequirements.ToChar()), + fmt.Sprintf("GOTRUE_SECURITY_REFRESH_TOKEN_ROTATION_ENABLED=%v", utils.Config.Auth.EnableRefreshTokenRotation), + fmt.Sprintf("GOTRUE_SECURITY_REFRESH_TOKEN_REUSE_INTERVAL=%v", utils.Config.Auth.RefreshTokenReuseInterval), + fmt.Sprintf("GOTRUE_SECURITY_MANUAL_LINKING_ENABLED=%v", utils.Config.Auth.EnableManualLinking), + fmt.Sprintf("GOTRUE_SECURITY_UPDATE_PASSWORD_REQUIRE_REAUTHENTICATION=%v", utils.Config.Auth.Email.SecurePasswordChange), + fmt.Sprintf("GOTRUE_MFA_PHONE_ENROLL_ENABLED=%v", utils.Config.Auth.MFA.Phone.EnrollEnabled), + fmt.Sprintf("GOTRUE_MFA_PHONE_VERIFY_ENABLED=%v", utils.Config.Auth.MFA.Phone.VerifyEnabled), + fmt.Sprintf("GOTRUE_MFA_TOTP_ENROLL_ENABLED=%v", utils.Config.Auth.MFA.TOTP.EnrollEnabled), + fmt.Sprintf("GOTRUE_MFA_TOTP_VERIFY_ENABLED=%v", utils.Config.Auth.MFA.TOTP.VerifyEnabled), + 
fmt.Sprintf("GOTRUE_MFA_WEB_AUTHN_ENROLL_ENABLED=%v", utils.Config.Auth.MFA.WebAuthn.EnrollEnabled), + fmt.Sprintf("GOTRUE_MFA_WEB_AUTHN_VERIFY_ENABLED=%v", utils.Config.Auth.MFA.WebAuthn.VerifyEnabled), + fmt.Sprintf("GOTRUE_MFA_MAX_ENROLLED_FACTORS=%v", utils.Config.Auth.MFA.MaxEnrolledFactors), + } + + if utils.Config.Auth.Email.Smtp != nil && utils.Config.Auth.Email.Smtp.Enabled { + env = append(env, + fmt.Sprintf("GOTRUE_SMTP_HOST=%s", utils.Config.Auth.Email.Smtp.Host), + fmt.Sprintf("GOTRUE_SMTP_PORT=%d", utils.Config.Auth.Email.Smtp.Port), + fmt.Sprintf("GOTRUE_SMTP_USER=%s", utils.Config.Auth.Email.Smtp.User), + fmt.Sprintf("GOTRUE_SMTP_PASS=%s", utils.Config.Auth.Email.Smtp.Pass.Value), + fmt.Sprintf("GOTRUE_SMTP_ADMIN_EMAIL=%s", utils.Config.Auth.Email.Smtp.AdminEmail), + fmt.Sprintf("GOTRUE_SMTP_SENDER_NAME=%s", utils.Config.Auth.Email.Smtp.SenderName), + ) + } else if utils.Config.Inbucket.Enabled { + env = append(env, + "GOTRUE_SMTP_HOST="+utils.InbucketId, + "GOTRUE_SMTP_PORT=2500", + fmt.Sprintf("GOTRUE_SMTP_ADMIN_EMAIL=%s", utils.Config.Inbucket.AdminEmail), + fmt.Sprintf("GOTRUE_SMTP_SENDER_NAME=%s", utils.Config.Inbucket.SenderName), + ) + } + + if utils.Config.Auth.Sessions.Timebox > 0 { + env = append(env, fmt.Sprintf("GOTRUE_SESSIONS_TIMEBOX=%v", utils.Config.Auth.Sessions.Timebox)) + } + if utils.Config.Auth.Sessions.InactivityTimeout > 0 { + env = append(env, fmt.Sprintf("GOTRUE_SESSIONS_INACTIVITY_TIMEOUT=%v", utils.Config.Auth.Sessions.InactivityTimeout)) + } + + for id, tmpl := range utils.Config.Auth.Email.Template { + if len(tmpl.ContentPath) > 0 { + env = append(env, fmt.Sprintf("GOTRUE_MAILER_TEMPLATES_%s=http://%s:%d/email/%s", + strings.ToUpper(id), + utils.KongId, + nginxTemplateServerPort, + id+filepath.Ext(tmpl.ContentPath), + )) + } + if tmpl.Subject != nil { + env = append(env, fmt.Sprintf("GOTRUE_MAILER_SUBJECTS_%s=%s", + strings.ToUpper(id), + *tmpl.Subject, + )) + } + } + + switch { + case utils.Config.Auth.Sms.Twilio.Enabled: + env = append( + env, + "GOTRUE_SMS_PROVIDER=twilio", + "GOTRUE_SMS_TWILIO_ACCOUNT_SID="+utils.Config.Auth.Sms.Twilio.AccountSid, + "GOTRUE_SMS_TWILIO_AUTH_TOKEN="+utils.Config.Auth.Sms.Twilio.AuthToken.Value, + "GOTRUE_SMS_TWILIO_MESSAGE_SERVICE_SID="+utils.Config.Auth.Sms.Twilio.MessageServiceSid, + ) + case utils.Config.Auth.Sms.TwilioVerify.Enabled: + env = append( + env, + "GOTRUE_SMS_PROVIDER=twilio_verify", + "GOTRUE_SMS_TWILIO_VERIFY_ACCOUNT_SID="+utils.Config.Auth.Sms.TwilioVerify.AccountSid, + "GOTRUE_SMS_TWILIO_VERIFY_AUTH_TOKEN="+utils.Config.Auth.Sms.TwilioVerify.AuthToken.Value, + "GOTRUE_SMS_TWILIO_VERIFY_MESSAGE_SERVICE_SID="+utils.Config.Auth.Sms.TwilioVerify.MessageServiceSid, + ) + case utils.Config.Auth.Sms.Messagebird.Enabled: + env = append( + env, + "GOTRUE_SMS_PROVIDER=messagebird", + "GOTRUE_SMS_MESSAGEBIRD_ACCESS_KEY="+utils.Config.Auth.Sms.Messagebird.AccessKey.Value, + "GOTRUE_SMS_MESSAGEBIRD_ORIGINATOR="+utils.Config.Auth.Sms.Messagebird.Originator, + ) + case utils.Config.Auth.Sms.Textlocal.Enabled: + env = append( + env, + "GOTRUE_SMS_PROVIDER=textlocal", + "GOTRUE_SMS_TEXTLOCAL_API_KEY="+utils.Config.Auth.Sms.Textlocal.ApiKey.Value, + "GOTRUE_SMS_TEXTLOCAL_SENDER="+utils.Config.Auth.Sms.Textlocal.Sender, + ) + case utils.Config.Auth.Sms.Vonage.Enabled: + env = append( + env, + "GOTRUE_SMS_PROVIDER=vonage", + "GOTRUE_SMS_VONAGE_API_KEY="+utils.Config.Auth.Sms.Vonage.ApiKey, + "GOTRUE_SMS_VONAGE_API_SECRET="+utils.Config.Auth.Sms.Vonage.ApiSecret.Value, + 
"GOTRUE_SMS_VONAGE_FROM="+utils.Config.Auth.Sms.Vonage.From, + ) + } + + if captcha := utils.Config.Auth.Captcha; captcha != nil { + env = append( + env, + fmt.Sprintf("GOTRUE_SECURITY_CAPTCHA_ENABLED=%v", captcha.Enabled), + fmt.Sprintf("GOTRUE_SECURITY_CAPTCHA_PROVIDER=%v", captcha.Provider), + fmt.Sprintf("GOTRUE_SECURITY_CAPTCHA_SECRET=%v", captcha.Secret.Value), + ) + } + + if hook := utils.Config.Auth.Hook.MFAVerificationAttempt; hook != nil && hook.Enabled { + env = append( + env, + "GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED=true", + "GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI="+hook.URI, + "GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_SECRETS="+hook.Secrets.Value, + ) + } + if hook := utils.Config.Auth.Hook.PasswordVerificationAttempt; hook != nil && hook.Enabled { + env = append( + env, + "GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED=true", + "GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI="+hook.URI, + "GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_SECRETS="+hook.Secrets.Value, + ) + } + if hook := utils.Config.Auth.Hook.CustomAccessToken; hook != nil && hook.Enabled { + env = append( + env, + "GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED=true", + "GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI="+hook.URI, + "GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRETS="+hook.Secrets.Value, + ) + } + if hook := utils.Config.Auth.Hook.SendSMS; hook != nil && hook.Enabled { + env = append( + env, + "GOTRUE_HOOK_SEND_SMS_ENABLED=true", + "GOTRUE_HOOK_SEND_SMS_URI="+hook.URI, + "GOTRUE_HOOK_SEND_SMS_SECRETS="+hook.Secrets.Value, + ) + } + if hook := utils.Config.Auth.Hook.SendEmail; hook != nil && hook.Enabled { + env = append( + env, + "GOTRUE_HOOK_SEND_EMAIL_ENABLED=true", + "GOTRUE_HOOK_SEND_EMAIL_URI="+hook.URI, + "GOTRUE_HOOK_SEND_EMAIL_SECRETS="+hook.Secrets.Value, + ) + } + + if utils.Config.Auth.MFA.Phone.EnrollEnabled || utils.Config.Auth.MFA.Phone.VerifyEnabled { + env = append( + env, + "GOTRUE_MFA_PHONE_TEMPLATE="+utils.Config.Auth.MFA.Phone.Template, + fmt.Sprintf("GOTRUE_MFA_PHONE_OTP_LENGTH=%v", utils.Config.Auth.MFA.Phone.OtpLength), + fmt.Sprintf("GOTRUE_MFA_PHONE_MAX_FREQUENCY=%v", utils.Config.Auth.MFA.Phone.MaxFrequency), + ) + } + + for name, config := range utils.Config.Auth.External { + env = append( + env, + fmt.Sprintf("GOTRUE_EXTERNAL_%s_ENABLED=%v", strings.ToUpper(name), config.Enabled), + fmt.Sprintf("GOTRUE_EXTERNAL_%s_CLIENT_ID=%s", strings.ToUpper(name), config.ClientId), + fmt.Sprintf("GOTRUE_EXTERNAL_%s_SECRET=%s", strings.ToUpper(name), config.Secret.Value), + fmt.Sprintf("GOTRUE_EXTERNAL_%s_SKIP_NONCE_CHECK=%t", strings.ToUpper(name), config.SkipNonceCheck), + ) + + redirectUri := config.RedirectUri + if redirectUri == "" { + redirectUri = utils.GetApiUrl("/auth/v1/callback") + } + env = append(env, fmt.Sprintf("GOTRUE_EXTERNAL_%s_REDIRECT_URI=%s", strings.ToUpper(name), redirectUri)) + + if config.Url != "" { + env = append(env, fmt.Sprintf("GOTRUE_EXTERNAL_%s_URL=%s", strings.ToUpper(name), config.Url)) + } + } + + if _, err := utils.DockerStart( + ctx, + container.Config{ + Image: utils.Config.Auth.Image, + Env: env, + ExposedPorts: nat.PortSet{"9999/tcp": {}}, + Healthcheck: &container.HealthConfig{ + Test: []string{"CMD", "wget", "--no-verbose", "--tries=1", "--spider", + "http://127.0.0.1:9999/health", + }, + Interval: 10 * time.Second, + Timeout: 2 * time.Second, + Retries: 3, + }, + }, + container.HostConfig{ + RestartPolicy: container.RestartPolicy{Name: "always"}, + }, + network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + utils.NetId: { + Aliases: 
utils.GotrueAliases, + }, + }, + }, + utils.GotrueId, + ); err != nil { + return err + } + started = append(started, utils.GotrueId) + } + + // Start Inbucket. + if utils.Config.Inbucket.Enabled && !isContainerExcluded(utils.Config.Inbucket.Image, excluded) { + inbucketPortBindings := nat.PortMap{"9000/tcp": []nat.PortBinding{{HostPort: strconv.FormatUint(uint64(utils.Config.Inbucket.Port), 10)}}} + if utils.Config.Inbucket.SmtpPort != 0 { + inbucketPortBindings["2500/tcp"] = []nat.PortBinding{{HostPort: strconv.FormatUint(uint64(utils.Config.Inbucket.SmtpPort), 10)}} + } + if utils.Config.Inbucket.Pop3Port != 0 { + inbucketPortBindings["1100/tcp"] = []nat.PortBinding{{HostPort: strconv.FormatUint(uint64(utils.Config.Inbucket.Pop3Port), 10)}} + } + if _, err := utils.DockerStart( + ctx, + container.Config{ + Image: utils.Config.Inbucket.Image, + }, + container.HostConfig{ + Binds: []string{ + // Override default mount points to avoid creating multiple anonymous volumes + // Ref: https://github.com/inbucket/inbucket/blob/v3.0.4/Dockerfile#L52 + utils.InbucketId + ":/config", + utils.InbucketId + ":/storage", + }, + PortBindings: inbucketPortBindings, + RestartPolicy: container.RestartPolicy{Name: "always"}, + }, + network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + utils.NetId: { + Aliases: utils.InbucketAliases, + }, + }, + }, + utils.InbucketId, + ); err != nil { + return err + } + started = append(started, utils.InbucketId) + } + + // Start Realtime. + if utils.Config.Realtime.Enabled && !isContainerExcluded(utils.Config.Realtime.Image, excluded) { + if _, err := utils.DockerStart( + ctx, + container.Config{ + Image: utils.Config.Realtime.Image, + Env: []string{ + "PORT=4000", + "DB_HOST=" + dbConfig.Host, + fmt.Sprintf("DB_PORT=%d", dbConfig.Port), + "DB_USER=supabase_admin", + "DB_PASSWORD=" + dbConfig.Password, + "DB_NAME=" + dbConfig.Database, + "DB_AFTER_CONNECT_QUERY=SET search_path TO _realtime", + "DB_ENC_KEY=" + utils.Config.Realtime.EncryptionKey, + "API_JWT_SECRET=" + utils.Config.Auth.JwtSecret, + fmt.Sprintf("API_JWT_JWKS=%s", jwks), + "METRICS_JWT_SECRET=" + utils.Config.Auth.JwtSecret, + "APP_NAME=realtime", + "SECRET_KEY_BASE=" + utils.Config.Realtime.SecretKeyBase, + "ERL_AFLAGS=" + utils.ToRealtimeEnv(utils.Config.Realtime.IpVersion), + "DNS_NODES=''", + "RLIMIT_NOFILE=", + "SEED_SELF_HOST=true", + "RUN_JANITOR=true", + fmt.Sprintf("MAX_HEADER_LENGTH=%d", utils.Config.Realtime.MaxHeaderLength), + }, + ExposedPorts: nat.PortSet{"4000/tcp": {}}, + Healthcheck: &container.HealthConfig{ + // Podman splits command by spaces unless it's quoted, but curl header can't be quoted. + Test: []string{"CMD", "curl", "-sSfL", "--head", "-o", "/dev/null", + "-H", "Host:" + utils.Config.Realtime.TenantId, + "http://127.0.0.1:4000/api/ping", + }, + Interval: 10 * time.Second, + Timeout: 2 * time.Second, + Retries: 3, + }, + }, + container.HostConfig{ + RestartPolicy: container.RestartPolicy{Name: "always"}, + }, + network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + utils.NetId: { + Aliases: utils.RealtimeAliases, + }, + }, + }, + utils.RealtimeId, + ); err != nil { + return err + } + started = append(started, utils.RealtimeId) + } + + // Start PostgREST. 
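+	// PostgREST connects through the dedicated authenticator role and switches to the anon role for unauthenticated requests (PGRST_DB_ANON_ROLE below), validating JWTs against the same JWKS shared with the other services.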
+ if utils.Config.Api.Enabled && !isContainerExcluded(utils.Config.Api.Image, excluded) { + if _, err := utils.DockerStart( + ctx, + container.Config{ + Image: utils.Config.Api.Image, + Env: []string{ + fmt.Sprintf("PGRST_DB_URI=postgresql://authenticator:%s@%s:%d/%s", dbConfig.Password, dbConfig.Host, dbConfig.Port, dbConfig.Database), + "PGRST_DB_SCHEMAS=" + strings.Join(utils.Config.Api.Schemas, ","), + "PGRST_DB_EXTRA_SEARCH_PATH=" + strings.Join(utils.Config.Api.ExtraSearchPath, ","), + fmt.Sprintf("PGRST_DB_MAX_ROWS=%d", utils.Config.Api.MaxRows), + "PGRST_DB_ANON_ROLE=anon", + fmt.Sprintf("PGRST_JWT_SECRET=%s", jwks), + "PGRST_ADMIN_SERVER_PORT=3001", + }, + // PostgREST does not expose a shell for health check + }, + container.HostConfig{ + RestartPolicy: container.RestartPolicy{Name: "always"}, + }, + network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + utils.NetId: { + Aliases: utils.RestAliases, + }, + }, + }, + utils.RestId, + ); err != nil { + return err + } + started = append(started, utils.RestId) + } + + // Start Storage. + if isStorageEnabled { + dockerStoragePath := "/mnt" + if _, err := utils.DockerStart( + ctx, + container.Config{ + Image: utils.Config.Storage.Image, + Env: []string{ + "ANON_KEY=" + utils.Config.Auth.AnonKey, + "SERVICE_KEY=" + utils.Config.Auth.ServiceRoleKey, + "AUTH_JWT_SECRET=" + utils.Config.Auth.JwtSecret, + fmt.Sprintf("AUTH_JWT_JWKS=%s", jwks), + fmt.Sprintf("DATABASE_URL=postgresql://supabase_storage_admin:%s@%s:%d/%s", dbConfig.Password, dbConfig.Host, dbConfig.Port, dbConfig.Database), + fmt.Sprintf("FILE_SIZE_LIMIT=%v", utils.Config.Storage.FileSizeLimit), + "STORAGE_BACKEND=file", + "FILE_STORAGE_BACKEND_PATH=" + dockerStoragePath, + "TENANT_ID=stub", + // TODO: https://github.com/supabase/storage-api/issues/55 + "STORAGE_S3_REGION=" + utils.Config.Storage.S3Credentials.Region, + "GLOBAL_S3_BUCKET=stub", + fmt.Sprintf("ENABLE_IMAGE_TRANSFORMATION=%t", isImgProxyEnabled), + fmt.Sprintf("IMGPROXY_URL=http://%s:5001", utils.ImgProxyId), + "TUS_URL_PATH=/storage/v1/upload/resumable", + "S3_PROTOCOL_ACCESS_KEY_ID=" + utils.Config.Storage.S3Credentials.AccessKeyId, + "S3_PROTOCOL_ACCESS_KEY_SECRET=" + utils.Config.Storage.S3Credentials.SecretAccessKey, + "S3_PROTOCOL_PREFIX=/storage/v1", + fmt.Sprintf("S3_ALLOW_FORWARDED_HEADER=%v", StorageVersionBelow("1.10.1")), + "UPLOAD_FILE_SIZE_LIMIT=52428800000", + "UPLOAD_FILE_SIZE_LIMIT_STANDARD=5242880000", + }, + Healthcheck: &container.HealthConfig{ + // For some reason, localhost resolves to IPv6 address on GitPod which breaks healthcheck. + Test: []string{"CMD", "wget", "--no-verbose", "--tries=1", "--spider", + "http://127.0.0.1:5000/status", + }, + Interval: 10 * time.Second, + Timeout: 2 * time.Second, + Retries: 3, + }, + }, + container.HostConfig{ + RestartPolicy: container.RestartPolicy{Name: "always"}, + Binds: []string{utils.StorageId + ":" + dockerStoragePath}, + }, + network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + utils.NetId: { + Aliases: utils.StorageAliases, + }, + }, + }, + utils.StorageId, + ); err != nil { + return err + } + started = append(started, utils.StorageId) + } + + // Start Storage ImgProxy. 
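+	// imgproxy reads files straight from the storage container's volume (mounted via VolumesFrom below), so transformations need no credentials between the two services.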
+	if isStorageEnabled && isImgProxyEnabled {
+		if _, err := utils.DockerStart(
+			ctx,
+			container.Config{
+				Image: utils.Config.Storage.ImgProxyImage,
+				Env: []string{
+					"IMGPROXY_BIND=:5001",
+					"IMGPROXY_LOCAL_FILESYSTEM_ROOT=/",
+					"IMGPROXY_USE_ETAG=true",
+					"IMGPROXY_MAX_SRC_RESOLUTION=50",
+					"IMGPROXY_MAX_SRC_FILE_SIZE=25000000",
+					"IMGPROXY_MAX_ANIMATION_FRAMES=60",
+					"IMGPROXY_ENABLE_WEBP_DETECTION=true",
+				},
+				Healthcheck: &container.HealthConfig{
+					Test:     []string{"CMD", "imgproxy", "health"},
+					Interval: 10 * time.Second,
+					Timeout:  2 * time.Second,
+					Retries:  3,
+				},
+			},
+			container.HostConfig{
+				VolumesFrom:   []string{utils.StorageId},
+				RestartPolicy: container.RestartPolicy{Name: "always"},
+			},
+			network.NetworkingConfig{
+				EndpointsConfig: map[string]*network.EndpointSettings{
+					utils.NetId: {
+						Aliases: utils.ImgProxyAliases,
+					},
+				},
+			},
+			utils.ImgProxyId,
+		); err != nil {
+			return err
+		}
+		started = append(started, utils.ImgProxyId)
+	}
+
+	// Start all functions.
+	if utils.Config.EdgeRuntime.Enabled && !isContainerExcluded(utils.Config.EdgeRuntime.Image, excluded) {
+		dbUrl := fmt.Sprintf("postgresql://%s:%s@%s:%d/%s", dbConfig.User, dbConfig.Password, dbConfig.Host, dbConfig.Port, dbConfig.Database)
+		if err := serve.ServeFunctions(ctx, "", nil, "", dbUrl, serve.RuntimeOption{}, fsys); err != nil {
+			return err
+		}
+		started = append(started, utils.EdgeRuntimeId)
+	}
+
+	// Start pg-meta.
+	if utils.Config.Studio.Enabled && !isContainerExcluded(utils.Config.Studio.PgmetaImage, excluded) {
+		if _, err := utils.DockerStart(
+			ctx,
+			container.Config{
+				Image: utils.Config.Studio.PgmetaImage,
+				Env: []string{
+					"PG_META_PORT=8080",
+					"PG_META_DB_HOST=" + dbConfig.Host,
+					"PG_META_DB_NAME=" + dbConfig.Database,
+					"PG_META_DB_USER=" + dbConfig.User,
+					fmt.Sprintf("PG_META_DB_PORT=%d", dbConfig.Port),
+					"PG_META_DB_PASSWORD=" + dbConfig.Password,
+				},
+				Healthcheck: &container.HealthConfig{
+					Test:     []string{"CMD-SHELL", `node --eval="fetch('http://127.0.0.1:8080/health').then((r) => {if (!r.ok) throw new Error(r.status)})"`},
+					Interval: 10 * time.Second,
+					Timeout:  2 * time.Second,
+					Retries:  3,
+				},
+			},
+			container.HostConfig{
+				RestartPolicy: container.RestartPolicy{Name: "always"},
+			},
+			network.NetworkingConfig{
+				EndpointsConfig: map[string]*network.EndpointSettings{
+					utils.NetId: {
+						Aliases: utils.PgmetaAliases,
+					},
+				},
+			},
+			utils.PgmetaId,
+		); err != nil {
+			return err
+		}
+		started = append(started, utils.PgmetaId)
+	}
+
+	// Start Studio.
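+	// Studio reaches pg-meta and Kong over the shared Docker network using their container IDs as hostnames; only Studio's own port 3000 is published to the host.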
+ if utils.Config.Studio.Enabled && !isContainerExcluded(utils.Config.Studio.Image, excluded) { + if _, err := utils.DockerStart( + ctx, + container.Config{ + Image: utils.Config.Studio.Image, + Env: []string{ + "STUDIO_PG_META_URL=http://" + utils.PgmetaId + ":8080", + "POSTGRES_PASSWORD=" + dbConfig.Password, + "SUPABASE_URL=http://" + utils.KongId + ":8000", + "SUPABASE_PUBLIC_URL=" + utils.Config.Studio.ApiUrl, + "AUTH_JWT_SECRET=" + utils.Config.Auth.JwtSecret, + "SUPABASE_ANON_KEY=" + utils.Config.Auth.AnonKey, + "SUPABASE_SERVICE_KEY=" + utils.Config.Auth.ServiceRoleKey, + "LOGFLARE_API_KEY=" + utils.Config.Analytics.ApiKey, + "OPENAI_API_KEY=" + utils.Config.Studio.OpenaiApiKey, + fmt.Sprintf("LOGFLARE_URL=http://%v:4000", utils.LogflareId), + fmt.Sprintf("NEXT_PUBLIC_ENABLE_LOGS=%v", utils.Config.Analytics.Enabled), + fmt.Sprintf("NEXT_ANALYTICS_BACKEND_PROVIDER=%v", utils.Config.Analytics.Backend), + // Ref: https://github.com/vercel/next.js/issues/51684#issuecomment-1612834913 + "HOSTNAME=0.0.0.0", + }, + Healthcheck: &container.HealthConfig{ + Test: []string{"CMD-SHELL", `node --eval="fetch('http://127.0.0.1:3000/api/platform/profile').then((r) => {if (!r.ok) throw new Error(r.status)})"`}, + Interval: 10 * time.Second, + Timeout: 2 * time.Second, + Retries: 3, + }, + }, + container.HostConfig{ + PortBindings: nat.PortMap{"3000/tcp": []nat.PortBinding{{HostPort: strconv.FormatUint(uint64(utils.Config.Studio.Port), 10)}}}, + RestartPolicy: container.RestartPolicy{Name: "always"}, + }, + network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + utils.NetId: { + Aliases: utils.StudioAliases, + }, + }, + }, + utils.StudioId, + ); err != nil { + return err + } + started = append(started, utils.StudioId) + } + + // Start pooler. 
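+	// Supavisor listens internally on 5432 (session mode) and 6543 (transaction mode); only the port matching the configured pool mode is bound to the host pooler port below.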
+ if utils.Config.Db.Pooler.Enabled && !isContainerExcluded(utils.Config.Db.Pooler.Image, excluded) { + portSession := uint16(5432) + portTransaction := uint16(6543) + dockerPort := portTransaction + if utils.Config.Db.Pooler.PoolMode == config.SessionMode { + dockerPort = portSession + } + // Create pooler tenant + var poolerTenantBuf bytes.Buffer + if err := poolerTenantTemplate.Option("missingkey=error").Execute(&poolerTenantBuf, poolerTenant{ + DbHost: dbConfig.Host, + DbPort: dbConfig.Port, + DbDatabase: dbConfig.Database, + DbPassword: dbConfig.Password, + ExternalId: utils.Config.Db.Pooler.TenantId, + ModeType: utils.Config.Db.Pooler.PoolMode, + DefaultMaxClients: utils.Config.Db.Pooler.MaxClientConn, + DefaultPoolSize: utils.Config.Db.Pooler.DefaultPoolSize, + }); err != nil { + return errors.Errorf("failed to exec template: %w", err) + } + if _, err := utils.DockerStart( + ctx, + container.Config{ + Image: utils.Config.Db.Pooler.Image, + Env: []string{ + "PORT=4000", + fmt.Sprintf("PROXY_PORT_SESSION=%d", portSession), + fmt.Sprintf("PROXY_PORT_TRANSACTION=%d", portTransaction), + fmt.Sprintf("DATABASE_URL=ecto://%s:%s@%s:%d/%s", dbConfig.User, dbConfig.Password, dbConfig.Host, dbConfig.Port, "_supabase"), + "CLUSTER_POSTGRES=true", + "SECRET_KEY_BASE=" + utils.Config.Db.Pooler.SecretKeyBase, + "VAULT_ENC_KEY=" + utils.Config.Db.Pooler.EncryptionKey, + "API_JWT_SECRET=" + utils.Config.Auth.JwtSecret, + "METRICS_JWT_SECRET=" + utils.Config.Auth.JwtSecret, + "REGION=local", + "RUN_JANITOR=true", + "ERL_AFLAGS=-proto_dist inet_tcp", + }, + Cmd: []string{ + "/bin/sh", "-c", + fmt.Sprintf("/app/bin/migrate && /app/bin/supavisor eval '%s' && /app/bin/server", poolerTenantBuf.String()), + }, + ExposedPorts: nat.PortSet{ + "4000/tcp": {}, + nat.Port(fmt.Sprintf("%d/tcp", portSession)): {}, + nat.Port(fmt.Sprintf("%d/tcp", portTransaction)): {}, + }, + Healthcheck: &container.HealthConfig{ + Test: []string{"CMD", "curl", "-sSfL", "--head", "-o", "/dev/null", "http://127.0.0.1:4000/api/health"}, + Interval: 10 * time.Second, + Timeout: 2 * time.Second, + Retries: 3, + }, + }, + container.HostConfig{ + PortBindings: nat.PortMap{nat.Port(fmt.Sprintf("%d/tcp", dockerPort)): []nat.PortBinding{{ + HostPort: strconv.FormatUint(uint64(utils.Config.Db.Pooler.Port), 10)}, + }}, + RestartPolicy: container.RestartPolicy{Name: "always"}, + }, + network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + utils.NetId: { + Aliases: utils.PoolerAliases, + }, + }, + }, + utils.PoolerId, + ); err != nil { + return err + } + started = append(started, utils.PoolerId) + } + + p.Send(utils.StatusMsg("Waiting for health checks...")) + if utils.NoBackupVolume && utils.SliceContains(started, utils.StorageId) { + if err := start.WaitForHealthyService(ctx, serviceTimeout, utils.StorageId); err != nil { + return err + } + // Disable prompts when seeding + if err := buckets.Run(ctx, "", false, fsys); err != nil { + return err + } + } + return start.WaitForHealthyService(ctx, serviceTimeout, started...) 
+} + +func isContainerExcluded(imageName string, excluded map[string]bool) bool { + short := utils.ShortContainerImageName(imageName) + val, ok := excluded[short] + return ok && val +} + +func ExcludableContainers() []string { + names := []string{} + for _, image := range config.Images.Services() { + names = append(names, utils.ShortContainerImageName(image)) + } + return names +} + +func formatMapForEnvConfig(input map[string]string, output *bytes.Buffer) { + numOfKeyPairs := len(input) + i := 0 + for k, v := range input { + output.WriteString(k) + output.WriteString(":") + output.WriteString(v) + i++ + if i < numOfKeyPairs { + output.WriteString(",") + } + } +} diff --git a/internal/start/start_test.go b/internal/start/start_test.go new file mode 100644 index 0000000..c00d2e9 --- /dev/null +++ b/internal/start/start_test.go @@ -0,0 +1,279 @@ +package start + +import ( + "bytes" + "context" + "errors" + "net/http" + "regexp" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/volume" + "github.com/h2non/gock" + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/config" + "github.com/supabase/cli/pkg/pgtest" + "github.com/supabase/cli/pkg/storage" +) + +func TestStartCommand(t *testing.T) { + t.Run("throws error on malformed config", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, utils.ConfigPath, []byte("malformed"), 0644)) + // Run test + err := Run(context.Background(), fsys, []string{}, false) + // Check error + assert.ErrorContains(t, err, "toml: expected = after a key, but the document ends there") + }) + + t.Run("throws error on missing docker", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers"). + ReplyError(errors.New("network error")) + // Run test + err := Run(context.Background(), fsys, []string{}, false) + // Check error + assert.ErrorContains(t, err, "network error") + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("show status if database is already running", func(t *testing.T) { + var running []types.Container + for _, name := range utils.GetDockerIds() { + running = append(running, types.Container{ + Names: []string{name + "_test"}, + }) + } + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{}) + + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/supabase_db_start/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{Running: true}, + }}) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/json"). + Reply(http.StatusOK). 
+ JSON(running) + // Run test + err := Run(context.Background(), fsys, []string{}, false) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestDatabaseStart(t *testing.T) { + t.Run("starts database locally", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/networks/create"). + Reply(http.StatusCreated). + JSON(network.CreateResponse{}) + // Caches all dependencies + imageUrl := utils.GetRegistryImageUrl(utils.Config.Db.Image) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/images/" + imageUrl + "/json"). + Reply(http.StatusOK). + JSON(types.ImageInspect{}) + for _, image := range config.Images.Services() { + service := utils.GetRegistryImageUrl(image) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/images/" + service + "/json"). + Reply(http.StatusOK). + JSON(types.ImageInspect{}) + } + // Start postgres + utils.DbId = "test-postgres" + utils.ConfigId = "test-config" + utils.Config.Db.Port = 54322 + utils.Config.Db.MajorVersion = 15 + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/volumes/" + utils.DbId). + Reply(http.StatusNotFound) + apitest.MockDockerStart(utils.Docker, imageUrl, utils.DbId) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Realtime.Image), "test-realtime") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-realtime", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Storage.Image), "test-storage") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-storage", "")) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Auth.Image), "test-auth") + require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-auth", "")) + // Start services + utils.KongId = "test-kong" + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Api.KongImage), utils.KongId) + utils.GotrueId = "test-gotrue" + utils.Config.Auth.EnableSignup = true + utils.Config.Auth.Email.EnableSignup = true + utils.Config.Auth.Email.DoubleConfirmChanges = true + utils.Config.Auth.Email.EnableConfirmations = true + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Auth.Image), utils.GotrueId) + utils.InbucketId = "test-inbucket" + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Inbucket.Image), utils.InbucketId) + utils.RealtimeId = "test-realtime" + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Realtime.Image), utils.RealtimeId) + utils.RestId = "test-rest" + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Api.Image), utils.RestId) + utils.StorageId = "test-storage" + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Storage.Image), utils.StorageId) + utils.ImgProxyId = "test-imgproxy" + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Storage.ImgProxyImage), utils.ImgProxyId) + utils.EdgeRuntimeId = "test-edge-runtime" + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.EdgeRuntime.Image), utils.EdgeRuntimeId) + utils.PgmetaId = "test-pgmeta" + apitest.MockDockerStart(utils.Docker, 
utils.GetRegistryImageUrl(utils.Config.Studio.PgmetaImage), utils.PgmetaId) + utils.StudioId = "test-studio" + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Studio.Image), utils.StudioId) + utils.LogflareId = "test-logflare" + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Analytics.Image), utils.LogflareId) + utils.VectorId = "test-vector" + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Analytics.VectorImage), utils.VectorId) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + // Setup health probes + started := []string{ + utils.DbId, utils.KongId, utils.GotrueId, utils.InbucketId, utils.RealtimeId, + utils.StorageId, utils.ImgProxyId, utils.EdgeRuntimeId, utils.PgmetaId, utils.StudioId, + utils.LogflareId, utils.RestId, utils.VectorId, + } + for _, container := range started { + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + container + "/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + } + gock.New(utils.Config.Api.ExternalUrl). + Head("/rest-admin/v1/ready"). + Reply(http.StatusOK) + gock.New(utils.Config.Api.ExternalUrl). + Head("/functions/v1/_internal/health"). + Reply(http.StatusOK) + // Seed tenant services + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.StorageId + "/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + gock.New(utils.Config.Api.ExternalUrl). + Get("/storage/v1/bucket"). + Reply(http.StatusOK). + JSON([]storage.BucketResponse{}) + // Run test + err := utils.RunProgram(context.Background(), func(p utils.Program, ctx context.Context) error { + return run(p, context.Background(), fsys, []string{}, pgconn.Config{Host: utils.DbId}, conn.Intercept) + }) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("skips excluded containers", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/networks/create"). + Reply(http.StatusCreated). + JSON(network.CreateResponse{}) + // Caches all dependencies + imageUrl := utils.GetRegistryImageUrl(utils.Config.Db.Image) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/images/" + imageUrl + "/json"). + Reply(http.StatusOK). + JSON(types.ImageInspect{}) + // Start postgres + utils.DbId = "test-postgres" + utils.ConfigId = "test-config" + utils.Config.Db.Port = 54322 + utils.Config.Db.MajorVersion = 15 + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/volumes/" + utils.DbId). + Reply(http.StatusOK). + JSON(volume.Volume{}) + apitest.MockDockerStart(utils.Docker, imageUrl, utils.DbId) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/json"). + Reply(http.StatusOK). 
+ JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + // Run test + exclude := ExcludableContainers() + exclude = append(exclude, "invalid", exclude[0]) + err := utils.RunProgram(context.Background(), func(p utils.Program, ctx context.Context) error { + return run(p, context.Background(), fsys, exclude, pgconn.Config{Host: utils.DbId}) + }) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) +} + +func TestFormatMapForEnvConfig(t *testing.T) { + t.Run("It produces the correct format and removes the trailing comma", func(t *testing.T) { + output := bytes.Buffer{} + input := map[string]string{} + + keys := [4]string{"123456", "234567", "345678", "456789"} + values := [4]string{"123456", "234567", "345678", "456789"} + expected := [4]string{ + `^\w{6}:\w{6}$`, + `^\w{6}:\w{6},\w{6}:\w{6}$`, + `^\w{6}:\w{6},\w{6}:\w{6},\w{6}:\w{6}$`, + `^\w{6}:\w{6},\w{6}:\w{6},\w{6}:\w{6},\w{6}:\w{6}$`, + } + formatMapForEnvConfig(input, &output) + if len(output.Bytes()) > 0 { + t.Error("No values should be expected when empty map is provided") + } + for i := 0; i < 4; i++ { + output.Reset() + input[keys[i]] = values[i] + formatMapForEnvConfig(input, &output) + result := output.String() + assert.Regexp(t, regexp.MustCompile(expected[i]), result) + } + }) +} diff --git a/internal/start/templates/custom_nginx.template b/internal/start/templates/custom_nginx.template new file mode 100644 index 0000000..e1417cc --- /dev/null +++ b/internal/start/templates/custom_nginx.template @@ -0,0 +1,29 @@ +pid pids/nginx.pid; # this setting is mandatory +error_log logs/error.log ${{LOG_LEVEL}}; # can be set by kong.conf + +daemon ${{NGINX_DAEMON}}; # can be set by kong.conf +worker_processes ${{NGINX_WORKER_PROCESSES}}; # can be set by kong.conf + +events { + multi_accept on; +} + +http { + # here, we declare our custom location serving our website + # (or API portal) which we can optimize for serving static assets + server { + server_name email_templates; + listen 0.0.0.0:8088 reuseport backlog=16384; + + access_log logs/email_templates_access.log; + error_log logs/error.log notice; + + location /email { + autoindex on; + root /home/kong/templates; + } + } + + # include default Kong Nginx config + include 'nginx-kong.conf'; +} diff --git a/internal/start/templates/kong.yml b/internal/start/templates/kong.yml new file mode 100644 index 0000000..8f1d28f --- /dev/null +++ b/internal/start/templates/kong.yml @@ -0,0 +1,166 @@ +_format_version: "1.1" +services: + - name: auth-v1-open + _comment: "GoTrue: /auth/v1/verify* -> http://auth:9999/verify*" + url: http://{{ .GotrueId }}:9999/verify + routes: + - name: auth-v1-open + strip_path: true + paths: + - /auth/v1/verify + plugins: + - name: cors + - name: auth-v1-open-callback + _comment: "GoTrue: /auth/v1/callback* -> http://auth:9999/callback*" + url: http://{{ .GotrueId }}:9999/callback + routes: + - name: auth-v1-open-callback + strip_path: true + paths: + - /auth/v1/callback + plugins: + - name: cors + - name: auth-v1-open-authorize + _comment: "GoTrue: /auth/v1/authorize* -> http://auth:9999/authorize*" + url: http://{{ .GotrueId }}:9999/authorize + routes: + - name: auth-v1-open-authorize + strip_path: true + paths: + - /auth/v1/authorize + plugins: + - name: cors + - name: auth-v1 + _comment: "GoTrue: /auth/v1/* -> http://auth:9999/*" + url: http://{{ .GotrueId }}:9999/ + routes: + - name: 
auth-v1-all + strip_path: true + paths: + - /auth/v1/ + plugins: + - name: cors + - name: rest-v1 + _comment: "PostgREST: /rest/v1/* -> http://rest:3000/*" + url: http://{{ .RestId }}:3000/ + routes: + - name: rest-v1-all + strip_path: true + paths: + - /rest/v1/ + plugins: + - name: cors + - name: rest-admin-v1 + _comment: "PostgREST: /rest-admin/v1/* -> http://rest:3001/*" + url: http://{{ .RestId }}:3001/ + routes: + - name: rest-admin-v1-all + strip_path: true + paths: + - /rest-admin/v1/ + plugins: + - name: cors + - name: graphql-v1 + _comment: "PostgREST: /graphql/v1 -> http://rest:3000/rpc/graphql" + url: http://{{ .RestId }}:3000/rpc/graphql + routes: + - name: graphql-v1-all + strip_path: true + paths: + - /graphql/v1 + plugins: + - name: cors + - name: request-transformer + config: + add: + headers: + - "Content-Profile: graphql_public" + - name: realtime-v1-ws + _comment: "Realtime: /realtime/v1/* -> ws://realtime:4000/socket/websocket" + url: http://{{ .RealtimeId }}:4000/socket + protocol: ws + routes: + - name: realtime-v1-ws + strip_path: true + paths: + - /realtime/v1/ + plugins: + - name: cors + - name: realtime-v1-longpoll + _comment: "Realtime: /realtime/v1/* -> ws://realtime:4000/socket/longpoll" + url: http://{{ .RealtimeId }}:4000/socket + protocol: http + routes: + - name: realtime-v1-longpoll + strip_path: true + paths: + - /realtime/v1/ + plugins: + - name: cors + - name: realtime-v1-rest + _comment: "Realtime: /realtime/v1/* -> http://realtime:4000/api/*" + url: http://{{ .RealtimeId }}:4000/api + protocol: http + routes: + - name: realtime-v1-rest + strip_path: true + paths: + - /realtime/v1/api + plugins: + - name: cors + + - name: storage-v1 + _comment: "Storage: /storage/v1/* -> http://storage-api:5000/*" + url: http://{{ .StorageId }}:5000/ + routes: + - name: storage-v1-all + strip_path: true + paths: + - /storage/v1/ + plugins: + - name: cors +{{if StorageVersionBelow "1.10.1" }} + - name: request-transformer + config: + add: + headers: + - "Forwarded: host={{ .ApiHost }}:{{ .ApiPort }};proto=http" +{{end}} + - name: pg-meta + _comment: "pg-meta: /pg/* -> http://pg-meta:8080/*" + url: http://{{ .PgmetaId }}:8080/ + routes: + - name: pg-meta-all + strip_path: true + paths: + - /pg/ + - name: functions-v1 + _comment: "Functions: /functions/v1/* -> http://edge-runtime:8081/*" + url: http://{{ .EdgeRuntimeId }}:8081/ + # Set request idle timeout to 150s to match hosted project + # Ref: https://supabase.com/docs/guides/functions/limits + read_timeout: 150000 + routes: + - name: functions-v1-all + strip_path: true + paths: + - /functions/v1/ + - name: analytics-v1 + _comment: "Analytics: /analytics/v1/* -> http://logflare:4000/*" + url: http://{{ .LogflareId }}:4000/ + routes: + - name: analytics-v1-all + strip_path: true + paths: + - /analytics/v1/ + - name: pooler-v2-ws + _comment: "Pooler: /pooler/v2/* -> ws://pooler:4000/v2/*" + url: http://{{ .PoolerId }}:4000/v2 + protocol: ws + routes: + - name: pooler-v2-ws + strip_path: true + paths: + - /pooler/v2/ + plugins: + - name: cors diff --git a/internal/start/templates/pooler.exs b/internal/start/templates/pooler.exs new file mode 100644 index 0000000..3766d64 --- /dev/null +++ b/internal/start/templates/pooler.exs @@ -0,0 +1,30 @@ +{:ok, _} = Application.ensure_all_started(:supavisor) + +{:ok, version} = + case Supavisor.Repo.query!("select version()") do + %{rows: [[ver]]} -> Supavisor.Helpers.parse_pg_version(ver) + _ -> nil + end + +params = %{ + "external_id" => "{{ .ExternalId }}", + "db_host" => "{{ 
.DbHost }}",
+  "db_port" => {{ .DbPort }},
+  "db_database" => "{{ .DbDatabase }}",
+  "require_user" => false,
+  "auth_query" => "SELECT * FROM pgbouncer.get_auth($1)",
+  "default_max_clients" => {{ .DefaultMaxClients }},
+  "default_pool_size" => {{ .DefaultPoolSize }},
+  "default_parameter_status" => %{"server_version" => version},
+  "users" => [%{
+    "db_user" => "pgbouncer",
+    "db_password" => "{{ .DbPassword }}",
+    "mode_type" => "{{ .ModeType }}",
+    "pool_size" => {{ .DefaultPoolSize }},
+    "is_manager" => true
+  }]
+}
+
+if !Supavisor.Tenants.get_tenant_by_external_id(params["external_id"]) do
+  {:ok, _} = Supavisor.Tenants.create_tenant(params)
+end
diff --git a/internal/start/templates/vector.yaml b/internal/start/templates/vector.yaml
new file mode 100644
index 0000000..4c29864
--- /dev/null
+++ b/internal/start/templates/vector.yaml
@@ -0,0 +1,229 @@
+api:
+  enabled: true
+  address: 0.0.0.0:9001
+
+sources:
+  docker_host:
+    type: docker_logs
+    exclude_containers:
+      - "{{ .VectorId }}"
+
+transforms:
+  project_logs:
+    type: remap
+    inputs:
+      - docker_host
+    source: |-
+      .project = "default"
+      .event_message = del(.message)
+      .appname = del(.container_name)
+      del(.container_created_at)
+      del(.container_id)
+      del(.source_type)
+      del(.stream)
+      del(.label)
+      del(.image)
+      del(.host)
+      del(.stream)
+  router:
+    type: route
+    inputs:
+      - project_logs
+    route:
+      kong: '.appname == "{{ .KongId }}"'
+      auth: '.appname == "{{ .GotrueId }}"'
+      rest: '.appname == "{{ .RestId }}"'
+      realtime: '.appname == "{{ .RealtimeId }}"'
+      storage: '.appname == "{{ .StorageId }}"'
+      functions: '.appname == "{{ .EdgeRuntimeId }}"'
+      db: '.appname == "{{ .DbId }}"'
+  # Ignores non-nginx errors since they are related to Kong booting up
+  kong_logs:
+    type: remap
+    inputs:
+      - router.kong
+    source: |-
+      req, err = parse_nginx_log(.event_message, "combined")
+      if err == null {
+        .timestamp = req.timestamp
+        .metadata.request.headers.referer = req.referer
+        .metadata.request.headers.user_agent = req.agent
+        .metadata.request.headers.cf_connecting_ip = req.client
+        .metadata.request.method = req.method
+        .metadata.request.path = req.path
+        .metadata.request.protocol = req.protocol
+        .metadata.response.status_code = req.status
+      }
+      if err != null {
+        abort
+      }
+  # Ignores non-nginx errors since they are related to Kong booting up
+  kong_err:
+    type: remap
+    inputs:
+      - router.kong
+    source: |-
+      .metadata.request.method = "GET"
+      .metadata.response.status_code = 200
+      parsed, err = parse_nginx_log(.event_message, "error")
+      if err == null {
+        .timestamp = parsed.timestamp
+        .severity = parsed.severity
+        .metadata.request.host = parsed.host
+        .metadata.request.headers.cf_connecting_ip = parsed.client
+        url, err = split(parsed.request, " ")
+        if err == null {
+          .metadata.request.method = url[0]
+          .metadata.request.path = url[1]
+          .metadata.request.protocol = url[2]
+        }
+      }
+      if err != null {
+        abort
+      }
+  # GoTrue logs are structured JSON strings which the frontend parses directly, but we keep metadata for consistency.
+  auth_logs:
+    type: remap
+    inputs:
+      - router.auth
+    source: |-
+      parsed, err = parse_json(.event_message)
+      if err == null {
+        .metadata.timestamp = parsed.time
+        .metadata = merge!(.metadata, parsed)
+      }
+  # PostgREST logs are structured, so we separate the timestamp from the message using a regex
+  rest_logs:
+    type: remap
+    inputs:
+      - router.rest
+    source: |-
+      parsed, err = parse_regex(.event_message, r'^(?P