commit b6bb6585d8d20f774758bcb38347f8033f050d5a
Author: hailin
Date:   Wed May 14 22:47:16 2025 +0800

    first commit.

diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..7780a25
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,491 @@
+# syntax=docker/dockerfile:1.6
+FROM golang:1.22.3-alpine3.20 as authbuild
+ENV GO111MODULE=on
+ENV CGO_ENABLED=0
+ENV GOOS=linux
+
+RUN apk add --no-cache make git
+
+WORKDIR /go/src/github.com/supabase/auth
+
+# Pulling dependencies
+COPY auth_v2.169.0/Makefile auth_v2.169.0/go.* ./
+RUN make deps
+
+# Building stuff
+COPY auth_v2.169.0/. ./
+
+# Make sure you change the RELEASE_VERSION value before publishing an image.
+# (It should track the vendored auth release, not the Go toolchain version.)
+RUN RELEASE_VERSION=2.169.0 make build
+
+
+
+# Base stage for shared environment setup
+FROM node:20-alpine3.20 as s3base
+RUN apk add --no-cache g++ make python3
+WORKDIR /app
+COPY storage_v1.19.1/package.json storage_v1.19.1/package-lock.json ./
+
+# Dependencies stage - install and cache all dependencies
+FROM s3base as dependencies
+RUN npm ci
+# Cache the installed node_modules for later stages
+RUN cp -R node_modules /node_modules_cache
+
+# Build stage - use cached node_modules for building the application
+FROM s3base as s3build
+COPY --from=dependencies /node_modules_cache ./node_modules
+COPY storage_v1.19.1/. .
+RUN npm run build
+
+# Production dependencies stage - use npm cache to install only production dependencies
+FROM s3base as production-deps
+COPY --from=dependencies /node_modules_cache ./node_modules
+RUN npm ci --production
+
+#EXPOSE 5000
+#CMD ["node", "dist/start/server.js"]
+
+
+
+# Always use alpine:3 so the latest version is used. This will keep CA certs more up to date.
+#FROM alpine:3
+FROM nvcr.io/nvidia/tritonserver:24.04-py3-min as base
+
+RUN mkdir -p /storage-api
+
+#RUN adduser -D -u 1000 supabase
+
+#RUN apk add --no-cache ca-certificates
+
+# Create the user (the Ubuntu way)
+RUN useradd -m -u 1000 supabase
+
+# Install ca-certificates (the Ubuntu way)
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends ca-certificates && \
+    rm -rf /var/lib/apt/lists/*
+
+COPY --from=authbuild /go/src/github.com/supabase/auth/auth /usr/local/bin/auth
+COPY --from=authbuild /go/src/github.com/supabase/auth/migrations /usr/local/etc/auth/migrations/
+RUN ln -s /usr/local/bin/auth /usr/local/bin/gotrue
+
+ENV GOTRUE_DB_MIGRATIONS_PATH /usr/local/etc/auth/migrations
+
+#USER supabase
+#CMD ["auth"]
+
+
+ARG VERSION
+ENV VERSION=$VERSION
+COPY storage_v1.19.1/migrations migrations
+
+# Copy production node_modules from the production dependencies stage
+COPY --from=production-deps /app/node_modules node_modules
+# Copy build artifacts from the build stage
+COPY --from=s3build /app/dist dist
+
+
+#----------------------------------------------------------------- Postgrest --------------------------------
+RUN apt-get update -y \
+    && apt-get install -y --no-install-recommends libpq-dev zlib1g-dev jq gcc libnuma-dev \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+COPY postgrest_v12.2.8/postgrest /usr/bin/postgrest
+RUN chmod +x /usr/bin/postgrest
+
+#EXPOSE 3000
+
+#USER 1000
+
+# Use the array form to avoid running the command using bash, which does not handle `SIGTERM` properly.
+# See https://docs.docker.com/compose/faq/#why-do-my-services-take-10-seconds-to-recreate-or-stop
+#CMD ["postgrest"]
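+# For reference, the shell/exec distinction the comment above refers to
+# (both CMD lines are illustrative only and intentionally left disabled here):
+#   CMD postgrest       # shell form: PID 1 is `/bin/sh -c`, SIGTERM never reaches postgrest
+#   CMD ["postgrest"]   # exec form: postgrest runs as PID 1 and receives SIGTERM directly
+# With the shell form, `docker stop` waits out the grace period and then sends SIGKILL.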
+
+
+
+
+#========================================================================== PostgreSQL =============================================================
+ARG postgresql_major=15
+ARG postgresql_release=${postgresql_major}.1
+
+# Bump default build arg to build a package from source
+# Bump vars.yml to specify runtime package version
+ARG sfcgal_release=1.3.10
+ARG postgis_release=3.3.2
+ARG pgrouting_release=3.4.1
+ARG pgtap_release=1.2.0
+ARG pg_cron_release=1.6.2
+ARG pgaudit_release=1.7.0
+ARG pgjwt_release=9742dab1b2f297ad3811120db7b21451bca2d3c9
+ARG pgsql_http_release=1.5.0
+ARG plpgsql_check_release=2.2.5
+ARG pg_safeupdate_release=1.4
+ARG timescaledb_release=2.9.1
+ARG wal2json_release=2_5
+ARG pljava_release=1.6.4
+ARG plv8_release=3.1.5
+ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492
+ARG pg_net_release=0.7.1
+ARG rum_release=1.3.13
+ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6
+ARG libsodium_release=1.0.18
+ARG pgsodium_release=3.1.6
+ARG pg_graphql_release=1.5.11
+ARG pg_stat_monitor_release=1.1.1
+ARG pg_jsonschema_release=0.1.4
+ARG pg_repack_release=1.4.8
+ARG vault_release=0.2.8
+ARG groonga_release=12.0.8
+ARG pgroonga_release=2.4.0
+ARG wrappers_release=0.3.0
+ARG hypopg_release=1.3.1
+ARG pgvector_release=0.4.0
+ARG pg_tle_release=1.3.2
+ARG index_advisor_release=0.2.0
+ARG supautils_release=2.2.0
+ARG wal_g_release=2.0.1
+
+#FROM ubuntu:focal as base
+#FROM nvcr.io/nvidia/tritonserver:24.04-py3-min as base
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt update -y && apt install -y \
+    curl \
+    gnupg \
+    lsb-release \
+    software-properties-common \
+    wget \
+    sudo \
+    && apt clean
+
+
+RUN adduser --system --home /var/lib/postgresql --no-create-home --shell /bin/bash --group --gecos "PostgreSQL administrator" postgres
+RUN adduser --system --no-create-home --shell /bin/bash --group wal-g
+RUN curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install linux \
+--init none \
+--no-confirm \
+--extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \
+--extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI= cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
+
+ENV PATH="${PATH}:/nix/var/nix/profiles/default/bin"
+
+COPY postgres_15.8.1.044/. /nixpg
+
+WORKDIR /nixpg
+
+RUN nix profile install .#psql_15/bin
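+
+# The .#psql_15/bin flake output is defined by the vendored postgres_15.8.1.044
+# sources; if the substituters above are unreachable, nix will fall back to
+# building PostgreSQL from source. An optional smoke test (a suggestion only,
+# not part of the original build) would catch a broken profile right away:
+#   RUN /nix/var/nix/profiles/default/bin/postgres --version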
+
+
+
+WORKDIR /
+
+
+RUN mkdir -p /usr/lib/postgresql/bin \
+    /usr/lib/postgresql/share/postgresql \
+    /usr/share/postgresql \
+    /var/lib/postgresql \
+    && chown -R postgres:postgres /usr/lib/postgresql \
+    && chown -R postgres:postgres /var/lib/postgresql \
+    && chown -R postgres:postgres /usr/share/postgresql
+
+# Create symbolic links
+RUN ln -sf /nix/var/nix/profiles/default/bin/* /usr/lib/postgresql/bin/ \
+    && ln -sf /nix/var/nix/profiles/default/bin/* /usr/bin/ \
+    && chown -R postgres:postgres /usr/bin
+
+# Create symbolic links for PostgreSQL shares
+RUN ln -sf /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/share/postgresql/
+RUN ln -sf /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/
+RUN chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/
+RUN chown -R postgres:postgres /usr/share/postgresql/
+# Create symbolic links for contrib directory
+RUN mkdir -p /usr/lib/postgresql/share/postgresql/contrib \
+    && find /nix/var/nix/profiles/default/share/postgresql/contrib/ -mindepth 1 -type d -exec sh -c 'for dir do ln -s "$dir" "/usr/lib/postgresql/share/postgresql/contrib/$(basename "$dir")"; done' sh {} + \
+    && chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/contrib/
+
+RUN chown -R postgres:postgres /usr/lib/postgresql
+
+RUN ln -sf /usr/lib/postgresql/share/postgresql/timezonesets /usr/share/postgresql/timezonesets
+
+
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends tzdata
+
+RUN ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime && \
+    dpkg-reconfigure --frontend noninteractive tzdata
+
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    build-essential \
+    checkinstall \
+    cmake
+
+ENV PGDATA=/var/lib/postgresql/data
+
+####################
+# setup-wal-g.yml
+####################
+FROM base as walg
+ARG wal_g_release
+ARG TARGETARCH
+# ADD "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-${TARGETARCH}.tar.gz" /tmp/wal-g.tar.gz
+# Map Docker's arm64 to wal-g's aarch64 asset name, then fetch the matching tarball.
+RUN arch=$([ "$TARGETARCH" = "arm64" ] && echo "aarch64" || echo "$TARGETARCH") && \
+    apt-get update && apt-get install -y --no-install-recommends curl && \
+    curl -kL "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-${arch}.tar.gz" -o /tmp/wal-g.tar.gz && \
+    tar -xvf /tmp/wal-g.tar.gz -C /tmp && \
+    rm -rf /tmp/wal-g.tar.gz && \
+    mv /tmp/wal-g-pg-ubuntu*20.04-${arch} /tmp/wal-g
+
+# ####################
+# # Download gosu for easy step-down from root
+# ####################
+FROM base as gosu
+ARG TARGETARCH
+# Install dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    gnupg \
+    ca-certificates \
+    && rm -rf /var/lib/apt/lists/*
+# Download binary
+ARG GOSU_VERSION=1.16
+ARG GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4
+ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH \
+    /usr/local/bin/gosu
+ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH.asc \
+    /usr/local/bin/gosu.asc
+# Verify checksum
+RUN gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys $GOSU_GPG_KEY && \
+    gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \
+    gpgconf --kill all && \
+    chmod +x /usr/local/bin/gosu
+
+# ####################
+# # Build final image
+# ####################
+FROM gosu as production
+RUN id postgres || (echo "postgres user does not exist" && exit 1)
+# # Setup extensions
+COPY 
--from=walg /tmp/wal-g /usr/local/bin/ + +# # Initialise configs +COPY --chown=postgres:postgres postgres_15.8.1.044/ansible/files/postgresql_config/postgresql.conf.j2 /etc/postgresql/postgresql.conf +COPY --chown=postgres:postgres postgres_15.8.1.044/ansible/files/postgresql_config/pg_hba.conf.j2 /etc/postgresql/pg_hba.conf +COPY --chown=postgres:postgres postgres_15.8.1.044/ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf +COPY --chown=postgres:postgres postgres_15.8.1.044/ansible/files/postgresql_config/postgresql-stdout-log.conf /etc/postgresql/logging.conf +COPY --chown=postgres:postgres postgres_15.8.1.044/ansible/files/postgresql_config/supautils.conf.j2 /etc/postgresql-custom/supautils.conf +COPY --chown=postgres:postgres postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts /etc/postgresql-custom/extension-custom-scripts +COPY --chown=postgres:postgres postgres_15.8.1.044/ansible/files/pgsodium_getkey_urandom.sh.j2 /usr/lib/postgresql/bin/pgsodium_getkey.sh +COPY --chown=postgres:postgres postgres_15.8.1.044/ansible/files/postgresql_config/custom_read_replica.conf.j2 /etc/postgresql-custom/read-replica.conf +COPY --chown=postgres:postgres postgres_15.8.1.044/ansible/files/postgresql_config/custom_walg.conf.j2 /etc/postgresql-custom/wal-g.conf +COPY --chown=postgres:postgres postgres_15.8.1.044/ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh +COPY postgres_15.8.1.044/ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh + +RUN sed -i \ + -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ + -e "s|#session_preload_libraries = ''|session_preload_libraries = 'supautils'|g" \ + -e "s|#include = '/etc/postgresql-custom/supautils.conf'|include = '/etc/postgresql-custom/supautils.conf'|g" \ + -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ + echo "cron.database_name = 'postgres'" >> /etc/postgresql/postgresql.conf && \ + #echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-${TARGETARCH}/lib/server/libjvm.so'" >> /etc/postgresql/postgresql.conf && \ + echo "pgsodium.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo 'auto_explain.log_min_duration = 10s' >> /etc/postgresql/postgresql.conf && \ + usermod -aG postgres wal-g && \ + mkdir -p /etc/postgresql-custom && \ + chown postgres:postgres /etc/postgresql-custom + +# # Include schema migrations +COPY postgres_15.8.1.044/migrations/db /docker-entrypoint-initdb.d/ +COPY postgres_15.8.1.044/ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql +COPY postgres_15.8.1.044/ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql + +# # Add upstream entrypoint script +COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu +ADD --chmod=0755 \ + https://github.com/docker-library/postgres/raw/master/15/bullseye/docker-entrypoint.sh \ + /usr/local/bin/ + +RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql + +ENTRYPOINT ["docker-entrypoint.sh"] + +HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost +STOPSIGNAL SIGINT +EXPOSE 5432 + +ENV POSTGRES_HOST=/var/run/postgresql +ENV POSTGRES_USER=supabase_admin +ENV POSTGRES_DB=postgres +RUN apt-get update && apt-get install -y --no-install-recommends \ 
+ locales \ + && rm -rf /var/lib/apt/lists/* && \ + localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ + && localedef -i C -c -f UTF-8 -A /usr/share/locale/locale.alias C.UTF-8 +RUN echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen && locale-gen +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 +ENV LC_CTYPE=C.UTF-8 +ENV LC_COLLATE=C.UTF-8 +ENV LOCALE_ARCHIVE /usr/lib/locale/locale-archive +#CMD ["postgres", "-D", "/etc/postgresql"] + + + +#=================================================== kong ===================================================================== + +ARG ASSET=ce +ENV ASSET $ASSET + +ARG EE_PORTS + +COPY docker-kong_v2.8.1/ubuntu/kong.deb /tmp/kong.deb + +ARG KONG_VERSION=2.8.1 +ENV KONG_VERSION $KONG_VERSION + +ARG KONG_AMD64_SHA="10d12d23e5890414d666663094d51a42de41f8a9806fbc0baaf9ac4d37794361" +ARG KONG_ARM64_SHA="61c13219ef64dac9aeae5ae775411e8cfcd406f068cf3e75d463f916ae6513cb" + +# hadolint ignore=DL3015 +RUN set -ex; \ + arch=$(dpkg --print-architecture); \ + case "${arch}" in \ + amd64) KONG_SHA256=$KONG_AMD64_SHA ;; \ + arm64) KONG_SHA256=$KONG_ARM64_SHA ;; \ + esac; \ + apt-get update \ + && if [ "$ASSET" = "ce" ] ; then \ + apt-get install -y curl \ + && UBUNTU_CODENAME=focal \ + && KONG_REPO=$(echo ${KONG_VERSION%.*} | sed 's/\.//') \ + && curl -fL https://packages.konghq.com/public/gateway-$KONG_REPO/deb/ubuntu/pool/$UBUNTU_CODENAME/main/k/ko/kong_$KONG_VERSION/kong_${KONG_VERSION}_$arch.deb -o /tmp/kong.deb \ + && apt-get purge -y curl \ + && echo "$KONG_SHA256 /tmp/kong.deb" | sha256sum -c -; \ + else \ + # this needs to stay inside this "else" block so that it does not become part of the "official images" builds (https://github.com/docker-library/official-images/pull/11532#issuecomment-996219700) + apt-get upgrade -y ; \ + fi; \ + apt-get install -y --no-install-recommends unzip git \ + # Please update the ubuntu install docs if the below line is changed so that + # end users can properly install Kong along with its required dependencies + # and that our CI does not diverge from our docs. 
+    && apt install --yes /tmp/kong.deb \
+    && rm -rf /var/lib/apt/lists/* \
+    && rm -rf /tmp/kong.deb \
+    && chown kong:0 /usr/local/bin/kong \
+    && chown -R kong:0 /usr/local/kong \
+    && ln -s /usr/local/openresty/bin/resty /usr/local/bin/resty \
+    && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/luajit \
+    && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/lua \
+    && ln -s /usr/local/openresty/nginx/sbin/nginx /usr/local/bin/nginx \
+    && if [ "$ASSET" = "ce" ] ; then \
+    kong version ; \
+    fi
+
+COPY docker-kong_v2.8.1/ubuntu/docker-entrypoint.sh /docker-entrypoint.sh
+
+#USER kong
+
+#ENTRYPOINT ["/docker-entrypoint.sh"]
+
+#EXPOSE 8000 8443 8001 8444 $EE_PORTS
+
+#STOPSIGNAL SIGQUIT
+
+#HEALTHCHECK --interval=10s --timeout=10s --retries=10 CMD kong health
+
+#CMD ["kong", "docker-start"]
+
+
+
+
+ARG CUDA_VERSION=12.5.1
+
+
+
+#============================================= sglang ============================================
+#FROM nvcr.io/nvidia/tritonserver:24.04-py3-min
+
+ARG BUILD_TYPE=all
+#ENV DEBIAN_FRONTEND=noninteractive
+
+
+# Install dependencies (forcing IPv4 for apt)
+RUN echo 'tzdata tzdata/Areas select Asia' | debconf-set-selections \
+    && echo 'tzdata tzdata/Zones/Asia select Shanghai' | debconf-set-selections \
+    && apt -o Acquire::ForceIPv4=true update -y \
+    && apt -o Acquire::ForceIPv4=true install software-properties-common -y \
+    && add-apt-repository ppa:deadsnakes/ppa -y \
+    && apt -o Acquire::ForceIPv4=true update \
+    && apt -o Acquire::ForceIPv4=true install python3.10 python3.10-dev -y \
+    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.10 1 \
+    && update-alternatives --set python3 /usr/bin/python3.10 \
+    && apt -o Acquire::ForceIPv4=true install python3.10-distutils -y \
+    && apt -o Acquire::ForceIPv4=true install curl gnupg wget git sudo libibverbs-dev -y \
+    && apt -o Acquire::ForceIPv4=true install -y rdma-core infiniband-diags openssh-server perftest ibverbs-providers libibumad3 libibverbs1 libnl-3-200 libnl-route-3-200 librdmacm1 \
+    && curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py \
+    && python3 get-pip.py \
+    && python3 --version \
+    && python3 -m pip --version \
+    && rm -rf /var/lib/apt/lists/* \
+    && apt clean
+
+
+# Install datamodel_code_generator (required by MiniCPM models)
+RUN pip3 install datamodel_code_generator
+
+WORKDIR /sgl-workspace
+
+# Copy in the sglang sources and build the package
+COPY ./sglang /sgl-workspace/sglang
+
+# Copy in model weights (corrected approach)
+#COPY ./Alibaba/QwQ-32B /root/.cradle/Alibaba/QwQ-32B
+
+ARG CUDA_VERSION
+
+# Install dependencies, then sglang and transformers, and clean up the sources
+RUN python3 -m pip install --upgrade pip setuptools wheel html5lib six \
+    && if [ "$CUDA_VERSION" = "12.1.1" ]; then \
+    CUINDEX=121; \
+    elif [ "$CUDA_VERSION" = "12.4.1" ]; then \
+    CUINDEX=124; \
+    elif [ "$CUDA_VERSION" = "12.5.1" ]; then \
+    CUINDEX=124; \
+    elif [ "$CUDA_VERSION" = "11.8.0" ]; then \
+    CUINDEX=118; \
+    python3 -m pip install --no-cache-dir sgl-kernel -i https://docs.sglang.ai/whl/cu118; \
+    else \
+    echo "Unsupported CUDA version: $CUDA_VERSION" && exit 1; \
+    fi \
+    && python3 -m pip install --no-cache-dir torch --index-url https://download.pytorch.org/whl/cu${CUINDEX} \
+    && python3 -m pip install --no-cache-dir psutil pyzmq pynvml \
+    && cd /sgl-workspace/sglang/python \
+    && python3 -m pip install --no-cache-dir '.[srt,openai]' --find-links https://flashinfer.ai/whl/cu${CUINDEX}/torch2.5/flashinfer-python \
+    && cd / && rm -rf /sgl-workspace/sglang \
+    && python3 -m pip install --no-cache-dir transformers==4.48.3 \
+    && python3 -c "import sglang; print('✅ sglang module installed successfully')"
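+
+# A built image can be sanity-checked by starting sglang's OpenAI-compatible
+# server (the model path and port below are placeholders, not part of this build):
+#   python3 -m sglang.launch_server --model-path /models/your-model --port 30000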
+
+
diff --git a/auth_v2.169.0/.dockerignore b/auth_v2.169.0/.dockerignore
new file mode 100644
index 0000000..8efdb51
--- /dev/null
+++ b/auth_v2.169.0/.dockerignore
@@ -0,0 +1,3 @@
+/hack/
+/vendor/
+/www/
diff --git a/auth_v2.169.0/.gitattributes b/auth_v2.169.0/.gitattributes
new file mode 100644
index 0000000..bbfc097
--- /dev/null
+++ b/auth_v2.169.0/.gitattributes
@@ -0,0 +1,41 @@
+# Set the default behavior
+* text=auto
+
+# Go files
+*.mod text eol=lf
+*.sum text eol=lf
+*.go text eol=lf
+
+# Serialization
+*.yml eol=lf
+*.yaml eol=lf
+*.toml eol=lf
+*.json eol=lf
+
+# Scripts
+*.sh eol=lf
+
+# DB files
+*.sql eol=lf
+
+# Html
+*.html eol=lf
+
+# Text and markdown files
+*.txt text eol=lf
+*.md text eol=lf
+
+# Environment files/examples
+*.env text eol=lf
+
+# Docker files
+.dockerignore text eol=lf
+Dockerfile* text eol=lf
+
+# Makefile
+Makefile text eol=lf
+
+# Git files
+.gitignore text eol=lf
+.gitattributes text eol=lf
+.gitkeep text eol=lf
\ No newline at end of file
diff --git a/auth_v2.169.0/.gitignore b/auth_v2.169.0/.gitignore
new file mode 100644
index 0000000..acab1be
--- /dev/null
+++ b/auth_v2.169.0/.gitignore
@@ -0,0 +1,18 @@
+.env*
+vendor/
+gotrue
+gotrue-arm64
+gotrue.exe
+auth
+auth-arm64
+auth.exe
+
+coverage.out
+
+.DS_Store
+.vscode
+www/dist/
+www/.DS_Store
+www/node_modules
+npm-debug.log
+.data
diff --git a/auth_v2.169.0/.releaserc b/auth_v2.169.0/.releaserc
new file mode 100644
index 0000000..32f0e45
--- /dev/null
+++ b/auth_v2.169.0/.releaserc
@@ -0,0 +1,10 @@
+{
+  "branches": [
+    "master"
+  ],
+  "plugins": [
+    "@semantic-release/commit-analyzer",
+    "@semantic-release/release-notes-generator",
+    "@semantic-release/github"
+  ]
+}
diff --git a/auth_v2.169.0/CHANGELOG.md b/auth_v2.169.0/CHANGELOG.md
new file mode 100644
index 0000000..551852f
--- /dev/null
+++ b/auth_v2.169.0/CHANGELOG.md
@@ -0,0 +1,601 @@
+# Changelog
+
+## [2.169.0](https://github.com/supabase/auth/compare/v2.168.0...v2.169.0) (2025-01-27)
+
+
+### Features
+
+* add an optional burstable rate limiter ([#1924](https://github.com/supabase/auth/issues/1924)) ([1f06f58](https://github.com/supabase/auth/commit/1f06f58e1434b91612c0d96c8c0435d26570f3e2))
+* cover 100% of crypto with tests ([#1892](https://github.com/supabase/auth/issues/1892)) ([174198e](https://github.com/supabase/auth/commit/174198e56f8e9b8470a717d0021c626130288d2e))
+
+
+### Bug Fixes
+
+* convert refreshed_at to UTC before updating ([#1916](https://github.com/supabase/auth/issues/1916)) ([a4c692f](https://github.com/supabase/auth/commit/a4c692f6cb1b8bf4c47ea012872af5ce93382fbf))
+* correct casing of API key authentication in openapi.yaml 
([0cfd177](https://github.com/supabase/auth/commit/0cfd177b8fb1df8f62e84fbd3761ef9f90c384de)) +* improve invalid channel error message returned ([#1908](https://github.com/supabase/auth/issues/1908)) ([f72f0ee](https://github.com/supabase/auth/commit/f72f0eee328fa0aa041155f5f5dc305f0874d2bf)) +* improve saml assertion logging ([#1915](https://github.com/supabase/auth/issues/1915)) ([d6030cc](https://github.com/supabase/auth/commit/d6030ccd271a381e2a6ababa11a5beae4b79e5c3)) + +## [2.168.0](https://github.com/supabase/auth/compare/v2.167.0...v2.168.0) (2025-01-06) + + +### Features + +* set `email_verified` to true on all identities with the verified email ([#1902](https://github.com/supabase/auth/issues/1902)) ([307892f](https://github.com/supabase/auth/commit/307892f85b39150074fbb80b9c8f45ac3312aae2)) + +## [2.167.0](https://github.com/supabase/auth/compare/v2.166.0...v2.167.0) (2024-12-24) + + +### Features + +* fix argon2 parsing and comparison ([#1887](https://github.com/supabase/auth/issues/1887)) ([9dbe6ef](https://github.com/supabase/auth/commit/9dbe6ef931ae94e621d55a5f7aea4b7ee0449949)) + +## [2.166.0](https://github.com/supabase/auth/compare/v2.165.0...v2.166.0) (2024-12-23) + + +### Features + +* switch to googleapis/release-please-action, bump to 2.166.0 ([#1883](https://github.com/supabase/auth/issues/1883)) ([11a312f](https://github.com/supabase/auth/commit/11a312fcf77771b3732f2f439078225895df7a85)) + + +### Bug Fixes + +* check if session is nil ([#1873](https://github.com/supabase/auth/issues/1873)) ([fd82601](https://github.com/supabase/auth/commit/fd82601917adcd9f8c38263953eb1ef098b26b7f)) +* email_verified field not being updated on signup confirmation ([#1868](https://github.com/supabase/auth/issues/1868)) ([483463e](https://github.com/supabase/auth/commit/483463e49eec7b2974cca05eadca6b933b2145b5)) +* handle user banned error code ([#1851](https://github.com/supabase/auth/issues/1851)) ([a6918f4](https://github.com/supabase/auth/commit/a6918f49baee42899b3ae1b7b6bc126d84629c99)) +* Revert "fix: revert fallback on btree indexes when hash is unavailable" ([#1859](https://github.com/supabase/auth/issues/1859)) ([9fe5b1e](https://github.com/supabase/auth/commit/9fe5b1eebfafb385d6b5d10196aeb2a1964ab296)) +* skip cleanup for non-2xx status ([#1877](https://github.com/supabase/auth/issues/1877)) ([f572ced](https://github.com/supabase/auth/commit/f572ced3699c7f920deccce1a3539299541ec94c)) + +## [2.165.1](https://github.com/supabase/auth/compare/v2.165.0...v2.165.1) (2024-12-06) + + +### Bug Fixes + +* allow setting the mailer service headers as strings ([#1861](https://github.com/supabase/auth/issues/1861)) ([7907b56](https://github.com/supabase/auth/commit/7907b566228f7e2d76049b44cfe0cc808c109100)) + +## [2.165.0](https://github.com/supabase/auth/compare/v2.164.0...v2.165.0) (2024-12-05) + + +### Features + +* add email validation function to lower bounce rates ([#1845](https://github.com/supabase/auth/issues/1845)) ([2c291f0](https://github.com/supabase/auth/commit/2c291f0356f3e91063b6b43bf2a21625b0ce0ebd)) +* use embedded migrations for `migrate` command ([#1843](https://github.com/supabase/auth/issues/1843)) ([e358da5](https://github.com/supabase/auth/commit/e358da5f0e267725a77308461d0a4126436fc537)) + + +### Bug Fixes + +* fallback on btree indexes when hash is unavailable ([#1856](https://github.com/supabase/auth/issues/1856)) ([b33bc31](https://github.com/supabase/auth/commit/b33bc31c07549dc9dc221100995d6f6b6754fd3a)) +* return the error code instead of status code 
([#1855](https://github.com/supabase/auth/issues/1855)) ([834a380](https://github.com/supabase/auth/commit/834a380d803ae9ce59ce5ee233fa3a78a984fe68)) +* revert fallback on btree indexes when hash is unavailable ([#1858](https://github.com/supabase/auth/issues/1858)) ([1c7202f](https://github.com/supabase/auth/commit/1c7202ff835856562ee66b33be131eca769acf1d)) +* update ip mismatch error message ([#1849](https://github.com/supabase/auth/issues/1849)) ([49fbbf0](https://github.com/supabase/auth/commit/49fbbf03917a1085c58e9a1ff76c247ae6bb9ca7)) + +## [2.164.0](https://github.com/supabase/auth/compare/v2.163.2...v2.164.0) (2024-11-13) + + +### Features + +* return validation failed error if captcha request was not json ([#1815](https://github.com/supabase/auth/issues/1815)) ([26d2e36](https://github.com/supabase/auth/commit/26d2e36bba29eb8a6ddba556acfd0820f3bfde5d)) + + +### Bug Fixes + +* add error codes to refresh token flow ([#1824](https://github.com/supabase/auth/issues/1824)) ([4614dc5](https://github.com/supabase/auth/commit/4614dc54ab1dcb5390cfed05441e7888af017d92)) +* add test coverage for rate limits with 0 permitted events ([#1834](https://github.com/supabase/auth/issues/1834)) ([7c3cf26](https://github.com/supabase/auth/commit/7c3cf26cfe2a3e4de579d10509945186ad719855)) +* correct web authn aaguid column naming ([#1826](https://github.com/supabase/auth/issues/1826)) ([0a589d0](https://github.com/supabase/auth/commit/0a589d04e1cd9310cb260d329bc8beb050adf8da)) +* default to files:read scope for Figma provider ([#1831](https://github.com/supabase/auth/issues/1831)) ([9ce2857](https://github.com/supabase/auth/commit/9ce28570bf3da9571198d44d693c7ad7038cde33)) +* improve error messaging for http hooks ([#1821](https://github.com/supabase/auth/issues/1821)) ([fa020d0](https://github.com/supabase/auth/commit/fa020d0fc292d5c381c57ecac6666d9ff657e4c4)) +* make drop_uniqueness_constraint_on_phone idempotent ([#1817](https://github.com/supabase/auth/issues/1817)) ([158e473](https://github.com/supabase/auth/commit/158e4732afa17620cdd89c85b7b57569feea5c21)) +* possible panic if refresh token has a null session_id ([#1822](https://github.com/supabase/auth/issues/1822)) ([a7129df](https://github.com/supabase/auth/commit/a7129df4e1d91a042b56ff1f041b9c6598825475)) +* rate limits of 0 take precedence over MAILER_AUTO_CONFIRM ([#1837](https://github.com/supabase/auth/issues/1837)) ([cb7894e](https://github.com/supabase/auth/commit/cb7894e1119d27d527dedcca22d8b3d433beddac)) + +## [2.163.2](https://github.com/supabase/auth/compare/v2.163.1...v2.163.2) (2024-10-22) + + +### Bug Fixes + +* ignore rate limits for autoconfirm ([#1810](https://github.com/supabase/auth/issues/1810)) ([9ce2340](https://github.com/supabase/auth/commit/9ce23409f960a8efa55075931138624cb681eca5)) + +## [2.163.1](https://github.com/supabase/auth/compare/v2.163.0...v2.163.1) (2024-10-22) + + +### Bug Fixes + +* external host validation ([#1808](https://github.com/supabase/auth/issues/1808)) ([4f6a461](https://github.com/supabase/auth/commit/4f6a4617074e61ba3b31836ccb112014904ce97c)), closes [#1228](https://github.com/supabase/auth/issues/1228) + +## [2.163.0](https://github.com/supabase/auth/compare/v2.162.2...v2.163.0) (2024-10-15) + + +### Features + +* add mail header support via `GOTRUE_SMTP_HEADERS` with `$messageType` ([#1804](https://github.com/supabase/auth/issues/1804)) ([99d6a13](https://github.com/supabase/auth/commit/99d6a134c44554a8ad06695e1dff54c942c8335d)) +* add MFA for WebAuthn 
([#1775](https://github.com/supabase/auth/issues/1775)) ([8cc2f0e](https://github.com/supabase/auth/commit/8cc2f0e14d06d0feb56b25a0278fda9e213b6b5a)) +* configurable email and sms rate limiting ([#1800](https://github.com/supabase/auth/issues/1800)) ([5e94047](https://github.com/supabase/auth/commit/5e9404717e1c962ab729cde150ef5b40ea31a6e8)) +* mailer logging ([#1805](https://github.com/supabase/auth/issues/1805)) ([9354b83](https://github.com/supabase/auth/commit/9354b83a48a3edcb49197c997a1e96efc80c5383)) +* preserve rate limiters in memory across configuration reloads ([#1792](https://github.com/supabase/auth/issues/1792)) ([0a3968b](https://github.com/supabase/auth/commit/0a3968b02b9f044bfb7e5ebc71dca970d2bb7807)) + + +### Bug Fixes + +* add twilio verify support on mfa ([#1714](https://github.com/supabase/auth/issues/1714)) ([aeb5d8f](https://github.com/supabase/auth/commit/aeb5d8f8f18af60ce369cab5714979ac0c208308)) +* email header setting no longer misleading ([#1802](https://github.com/supabase/auth/issues/1802)) ([3af03be](https://github.com/supabase/auth/commit/3af03be6b65c40f3f4f62ce9ab989a20d75ae53a)) +* enforce authorized address checks on send email only ([#1806](https://github.com/supabase/auth/issues/1806)) ([c0c5b23](https://github.com/supabase/auth/commit/c0c5b23728c8fb633dae23aa4b29ed60e2691a2b)) +* fix `getExcludedColumns` slice allocation ([#1788](https://github.com/supabase/auth/issues/1788)) ([7f006b6](https://github.com/supabase/auth/commit/7f006b63c8d7e28e55a6d471881e9c118df80585)) +* Fix reqPath for bypass check for verify EP ([#1789](https://github.com/supabase/auth/issues/1789)) ([646dc66](https://github.com/supabase/auth/commit/646dc66ea8d59a7f78bf5a5e55d9b5065a718c23)) +* inline mailme package for easy development ([#1803](https://github.com/supabase/auth/issues/1803)) ([fa6f729](https://github.com/supabase/auth/commit/fa6f729a027eff551db104550fa626088e00bc15)) + +## [2.162.2](https://github.com/supabase/auth/compare/v2.162.1...v2.162.2) (2024-10-05) + + +### Bug Fixes + +* refactor mfa validation into functions ([#1780](https://github.com/supabase/auth/issues/1780)) ([410b8ac](https://github.com/supabase/auth/commit/410b8acdd659fc4c929fe57a9e9dba4c76da305d)) +* upgrade ci Go version ([#1782](https://github.com/supabase/auth/issues/1782)) ([97a48f6](https://github.com/supabase/auth/commit/97a48f6daaa2edda5b568939cbb1007ccdf33cfc)) +* validateEmail should normalise emails ([#1790](https://github.com/supabase/auth/issues/1790)) ([2e9b144](https://github.com/supabase/auth/commit/2e9b144a0cbf2d26d3c4c2eafbff1899a36aeb3b)) + +## [2.162.1](https://github.com/supabase/auth/compare/v2.162.0...v2.162.1) (2024-10-03) + + +### Bug Fixes + +* bypass check for token & verify endpoints ([#1785](https://github.com/supabase/auth/issues/1785)) ([9ac2ea0](https://github.com/supabase/auth/commit/9ac2ea0180826cd2f65e679524aabfb10666e973)) + +## [2.162.0](https://github.com/supabase/auth/compare/v2.161.0...v2.162.0) (2024-09-27) + + +### Features + +* add support for migration of firebase scrypt passwords ([#1768](https://github.com/supabase/auth/issues/1768)) ([ba00f75](https://github.com/supabase/auth/commit/ba00f75c28d6708ddf8ee151ce18f2d6193689ef)) + + +### Bug Fixes + +* apply authorized email restriction to non-admin routes ([#1778](https://github.com/supabase/auth/issues/1778)) ([1af203f](https://github.com/supabase/auth/commit/1af203f92372e6db12454a0d319aad8ce3d149e7)) +* magiclink failing due to passwordStrength check ([#1769](https://github.com/supabase/auth/issues/1769)) 
([7a5411f](https://github.com/supabase/auth/commit/7a5411f1d4247478f91027bc4969cbbe95b7774c)) + +## [2.161.0](https://github.com/supabase/auth/compare/v2.160.0...v2.161.0) (2024-09-24) + + +### Features + +* add `x-sb-error-code` header, show error code in logs ([#1765](https://github.com/supabase/auth/issues/1765)) ([ed91c59](https://github.com/supabase/auth/commit/ed91c59aa332738bd0ac4b994aeec2cdf193a068)) +* add webauthn configuration variables ([#1773](https://github.com/supabase/auth/issues/1773)) ([77d5897](https://github.com/supabase/auth/commit/77d58976ae624dbb7f8abee041dd4557aab81109)) +* config reloading ([#1771](https://github.com/supabase/auth/issues/1771)) ([6ee0091](https://github.com/supabase/auth/commit/6ee009163bfe451e2a0b923705e073928a12c004)) + + +### Bug Fixes + +* add additional information around errors for missing content type header ([#1576](https://github.com/supabase/auth/issues/1576)) ([c2b2f96](https://github.com/supabase/auth/commit/c2b2f96f07c97c15597cd972b1cd672238d87cdc)) +* add token to hook payload for non-secure email change ([#1763](https://github.com/supabase/auth/issues/1763)) ([7e472ad](https://github.com/supabase/auth/commit/7e472ad72042e86882dab3fddce9fafa66a8236c)) +* update aal requirements to update user ([#1766](https://github.com/supabase/auth/issues/1766)) ([25d9874](https://github.com/supabase/auth/commit/25d98743f6cc2cca2b490a087f468c8556ec5e44)) +* update mfa admin methods ([#1774](https://github.com/supabase/auth/issues/1774)) ([567ea7e](https://github.com/supabase/auth/commit/567ea7ebd18eacc5e6daea8adc72e59e94459991)) +* user sanitization should clean up email change info too ([#1759](https://github.com/supabase/auth/issues/1759)) ([9d419b4](https://github.com/supabase/auth/commit/9d419b400f0637b10e5c235b8fd5bac0d69352bd)) + +## [2.160.0](https://github.com/supabase/auth/compare/v2.159.2...v2.160.0) (2024-09-02) + + +### Features + +* add authorized email address support ([#1757](https://github.com/supabase/auth/issues/1757)) ([f3a28d1](https://github.com/supabase/auth/commit/f3a28d182d193cf528cc72a985dfeaf7ecb67056)) +* add option to disable magic links ([#1756](https://github.com/supabase/auth/issues/1756)) ([2ad0737](https://github.com/supabase/auth/commit/2ad07373aa9239eba94abdabbb01c9abfa8c48de)) +* add support for saml encrypted assertions ([#1752](https://github.com/supabase/auth/issues/1752)) ([c5480ef](https://github.com/supabase/auth/commit/c5480ef83248ec2e7e3d3d87f92f43f17161ed25)) + + +### Bug Fixes + +* apply shared limiters before email / sms is sent ([#1748](https://github.com/supabase/auth/issues/1748)) ([bf276ab](https://github.com/supabase/auth/commit/bf276ab49753642793471815727559172fea4efc)) +* simplify WaitForCleanup ([#1747](https://github.com/supabase/auth/issues/1747)) ([0084625](https://github.com/supabase/auth/commit/0084625ad0790dd7c14b412d932425f4b84bb4c8)) + +## [2.159.2](https://github.com/supabase/auth/compare/v2.159.1...v2.159.2) (2024-08-28) + + +### Bug Fixes + +* allow anonymous user to update password ([#1739](https://github.com/supabase/auth/issues/1739)) ([2d51956](https://github.com/supabase/auth/commit/2d519569d7b8540886d0a64bf3e561ef5f91eb63)) +* hide hook name ([#1743](https://github.com/supabase/auth/issues/1743)) ([7e38f4c](https://github.com/supabase/auth/commit/7e38f4cf37768fe2adf92bbd0723d1d521b3d74c)) +* remove server side cookie token methods ([#1742](https://github.com/supabase/auth/issues/1742)) ([c6efec4](https://github.com/supabase/auth/commit/c6efec4cbc950e01e1fd06d45ed821bd27c2ad08)) 
+ +## [2.159.1](https://github.com/supabase/auth/compare/v2.159.0...v2.159.1) (2024-08-23) + + +### Bug Fixes + +* return oauth identity when user is created ([#1736](https://github.com/supabase/auth/issues/1736)) ([60cfb60](https://github.com/supabase/auth/commit/60cfb6063afa574dfe4993df6b0e087d4df71309)) + +## [2.159.0](https://github.com/supabase/auth/compare/v2.158.1...v2.159.0) (2024-08-21) + + +### Features + +* Vercel marketplace OIDC ([#1731](https://github.com/supabase/auth/issues/1731)) ([a9ff361](https://github.com/supabase/auth/commit/a9ff3612196af4a228b53a8bfb9c11785bcfba8d)) + + +### Bug Fixes + +* add error codes to password login flow ([#1721](https://github.com/supabase/auth/issues/1721)) ([4351226](https://github.com/supabase/auth/commit/435122627a0784f1c5cb76d7e08caa1f6259423b)) +* change phone constraint to per user ([#1713](https://github.com/supabase/auth/issues/1713)) ([b9bc769](https://github.com/supabase/auth/commit/b9bc769b93b6e700925fcbc1ebf8bf9678034205)) +* custom SMS does not work with Twilio Verify ([#1733](https://github.com/supabase/auth/issues/1733)) ([dc2391d](https://github.com/supabase/auth/commit/dc2391d15f2c0725710aa388cd32a18797e6769c)) +* ignore errors if transaction has closed already ([#1726](https://github.com/supabase/auth/issues/1726)) ([53c11d1](https://github.com/supabase/auth/commit/53c11d173a79ae5c004871b1b5840c6f9425a080)) +* redirect invalid state errors to site url ([#1722](https://github.com/supabase/auth/issues/1722)) ([b2b1123](https://github.com/supabase/auth/commit/b2b11239dc9f9bd3c85d76f6c23ee94beb3330bb)) +* remove TOTP field for phone enroll response ([#1717](https://github.com/supabase/auth/issues/1717)) ([4b04327](https://github.com/supabase/auth/commit/4b043275dd2d94600a8138d4ebf4638754ed926b)) +* use signing jwk to sign oauth state ([#1728](https://github.com/supabase/auth/issues/1728)) ([66fd0c8](https://github.com/supabase/auth/commit/66fd0c8434388bbff1e1bf02f40517aca0e9d339)) + +## [2.158.1](https://github.com/supabase/auth/compare/v2.158.0...v2.158.1) (2024-08-05) + + +### Bug Fixes + +* add last_challenged_at field to mfa factors ([#1705](https://github.com/supabase/auth/issues/1705)) ([29cbeb7](https://github.com/supabase/auth/commit/29cbeb799ff35ce528bfbd01b7103a24903d8061)) +* allow enabling sms hook without setting up sms provider ([#1704](https://github.com/supabase/auth/issues/1704)) ([575e88a](https://github.com/supabase/auth/commit/575e88ac345adaeb76ab6aae077307fdab9cda3c)) +* drop the MFA_ENABLED config ([#1701](https://github.com/supabase/auth/issues/1701)) ([078c3a8](https://github.com/supabase/auth/commit/078c3a8adcd51e57b68ab1b582549f5813cccd14)) +* enforce uniqueness on verified phone numbers ([#1693](https://github.com/supabase/auth/issues/1693)) ([70446cc](https://github.com/supabase/auth/commit/70446cc11d70b0493d742fe03f272330bb5b633e)) +* expose `X-Supabase-Api-Version` header in CORS ([#1612](https://github.com/supabase/auth/issues/1612)) ([6ccd814](https://github.com/supabase/auth/commit/6ccd814309dca70a9e3585543887194b05d725d3)) +* include factor_id in query ([#1702](https://github.com/supabase/auth/issues/1702)) ([ac14e82](https://github.com/supabase/auth/commit/ac14e82b33545466184da99e99b9d3fe5f3876d9)) +* move is owned by check to load factor ([#1703](https://github.com/supabase/auth/issues/1703)) ([701a779](https://github.com/supabase/auth/commit/701a779cf092e777dd4ad4954dc650164b09ab32)) +* refactor TOTP MFA into separate methods ([#1698](https://github.com/supabase/auth/issues/1698)) 
([250d92f](https://github.com/supabase/auth/commit/250d92f9a18d38089d1bf262ef9088022a446965)) +* remove check for content-length ([#1700](https://github.com/supabase/auth/issues/1700)) ([81b332d](https://github.com/supabase/auth/commit/81b332d2f48622008469d2c5a9b130465a65f2a3)) +* remove FindFactorsByUser ([#1707](https://github.com/supabase/auth/issues/1707)) ([af8e2dd](https://github.com/supabase/auth/commit/af8e2dda15a1234a05e7d2d34d316eaa029e0912)) +* update openapi spec for MFA (Phone) ([#1689](https://github.com/supabase/auth/issues/1689)) ([a3da4b8](https://github.com/supabase/auth/commit/a3da4b89820c37f03ea128889616aca598d99f68)) + +## [2.158.0](https://github.com/supabase/auth/compare/v2.157.0...v2.158.0) (2024-07-31) + + +### Features + +* add hook log entry with `run_hook` action ([#1684](https://github.com/supabase/auth/issues/1684)) ([46491b8](https://github.com/supabase/auth/commit/46491b867a4f5896494417391392a373a453fa5f)) +* MFA (Phone) ([#1668](https://github.com/supabase/auth/issues/1668)) ([ae091aa](https://github.com/supabase/auth/commit/ae091aa942bdc5bc97481037508ec3bb4079d859)) + + +### Bug Fixes + +* maintain backward compatibility for asymmetric JWTs ([#1690](https://github.com/supabase/auth/issues/1690)) ([0ad1402](https://github.com/supabase/auth/commit/0ad1402444348e47e1e42be186b3f052d31be824)) +* MFA NewFactor to default to creating unverfied factors ([#1692](https://github.com/supabase/auth/issues/1692)) ([3d448fa](https://github.com/supabase/auth/commit/3d448fa73cb77eb8511dbc47bfafecce4a4a2150)) +* minor spelling errors ([#1688](https://github.com/supabase/auth/issues/1688)) ([6aca52b](https://github.com/supabase/auth/commit/6aca52b56f8a6254de7709c767b9a5649f1da248)), closes [#1682](https://github.com/supabase/auth/issues/1682) +* treat `GOTRUE_MFA_ENABLED` as meaning TOTP enabled on enroll and verify ([#1694](https://github.com/supabase/auth/issues/1694)) ([8015251](https://github.com/supabase/auth/commit/8015251400bd52cbdad3ea28afb83b1cdfe816dd)) +* update mfa phone migration to be idempotent ([#1687](https://github.com/supabase/auth/issues/1687)) ([fdff1e7](https://github.com/supabase/auth/commit/fdff1e703bccf93217636266f1862bd0a9205edb)) + +## [2.157.0](https://github.com/supabase/auth/compare/v2.156.0...v2.157.0) (2024-07-26) + + +### Features + +* add asymmetric jwt support ([#1674](https://github.com/supabase/auth/issues/1674)) ([c7a2be3](https://github.com/supabase/auth/commit/c7a2be347b301b666e99adc3d3fed78c5e287c82)) + +## [2.156.0](https://github.com/supabase/auth/compare/v2.155.6...v2.156.0) (2024-07-25) + + +### Features + +* add is_anonymous claim to Auth hook jsonschema ([#1667](https://github.com/supabase/auth/issues/1667)) ([f9df65c](https://github.com/supabase/auth/commit/f9df65c91e226084abfa2e868ab6bab892d16d2f)) + + +### Bug Fixes + +* restrict autoconfirm email change to anonymous users ([#1679](https://github.com/supabase/auth/issues/1679)) ([b57e223](https://github.com/supabase/auth/commit/b57e2230102280ed873acf70be1aeb5a2f6f7a4f)) + +## [2.155.6](https://github.com/supabase/auth/compare/v2.155.5...v2.155.6) (2024-07-22) + + +### Bug Fixes + +* use deep equal ([#1672](https://github.com/supabase/auth/issues/1672)) ([8efd57d](https://github.com/supabase/auth/commit/8efd57dab40346762a04bac61b314ce05d6fa69c)) + +## [2.155.5](https://github.com/supabase/auth/compare/v2.155.4...v2.155.5) (2024-07-19) + + +### Bug Fixes + +* check password max length in checkPasswordStrength ([#1659](https://github.com/supabase/auth/issues/1659)) 
([1858c93](https://github.com/supabase/auth/commit/1858c93bba6f5bc41e4c65489f12c1a0786a1f2b)) +* don't update attribute mapping if nil ([#1665](https://github.com/supabase/auth/issues/1665)) ([7e67f3e](https://github.com/supabase/auth/commit/7e67f3edbf81766df297a66f52a8e472583438c6)) +* refactor mfa models and add observability to loadFactor ([#1669](https://github.com/supabase/auth/issues/1669)) ([822fb93](https://github.com/supabase/auth/commit/822fb93faab325ba3d4bb628dff43381d68d0b5d)) + +## [2.155.4](https://github.com/supabase/auth/compare/v2.155.3...v2.155.4) (2024-07-17) + + +### Bug Fixes + +* treat empty string as nil in `encrypted_password` ([#1663](https://github.com/supabase/auth/issues/1663)) ([f99286e](https://github.com/supabase/auth/commit/f99286eaed505daf3db6f381265ef6024e7e36d2)) + +## [2.155.3](https://github.com/supabase/auth/compare/v2.155.2...v2.155.3) (2024-07-12) + + +### Bug Fixes + +* serialize jwt as string ([#1657](https://github.com/supabase/auth/issues/1657)) ([98d8324](https://github.com/supabase/auth/commit/98d83245e40d606438eb0afdbf474276179fd91d)) + +## [2.155.2](https://github.com/supabase/auth/compare/v2.155.1...v2.155.2) (2024-07-12) + + +### Bug Fixes + +* improve session error logging ([#1655](https://github.com/supabase/auth/issues/1655)) ([5a6793e](https://github.com/supabase/auth/commit/5a6793ee8fce7a089750fe10b3b63bb0a19d6d21)) +* omit empty string from name & use case-insensitive equality for comparing SAML attributes ([#1654](https://github.com/supabase/auth/issues/1654)) ([bf5381a](https://github.com/supabase/auth/commit/bf5381a6b1c686955dc4e39fe5fb806ffd309563)) +* set rate limit log level to warn ([#1652](https://github.com/supabase/auth/issues/1652)) ([10ca9c8](https://github.com/supabase/auth/commit/10ca9c806e4b67a371897f1b3f93c515764c4240)) + +## [2.155.1](https://github.com/supabase/auth/compare/v2.155.0...v2.155.1) (2024-07-04) + + +### Bug Fixes + +* apply mailer autoconfirm config to update user email ([#1646](https://github.com/supabase/auth/issues/1646)) ([a518505](https://github.com/supabase/auth/commit/a5185058e72509b0781e0eb59910ecdbb8676fee)) +* check for empty aud string ([#1649](https://github.com/supabase/auth/issues/1649)) ([42c1d45](https://github.com/supabase/auth/commit/42c1d4526b98203664d4a22c23014ecd0b4951f9)) +* return proper error if sms rate limit is exceeded ([#1647](https://github.com/supabase/auth/issues/1647)) ([3c8d765](https://github.com/supabase/auth/commit/3c8d7656431ac4b2e80726b7c37adb8f0c778495)) + +## [2.155.0](https://github.com/supabase/auth/compare/v2.154.2...v2.155.0) (2024-07-03) + + +### Features + +* add `password_hash` and `id` fields to admin create user ([#1641](https://github.com/supabase/auth/issues/1641)) ([20d59f1](https://github.com/supabase/auth/commit/20d59f10b601577683d05bcd7d2128ff4bc462a0)) + + +### Bug Fixes + +* improve mfa verify logs ([#1635](https://github.com/supabase/auth/issues/1635)) ([d8b47f9](https://github.com/supabase/auth/commit/d8b47f9d3f0dc8f97ad1de49e45f452ebc726481)) +* invited users should have a temporary password generated ([#1644](https://github.com/supabase/auth/issues/1644)) ([3f70d9d](https://github.com/supabase/auth/commit/3f70d9d8974d0e9c437c51e1312ad17ce9056ec9)) +* upgrade golang-jwt to v5 ([#1639](https://github.com/supabase/auth/issues/1639)) ([2cb97f0](https://github.com/supabase/auth/commit/2cb97f080fa4695766985cc4792d09476534be68)) +* use pointer for `user.EncryptedPassword` ([#1637](https://github.com/supabase/auth/issues/1637)) 
([bbecbd6](https://github.com/supabase/auth/commit/bbecbd61a46b0c528b1191f48d51f166c06f4b16)) + +## [2.154.2](https://github.com/supabase/auth/compare/v2.154.1...v2.154.2) (2024-06-24) + + +### Bug Fixes + +* publish to ghcr.io/supabase/auth ([#1626](https://github.com/supabase/auth/issues/1626)) ([930aa3e](https://github.com/supabase/auth/commit/930aa3edb633823d4510c2aff675672df06f1211)), closes [#1625](https://github.com/supabase/auth/issues/1625) +* revert define search path in auth functions ([#1634](https://github.com/supabase/auth/issues/1634)) ([155e87e](https://github.com/supabase/auth/commit/155e87ef8129366d665968f64d1fc66676d07e16)) +* update MaxFrequency error message to reflect number of seconds ([#1540](https://github.com/supabase/auth/issues/1540)) ([e81c25d](https://github.com/supabase/auth/commit/e81c25d19551fdebfc5197d96bc220ddb0f8227b)) + +## [2.154.1](https://github.com/supabase/auth/compare/v2.154.0...v2.154.1) (2024-06-17) + + +### Bug Fixes + +* add ip based limiter ([#1622](https://github.com/supabase/auth/issues/1622)) ([06464c0](https://github.com/supabase/auth/commit/06464c013571253d1f18f7ae5e840826c4bd84a7)) +* admin user update should update is_anonymous field ([#1623](https://github.com/supabase/auth/issues/1623)) ([f5c6fcd](https://github.com/supabase/auth/commit/f5c6fcd9c3fee0f793f96880a8caebc5b5cb0916)) + +## [2.154.0](https://github.com/supabase/auth/compare/v2.153.0...v2.154.0) (2024-06-12) + + +### Features + +* add max length check for email ([#1508](https://github.com/supabase/auth/issues/1508)) ([f9c13c0](https://github.com/supabase/auth/commit/f9c13c0ad5c556bede49d3e0f6e5f58ca26161c3)) +* add support for Slack OAuth V2 ([#1591](https://github.com/supabase/auth/issues/1591)) ([bb99251](https://github.com/supabase/auth/commit/bb992519cdf7578dc02cd7de55e2e6aa09b4c0f3)) +* encrypt sensitive columns ([#1593](https://github.com/supabase/auth/issues/1593)) ([e4a4758](https://github.com/supabase/auth/commit/e4a475820b2dc1f985bd37df15a8ab9e781626f5)) +* upgrade otel to v1.26 ([#1585](https://github.com/supabase/auth/issues/1585)) ([cdd13ad](https://github.com/supabase/auth/commit/cdd13adec02eb0c9401bc55a2915c1005d50dea1)) +* use largest avatar from spotify instead ([#1210](https://github.com/supabase/auth/issues/1210)) ([4f9994b](https://github.com/supabase/auth/commit/4f9994bf792c3887f2f45910b11a9c19ee3a896b)), closes [#1209](https://github.com/supabase/auth/issues/1209) + + +### Bug Fixes + +* define search path in auth functions ([#1616](https://github.com/supabase/auth/issues/1616)) ([357bda2](https://github.com/supabase/auth/commit/357bda23cb2abd12748df80a9d27288aa548534d)) +* enable rls & update grants for auth tables ([#1617](https://github.com/supabase/auth/issues/1617)) ([28967aa](https://github.com/supabase/auth/commit/28967aa4b5db2363cc581c9da0d64e974eb7b64c)) + +## [2.153.0](https://github.com/supabase/auth/compare/v2.152.0...v2.153.0) (2024-06-04) + + +### Features + +* add SAML specific external URL config ([#1599](https://github.com/supabase/auth/issues/1599)) ([b352719](https://github.com/supabase/auth/commit/b3527190560381fafe9ba2fae4adc3b73703024a)) +* add support for verifying argon2i and argon2id passwords ([#1597](https://github.com/supabase/auth/issues/1597)) ([55409f7](https://github.com/supabase/auth/commit/55409f797bea55068a3fafdddd6cfdb78feba1b4)) +* make the email client explicity set the format to be HTML ([#1149](https://github.com/supabase/auth/issues/1149)) 
([53e223a](https://github.com/supabase/auth/commit/53e223abdf29f4abcad13f99baf00daedcb00c3f)) + + +### Bug Fixes + +* call write header in write if not written ([#1598](https://github.com/supabase/auth/issues/1598)) ([0ef7eb3](https://github.com/supabase/auth/commit/0ef7eb30619d4c365e06a94a79b9cb0333d792da)) +* deadlock issue with timeout middleware write ([#1595](https://github.com/supabase/auth/issues/1595)) ([6c9fbd4](https://github.com/supabase/auth/commit/6c9fbd4bd5623c729906fca7857ab508166a3056)) +* improve token OIDC logging ([#1606](https://github.com/supabase/auth/issues/1606)) ([5262683](https://github.com/supabase/auth/commit/526268311844467664e89c8329e5aaee817dbbaf)) +* update contributing to use v1.22 ([#1609](https://github.com/supabase/auth/issues/1609)) ([5894d9e](https://github.com/supabase/auth/commit/5894d9e41e7681512a9904ad47082a705e948c98)) + +## [2.152.0](https://github.com/supabase/auth/compare/v2.151.0...v2.152.0) (2024-05-22) + + +### Features + +* new timeout writer implementation ([#1584](https://github.com/supabase/auth/issues/1584)) ([72614a1](https://github.com/supabase/auth/commit/72614a1fce27888f294772b512f8e31c55a36d87)) +* remove legacy lookup in users for one_time_tokens (phase II) ([#1569](https://github.com/supabase/auth/issues/1569)) ([39ca026](https://github.com/supabase/auth/commit/39ca026035f6c61d206d31772c661b326c2a424c)) +* update chi version ([#1581](https://github.com/supabase/auth/issues/1581)) ([c64ae3d](https://github.com/supabase/auth/commit/c64ae3dd775e8fb3022239252c31b4ee73893237)) +* update openapi spec with identity and is_anonymous fields ([#1573](https://github.com/supabase/auth/issues/1573)) ([86a79df](https://github.com/supabase/auth/commit/86a79df9ecfcf09fda0b8e07afbc41154fbb7d9d)) + + +### Bug Fixes + +* improve logging structure ([#1583](https://github.com/supabase/auth/issues/1583)) ([c22fc15](https://github.com/supabase/auth/commit/c22fc15d2a8383e95a2364f383dfa7dce5f5df88)) +* sms verify should update is_anonymous field ([#1580](https://github.com/supabase/auth/issues/1580)) ([e5f98cb](https://github.com/supabase/auth/commit/e5f98cb9e24ecebb0b7dc88c495fd456cc73fcba)) +* use api_external_url domain as localname ([#1575](https://github.com/supabase/auth/issues/1575)) ([ed2b490](https://github.com/supabase/auth/commit/ed2b4907244281e4c54aaef74b1f4c8a8e3d97c9)) + +## [2.151.0](https://github.com/supabase/auth/compare/v2.150.1...v2.151.0) (2024-05-06) + + +### Features + +* refactor one-time tokens for performance ([#1558](https://github.com/supabase/auth/issues/1558)) ([d1cf8d9](https://github.com/supabase/auth/commit/d1cf8d9096e9183d7772b73031de8ecbd66e912b)) + + +### Bug Fixes + +* do call send sms hook when SMS autoconfirm is enabled ([#1562](https://github.com/supabase/auth/issues/1562)) ([bfe4d98](https://github.com/supabase/auth/commit/bfe4d988f3768b0407526bcc7979fb21d8cbebb3)) +* format test otps ([#1567](https://github.com/supabase/auth/issues/1567)) ([434a59a](https://github.com/supabase/auth/commit/434a59ae387c35fd6629ec7c674d439537e344e5)) +* log final writer error instead of handling ([#1564](https://github.com/supabase/auth/issues/1564)) ([170bd66](https://github.com/supabase/auth/commit/170bd6615405afc852c7107f7358dfc837bad737)) + +## [2.150.1](https://github.com/supabase/auth/compare/v2.150.0...v2.150.1) (2024-04-28) + + +### Bug Fixes + +* add db conn max idle time setting ([#1555](https://github.com/supabase/auth/issues/1555)) 
([2caa7b4](https://github.com/supabase/auth/commit/2caa7b4d75d2ff54af20f3e7a30a8eeec8cbcda9)) + +## [2.150.0](https://github.com/supabase/auth/compare/v2.149.0...v2.150.0) (2024-04-25) + + +### Features + +* add support for Azure CIAM login ([#1541](https://github.com/supabase/auth/issues/1541)) ([1cb4f96](https://github.com/supabase/auth/commit/1cb4f96bdc7ef3ef995781b4cf3c4364663a2bf3)) +* add timeout middleware ([#1529](https://github.com/supabase/auth/issues/1529)) ([f96ff31](https://github.com/supabase/auth/commit/f96ff31040b28e3a7373b4fd41b7334eda1b413e)) +* allow for postgres and http functions on each extensibility point ([#1528](https://github.com/supabase/auth/issues/1528)) ([348a1da](https://github.com/supabase/auth/commit/348a1daee24f6e44b14c018830b748e46d34b4c2)) +* merge provider metadata on link account ([#1552](https://github.com/supabase/auth/issues/1552)) ([bd8b5c4](https://github.com/supabase/auth/commit/bd8b5c41dd544575e1a52ccf1ef3f0fdee67458c)) +* send over user in SendSMS Hook instead of UserID ([#1551](https://github.com/supabase/auth/issues/1551)) ([d4d743c](https://github.com/supabase/auth/commit/d4d743c2ae9490e1b3249387e3b0d60df6913c68)) + + +### Bug Fixes + +* return error if session id does not exist ([#1538](https://github.com/supabase/auth/issues/1538)) ([91e9eca](https://github.com/supabase/auth/commit/91e9ecabe33a1c022f8e82a6050c22a7ca42de48)) + +## [2.149.0](https://github.com/supabase/auth/compare/v2.148.0...v2.149.0) (2024-04-15) + + +### Features + +* refactor generate accesss token to take in request ([#1531](https://github.com/supabase/auth/issues/1531)) ([e4f2b59](https://github.com/supabase/auth/commit/e4f2b59e8e1f8158b6461a384349f1a32cc1bf9a)) + + +### Bug Fixes + +* linkedin_oidc provider error ([#1534](https://github.com/supabase/auth/issues/1534)) ([4f5e8e5](https://github.com/supabase/auth/commit/4f5e8e5120531e5a103fbdda91b51cabcb4e1a8c)) +* revert patch for linkedin_oidc provider error ([#1535](https://github.com/supabase/auth/issues/1535)) ([58ef4af](https://github.com/supabase/auth/commit/58ef4af0b4224b78cd9e59428788d16a8d31e562)) +* update linkedin issuer url ([#1536](https://github.com/supabase/auth/issues/1536)) ([10d6d8b](https://github.com/supabase/auth/commit/10d6d8b1eafa504da2b2a351d1f64a3a832ab1b9)) + +## [2.148.0](https://github.com/supabase/auth/compare/v2.147.1...v2.148.0) (2024-04-10) + + +### Features + +* add array attribute mapping for SAML ([#1526](https://github.com/supabase/auth/issues/1526)) ([7326285](https://github.com/supabase/auth/commit/7326285c8af5c42e5c0c2d729ab224cf33ac3a1f)) + +## [2.147.1](https://github.com/supabase/auth/compare/v2.147.0...v2.147.1) (2024-04-09) + + +### Bug Fixes + +* add validation and proper decoding on send email hook ([#1520](https://github.com/supabase/auth/issues/1520)) ([e19e762](https://github.com/supabase/auth/commit/e19e762e3e29729a1d1164c65461427822cc87f1)) +* remove deprecated LogoutAllRefreshTokens ([#1519](https://github.com/supabase/auth/issues/1519)) ([35533ea](https://github.com/supabase/auth/commit/35533ea100669559e1209ecc7b091db3657234d9)) + +## [2.147.0](https://github.com/supabase/auth/compare/v2.146.0...v2.147.0) (2024-04-05) + + +### Features + +* add send email Hook ([#1512](https://github.com/supabase/auth/issues/1512)) ([cf42e02](https://github.com/supabase/auth/commit/cf42e02ec63779f52b1652a7413f64994964c82d)) + +## [2.146.0](https://github.com/supabase/auth/compare/v2.145.0...v2.146.0) (2024-04-03) + + +### Features + +* add custom sms hook 
([#1474](https://github.com/supabase/auth/issues/1474)) ([0f6b29a](https://github.com/supabase/auth/commit/0f6b29a46f1dcbf92aa1f7cb702f42e7640f5f93)) +* forbid generating an access token without a session ([#1504](https://github.com/supabase/auth/issues/1504)) ([795e93d](https://github.com/supabase/auth/commit/795e93d0afbe94bcd78489a3319a970b7bf8e8bc)) + + +### Bug Fixes + +* add cleanup statement for anonymous users ([#1497](https://github.com/supabase/auth/issues/1497)) ([cf2372a](https://github.com/supabase/auth/commit/cf2372a177796b829b72454e7491ce768bf5a42f)) +* generate signup link should not error ([#1514](https://github.com/supabase/auth/issues/1514)) ([4fc3881](https://github.com/supabase/auth/commit/4fc388186ac7e7a9a32ca9b963a83d6ac2eb7603)) +* move all EmailActionTypes to mailer package ([#1510](https://github.com/supabase/auth/issues/1510)) ([765db08](https://github.com/supabase/auth/commit/765db08582669a1b7f054217fa8f0ed45804c0b5)) +* refactor mfa and aal update methods ([#1503](https://github.com/supabase/auth/issues/1503)) ([31a5854](https://github.com/supabase/auth/commit/31a585429bf248aa919d94c82c7c9e0c1c695461)) +* rename from CustomSMSProvider to SendSMS ([#1513](https://github.com/supabase/auth/issues/1513)) ([c0bc37b](https://github.com/supabase/auth/commit/c0bc37b44effaebb62ba85102f072db07fe57e48)) + +## [2.145.0](https://github.com/supabase/gotrue/compare/v2.144.0...v2.145.0) (2024-03-26) + + +### Features + +* add error codes ([#1377](https://github.com/supabase/gotrue/issues/1377)) ([e4beea1](https://github.com/supabase/gotrue/commit/e4beea1cdb80544b0581f1882696a698fdf64938)) +* add kakao OIDC ([#1381](https://github.com/supabase/gotrue/issues/1381)) ([b5566e7](https://github.com/supabase/gotrue/commit/b5566e7ac001cc9f2bac128de0fcb908caf3a5ed)) +* clean up expired factors ([#1371](https://github.com/supabase/gotrue/issues/1371)) ([5c94207](https://github.com/supabase/gotrue/commit/5c9420743a9aef0675f823c30aa4525b4933836e)) +* configurable NameID format for SAML provider ([#1481](https://github.com/supabase/gotrue/issues/1481)) ([ef405d8](https://github.com/supabase/gotrue/commit/ef405d89e69e008640f275bc37f8ec02ad32da40)) +* HTTP Hook - Add custom envconfig decoding for HTTP Hook Secrets ([#1467](https://github.com/supabase/gotrue/issues/1467)) ([5b24c4e](https://github.com/supabase/gotrue/commit/5b24c4eb05b2b52c4177d5f41cba30cb68495c8c)) +* refactor PKCE FlowState to reduce duplicate code ([#1446](https://github.com/supabase/gotrue/issues/1446)) ([b8d0337](https://github.com/supabase/gotrue/commit/b8d0337922c6712380f6dc74f7eac9fb71b1ae48)) + + +### Bug Fixes + +* add http support for https hooks on localhost ([#1484](https://github.com/supabase/gotrue/issues/1484)) ([5c04104](https://github.com/supabase/gotrue/commit/5c04104bf77a9c2db46d009764ec3ec3e484fc09)) +* cleanup panics due to bad inactivity timeout code ([#1471](https://github.com/supabase/gotrue/issues/1471)) ([548edf8](https://github.com/supabase/gotrue/commit/548edf898161c9ba9a136fc99ec2d52a8ba1f856)) +* **docs:** remove bracket on file name for broken link ([#1493](https://github.com/supabase/gotrue/issues/1493)) ([96f7a68](https://github.com/supabase/gotrue/commit/96f7a68a5479825e31106c2f55f82d5b2c007c0f)) +* impose expiry on auth code instead of magic link ([#1440](https://github.com/supabase/gotrue/issues/1440)) ([35aeaf1](https://github.com/supabase/gotrue/commit/35aeaf1b60dd27a22662a6d1955d60cc907b55dd)) +* invalidate email, phone OTPs on password change 
([#1489](https://github.com/supabase/gotrue/issues/1489)) ([960a4f9](https://github.com/supabase/gotrue/commit/960a4f94f5500e33a0ec2f6afe0380bbc9562500)) +* move creation of flow state into function ([#1470](https://github.com/supabase/gotrue/issues/1470)) ([4392a08](https://github.com/supabase/gotrue/commit/4392a08d68d18828005d11382730117a7b143635)) +* prevent user email side-channel leak on verify ([#1472](https://github.com/supabase/gotrue/issues/1472)) ([311cde8](https://github.com/supabase/gotrue/commit/311cde8d1e82f823ae26a341e068034d60273864)) +* refactor email sending functions ([#1495](https://github.com/supabase/gotrue/issues/1495)) ([285c290](https://github.com/supabase/gotrue/commit/285c290adf231fea7ca1dff954491dc427cf18e2)) +* refactor factor_test to centralize setup ([#1473](https://github.com/supabase/gotrue/issues/1473)) ([c86007e](https://github.com/supabase/gotrue/commit/c86007e59684334b5e8c2285c36094b6eec89442)) +* refactor mfa challenge and tests ([#1469](https://github.com/supabase/gotrue/issues/1469)) ([6c76f21](https://github.com/supabase/gotrue/commit/6c76f21cee5dbef0562c37df6a546939affb2f8d)) +* Resend SMS when duplicate SMS sign ups are made ([#1490](https://github.com/supabase/gotrue/issues/1490)) ([73240a0](https://github.com/supabase/gotrue/commit/73240a0b096977703e3c7d24a224b5641ce47c81)) +* unlink identity bugs ([#1475](https://github.com/supabase/gotrue/issues/1475)) ([73e8d87](https://github.com/supabase/gotrue/commit/73e8d8742de3575b3165a707b5d2f486b2598d9d)) + +## [2.144.0](https://github.com/supabase/gotrue/compare/v2.143.0...v2.144.0) (2024-03-04) + + +### Features + +* add configuration for custom sms sender hook ([#1428](https://github.com/supabase/gotrue/issues/1428)) ([1ea56b6](https://github.com/supabase/gotrue/commit/1ea56b62d47edb0766d9e445406ecb43d387d920)) +* anonymous sign-ins ([#1460](https://github.com/supabase/gotrue/issues/1460)) ([130df16](https://github.com/supabase/gotrue/commit/130df165270c69c8e28aaa1b9421342f997c1ff3)) +* clean up test setup in MFA tests ([#1452](https://github.com/supabase/gotrue/issues/1452)) ([7185af8](https://github.com/supabase/gotrue/commit/7185af8de4a269cdde2629054d222333d3522ebe)) +* pass transaction to `invokeHook`, fixing pool exhaustion ([#1465](https://github.com/supabase/gotrue/issues/1465)) ([b536d36](https://github.com/supabase/gotrue/commit/b536d368f35adb31f937169e3f093d28352fa7be)) +* refactor resource owner password grant ([#1443](https://github.com/supabase/gotrue/issues/1443)) ([e63ad6f](https://github.com/supabase/gotrue/commit/e63ad6ff0f67d9a83456918a972ecb5109125628)) +* use dummy instance id to improve performance on refresh token queries ([#1454](https://github.com/supabase/gotrue/issues/1454)) ([656474e](https://github.com/supabase/gotrue/commit/656474e1b9ff3d5129190943e8c48e456625afe5)) + + +### Bug Fixes + +* expose `provider` under `amr` in access token ([#1456](https://github.com/supabase/gotrue/issues/1456)) ([e9f38e7](https://github.com/supabase/gotrue/commit/e9f38e76d8a7b93c5c2bb0de918a9b156155f018)) +* improve MFA QR Code resilience so as to support providers like 1Password ([#1455](https://github.com/supabase/gotrue/issues/1455)) ([6522780](https://github.com/supabase/gotrue/commit/652278046c9dd92f5cecd778735b058ef3fb41c7)) +* refactor request params to use generics ([#1464](https://github.com/supabase/gotrue/issues/1464)) ([e1cdf5c](https://github.com/supabase/gotrue/commit/e1cdf5c4b5c1bf467094f4bdcaa2e42a5cc51c20)) +* revert refactor resource owner password grant 
([#1466](https://github.com/supabase/gotrue/issues/1466)) ([fa21244](https://github.com/supabase/gotrue/commit/fa21244fa929709470c2e1fc4092a9ce947399e7)) +* update file name so migration to Drop IP Address is applied ([#1447](https://github.com/supabase/gotrue/issues/1447)) ([f29e89d](https://github.com/supabase/gotrue/commit/f29e89d7d2c48ee8fd5bf8279a7fa3db0ad4d842)) + +## [2.143.0](https://github.com/supabase/gotrue/compare/v2.142.0...v2.143.0) (2024-02-19) + + +### Features + +* calculate aal without transaction ([#1437](https://github.com/supabase/gotrue/issues/1437)) ([8dae661](https://github.com/supabase/gotrue/commit/8dae6614f1a2b58819f94894cef01e9f99117769)) + + +### Bug Fixes + +* deprecate hooks ([#1421](https://github.com/supabase/gotrue/issues/1421)) ([effef1b](https://github.com/supabase/gotrue/commit/effef1b6ecc448b7927eff23df8d5b509cf16b5c)) +* error should be an IsNotFoundError ([#1432](https://github.com/supabase/gotrue/issues/1432)) ([7f40047](https://github.com/supabase/gotrue/commit/7f40047aec3577d876602444b1d88078b2237d66)) +* populate password verification attempt hook ([#1436](https://github.com/supabase/gotrue/issues/1436)) ([f974bdb](https://github.com/supabase/gotrue/commit/f974bdb58340395955ca27bdd26d57062433ece9)) +* restrict mfa enrollment to aal2 if verified factors are present ([#1439](https://github.com/supabase/gotrue/issues/1439)) ([7e10d45](https://github.com/supabase/gotrue/commit/7e10d45e54010d38677f4c3f2f224127688eb9a2)) +* update phone if autoconfirm is enabled ([#1431](https://github.com/supabase/gotrue/issues/1431)) ([95db770](https://github.com/supabase/gotrue/commit/95db770c5d2ecca4a1e960a8cb28ded37cccc100)) +* use email change email in identity ([#1429](https://github.com/supabase/gotrue/issues/1429)) ([4d3b9b8](https://github.com/supabase/gotrue/commit/4d3b9b8841b1a5fa8f3244825153cc81a73ba300)) + +## [2.142.0](https://github.com/supabase/gotrue/compare/v2.141.0...v2.142.0) (2024-02-14) + + +### Features + +* alter tag to use raw ([#1427](https://github.com/supabase/gotrue/issues/1427)) ([53cfe5d](https://github.com/supabase/gotrue/commit/53cfe5de57d4b5ab6e8e2915493856ecd96f4ede)) +* update README.md to trigger release ([#1425](https://github.com/supabase/gotrue/issues/1425)) ([91e0e24](https://github.com/supabase/gotrue/commit/91e0e245f5957ebce13370f79fd4a6be8108ed80)) + +## [2.141.0](https://github.com/supabase/gotrue/compare/v2.140.0...v2.141.0) (2024-02-13) + + +### Features + +* drop sha hash tag ([#1422](https://github.com/supabase/gotrue/issues/1422)) ([76853ce](https://github.com/supabase/gotrue/commit/76853ce6d45064de5608acc8100c67a8337ba791)) +* prefix release with v ([#1424](https://github.com/supabase/gotrue/issues/1424)) ([9d398cd](https://github.com/supabase/gotrue/commit/9d398cd75fca01fb848aa88b4f545552e8b5751a)) + +## [2.140.0](https://github.com/supabase/gotrue/compare/v2.139.2...v2.140.0) (2024-02-13) + + +### Features + +* deprecate existing webhook implementation ([#1417](https://github.com/supabase/gotrue/issues/1417)) ([5301e48](https://github.com/supabase/gotrue/commit/5301e481b0c7278c18b4578a5b1aa8d2256c2f5d)) +* update publish.yml checkout repository so there is access to Dockerfile ([#1419](https://github.com/supabase/gotrue/issues/1419)) ([7cce351](https://github.com/supabase/gotrue/commit/7cce3518e8c9f1f3f93e4f6a0658ee08771c4f1c)) + +## [2.139.2](https://github.com/supabase/gotrue/compare/v2.139.1...v2.139.2) (2024-02-08) + + +### Bug Fixes + +* improve perf in account linking 
([#1394](https://github.com/supabase/gotrue/issues/1394)) ([8eedb95](https://github.com/supabase/gotrue/commit/8eedb95dbaa310aac464645ec91d6a374813ab89)) +* OIDC provider validation log message ([#1380](https://github.com/supabase/gotrue/issues/1380)) ([27e6b1f](https://github.com/supabase/gotrue/commit/27e6b1f9a4394c5c4f8dff9a8b5529db1fc67af9)) +* only create or update the email / phone identity after it's been verified ([#1403](https://github.com/supabase/gotrue/issues/1403)) ([2d20729](https://github.com/supabase/gotrue/commit/2d207296ec22dd6c003c89626d255e35441fd52d)) +* only create or update the email / phone identity after it's been verified (again) ([#1409](https://github.com/supabase/gotrue/issues/1409)) ([bc6a5b8](https://github.com/supabase/gotrue/commit/bc6a5b884b43fe6b8cb924d3f79999fe5bfe7c5f)) +* unmarshal is_private_email correctly ([#1402](https://github.com/supabase/gotrue/issues/1402)) ([47df151](https://github.com/supabase/gotrue/commit/47df15113ce8d86666c0aba3854954c24fe39f7f)) +* use `pattern` for semver docker image tags ([#1411](https://github.com/supabase/gotrue/issues/1411)) ([14a3aeb](https://github.com/supabase/gotrue/commit/14a3aeb6c3f46c8d38d98cc840112dfd0278eeda)) + + +### Reverts + +* "fix: only create or update the email / phone identity after i… ([#1407](https://github.com/supabase/gotrue/issues/1407)) ([ff86849](https://github.com/supabase/gotrue/commit/ff868493169a0d9ac18b66058a735197b1df5b9b)) diff --git a/auth_v2.169.0/CODEOWNERS b/auth_v2.169.0/CODEOWNERS new file mode 100644 index 0000000..cb9b3ca --- /dev/null +++ b/auth_v2.169.0/CODEOWNERS @@ -0,0 +1 @@ +* @supabase/auth diff --git a/auth_v2.169.0/CODE_OF_CONDUCT.md b/auth_v2.169.0/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..2867c8a --- /dev/null +++ b/auth_v2.169.0/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at david@netlify.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/auth_v2.169.0/CONTRIBUTING.md b/auth_v2.169.0/CONTRIBUTING.md new file mode 100644 index 0000000..22a1add --- /dev/null +++ b/auth_v2.169.0/CONTRIBUTING.md @@ -0,0 +1,523 @@ +# CONTRIBUTING + +We would love to have contributions from each and every one of you in the community be it big or small and you are the ones who motivate us to do better than what we do today. + +## Code Of Conduct + +Please help us keep all our projects open and inclusive. Kindly follow our [Code of Conduct](CODE_OF_CONDUCT.md) to keep the ecosystem healthy and friendly for all. + +## Quick Start + +Auth has a development container setup that makes it easy to get started contributing. This setup only requires that [Docker](https://www.docker.com/get-started) is setup on your system. The development container setup includes a PostgreSQL container with migrations already applied and a container running GoTrue that will perform a hot reload when changes to the source code are detected. + +If you would like to run Auth locally or learn more about what these containers are doing for you, continue reading the [Setup and Tooling](#setup-and-tooling) section below. Otherwise, you can skip ahead to the [How To Verify that GoTrue is Available](#how-to-verify-that-auth-is-available) section to learn about working with and developing GoTrue. + +Before using the containers, you will need to make sure an `.env.docker` file exists by making a copy of `example.docker.env` and configuring it for your needs. The set of env vars in `example.docker.env` only contain the necessary env vars for auth to start in a docker environment. For the full list of env vars, please refer to `example.env` and copy over the necessary ones into your `.env.docker` file. 
+ +The following are some basic commands. A full and up to date list of commands can be found in the project's `Makefile` or by running `make help`. + +### Starting the containers + +Start the containers as described above in an attached state with log output. + +```bash +make dev +``` + +### Running tests in the containers + +Start the containers with a fresh database and run the project's tests. + +```bash +make docker-test +``` + +### Removing the containers + +Remove both containers and their volumes. This removes any data associated with the containers. + +```bash +make docker-clean +``` + +### Rebuild the containers + +Fully rebuild the containers without using any cached layers. + +```bash +make docker-build +``` + +## Setup and Tooling + +Auth -- as the name implies -- is a user registration and authentication API developed in [Go](https://go.dev). + +It connects to a [PostgreSQL](https://www.postgresql.org) database in order to store authentication data, [Soda CLI](https://gobuffalo.io/en/docs/db/toolbox) to manage database schema and migrations, +and runs inside a [Docker](https://www.docker.com/get-started) container. + +Therefore, to contribute to Auth you will need to install these tools. + +### Install Tools + +- Install [Go](https://go.dev) 1.22 + +```zsh +# Via Homebrew on macOS +brew install go@1.22 + +# Set the environment variable in the ~/.zshrc file +echo 'export PATH="/opt/homebrew/opt/go@1.22/bin:$PATH"' >> ~/.zshrc +``` + +- Install [Docker](https://www.docker.com/get-started) + +```zsh +# Via Homebrew on macOS +brew install docker +``` + +Or, if you prefer, download [Docker Desktop](https://www.docker.com/get-started). + +- Install [Soda CLI](https://gobuffalo.io/en/docs/db/toolbox) + +```zsh +# Via Homebrew on macOS +brew install gobuffalo/tap/pop +``` + +If you are on macOS Catalina you may [run into issues installing Soda with Brew](https://github.com/gobuffalo/homebrew-tap/issues/5). Do check your `GOPATH` and run + +`go build -o /bin/soda github.com/gobuffalo/pop/soda` to resolve. + +- Clone the Auth [repository](https://github.com/supabase/auth) + +```zsh +git clone https://github.com/supabase/auth +``` + +### Install Auth + +To begin installation, be sure to start from the root directory. + +- `cd auth` + +To complete installation, you will: + +- Install the PostgreSQL Docker image +- Create the DB Schema and Migrations +- Setup a local `.env` for environment variables +- Compile Auth +- Run the Auth binary executable + +#### Installation Steps + +1. Start Docker +2. To install the PostgreSQL Docker image, run: + +```zsh +# Builds the postgres image +docker-compose -f docker-compose-dev.yml build postgres + +# Runs the postgres container +docker-compose -f docker-compose-dev.yml up postgres +``` + +You should then see in Docker that `auth_postgresql` is running on `port: 5432`. + +> **Important** If you happen to already have a local running instance of Postgres running on the port `5432` because you +> may have installed via [homebrew on macOS](https://formulae.brew.sh/formula/postgresql) then be certain to stop the process using: +> +> - `brew services stop postgresql` +> +> If you need to run the test environment on another port, you will need to modify several configuration files to use a different custom port. + +3. Next compile the Auth binary: + +When you fork a repository, GitHub does not automatically copy all the tags (tags are not included by default). 
To ensure the correct tag is set before building the binary, you need to fetch the tags from the upstream repository and push them to your fork. Follow these steps:

```zsh
# Fetch the tags from the upstream repository
git fetch upstream --tags

# Push the tags to your fork
git push origin --tags
```

Then build the binary by running:

```zsh
make build
```

4. To set up the database schema via Soda, run:

```zsh
make migrate_test
```

You should see log messages that indicate that the Auth migrations were applied successfully:

```terminal
INFO[0000] Auth migrations applied successfully
DEBU[0000] after status
[POP] 2021/12/15 10:44:36 sql - SELECT EXISTS (SELECT schema_migrations.* FROM schema_migrations AS schema_migrations WHERE version = $1) | ["20210710035447"]
[POP] 2021/12/15 10:44:36 sql - SELECT EXISTS (SELECT schema_migrations.* FROM schema_migrations AS schema_migrations WHERE version = $1) | ["20210722035447"]
[POP] 2021/12/15 10:44:36 sql - SELECT EXISTS (SELECT schema_migrations.* FROM schema_migrations AS schema_migrations WHERE version = $1) | ["20210730183235"]
[POP] 2021/12/15 10:44:36 sql - SELECT EXISTS (SELECT schema_migrations.* FROM schema_migrations AS schema_migrations WHERE version = $1) | ["20210909172000"]
[POP] 2021/12/15 10:44:36 sql - SELECT EXISTS (SELECT schema_migrations.* FROM schema_migrations AS schema_migrations WHERE version = $1) | ["20211122151130"]
Version          Name                         Status
20210710035447   alter_users                  Applied
20210722035447   adds_confirmed_at            Applied
20210730183235   add_email_change_confirmed   Applied
20210909172000   create_identities_table      Applied
20211122151130   create_user_id_idx           Applied
```

This lists each migration that was applied. Note: there may be more migrations than those listed.

5. Create a `.env` file in the root of the project and copy over the config from [example.env](example.env). Set a value for `GOTRUE_SMS_TEST_OTP_VALID_UNTIL` in the `.env` file.

6. In order to have Auth connect to your PostgreSQL database running in Docker, set a connection string like:

```
DATABASE_URL="postgres://supabase_auth_admin:root@localhost:5432/postgres"
```

> Important: Auth requires a set of SMTP credentials to run. You can generate your own SMTP credentials via an SMTP provider such as AWS SES, SendGrid, MailChimp, SendInBlue or any other SMTP provider.

7. Finally, start Auth.
8. Verify that Auth is available.

### Starting Auth

Start Auth by running the executable:

```zsh
./auth
```

This command will re-run migrations and then indicate that Auth has started:

```zsh
INFO[0000] Auth API started on: localhost:9999
```

### How To Verify that Auth is Available

To test that Auth is up and available, you can query the `health` endpoint at `http://localhost:9999/health`.
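For example, a minimal Go sketch that performs this check (plain `curl http://localhost:9999/health` works just as well):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Query the health endpoint of a locally running Auth instance.
	resp, err := http.Get("http://localhost:9999/health")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
```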
You should see a response similar to:

```json
{
  "description": "Auth is a user registration and authentication API",
  "name": "Auth",
  "version": ""
}
```

To see the current settings, make a request to `http://localhost:9999/settings` and you should see a response similar to:

```json
{
  "external": {
    "apple": false,
    "azure": false,
    "bitbucket": false,
    "discord": false,
    "github": false,
    "gitlab": false,
    "google": false,
    "facebook": false,
    "spotify": false,
    "slack": false,
    "slack_oidc": false,
    "twitch": true,
    "twitter": false,
    "email": true,
    "phone": false,
    "saml": false
  },
  "external_labels": {
    "saml": "auth0"
  },
  "disable_signup": false,
  "mailer_autoconfirm": false,
  "phone_autoconfirm": false,
  "sms_provider": "twilio"
}
```

## How to Use Admin API Endpoints

To test the admin endpoints (or any other API endpoints), you can issue HTTP requests directly. A client such as [Insomnia](https://insomnia.rest/products/insomnia) can help you issue these requests.

You will need to know the `GOTRUE_JWT_SECRET` configured in the `.env` settings.

You must also generate a JWT, signed with that secret, whose payload contains the `supabase_admin` role (or one of the roles specified in `GOTRUE_JWT_ADMIN_ROLES`).

For example:

```json
{
  "role": "supabase_admin"
}
```

You can sign this payload using the [JWT.io Debugger](https://jwt.io/#debugger-io), but make sure that `secret base64 encoded` is unchecked.

Then you can use this JWT as a Bearer token for admin requests.

### Create User (aka Sign Up a User)

To create a new user, `POST /admin/users` with the payload:

```json
{
  "email": "user@example.com",
  "password": "12345678"
}
```

#### Request

```
POST /admin/users HTTP/1.1
Host: localhost:9999
User-Agent: insomnia/2021.7.2
Content-Type: application/json
Authorization: Bearer 
Accept: */*
Content-Length: 57
```

#### Response

You should get a new user back:

```json
{
  "id": "e78c512d-68e4-482b-901b-75003e89acae",
  "aud": "authenticated",
  "role": "authenticated",
  "email": "user@example.com",
  "phone": "",
  "app_metadata": {
    "provider": "email",
    "providers": ["email"]
  },
  "user_metadata": {},
  "identities": null,
  "created_at": "2021-12-15T12:40:03.507551-05:00",
  "updated_at": "2021-12-15T12:40:03.512067-05:00"
}
```

### List/Find Users

To list all users, make a request to `GET /admin/users`.
#### Request

```
GET /admin/users HTTP/1.1
Host: localhost:9999
User-Agent: insomnia/2021.7.2
Authorization: Bearer 
Accept: */*
```

#### Response

The response from `/admin/users` should return all users:

```json
{
  "aud": "authenticated",
  "users": [
    {
      "id": "b7fd0253-6e16-4d4e-b61b-5943cb1b2102",
      "aud": "authenticated",
      "role": "authenticated",
      "email": "user+4@example.com",
      "phone": "",
      "app_metadata": {
        "provider": "email",
        "providers": ["email"]
      },
      "user_metadata": {},
      "identities": null,
      "created_at": "2021-12-15T12:43:58.12207-05:00",
      "updated_at": "2021-12-15T12:43:58.122073-05:00"
    },
    {
      "id": "d69ae847-99be-4642-868f-439c2cdd9af4",
      "aud": "authenticated",
      "role": "authenticated",
      "email": "user+3@example.com",
      "phone": "",
      "app_metadata": {
        "provider": "email",
        "providers": ["email"]
      },
      "user_metadata": {},
      "identities": null,
      "created_at": "2021-12-15T12:43:56.730209-05:00",
      "updated_at": "2021-12-15T12:43:56.730213-05:00"
    },
    {
      "id": "7282cf42-344e-4474-bdf6-d48e4968a2e4",
      "aud": "authenticated",
      "role": "authenticated",
      "email": "user+2@example.com",
      "phone": "",
      "app_metadata": {
        "provider": "email",
        "providers": ["email"]
      },
      "user_metadata": {},
      "identities": null,
      "created_at": "2021-12-15T12:43:54.867676-05:00",
      "updated_at": "2021-12-15T12:43:54.867679-05:00"
    },
    {
      "id": "e78c512d-68e4-482b-901b-75003e89acae",
      "aud": "authenticated",
      "role": "authenticated",
      "email": "user@example.com",
      "phone": "",
      "app_metadata": {
        "provider": "email",
        "providers": ["email"]
      },
      "user_metadata": {},
      "identities": null,
      "created_at": "2021-12-15T12:40:03.507551-05:00",
      "updated_at": "2021-12-15T12:40:03.507554-05:00"
    }
  ]
}
```

### Running Database Migrations

If you need to run any new migrations:

```zsh
make migrate_test
```

## Testing

Currently, we don't use a separate test database, so the same database created when installing Auth to run locally is used.

The following commands should help in setting up a database and running the tests:

```sh
# Runs the database in a docker container
$ docker-compose -f docker-compose-dev.yml up postgres

# Applies the migrations to the database (requires soda cli)
$ make migrate_test

# Executes the tests
$ make test
```

### Customizing the PostgreSQL Port

If you already run PostgreSQL and need to run your database on a different custom port,
you will need to make several configuration changes to the following files.

In these examples, we change the port from 5432 to 7432.

> Note: This is not recommended, but if you do, please do not check in the changes.
```
// file: docker-compose-dev.yml
ports:
  - 7432:5432  👈 set the first value to your external facing port
```

The port you customize here can then be used in the subsequent configuration:

```
// file: database.yaml
test:
  dialect: "postgres"
  database: "postgres"
  host: {{ envOr "POSTGRES_HOST" "127.0.0.1" }}
  port: {{ envOr "POSTGRES_PORT" "7432" }} 👈 set to your port
```

```
// file: test.env
DATABASE_URL="postgres://supabase_auth_admin:root@localhost:7432/postgres" 👈 set to your port
```

```
// file: migrate.sh
export GOTRUE_DB_DATABASE_URL="postgres://supabase_auth_admin:root@localhost:7432/$DB_ENV"
```

## Helpful Docker Commands

```
// file: docker-compose-dev.yml
container_name: auth_postgres
```

```zsh
# Command line into bash on the PostgreSQL container
docker exec -it auth_postgres bash

# Removes the container
docker container rm -f auth_postgres

# Removes the volume
docker volume rm postgres_data
```

## Updating Package Dependencies

- `make deps`
- `go mod tidy` if necessary

## Submitting Pull Requests

We actively welcome your pull requests.

- Fork the repo and create your branch from `master`.
- If you've added code that should be tested, add tests.
- If you've changed APIs, update the documentation.
- Ensure the test suite passes.
- Make sure your code lints.

### Checklist for Submitting Pull Requests

- Is there a corresponding issue created for it? If so, please include it in the PR description so we can track / refer to it.
- Does your PR follow the [semantic-release commit guidelines](https://github.com/angular/angular.js/blob/master/DEVELOPERS.md#-git-commit-guidelines)?
- If the PR is a `feat`, an [RFC](https://github.com/supabase/rfcs) or a detailed description of the design implementation is required. The former (RFC) is preferred before starting on the PR.
- Are the existing tests passing?
- Have you written some tests for your PR?

## Guidelines for Implementing Additional OAuth Providers

> ⚠️ We won't be accepting any additional OAuth / SMS provider contributions for now because we intend to support these through webhooks or a generic provider in the future.

Please ensure that an end-to-end test is done for the OAuth provider implemented.

An end-to-end test includes:

- Creating an application on the OAuth provider's site
- Generating your own client_id and secret
- Testing that `http://localhost:9999/authorize?provider=MY_COOL_NEW_PROVIDER` redirects you to the provider's sign-in page
- The callback is handled properly
- GoTrue redirects to the `SITE_URL` or one of the URIs specified in the `URI_ALLOW_LIST` with the access_token, provider_token, expiry and refresh_token as query fragments

### Writing tests for the new OAuth provider implemented

Since implementing an additional OAuth provider consists of making API calls to an external API, we set up a mock server to mock the responses expected from the OAuth provider.

## License

By contributing to Auth, you agree that your contributions will be licensed
under its [MIT license](LICENSE).
diff --git a/auth_v2.169.0/Dockerfile b/auth_v2.169.0/Dockerfile new file mode 100644 index 0000000..17d5071 --- /dev/null +++ b/auth_v2.169.0/Dockerfile @@ -0,0 +1,32 @@ +FROM golang:1.22.3-alpine3.20 as build +ENV GO111MODULE=on +ENV CGO_ENABLED=0 +ENV GOOS=linux + +RUN apk add --no-cache make git + +WORKDIR /go/src/github.com/supabase/auth + +# Pulling dependencies +COPY ./Makefile ./go.* ./ +RUN make deps + +# Building stuff +COPY . /go/src/github.com/supabase/auth + +# Make sure you change the RELEASE_VERSION value before publishing an image. +RUN RELEASE_VERSION=unspecified make build + +# Always use alpine:3 so the latest version is used. This will keep CA certs more up to date. +FROM alpine:3 +RUN adduser -D -u 1000 supabase + +RUN apk add --no-cache ca-certificates +COPY --from=build /go/src/github.com/supabase/auth/auth /usr/local/bin/auth +COPY --from=build /go/src/github.com/supabase/auth/migrations /usr/local/etc/auth/migrations/ +RUN ln -s /usr/local/bin/auth /usr/local/bin/gotrue + +ENV GOTRUE_DB_MIGRATIONS_PATH /usr/local/etc/auth/migrations + +USER supabase +CMD ["auth"] diff --git a/auth_v2.169.0/Dockerfile.dev b/auth_v2.169.0/Dockerfile.dev new file mode 100644 index 0000000..d2733aa --- /dev/null +++ b/auth_v2.169.0/Dockerfile.dev @@ -0,0 +1,18 @@ +FROM golang:1.22.3-alpine3.20 +ENV GO111MODULE=on +ENV CGO_ENABLED=0 +ENV GOOS=linux + +RUN apk add --no-cache make git bash + +WORKDIR /go/src/github.com/supabase/auth + +# Pulling dependencies +COPY ./Makefile ./go.* ./ + +# Production dependencies +RUN make deps + +# Development dependences +RUN go get github.com/githubnemo/CompileDaemon +RUN go install github.com/githubnemo/CompileDaemon diff --git a/auth_v2.169.0/Dockerfile.postgres.dev b/auth_v2.169.0/Dockerfile.postgres.dev new file mode 100644 index 0000000..58661ef --- /dev/null +++ b/auth_v2.169.0/Dockerfile.postgres.dev @@ -0,0 +1,8 @@ +FROM postgres:15 +WORKDIR / +RUN pwd +COPY init_postgres.sh /docker-entrypoint-initdb.d/init.sh +RUN chmod +x /docker-entrypoint-initdb.d/init.sh +EXPOSE 5432 + +CMD ["postgres"] diff --git a/auth_v2.169.0/LICENSE b/auth_v2.169.0/LICENSE new file mode 100644 index 0000000..8a7d702 --- /dev/null +++ b/auth_v2.169.0/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Supabase + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/auth_v2.169.0/Makefile b/auth_v2.169.0/Makefile new file mode 100644 index 0000000..7bfd8ab --- /dev/null +++ b/auth_v2.169.0/Makefile @@ -0,0 +1,93 @@ +.PHONY: all build deps dev-deps image migrate test vet sec format unused +CHECK_FILES?=./... + +FLAGS=-ldflags "-X github.com/supabase/auth/internal/utilities.Version=`git describe --tags`" -buildvcs=false +ifdef RELEASE_VERSION + FLAGS=-ldflags "-X github.com/supabase/auth/internal/utilities.Version=v$(RELEASE_VERSION)" -buildvcs=false +endif + +ifneq ($(shell docker compose version 2>/dev/null),) + DOCKER_COMPOSE=docker compose +else + DOCKER_COMPOSE=docker-compose +endif + +DEV_DOCKER_COMPOSE:=docker-compose-dev.yml + +help: ## Show this help. + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +all: vet sec static build ## Run the tests and build the binary. + +build: deps ## Build the binary. + CGO_ENABLED=0 go build $(FLAGS) + CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build $(FLAGS) -o auth-arm64 + +dev-deps: ## Install developer dependencies + @go install github.com/gobuffalo/pop/soda@latest + @go install github.com/securego/gosec/v2/cmd/gosec@latest + @go install honnef.co/go/tools/cmd/staticcheck@latest + @go install github.com/deepmap/oapi-codegen/cmd/oapi-codegen@latest + @go install github.com/nishanths/exhaustive/cmd/exhaustive@latest + +deps: ## Install dependencies. + @go mod download + @go mod verify + +migrate_dev: ## Run database migrations for development. + hack/migrate.sh postgres + +migrate_test: ## Run database migrations for test. + hack/migrate.sh postgres + +test: build ## Run tests. + go test $(CHECK_FILES) -coverprofile=coverage.out -coverpkg ./... -p 1 -race -v -count=1 + ./hack/coverage.sh + +vet: # Vet the code + go vet $(CHECK_FILES) + +sec: dev-deps # Check for security vulnerabilities + gosec -quiet -exclude-generated $(CHECK_FILES) + gosec -quiet -tests -exclude-generated -exclude=G104 $(CHECK_FILES) + +unused: dev-deps # Look for unused code + @echo "Unused code:" + staticcheck -checks U1000 $(CHECK_FILES) + + @echo + + @echo "Code used only in _test.go (do move it in those files):" + staticcheck -checks U1000 -tests=false $(CHECK_FILES) + +static: dev-deps + staticcheck ./... + exhaustive ./... + +generate: dev-deps + go generate ./... + +dev: ## Run the development containers + ${DOCKER_COMPOSE} -f $(DEV_DOCKER_COMPOSE) up + +down: ## Shutdown the development containers + # Start postgres first and apply migrations + ${DOCKER_COMPOSE} -f $(DEV_DOCKER_COMPOSE) down + +docker-test: ## Run the tests using the development containers + ${DOCKER_COMPOSE} -f $(DEV_DOCKER_COMPOSE) up -d postgres + ${DOCKER_COMPOSE} -f $(DEV_DOCKER_COMPOSE) run auth sh -c "make migrate_test" + ${DOCKER_COMPOSE} -f $(DEV_DOCKER_COMPOSE) run auth sh -c "make test" + ${DOCKER_COMPOSE} -f $(DEV_DOCKER_COMPOSE) down -v + +docker-build: ## Force a full rebuild of the development containers + ${DOCKER_COMPOSE} -f $(DEV_DOCKER_COMPOSE) build --no-cache + ${DOCKER_COMPOSE} -f $(DEV_DOCKER_COMPOSE) up -d postgres + ${DOCKER_COMPOSE} -f $(DEV_DOCKER_COMPOSE) run auth sh -c "make migrate_dev" + ${DOCKER_COMPOSE} -f $(DEV_DOCKER_COMPOSE) down + +docker-clean: ## Remove the development containers and volumes + ${DOCKER_COMPOSE} -f $(DEV_DOCKER_COMPOSE) rm -fsv + +format: + gofmt -s -w . 
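The `FLAGS` definition at the top of this Makefile injects the release version into the binary at link time via `-ldflags "-X ...Version=..."`. As a rough sketch of how that mechanism works in general (illustrative only, not the project's actual `utilities` package):

```go
// Sketch: a version string injected at link time, e.g.
//   go build -ldflags "-X main.Version=v1.2.3"
// The Makefile above targets github.com/supabase/auth/internal/utilities.Version
// the same way.
package main

import "fmt"

// Version must be a plain package-level string variable (not a const);
// otherwise the linker's -X flag cannot overwrite it.
var Version = "unspecified"

func main() {
	fmt.Println("auth version:", Version)
}
```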
diff --git a/auth_v2.169.0/README.md b/auth_v2.169.0/README.md
new file mode 100644
index 0000000..5b9c702
--- /dev/null
+++ b/auth_v2.169.0/README.md
@@ -0,0 +1,1229 @@

# Auth - Authentication and User Management by Supabase

[![Coverage Status](https://coveralls.io/repos/github/supabase/auth/badge.svg?branch=master)](https://coveralls.io/github/supabase/auth?branch=master)

Auth is a user management and authentication server written in Go that powers
[Supabase](https://supabase.com)'s features such as:

- Issuing JWTs
- Row Level Security with PostgREST
- User management
- Sign in with email, password, magic link, phone number
- Sign in with external providers (Google, Apple, Facebook, Discord, ...)

It is originally based on the excellent
[GoTrue codebase by Netlify](https://github.com/netlify/gotrue); however, both have diverged significantly in features and capabilities.

If you wish to contribute to the project, please refer to the [contributing guide](/CONTRIBUTING.md).

## Table Of Contents

- [Quick Start](#quick-start)
- [Running in Production](#running-in-production)
- [Configuration](#configuration)
- [Endpoints](#endpoints)

## Quick Start

Create a `.env` file to store your own custom env vars. See [`example.env`](example.env).

1. Start the local postgres database in a postgres container: `docker-compose -f docker-compose-dev.yml up postgres`
2. Build the auth binary: `make build`. You should see output like this:

```bash
go build -ldflags "-X github.com/supabase/auth/cmd.Version=`git rev-parse HEAD`"
GOOS=linux GOARCH=arm64 go build -ldflags "-X github.com/supabase/auth/cmd.Version=`git rev-parse HEAD`" -o gotrue-arm64
```

3. Execute the auth binary: `./auth`

### If you have docker installed

Create a `.env.docker` file to store your own custom env vars. See [`example.docker.env`](example.docker.env).

1. `make build`
2. `make dev`
3. `docker ps` should show 2 docker containers (`auth_postgresql` and `gotrue_gotrue`)
4. That's it! Visit the [health check endpoint](http://localhost:9999/health) to confirm that auth is running.

## Running in production

Running an authentication server in production is not an easy feat. We
recommend using [Supabase Auth](https://supabase.com/auth) which gets regular
security updates.

Otherwise, please make sure you set up a process to promptly update to the
latest version. You can do that by following this repository, specifically the
[Releases](https://github.com/supabase/auth/releases) and [Security
Advisories](https://github.com/supabase/auth/security/advisories) sections.

### Backward compatibility

Auth uses the [Semantic Versioning](https://semver.org) scheme. Here are some
further clarifications on backward compatibility guarantees:

**Go API compatibility**

Auth is not meant to be used as a Go library. There are no guarantees on
backward API compatibility when used this way, regardless of which version
number changes.

**Patch**

Changes to the patch version guarantee backward compatibility with:

- Database objects (tables, columns, indexes, functions).
- REST API
- JWT structure
- Configuration

Guaranteed examples:

- A column won't change its type.
- A table won't change its primary key.
- An index will not be removed.
- A uniqueness constraint will not be removed.
- A REST API will not be removed.
- Parameters to REST APIs will work equivalently as before (or better, if a bug
  has been fixed).
- Configuration will not change.
Not guaranteed examples:

- A table may add new columns.
- Columns in a table may be reordered.
- Non-unique constraints may be removed (database level checks, null, default
  values).
- JWT may add new properties.

**Minor**

Changes to the minor version guarantee backward compatibility with:

- REST API
- JWT structure
- Configuration

Exceptions to these guarantees will be made only when serious security issues
are found that can't be remedied in any other way.

Guaranteed examples:

- Existing APIs may be deprecated but continue working for the next few minor
  version releases.
- Configuration changes may become deprecated but continue working for the next
  few minor version releases.
- Already issued JWTs will be accepted, but new JWTs may have a different
  (though usually similar) structure.

Not guaranteed examples:

- Removal of JWT fields after a deprecation notice.
- Removal of certain APIs after a deprecation notice.
- Removal of sign-in with external providers, after a deprecation notice.
- Deletion, truncation, significant schema changes to tables, indexes, views,
  functions.

We aim to provide a deprecation notice in execution logs for at least two major
version releases or two weeks if multiple releases go out. Compatibility will
be guaranteed while the notice is live.

**Major**

Changes to the major version do not guarantee any backward compatibility with
previous versions.

### Inherited features

Certain inherited features from the Netlify codebase are not supported by
Supabase and may be removed without prior notice in the future. This is a
comprehensive list of those features:

1. Multi-tenancy via the `instances` table, i.e. the `GOTRUE_MULTI_INSTANCE_MODE`
   configuration parameter.
2. System user (zero UUID user).
3. Super admin via the `is_super_admin` column.
4. Group information in JWTs via `GOTRUE_JWT_ADMIN_GROUP_NAME` and other
   configuration fields.
5. Symmetric JWTs. In the future it is very likely that Auth will begin
   issuing asymmetric JWTs (subject to configuration), so do not rely on the
   assumption that only HS256-signed JWTs will be issued long term.

Note that this is not an exhaustive list and it may change.

### Best practices when self-hosting

These are some best practices to follow when self-hosting to ensure backward
compatibility with Auth:

1. Do not modify the schema managed by Auth. You can see all of the
   migrations in the `migrations` directory.
2. Do not rely on the schema and structure of data in the database. Always use
   Auth APIs and JWTs to infer information about users.
3. Always run Auth behind a TLS-capable proxy such as a load balancer, CDN,
   nginx or other similar software.

## Configuration

You may configure Auth using either a configuration file named `.env`,
environment variables, or a combination of both. Environment variables are prefixed with `GOTRUE_`, and will always have precedence over values provided via file.

### Top-Level

```properties
GOTRUE_SITE_URL=https://example.netlify.com/
```

`SITE_URL` - `string` **required**

The base URL your site is located at. Currently used in combination with other settings to construct URLs used in emails. Any URI that shares a host with `SITE_URL` is a permitted value for `redirect_to` params (see `/authorize` etc.).

`URI_ALLOW_LIST` - `string`

A comma-separated list of URIs (e.g.
`"https://foo.example.com,https://*.foo.example.com,https://bar.example.com"`) which are permitted as valid `redirect_to` destinations. Defaults to `[]`. Supports wildcard matching through globbing, e.g. `https://*.foo.example.com` will allow `https://a.foo.example.com` and `https://b.foo.example.com` to be accepted. Globbing is also supported on paths, e.g. `https://foo.example.com/*` will allow `https://foo.example.com/page1` and `https://foo.example.com/page2` to be accepted.

For more common glob patterns, check out the [following link](https://pkg.go.dev/github.com/gobwas/glob#Compile). A short illustrative sketch is also included at the end of this section.

`OPERATOR_TOKEN` - `string` _Multi-instance mode only_

The shared secret with an operator (usually Netlify) for this microservice. Used to verify requests have been proxied through the operator and that
the payload values can be trusted.

`DISABLE_SIGNUP` - `bool`

When signup is disabled, the only way to create new users is through invites. Defaults to `false`, i.e. all signups enabled.

`GOTRUE_EXTERNAL_EMAIL_ENABLED` - `bool`

Use this to disable email signups (users can still use external OAuth providers to sign up / sign in).

`GOTRUE_EXTERNAL_PHONE_ENABLED` - `bool`

Use this to disable phone signups (users can still use external OAuth providers to sign up / sign in).

`GOTRUE_RATE_LIMIT_HEADER` - `string`

Header on which to rate limit the `/token` endpoint.

`GOTRUE_RATE_LIMIT_EMAIL_SENT` - `string`

Rate limit the number of emails sent per hour on the following endpoints: `/signup`, `/invite`, `/magiclink`, `/recover`, `/otp`, & `/user`.

`GOTRUE_PASSWORD_MIN_LENGTH` - `int`

Minimum password length; defaults to 6.

`GOTRUE_PASSWORD_REQUIRED_CHARACTERS` - a string of character sets separated by `:`. A password must contain at least one character of each set to be accepted. To use the `:` character, escape it with `\`.

`GOTRUE_SECURITY_REFRESH_TOKEN_ROTATION_ENABLED` - `bool`

If refresh token rotation is enabled, Auth will automatically detect malicious attempts to reuse a revoked refresh token. When a malicious attempt is detected, it immediately revokes all tokens that descended from the offending token.

`GOTRUE_SECURITY_REFRESH_TOKEN_REUSE_INTERVAL` - `string`

This setting is only applicable if `GOTRUE_SECURITY_REFRESH_TOKEN_ROTATION_ENABLED` is enabled. The reuse interval for a refresh token allows for exchanging the refresh token multiple times during the interval to support concurrency or offline issues. During the reuse interval, Auth will not consider using a revoked token a malicious attempt and will simply return the child refresh token.

Only the most recently revoked token can be reused; using a refresh token older than that will trigger the reuse detection.

### API

```properties
GOTRUE_API_HOST=localhost
PORT=9999
API_EXTERNAL_URL=http://localhost:9999
```

`API_HOST` - `string`

Hostname to listen on.

`PORT` (no prefix) / `API_PORT` - `number`

Port number to listen on. Defaults to `8081`.

`API_ENDPOINT` - `string` _Multi-instance mode only_

Controls what endpoint Netlify can access this API on.

`API_EXTERNAL_URL` - `string` **required**

The URL at which GoTrue can be accessed externally.

`REQUEST_ID_HEADER` - `string`

If you wish to inherit a request ID from the incoming request, specify the header name in this value.
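As noted under `URI_ALLOW_LIST`, glob patterns drive the redirect allow-list matching. Here is a small illustrative Go sketch using the [gobwas/glob](https://pkg.go.dev/github.com/gobwas/glob) package linked above (the separator runes below are an assumption for illustration, not necessarily the exact ones Auth compiles its patterns with):

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// Compile an allow-list pattern. With '.' and '/' as separators,
	// '*' cannot match across subdomain or path boundaries.
	g := glob.MustCompile("https://*.foo.example.com", '.', '/')

	for _, u := range []string{
		"https://a.foo.example.com",   // matches: '*' covers one label
		"https://a.b.foo.example.com", // no match: '*' cannot span '.'
		"https://bar.example.com",     // no match: host does not end in .foo.example.com
	} {
		fmt.Println(u, "->", g.Match(u))
	}
}
```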
+ +### Database + +```properties +GOTRUE_DB_DRIVER=postgres +DATABASE_URL=root@localhost/auth +``` + +`DB_DRIVER` - `string` **required** + +Chooses what dialect of database you want. Must be `postgres`. + +`DATABASE_URL` (no prefix) / `DB_DATABASE_URL` - `string` **required** + +Connection string for the database. + +`GOTRUE_DB_MAX_POOL_SIZE` - `int` + +Sets the maximum number of open connections to the database. Defaults to 0 which is equivalent to an "unlimited" number of connections. + +`DB_NAMESPACE` - `string` + +Adds a prefix to all table names. + +**Migrations Note** + +Migrations are applied automatically when you run `./auth`. However, you also have the option to rerun the migrations via the following methods: + +- If built locally: `./auth migrate` +- Using Docker: `docker run --rm auth gotrue migrate` + +### Logging + +```properties +LOG_LEVEL=debug # available without GOTRUE prefix (exception) +GOTRUE_LOG_FILE=/var/log/go/auth.log +``` + +`LOG_LEVEL` - `string` + +Controls what log levels are output. Choose from `panic`, `fatal`, `error`, `warn`, `info`, or `debug`. Defaults to `info`. + +`LOG_FILE` - `string` + +If you wish logs to be written to a file, set `log_file` to a valid file path. + +### Observability + +Auth has basic observability built in. It is able to export +[OpenTelemetry](https://opentelemetry.io) metrics and traces to a collector. + +#### Tracing + +To enable tracing configure these variables: + +`GOTRUE_TRACING_ENABLED` - `boolean` + +`GOTRUE_TRACING_EXPORTER` - `string` only `opentelemetry` supported + +Make sure you also configure the [OpenTelemetry +Exporter](https://opentelemetry.io/docs/reference/specification/protocol/exporter/) +configuration for your collector or service. + +For example, if you use +[Honeycomb.io](https://docs.honeycomb.io/getting-data-in/opentelemetry/go-distro/#using-opentelemetry-without-the-honeycomb-distribution) +you should set these standard OpenTelemetry OTLP variables: + +``` +OTEL_SERVICE_NAME=auth +OTEL_EXPORTER_OTLP_PROTOCOL=grpc +OTEL_EXPORTER_OTLP_ENDPOINT=https://api.honeycomb.io:443 +OTEL_EXPORTER_OTLP_HEADERS="x-honeycomb-team=,x-honeycomb-dataset=auth" +``` + +#### Metrics + +To enable metrics configure these variables: + +`GOTRUE_METRICS_ENABLED` - `boolean` + +`GOTRUE_METRICS_EXPORTER` - `string` only `opentelemetry` and `prometheus` +supported + +Make sure you also configure the [OpenTelemetry +Exporter](https://opentelemetry.io/docs/reference/specification/protocol/exporter/) +configuration for your collector or service. + +If you use the `prometheus` exporter, the server host and port can be +configured using these standard OpenTelemetry variables: + +`OTEL_EXPORTER_PROMETHEUS_HOST` - IP address, default `0.0.0.0` + +`OTEL_EXPORTER_PROMETHEUS_PORT` - port number, default `9100` + +The metrics are exported on the `/` path on the server. + +If you use the `opentelemetry` exporter, the metrics are pushed to the +collector. + +For example, if you use +[Honeycomb.io](https://docs.honeycomb.io/getting-data-in/opentelemetry/go-distro/#using-opentelemetry-without-the-honeycomb-distribution) +you should set these standard OpenTelemetry OTLP variables: + +``` +OTEL_SERVICE_NAME=auth +OTEL_EXPORTER_OTLP_PROTOCOL=grpc +OTEL_EXPORTER_OTLP_ENDPOINT=https://api.honeycomb.io:443 +OTEL_EXPORTER_OTLP_HEADERS="x-honeycomb-team=,x-honeycomb-dataset=auth" +``` + +Note that Honeycomb.io requires a paid plan to ingest metrics. 
+ +If you need to debug an issue with traces or metrics not being pushed, you can +set `DEBUG=true` to get more insights from the OpenTelemetry SDK. + +#### Custom resource attributes + +When using the OpenTelemetry tracing or metrics exporter you can define custom +resource attributes using the [standard `OTEL_RESOURCE_ATTRIBUTES` environment +variable](https://opentelemetry.io/docs/reference/specification/resource/sdk/#specifying-resource-information-via-an-environment-variable). + +A default attribute `auth.version` is provided containing the build version. + +#### Tracing HTTP routes + +All HTTP calls to the Auth API are traced. Routes use the parametrized +version of the route, and the values for the route parameters can be found as +the `http.route.params.` span attribute. + +For example, the following request: + +``` +GET /admin/users/4acde936-82dc-4552-b851-831fb8ce0927/ +``` + +will be traced as: + +``` +http.method = GET +http.route = /admin/users/{user_id} +http.route.params.user_id = 4acde936-82dc-4552-b851-831fb8ce0927 +``` + +#### Go runtime and HTTP metrics + +All of the Go runtime metrics are exposed. Some HTTP metrics are also collected +by default. + +### JSON Web Tokens (JWT) + +```properties +GOTRUE_JWT_SECRET=supersecretvalue +GOTRUE_JWT_EXP=3600 +GOTRUE_JWT_AUD=netlify +``` + +`JWT_SECRET` - `string` **required** + +The secret used to sign JWT tokens with. + +`JWT_EXP` - `number` + +How long tokens are valid for, in seconds. Defaults to 3600 (1 hour). + +`JWT_AUD` - `string` + +The default JWT audience. Use audiences to group users. + +`JWT_ADMIN_GROUP_NAME` - `string` + +The name of the admin group (if enabled). Defaults to `admin`. + +`JWT_DEFAULT_GROUP_NAME` - `string` + +The default group to assign all new users to. + +### External Authentication Providers + +We support `apple`, `azure`, `bitbucket`, `discord`, `facebook`, `figma`, `github`, `gitlab`, `google`, `keycloak`, `linkedin`, `notion`, `spotify`, `slack`, `twitch`, `twitter` and `workos` for external authentication. + +Use the names as the keys underneath `external` to configure each separately. + +```properties +GOTRUE_EXTERNAL_GITHUB_ENABLED=true +GOTRUE_EXTERNAL_GITHUB_CLIENT_ID=myappclientid +GOTRUE_EXTERNAL_GITHUB_SECRET=clientsecretvaluessssh +GOTRUE_EXTERNAL_GITHUB_REDIRECT_URI=http://localhost:3000/callback +``` + +No external providers are required, but you must provide the required values if you choose to enable any. + +`EXTERNAL_X_ENABLED` - `bool` + +Whether this external provider is enabled or not + +`EXTERNAL_X_CLIENT_ID` - `string` **required** + +The OAuth2 Client ID registered with the external provider. + +`EXTERNAL_X_SECRET` - `string` **required** + +The OAuth2 Client Secret provided by the external provider when you registered. + +`EXTERNAL_X_REDIRECT_URI` - `string` **required** + +The URI a OAuth2 provider will redirect to with the `code` and `state` values. + +`EXTERNAL_X_URL` - `string` + +The base URL used for constructing the URLs to request authorization and access tokens. Used by `gitlab` and `keycloak`. For `gitlab` it defaults to `https://gitlab.com`. For `keycloak` you need to set this to your instance, for example: `https://keycloak.example.com/realms/myrealm` + +#### Apple OAuth + +To try out external authentication with Apple locally, you will need to do the following: + +1. Remap localhost to \ in your `/etc/hosts` config. +2. 
Configure auth to serve HTTPS traffic over localhost by replacing `ListenAndServe` in [api.go](internal/api/api.go) with: + + ``` + func (a *API) ListenAndServe(hostAndPort string) { + log := logrus.WithField("component", "api") + path, err := os.Getwd() + if err != nil { + log.Println(err) + } + server := &http.Server{ + Addr: hostAndPort, + Handler: a.handler, + } + done := make(chan struct{}) + defer close(done) + go func() { + waitForTermination(log, done) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + server.Shutdown(ctx) + }() + if err := server.ListenAndServeTLS("PATH_TO_CRT_FILE", "PATH_TO_KEY_FILE"); err != http.ErrServerClosed { + log.WithError(err).Fatal("http server listen failed") + } + } + ``` + +3. Generate the crt and key file. See [here](https://www.freecodecamp.org/news/how-to-get-https-working-on-your-local-development-environment-in-5-minutes-7af615770eec/) for more information. +4. Generate the `GOTRUE_EXTERNAL_APPLE_SECRET` by following this [post](https://medium.com/identity-beyond-borders/how-to-configure-sign-in-with-apple-77c61e336003)! + +### E-Mail + +Sending email is not required, but highly recommended for password recovery. +If enabled, you must provide the required values below. + +```properties +GOTRUE_SMTP_HOST=smtp.mandrillapp.com +GOTRUE_SMTP_PORT=587 +GOTRUE_SMTP_USER=smtp-delivery@example.com +GOTRUE_SMTP_PASS=correcthorsebatterystaple +GOTRUE_SMTP_ADMIN_EMAIL=support@example.com +GOTRUE_MAILER_SUBJECTS_CONFIRMATION="Please confirm" +``` + +`SMTP_ADMIN_EMAIL` - `string` **required** + +The `From` email address for all emails sent. + +`SMTP_HOST` - `string` **required** + +The mail server hostname to send emails through. + +`SMTP_PORT` - `number` **required** + +The port number to connect to the mail server on. + +`SMTP_USER` - `string` + +If the mail server requires authentication, the username to use. + +`SMTP_PASS` - `string` + +If the mail server requires authentication, the password to use. + +`SMTP_MAX_FREQUENCY` - `number` + +Controls the minimum amount of time that must pass before sending another signup confirmation or password reset email. The value is the number of seconds. Defaults to 900 (15 minutes). + +`SMTP_SENDER_NAME` - `string` + +Sets the name of the sender. Defaults to the `SMTP_ADMIN_EMAIL` if not used. + +`MAILER_AUTOCONFIRM` - `bool` + +If you do not require email confirmation, you may set this to `true`. Defaults to `false`. + +`MAILER_OTP_EXP` - `number` + +Controls the duration an email link or otp is valid for. + +`MAILER_URLPATHS_INVITE` - `string` + +URL path to use in the user invite email. Defaults to `/verify`. + +`MAILER_URLPATHS_CONFIRMATION` - `string` + +URL path to use in the signup confirmation email. Defaults to `/verify`. + +`MAILER_URLPATHS_RECOVERY` - `string` + +URL path to use in the password reset email. Defaults to `/verify`. + +`MAILER_URLPATHS_EMAIL_CHANGE` - `string` + +URL path to use in the email change confirmation email. Defaults to `/verify`. + +`MAILER_SUBJECTS_INVITE` - `string` + +Email subject to use for user invite. Defaults to `You have been invited`. + +`MAILER_SUBJECTS_CONFIRMATION` - `string` + +Email subject to use for signup confirmation. Defaults to `Confirm Your Signup`. + +`MAILER_SUBJECTS_RECOVERY` - `string` + +Email subject to use for password reset. Defaults to `Reset Your Password`. + +`MAILER_SUBJECTS_MAGIC_LINK` - `string` + +Email subject to use for magic link email. Defaults to `Your Magic Link`. 

### E-Mail

Sending email is not required, but highly recommended for password recovery.
If enabled, you must provide the required values below.

```properties
GOTRUE_SMTP_HOST=smtp.mandrillapp.com
GOTRUE_SMTP_PORT=587
GOTRUE_SMTP_USER=smtp-delivery@example.com
GOTRUE_SMTP_PASS=correcthorsebatterystaple
GOTRUE_SMTP_ADMIN_EMAIL=support@example.com
GOTRUE_MAILER_SUBJECTS_CONFIRMATION="Please confirm"
```

`SMTP_ADMIN_EMAIL` - `string` **required**

The `From` email address for all emails sent.

`SMTP_HOST` - `string` **required**

The mail server hostname to send emails through.

`SMTP_PORT` - `number` **required**

The port number to connect to the mail server on.

`SMTP_USER` - `string`

If the mail server requires authentication, the username to use.

`SMTP_PASS` - `string`

If the mail server requires authentication, the password to use.

`SMTP_MAX_FREQUENCY` - `number`

Controls the minimum amount of time that must pass before sending another signup confirmation or password reset email. The value is the number of seconds. Defaults to 900 (15 minutes).

`SMTP_SENDER_NAME` - `string`

Sets the name of the sender. Defaults to the `SMTP_ADMIN_EMAIL` if not set.

`MAILER_AUTOCONFIRM` - `bool`

If you do not require email confirmation, you may set this to `true`. Defaults to `false`.

`MAILER_OTP_EXP` - `number`

Controls the duration an email link or OTP is valid for.

`MAILER_URLPATHS_INVITE` - `string`

URL path to use in the user invite email. Defaults to `/verify`.

`MAILER_URLPATHS_CONFIRMATION` - `string`

URL path to use in the signup confirmation email. Defaults to `/verify`.

`MAILER_URLPATHS_RECOVERY` - `string`

URL path to use in the password reset email. Defaults to `/verify`.

`MAILER_URLPATHS_EMAIL_CHANGE` - `string`

URL path to use in the email change confirmation email. Defaults to `/verify`.

`MAILER_SUBJECTS_INVITE` - `string`

Email subject to use for user invite. Defaults to `You have been invited`.

`MAILER_SUBJECTS_CONFIRMATION` - `string`

Email subject to use for signup confirmation. Defaults to `Confirm Your Signup`.

`MAILER_SUBJECTS_RECOVERY` - `string`

Email subject to use for password reset. Defaults to `Reset Your Password`.

`MAILER_SUBJECTS_MAGIC_LINK` - `string`

Email subject to use for magic link email. Defaults to `Your Magic Link`.

`MAILER_SUBJECTS_EMAIL_CHANGE` - `string`

Email subject to use for email change confirmation. Defaults to `Confirm Email Change`.

`MAILER_TEMPLATES_INVITE` - `string`

URL path to an email template to use when inviting a user. (e.g. `https://www.example.com/path-to-email-template.html`)
`SiteURL`, `Email`, and `ConfirmationURL` variables are available.

Default Content (if template is unavailable):

```html
<h2>You have been invited</h2>

<p>
  You have been invited to create a user on {{ .SiteURL }}. Follow this link to
  accept the invite:
</p>
<p><a href="{{ .ConfirmationURL }}">Accept the invite</a></p>
```
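
For example, serving a custom invite template is a matter of pointing the corresponding variable, with the same `GOTRUE_` prefix used by the other settings above, at your hosted HTML:

```properties
GOTRUE_MAILER_TEMPLATES_INVITE=https://www.example.com/path-to-email-template.html
```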

`MAILER_TEMPLATES_CONFIRMATION` - `string`

URL path to an email template to use when confirming a signup. (e.g. `https://www.example.com/path-to-email-template.html`)
`SiteURL`, `Email`, and `ConfirmationURL` variables are available.

Default Content (if template is unavailable):

```html
<h2>Confirm your signup</h2>

<p>Follow this link to confirm your user:</p>
<p><a href="{{ .ConfirmationURL }}">Confirm your mail</a></p>
```

`MAILER_TEMPLATES_RECOVERY` - `string`

URL path to an email template to use when resetting a password. (e.g. `https://www.example.com/path-to-email-template.html`)
`SiteURL`, `Email`, and `ConfirmationURL` variables are available.

Default Content (if template is unavailable):

```html
<h2>Reset Password</h2>

<p>Follow this link to reset the password for your user:</p>
<p><a href="{{ .ConfirmationURL }}">Reset Password</a></p>
```

`MAILER_TEMPLATES_MAGIC_LINK` - `string`

URL path to an email template to use when sending a magic link. (e.g. `https://www.example.com/path-to-email-template.html`)
`SiteURL`, `Email`, and `ConfirmationURL` variables are available.

Default Content (if template is unavailable):

```html
<h2>Magic Link</h2>

<p>Follow this link to login:</p>
<p><a href="{{ .ConfirmationURL }}">Log In</a></p>
```

`MAILER_TEMPLATES_EMAIL_CHANGE` - `string`

URL path to an email template to use when confirming the change of an email address. (e.g. `https://www.example.com/path-to-email-template.html`)
`SiteURL`, `Email`, `NewEmail`, and `ConfirmationURL` variables are available.

Default Content (if template is unavailable):

```html
<h2>Confirm Change of Email</h2>

<p>
  Follow this link to confirm the update of your email from {{ .Email }} to {{
  .NewEmail }}:
</p>
<p><a href="{{ .ConfirmationURL }}">Change Email</a></p>
```
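
The `{{ .Var }}` placeholders in these defaults use Go template syntax, so a custom template can interpolate the same variables. A minimal, self-contained sketch of how the substitution behaves; the values are made up, and Auth supplies the real ones at send time:

```go
package main

import (
	"html/template"
	"os"
)

func main() {
	// Same {{ .Var }} syntax as the default templates above.
	tmpl := template.Must(template.New("invite").Parse(
		`<p><a href="{{ .ConfirmationURL }}">Accept the invite</a></p>`))

	// Placeholder values for illustration only.
	_ = tmpl.Execute(os.Stdout, map[string]string{
		"ConfirmationURL": "https://example.com/verify?type=invite&token=SOME_TOKEN",
	})
}
```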

### Phone Auth

`SMS_AUTOCONFIRM` - `bool`

If you do not require phone confirmation, you may set this to `true`. Defaults to `false`.

`SMS_MAX_FREQUENCY` - `number`

Controls the minimum amount of time that must pass before sending another SMS OTP. The value is the number of seconds. Defaults to 60 (1 minute).

`SMS_OTP_EXP` - `number`

Controls the duration an SMS OTP is valid for.

`SMS_OTP_LENGTH` - `number`

Controls the number of digits of the SMS OTP sent.

`SMS_PROVIDER` - `string`

Available options are: `twilio`, `messagebird`, `textlocal`, and `vonage`.

Then you can use your [Twilio credentials](https://www.twilio.com/docs/usage/requests-to-twilio#credentials):

- `SMS_TWILIO_ACCOUNT_SID`
- `SMS_TWILIO_AUTH_TOKEN`
- `SMS_TWILIO_MESSAGE_SERVICE_SID` - can be set to your Twilio sender mobile number

Or MessageBird credentials, which can be obtained in the [Dashboard](https://dashboard.messagebird.com/en/developers/access):

- `SMS_MESSAGEBIRD_ACCESS_KEY` - your MessageBird access key
- `SMS_MESSAGEBIRD_ORIGINATOR` - SMS sender (your MessageBird phone number with + or company name)

### CAPTCHA

If enabled, CAPTCHA will check the request body for the `captcha_token` field and make a verification request to the CAPTCHA provider.

`SECURITY_CAPTCHA_ENABLED` - `string`

Whether the CAPTCHA middleware is enabled.

`SECURITY_CAPTCHA_PROVIDER` - `string`

For now the only options supported are `hcaptcha` and `turnstile`.

- `SECURITY_CAPTCHA_SECRET` - `string`
- `SECURITY_CAPTCHA_TIMEOUT` - `string`

Retrieve these from your hCaptcha or Turnstile account.

### Reauthentication

`SECURITY_UPDATE_PASSWORD_REQUIRE_REAUTHENTICATION` - `bool`

Enforce reauthentication on password update.

### Anonymous Sign-Ins

`GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED` - `bool`

Use this to enable/disable anonymous sign-ins.

## Endpoints

Auth exposes the following endpoints:

### **GET /settings**

Returns the publicly available settings for this auth instance.

```json
{
  "external": {
    "apple": true,
    "azure": true,
    "bitbucket": true,
    "discord": true,
    "facebook": true,
    "figma": true,
    "github": true,
    "gitlab": true,
    "google": true,
    "keycloak": true,
    "linkedin": true,
    "notion": true,
    "slack": true,
    "spotify": true,
    "twitch": true,
    "twitter": true,
    "workos": true
  },
  "disable_signup": false,
  "autoconfirm": false
}
```

### **POST, PUT /admin/users/\<user_id\>**

Creates (POST) or updates (PUT) the user based on the `user_id` specified. The `ban_duration` field accepts the following time units: "ns", "us", "ms", "s", "m", "h". See [`time.ParseDuration`](https://pkg.go.dev/time#ParseDuration) for more details on the format used.

```js
headers:
{
  "Authorization": "Bearer eyJhbGciOiJI...M3A90LCkxxtX9oNP9KZO" // requires a role claim that can be set in the GOTRUE_JWT_ADMIN_ROLES env var
}

body:
{
  "role": "test-user",
  "email": "email@example.com",
  "phone": "12345678",
  "password": "secret",
  "email_confirm": true,
  "phone_confirm": true,
  "user_metadata": {},
  "app_metadata": {},
  "ban_duration": "24h" or "none" // to unban a user
}
```
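
As a rough sketch (not part of the original docs), banning a user via this endpoint from Go could look like the following; the host, user ID, and JWT are placeholders:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Placeholder values: adjust the host, user ID, and admin JWT.
	url := "http://localhost:9999/admin/users/4acde936-82dc-4552-b851-831fb8ce0927"
	body := strings.NewReader(`{"ban_duration": "24h"}`)

	req, err := http.NewRequest(http.MethodPut, url, body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer YOUR_ADMIN_JWT")
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect 200 OK on success
}
```

The generated admin client added later in this diff (`client/admin`) exposes typed wrappers such as `PutAdminUsersUserId` for the same endpoints.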

### **POST /admin/generate_link**

Returns the corresponding email action link based on the type specified. Among other things, the response also contains the query params of the action link as separate JSON fields for convenience (along with the email OTP from which the corresponding token is generated).

```js
headers:
{
  "Authorization": "Bearer eyJhbGciOiJI...M3A90LCkxxtX9oNP9KZO" // admin role required
}

body:
{
  "type": "signup" or "magiclink" or "recovery" or "invite",
  "email": "email@example.com",
  "password": "secret", // only if type = signup
  "data": {
    ...
  }, // only if type = signup
  "redirect_to": "https://supabase.io" // Redirect URL to send the user to after an email action. Defaults to SITE_URL.
}
```

Returns:

```js
{
  "action_link": "http://localhost:9999/verify?token=TOKEN&type=TYPE&redirect_to=REDIRECT_URL",
  "email_otp": "EMAIL_OTP",
  "hashed_token": "TOKEN",
  "verification_type": "TYPE",
  "redirect_to": "REDIRECT_URL",
  ...
}
```

### **POST /signup**

Register a new user with an email and password.

```json
{
  "email": "email@example.com",
  "password": "secret"
}
```

Returns:

```js
{
  "id": "11111111-2222-3333-4444-5555555555555",
  "email": "email@example.com",
  "confirmation_sent_at": "2016-05-15T20:49:40.882805774-07:00",
  "created_at": "2016-05-15T19:53:12.368652374-07:00",
  "updated_at": "2016-05-15T19:53:12.368652374-07:00"
}

// if the sign-up is a duplicate then faux data will be returned,
// so as not to leak information about whether a given email
// has an account with your service or not
```

Register a new user with a phone number and password.

```js
{
  "phone": "12345678", // follows the E.164 format
  "password": "secret"
}
```

Returns:

```js
{
  "id": "11111111-2222-3333-4444-5555555555555", // if duplicate sign up, this ID will be faux
  "phone": "12345678",
  "confirmation_sent_at": "2016-05-15T20:49:40.882805774-07:00",
  "created_at": "2016-05-15T19:53:12.368652374-07:00",
  "updated_at": "2016-05-15T19:53:12.368652374-07:00"
}
```

If AUTOCONFIRM is enabled and the sign-up is a duplicate, the endpoint will return:

```json
{
  "code": 400,
  "msg": "User already registered"
}
```

### **POST /resend**

Allows a user to resend an existing signup, sms, email_change or phone_change OTP.

```json
{
  "email": "user@example.com",
  "type": "signup"
}
```

```json
{
  "phone": "12345678",
  "type": "sms"
}
```

Returns:

```json
{
  "message_id": "msgid123456"
}
```

### **POST /invite**

Invites a new user with an email.
This endpoint requires the `service_role` or `supabase_admin` JWT set as an `Authorization: Bearer` header, e.g.:

```js
headers: {
  "Authorization": "Bearer eyJhbGciOiJI...M3A90LCkxxtX9oNP9KZO"
}
```

```json
{
  "email": "email@example.com"
}
```

Returns:

```json
{
  "id": "11111111-2222-3333-4444-5555555555555",
  "email": "email@example.com",
  "confirmation_sent_at": "2016-05-15T20:49:40.882805774-07:00",
  "created_at": "2016-05-15T19:53:12.368652374-07:00",
  "updated_at": "2016-05-15T19:53:12.368652374-07:00",
  "invited_at": "2016-05-15T19:53:12.368652374-07:00"
}
```

### **POST /verify**

Verify a registration or a password recovery. Type can be `signup`, `recovery` or `invite`,
and the `token` is a token returned from either `/signup` or `/recover`.

```json
{
  "type": "signup",
  "token": "confirmation-code-delivered-in-email"
}
```

`password` is required for signup verification if no existing password exists.

Returns:

```js
{
  "access_token": "jwt-token-representing-the-user",
  "token_type": "bearer",
  "expires_in": 3600,
  "refresh_token": "a-refresh-token",
  "type": "signup | recovery | invite"
}
```

Verify a phone signup or SMS OTP. Type should be set to `sms`.

```json
{
  "type": "sms",
  "token": "confirmation-otp-delivered-in-sms",
  "redirect_to": "https://supabase.io",
  "phone": "phone-number-sms-otp-was-delivered-to"
}
```

Returns:

```json
{
  "access_token": "jwt-token-representing-the-user",
  "token_type": "bearer",
  "expires_in": 3600,
  "refresh_token": "a-refresh-token"
}
```

### **GET /verify**

Verify a registration or a password recovery. Type can be `signup`, `recovery`, `magiclink` or `invite`,
and the `token` is a token returned from either `/signup`, `/recover` or `/magiclink`.

query params:

```json
{
  "type": "signup",
  "token": "confirmation-code-delivered-in-email",
  "redirect_to": "https://supabase.io"
}
```

The user will be logged in and redirected to:

```
SITE_URL/#access_token=jwt-token-representing-the-user&token_type=bearer&expires_in=3600&refresh_token=a-refresh-token&type=invite
```

Your app should detect the query params in the fragment and use them to set the session (supabase-js does this automatically).

You can use the `type` param to redirect the user to a password set form in the case of `invite` or `recovery`,
show an account confirmed/welcome message in the case of `signup`, or direct them to some additional onboarding flow.

### **POST /otp**

One-Time Password. Will deliver a magic link or an SMS OTP to the user depending on whether the request body contains an "email" or "phone" key.

If `"create_user": true`, the user will be automatically signed up if they don't already exist.

```js
{
  "phone": "12345678", // follows the E.164 format
  "create_user": true
}
```

OR

```js
// exactly the same as /magiclink
{
  "email": "email@example.com",
  "create_user": true
}
```

Returns:

```json
{}
```

### **POST /magiclink** (recommended: use /otp instead, see above)

Magic Link. Will deliver a link (e.g. `/verify?type=magiclink&token=fgtyuf68ddqdaDd`) to the user based on
their email address, which they can use to redeem an access_token.

By default Magic Links can only be sent once every 60 seconds.

```json
{
  "email": "email@example.com"
}
```

Returns:

```json
{}
```

When clicked, the magic link will redirect the user to `#access_token=x&refresh_token=y&expires_in=z&token_type=bearer&type=magiclink` (see `/verify` above).

### **POST /recover**

Password recovery. Will deliver a password recovery mail to the user based on
email address.

By default recovery links can only be sent once every 60 seconds.

```json
{
  "email": "email@example.com"
}
```

Returns:

```json
{}
```

### **POST /token**

This is an OAuth2 endpoint that currently implements
the password and refresh_token grant types.

query params:

```
?grant_type=password
```

body:

```js
// Email login
{
  "email": "name@domain.com",
  "password": "somepassword"
}

// Phone login
{
  "phone": "12345678",
  "password": "somepassword"
}
```

or

query params:

```
?grant_type=refresh_token
```

body:

```json
{
  "refresh_token": "a-refresh-token"
}
```

Once you have an access token, you can access the methods requiring authentication
by setting the `Authorization: Bearer YOUR_ACCESS_TOKEN_HERE` header.

Returns:

```json
{
  "access_token": "jwt-token-representing-the-user",
  "token_type": "bearer",
  "expires_in": 3600,
  "refresh_token": "a-refresh-token"
}
```

### **GET /user**

Get the JSON object for the logged-in user (requires authentication).

Returns:

```json
{
  "id": "11111111-2222-3333-4444-5555555555555",
  "email": "email@example.com",
  "confirmation_sent_at": "2016-05-15T20:49:40.882805774-07:00",
  "created_at": "2016-05-15T19:53:12.368652374-07:00",
  "updated_at": "2016-05-15T19:53:12.368652374-07:00"
}
```

### **PUT /user**

Update a user (requires authentication). Apart from changing email/password, this
method can be used to set custom user data. Changing the email will result in a magiclink being sent out.

```json
{
  "email": "new-email@example.com",
  "password": "new-password",
  "phone": "+123456789",
  "data": {
    "key": "value",
    "number": 10,
    "admin": false
  }
}
```

Returns:

```json
{
  "id": "11111111-2222-3333-4444-5555555555555",
  "email": "email@example.com",
  "email_change_sent_at": "2016-05-15T20:49:40.882805774-07:00",
  "phone": "+123456789",
  "phone_change_sent_at": "2016-05-15T20:49:40.882805774-07:00",
  "created_at": "2016-05-15T19:53:12.368652374-07:00",
  "updated_at": "2016-05-15T19:53:12.368652374-07:00"
}
```

If `GOTRUE_SECURITY_UPDATE_PASSWORD_REQUIRE_REAUTHENTICATION` is enabled, the user will need to reauthenticate first.

```json
{
  "password": "new-password",
  "nonce": "123456"
}
```

### **GET /reauthenticate**

Sends a nonce to the user's email (preferred) or phone. This endpoint requires the user to be logged in / authenticated first. The user needs to have either an email or phone number for the nonce to be sent successfully.

```js
headers: {
  "Authorization": "Bearer eyJhbGciOiJI...M3A90LCkxxtX9oNP9KZO"
}
```

### **POST /logout**

Logout a user (requires authentication).

This will revoke all refresh tokens for the user. Remember that the JWT tokens
will still be valid for stateless auth until they expire.

### **GET /authorize**

Get the access_token from an external OAuth provider.

query params:

```
provider=apple | azure | bitbucket | discord | facebook | figma | github | gitlab | google | keycloak | linkedin | notion | slack | spotify | twitch | twitter | workos

scopes=
```

Redirects to the provider and then to `/callback`.

For Apple-specific setup, see the Apple OAuth section above.

### **GET /callback**

The external provider should redirect here.

Redirects to `#access_token=<access_token>&refresh_token=<refresh_token>&provider_token=<provider_token>&expires_in=3600&provider=<provider>`.
If additional scopes were requested, `provider_token` will be populated; you can use it to fetch additional data from the provider or interact with their services.
diff --git a/auth_v2.169.0/SECURITY.md b/auth_v2.169.0/SECURITY.md
new file mode 100644
index 0000000..c607303
--- /dev/null
+++ b/auth_v2.169.0/SECURITY.md
@@ -0,0 +1,60 @@
+# Security Policy
+
+Auth is a project maintained by [Supabase](https://supabase.com). Below is
+our security policy.
+
+Contact: security@supabase.io
+Canonical: https://supabase.com/.well-known/security.txt
+
+At Supabase, we consider the security of our systems a top priority. But no
+matter how much effort we put into system security, there can still be
+vulnerabilities present.
+
+If you discover a vulnerability, we would like to know about it so we can take
+steps to address it as quickly as possible. We would like to ask you to help us
+better protect our clients and our systems.

Out of scope vulnerabilities:

- Clickjacking on pages with no sensitive actions.
- Unauthenticated/logout/login CSRF.
- Attacks requiring MITM or physical access to a user's device.
- Any activity that could lead to the disruption of our service (DoS).
- Content spoofing and text injection issues without showing an attack
  vector/without being able to modify HTML/CSS.
- Email spoofing
- Missing DNSSEC, CAA, CSP headers
- Lack of Secure or HttpOnly flag on non-sensitive cookies
- Dead links

Please do the following:

- E-mail your findings to security@supabase.io.
- Do not run automated scanners on our infrastructure or dashboard. If you wish
  to do this, contact us and we will set up a sandbox for you.
- Do not take advantage of the vulnerability or problem you have discovered,
  for example by downloading more data than necessary to demonstrate the
  vulnerability or deleting or modifying other people's data,
- Do not reveal the problem to others until it has been resolved,
- Do not use attacks on physical security, social engineering, distributed
  denial of service, spam or applications of third parties, and
- Do provide sufficient information to reproduce the problem, so we will be
  able to resolve it as quickly as possible. Usually, the IP address or the URL
  of the affected system and a description of the vulnerability will be
  sufficient, but complex vulnerabilities may require further explanation.

What we promise:

- We will respond to your report within 3 business days with our evaluation of
  the report and an expected resolution date,
- If you have followed the instructions above, we will not take any legal
  action against you in regard to the report,
- We will handle your report with strict confidentiality, and not pass on your
  personal details to third parties without your permission,
- We will keep you informed of the progress towards resolving the problem,
- In the public information concerning the problem reported, we will give your
  name as the discoverer of the problem (unless you desire otherwise).

We strive to resolve all problems as quickly as possible, and we would like to
play an active role in the ultimate publication about the problem after it is
resolved.
diff --git a/auth_v2.169.0/app.json b/auth_v2.169.0/app.json new file mode 100644 index 0000000..4868656 --- /dev/null +++ b/auth_v2.169.0/app.json @@ -0,0 +1,34 @@ +{ + "name": "Gotrue", + "description": "", + "website": "https://www.gotrueapi.org", + "repository": "https://github.com/supabase/gotrue", + "env": { + "DATABASE_URL": {}, + "GOTRUE_DB_DRIVER": { + "value": "postgres" + }, + "GOTRUE_DB_AUTOMIGRATE": { + "value": true + }, + "GOTRUE_DB_NAMESPACE": { + "value": "auth" + }, + "GOTRUE_JWT_SECRET": { + "required": true + }, + "GOTRUE_SMTP_ADMIN_EMAIL": {}, + "GOTRUE_SMTP_HOST": {}, + "GOTRUE_SMTP_PASS": {}, + "GOTRUE_SMTP_PORT": {}, + "GOTRUE_MAILER_SITE_URL": {}, + "GOTRUE_MAILER_SUBJECTS_CONFIRMATION": {}, + "GOTRUE_MAILER_SUBJECTS_RECOVERY": {}, + "GOTRUE_MAILER_SUBJECTS_MAGIC_LINK": {}, + "GOTRUE_MAILER_TEMPLATES_CONFIRMATION": {}, + "GOTRUE_MAILER_TEMPLATES_EMAIL_CHANGE": {}, + "GOTRUE_MAILER_TEMPLATES_RECOVERY": {}, + "GOTRUE_MAILER_TEMPLATES_MAGIC_LINK": {}, + "GOTRUE_MAILER_USER": {} + } +} diff --git a/auth_v2.169.0/client/admin/client.go b/auth_v2.169.0/client/admin/client.go new file mode 100644 index 0000000..7abe6e8 --- /dev/null +++ b/auth_v2.169.0/client/admin/client.go @@ -0,0 +1,2674 @@ +// Package admin provides primitives to interact with the openapi HTTP API. +// +// Code generated by github.com/deepmap/oapi-codegen version v1.12.4 DO NOT EDIT. +package admin + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "github.com/deepmap/oapi-codegen/pkg/runtime" + openapi_types "github.com/deepmap/oapi-codegen/pkg/types" +) + +const ( + APIKeyAuthScopes = "APIKeyAuth.Scopes" + AdminAuthScopes = "AdminAuth.Scopes" +) + +// Defines values for PostAdminSsoProvidersJSONBodyType. +const ( + Saml PostAdminSsoProvidersJSONBodyType = "saml" +) + +// Defines values for PostGenerateLinkJSONBodyType. +const ( + EmailChangeCurrent PostGenerateLinkJSONBodyType = "email_change_current" + EmailChangeNew PostGenerateLinkJSONBodyType = "email_change_new" + Magiclink PostGenerateLinkJSONBodyType = "magiclink" + Recovery PostGenerateLinkJSONBodyType = "recovery" + Signup PostGenerateLinkJSONBodyType = "signup" +) + +// ErrorSchema defines model for ErrorSchema. +type ErrorSchema struct { + // Code The HTTP status code. Usually missing if `error` is present. + Code *int `json:"code,omitempty"` + + // Error Certain responses will contain this property with the provided values. + // + // Usually one of these: + // - invalid_request + // - unauthorized_client + // - access_denied + // - server_error + // - temporarily_unavailable + // - unsupported_otp_type + Error *string `json:"error,omitempty"` + + // ErrorDescription Certain responses that have an `error` property may have this property which describes the error. + ErrorDescription *string `json:"error_description,omitempty"` + + // Msg A basic message describing the problem with the request. Usually missing if `error` is present. + Msg *string `json:"msg,omitempty"` +} + +// MFAFactorSchema Represents a MFA factor. +type MFAFactorSchema struct { + // FactorType Usually one of: + // - totp + FactorType *string `json:"factor_type,omitempty"` + FriendlyName *string `json:"friendly_name,omitempty"` + Id *openapi_types.UUID `json:"id,omitempty"` + + // Status Usually one of: + // - verified + // - unverified + Status *string `json:"status,omitempty"` +} + +// SAMLAttributeMappingSchema defines model for SAMLAttributeMappingSchema. 
+type SAMLAttributeMappingSchema struct { + Keys *map[string]interface{} `json:"keys,omitempty"` +} + +// SSOProviderSchema defines model for SSOProviderSchema. +type SSOProviderSchema struct { + Id *openapi_types.UUID `json:"id,omitempty"` + Saml *struct { + AttributeMapping *SAMLAttributeMappingSchema `json:"attribute_mapping,omitempty"` + EntityId *string `json:"entity_id,omitempty"` + MetadataUrl *string `json:"metadata_url,omitempty"` + MetadataXml *string `json:"metadata_xml,omitempty"` + } `json:"saml,omitempty"` + SsoDomains *[]struct { + Domain *string `json:"domain,omitempty"` + } `json:"sso_domains,omitempty"` +} + +// UserSchema Object describing the user related to the issued access and refresh tokens. +type UserSchema struct { + AppMetadata *map[string]interface{} `json:"app_metadata,omitempty"` + Aud *string `json:"aud,omitempty"` + BannedUntil *time.Time `json:"banned_until,omitempty"` + ConfirmationSentAt *time.Time `json:"confirmation_sent_at,omitempty"` + ConfirmedAt *time.Time `json:"confirmed_at,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + DeletedAt *time.Time `json:"deleted_at,omitempty"` + + // Email User's primary contact email. In most cases you can uniquely identify a user by their email address, but not in all cases. + Email *string `json:"email,omitempty"` + EmailChangeSentAt *time.Time `json:"email_change_sent_at,omitempty"` + EmailConfirmedAt *time.Time `json:"email_confirmed_at,omitempty"` + Factors *[]MFAFactorSchema `json:"factors,omitempty"` + Id *openapi_types.UUID `json:"id,omitempty"` + Identities *[]map[string]interface{} `json:"identities,omitempty"` + LastSignInAt *time.Time `json:"last_sign_in_at,omitempty"` + NewEmail *openapi_types.Email `json:"new_email,omitempty"` + NewPhone *string `json:"new_phone,omitempty"` + + // Phone User's primary contact phone number. In most cases you can uniquely identify a user by their phone number, but not in all cases. + Phone *string `json:"phone,omitempty"` + PhoneChangeSentAt *time.Time `json:"phone_change_sent_at,omitempty"` + PhoneConfirmedAt *time.Time `json:"phone_confirmed_at,omitempty"` + ReauthenticationSentAt *time.Time `json:"reauthentication_sent_at,omitempty"` + RecoverySentAt *time.Time `json:"recovery_sent_at,omitempty"` + Role *string `json:"role,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` + UserMetadata *map[string]interface{} `json:"user_metadata,omitempty"` +} + +// BadRequestResponse defines model for BadRequestResponse. +type BadRequestResponse = ErrorSchema + +// ForbiddenResponse defines model for ForbiddenResponse. +type ForbiddenResponse = ErrorSchema + +// UnauthorizedResponse defines model for UnauthorizedResponse. +type UnauthorizedResponse = ErrorSchema + +// GetAdminAuditParams defines parameters for GetAdminAudit. +type GetAdminAuditParams struct { + Page *int `form:"page,omitempty" json:"page,omitempty"` + PerPage *int `form:"per_page,omitempty" json:"per_page,omitempty"` +} + +// PostAdminSsoProvidersJSONBody defines parameters for PostAdminSsoProviders. +type PostAdminSsoProvidersJSONBody struct { + AttributeMapping *SAMLAttributeMappingSchema `json:"attribute_mapping,omitempty"` + Domains *[]string `json:"domains,omitempty"` + MetadataUrl *string `json:"metadata_url,omitempty"` + MetadataXml *string `json:"metadata_xml,omitempty"` + Type PostAdminSsoProvidersJSONBodyType `json:"type"` +} + +// PostAdminSsoProvidersJSONBodyType defines parameters for PostAdminSsoProviders. 
+type PostAdminSsoProvidersJSONBodyType string + +// PutAdminSsoProvidersSsoProviderIdJSONBody defines parameters for PutAdminSsoProvidersSsoProviderId. +type PutAdminSsoProvidersSsoProviderIdJSONBody struct { + AttributeMapping *SAMLAttributeMappingSchema `json:"attribute_mapping,omitempty"` + Domains *[]string `json:"domains,omitempty"` + MetadataUrl *string `json:"metadata_url,omitempty"` + MetadataXml *string `json:"metadata_xml,omitempty"` +} + +// GetAdminUsersParams defines parameters for GetAdminUsers. +type GetAdminUsersParams struct { + Page *int `form:"page,omitempty" json:"page,omitempty"` + PerPage *int `form:"per_page,omitempty" json:"per_page,omitempty"` +} + +// PutAdminUsersUserIdFactorsFactorIdJSONBody defines parameters for PutAdminUsersUserIdFactorsFactorId. +type PutAdminUsersUserIdFactorsFactorIdJSONBody = map[string]interface{} + +// PostGenerateLinkJSONBody defines parameters for PostGenerateLink. +type PostGenerateLinkJSONBody struct { + Data *map[string]interface{} `json:"data,omitempty"` + Email openapi_types.Email `json:"email"` + NewEmail *openapi_types.Email `json:"new_email,omitempty"` + Password *string `json:"password,omitempty"` + RedirectTo *string `json:"redirect_to,omitempty"` + Type PostGenerateLinkJSONBodyType `json:"type"` +} + +// PostGenerateLinkJSONBodyType defines parameters for PostGenerateLink. +type PostGenerateLinkJSONBodyType string + +// PostInviteJSONBody defines parameters for PostInvite. +type PostInviteJSONBody struct { + Data *map[string]interface{} `json:"data,omitempty"` + Email string `json:"email"` +} + +// PostAdminSsoProvidersJSONRequestBody defines body for PostAdminSsoProviders for application/json ContentType. +type PostAdminSsoProvidersJSONRequestBody PostAdminSsoProvidersJSONBody + +// PutAdminSsoProvidersSsoProviderIdJSONRequestBody defines body for PutAdminSsoProvidersSsoProviderId for application/json ContentType. +type PutAdminSsoProvidersSsoProviderIdJSONRequestBody PutAdminSsoProvidersSsoProviderIdJSONBody + +// PutAdminUsersUserIdJSONRequestBody defines body for PutAdminUsersUserId for application/json ContentType. +type PutAdminUsersUserIdJSONRequestBody = UserSchema + +// PutAdminUsersUserIdFactorsFactorIdJSONRequestBody defines body for PutAdminUsersUserIdFactorsFactorId for application/json ContentType. +type PutAdminUsersUserIdFactorsFactorIdJSONRequestBody = PutAdminUsersUserIdFactorsFactorIdJSONBody + +// PostGenerateLinkJSONRequestBody defines body for PostGenerateLink for application/json ContentType. +type PostGenerateLinkJSONRequestBody PostGenerateLinkJSONBody + +// PostInviteJSONRequestBody defines body for PostInvite for application/json ContentType. +type PostInviteJSONRequestBody PostInviteJSONBody + +// RequestEditorFn is the function signature for the RequestEditor callback function +type RequestEditorFn func(ctx context.Context, req *http.Request) error + +// Doer performs HTTP requests. +// +// The standard http.Client implements this interface. +type HttpRequestDoer interface { + Do(req *http.Request) (*http.Response, error) +} + +// Client which conforms to the OpenAPI3 specification for this service. +type Client struct { + // The endpoint of the server conforming to this interface, with scheme, + // https://api.deepmap.com for example. This can contain a path relative + // to the server, such as https://api.deepmap.com/dev-test, and all the + // paths in the swagger spec will be appended to the server. 
+ Server string + + // Doer for performing requests, typically a *http.Client with any + // customized settings, such as certificate chains. + Client HttpRequestDoer + + // A list of callbacks for modifying requests which are generated before sending over + // the network. + RequestEditors []RequestEditorFn +} + +// ClientOption allows setting custom parameters during construction +type ClientOption func(*Client) error + +// Creates a new Client, with reasonable defaults +func NewClient(server string, opts ...ClientOption) (*Client, error) { + // create a client with sane default values + client := Client{ + Server: server, + } + // mutate client and add all optional params + for _, o := range opts { + if err := o(&client); err != nil { + return nil, err + } + } + // ensure the server URL always has a trailing slash + if !strings.HasSuffix(client.Server, "/") { + client.Server += "/" + } + // create httpClient, if not already present + if client.Client == nil { + client.Client = &http.Client{} + } + return &client, nil +} + +// WithHTTPClient allows overriding the default Doer, which is +// automatically created using http.Client. This is useful for tests. +func WithHTTPClient(doer HttpRequestDoer) ClientOption { + return func(c *Client) error { + c.Client = doer + return nil + } +} + +// WithRequestEditorFn allows setting up a callback function, which will be +// called right before sending the request. This can be used to mutate the request. +func WithRequestEditorFn(fn RequestEditorFn) ClientOption { + return func(c *Client) error { + c.RequestEditors = append(c.RequestEditors, fn) + return nil + } +} + +// The interface specification for the client above. +type ClientInterface interface { + // GetAdminAudit request + GetAdminAudit(ctx context.Context, params *GetAdminAuditParams, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GetAdminSsoProviders request + GetAdminSsoProviders(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) + + // PostAdminSsoProviders request with any body + PostAdminSsoProvidersWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + PostAdminSsoProviders(ctx context.Context, body PostAdminSsoProvidersJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + + // DeleteAdminSsoProvidersSsoProviderId request + DeleteAdminSsoProvidersSsoProviderId(ctx context.Context, ssoProviderId openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GetAdminSsoProvidersSsoProviderId request + GetAdminSsoProvidersSsoProviderId(ctx context.Context, ssoProviderId openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) + + // PutAdminSsoProvidersSsoProviderId request with any body + PutAdminSsoProvidersSsoProviderIdWithBody(ctx context.Context, ssoProviderId openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + PutAdminSsoProvidersSsoProviderId(ctx context.Context, ssoProviderId openapi_types.UUID, body PutAdminSsoProvidersSsoProviderIdJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GetAdminUsers request + GetAdminUsers(ctx context.Context, params *GetAdminUsersParams, reqEditors ...RequestEditorFn) (*http.Response, error) + + // DeleteAdminUsersUserId request + DeleteAdminUsersUserId(ctx context.Context, userId openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GetAdminUsersUserId request + 
GetAdminUsersUserId(ctx context.Context, userId openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) + + // PutAdminUsersUserId request with any body + PutAdminUsersUserIdWithBody(ctx context.Context, userId openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + PutAdminUsersUserId(ctx context.Context, userId openapi_types.UUID, body PutAdminUsersUserIdJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GetAdminUsersUserIdFactors request + GetAdminUsersUserIdFactors(ctx context.Context, userId openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) + + // DeleteAdminUsersUserIdFactorsFactorId request + DeleteAdminUsersUserIdFactorsFactorId(ctx context.Context, userId openapi_types.UUID, factorId openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) + + // PutAdminUsersUserIdFactorsFactorId request with any body + PutAdminUsersUserIdFactorsFactorIdWithBody(ctx context.Context, userId openapi_types.UUID, factorId openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + PutAdminUsersUserIdFactorsFactorId(ctx context.Context, userId openapi_types.UUID, factorId openapi_types.UUID, body PutAdminUsersUserIdFactorsFactorIdJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + + // PostGenerateLink request with any body + PostGenerateLinkWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + PostGenerateLink(ctx context.Context, body PostGenerateLinkJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + + // PostInvite request with any body + PostInviteWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + PostInvite(ctx context.Context, body PostInviteJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) +} + +func (c *Client) GetAdminAudit(ctx context.Context, params *GetAdminAuditParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetAdminAuditRequest(c.Server, params) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GetAdminSsoProviders(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetAdminSsoProvidersRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) PostAdminSsoProvidersWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPostAdminSsoProvidersRequestWithBody(c.Server, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) PostAdminSsoProviders(ctx context.Context, body PostAdminSsoProvidersJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPostAdminSsoProvidersRequest(c.Server, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { 
+ return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) DeleteAdminSsoProvidersSsoProviderId(ctx context.Context, ssoProviderId openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewDeleteAdminSsoProvidersSsoProviderIdRequest(c.Server, ssoProviderId) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GetAdminSsoProvidersSsoProviderId(ctx context.Context, ssoProviderId openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetAdminSsoProvidersSsoProviderIdRequest(c.Server, ssoProviderId) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) PutAdminSsoProvidersSsoProviderIdWithBody(ctx context.Context, ssoProviderId openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPutAdminSsoProvidersSsoProviderIdRequestWithBody(c.Server, ssoProviderId, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) PutAdminSsoProvidersSsoProviderId(ctx context.Context, ssoProviderId openapi_types.UUID, body PutAdminSsoProvidersSsoProviderIdJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPutAdminSsoProvidersSsoProviderIdRequest(c.Server, ssoProviderId, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GetAdminUsers(ctx context.Context, params *GetAdminUsersParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetAdminUsersRequest(c.Server, params) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) DeleteAdminUsersUserId(ctx context.Context, userId openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewDeleteAdminUsersUserIdRequest(c.Server, userId) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GetAdminUsersUserId(ctx context.Context, userId openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetAdminUsersUserIdRequest(c.Server, userId) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) PutAdminUsersUserIdWithBody(ctx context.Context, userId openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPutAdminUsersUserIdRequestWithBody(c.Server, userId, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c 
*Client) PutAdminUsersUserId(ctx context.Context, userId openapi_types.UUID, body PutAdminUsersUserIdJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPutAdminUsersUserIdRequest(c.Server, userId, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GetAdminUsersUserIdFactors(ctx context.Context, userId openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetAdminUsersUserIdFactorsRequest(c.Server, userId) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) DeleteAdminUsersUserIdFactorsFactorId(ctx context.Context, userId openapi_types.UUID, factorId openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewDeleteAdminUsersUserIdFactorsFactorIdRequest(c.Server, userId, factorId) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) PutAdminUsersUserIdFactorsFactorIdWithBody(ctx context.Context, userId openapi_types.UUID, factorId openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPutAdminUsersUserIdFactorsFactorIdRequestWithBody(c.Server, userId, factorId, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) PutAdminUsersUserIdFactorsFactorId(ctx context.Context, userId openapi_types.UUID, factorId openapi_types.UUID, body PutAdminUsersUserIdFactorsFactorIdJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPutAdminUsersUserIdFactorsFactorIdRequest(c.Server, userId, factorId, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) PostGenerateLinkWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPostGenerateLinkRequestWithBody(c.Server, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) PostGenerateLink(ctx context.Context, body PostGenerateLinkJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPostGenerateLinkRequest(c.Server, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) PostInviteWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPostInviteRequestWithBody(c.Server, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return 
c.Client.Do(req) +} + +func (c *Client) PostInvite(ctx context.Context, body PostInviteJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPostInviteRequest(c.Server, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +// NewGetAdminAuditRequest generates requests for GetAdminAudit +func NewGetAdminAuditRequest(server string, params *GetAdminAuditParams) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/admin/audit") + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + queryValues := queryURL.Query() + + if params.Page != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "page", runtime.ParamLocationQuery, *params.Page); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + if params.PerPage != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "per_page", runtime.ParamLocationQuery, *params.PerPage); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + queryURL.RawQuery = queryValues.Encode() + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewGetAdminSsoProvidersRequest generates requests for GetAdminSsoProviders +func NewGetAdminSsoProvidersRequest(server string) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/admin/sso/providers") + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewPostAdminSsoProvidersRequest calls the generic PostAdminSsoProviders builder with application/json body +func NewPostAdminSsoProvidersRequest(server string, body PostAdminSsoProvidersJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewPostAdminSsoProvidersRequestWithBody(server, "application/json", bodyReader) +} + +// NewPostAdminSsoProvidersRequestWithBody generates requests for PostAdminSsoProviders with any type of body +func NewPostAdminSsoProvidersRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/admin/sso/providers") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +// NewDeleteAdminSsoProvidersSsoProviderIdRequest generates requests for DeleteAdminSsoProvidersSsoProviderId +func NewDeleteAdminSsoProvidersSsoProviderIdRequest(server string, ssoProviderId openapi_types.UUID) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "ssoProviderId", runtime.ParamLocationPath, ssoProviderId) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/admin/sso/providers/%s", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("DELETE", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewGetAdminSsoProvidersSsoProviderIdRequest generates requests for GetAdminSsoProvidersSsoProviderId +func NewGetAdminSsoProvidersSsoProviderIdRequest(server string, ssoProviderId openapi_types.UUID) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "ssoProviderId", runtime.ParamLocationPath, ssoProviderId) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/admin/sso/providers/%s", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewPutAdminSsoProvidersSsoProviderIdRequest calls the generic PutAdminSsoProvidersSsoProviderId builder with application/json body +func NewPutAdminSsoProvidersSsoProviderIdRequest(server string, ssoProviderId openapi_types.UUID, body PutAdminSsoProvidersSsoProviderIdJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewPutAdminSsoProvidersSsoProviderIdRequestWithBody(server, ssoProviderId, "application/json", bodyReader) +} + +// NewPutAdminSsoProvidersSsoProviderIdRequestWithBody generates requests for PutAdminSsoProvidersSsoProviderId with any type of body +func NewPutAdminSsoProvidersSsoProviderIdRequestWithBody(server string, ssoProviderId openapi_types.UUID, contentType string, body io.Reader) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "ssoProviderId", runtime.ParamLocationPath, ssoProviderId) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/admin/sso/providers/%s", pathParam0) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("PUT", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +// NewGetAdminUsersRequest generates requests for GetAdminUsers +func NewGetAdminUsersRequest(server string, params *GetAdminUsersParams) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/admin/users") + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + queryValues := queryURL.Query() + + if params.Page != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "page", runtime.ParamLocationQuery, *params.Page); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + if params.PerPage != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "per_page", runtime.ParamLocationQuery, *params.PerPage); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + queryURL.RawQuery = queryValues.Encode() + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewDeleteAdminUsersUserIdRequest generates requests for DeleteAdminUsersUserId +func NewDeleteAdminUsersUserIdRequest(server string, userId openapi_types.UUID) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "userId", runtime.ParamLocationPath, userId) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/admin/users/%s", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("DELETE", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewGetAdminUsersUserIdRequest generates requests for GetAdminUsersUserId +func NewGetAdminUsersUserIdRequest(server string, userId openapi_types.UUID) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "userId", runtime.ParamLocationPath, userId) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/admin/users/%s", pathParam0) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewPutAdminUsersUserIdRequest calls the generic PutAdminUsersUserId builder with application/json body +func NewPutAdminUsersUserIdRequest(server string, userId openapi_types.UUID, body PutAdminUsersUserIdJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewPutAdminUsersUserIdRequestWithBody(server, userId, "application/json", bodyReader) +} + +// NewPutAdminUsersUserIdRequestWithBody generates requests for PutAdminUsersUserId with any type of body +func NewPutAdminUsersUserIdRequestWithBody(server string, userId openapi_types.UUID, contentType string, body io.Reader) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "userId", runtime.ParamLocationPath, userId) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/admin/users/%s", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("PUT", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +// NewGetAdminUsersUserIdFactorsRequest generates requests for GetAdminUsersUserIdFactors +func NewGetAdminUsersUserIdFactorsRequest(server string, userId openapi_types.UUID) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "userId", runtime.ParamLocationPath, userId) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/admin/users/%s/factors", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewDeleteAdminUsersUserIdFactorsFactorIdRequest generates requests for DeleteAdminUsersUserIdFactorsFactorId +func NewDeleteAdminUsersUserIdFactorsFactorIdRequest(server string, userId openapi_types.UUID, factorId openapi_types.UUID) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "userId", runtime.ParamLocationPath, userId) + if err != nil { + return nil, err + } + + var pathParam1 string + + pathParam1, err = runtime.StyleParamWithLocation("simple", false, "factorId", runtime.ParamLocationPath, factorId) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/admin/users/%s/factors/%s", pathParam0, pathParam1) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("DELETE", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewPutAdminUsersUserIdFactorsFactorIdRequest calls the generic PutAdminUsersUserIdFactorsFactorId builder with application/json body +func NewPutAdminUsersUserIdFactorsFactorIdRequest(server string, userId openapi_types.UUID, factorId openapi_types.UUID, body PutAdminUsersUserIdFactorsFactorIdJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewPutAdminUsersUserIdFactorsFactorIdRequestWithBody(server, userId, factorId, "application/json", bodyReader) +} + +// NewPutAdminUsersUserIdFactorsFactorIdRequestWithBody generates requests for PutAdminUsersUserIdFactorsFactorId with any type of body +func NewPutAdminUsersUserIdFactorsFactorIdRequestWithBody(server string, userId openapi_types.UUID, factorId openapi_types.UUID, contentType string, body io.Reader) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "userId", runtime.ParamLocationPath, userId) + if err != nil { + return nil, err + } + + var pathParam1 string + + pathParam1, err = runtime.StyleParamWithLocation("simple", false, "factorId", runtime.ParamLocationPath, factorId) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/admin/users/%s/factors/%s", pathParam0, pathParam1) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("PUT", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +// NewPostGenerateLinkRequest calls the generic PostGenerateLink builder with application/json body +func NewPostGenerateLinkRequest(server string, body PostGenerateLinkJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewPostGenerateLinkRequestWithBody(server, "application/json", bodyReader) +} + +// NewPostGenerateLinkRequestWithBody generates requests for PostGenerateLink with any type of body +func NewPostGenerateLinkRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/generate_link") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +// NewPostInviteRequest calls the generic PostInvite builder with application/json body +func NewPostInviteRequest(server string, body PostInviteJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewPostInviteRequestWithBody(server, "application/json", bodyReader) +} + +// NewPostInviteRequestWithBody generates requests for PostInvite with any type of body +func NewPostInviteRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/invite") + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +func (c *Client) applyEditors(ctx context.Context, req *http.Request, additionalEditors []RequestEditorFn) error { + for _, r := range c.RequestEditors { + if err := r(ctx, req); err != nil { + return err + } + } + for _, r := range additionalEditors { + if err := r(ctx, req); err != nil { + return err + } + } + return nil +} + +// ClientWithResponses builds on ClientInterface to offer response payloads +type ClientWithResponses struct { + ClientInterface +} + +// NewClientWithResponses creates a new ClientWithResponses, which wraps +// Client with return type handling +func NewClientWithResponses(server string, opts ...ClientOption) (*ClientWithResponses, error) { + client, err := NewClient(server, opts...) + if err != nil { + return nil, err + } + return &ClientWithResponses{client}, nil +} + +// WithBaseURL overrides the baseURL. +func WithBaseURL(baseURL string) ClientOption { + return func(c *Client) error { + newBaseURL, err := url.Parse(baseURL) + if err != nil { + return err + } + c.Server = newBaseURL.String() + return nil + } +} + +// ClientWithResponsesInterface is the interface specification for the client with responses above. 
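+//
+// A minimal usage sketch (illustrative only, not generated code; the server
+// URL, ctx, and the token variable are assumptions):
+//
+//	c, err := NewClientWithResponses("http://localhost:9999")
+//	if err != nil {
+//		// handle error
+//	}
+//	resp, err := c.GetAdminUsersWithResponse(ctx, &GetAdminUsersParams{},
+//		func(ctx context.Context, req *http.Request) error {
+//			// Admin endpoints expect a bearer token with admin claims.
+//			req.Header.Set("Authorization", "Bearer "+token)
+//			return nil
+//		})
+//	if err == nil && resp.JSON200 != nil && resp.JSON200.Users != nil {
+//		// resp.JSON200.Users is the decoded *[]UserSchema
+//	}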
+type ClientWithResponsesInterface interface { + // GetAdminAudit request + GetAdminAuditWithResponse(ctx context.Context, params *GetAdminAuditParams, reqEditors ...RequestEditorFn) (*GetAdminAuditResponse, error) + + // GetAdminSsoProviders request + GetAdminSsoProvidersWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetAdminSsoProvidersResponse, error) + + // PostAdminSsoProviders request with any body + PostAdminSsoProvidersWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*PostAdminSsoProvidersResponse, error) + + PostAdminSsoProvidersWithResponse(ctx context.Context, body PostAdminSsoProvidersJSONRequestBody, reqEditors ...RequestEditorFn) (*PostAdminSsoProvidersResponse, error) + + // DeleteAdminSsoProvidersSsoProviderId request + DeleteAdminSsoProvidersSsoProviderIdWithResponse(ctx context.Context, ssoProviderId openapi_types.UUID, reqEditors ...RequestEditorFn) (*DeleteAdminSsoProvidersSsoProviderIdResponse, error) + + // GetAdminSsoProvidersSsoProviderId request + GetAdminSsoProvidersSsoProviderIdWithResponse(ctx context.Context, ssoProviderId openapi_types.UUID, reqEditors ...RequestEditorFn) (*GetAdminSsoProvidersSsoProviderIdResponse, error) + + // PutAdminSsoProvidersSsoProviderId request with any body + PutAdminSsoProvidersSsoProviderIdWithBodyWithResponse(ctx context.Context, ssoProviderId openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*PutAdminSsoProvidersSsoProviderIdResponse, error) + + PutAdminSsoProvidersSsoProviderIdWithResponse(ctx context.Context, ssoProviderId openapi_types.UUID, body PutAdminSsoProvidersSsoProviderIdJSONRequestBody, reqEditors ...RequestEditorFn) (*PutAdminSsoProvidersSsoProviderIdResponse, error) + + // GetAdminUsers request + GetAdminUsersWithResponse(ctx context.Context, params *GetAdminUsersParams, reqEditors ...RequestEditorFn) (*GetAdminUsersResponse, error) + + // DeleteAdminUsersUserId request + DeleteAdminUsersUserIdWithResponse(ctx context.Context, userId openapi_types.UUID, reqEditors ...RequestEditorFn) (*DeleteAdminUsersUserIdResponse, error) + + // GetAdminUsersUserId request + GetAdminUsersUserIdWithResponse(ctx context.Context, userId openapi_types.UUID, reqEditors ...RequestEditorFn) (*GetAdminUsersUserIdResponse, error) + + // PutAdminUsersUserId request with any body + PutAdminUsersUserIdWithBodyWithResponse(ctx context.Context, userId openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*PutAdminUsersUserIdResponse, error) + + PutAdminUsersUserIdWithResponse(ctx context.Context, userId openapi_types.UUID, body PutAdminUsersUserIdJSONRequestBody, reqEditors ...RequestEditorFn) (*PutAdminUsersUserIdResponse, error) + + // GetAdminUsersUserIdFactors request + GetAdminUsersUserIdFactorsWithResponse(ctx context.Context, userId openapi_types.UUID, reqEditors ...RequestEditorFn) (*GetAdminUsersUserIdFactorsResponse, error) + + // DeleteAdminUsersUserIdFactorsFactorId request + DeleteAdminUsersUserIdFactorsFactorIdWithResponse(ctx context.Context, userId openapi_types.UUID, factorId openapi_types.UUID, reqEditors ...RequestEditorFn) (*DeleteAdminUsersUserIdFactorsFactorIdResponse, error) + + // PutAdminUsersUserIdFactorsFactorId request with any body + PutAdminUsersUserIdFactorsFactorIdWithBodyWithResponse(ctx context.Context, userId openapi_types.UUID, factorId openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) 
(*PutAdminUsersUserIdFactorsFactorIdResponse, error) + + PutAdminUsersUserIdFactorsFactorIdWithResponse(ctx context.Context, userId openapi_types.UUID, factorId openapi_types.UUID, body PutAdminUsersUserIdFactorsFactorIdJSONRequestBody, reqEditors ...RequestEditorFn) (*PutAdminUsersUserIdFactorsFactorIdResponse, error) + + // PostGenerateLink request with any body + PostGenerateLinkWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*PostGenerateLinkResponse, error) + + PostGenerateLinkWithResponse(ctx context.Context, body PostGenerateLinkJSONRequestBody, reqEditors ...RequestEditorFn) (*PostGenerateLinkResponse, error) + + // PostInvite request with any body + PostInviteWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*PostInviteResponse, error) + + PostInviteWithResponse(ctx context.Context, body PostInviteJSONRequestBody, reqEditors ...RequestEditorFn) (*PostInviteResponse, error) +} + +type GetAdminAuditResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *[]struct { + CreatedAt *time.Time `json:"created_at,omitempty"` + Id *openapi_types.UUID `json:"id,omitempty"` + IpAddress *string `json:"ip_address,omitempty"` + Payload *struct { + // Action Usually one of these values: + // - login + // - logout + // - invite_accepted + // - user_signedup + // - user_invited + // - user_deleted + // - user_modified + // - user_recovery_requested + // - user_reauthenticate_requested + // - user_confirmation_requested + // - user_repeated_signup + // - user_updated_password + // - token_revoked + // - token_refreshed + // - generate_recovery_codes + // - factor_in_progress + // - factor_unenrolled + // - challenge_created + // - verification_attempted + // - factor_deleted + // - recovery_codes_deleted + // - factor_updated + // - mfa_code_login + Action *string `json:"action,omitempty"` + ActorId *string `json:"actor_id,omitempty"` + ActorName *string `json:"actor_name,omitempty"` + ActorUsername *string `json:"actor_username,omitempty"` + + // LogType Usually one of these values: + // - account + // - team + // - token + // - user + // - factor + // - recovery_codes + LogType *string `json:"log_type,omitempty"` + Traits *map[string]interface{} `json:"traits,omitempty"` + } `json:"payload,omitempty"` + } + JSON401 *ErrorSchema + JSON403 *ErrorSchema +} + +// Status returns HTTPResponse.Status +func (r GetAdminAuditResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetAdminAuditResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetAdminSsoProvidersResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *struct { + Items *[]SSOProviderSchema `json:"items,omitempty"` + } +} + +// Status returns HTTPResponse.Status +func (r GetAdminSsoProvidersResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetAdminSsoProvidersResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type PostAdminSsoProvidersResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *SSOProviderSchema + JSON400 *ErrorSchema + JSON401 *ErrorSchema + JSON403 *ErrorSchema +} + +// Status 
returns HTTPResponse.Status +func (r PostAdminSsoProvidersResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r PostAdminSsoProvidersResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type DeleteAdminSsoProvidersSsoProviderIdResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *SSOProviderSchema + JSON401 *ErrorSchema + JSON403 *ErrorSchema + JSON404 *ErrorSchema +} + +// Status returns HTTPResponse.Status +func (r DeleteAdminSsoProvidersSsoProviderIdResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r DeleteAdminSsoProvidersSsoProviderIdResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetAdminSsoProvidersSsoProviderIdResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *SSOProviderSchema + JSON401 *ErrorSchema + JSON403 *ErrorSchema + JSON404 *ErrorSchema +} + +// Status returns HTTPResponse.Status +func (r GetAdminSsoProvidersSsoProviderIdResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetAdminSsoProvidersSsoProviderIdResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type PutAdminSsoProvidersSsoProviderIdResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *SSOProviderSchema + JSON400 *ErrorSchema + JSON401 *ErrorSchema + JSON403 *ErrorSchema + JSON404 *ErrorSchema +} + +// Status returns HTTPResponse.Status +func (r PutAdminSsoProvidersSsoProviderIdResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r PutAdminSsoProvidersSsoProviderIdResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetAdminUsersResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *struct { + Aud *string `json:"aud,omitempty"` + Users *[]UserSchema `json:"users,omitempty"` + } + JSON401 *ErrorSchema + JSON403 *ErrorSchema +} + +// Status returns HTTPResponse.Status +func (r GetAdminUsersResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetAdminUsersResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type DeleteAdminUsersUserIdResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *UserSchema + JSON401 *ErrorSchema + JSON403 *ErrorSchema + JSON404 *ErrorSchema +} + +// Status returns HTTPResponse.Status +func (r DeleteAdminUsersUserIdResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r DeleteAdminUsersUserIdResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetAdminUsersUserIdResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *UserSchema + JSON401 *ErrorSchema + 
JSON403 *ErrorSchema + JSON404 *ErrorSchema +} + +// Status returns HTTPResponse.Status +func (r GetAdminUsersUserIdResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetAdminUsersUserIdResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type PutAdminUsersUserIdResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *UserSchema + JSON401 *ErrorSchema + JSON403 *ErrorSchema + JSON404 *ErrorSchema +} + +// Status returns HTTPResponse.Status +func (r PutAdminUsersUserIdResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r PutAdminUsersUserIdResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetAdminUsersUserIdFactorsResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *[]MFAFactorSchema + JSON401 *ErrorSchema + JSON403 *ErrorSchema + JSON404 *ErrorSchema +} + +// Status returns HTTPResponse.Status +func (r GetAdminUsersUserIdFactorsResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetAdminUsersUserIdFactorsResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type DeleteAdminUsersUserIdFactorsFactorIdResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *MFAFactorSchema + JSON401 *ErrorSchema + JSON403 *ErrorSchema + JSON404 *ErrorSchema +} + +// Status returns HTTPResponse.Status +func (r DeleteAdminUsersUserIdFactorsFactorIdResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r DeleteAdminUsersUserIdFactorsFactorIdResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type PutAdminUsersUserIdFactorsFactorIdResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *MFAFactorSchema + JSON401 *ErrorSchema + JSON403 *ErrorSchema + JSON404 *ErrorSchema +} + +// Status returns HTTPResponse.Status +func (r PutAdminUsersUserIdFactorsFactorIdResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r PutAdminUsersUserIdFactorsFactorIdResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type PostGenerateLinkResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *struct { + ActionLink *string `json:"action_link,omitempty"` + EmailOtp *string `json:"email_otp,omitempty"` + HashedToken *string `json:"hashed_token,omitempty"` + RedirectTo *string `json:"redirect_to,omitempty"` + VerificationType *string `json:"verification_type,omitempty"` + AdditionalProperties map[string]interface{} `json:"-"` + } + JSON400 *ErrorSchema + JSON401 *ErrorSchema + JSON403 *ErrorSchema + JSON404 *ErrorSchema + JSON422 *ErrorSchema +} + +// Status returns HTTPResponse.Status +func (r PostGenerateLinkResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return 
http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r PostGenerateLinkResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type PostInviteResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *UserSchema + JSON400 *ErrorSchema + JSON422 *ErrorSchema +} + +// Status returns HTTPResponse.Status +func (r PostInviteResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r PostInviteResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +// GetAdminAuditWithResponse request returning *GetAdminAuditResponse +func (c *ClientWithResponses) GetAdminAuditWithResponse(ctx context.Context, params *GetAdminAuditParams, reqEditors ...RequestEditorFn) (*GetAdminAuditResponse, error) { + rsp, err := c.GetAdminAudit(ctx, params, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetAdminAuditResponse(rsp) +} + +// GetAdminSsoProvidersWithResponse request returning *GetAdminSsoProvidersResponse +func (c *ClientWithResponses) GetAdminSsoProvidersWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetAdminSsoProvidersResponse, error) { + rsp, err := c.GetAdminSsoProviders(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetAdminSsoProvidersResponse(rsp) +} + +// PostAdminSsoProvidersWithBodyWithResponse request with arbitrary body returning *PostAdminSsoProvidersResponse +func (c *ClientWithResponses) PostAdminSsoProvidersWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*PostAdminSsoProvidersResponse, error) { + rsp, err := c.PostAdminSsoProvidersWithBody(ctx, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParsePostAdminSsoProvidersResponse(rsp) +} + +func (c *ClientWithResponses) PostAdminSsoProvidersWithResponse(ctx context.Context, body PostAdminSsoProvidersJSONRequestBody, reqEditors ...RequestEditorFn) (*PostAdminSsoProvidersResponse, error) { + rsp, err := c.PostAdminSsoProviders(ctx, body, reqEditors...) + if err != nil { + return nil, err + } + return ParsePostAdminSsoProvidersResponse(rsp) +} + +// DeleteAdminSsoProvidersSsoProviderIdWithResponse request returning *DeleteAdminSsoProvidersSsoProviderIdResponse +func (c *ClientWithResponses) DeleteAdminSsoProvidersSsoProviderIdWithResponse(ctx context.Context, ssoProviderId openapi_types.UUID, reqEditors ...RequestEditorFn) (*DeleteAdminSsoProvidersSsoProviderIdResponse, error) { + rsp, err := c.DeleteAdminSsoProvidersSsoProviderId(ctx, ssoProviderId, reqEditors...) + if err != nil { + return nil, err + } + return ParseDeleteAdminSsoProvidersSsoProviderIdResponse(rsp) +} + +// GetAdminSsoProvidersSsoProviderIdWithResponse request returning *GetAdminSsoProvidersSsoProviderIdResponse +func (c *ClientWithResponses) GetAdminSsoProvidersSsoProviderIdWithResponse(ctx context.Context, ssoProviderId openapi_types.UUID, reqEditors ...RequestEditorFn) (*GetAdminSsoProvidersSsoProviderIdResponse, error) { + rsp, err := c.GetAdminSsoProvidersSsoProviderId(ctx, ssoProviderId, reqEditors...) 
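+	// Every *WithResponse wrapper follows this shape: issue the raw request
+	// through the embedded ClientInterface, then decode status-specific JSON
+	// bodies with the matching Parse* helper before returning.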
+ if err != nil { + return nil, err + } + return ParseGetAdminSsoProvidersSsoProviderIdResponse(rsp) +} + +// PutAdminSsoProvidersSsoProviderIdWithBodyWithResponse request with arbitrary body returning *PutAdminSsoProvidersSsoProviderIdResponse +func (c *ClientWithResponses) PutAdminSsoProvidersSsoProviderIdWithBodyWithResponse(ctx context.Context, ssoProviderId openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*PutAdminSsoProvidersSsoProviderIdResponse, error) { + rsp, err := c.PutAdminSsoProvidersSsoProviderIdWithBody(ctx, ssoProviderId, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParsePutAdminSsoProvidersSsoProviderIdResponse(rsp) +} + +func (c *ClientWithResponses) PutAdminSsoProvidersSsoProviderIdWithResponse(ctx context.Context, ssoProviderId openapi_types.UUID, body PutAdminSsoProvidersSsoProviderIdJSONRequestBody, reqEditors ...RequestEditorFn) (*PutAdminSsoProvidersSsoProviderIdResponse, error) { + rsp, err := c.PutAdminSsoProvidersSsoProviderId(ctx, ssoProviderId, body, reqEditors...) + if err != nil { + return nil, err + } + return ParsePutAdminSsoProvidersSsoProviderIdResponse(rsp) +} + +// GetAdminUsersWithResponse request returning *GetAdminUsersResponse +func (c *ClientWithResponses) GetAdminUsersWithResponse(ctx context.Context, params *GetAdminUsersParams, reqEditors ...RequestEditorFn) (*GetAdminUsersResponse, error) { + rsp, err := c.GetAdminUsers(ctx, params, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetAdminUsersResponse(rsp) +} + +// DeleteAdminUsersUserIdWithResponse request returning *DeleteAdminUsersUserIdResponse +func (c *ClientWithResponses) DeleteAdminUsersUserIdWithResponse(ctx context.Context, userId openapi_types.UUID, reqEditors ...RequestEditorFn) (*DeleteAdminUsersUserIdResponse, error) { + rsp, err := c.DeleteAdminUsersUserId(ctx, userId, reqEditors...) + if err != nil { + return nil, err + } + return ParseDeleteAdminUsersUserIdResponse(rsp) +} + +// GetAdminUsersUserIdWithResponse request returning *GetAdminUsersUserIdResponse +func (c *ClientWithResponses) GetAdminUsersUserIdWithResponse(ctx context.Context, userId openapi_types.UUID, reqEditors ...RequestEditorFn) (*GetAdminUsersUserIdResponse, error) { + rsp, err := c.GetAdminUsersUserId(ctx, userId, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetAdminUsersUserIdResponse(rsp) +} + +// PutAdminUsersUserIdWithBodyWithResponse request with arbitrary body returning *PutAdminUsersUserIdResponse +func (c *ClientWithResponses) PutAdminUsersUserIdWithBodyWithResponse(ctx context.Context, userId openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*PutAdminUsersUserIdResponse, error) { + rsp, err := c.PutAdminUsersUserIdWithBody(ctx, userId, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParsePutAdminUsersUserIdResponse(rsp) +} + +func (c *ClientWithResponses) PutAdminUsersUserIdWithResponse(ctx context.Context, userId openapi_types.UUID, body PutAdminUsersUserIdJSONRequestBody, reqEditors ...RequestEditorFn) (*PutAdminUsersUserIdResponse, error) { + rsp, err := c.PutAdminUsersUserId(ctx, userId, body, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParsePutAdminUsersUserIdResponse(rsp) +} + +// GetAdminUsersUserIdFactorsWithResponse request returning *GetAdminUsersUserIdFactorsResponse +func (c *ClientWithResponses) GetAdminUsersUserIdFactorsWithResponse(ctx context.Context, userId openapi_types.UUID, reqEditors ...RequestEditorFn) (*GetAdminUsersUserIdFactorsResponse, error) { + rsp, err := c.GetAdminUsersUserIdFactors(ctx, userId, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetAdminUsersUserIdFactorsResponse(rsp) +} + +// DeleteAdminUsersUserIdFactorsFactorIdWithResponse request returning *DeleteAdminUsersUserIdFactorsFactorIdResponse +func (c *ClientWithResponses) DeleteAdminUsersUserIdFactorsFactorIdWithResponse(ctx context.Context, userId openapi_types.UUID, factorId openapi_types.UUID, reqEditors ...RequestEditorFn) (*DeleteAdminUsersUserIdFactorsFactorIdResponse, error) { + rsp, err := c.DeleteAdminUsersUserIdFactorsFactorId(ctx, userId, factorId, reqEditors...) + if err != nil { + return nil, err + } + return ParseDeleteAdminUsersUserIdFactorsFactorIdResponse(rsp) +} + +// PutAdminUsersUserIdFactorsFactorIdWithBodyWithResponse request with arbitrary body returning *PutAdminUsersUserIdFactorsFactorIdResponse +func (c *ClientWithResponses) PutAdminUsersUserIdFactorsFactorIdWithBodyWithResponse(ctx context.Context, userId openapi_types.UUID, factorId openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*PutAdminUsersUserIdFactorsFactorIdResponse, error) { + rsp, err := c.PutAdminUsersUserIdFactorsFactorIdWithBody(ctx, userId, factorId, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParsePutAdminUsersUserIdFactorsFactorIdResponse(rsp) +} + +func (c *ClientWithResponses) PutAdminUsersUserIdFactorsFactorIdWithResponse(ctx context.Context, userId openapi_types.UUID, factorId openapi_types.UUID, body PutAdminUsersUserIdFactorsFactorIdJSONRequestBody, reqEditors ...RequestEditorFn) (*PutAdminUsersUserIdFactorsFactorIdResponse, error) { + rsp, err := c.PutAdminUsersUserIdFactorsFactorId(ctx, userId, factorId, body, reqEditors...) + if err != nil { + return nil, err + } + return ParsePutAdminUsersUserIdFactorsFactorIdResponse(rsp) +} + +// PostGenerateLinkWithBodyWithResponse request with arbitrary body returning *PostGenerateLinkResponse +func (c *ClientWithResponses) PostGenerateLinkWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*PostGenerateLinkResponse, error) { + rsp, err := c.PostGenerateLinkWithBody(ctx, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParsePostGenerateLinkResponse(rsp) +} + +func (c *ClientWithResponses) PostGenerateLinkWithResponse(ctx context.Context, body PostGenerateLinkJSONRequestBody, reqEditors ...RequestEditorFn) (*PostGenerateLinkResponse, error) { + rsp, err := c.PostGenerateLink(ctx, body, reqEditors...) + if err != nil { + return nil, err + } + return ParsePostGenerateLinkResponse(rsp) +} + +// PostInviteWithBodyWithResponse request with arbitrary body returning *PostInviteResponse +func (c *ClientWithResponses) PostInviteWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*PostInviteResponse, error) { + rsp, err := c.PostInviteWithBody(ctx, contentType, body, reqEditors...) 
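+	// contentType is forwarded verbatim, so callers can send any encoding here;
+	// the typed PostInviteWithResponse variant instead marshals its body and
+	// sets "application/json" via NewPostInviteRequest.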
+ if err != nil { + return nil, err + } + return ParsePostInviteResponse(rsp) +} + +func (c *ClientWithResponses) PostInviteWithResponse(ctx context.Context, body PostInviteJSONRequestBody, reqEditors ...RequestEditorFn) (*PostInviteResponse, error) { + rsp, err := c.PostInvite(ctx, body, reqEditors...) + if err != nil { + return nil, err + } + return ParsePostInviteResponse(rsp) +} + +// ParseGetAdminAuditResponse parses an HTTP response from a GetAdminAuditWithResponse call +func ParseGetAdminAuditResponse(rsp *http.Response) (*GetAdminAuditResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetAdminAuditResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest []struct { + CreatedAt *time.Time `json:"created_at,omitempty"` + Id *openapi_types.UUID `json:"id,omitempty"` + IpAddress *string `json:"ip_address,omitempty"` + Payload *struct { + // Action Usually one of these values: + // - login + // - logout + // - invite_accepted + // - user_signedup + // - user_invited + // - user_deleted + // - user_modified + // - user_recovery_requested + // - user_reauthenticate_requested + // - user_confirmation_requested + // - user_repeated_signup + // - user_updated_password + // - token_revoked + // - token_refreshed + // - generate_recovery_codes + // - factor_in_progress + // - factor_unenrolled + // - challenge_created + // - verification_attempted + // - factor_deleted + // - recovery_codes_deleted + // - factor_updated + // - mfa_code_login + Action *string `json:"action,omitempty"` + ActorId *string `json:"actor_id,omitempty"` + ActorName *string `json:"actor_name,omitempty"` + ActorUsername *string `json:"actor_username,omitempty"` + + // LogType Usually one of these values: + // - account + // - team + // - token + // - user + // - factor + // - recovery_codes + LogType *string `json:"log_type,omitempty"` + Traits *map[string]interface{} `json:"traits,omitempty"` + } `json:"payload,omitempty"` + } + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + } + + return response, nil +} + +// ParseGetAdminSsoProvidersResponse parses an HTTP response from a GetAdminSsoProvidersWithResponse call +func ParseGetAdminSsoProvidersResponse(rsp *http.Response) (*GetAdminSsoProvidersResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetAdminSsoProvidersResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest struct { + Items *[]SSOProviderSchema `json:"items,omitempty"` + } + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + } + + return response, nil +} + +// ParsePostAdminSsoProvidersResponse parses an HTTP response from 
a PostAdminSsoProvidersWithResponse call +func ParsePostAdminSsoProvidersResponse(rsp *http.Response) (*PostAdminSsoProvidersResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &PostAdminSsoProvidersResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest SSOProviderSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + } + + return response, nil +} + +// ParseDeleteAdminSsoProvidersSsoProviderIdResponse parses an HTTP response from a DeleteAdminSsoProvidersSsoProviderIdWithResponse call +func ParseDeleteAdminSsoProvidersSsoProviderIdResponse(rsp *http.Response) (*DeleteAdminSsoProvidersSsoProviderIdResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &DeleteAdminSsoProvidersSsoProviderIdResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest SSOProviderSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + } + + return response, nil +} + +// ParseGetAdminSsoProvidersSsoProviderIdResponse parses an HTTP response from a GetAdminSsoProvidersSsoProviderIdWithResponse call +func ParseGetAdminSsoProvidersSsoProviderIdResponse(rsp *http.Response) (*GetAdminSsoProvidersSsoProviderIdResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetAdminSsoProvidersSsoProviderIdResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest SSOProviderSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && 
rsp.StatusCode == 401: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + } + + return response, nil +} + +// ParsePutAdminSsoProvidersSsoProviderIdResponse parses an HTTP response from a PutAdminSsoProvidersSsoProviderIdWithResponse call +func ParsePutAdminSsoProvidersSsoProviderIdResponse(rsp *http.Response) (*PutAdminSsoProvidersSsoProviderIdResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &PutAdminSsoProvidersSsoProviderIdResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest SSOProviderSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + } + + return response, nil +} + +// ParseGetAdminUsersResponse parses an HTTP response from a GetAdminUsersWithResponse call +func ParseGetAdminUsersResponse(rsp *http.Response) (*GetAdminUsersResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetAdminUsersResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest struct { + Aud *string `json:"aud,omitempty"` + Users *[]UserSchema `json:"users,omitempty"` + } + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + } + + return response, nil +} + +// ParseDeleteAdminUsersUserIdResponse parses an HTTP response from a 
DeleteAdminUsersUserIdWithResponse call +func ParseDeleteAdminUsersUserIdResponse(rsp *http.Response) (*DeleteAdminUsersUserIdResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &DeleteAdminUsersUserIdResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest UserSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + } + + return response, nil +} + +// ParseGetAdminUsersUserIdResponse parses an HTTP response from a GetAdminUsersUserIdWithResponse call +func ParseGetAdminUsersUserIdResponse(rsp *http.Response) (*GetAdminUsersUserIdResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetAdminUsersUserIdResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest UserSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + } + + return response, nil +} + +// ParsePutAdminUsersUserIdResponse parses an HTTP response from a PutAdminUsersUserIdWithResponse call +func ParsePutAdminUsersUserIdResponse(rsp *http.Response) (*PutAdminUsersUserIdResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &PutAdminUsersUserIdResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest UserSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case 
strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + } + + return response, nil +} + +// ParseGetAdminUsersUserIdFactorsResponse parses an HTTP response from a GetAdminUsersUserIdFactorsWithResponse call +func ParseGetAdminUsersUserIdFactorsResponse(rsp *http.Response) (*GetAdminUsersUserIdFactorsResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetAdminUsersUserIdFactorsResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest []MFAFactorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + } + + return response, nil +} + +// ParseDeleteAdminUsersUserIdFactorsFactorIdResponse parses an HTTP response from a DeleteAdminUsersUserIdFactorsFactorIdWithResponse call +func ParseDeleteAdminUsersUserIdFactorsFactorIdResponse(rsp *http.Response) (*DeleteAdminUsersUserIdFactorsFactorIdResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &DeleteAdminUsersUserIdFactorsFactorIdResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest MFAFactorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + } + + return response, nil +} + +// ParsePutAdminUsersUserIdFactorsFactorIdResponse parses an HTTP response from a PutAdminUsersUserIdFactorsFactorIdWithResponse call +func ParsePutAdminUsersUserIdFactorsFactorIdResponse(rsp *http.Response) 
(*PutAdminUsersUserIdFactorsFactorIdResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &PutAdminUsersUserIdFactorsFactorIdResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest MFAFactorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + } + + return response, nil +} + +// ParsePostGenerateLinkResponse parses an HTTP response from a PostGenerateLinkWithResponse call +func ParsePostGenerateLinkResponse(rsp *http.Response) (*PostGenerateLinkResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &PostGenerateLinkResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest struct { + ActionLink *string `json:"action_link,omitempty"` + EmailOtp *string `json:"email_otp,omitempty"` + HashedToken *string `json:"hashed_token,omitempty"` + RedirectTo *string `json:"redirect_to,omitempty"` + VerificationType *string `json:"verification_type,omitempty"` + AdditionalProperties map[string]interface{} `json:"-"` + } + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 422: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON422 = &dest + + } + + return response, nil +} + +// ParsePostInviteResponse parses an HTTP response from a PostInviteWithResponse call +func ParsePostInviteResponse(rsp *http.Response) (*PostInviteResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + 
defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &PostInviteResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest UserSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 422: + var dest ErrorSchema + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON422 = &dest + + } + + return response, nil +} diff --git a/auth_v2.169.0/client/admin/gen.go b/auth_v2.169.0/client/admin/gen.go new file mode 100644 index 0000000..c0cf6e3 --- /dev/null +++ b/auth_v2.169.0/client/admin/gen.go @@ -0,0 +1,3 @@ +package admin + +//go:generate oapi-codegen -config ./oapi-codegen.yaml ../../openapi.yaml diff --git a/auth_v2.169.0/client/admin/oapi-codegen.yaml b/auth_v2.169.0/client/admin/oapi-codegen.yaml new file mode 100644 index 0000000..a3aa634 --- /dev/null +++ b/auth_v2.169.0/client/admin/oapi-codegen.yaml @@ -0,0 +1,7 @@ +package: admin +generate: + - client + - types +include-tags: + - admin +output: client.go diff --git a/auth_v2.169.0/cmd/admin_cmd.go b/auth_v2.169.0/cmd/admin_cmd.go new file mode 100644 index 0000000..7997bb5 --- /dev/null +++ b/auth_v2.169.0/cmd/admin_cmd.go @@ -0,0 +1,131 @@ +package cmd + +import ( + "github.com/gofrs/uuid" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +var autoconfirm, isAdmin bool +var audience string + +func getAudience(c *conf.GlobalConfiguration) string { + if audience == "" { + return c.JWT.Aud + } + + return audience +} + +func adminCmd() *cobra.Command { + var adminCmd = &cobra.Command{ + Use: "admin", + } + + adminCmd.AddCommand(&adminCreateUserCmd, &adminDeleteUserCmd) + adminCmd.PersistentFlags().StringVarP(&audience, "aud", "a", "", "Set the new user's audience") + + adminCreateUserCmd.Flags().BoolVar(&autoconfirm, "confirm", false, "Automatically confirm user without sending an email") + adminCreateUserCmd.Flags().BoolVar(&isAdmin, "admin", false, "Create user with admin privileges") + + return adminCmd +} + +var adminCreateUserCmd = cobra.Command{ + Use: "createuser", + Run: func(cmd *cobra.Command, args []string) { + if len(args) < 2 { + logrus.Fatal("Not enough arguments to createuser command. Expected at least email and password values") + return + } + + execWithConfigAndArgs(cmd, adminCreateUser, args) + }, +} + +var adminDeleteUserCmd = cobra.Command{ + Use: "deleteuser", + Run: func(cmd *cobra.Command, args []string) { + if len(args) < 1 { + logrus.Fatal("Not enough arguments to deleteuser command. 
Expected at least ID or email")
+			return
+		}
+
+		execWithConfigAndArgs(cmd, adminDeleteUser, args)
+	},
+}
+
+func adminCreateUser(config *conf.GlobalConfiguration, args []string) {
+	db, err := storage.Dial(config)
+	if err != nil {
+		logrus.Fatalf("Error opening database: %+v", err)
+	}
+	defer db.Close()
+
+	aud := getAudience(config)
+	if user, err := models.IsDuplicatedEmail(db, args[0], aud, nil); user != nil {
+		logrus.Fatalf("Error creating new user: user already exists")
+	} else if err != nil {
+		logrus.Fatalf("Error checking user email: %+v", err)
+	}
+
+	user, err := models.NewUser("", args[0], args[1], aud, nil)
+	if err != nil {
+		logrus.Fatalf("Error creating new user: %+v", err)
+	}
+
+	err = db.Transaction(func(tx *storage.Connection) error {
+		var terr error
+		if terr = tx.Create(user); terr != nil {
+			return terr
+		}
+
+		if len(args) > 2 {
+			if terr = user.SetRole(tx, args[2]); terr != nil {
+				return terr
+			}
+		} else if isAdmin {
+			if terr = user.SetRole(tx, config.JWT.AdminGroupName); terr != nil {
+				return terr
+			}
+		}
+
+		if config.Mailer.Autoconfirm || autoconfirm {
+			if terr = user.Confirm(tx); terr != nil {
+				return terr
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		logrus.Fatalf("Unable to create user (%s): %+v", args[0], err)
+	}
+
+	logrus.Infof("Created user: %s", args[0])
+}
+
+func adminDeleteUser(config *conf.GlobalConfiguration, args []string) {
+	db, err := storage.Dial(config)
+	if err != nil {
+		logrus.Fatalf("Error opening database: %+v", err)
+	}
+	defer db.Close()
+
+	user, err := models.FindUserByEmailAndAudience(db, args[0], getAudience(config))
+	if err != nil {
+		userID := uuid.Must(uuid.FromString(args[0]))
+		user, err = models.FindUserByID(db, userID)
+		if err != nil {
+			logrus.Fatalf("Error finding user (%s): %+v", userID, err)
+		}
+	}
+
+	if err = db.Destroy(user); err != nil {
+		logrus.Fatalf("Error removing user (%s): %+v", args[0], err)
+	}
+
+	logrus.Infof("Removed user: %s", args[0])
+}
diff --git a/auth_v2.169.0/cmd/migrate_cmd.go b/auth_v2.169.0/cmd/migrate_cmd.go
new file mode 100644
index 0000000..e0251d6
--- /dev/null
+++ b/auth_v2.169.0/cmd/migrate_cmd.go
@@ -0,0 +1,117 @@
+package cmd
+
+import (
+	"embed"
+	"fmt"
+	"net/url"
+	"os"
+
+	"github.com/gobuffalo/pop/v6"
+	"github.com/gobuffalo/pop/v6/logging"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+)
+
+var EmbeddedMigrations embed.FS
+
+var migrateCmd = cobra.Command{
+	Use:  "migrate",
+	Long: "Migrate database structures. 
This will create new tables and add missing columns and indexes.", + Run: migrate, +} + +func migrate(cmd *cobra.Command, args []string) { + globalConfig := loadGlobalConfig(cmd.Context()) + + if globalConfig.DB.Driver == "" && globalConfig.DB.URL != "" { + u, err := url.Parse(globalConfig.DB.URL) + if err != nil { + logrus.Fatalf("%+v", errors.Wrap(err, "parsing db connection url")) + } + globalConfig.DB.Driver = u.Scheme + } + + log := logrus.StandardLogger() + + pop.Debug = false + if globalConfig.Logging.Level != "" { + level, err := logrus.ParseLevel(globalConfig.Logging.Level) + if err != nil { + log.Fatalf("Failed to parse log level: %+v", err) + } + log.SetLevel(level) + if level == logrus.DebugLevel { + // Set to true to display query info + pop.Debug = true + } + if level != logrus.DebugLevel { + var noopLogger = func(lvl logging.Level, s string, args ...interface{}) { + } + // Hide pop migration logging + pop.SetLogger(noopLogger) + } + } + + u, _ := url.Parse(globalConfig.DB.URL) + processedUrl := globalConfig.DB.URL + if len(u.Query()) != 0 { + processedUrl = fmt.Sprintf("%s&application_name=gotrue_migrations", processedUrl) + } else { + processedUrl = fmt.Sprintf("%s?application_name=gotrue_migrations", processedUrl) + } + deets := &pop.ConnectionDetails{ + Dialect: globalConfig.DB.Driver, + URL: processedUrl, + } + deets.Options = map[string]string{ + "migration_table_name": "schema_migrations", + "Namespace": globalConfig.DB.Namespace, + } + + db, err := pop.NewConnection(deets) + if err != nil { + log.Fatalf("%+v", errors.Wrap(err, "opening db connection")) + } + defer db.Close() + + if err := db.Open(); err != nil { + log.Fatalf("%+v", errors.Wrap(err, "checking database connection")) + } + + log.Debugf("Reading migrations from executable") + box, err := pop.NewMigrationBox(EmbeddedMigrations, db) + if err != nil { + log.Fatalf("%+v", errors.Wrap(err, "creating db migrator")) + } + + mig := box.Migrator + + log.Debugf("before status") + + if log.Level == logrus.DebugLevel { + err = mig.Status(os.Stdout) + if err != nil { + log.Fatalf("%+v", errors.Wrap(err, "migration status")) + } + } + + // turn off schema dump + mig.SchemaPath = "" + + err = mig.Up() + if err != nil { + log.Fatalf("%v", errors.Wrap(err, "running db migrations")) + } else { + log.Infof("GoTrue migrations applied successfully") + } + + log.Debugf("after status") + + if log.Level == logrus.DebugLevel { + err = mig.Status(os.Stdout) + if err != nil { + log.Fatalf("%+v", errors.Wrap(err, "migration status")) + } + } +} diff --git a/auth_v2.169.0/cmd/root_cmd.go b/auth_v2.169.0/cmd/root_cmd.go new file mode 100644 index 0000000..e8783d4 --- /dev/null +++ b/auth_v2.169.0/cmd/root_cmd.go @@ -0,0 +1,63 @@ +package cmd + +import ( + "context" + + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/observability" +) + +var ( + configFile = "" + watchDir = "" +) + +var rootCmd = cobra.Command{ + Use: "gotrue", + Run: func(cmd *cobra.Command, args []string) { + migrate(cmd, args) + serve(cmd.Context()) + }, +} + +// RootCommand will setup and return the root command +func RootCommand() *cobra.Command { + rootCmd.AddCommand(&serveCmd, &migrateCmd, &versionCmd, adminCmd()) + rootCmd.PersistentFlags().StringVarP(&configFile, "config", "c", "", "base configuration file to load") + rootCmd.PersistentFlags().StringVarP(&watchDir, "config-dir", "d", "", "directory containing a sorted list of config files to watch for changes") + return 
&rootCmd +} + +func loadGlobalConfig(ctx context.Context) *conf.GlobalConfiguration { + if ctx == nil { + panic("context must not be nil") + } + + config, err := conf.LoadGlobal(configFile) + if err != nil { + logrus.Fatalf("Failed to load configuration: %+v", err) + } + + if err := observability.ConfigureLogging(&config.Logging); err != nil { + logrus.WithError(err).Error("unable to configure logging") + } + + if err := observability.ConfigureTracing(ctx, &config.Tracing); err != nil { + logrus.WithError(err).Error("unable to configure tracing") + } + + if err := observability.ConfigureMetrics(ctx, &config.Metrics); err != nil { + logrus.WithError(err).Error("unable to configure metrics") + } + + if err := observability.ConfigureProfiler(ctx, &config.Profiler); err != nil { + logrus.WithError(err).Error("unable to configure profiler") + } + return config +} + +func execWithConfigAndArgs(cmd *cobra.Command, fn func(config *conf.GlobalConfiguration, args []string), args []string) { + fn(loadGlobalConfig(cmd.Context()), args) +} diff --git a/auth_v2.169.0/cmd/serve_cmd.go b/auth_v2.169.0/cmd/serve_cmd.go new file mode 100644 index 0000000..06fa2f5 --- /dev/null +++ b/auth_v2.169.0/cmd/serve_cmd.go @@ -0,0 +1,111 @@ +package cmd + +import ( + "context" + "net" + "net/http" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/supabase/auth/internal/api" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/reloader" + "github.com/supabase/auth/internal/storage" + "github.com/supabase/auth/internal/utilities" +) + +var serveCmd = cobra.Command{ + Use: "serve", + Long: "Start API server", + Run: func(cmd *cobra.Command, args []string) { + serve(cmd.Context()) + }, +} + +func serve(ctx context.Context) { + if err := conf.LoadFile(configFile); err != nil { + logrus.WithError(err).Fatal("unable to load config") + } + + if err := conf.LoadDirectory(watchDir); err != nil { + logrus.WithError(err).Fatal("unable to load config from watch dir") + } + + config, err := conf.LoadGlobalFromEnv() + if err != nil { + logrus.WithError(err).Fatal("unable to load config") + } + + db, err := storage.Dial(config) + if err != nil { + logrus.Fatalf("error opening database: %+v", err) + } + defer db.Close() + + addr := net.JoinHostPort(config.API.Host, config.API.Port) + logrus.Infof("GoTrue API started on: %s", addr) + + opts := []api.Option{ + api.NewLimiterOptions(config), + } + a := api.NewAPIWithVersion(config, db, utilities.Version, opts...) + ah := reloader.NewAtomicHandler(a) + + baseCtx, baseCancel := context.WithCancel(context.Background()) + defer baseCancel() + + httpSrv := &http.Server{ + Addr: addr, + Handler: ah, + ReadHeaderTimeout: 2 * time.Second, // to mitigate a Slowloris attack + BaseContext: func(net.Listener) context.Context { + return baseCtx + }, + } + log := logrus.WithField("component", "api") + + var wg sync.WaitGroup + defer wg.Wait() // Do not return to caller until this goroutine is done. + + if watchDir != "" { + wg.Add(1) + go func() { + defer wg.Done() + + fn := func(latestCfg *conf.GlobalConfiguration) { + log.Info("reloading api with new configuration") + latestAPI := api.NewAPIWithVersion( + latestCfg, db, utilities.Version, opts...) 
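+				// Atomically swap in the rebuilt API: requests arriving after
+				// this point are served with the reloaded configuration, while
+				// in-flight requests finish on the previous handler.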
+ ah.Store(latestAPI) + } + + rl := reloader.NewReloader(watchDir) + if err := rl.Watch(ctx, fn); err != nil { + log.WithError(err).Error("watcher is exiting") + } + }() + } + + wg.Add(1) + go func() { + defer wg.Done() + + <-ctx.Done() + + defer baseCancel() // close baseContext + + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), time.Minute) + defer shutdownCancel() + + if err := httpSrv.Shutdown(shutdownCtx); err != nil && !errors.Is(err, context.Canceled) { + log.WithError(err).Error("shutdown failed") + } + }() + + if err := httpSrv.ListenAndServe(); err != http.ErrServerClosed { + log.WithError(err).Fatal("http server listen failed") + } +} diff --git a/auth_v2.169.0/cmd/version_cmd.go b/auth_v2.169.0/cmd/version_cmd.go new file mode 100644 index 0000000..cb555d4 --- /dev/null +++ b/auth_v2.169.0/cmd/version_cmd.go @@ -0,0 +1,17 @@ +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" + "github.com/supabase/auth/internal/utilities" +) + +var versionCmd = cobra.Command{ + Run: showVersion, + Use: "version", +} + +func showVersion(cmd *cobra.Command, args []string) { + fmt.Println(utilities.Version) +} diff --git a/auth_v2.169.0/docker-compose-dev.yml b/auth_v2.169.0/docker-compose-dev.yml new file mode 100644 index 0000000..47ae53d --- /dev/null +++ b/auth_v2.169.0/docker-compose-dev.yml @@ -0,0 +1,34 @@ +version: "3.9" +services: + auth: + container_name: auth + depends_on: + - postgres + build: + context: ./ + dockerfile: Dockerfile.dev + ports: + - '9999:9999' + - '9100:9100' + environment: + - GOTRUE_DB_MIGRATIONS_PATH=/go/src/github.com/supabase/auth/migrations + volumes: + - ./:/go/src/github.com/supabase/auth + command: CompileDaemon --build="make build" --directory=/go/src/github.com/supabase/auth --recursive=true -pattern="(.+\.go|.+\.env)" -exclude=auth -exclude=auth-arm64 -exclude=.env --command="/go/src/github.com/supabase/auth/auth -c=.env.docker" + postgres: + build: + context: . + dockerfile: Dockerfile.postgres.dev + container_name: auth_postgres + ports: + - '5432:5432' + volumes: + - postgres_data:/var/lib/postgresql/data + environment: + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=root + - POSTGRES_DB=postgres + # sets the schema name, this should match the `NAMESPACE` env var set in your .env file + - DB_NAMESPACE=auth +volumes: + postgres_data: diff --git a/auth_v2.169.0/docs/admin.go b/auth_v2.169.0/docs/admin.go new file mode 100644 index 0000000..5c89222 --- /dev/null +++ b/auth_v2.169.0/docs/admin.go @@ -0,0 +1,106 @@ +//lint:file-ignore U1000 ignore go-swagger template +package docs + +import ( + "github.com/supabase/auth/internal/api" +) + +// swagger:route GET /admin/users admin admin-list-users +// List all users. +// security: +// - bearer: +// responses: +// 200: adminListUserResponse +// 401: unauthorizedError + +// The list of users. +// swagger:response adminListUserResponse +type adminListUserResponseWrapper struct { + // in:body + Body api.AdminListUsersResponse +} + +// swagger:route POST /admin/users admin admin-create-user +// Returns the created user. +// security: +// - bearer: +// responses: +// 200: userResponse +// 401: unauthorizedError + +// The user to be created. +// swagger:parameters admin-create-user +type adminUserParamsWrapper struct { + // in:body + Body api.AdminUserParams +} + +// swagger:route GET /admin/user/{user_id} admin admin-get-user +// Get a user. 
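+// Returns the user object for the given user_id.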
+// security:
+//   - bearer:
+// parameters:
+//   + name: user_id
+//     in: path
+//     description: The user's id
+//     required: true
+// responses:
+//   200: userResponse
+//   401: unauthorizedError
+
+// The user specified.
+// swagger:response userResponse
+
+// swagger:route PUT /admin/user/{user_id} admin admin-update-user
+// Update a user.
+// security:
+//   - bearer:
+// parameters:
+//   + name: user_id
+//     in: path
+//     description: The user's id
+//     required: true
+// responses:
+//   200: userResponse
+//   401: unauthorizedError
+
+// The updated user.
+// swagger:response userResponse
+
+// swagger:route DELETE /admin/user/{user_id} admin admin-delete-user
+// Deletes a user.
+// security:
+//   - bearer:
+// parameters:
+//   + name: user_id
+//     in: path
+//     description: The user's id
+//     required: true
+// responses:
+//   200: deleteUserResponse
+//   401: unauthorizedError
+
+// The response returned when a user has been deleted.
+// swagger:response deleteUserResponse
+type deleteUserResponseWrapper struct{}
+
+// swagger:route POST /admin/generate_link admin admin-generate-link
+// Generates an email action link.
+// security:
+//   - bearer:
+// responses:
+//   200: generateLinkResponse
+//   401: unauthorizedError
+
+// swagger:parameters admin-generate-link
+type generateLinkParams struct {
+	// in:body
+	Body api.GenerateLinkParams
+}
+
+// The response object for generate link.
+// swagger:response generateLinkResponse
+type generateLinkResponseWrapper struct {
+	// in:body
+	Body api.GenerateLinkResponse
+}
diff --git a/auth_v2.169.0/docs/doc.go b/auth_v2.169.0/docs/doc.go
new file mode 100644
index 0000000..5d5a148
--- /dev/null
+++ b/auth_v2.169.0/docs/doc.go
@@ -0,0 +1,20 @@
+// Package classification gotrue
+//
+// Documentation of the gotrue API.
+//
+// Schemes: http, https
+// BasePath: /
+// Version: 1.0.0
+// Host: localhost:9999
+//
+// SecurityDefinitions:
+//   bearer:
+//     type: apiKey
+//     name: Authentication
+//     in: header
+//
+// Produces:
+//   - application/json
+//
+// swagger:meta
+package docs
diff --git a/auth_v2.169.0/docs/errors.go b/auth_v2.169.0/docs/errors.go
new file mode 100644
index 0000000..a406445
--- /dev/null
+++ b/auth_v2.169.0/docs/errors.go
@@ -0,0 +1,6 @@
+//lint:file-ignore U1000 ignore go-swagger template
+package docs
+
+// This endpoint requires a bearer token.
+// swagger:response unauthorizedError
+type unauthorizedError struct{}
diff --git a/auth_v2.169.0/docs/health.go b/auth_v2.169.0/docs/health.go
new file mode 100644
index 0000000..3034fad
--- /dev/null
+++ b/auth_v2.169.0/docs/health.go
@@ -0,0 +1,15 @@
+//lint:file-ignore U1000 ignore go-swagger template
+package docs
+
+import "github.com/supabase/auth/internal/api"
+
+// swagger:route GET /health health health
+// The healthcheck endpoint for gotrue. Returns the current gotrue version.
+// responses:
+//   200: healthCheckResponse
+
+// swagger:response healthCheckResponse
+type healthCheckResponseWrapper struct {
+	// in:body
+	Body api.HealthCheckResponse
+}
diff --git a/auth_v2.169.0/docs/invite.go b/auth_v2.169.0/docs/invite.go
new file mode 100644
index 0000000..2775cfb
--- /dev/null
+++ b/auth_v2.169.0/docs/invite.go
@@ -0,0 +1,18 @@
+//lint:file-ignore U1000 ignore go-swagger template
+package docs
+
+import "github.com/supabase/auth/internal/api"
+
+// swagger:route POST /invite invite invite
+// Sends an invite link to the user. 
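+// The invitee's email address is carried in the request body (see api.InviteParams below).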
+// responses: +// 200: inviteResponse + +// swagger:parameters invite +type inviteParamsWrapper struct { + // in:body + Body api.InviteParams +} + +// swagger:response inviteResponse +type inviteResponseWrapper struct{} diff --git a/auth_v2.169.0/docs/logout.go b/auth_v2.169.0/docs/logout.go new file mode 100644 index 0000000..f0b1125 --- /dev/null +++ b/auth_v2.169.0/docs/logout.go @@ -0,0 +1,12 @@ +//lint:file-ignore U1000 ignore go-swagger template +package docs + +// swagger:route POST /logout logout logout +// Logs out the user. +// security: +// - bearer: +// responses: +// 204: logoutResponse + +// swagger:response logoutResponse +type logoutResponseWrapper struct{} diff --git a/auth_v2.169.0/docs/oauth.go b/auth_v2.169.0/docs/oauth.go new file mode 100644 index 0000000..9b3c223 --- /dev/null +++ b/auth_v2.169.0/docs/oauth.go @@ -0,0 +1,25 @@ +//lint:file-ignore U1000 ignore go-swagger template +package docs + +// swagger:route GET /authorize oauth authorize +// Redirects the user to the 3rd-party OAuth provider to start the OAuth1.0 or OAuth2.0 authentication process. +// parameters: +// + name: redirect_to +// in: query +// description: The redirect url to return the user to after the `/callback` endpoint has completed. +// required: false +// responses: +// 302: authorizeResponse + +// Redirects user to the 3rd-party OAuth provider +// swagger:response authorizeResponse +type authorizeResponseWrapper struct{} + +// swagger:route GET /callback oauth callback +// Receives the redirect from an external provider during the OAuth authentication process. Starts the process of creating an access and refresh token. +// responses: +// 302: callbackResponse + +// Redirects user to the redirect url specified in `/authorize`. If no `redirect_url` is provided, the user will be redirected to the `SITE_URL`. +// swagger:response callbackResponse +type callbackResponseWrapper struct{} diff --git a/auth_v2.169.0/docs/otp.go b/auth_v2.169.0/docs/otp.go new file mode 100644 index 0000000..a62fa07 --- /dev/null +++ b/auth_v2.169.0/docs/otp.go @@ -0,0 +1,19 @@ +//lint:file-ignore U1000 ignore go-swagger template +package docs + +import "github.com/supabase/auth/internal/api" + +// swagger:route POST /otp otp otp +// Passwordless sign-in method for email or phone. +// responses: +// 200: otpResponse + +// swagger:parameters otp +type otpParamsWrapper struct { + // Only an email or phone should be provided. + // in:body + Body api.OtpParams +} + +// swagger:response otpResponse +type otpResponseWrapper struct{} diff --git a/auth_v2.169.0/docs/recover.go b/auth_v2.169.0/docs/recover.go new file mode 100644 index 0000000..1bd249a --- /dev/null +++ b/auth_v2.169.0/docs/recover.go @@ -0,0 +1,18 @@ +//lint:file-ignore U1000 ignore go-swagger template +package docs + +import "github.com/supabase/auth/internal/api" + +// swagger:route POST /recover recovery recovery +// Sends a password recovery email link to the user's email. 
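+// The target email address is carried in the request body (see api.RecoverParams below).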
+// responses: +// 200: recoveryResponse + +// swagger:parameters recovery +type recoveryParamsWrapper struct { + // in:body + Body api.RecoverParams +} + +// swagger:response recoveryResponse +type recoveryResponseWrapper struct{} diff --git a/auth_v2.169.0/docs/settings.go b/auth_v2.169.0/docs/settings.go new file mode 100644 index 0000000..ff5d4ed --- /dev/null +++ b/auth_v2.169.0/docs/settings.go @@ -0,0 +1,15 @@ +//lint:file-ignore U1000 ignore go-swagger template +package docs + +import "github.com/supabase/auth/internal/api" + +// swagger:route GET /settings settings settings +// Returns the configuration settings for the gotrue server. +// responses: +// 200: settingsResponse + +// swagger:response settingsResponse +type settingsResponseWrapper struct { + // in:body + Body api.Settings +} diff --git a/auth_v2.169.0/docs/signup.go b/auth_v2.169.0/docs/signup.go new file mode 100644 index 0000000..a69f015 --- /dev/null +++ b/auth_v2.169.0/docs/signup.go @@ -0,0 +1,17 @@ +//lint:file-ignore U1000 ignore go-swagger template +package docs + +import ( + "github.com/supabase/auth/internal/api" +) + +// swagger:route POST /signup signup signup +// Password-based signup with either email or phone. +// responses: +// 200: userResponse + +// swagger:parameters signup +type signupParamsWrapper struct { + // in:body + Body api.SignupParams +} diff --git a/auth_v2.169.0/docs/token.go b/auth_v2.169.0/docs/token.go new file mode 100644 index 0000000..b4ae542 --- /dev/null +++ b/auth_v2.169.0/docs/token.go @@ -0,0 +1,34 @@ +//lint:file-ignore U1000 ignore go-swagger template +package docs + +import ( + "github.com/supabase/auth/internal/api" +) + +// swagger:route POST /token?grant_type=password token token-password +// Signs in a user with a password. +// responses: +// 200: tokenResponse + +// swagger:parameters token-password +type tokenPasswordGrantParamsWrapper struct { + // in:body + Body api.PasswordGrantParams +} + +// swagger:route POST /token?grant_type=refresh_token token token-refresh +// Refreshes a user's refresh token. +// responses: +// 200: tokenResponse + +// swagger:parameters token-refresh +type tokenRefreshTokenGrantParamsWrapper struct { + // in:body + Body api.RefreshTokenGrantParams +} + +// swagger:response tokenResponse +type tokenResponseWrapper struct { + // in:body + Body api.AccessTokenResponse +} diff --git a/auth_v2.169.0/docs/user.go b/auth_v2.169.0/docs/user.go new file mode 100644 index 0000000..464abfc --- /dev/null +++ b/auth_v2.169.0/docs/user.go @@ -0,0 +1,37 @@ +//lint:file-ignore U1000 ignore go-swagger template +package docs + +import ( + "github.com/supabase/auth/internal/api" + "github.com/supabase/auth/internal/models" +) + +// swagger:route GET /user user user-get +// Get information for the logged-in user. +// security: +// - bearer: +// responses: +// 200: userResponse +// 401: unauthorizedError + +// The current user. +// swagger:response userResponse +type userResponseWrapper struct { + // in:body + Body models.User +} + +// swagger:route PUT /user user user-put +// Returns the updated user. +// security: +// - bearer: +// responses: +// 200: userResponse +// 401: unauthorizedError + +// The current user. 
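+// The request body carries the fields to change (see api.UserUpdateParams below).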
+// swagger:parameters user-put +type userUpdateParams struct { + // in:body + Body api.UserUpdateParams +} diff --git a/auth_v2.169.0/docs/verify.go b/auth_v2.169.0/docs/verify.go new file mode 100644 index 0000000..3590650 --- /dev/null +++ b/auth_v2.169.0/docs/verify.go @@ -0,0 +1,24 @@ +//lint:file-ignore U1000 ignore go-swagger template +package docs + +import ( + "github.com/supabase/auth/internal/api" +) + +// swagger:route GET /verify verify verify-get +// Verifies a sign up. + +// swagger:parameters verify-get +type verifyGetParamsWrapper struct { + // in:query + api.VerifyParams +} + +// swagger:route POST /verify verify verify-post +// Verifies a sign up. + +// swagger:parameters verify-post +type verifyPostParamsWrapper struct { + // in:body + Body api.VerifyParams +} diff --git a/auth_v2.169.0/example.docker.env b/auth_v2.169.0/example.docker.env new file mode 100644 index 0000000..477a5d1 --- /dev/null +++ b/auth_v2.169.0/example.docker.env @@ -0,0 +1,8 @@ +GOTRUE_SITE_URL="http://localhost:3000" +GOTRUE_JWT_SECRET="" +GOTRUE_DB_MIGRATIONS_PATH=/go/src/github.com/supabase/auth/migrations +GOTRUE_DB_DRIVER=postgres +DATABASE_URL=postgres://supabase_auth_admin:root@postgres:5432/postgres +GOTRUE_API_HOST=0.0.0.0 +API_EXTERNAL_URL="http://localhost:9999" +PORT=9999 diff --git a/auth_v2.169.0/example.env b/auth_v2.169.0/example.env new file mode 100644 index 0000000..e645c96 --- /dev/null +++ b/auth_v2.169.0/example.env @@ -0,0 +1,238 @@ +# General Config +# NOTE: The service_role key is required as an authorization header for /admin endpoints + +GOTRUE_JWT_SECRET="CHANGE-THIS! VERY IMPORTANT!" +GOTRUE_JWT_EXP="3600" +GOTRUE_JWT_AUD="authenticated" +GOTRUE_JWT_DEFAULT_GROUP_NAME="authenticated" +GOTRUE_JWT_ADMIN_ROLES="supabase_admin,service_role" + +# Database & API connection details +GOTRUE_DB_DRIVER="postgres" +DB_NAMESPACE="auth" +DATABASE_URL="postgres://supabase_auth_admin:root@localhost:5432/postgres" +API_EXTERNAL_URL="http://localhost:9999" +GOTRUE_API_HOST="localhost" +PORT="9999" + +# SMTP config (generate credentials for signup to work) +GOTRUE_SMTP_HOST="" +GOTRUE_SMTP_PORT="" +GOTRUE_SMTP_USER="" +GOTRUE_SMTP_MAX_FREQUENCY="5s" +GOTRUE_SMTP_PASS="" +GOTRUE_SMTP_ADMIN_EMAIL="" +GOTRUE_SMTP_SENDER_NAME="" + +# Mailer config +GOTRUE_MAILER_AUTOCONFIRM="true" +GOTRUE_MAILER_URLPATHS_CONFIRMATION="/verify" +GOTRUE_MAILER_URLPATHS_INVITE="/verify" +GOTRUE_MAILER_URLPATHS_RECOVERY="/verify" +GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE="/verify" +GOTRUE_MAILER_SUBJECTS_CONFIRMATION="Confirm Your Email" +GOTRUE_MAILER_SUBJECTS_RECOVERY="Reset Your Password" +GOTRUE_MAILER_SUBJECTS_MAGIC_LINK="Your Magic Link" +GOTRUE_MAILER_SUBJECTS_EMAIL_CHANGE="Confirm Email Change" +GOTRUE_MAILER_SUBJECTS_INVITE="You have been invited" +GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED="true" + +# Custom mailer template config +GOTRUE_MAILER_TEMPLATES_INVITE="" +GOTRUE_MAILER_TEMPLATES_CONFIRMATION="" +GOTRUE_MAILER_TEMPLATES_RECOVERY="" +GOTRUE_MAILER_TEMPLATES_MAGIC_LINK="" +GOTRUE_MAILER_TEMPLATES_EMAIL_CHANGE="" + +# Signup config +GOTRUE_DISABLE_SIGNUP="false" +GOTRUE_SITE_URL="http://localhost:3000" +GOTRUE_EXTERNAL_EMAIL_ENABLED="true" +GOTRUE_EXTERNAL_PHONE_ENABLED="true" +GOTRUE_EXTERNAL_IOS_BUNDLE_ID="com.supabase.auth" + +# Whitelist redirect to URLs here, a comma separated list of URIs (e.g. 
"https://foo.example.com,https://*.foo.example.com,https://bar.example.com") +GOTRUE_URI_ALLOW_LIST="http://localhost:3000" + +# Apple OAuth config +GOTRUE_EXTERNAL_APPLE_ENABLED="false" +GOTRUE_EXTERNAL_APPLE_CLIENT_ID="" +GOTRUE_EXTERNAL_APPLE_SECRET="" +GOTRUE_EXTERNAL_APPLE_REDIRECT_URI="http://localhost:9999/callback" + +# Azure OAuth config +GOTRUE_EXTERNAL_AZURE_ENABLED="false" +GOTRUE_EXTERNAL_AZURE_CLIENT_ID="" +GOTRUE_EXTERNAL_AZURE_SECRET="" +GOTRUE_EXTERNAL_AZURE_REDIRECT_URI="https://localhost:9999/callback" + +# Bitbucket OAuth config +GOTRUE_EXTERNAL_BITBUCKET_ENABLED="false" +GOTRUE_EXTERNAL_BITBUCKET_CLIENT_ID="" +GOTRUE_EXTERNAL_BITBUCKET_SECRET="" +GOTRUE_EXTERNAL_BITBUCKET_REDIRECT_URI="http://localhost:9999/callback" + +# Discord OAuth config +GOTRUE_EXTERNAL_DISCORD_ENABLED="false" +GOTRUE_EXTERNAL_DISCORD_CLIENT_ID="" +GOTRUE_EXTERNAL_DISCORD_SECRET="" +GOTRUE_EXTERNAL_DISCORD_REDIRECT_URI="https://localhost:9999/callback" + +# Facebook OAuth config +GOTRUE_EXTERNAL_FACEBOOK_ENABLED="false" +GOTRUE_EXTERNAL_FACEBOOK_CLIENT_ID="" +GOTRUE_EXTERNAL_FACEBOOK_SECRET="" +GOTRUE_EXTERNAL_FACEBOOK_REDIRECT_URI="https://localhost:9999/callback" + +# Figma OAuth config +GOTRUE_EXTERNAL_FIGMA_ENABLED="false" +GOTRUE_EXTERNAL_FIGMA_CLIENT_ID="" +GOTRUE_EXTERNAL_FIGMA_SECRET="" +GOTRUE_EXTERNAL_FIGMA_REDIRECT_URI="https://localhost:9999/callback" + +# Gitlab OAuth config +GOTRUE_EXTERNAL_GITLAB_ENABLED="false" +GOTRUE_EXTERNAL_GITLAB_CLIENT_ID="" +GOTRUE_EXTERNAL_GITLAB_SECRET="" +GOTRUE_EXTERNAL_GITLAB_REDIRECT_URI="http://localhost:9999/callback" + +# Google OAuth config +GOTRUE_EXTERNAL_GOOGLE_ENABLED="false" +GOTRUE_EXTERNAL_GOOGLE_CLIENT_ID="" +GOTRUE_EXTERNAL_GOOGLE_SECRET="" +GOTRUE_EXTERNAL_GOOGLE_REDIRECT_URI="http://localhost:9999/callback" + +# Github OAuth config +GOTRUE_EXTERNAL_GITHUB_ENABLED="false" +GOTRUE_EXTERNAL_GITHUB_CLIENT_ID="" +GOTRUE_EXTERNAL_GITHUB_SECRET="" +GOTRUE_EXTERNAL_GITHUB_REDIRECT_URI="http://localhost:9999/callback" + +# Kakao OAuth config +GOTRUE_EXTERNAL_KAKAO_ENABLED="false" +GOTRUE_EXTERNAL_KAKAO_CLIENT_ID="" +GOTRUE_EXTERNAL_KAKAO_SECRET="" +GOTRUE_EXTERNAL_KAKAO_REDIRECT_URI="http://localhost:9999/callback" + +# Notion OAuth config +GOTRUE_EXTERNAL_NOTION_ENABLED="false" +GOTRUE_EXTERNAL_NOTION_CLIENT_ID="" +GOTRUE_EXTERNAL_NOTION_SECRET="" +GOTRUE_EXTERNAL_NOTION_REDIRECT_URI="https://localhost:9999/callback" + +# Twitter OAuth1 config +GOTRUE_EXTERNAL_TWITTER_ENABLED="false" +GOTRUE_EXTERNAL_TWITTER_CLIENT_ID="" +GOTRUE_EXTERNAL_TWITTER_SECRET="" +GOTRUE_EXTERNAL_TWITTER_REDIRECT_URI="http://localhost:9999/callback" + +# Twitch OAuth config +GOTRUE_EXTERNAL_TWITCH_ENABLED="false" +GOTRUE_EXTERNAL_TWITCH_CLIENT_ID="" +GOTRUE_EXTERNAL_TWITCH_SECRET="" +GOTRUE_EXTERNAL_TWITCH_REDIRECT_URI="http://localhost:9999/callback" + +# Spotify OAuth config +GOTRUE_EXTERNAL_SPOTIFY_ENABLED="false" +GOTRUE_EXTERNAL_SPOTIFY_CLIENT_ID="" +GOTRUE_EXTERNAL_SPOTIFY_SECRET="" +GOTRUE_EXTERNAL_SPOTIFY_REDIRECT_URI="http://localhost:9999/callback" + +# Keycloak OAuth config +GOTRUE_EXTERNAL_KEYCLOAK_ENABLED="false" +GOTRUE_EXTERNAL_KEYCLOAK_CLIENT_ID="" +GOTRUE_EXTERNAL_KEYCLOAK_SECRET="" +GOTRUE_EXTERNAL_KEYCLOAK_REDIRECT_URI="http://localhost:9999/callback" +GOTRUE_EXTERNAL_KEYCLOAK_URL="https://keycloak.example.com/auth/realms/myrealm" + +# Linkedin OAuth config +GOTRUE_EXTERNAL_LINKEDIN_ENABLED="true" +GOTRUE_EXTERNAL_LINKEDIN_CLIENT_ID="" +GOTRUE_EXTERNAL_LINKEDIN_SECRET="" + +# Slack OAuth config +GOTRUE_EXTERNAL_SLACK_ENABLED="false" 
+GOTRUE_EXTERNAL_SLACK_CLIENT_ID="" +GOTRUE_EXTERNAL_SLACK_SECRET="" +GOTRUE_EXTERNAL_SLACK_REDIRECT_URI="http://localhost:9999/callback" + +# WorkOS OAuth config +GOTRUE_EXTERNAL_WORKOS_ENABLED="true" +GOTRUE_EXTERNAL_WORKOS_CLIENT_ID="" +GOTRUE_EXTERNAL_WORKOS_SECRET="" +GOTRUE_EXTERNAL_WORKOS_REDIRECT_URI="http://localhost:9999/callback" + +# Zoom OAuth config +GOTRUE_EXTERNAL_ZOOM_ENABLED="false" +GOTRUE_EXTERNAL_ZOOM_CLIENT_ID="" +GOTRUE_EXTERNAL_ZOOM_SECRET="" +GOTRUE_EXTERNAL_ZOOM_REDIRECT_URI="http://localhost:9999/callback" + +# Anonymous auth config +GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED="false" + +# PKCE Config +GOTRUE_EXTERNAL_FLOW_STATE_EXPIRY_DURATION="300s" + +# Phone provider config +GOTRUE_SMS_AUTOCONFIRM="false" +GOTRUE_SMS_MAX_FREQUENCY="5s" +GOTRUE_SMS_OTP_EXP="6000" +GOTRUE_SMS_OTP_LENGTH="6" +GOTRUE_SMS_PROVIDER="twilio" +GOTRUE_SMS_TWILIO_ACCOUNT_SID="" +GOTRUE_SMS_TWILIO_AUTH_TOKEN="" +GOTRUE_SMS_TWILIO_MESSAGE_SERVICE_SID="" +GOTRUE_SMS_TEMPLATE="This is from supabase. Your code is {{ .Code }} ." +GOTRUE_SMS_MESSAGEBIRD_ACCESS_KEY="" +GOTRUE_SMS_MESSAGEBIRD_ORIGINATOR="" +GOTRUE_SMS_TEXTLOCAL_API_KEY="" +GOTRUE_SMS_TEXTLOCAL_SENDER="" +GOTRUE_SMS_VONAGE_API_KEY="" +GOTRUE_SMS_VONAGE_API_SECRET="" +GOTRUE_SMS_VONAGE_FROM="" + +# Captcha config +GOTRUE_SECURITY_CAPTCHA_ENABLED="false" +GOTRUE_SECURITY_CAPTCHA_PROVIDER="hcaptcha" +GOTRUE_SECURITY_CAPTCHA_SECRET="0x0000000000000000000000000000000000000000" +GOTRUE_SECURITY_CAPTCHA_TIMEOUT="10s" +GOTRUE_SESSION_KEY="" + +# SAML config +GOTRUE_EXTERNAL_SAML_ENABLED="true" +GOTRUE_EXTERNAL_SAML_METADATA_URL="" +GOTRUE_EXTERNAL_SAML_API_BASE="http://localhost:9999" +GOTRUE_EXTERNAL_SAML_NAME="auth0" +GOTRUE_EXTERNAL_SAML_SIGNING_CERT="" +GOTRUE_EXTERNAL_SAML_SIGNING_KEY="" + +# Additional Security config +GOTRUE_LOG_LEVEL="debug" +GOTRUE_SECURITY_REFRESH_TOKEN_ROTATION_ENABLED="false" +GOTRUE_SECURITY_REFRESH_TOKEN_REUSE_INTERVAL="0" +GOTRUE_SECURITY_UPDATE_PASSWORD_REQUIRE_REAUTHENTICATION="false" +GOTRUE_OPERATOR_TOKEN="unused-operator-token" +GOTRUE_RATE_LIMIT_HEADER="X-Forwarded-For" +GOTRUE_RATE_LIMIT_EMAIL_SENT="100" + +GOTRUE_MAX_VERIFIED_FACTORS=10 + +# Auth Hook Configuration +GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED=false +GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI="" +# Only for HTTPS Hooks +GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRET="" + +GOTRUE_HOOK_CUSTOM_SMS_PROVIDER_ENABLED=false +GOTRUE_HOOK_CUSTOM_SMS_PROVIDER_URI="" +# Only for HTTPS Hooks +GOTRUE_HOOK_CUSTOM_SMS_PROVIDER_SECRET="" + + +# Test OTP Config +GOTRUE_SMS_TEST_OTP=":, :..." +GOTRUE_SMS_TEST_OTP_VALID_UNTIL="" # (e.g. 
2023-09-29T08:14:06Z) + +GOTRUE_MFA_WEB_AUTHN_ENROLL_ENABLED="false" +GOTRUE_MFA_WEB_AUTHN_VERIFY_ENABLED="false" diff --git a/auth_v2.169.0/go.mod b/auth_v2.169.0/go.mod new file mode 100644 index 0000000..a99b2b6 --- /dev/null +++ b/auth_v2.169.0/go.mod @@ -0,0 +1,163 @@ +module github.com/supabase/auth + +require ( + github.com/Masterminds/semver/v3 v3.1.1 // indirect + github.com/aaronarduino/goqrsvg v0.0.0-20220419053939-17e843f1dd40 + github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b + github.com/badoux/checkmail v0.0.0-20170203135005-d0a759655d62 + github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc + github.com/coreos/go-oidc/v3 v3.6.0 + github.com/didip/tollbooth/v5 v5.1.1 + github.com/gobuffalo/validate/v3 v3.3.3 // indirect + github.com/gobwas/glob v0.2.3 + github.com/gofrs/uuid v4.3.1+incompatible + github.com/jackc/pgconn v1.14.3 + github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451 + github.com/jackc/pgproto3/v2 v2.3.3 // indirect + github.com/jmoiron/sqlx v1.3.5 + github.com/joho/godotenv v1.4.0 + github.com/kelseyhightower/envconfig v1.4.0 + github.com/microcosm-cc/bluemonday v1.0.26 // indirect + github.com/mitchellh/mapstructure v1.5.0 + github.com/mrjones/oauth v0.0.0-20190623134757-126b35219450 + github.com/pkg/errors v0.9.1 + github.com/pquerna/otp v1.4.0 + github.com/rs/cors v1.11.0 + github.com/sebest/xff v0.0.0-20160910043805-6c115e0ffa35 + github.com/sethvargo/go-password v0.2.0 + github.com/sirupsen/logrus v1.9.3 + github.com/spf13/cobra v1.6.1 + github.com/stretchr/testify v1.9.0 + golang.org/x/crypto v0.31.0 + golang.org/x/oauth2 v0.17.0 + gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df +) + +require ( + github.com/bits-and-blooms/bitset v1.10.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-jose/go-jose/v3 v3.0.3 // indirect + github.com/go-webauthn/x v0.1.12 // indirect + github.com/gobuffalo/nulls v0.4.2 // indirect + github.com/goccy/go-json v0.10.3 // indirect + github.com/google/go-tpm v0.9.1 // indirect + github.com/jackc/pgx/v4 v4.18.2 // indirect + github.com/lestrrat-go/blackmagic v1.0.2 // indirect + github.com/lestrrat-go/httpcc v1.0.1 // indirect + github.com/lestrrat-go/httprc v1.0.5 // indirect + github.com/lestrrat-go/iter v1.0.2 // indirect + github.com/lestrrat-go/option v1.0.1 // indirect + github.com/segmentio/asm v1.2.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + golang.org/x/mod v0.17.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect +) + +require ( + github.com/XSAM/otelsql v0.26.0 + github.com/bombsimon/logrusr/v3 v3.0.0 + go.opentelemetry.io/contrib/instrumentation/runtime v0.45.0 + go.opentelemetry.io/otel v1.26.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 + go.opentelemetry.io/otel/metric v1.26.0 + go.opentelemetry.io/otel/sdk v1.26.0 + go.opentelemetry.io/otel/sdk/metric v1.26.0 + go.opentelemetry.io/otel/trace v1.26.0 + gopkg.in/h2non/gock.v1 v1.1.2 +) + +require ( + github.com/bits-and-blooms/bloom/v3 v3.6.0 + 
github.com/crewjam/saml v0.4.14 + github.com/deepmap/oapi-codegen v1.12.4 + github.com/fatih/structs v1.1.0 + github.com/fsnotify/fsnotify v1.7.0 + github.com/go-chi/chi/v5 v5.0.12 + github.com/go-webauthn/webauthn v0.11.1 + github.com/gobuffalo/pop/v6 v6.1.1 + github.com/golang-jwt/jwt/v5 v5.2.1 + github.com/lestrrat-go/jwx/v2 v2.1.0 + github.com/standard-webhooks/standard-webhooks/libraries v0.0.0-20240303152453-e0e82adf1721 + github.com/supabase/hibp v0.0.0-20231124125943-d225752ae869 + github.com/xeipuuv/gojsonschema v1.2.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.26.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.26.0 + go.opentelemetry.io/otel/exporters/prometheus v0.48.0 +) + +require ( + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect + github.com/aymerick/douceur v0.2.0 // indirect + github.com/beevik/etree v1.1.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/crewjam/httperr v0.2.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-sql-driver/mysql v1.7.0 // indirect + github.com/gobuffalo/envy v1.10.2 // indirect + github.com/gobuffalo/fizz v1.14.4 // indirect + github.com/gobuffalo/flect v1.0.2 // indirect + github.com/gobuffalo/github_flavored_markdown v1.1.3 // indirect + github.com/gobuffalo/helpers v0.6.7 // indirect + github.com/gobuffalo/plush/v4 v4.1.18 // indirect + github.com/gobuffalo/tags/v3 v3.1.4 // indirect + github.com/golang-jwt/jwt/v4 v4.5.1 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/css v1.0.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect + github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgtype v1.14.0 // indirect + github.com/jonboulle/clockwork v0.2.2 // indirect + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect + github.com/luna-duclos/instrumentedsql v1.1.3 // indirect + github.com/mattermost/xml-roundtrip-validator v0.1.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect + github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect + github.com/patrickmn/go-cache v2.1.0+incompatible // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.19.0 + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.48.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/russellhaering/goxmldsig v1.3.0 // indirect + github.com/sergi/go-diff v1.2.0 // indirect + github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d // indirect + github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e // indirect + 
github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.5.2 // indirect + go.opentelemetry.io/proto/otlp v1.2.0 // indirect + golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb + golang.org/x/net v0.23.0 // indirect + golang.org/x/sync v0.10.0 + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/grpc v1.63.2 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +go 1.22.3 diff --git a/auth_v2.169.0/go.sum b/auth_v2.169.0/go.sum new file mode 100644 index 0000000..827144a --- /dev/null +++ b/auth_v2.169.0/go.sum @@ -0,0 +1,559 @@ +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= +github.com/XSAM/otelsql v0.26.0 h1:UhAGVBD34Ctbh2aYcm/JAdL+6T6ybrP+YMWYkHqCdmo= +github.com/XSAM/otelsql v0.26.0/go.mod h1:5ciw61eMSh+RtTPN8spvPEPLJpAErZw8mFFPNfYiaxA= +github.com/aaronarduino/goqrsvg v0.0.0-20220419053939-17e843f1dd40 h1:uz4N2yHL4MF8vZX+36n+tcxeUf8D/gL4aJkyouhDw4A= +github.com/aaronarduino/goqrsvg v0.0.0-20220419053939-17e843f1dd40/go.mod h1:dytw+5qs+pdi61fO/S4OmXR7AuEq/HvNCuG03KxQHT4= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b h1:slYM766cy2nI3BwyRiyQj/Ud48djTMtMebDqepE95rw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= +github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= +github.com/badoux/checkmail v0.0.0-20170203135005-d0a759655d62 h1:vMqcPzLT1/mbYew0gM6EJy4/sCNy9lY9rmlFO+pPwhY= +github.com/badoux/checkmail v0.0.0-20170203135005-d0a759655d62/go.mod h1:r5ZalvRl3tXevRNJkwIB6DC4DD3DMjIlY9NEU1XGoaQ= +github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= +github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= +github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bloom/v3 v3.6.0 h1:dTU0OVLJSoOhz9m68FTXMFfA39nR8U/nTCs1zb26mOI= +github.com/bits-and-blooms/bloom/v3 v3.6.0/go.mod h1:VKlUSvp0lFIYqxJjzdnSsZEw4iHb1kOL2tfHTgyJBHg= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= 
+github.com/bombsimon/logrusr/v3 v3.0.0 h1:tcAoLfuAhKP9npBxWzSdpsvKPQt1XV02nSf2lZA82TQ= +github.com/bombsimon/logrusr/v3 v3.0.0/go.mod h1:PksPPgSFEL2I52pla2glgCyyd2OqOHAnFF5E+g8Ixco= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/coreos/go-oidc/v3 v3.6.0 h1:AKVxfYw1Gmkn/w96z0DbT/B/xFnzTd3MkZvWLjF4n/o= +github.com/coreos/go-oidc/v3 v3.6.0/go.mod h1:ZpHUsHBucTUj6WOkrP4E20UPynbLZzhTQ1XKCXkxyPc= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/crewjam/httperr v0.2.0 h1:b2BfXR8U3AlIHwNeFFvZ+BV1LFvKLlzMjzaTnZMybNo= +github.com/crewjam/httperr v0.2.0/go.mod h1:Jlz+Sg/XqBQhyMjdDiC+GNNRzZTD7x39Gu3pglZ5oH4= +github.com/crewjam/saml v0.4.14 h1:g9FBNx62osKusnFzs3QTN5L9CVA/Egfgm+stJShzw/c= +github.com/crewjam/saml v0.4.14/go.mod h1:UVSZCf18jJkk6GpWNVqcyQJMD5HsRugBPf4I1nl2mME= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/deepmap/oapi-codegen v1.12.4 h1:pPmn6qI9MuOtCz82WY2Xaw46EQjgvxednXXrP7g5Q2s= +github.com/deepmap/oapi-codegen v1.12.4/go.mod h1:3lgHGMu6myQ2vqbbTXH2H1o4eXFTGnFiDaOaKKl5yas= +github.com/didip/tollbooth/v5 v5.1.1 h1:QpKFg56jsbNuQ6FFj++Z1gn2fbBsvAc1ZPLUaDOYW5k= +github.com/didip/tollbooth/v5 v5.1.1/go.mod h1:d9rzwOULswrD3YIrAQmP3bfjxab32Df4IaO6+D25l9g= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 
h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s= +github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= +github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-webauthn/webauthn v0.11.1 h1:5G/+dg91/VcaJHTtJUfwIlNJkLwbJCcnUc4W8VtkpzA= +github.com/go-webauthn/webauthn v0.11.1/go.mod h1:YXRm1WG0OtUyDFaVAgB5KG7kVqW+6dYCJ7FTQH4SxEE= +github.com/go-webauthn/x v0.1.12 h1:RjQ5cvApzyU/xLCiP+rub0PE4HBZsLggbxGR5ZpUf/A= +github.com/go-webauthn/x v0.1.12/go.mod h1:XlRcGkNH8PT45TfeJYc6gqpOtiOendHhVmnOxh+5yHs= +github.com/gobuffalo/attrs v1.0.3/go.mod h1:KvDJCE0avbufqS0Bw3UV7RQynESY0jjod+572ctX4t8= +github.com/gobuffalo/envy v1.10.2 h1:EIi03p9c3yeuRCFPOKcSfajzkLb3hrRjEpHGI8I2Wo4= +github.com/gobuffalo/envy v1.10.2/go.mod h1:qGAGwdvDsaEtPhfBzb3o0SfDea8ByGn9j8bKmVft9z8= +github.com/gobuffalo/fizz v1.14.4 h1:8uume7joF6niTNWN582IQ2jhGTUoa9g1fiV/tIoGdBs= +github.com/gobuffalo/fizz v1.14.4/go.mod h1:9/2fGNXNeIFOXEEgTPJwiK63e44RjG+Nc4hfMm1ArGM= +github.com/gobuffalo/flect v0.3.0/go.mod h1:5pf3aGnsvqvCj50AVni7mJJF8ICxGZ8HomberC3pXLE= +github.com/gobuffalo/flect v1.0.0/go.mod h1:l9V6xSb4BlXwsxEMj3FVEub2nkdQjWhPvD8XTTlHPQc= +github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= +github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= +github.com/gobuffalo/genny/v2 v2.1.0/go.mod h1:4yoTNk4bYuP3BMM6uQKYPvtP6WsXFGm2w2EFYZdRls8= +github.com/gobuffalo/github_flavored_markdown v1.1.3 h1:rSMPtx9ePkFB22vJ+dH+m/EUBS8doQ3S8LeEXcdwZHk= +github.com/gobuffalo/github_flavored_markdown v1.1.3/go.mod h1:IzgO5xS6hqkDmUh91BW/+Qxo/qYnvfzoz3A7uLkg77I= +github.com/gobuffalo/helpers v0.6.7 h1:C9CedoRSfgWg2ZoIkVXgjI5kgmSpL34Z3qdnzpfNVd8= +github.com/gobuffalo/helpers v0.6.7/go.mod h1:j0u1iC1VqlCaJEEVkZN8Ia3TEzfj/zoXANqyJExTMTA= +github.com/gobuffalo/logger v1.0.7/go.mod h1:u40u6Bq3VVvaMcy5sRBclD8SXhBYPS0Qk95ubt+1xJM= +github.com/gobuffalo/nulls v0.4.2 h1:GAqBR29R3oPY+WCC7JL9KKk9erchaNuV6unsOSZGQkw= +github.com/gobuffalo/nulls v0.4.2/go.mod h1:EElw2zmBYafU2R9W4Ii1ByIj177wA/pc0JdjtD0EsH8= +github.com/gobuffalo/packd v1.0.2/go.mod h1:sUc61tDqGMXON80zpKGp92lDb86Km28jfvX7IAyxFT8= +github.com/gobuffalo/plush/v4 v4.1.16/go.mod h1:6t7swVsarJ8qSLw1qyAH/KbrcSTwdun2ASEQkOznakg= 
+github.com/gobuffalo/plush/v4 v4.1.18 h1:bnPjdMTEUQHqj9TNX2Ck3mxEXYZa+0nrFMNM07kpX9g= +github.com/gobuffalo/plush/v4 v4.1.18/go.mod h1:xi2tJIhFI4UdzIL8sxZtzGYOd2xbBpcFbLZlIPGGZhU= +github.com/gobuffalo/pop/v6 v6.1.1 h1:eUDBaZcb0gYrmFnKwpuTEUA7t5ZHqNfvS4POqJYXDZY= +github.com/gobuffalo/pop/v6 v6.1.1/go.mod h1:1n7jAmI1i7fxuXPZjZb0VBPQDbksRtCoFnrDV5IsvaI= +github.com/gobuffalo/tags/v3 v3.1.4 h1:X/ydLLPhgXV4h04Hp2xlbI2oc5MDaa7eub6zw8oHjsM= +github.com/gobuffalo/tags/v3 v3.1.4/go.mod h1:ArRNo3ErlHO8BtdA0REaZxijuWnWzF6PUXngmMXd2I0= +github.com/gobuffalo/validate/v3 v3.3.3 h1:o7wkIGSvZBYBd6ChQoLxkz2y1pfmhbI4jNJYh6PuNJ4= +github.com/gobuffalo/validate/v3 v3.3.3/go.mod h1:YC7FsbJ/9hW/VjQdmXPvFqvRis4vrRYFxr69WiNZw6g= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.3.1+incompatible h1:0/KbAdpx3UXAx1kEOWHJeOkpbgRFGHVgv+CFIY7dBJI= +github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-tpm v0.9.1 h1:0pGc4X//bAlmZzMKf8iz6IsDo1nYTbYJ6FZN/rg4zdM= +github.com/google/go-tpm v0.9.1/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= +github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= +github.com/inconshreveable/mousetrap v1.0.1 
h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= +github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451 h1:WAvSpGf7MsFuzAtK4Vk7R4EVe+liW4x83r4oWu0WHKw= +github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod 
h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= +github.com/jackc/pgx/v4 v4.18.2 h1:xVpYkNR5pk5bMCZGfClbO962UIqVABcAGt7ha1s/FeU= +github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= +github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg= +github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty 
v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lestrrat-go/blackmagic v1.0.2 h1:Cg2gVSc9h7sz9NOByczrbUvLopQmXrfFx//N+AkAr5k= +github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= +github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= +github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/httprc v1.0.5 h1:bsTfiH8xaKOJPrg1R+E3iE/AWZr/x0Phj9PBTG/OLUk= +github.com/lestrrat-go/httprc v1.0.5/go.mod h1:mwwz3JMTPBjHUkkDv/IGJ39aALInZLrhBp0X7KGUZlo= +github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI= +github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= +github.com/lestrrat-go/jwx/v2 v2.1.0 h1:0zs7Ya6+39qoit7gwAf+cYm1zzgS3fceIdo7RmQ5lkw= +github.com/lestrrat-go/jwx/v2 v2.1.0/go.mod h1:Xpw9QIaUGiIUD1Wx0NcY1sIHwFf8lDuZn/cmxtXYRys= +github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= +github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/luna-duclos/instrumentedsql v1.1.3 h1:t7mvC0z1jUt5A0UQ6I/0H31ryymuQRnJcWCiqV3lSAA= +github.com/luna-duclos/instrumentedsql v1.1.3/go.mod h1:9J1njvFds+zN7y85EDhN9XNQLANWwZt2ULeIC8yMNYs= +github.com/mattermost/xml-roundtrip-validator v0.1.0 h1:RXbVD2UAl7A7nOTR4u7E3ILa4IbtvKBHw64LDsmu9hU= +github.com/mattermost/xml-roundtrip-validator v0.1.0/go.mod h1:qccnGMcpgwcNaBnxqpJpWWUiPNr5H3O8eDgGV9gT5To= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod 
h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= +github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/microcosm-cc/bluemonday v1.0.20/go.mod h1:yfBmMi8mxvaZut3Yytv+jTXRY8mxyjJ0/kQBTElld50= +github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58= +github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mrjones/oauth v0.0.0-20190623134757-126b35219450 h1:j2kD3MT1z4PXCiUllUJF9mWUESr9TWKS7iEKsQ/IipM= +github.com/mrjones/oauth v0.0.0-20190623134757-126b35219450/go.mod h1:skjdDftzkFALcuGzYSklqYd8gvat6F1gZJ4YPVbkZpM= +github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4= +github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= +github.com/patrickmn/go-cache v0.0.0-20170418232947-7ac151875ffb/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/otp v1.4.0 h1:wZvl1TIVxKRThZIBiwOOHOGP/1+nZyWBil9Y2XNEDzg= +github.com/pquerna/otp v1.4.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod 
h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= +github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/russellhaering/goxmldsig v1.3.0 h1:DllIWUgMy0cRUMfGiASiYEa35nsieyD3cigIwLonTPM= +github.com/russellhaering/goxmldsig v1.3.0/go.mod h1:gM4MDENBQf7M+V824SGfyIUVFWydB7n0KkEubVJl+Tw= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sebest/xff v0.0.0-20160910043805-6c115e0ffa35 h1:eajwn6K3weW5cd1ZXLu2sJ4pvwlBiCWY4uDejOr73gM= +github.com/sebest/xff v0.0.0-20160910043805-6c115e0ffa35/go.mod h1:wozgYq9WEBQBaIJe4YZ0qTSFAMxmcwBhQH0fO0R34Z0= +github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI= +github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d h1:yKm7XZV6j9Ev6lojP2XaIshpT4ymkqhMeSghO5Ps00E= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e h1:qpG93cPwA5f7s/ZPBJnGOYQNK/vKsaDaseuKT5Asee8= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spkg/bom 
v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= +github.com/standard-webhooks/standard-webhooks/libraries v0.0.0-20240303152453-e0e82adf1721 h1:HTsFo0buahHfjuVUTPDdJRBkfjExkRM1LUBy6crQ7lc= +github.com/standard-webhooks/standard-webhooks/libraries v0.0.0-20240303152453-e0e82adf1721/go.mod h1:L1MQhA6x4dn9r007T033lsaZMv9EmBAdXyU/+EF40fo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/supabase/hibp v0.0.0-20231124125943-d225752ae869 h1:VDuRtwen5Z7QQ5ctuHUse4wAv/JozkKZkdic5vUV4Lg= +github.com/supabase/hibp v0.0.0-20231124125943-d225752ae869/go.mod h1:eHX5nlSMSnyPjUrbYzeqrA8snCe2SKyfizKjU3dkfOw= +github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= +github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 
h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= +go.opentelemetry.io/contrib/instrumentation/runtime v0.45.0 h1:2JydY5UiDpqvj2p7sO9bgHuhTy4hgTZ0ymehdq/Ob0Q= +go.opentelemetry.io/contrib/instrumentation/runtime v0.45.0/go.mod h1:ch3a5QxOqVWxas4CzjCFFOOQe+7HgAXC/N1oVxS9DK4= +go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= +go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.26.0 h1:+hm+I+KigBy3M24/h1p/NHkUx/evbLH0PNcjpMyCHc4= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.26.0/go.mod h1:NjC8142mLvvNT6biDpaMjyz78kyEHIwAJlSX0N9P5KI= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.26.0 h1:HGZWGmCVRCVyAs2GQaiHQPbDHo+ObFWeUEOd+zDnp64= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.26.0/go.mod h1:SaH+v38LSCHddyk7RGlU9uZyQoRrKao6IBnJw6Kbn+c= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= +go.opentelemetry.io/otel/exporters/prometheus v0.48.0 h1:sBQe3VNGUjY9IKWQC6z2lNqa5iGbDSxhs60ABwK4y0s= +go.opentelemetry.io/otel/exporters/prometheus v0.48.0/go.mod h1:DtrbMzoZWwQHyrQmCfLam5DZbnmorsGbOtTbYHycU5o= +go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= +go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= +go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= +go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs= +go.opentelemetry.io/otel/sdk/metric v1.26.0 h1:cWSks5tfriHPdWFnl+qpX3P681aAYqlZHcAyHw5aU9Y= +go.opentelemetry.io/otel/sdk/metric v1.26.0/go.mod h1:ClMFFknnThJCksebJwz7KIyEDHO+nTB6gK8obLy8RyE= +go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= +go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod 
h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb h1:PaBZQdo+iSDyHT053FjUCgZQ/9uqVwPOcl7KSWhKn6w= +golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20161007143504-f4b625ec9b21/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221002022538-bcab6841153b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.0.0-20160926182426-711ca1cb8763/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w= +golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= +google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= 
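Each dependency in the go.sum listing above and below is pinned by a pair of checksum lines: an "h1:" entry hashing the module's entire file tree, and a "/go.mod" entry hashing just its go.mod file (versions consulted only for version selection, and never built, carry the go.mod line alone). The hash is a base64-encoded SHA-256 dirhash that the Go toolchain re-verifies when the module is downloaded. A representative pair, with a hypothetical module that is not part of this lockfile:

example.com/somelib v1.2.3 h1:<base64 dirhash of the module tree>=
example.com/somelib v1.2.3/go.mod h1:<base64 dirhash of go.mod>=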
+gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df h1:n7WqCuqOuCbNr617RXOY0AWRXxgwEyPp2z+p0+hgMuE=
+gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw=
+gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY=
+gopkg.in/h2non/gock.v1 v1.1.2/go.mod h1:n7UGz/ckNChHiK05rDoiC4MYSunEC/lyaUm2WWaDva0=
+gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
diff --git a/auth_v2.169.0/hack/coverage.sh b/auth_v2.169.0/hack/coverage.sh
new file mode 100644
index 0000000..5510196
--- /dev/null
+++ b/auth_v2.169.0/hack/coverage.sh
@@ -0,0 +1,21 @@
+FAIL=false
+
+for PKG in "crypto"
+do
+    UNCOVERED_FUNCS=$(go tool cover -func=coverage.out | grep "^github.com/supabase/auth/internal/$PKG/" | grep -v '100.0%$')
+    UNCOVERED_FUNCS_COUNT=$(echo "$UNCOVERED_FUNCS" | wc -l)
+
+    if [ "$UNCOVERED_FUNCS_COUNT" -gt 1 ] # wc -l counts +1 line
+    then
+        echo "Package $PKG not covered 100% with tests. $UNCOVERED_FUNCS_COUNT functions need more tests. This is mandatory."
+        echo "$UNCOVERED_FUNCS"
+        FAIL=true
+    fi
+done
+
+if [ "$FAIL" = "true" ]
+then
+    exit 1
+else
+    exit 0
+fi
diff --git a/auth_v2.169.0/hack/database.yml b/auth_v2.169.0/hack/database.yml
new file mode 100644
index 0000000..8614ce4
--- /dev/null
+++ b/auth_v2.169.0/hack/database.yml
@@ -0,0 +1,15 @@
+postgres:
+  dialect: "postgres"
+  database: "postgres"
+  host: {{ envOr "POSTGRES_HOST" "127.0.0.1" }}
+  port: {{ envOr "POSTGRES_PORT" "5432" }}
+  user: {{ envOr "POSTGRES_USER" "postgres" }}
+  password: {{ envOr "POSTGRES_PASSWORD" "root" }}
+
+test:
+  dialect: "postgres"
+  database: "postgres"
+  host: {{ envOr "POSTGRES_HOST" "127.0.0.1" }}
+  port: {{ envOr "POSTGRES_PORT" "5432" }}
+  user: {{ envOr "POSTGRES_USER" "postgres" }}
+  password: {{ envOr "POSTGRES_PASSWORD" "root" }}
diff --git a/auth_v2.169.0/hack/init_postgres.sql b/auth_v2.169.0/hack/init_postgres.sql
new file mode 100644
index 0000000..d1ef709
--- /dev/null
+++ b/auth_v2.169.0/hack/init_postgres.sql
@@ -0,0 +1,7 @@
+CREATE USER supabase_admin LOGIN CREATEROLE CREATEDB REPLICATION BYPASSRLS;
+
+-- Supabase super admin
+CREATE USER supabase_auth_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION PASSWORD 'root';
+CREATE SCHEMA IF NOT EXISTS auth AUTHORIZATION supabase_auth_admin;
+GRANT CREATE ON DATABASE postgres TO supabase_auth_admin;
+ALTER USER supabase_auth_admin SET search_path = 'auth';
diff --git a/auth_v2.169.0/hack/migrate.sh b/auth_v2.169.0/hack/migrate.sh
new file mode 100644
index 0000000..2d1f0e5
--- /dev/null
+++ b/auth_v2.169.0/hack/migrate.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+DB_ENV=$1
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+DATABASE="$DIR/database.yml"
+
+export GOTRUE_DB_DRIVER="postgres"
+export GOTRUE_DB_DATABASE_URL="postgres://supabase_auth_admin:root@localhost:5432/$DB_ENV"
+export GOTRUE_DB_MIGRATIONS_PATH=$DIR/../migrations
+
+go run main.go migrate -c $DIR/test.env
diff --git a/auth_v2.169.0/hack/postgresd.sh b/auth_v2.169.0/hack/postgresd.sh
new file mode 100644
index 0000000..c4b6a58
--- /dev/null
+++ b/auth_v2.169.0/hack/postgresd.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+docker rm -f gotrue_postgresql >/dev/null 2>/dev/null || true
+
+docker volume inspect postgres_data 2>/dev/null >/dev/null || docker volume create --name postgres_data >/dev/null
+
+docker run --name gotrue_postgresql \
+    -p 5432:5432 \
+    -e POSTGRES_USER=postgres \
+    -e POSTGRES_PASSWORD=root \
+    -e POSTGRES_DB=postgres \
+    --volume postgres_data:/var/lib/postgresql/data \
+    --volume "$(pwd)"/hack/init_postgres.sql:/docker-entrypoint-initdb.d/init.sql \
+    -d postgres:15
diff --git a/auth_v2.169.0/hack/test.env b/auth_v2.169.0/hack/test.env
new file mode 100644
index 0000000..35e4b61
--- /dev/null
+++ b/auth_v2.169.0/hack/test.env
@@ -0,0 +1,128 @@
+GOTRUE_JWT_SECRET=testsecret
+GOTRUE_JWT_EXP=3600
+GOTRUE_JWT_AUD="authenticated"
+GOTRUE_JWT_ADMIN_ROLES="supabase_admin,service_role"
+GOTRUE_JWT_DEFAULT_GROUP_NAME="authenticated"
+GOTRUE_DB_DRIVER=postgres
+DB_NAMESPACE="auth"
+GOTRUE_DB_AUTOMIGRATE=true
+DATABASE_URL="postgres://supabase_auth_admin:root@localhost:5432/postgres"
+GOTRUE_API_HOST=localhost
+PORT=9999
+API_EXTERNAL_URL="http://localhost:9999"
+GOTRUE_LOG_SQL=none
+GOTRUE_LOG_LEVEL=warn
+GOTRUE_SITE_URL=https://example.netlify.com
+GOTRUE_URI_ALLOW_LIST="http://localhost:3000"
+GOTRUE_OPERATOR_TOKEN=foobar
+GOTRUE_EXTERNAL_APPLE_ENABLED=true
+GOTRUE_EXTERNAL_APPLE_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_APPLE_SECRET=testsecret
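+# Every external provider block in this file repeats the same four variables.
+# Sketched generically below as a comment; <PROVIDER> is a placeholder, not a
+# variable name the service actually reads:
+#   GOTRUE_EXTERNAL_<PROVIDER>_ENABLED=true
+#   GOTRUE_EXTERNAL_<PROVIDER>_CLIENT_ID=...
+#   GOTRUE_EXTERNAL_<PROVIDER>_SECRET=...
+#   GOTRUE_EXTERNAL_<PROVIDER>_REDIRECT_URI=https://.../callback
+# Keycloak is the one exception in this file, adding GOTRUE_EXTERNAL_KEYCLOAK_URL
+# to point at its realm.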
+GOTRUE_EXTERNAL_APPLE_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_AZURE_ENABLED=true
+GOTRUE_EXTERNAL_AZURE_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_AZURE_SECRET=testsecret
+GOTRUE_EXTERNAL_AZURE_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_BITBUCKET_ENABLED=true
+GOTRUE_EXTERNAL_BITBUCKET_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_BITBUCKET_SECRET=testsecret
+GOTRUE_EXTERNAL_BITBUCKET_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_DISCORD_ENABLED=true
+GOTRUE_EXTERNAL_DISCORD_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_DISCORD_SECRET=testsecret
+GOTRUE_EXTERNAL_DISCORD_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_FACEBOOK_ENABLED=true
+GOTRUE_EXTERNAL_FACEBOOK_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_FACEBOOK_SECRET=testsecret
+GOTRUE_EXTERNAL_FACEBOOK_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_FLY_ENABLED=true
+GOTRUE_EXTERNAL_FLY_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_FLY_SECRET=testsecret
+GOTRUE_EXTERNAL_FLY_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_FIGMA_ENABLED=true
+GOTRUE_EXTERNAL_FIGMA_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_FIGMA_SECRET=testsecret
+GOTRUE_EXTERNAL_FIGMA_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_GITHUB_ENABLED=true
+GOTRUE_EXTERNAL_GITHUB_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_GITHUB_SECRET=testsecret
+GOTRUE_EXTERNAL_GITHUB_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_KAKAO_ENABLED=true
+GOTRUE_EXTERNAL_KAKAO_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_KAKAO_SECRET=testsecret
+GOTRUE_EXTERNAL_KAKAO_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_KEYCLOAK_ENABLED=true
+GOTRUE_EXTERNAL_KEYCLOAK_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_KEYCLOAK_SECRET=testsecret
+GOTRUE_EXTERNAL_KEYCLOAK_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_KEYCLOAK_URL=https://keycloak.example.com/auth/realms/myrealm
+GOTRUE_EXTERNAL_LINKEDIN_ENABLED=true
+GOTRUE_EXTERNAL_LINKEDIN_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_LINKEDIN_SECRET=testsecret
+GOTRUE_EXTERNAL_LINKEDIN_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_LINKEDIN_OIDC_ENABLED=true
+GOTRUE_EXTERNAL_LINKEDIN_OIDC_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_LINKEDIN_OIDC_SECRET=testsecret
+GOTRUE_EXTERNAL_LINKEDIN_OIDC_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_GITLAB_ENABLED=true
+GOTRUE_EXTERNAL_GITLAB_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_GITLAB_SECRET=testsecret
+GOTRUE_EXTERNAL_GITLAB_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_GOOGLE_ENABLED=true
+GOTRUE_EXTERNAL_GOOGLE_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_GOOGLE_SECRET=testsecret
+GOTRUE_EXTERNAL_GOOGLE_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_NOTION_ENABLED=true
+GOTRUE_EXTERNAL_NOTION_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_NOTION_SECRET=testsecret
+GOTRUE_EXTERNAL_NOTION_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_SPOTIFY_ENABLED=true
+GOTRUE_EXTERNAL_SPOTIFY_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_SPOTIFY_SECRET=testsecret
+GOTRUE_EXTERNAL_SPOTIFY_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_SLACK_ENABLED=true
+GOTRUE_EXTERNAL_SLACK_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_SLACK_SECRET=testsecret
+GOTRUE_EXTERNAL_SLACK_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_SLACK_OIDC_ENABLED=true
+GOTRUE_EXTERNAL_SLACK_OIDC_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_SLACK_OIDC_SECRET=testsecret
+GOTRUE_EXTERNAL_SLACK_OIDC_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_WORKOS_ENABLED=true
+GOTRUE_EXTERNAL_WORKOS_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_WORKOS_SECRET=testsecret
+GOTRUE_EXTERNAL_WORKOS_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_TWITCH_ENABLED=true
+GOTRUE_EXTERNAL_TWITCH_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_TWITCH_SECRET=testsecret
+GOTRUE_EXTERNAL_TWITCH_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_TWITTER_ENABLED=true
+GOTRUE_EXTERNAL_TWITTER_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_TWITTER_SECRET=testsecret
+GOTRUE_EXTERNAL_TWITTER_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_ZOOM_ENABLED=true
+GOTRUE_EXTERNAL_ZOOM_CLIENT_ID=testclientid
+GOTRUE_EXTERNAL_ZOOM_SECRET=testsecret
+GOTRUE_EXTERNAL_ZOOM_REDIRECT_URI=https://identity.services.netlify.com/callback
+GOTRUE_EXTERNAL_FLOW_STATE_EXPIRY_DURATION="300s"
+GOTRUE_RATE_LIMIT_VERIFY="100000"
+GOTRUE_RATE_LIMIT_TOKEN_REFRESH="30"
+GOTRUE_RATE_LIMIT_ANONYMOUS_USERS="5"
+GOTRUE_RATE_LIMIT_HEADER="My-Custom-Header"
+GOTRUE_TRACING_ENABLED=true
+GOTRUE_TRACING_EXPORTER=default
+GOTRUE_TRACING_HOST=127.0.0.1
+GOTRUE_TRACING_PORT=8126
+GOTRUE_TRACING_TAGS="env:test"
+GOTRUE_SECURITY_CAPTCHA_ENABLED="false"
+GOTRUE_SECURITY_CAPTCHA_PROVIDER="hcaptcha"
+GOTRUE_SECURITY_CAPTCHA_SECRET="0x0000000000000000000000000000000000000000"
+GOTRUE_SECURITY_CAPTCHA_TIMEOUT="10s"
+GOTRUE_SAML_ENABLED="true"
+GOTRUE_SAML_PRIVATE_KEY="MIIEowIBAAKCAQEAszrVveMQcSsa0Y+zN1ZFb19cRS0jn4UgIHTprW2tVBmO2PABzjY3XFCfx6vPirMAPWBYpsKmXrvm1tr0A6DZYmA8YmJd937VUQ67fa6DMyppBYTjNgGEkEhmKuszvF3MARsIKCGtZqUrmS7UG4404wYxVppnr2EYm3RGtHlkYsXu20MBqSDXP47bQP+PkJqC3BuNGk3xt5UHl2FSFpTHelkI6lBynw16B+lUT1F96SERNDaMqi/TRsZdGe5mB/29ngC/QBMpEbRBLNRir5iUevKS7Pn4aph9Qjaxx/97siktK210FJT23KjHpgcUfjoQ6BgPBTLtEeQdRyDuc/CgfwIDAQABAoIBAGYDWOEpupQPSsZ4mjMnAYJwrp4ZISuMpEqVAORbhspVeb70bLKonT4IDcmiexCg7cQBcLQKGpPVM4CbQ0RFazXZPMVq470ZDeWDEyhoCfk3bGtdxc1Zc9CDxNMs6FeQs6r1beEZug6weG5J/yRn/qYxQife3qEuDMl+lzfl2EN3HYVOSnBmdt50dxRuX26iW3nqqbMRqYn9OHuJ1LvRRfYeyVKqgC5vgt/6Tf7DAJwGe0dD7q08byHV8DBZ0pnMVU0bYpf1GTgMibgjnLjK//EVWafFHtN+RXcjzGmyJrk3+7ZyPUpzpDjO21kpzUQLrpEkkBRnmg6bwHnSrBr8avECgYEA3pq1PTCAOuLQoIm1CWR9/dhkbJQiKTJevlWV8slXQLR50P0WvI2RdFuSxlWmA4xZej8s4e7iD3MYye6SBsQHygOVGc4efvvEZV8/XTlDdyj7iLVGhnEmu2r7AFKzy8cOvXx0QcLg+zNd7vxZv/8D3Qj9Jje2LjLHKM5n/dZ3RzUCgYEAzh5Lo2anc4WN8faLGt7rPkGQF+7/18ImQE11joHWa3LzAEy7FbeOGpE/vhOv5umq5M/KlWFIRahMEQv4RusieHWI19ZLIP+JwQFxWxS+cPp3xOiGcquSAZnlyVSxZ//dlVgaZq2o2MfrxECcovRlaknl2csyf+HjFFwKlNxHm2MCgYAr//R3BdEy0oZeVRndo2lr9YvUEmu2LOihQpWDCd0fQw0ZDA2kc28eysL2RROte95r1XTvq6IvX5a0w11FzRWlDpQ4J4/LlcQ6LVt+98SoFwew+/PWuyLmxLycUbyMOOpm9eSc4wJJZNvaUzMCSkvfMtmm5jgyZYMMQ9A2Ul/9SQKBgB9mfh9mhBwVPIqgBJETZMMXOdxrjI5SBYHGSyJqpT+5Q0vIZLfqPrvNZOiQFzwWXPJ+tV4Mc/YorW3rZOdo6tdvEGnRO6DLTTEaByrY/io3/gcBZXoSqSuVRmxleqFdWWRnB56c1hwwWLqNHU+1671FhL6pNghFYVK4suP6qu4BAoGBAMk+VipXcIlD67mfGrET/xDqiWWBZtgTzTMjTpODhDY1GZck1eb4CQMP5j5V3gFJ4cSgWDJvnWg8rcz0unz/q4aeMGl1rah5WNDWj1QKWMS6vJhMHM/rqN1WHWR0ZnV83svYgtg0zDnQKlLujqW4JmGXLMU7ur6a+e6lpa1fvLsP"
+GOTRUE_MAX_VERIFIED_FACTORS=10
+GOTRUE_SMS_TEST_OTP_VALID_UNTIL=""
+GOTRUE_SECURITY_DB_ENCRYPTION_ENCRYPT=true
+GOTRUE_SECURITY_DB_ENCRYPTION_ENCRYPTION_KEY_ID=abc
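+# The two lines below complete the column-encryption block started above:
+# ENCRYPTION_KEY is the key new data is written with, and DECRYPTION_KEYS
+# appears to map key IDs to keys as <id>:<key> pairs, so ENCRYPTION_KEY_ID
+# ("abc" here) selects which entry encrypts new values while older keys can
+# remain listed for reads.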
+GOTRUE_SECURITY_DB_ENCRYPTION_ENCRYPTION_KEY=pwFoiPyybQMqNmYVN0gUnpbfpGQV2sDv9vp0ZAxi_Y4
+GOTRUE_SECURITY_DB_ENCRYPTION_DECRYPTION_KEYS=abc:pwFoiPyybQMqNmYVN0gUnpbfpGQV2sDv9vp0ZAxi_Y4
diff --git a/auth_v2.169.0/init_postgres.sh b/auth_v2.169.0/init_postgres.sh
new file mode 100644
index 0000000..134e179
--- /dev/null
+++ b/auth_v2.169.0/init_postgres.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+
+psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
+	CREATE USER supabase_admin LOGIN CREATEROLE CREATEDB REPLICATION BYPASSRLS;
+
+	-- Supabase super admin
+	CREATE USER supabase_auth_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION PASSWORD 'root';
+	CREATE SCHEMA IF NOT EXISTS $DB_NAMESPACE AUTHORIZATION supabase_auth_admin;
+	GRANT CREATE ON DATABASE postgres TO supabase_auth_admin;
+	ALTER USER supabase_auth_admin SET search_path = '$DB_NAMESPACE';
+EOSQL
diff --git a/auth_v2.169.0/internal/api/admin.go b/auth_v2.169.0/internal/api/admin.go
new file mode 100644
index 0000000..63cde06
--- /dev/null
+++ b/auth_v2.169.0/internal/api/admin.go
@@ -0,0 +1,642 @@
+package api
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/fatih/structs"
+	"github.com/go-chi/chi/v5"
+	"github.com/gofrs/uuid"
+	"github.com/pkg/errors"
+	"github.com/sethvargo/go-password/password"
+	"github.com/supabase/auth/internal/api/provider"
+	"github.com/supabase/auth/internal/models"
+	"github.com/supabase/auth/internal/observability"
+	"github.com/supabase/auth/internal/storage"
+	"github.com/supabase/auth/internal/utilities"
+	"golang.org/x/crypto/bcrypt"
+)
+
+type AdminUserParams struct {
+	Id           string                 `json:"id"`
+	Aud          string                 `json:"aud"`
+	Role         string                 `json:"role"`
+	Email        string                 `json:"email"`
+	Phone        string                 `json:"phone"`
+	Password     *string                `json:"password"`
+	PasswordHash string                 `json:"password_hash"`
+	EmailConfirm bool                   `json:"email_confirm"`
+	PhoneConfirm bool                   `json:"phone_confirm"`
+	UserMetaData map[string]interface{} `json:"user_metadata"`
+	AppMetaData  map[string]interface{} `json:"app_metadata"`
+	BanDuration  string                 `json:"ban_duration"`
+}
+
+type adminUserDeleteParams struct {
+	ShouldSoftDelete bool `json:"should_soft_delete"`
+}
+
+type adminUserUpdateFactorParams struct {
+	FriendlyName string `json:"friendly_name"`
+	Phone        string `json:"phone"`
+}
+
+type AdminListUsersResponse struct {
+	Users []*models.User `json:"users"`
+	Aud   string         `json:"aud"`
+}
+
+func (a *API) loadUser(w http.ResponseWriter, r *http.Request) (context.Context, error) {
+	ctx := r.Context()
+	db := a.db.WithContext(ctx)
+
+	userID, err := uuid.FromString(chi.URLParam(r, "user_id"))
+	if err != nil {
+		return nil, notFoundError(ErrorCodeValidationFailed, "user_id must be a UUID")
+	}
+
+	observability.LogEntrySetField(r, "user_id", userID)
+
+	u, err := models.FindUserByID(db, userID)
+	if err != nil {
+		if models.IsNotFoundError(err) {
+			return nil, notFoundError(ErrorCodeUserNotFound, "User not found")
+		}
+		return nil, internalServerError("Database error loading user").WithInternalError(err)
+	}
+
+	return withUser(ctx, u), nil
+}
+
+// Use only after requireAuthentication, so that there is a valid user
+func (a *API) loadFactor(w http.ResponseWriter, r *http.Request) (context.Context, error) {
+	ctx := r.Context()
+	db := a.db.WithContext(ctx)
+	user := getUser(ctx)
+	factorID, err := uuid.FromString(chi.URLParam(r, "factor_id"))
+	if err != nil {
+		return nil, notFoundError(ErrorCodeValidationFailed, "factor_id must be a UUID")
+	}
+
+	observability.LogEntrySetField(r, "factor_id",
factorID) + + factor, err := user.FindOwnedFactorByID(db, factorID) + if err != nil { + if models.IsNotFoundError(err) { + return nil, notFoundError(ErrorCodeMFAFactorNotFound, "Factor not found") + } + return nil, internalServerError("Database error loading factor").WithInternalError(err) + } + return withFactor(ctx, factor), nil +} + +func (a *API) getAdminParams(r *http.Request) (*AdminUserParams, error) { + params := &AdminUserParams{} + if err := retrieveRequestParams(r, params); err != nil { + return nil, err + } + + return params, nil +} + +// adminUsers responds with a list of all users in a given audience +func (a *API) adminUsers(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + aud := a.requestAud(ctx, r) + + pageParams, err := paginate(r) + if err != nil { + return badRequestError(ErrorCodeValidationFailed, "Bad Pagination Parameters: %v", err).WithInternalError(err) + } + + sortParams, err := sort(r, map[string]bool{models.CreatedAt: true}, []models.SortField{{Name: models.CreatedAt, Dir: models.Descending}}) + if err != nil { + return badRequestError(ErrorCodeValidationFailed, "Bad Sort Parameters: %v", err) + } + + filter := r.URL.Query().Get("filter") + + users, err := models.FindUsersInAudience(db, aud, pageParams, sortParams, filter) + if err != nil { + return internalServerError("Database error finding users").WithInternalError(err) + } + addPaginationHeaders(w, r, pageParams) + + return sendJSON(w, http.StatusOK, AdminListUsersResponse{ + Users: users, + Aud: aud, + }) +} + +// adminUserGet returns information about a single user +func (a *API) adminUserGet(w http.ResponseWriter, r *http.Request) error { + user := getUser(r.Context()) + + return sendJSON(w, http.StatusOK, user) +} + +// adminUserUpdate updates a single user object +func (a *API) adminUserUpdate(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + config := a.config + user := getUser(ctx) + adminUser := getAdminUser(ctx) + params, err := a.getAdminParams(r) + if err != nil { + return err + } + + if params.Email != "" { + params.Email, err = a.validateEmail(params.Email) + if err != nil { + return err + } + } + + if params.Phone != "" { + params.Phone, err = validatePhone(params.Phone) + if err != nil { + return err + } + } + + var banDuration *time.Duration + if params.BanDuration != "" { + duration := time.Duration(0) + if params.BanDuration != "none" { + duration, err = time.ParseDuration(params.BanDuration) + if err != nil { + return badRequestError(ErrorCodeValidationFailed, "invalid format for ban duration: %v", err) + } + } + banDuration = &duration + } + + if params.Password != nil { + password := *params.Password + + if err := a.checkPasswordStrength(ctx, password); err != nil { + return err + } + + if err := user.SetPassword(ctx, password, config.Security.DBEncryption.Encrypt, config.Security.DBEncryption.EncryptionKeyID, config.Security.DBEncryption.EncryptionKey); err != nil { + return err + } + } + + err = db.Transaction(func(tx *storage.Connection) error { + if params.Role != "" { + if terr := user.SetRole(tx, params.Role); terr != nil { + return terr + } + } + + if params.EmailConfirm { + if terr := user.Confirm(tx); terr != nil { + return terr + } + } + + if params.PhoneConfirm { + if terr := user.ConfirmPhone(tx); terr != nil { + return terr + } + } + + if params.Password != nil { + if terr := user.UpdatePassword(tx, nil); terr != nil { + return terr + } + } + + var identities 
[]models.Identity + if params.Email != "" { + if identity, terr := models.FindIdentityByIdAndProvider(tx, user.ID.String(), "email"); terr != nil && !models.IsNotFoundError(terr) { + return terr + } else if identity == nil { + // if the user doesn't have an existing email + // then updating the user's email should create a new email identity + i, terr := a.createNewIdentity(tx, user, "email", structs.Map(provider.Claims{ + Subject: user.ID.String(), + Email: params.Email, + EmailVerified: params.EmailConfirm, + })) + if terr != nil { + return terr + } + identities = append(identities, *i) + } else { + // update the existing email identity + if terr := identity.UpdateIdentityData(tx, map[string]interface{}{ + "email": params.Email, + "email_verified": params.EmailConfirm, + }); terr != nil { + return terr + } + } + if user.IsAnonymous && params.EmailConfirm { + user.IsAnonymous = false + if terr := tx.UpdateOnly(user, "is_anonymous"); terr != nil { + return terr + } + } + + if terr := user.SetEmail(tx, params.Email); terr != nil { + return terr + } + } + + if params.Phone != "" { + if identity, terr := models.FindIdentityByIdAndProvider(tx, user.ID.String(), "phone"); terr != nil && !models.IsNotFoundError(terr) { + return terr + } else if identity == nil { + // if the user doesn't have an existing phone + // then updating the user's phone should create a new phone identity + identity, terr := a.createNewIdentity(tx, user, "phone", structs.Map(provider.Claims{ + Subject: user.ID.String(), + Phone: params.Phone, + PhoneVerified: params.PhoneConfirm, + })) + if terr != nil { + return terr + } + identities = append(identities, *identity) + } else { + // update the existing phone identity + if terr := identity.UpdateIdentityData(tx, map[string]interface{}{ + "phone": params.Phone, + "phone_verified": params.PhoneConfirm, + }); terr != nil { + return terr + } + } + if user.IsAnonymous && params.PhoneConfirm { + user.IsAnonymous = false + if terr := tx.UpdateOnly(user, "is_anonymous"); terr != nil { + return terr + } + } + if terr := user.SetPhone(tx, params.Phone); terr != nil { + return terr + } + } + user.Identities = append(user.Identities, identities...) 
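+	// To make the branches above concrete: a minimal admin request body that
+	// exercises the email path could look like the following (hypothetical
+	// values; in GoTrue deployments this handler is conventionally mounted
+	// at PUT /admin/users/{user_id}):
+	//
+	//   {"email": "new@example.com", "email_confirm": true, "ban_duration": "24h"}
+	//
+	// email_confirm doubles as email_verified on the created or updated email
+	// identity and, as coded above, clears is_anonymous on an anonymous user.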
+ + if params.AppMetaData != nil { + if terr := user.UpdateAppMetaData(tx, params.AppMetaData); terr != nil { + return terr + } + } + + if params.UserMetaData != nil { + if terr := user.UpdateUserMetaData(tx, params.UserMetaData); terr != nil { + return terr + } + } + + if banDuration != nil { + if terr := user.Ban(tx, *banDuration); terr != nil { + return terr + } + } + + if terr := models.NewAuditLogEntry(r, tx, adminUser, models.UserModifiedAction, "", map[string]interface{}{ + "user_id": user.ID, + "user_email": user.Email, + "user_phone": user.Phone, + }); terr != nil { + return terr + } + return nil + }) + + if err != nil { + return internalServerError("Error updating user").WithInternalError(err) + } + + return sendJSON(w, http.StatusOK, user) +} + +// adminUserCreate creates a new user based on the provided data +func (a *API) adminUserCreate(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + config := a.config + + adminUser := getAdminUser(ctx) + params, err := a.getAdminParams(r) + if err != nil { + return err + } + + aud := a.requestAud(ctx, r) + if params.Aud != "" { + aud = params.Aud + } + + if params.Email == "" && params.Phone == "" { + return badRequestError(ErrorCodeValidationFailed, "Cannot create a user without either an email or phone") + } + + var providers []string + if params.Email != "" { + params.Email, err = a.validateEmail(params.Email) + if err != nil { + return err + } + if user, err := models.IsDuplicatedEmail(db, params.Email, aud, nil); err != nil { + return internalServerError("Database error checking email").WithInternalError(err) + } else if user != nil { + return unprocessableEntityError(ErrorCodeEmailExists, DuplicateEmailMsg) + } + providers = append(providers, "email") + } + + if params.Phone != "" { + params.Phone, err = validatePhone(params.Phone) + if err != nil { + return err + } + if exists, err := models.IsDuplicatedPhone(db, params.Phone, aud); err != nil { + return internalServerError("Database error checking phone").WithInternalError(err) + } else if exists { + return unprocessableEntityError(ErrorCodePhoneExists, "Phone number already registered by another user") + } + providers = append(providers, "phone") + } + + if params.Password != nil && params.PasswordHash != "" { + return badRequestError(ErrorCodeValidationFailed, "Only a password or a password hash should be provided") + } + + if (params.Password == nil || *params.Password == "") && params.PasswordHash == "" { + password, err := password.Generate(64, 10, 0, false, true) + if err != nil { + return internalServerError("Error generating password").WithInternalError(err) + } + params.Password = &password + } + + var user *models.User + if params.PasswordHash != "" { + user, err = models.NewUserWithPasswordHash(params.Phone, params.Email, params.PasswordHash, aud, params.UserMetaData) + } else { + user, err = models.NewUser(params.Phone, params.Email, *params.Password, aud, params.UserMetaData) + } + + if err != nil { + if errors.Is(err, bcrypt.ErrPasswordTooLong) { + return badRequestError(ErrorCodeValidationFailed, err.Error()) + } + return internalServerError("Error creating user").WithInternalError(err) + } + + if params.Id != "" { + customId, err := uuid.FromString(params.Id) + if err != nil { + return badRequestError(ErrorCodeValidationFailed, "ID must conform to the uuid v4 format") + } + if customId == uuid.Nil { + return badRequestError(ErrorCodeValidationFailed, "ID cannot be a nil uuid") + } + user.ID = customId + } + + 
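+	// Note: uuid.FromString parses any RFC 4122 textual UUID, so despite the
+	// "uuid v4 format" error message above, the version is not actually
+	// enforced here; only the nil UUID is rejected explicitly.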
user.AppMetaData = map[string]interface{}{ + // TODO: Deprecate "provider" field + // default to the first provider in the providers slice + "provider": providers[0], + "providers": providers, + } + + var banDuration *time.Duration + if params.BanDuration != "" { + duration := time.Duration(0) + if params.BanDuration != "none" { + duration, err = time.ParseDuration(params.BanDuration) + if err != nil { + return badRequestError(ErrorCodeValidationFailed, "invalid format for ban duration: %v", err) + } + } + banDuration = &duration + } + + err = db.Transaction(func(tx *storage.Connection) error { + if terr := tx.Create(user); terr != nil { + return terr + } + + var identities []models.Identity + if user.GetEmail() != "" { + identity, terr := a.createNewIdentity(tx, user, "email", structs.Map(provider.Claims{ + Subject: user.ID.String(), + Email: user.GetEmail(), + })) + + if terr != nil { + return terr + } + identities = append(identities, *identity) + } + + if user.GetPhone() != "" { + identity, terr := a.createNewIdentity(tx, user, "phone", structs.Map(provider.Claims{ + Subject: user.ID.String(), + Phone: user.GetPhone(), + })) + + if terr != nil { + return terr + } + identities = append(identities, *identity) + } + + user.Identities = identities + + if terr := models.NewAuditLogEntry(r, tx, adminUser, models.UserSignedUpAction, "", map[string]interface{}{ + "user_id": user.ID, + "user_email": user.Email, + "user_phone": user.Phone, + }); terr != nil { + return terr + } + + role := config.JWT.DefaultGroupName + if params.Role != "" { + role = params.Role + } + if terr := user.SetRole(tx, role); terr != nil { + return terr + } + + if params.AppMetaData != nil { + if terr := user.UpdateAppMetaData(tx, params.AppMetaData); terr != nil { + return terr + } + } + + if params.EmailConfirm { + if terr := user.Confirm(tx); terr != nil { + return terr + } + } + + if params.PhoneConfirm { + if terr := user.ConfirmPhone(tx); terr != nil { + return terr + } + } + + if banDuration != nil { + if terr := user.Ban(tx, *banDuration); terr != nil { + return terr + } + } + + return nil + }) + + if err != nil { + return internalServerError("Database error creating new user").WithInternalError(err) + } + + return sendJSON(w, http.StatusOK, user) +} + +// adminUserDelete deletes a user +func (a *API) adminUserDelete(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + user := getUser(ctx) + adminUser := getAdminUser(ctx) + + // ShouldSoftDelete defaults to false + params := &adminUserDeleteParams{} + if body, _ := utilities.GetBodyBytes(r); len(body) != 0 { + // we only want to parse the body if it's not empty + // retrieveRequestParams will handle any errors with stream + if err := retrieveRequestParams(r, params); err != nil { + return err + } + } + + err := a.db.Transaction(func(tx *storage.Connection) error { + if terr := models.NewAuditLogEntry(r, tx, adminUser, models.UserDeletedAction, "", map[string]interface{}{ + "user_id": user.ID, + "user_email": user.Email, + "user_phone": user.Phone, + }); terr != nil { + return internalServerError("Error recording audit log entry").WithInternalError(terr) + } + + if params.ShouldSoftDelete { + if user.DeletedAt != nil { + // user has been soft deleted already + return nil + } + if terr := user.SoftDeleteUser(tx); terr != nil { + return internalServerError("Error soft deleting user").WithInternalError(terr) + } + + if terr := user.SoftDeleteUserIdentities(tx); terr != nil { + return internalServerError("Error soft deleting user 
identities").WithInternalError(terr) + } + + // hard delete all associated factors + if terr := models.DeleteFactorsByUserId(tx, user.ID); terr != nil { + return internalServerError("Error deleting user's factors").WithInternalError(terr) + } + // hard delete all associated sessions + if terr := models.Logout(tx, user.ID); terr != nil { + return internalServerError("Error deleting user's sessions").WithInternalError(terr) + } + } else { + if terr := tx.Destroy(user); terr != nil { + return internalServerError("Database error deleting user").WithInternalError(terr) + } + } + + return nil + }) + if err != nil { + return err + } + + return sendJSON(w, http.StatusOK, map[string]interface{}{}) +} + +func (a *API) adminUserDeleteFactor(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + user := getUser(ctx) + factor := getFactor(ctx) + + err := a.db.Transaction(func(tx *storage.Connection) error { + if terr := models.NewAuditLogEntry(r, tx, user, models.DeleteFactorAction, r.RemoteAddr, map[string]interface{}{ + "user_id": user.ID, + "factor_id": factor.ID, + }); terr != nil { + return terr + } + if terr := tx.Destroy(factor); terr != nil { + return internalServerError("Database error deleting factor").WithInternalError(terr) + } + return nil + }) + if err != nil { + return err + } + return sendJSON(w, http.StatusOK, factor) +} + +func (a *API) adminUserGetFactors(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + user := getUser(ctx) + return sendJSON(w, http.StatusOK, user.Factors) +} + +// adminUserUpdate updates a single factor object +func (a *API) adminUserUpdateFactor(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + factor := getFactor(ctx) + user := getUser(ctx) + adminUser := getAdminUser(ctx) + params := &adminUserUpdateFactorParams{} + + if err := retrieveRequestParams(r, params); err != nil { + return err + } + + err := a.db.Transaction(func(tx *storage.Connection) error { + if params.FriendlyName != "" { + if terr := factor.UpdateFriendlyName(tx, params.FriendlyName); terr != nil { + return terr + } + } + + if params.Phone != "" && factor.IsPhoneFactor() { + phone, err := validatePhone(params.Phone) + if err != nil { + return badRequestError(ErrorCodeValidationFailed, "Invalid phone number format (E.164 required)") + } + if terr := factor.UpdatePhone(tx, phone); terr != nil { + return terr + } + } + + if terr := models.NewAuditLogEntry(r, tx, adminUser, models.UpdateFactorAction, "", map[string]interface{}{ + "user_id": user.ID, + "factor_id": factor.ID, + "factor_type": factor.FactorType, + }); terr != nil { + return terr + } + return nil + }) + if err != nil { + return err + } + + return sendJSON(w, http.StatusOK, factor) +} diff --git a/auth_v2.169.0/internal/api/admin_test.go b/auth_v2.169.0/internal/api/admin_test.go new file mode 100644 index 0000000..a2070d7 --- /dev/null +++ b/auth_v2.169.0/internal/api/admin_test.go @@ -0,0 +1,915 @@ +package api + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/gofrs/uuid" + jwt "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" +) + +type AdminTestSuite struct { + suite.Suite + User *models.User + API *API + Config *conf.GlobalConfiguration + + token string +} + +func TestAdmin(t *testing.T) { + api, config, err := 
setupAPIForTest() + require.NoError(t, err) + + ts := &AdminTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *AdminTestSuite) SetupTest() { + models.TruncateAll(ts.API.db) + ts.Config.External.Email.Enabled = true + claims := &AccessTokenClaims{ + Role: "supabase_admin", + } + token, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString([]byte(ts.Config.JWT.Secret)) + require.NoError(ts.T(), err, "Error generating admin jwt") + ts.token = token +} + +// TestAdminUsersUnauthorized tests API /admin/users route without authentication +func (ts *AdminTestSuite) TestAdminUsersUnauthorized() { + req := httptest.NewRequest(http.MethodGet, "/admin/users", nil) + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusUnauthorized, w.Code) +} + +// TestAdminUsers tests API /admin/users route +func (ts *AdminTestSuite) TestAdminUsers() { + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/admin/users", nil) + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + assert.Equal(ts.T(), "; rel=\"last\"", w.Header().Get("Link")) + assert.Equal(ts.T(), "0", w.Header().Get("X-Total-Count")) +} + +// TestAdminUsers tests API /admin/users route +func (ts *AdminTestSuite) TestAdminUsers_Pagination() { + u, err := models.NewUser("12345678", "test1@example.com", "test", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + u, err = models.NewUser("987654321", "test2@example.com", "test", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/admin/users?per_page=1", nil) + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + assert.Equal(ts.T(), "; rel=\"next\", ; rel=\"last\"", w.Header().Get("Link")) + assert.Equal(ts.T(), "2", w.Header().Get("X-Total-Count")) + + data := make(map[string]interface{}) + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + for _, user := range data["users"].([]interface{}) { + assert.NotEmpty(ts.T(), user) + } +} + +// TestAdminUsers tests API /admin/users route +func (ts *AdminTestSuite) TestAdminUsers_SortAsc() { + u, err := models.NewUser("", "test1@example.com", "test", ts.Config.JWT.Aud, nil) + u.CreatedAt = time.Now().Add(-time.Minute) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + u, err = models.NewUser("", "test2@example.com", "test", ts.Config.JWT.Aud, nil) + u.CreatedAt = time.Now() + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/admin/users", nil) + qv := req.URL.Query() + qv.Set("sort", "created_at asc") + req.URL.RawQuery = qv.Encode() + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + data := struct { + Users []*models.User `json:"users"` + 
Aud string `json:"aud"` + }{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + require.Len(ts.T(), data.Users, 2) + assert.Equal(ts.T(), "test1@example.com", data.Users[0].GetEmail()) + assert.Equal(ts.T(), "test2@example.com", data.Users[1].GetEmail()) +} + +// TestAdminUsers tests API /admin/users route +func (ts *AdminTestSuite) TestAdminUsers_SortDesc() { + u, err := models.NewUser("12345678", "test1@example.com", "test", ts.Config.JWT.Aud, nil) + u.CreatedAt = time.Now().Add(-time.Minute) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + u, err = models.NewUser("987654321", "test2@example.com", "test", ts.Config.JWT.Aud, nil) + u.CreatedAt = time.Now() + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/admin/users", nil) + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + data := struct { + Users []*models.User `json:"users"` + Aud string `json:"aud"` + }{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + require.Len(ts.T(), data.Users, 2) + assert.Equal(ts.T(), "test2@example.com", data.Users[0].GetEmail()) + assert.Equal(ts.T(), "test1@example.com", data.Users[1].GetEmail()) +} + +// TestAdminUsers tests API /admin/users route +func (ts *AdminTestSuite) TestAdminUsers_FilterEmail() { + u, err := models.NewUser("", "test1@example.com", "test", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/admin/users?filter=test1", nil) + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + data := struct { + Users []*models.User `json:"users"` + Aud string `json:"aud"` + }{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + require.Len(ts.T(), data.Users, 1) + assert.Equal(ts.T(), "test1@example.com", data.Users[0].GetEmail()) +} + +// TestAdminUsers tests API /admin/users route +func (ts *AdminTestSuite) TestAdminUsers_FilterName() { + u, err := models.NewUser("", "test1@example.com", "test", ts.Config.JWT.Aud, map[string]interface{}{"full_name": "Test User"}) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + u, err = models.NewUser("", "test2@example.com", "test", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/admin/users?filter=User", nil) + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + data := struct { + Users []*models.User `json:"users"` + Aud string `json:"aud"` + }{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + require.Len(ts.T(), data.Users, 1) + assert.Equal(ts.T(), "test1@example.com", data.Users[0].GetEmail()) +} + +// TestAdminUserCreate tests API /admin/user route 
(POST) +func (ts *AdminTestSuite) TestAdminUserCreate() { + cases := []struct { + desc string + params map[string]interface{} + expected map[string]interface{} + }{ + { + desc: "Only phone", + params: map[string]interface{}{ + "phone": "123456789", + "password": "test1", + }, + expected: map[string]interface{}{ + "email": "", + "phone": "123456789", + "isAuthenticated": true, + "provider": "phone", + "providers": []string{"phone"}, + "password": "test1", + }, + }, + { + desc: "With password", + params: map[string]interface{}{ + "email": "test1@example.com", + "phone": "123456789", + "password": "test1", + }, + expected: map[string]interface{}{ + "email": "test1@example.com", + "phone": "123456789", + "isAuthenticated": true, + "provider": "email", + "providers": []string{"email", "phone"}, + "password": "test1", + }, + }, + { + desc: "Without password", + params: map[string]interface{}{ + "email": "test2@example.com", + "phone": "", + }, + expected: map[string]interface{}{ + "email": "test2@example.com", + "phone": "", + "isAuthenticated": false, + "provider": "email", + "providers": []string{"email"}, + }, + }, + { + desc: "With empty string password", + params: map[string]interface{}{ + "email": "test3@example.com", + "phone": "", + "password": "", + }, + expected: map[string]interface{}{ + "email": "test3@example.com", + "phone": "", + "isAuthenticated": false, + "provider": "email", + "providers": []string{"email"}, + "password": "", + }, + }, + { + desc: "Ban created user", + params: map[string]interface{}{ + "email": "test4@example.com", + "phone": "", + "password": "test1", + "ban_duration": "24h", + }, + expected: map[string]interface{}{ + "email": "test4@example.com", + "phone": "", + "isAuthenticated": true, + "provider": "email", + "providers": []string{"email"}, + "password": "test1", + }, + }, + { + desc: "With password hash", + params: map[string]interface{}{ + "email": "test5@example.com", + "password_hash": "$2y$10$SXEz2HeT8PUIGQXo9yeUIem8KzNxgG0d7o/.eGj2rj8KbRgAuRVlq", + }, + expected: map[string]interface{}{ + "email": "test5@example.com", + "phone": "", + "isAuthenticated": true, + "provider": "email", + "providers": []string{"email"}, + "password": "test", + }, + }, + { + desc: "With custom id", + params: map[string]interface{}{ + "id": "fc56ab41-2010-4870-a9b9-767c1dc573fb", + "email": "test6@example.com", + "password": "test", + }, + expected: map[string]interface{}{ + "id": "fc56ab41-2010-4870-a9b9-767c1dc573fb", + "email": "test6@example.com", + "phone": "", + "isAuthenticated": true, + "provider": "email", + "providers": []string{"email"}, + "password": "test", + }, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.params)) + + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/admin/users", &buffer) + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + ts.Config.External.Phone.Enabled = true + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + data := models.User{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + assert.Equal(ts.T(), c.expected["email"], data.GetEmail()) + assert.Equal(ts.T(), c.expected["phone"], data.GetPhone()) + assert.Equal(ts.T(), c.expected["provider"], data.AppMetaData["provider"]) + assert.ElementsMatch(ts.T(), c.expected["providers"], data.AppMetaData["providers"]) + + u, err := models.FindUserByID(ts.API.db, data.ID) + 
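+			// reload the created user from the database so the identity and
+			// password checks below run against persisted state rather than
+			// the response body alone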
require.NoError(ts.T(), err) + + // verify that the corresponding identities were created + require.NotEmpty(ts.T(), u.Identities) + for _, identity := range u.Identities { + require.Equal(ts.T(), u.ID, identity.UserID) + if identity.Provider == "email" { + require.Equal(ts.T(), c.expected["email"], identity.IdentityData["email"]) + } + if identity.Provider == "phone" { + require.Equal(ts.T(), c.expected["phone"], identity.IdentityData["phone"]) + } + } + + if _, ok := c.expected["password"]; ok { + expectedPassword := fmt.Sprintf("%v", c.expected["password"]) + isAuthenticated, _, err := u.Authenticate(context.Background(), ts.API.db, expectedPassword, ts.API.config.Security.DBEncryption.DecryptionKeys, ts.API.config.Security.DBEncryption.Encrypt, ts.API.config.Security.DBEncryption.EncryptionKeyID) + require.NoError(ts.T(), err) + require.Equal(ts.T(), c.expected["isAuthenticated"], isAuthenticated) + } + + if id, ok := c.expected["id"]; ok { + uid, err := uuid.FromString(id.(string)) + require.NoError(ts.T(), err) + require.Equal(ts.T(), uid, data.ID) + } + + // remove created user after each case + require.NoError(ts.T(), ts.API.db.Destroy(u)) + }) + } +} + +// TestAdminUserGet tests API /admin/user route (GET) +func (ts *AdminTestSuite) TestAdminUserGet() { + u, err := models.NewUser("12345678", "test1@example.com", "test", ts.Config.JWT.Aud, map[string]interface{}{"full_name": "Test Get User"}) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("/admin/users/%s", u.ID), nil) + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + data := make(map[string]interface{}) + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + assert.Equal(ts.T(), data["email"], "test1@example.com") + assert.NotNil(ts.T(), data["app_metadata"]) + assert.NotNil(ts.T(), data["user_metadata"]) + md := data["user_metadata"].(map[string]interface{}) + assert.Len(ts.T(), md, 1) + assert.Equal(ts.T(), "Test Get User", md["full_name"]) +} + +// TestAdminUserUpdate tests API /admin/user route (UPDATE) +func (ts *AdminTestSuite) TestAdminUserUpdate() { + u, err := models.NewUser("12345678", "test1@example.com", "test", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + var buffer bytes.Buffer + newEmail := "test2@example.com" + newPhone := "234567890" + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "role": "testing", + "app_metadata": map[string]interface{}{ + "roles": []string{"writer", "editor"}, + }, + "user_metadata": map[string]interface{}{ + "name": "David", + }, + "ban_duration": "24h", + "email": newEmail, + "phone": newPhone, + })) + + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("/admin/users/%s", u.ID), &buffer) + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + data := models.User{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + assert.Equal(ts.T(), "testing", data.Role) + assert.NotNil(ts.T(), data.UserMetaData) + assert.Equal(ts.T(), "David", data.UserMetaData["name"]) + 
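+	// admin updates apply email and phone changes immediately, so the
+	// response should already reflect the new values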
assert.Equal(ts.T(), newEmail, data.GetEmail()) + assert.Equal(ts.T(), newPhone, data.GetPhone()) + + assert.NotNil(ts.T(), data.AppMetaData) + assert.Len(ts.T(), data.AppMetaData["roles"], 2) + assert.Contains(ts.T(), data.AppMetaData["roles"], "writer") + assert.Contains(ts.T(), data.AppMetaData["roles"], "editor") + assert.NotNil(ts.T(), data.BannedUntil) + + u, err = models.FindUserByID(ts.API.db, data.ID) + require.NoError(ts.T(), err) + + // check if the corresponding identities were successfully created + require.NotEmpty(ts.T(), u.Identities) + + for _, identity := range u.Identities { + // for email & phone identities, the providerId is the same as the userId + require.Equal(ts.T(), u.ID.String(), identity.ProviderID) + require.Equal(ts.T(), u.ID, identity.UserID) + if identity.Provider == "email" { + require.Equal(ts.T(), newEmail, identity.IdentityData["email"]) + } + if identity.Provider == "phone" { + require.Equal(ts.T(), newPhone, identity.IdentityData["phone"]) + + } + } +} + +func (ts *AdminTestSuite) TestAdminUserUpdatePasswordFailed() { + u, err := models.NewUser("12345678", "test1@example.com", "test", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + var updateEndpoint = fmt.Sprintf("/admin/users/%s", u.ID) + ts.Config.Password.MinLength = 6 + ts.Run("Password doesn't meet minimum length", func() { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "password": "", + })) + + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPut, updateEndpoint, &buffer) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusUnprocessableEntity, w.Code) + }) +} + +func (ts *AdminTestSuite) TestAdminUserUpdateBannedUntilFailed() { + u, err := models.NewUser("", "test1@example.com", "test", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + var updateEndpoint = fmt.Sprintf("/admin/users/%s", u.ID) + ts.Config.Password.MinLength = 6 + ts.Run("Incorrect format for ban_duration", func() { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "ban_duration": "24", + })) + + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPut, updateEndpoint, &buffer) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusBadRequest, w.Code) + }) +} + +// TestAdminUserDelete tests API /admin/users route (DELETE) +func (ts *AdminTestSuite) TestAdminUserDelete() { + type expected struct { + code int + err error + } + signupParams := &SignupParams{ + Email: "test-delete@example.com", + Password: "test", + Data: map[string]interface{}{"name": "test"}, + Provider: "email", + Aud: ts.Config.JWT.Aud, + } + cases := []struct { + desc string + body map[string]interface{} + isSoftDelete string + isSSOUser bool + expected expected + }{ + { + desc: "Test admin delete user (default)", + isSoftDelete: "", + isSSOUser: false, + expected: expected{code: http.StatusOK, err: models.UserNotFoundError{}}, + body: nil, + }, + { + desc: "Test admin delete user (hard deletion)", + isSoftDelete: "?is_soft_delete=false", + isSSOUser: false, + expected: expected{code: 
http.StatusOK, err: models.UserNotFoundError{}}, + body: map[string]interface{}{ + "should_soft_delete": false, + }, + }, + { + desc: "Test admin delete user (soft deletion)", + isSoftDelete: "?is_soft_delete=true", + isSSOUser: false, + expected: expected{code: http.StatusOK, err: models.UserNotFoundError{}}, + body: map[string]interface{}{ + "should_soft_delete": true, + }, + }, + { + desc: "Test admin delete user (soft deletion & sso user)", + isSoftDelete: "?is_soft_delete=true", + isSSOUser: true, + expected: expected{code: http.StatusOK, err: nil}, + body: map[string]interface{}{ + "should_soft_delete": true, + }, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.body)) + u, err := signupParams.ToUserModel(false /* <- isSSOUser */) + require.NoError(ts.T(), err) + u, err = ts.API.signupNewUser(ts.API.db, u) + require.NoError(ts.T(), err) + + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodDelete, fmt.Sprintf("/admin/users/%s", u.ID), &buffer) + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), c.expected.code, w.Code) + + if c.isSSOUser { + u, err = models.FindUserByID(ts.API.db, u.ID) + require.NotNil(ts.T(), u) + } else { + _, err = models.FindUserByEmailAndAudience(ts.API.db, signupParams.Email, ts.Config.JWT.Aud) + } + require.Equal(ts.T(), c.expected.err, err) + }) + } +} + +func (ts *AdminTestSuite) TestAdminUserSoftDeletion() { + // create user + u, err := models.NewUser("123456789", "test@example.com", "secret", ts.Config.JWT.Aud, map[string]interface{}{"name": "test"}) + require.NoError(ts.T(), err) + u.ConfirmationToken = "some_token" + u.RecoveryToken = "some_token" + u.EmailChangeTokenCurrent = "some_token" + u.EmailChangeTokenNew = "some_token" + u.PhoneChangeToken = "some_token" + u.AppMetaData = map[string]interface{}{ + "provider": "email", + } + require.NoError(ts.T(), ts.API.db.Create(u)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.GetEmail(), u.ConfirmationToken, models.ConfirmationToken)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.GetEmail(), u.RecoveryToken, models.RecoveryToken)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.GetEmail(), u.EmailChangeTokenCurrent, models.EmailChangeTokenCurrent)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.GetEmail(), u.EmailChangeTokenNew, models.EmailChangeTokenNew)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.GetPhone(), u.PhoneChangeToken, models.PhoneChangeToken)) + + // create user identities + _, err = ts.API.createNewIdentity(ts.API.db, u, "email", map[string]interface{}{ + "sub": "123456", + "email": "test@example.com", + }) + require.NoError(ts.T(), err) + _, err = ts.API.createNewIdentity(ts.API.db, u, "github", map[string]interface{}{ + "sub": "234567", + "email": "test@example.com", + }) + require.NoError(ts.T(), err) + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "should_soft_delete": true, + })) + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodDelete, fmt.Sprintf("/admin/users/%s", u.ID), &buffer) + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + // get soft-deleted user 
from db + deletedUser, err := models.FindUserByID(ts.API.db, u.ID) + require.NoError(ts.T(), err) + + require.Empty(ts.T(), deletedUser.ConfirmationToken) + require.Empty(ts.T(), deletedUser.RecoveryToken) + require.Empty(ts.T(), deletedUser.EmailChangeTokenCurrent) + require.Empty(ts.T(), deletedUser.EmailChangeTokenNew) + require.Empty(ts.T(), deletedUser.EncryptedPassword) + require.Empty(ts.T(), deletedUser.PhoneChangeToken) + require.Empty(ts.T(), deletedUser.UserMetaData) + require.Empty(ts.T(), deletedUser.AppMetaData) + require.NotEmpty(ts.T(), deletedUser.DeletedAt) + require.NotEmpty(ts.T(), deletedUser.GetEmail()) + + // get soft-deleted user's identity from db + deletedIdentities, err := models.FindIdentitiesByUserID(ts.API.db, deletedUser.ID) + require.NoError(ts.T(), err) + + for _, identity := range deletedIdentities { + require.Empty(ts.T(), identity.IdentityData) + } +} + +func (ts *AdminTestSuite) TestAdminUserCreateWithDisabledLogin() { + var cases = []struct { + desc string + customConfig *conf.GlobalConfiguration + userData map[string]interface{} + expected int + }{ + { + desc: "Email Signups Disabled", + customConfig: &conf.GlobalConfiguration{ + JWT: ts.Config.JWT, + External: conf.ProviderConfiguration{ + Email: conf.EmailProviderConfiguration{ + Enabled: false, + }, + }, + }, + userData: map[string]interface{}{ + "email": "test1@example.com", + "password": "test1", + }, + expected: http.StatusOK, + }, + { + desc: "Phone Signups Disabled", + customConfig: &conf.GlobalConfiguration{ + JWT: ts.Config.JWT, + External: conf.ProviderConfiguration{ + Phone: conf.PhoneProviderConfiguration{ + Enabled: false, + }, + }, + }, + userData: map[string]interface{}{ + "phone": "123456789", + "password": "test1", + }, + expected: http.StatusOK, + }, + { + desc: "All Signups Disabled", + customConfig: &conf.GlobalConfiguration{ + JWT: ts.Config.JWT, + DisableSignup: true, + }, + userData: map[string]interface{}{ + "email": "test2@example.com", + "password": "test2", + }, + expected: http.StatusOK, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + // Initialize user data + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.userData)) + + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/admin/users", &buffer) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.Config.JWT = c.customConfig.JWT + ts.Config.External = c.customConfig.External + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), c.expected, w.Code) + }) + } +} + +// TestAdminUserDeleteFactor tests API /admin/users//factors// +func (ts *AdminTestSuite) TestAdminUserDeleteFactor() { + u, err := models.NewUser("123456789", "test-delete@example.com", "test", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + f := models.NewTOTPFactor(u, "testSimpleName") + require.NoError(ts.T(), f.UpdateStatus(ts.API.db, models.FactorStateVerified)) + require.NoError(ts.T(), f.SetSecret("secretkey", ts.Config.Security.DBEncryption.Encrypt, ts.Config.Security.DBEncryption.EncryptionKeyID, ts.Config.Security.DBEncryption.EncryptionKey)) + require.NoError(ts.T(), ts.API.db.Create(f), "Error saving new test factor") + + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodDelete, fmt.Sprintf("/admin/users/%s/factors/%s/", u.ID, f.ID), nil) + + req.Header.Set("Authorization", 
fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + _, err = models.FindFactorByFactorID(ts.API.db, f.ID) + require.EqualError(ts.T(), err, models.FactorNotFoundError{}.Error()) + +} + +// TestAdminUserGetFactor tests API /admin/user//factors/ +func (ts *AdminTestSuite) TestAdminUserGetFactors() { + u, err := models.NewUser("123456789", "test-delete@example.com", "test", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + f := models.NewTOTPFactor(u, "testSimpleName") + require.NoError(ts.T(), f.SetSecret("secretkey", ts.Config.Security.DBEncryption.Encrypt, ts.Config.Security.DBEncryption.EncryptionKeyID, ts.Config.Security.DBEncryption.EncryptionKey)) + require.NoError(ts.T(), ts.API.db.Create(f), "Error saving new test factor") + + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("/admin/users/%s/factors/", u.ID), nil) + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + getFactorsResp := []*models.Factor{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&getFactorsResp)) + require.Equal(ts.T(), getFactorsResp[0].Secret, "") +} + +func (ts *AdminTestSuite) TestAdminUserUpdateFactor() { + u, err := models.NewUser("123456789", "test-delete@example.com", "test", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + f := models.NewPhoneFactor(u, "123456789", "testSimpleName") + require.NoError(ts.T(), f.SetSecret("secretkey", ts.Config.Security.DBEncryption.Encrypt, ts.Config.Security.DBEncryption.EncryptionKeyID, ts.Config.Security.DBEncryption.EncryptionKey)) + require.NoError(ts.T(), ts.API.db.Create(f), "Error saving new test factor") + + var cases = []struct { + Desc string + FactorData map[string]interface{} + ExpectedCode int + }{ + { + Desc: "Update Factor friendly name", + FactorData: map[string]interface{}{ + "friendly_name": "john", + }, + ExpectedCode: http.StatusOK, + }, + { + Desc: "Update Factor phone number", + FactorData: map[string]interface{}{ + "phone": "+1976154321", + }, + ExpectedCode: http.StatusOK, + }, + } + + // Initialize factor data + for _, c := range cases { + ts.Run(c.Desc, func() { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.FactorData)) + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("/admin/users/%s/factors/%s/", u.ID, f.ID), &buffer) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), c.ExpectedCode, w.Code) + }) + } +} + +func (ts *AdminTestSuite) TestAdminUserCreateValidationErrors() { + cases := []struct { + desc string + params map[string]interface{} + }{ + { + desc: "create user without email and phone", + params: map[string]interface{}{ + "password": "test_password", + }, + }, + { + desc: "create user with password and password hash", + params: map[string]interface{}{ + "email": "test@example.com", + "password": "test_password", + "password_hash": "$2y$10$Tk6yEdmTbb/eQ/haDMaCsuCsmtPVprjHMcij1RqiJdLGPDXnL3L1a", + }, + }, + { + desc: "invalid ban duration", + params: map[string]interface{}{ + "email": "test@example.com", + "ban_duration": "never", + }, + }, + { 
+ desc: "custom id is nil", + params: map[string]interface{}{ + "id": "00000000-0000-0000-0000-000000000000", + "email": "test@example.com", + }, + }, + { + desc: "bad id format", + params: map[string]interface{}{ + "id": "bad_uuid_format", + "email": "test@example.com", + }, + }, + } + for _, c := range cases { + ts.Run(c.desc, func() { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.params)) + req := httptest.NewRequest(http.MethodPost, "/admin/users", &buffer) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusBadRequest, w.Code, w) + + data := map[string]interface{}{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + require.Equal(ts.T(), data["error_code"], ErrorCodeValidationFailed) + }) + + } +} diff --git a/auth_v2.169.0/internal/api/anonymous.go b/auth_v2.169.0/internal/api/anonymous.go new file mode 100644 index 0000000..294f860 --- /dev/null +++ b/auth_v2.169.0/internal/api/anonymous.go @@ -0,0 +1,55 @@ +package api + +import ( + "net/http" + + "github.com/supabase/auth/internal/metering" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +func (a *API) SignupAnonymously(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + config := a.config + db := a.db.WithContext(ctx) + aud := a.requestAud(ctx, r) + + if config.DisableSignup { + return unprocessableEntityError(ErrorCodeSignupDisabled, "Signups not allowed for this instance") + } + + params := &SignupParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + params.Aud = aud + params.Provider = "anonymous" + + newUser, err := params.ToUserModel(false /* <- isSSOUser */) + if err != nil { + return err + } + + var grantParams models.GrantParams + grantParams.FillGrantParams(r) + + var token *AccessTokenResponse + err = db.Transaction(func(tx *storage.Connection) error { + var terr error + newUser, terr = a.signupNewUser(tx, newUser) + if terr != nil { + return terr + } + token, terr = a.issueRefreshToken(r, tx, newUser, models.Anonymous, grantParams) + if terr != nil { + return terr + } + return nil + }) + if err != nil { + return internalServerError("Database error creating anonymous user").WithInternalError(err) + } + + metering.RecordLogin("anonymous", newUser.ID) + return sendJSON(w, http.StatusOK, token) +} diff --git a/auth_v2.169.0/internal/api/anonymous_test.go b/auth_v2.169.0/internal/api/anonymous_test.go new file mode 100644 index 0000000..81d900d --- /dev/null +++ b/auth_v2.169.0/internal/api/anonymous_test.go @@ -0,0 +1,329 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gofrs/uuid" + jwt "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + mail "github.com/supabase/auth/internal/mailer" + "github.com/supabase/auth/internal/models" +) + +type AnonymousTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration +} + +func TestAnonymous(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := &AnonymousTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *AnonymousTestSuite) SetupTest() { + models.TruncateAll(ts.API.db) + + // Create 
anonymous user + params := &SignupParams{ + Aud: ts.Config.JWT.Aud, + Provider: "anonymous", + } + u, err := params.ToUserModel(false) + require.NoError(ts.T(), err, "Error creating test user model") + require.NoError(ts.T(), ts.API.db.Create(u), "Error saving new anonymous test user") +} + +func (ts *AnonymousTestSuite) TestAnonymousLogins() { + ts.Config.External.AnonymousUsers.Enabled = true + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "data": map[string]interface{}{ + "field": "foo", + }, + })) + + req := httptest.NewRequest(http.MethodPost, "/signup", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + data := &AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + assert.NotEmpty(ts.T(), data.User.ID) + assert.Equal(ts.T(), ts.Config.JWT.Aud, data.User.Aud) + assert.Empty(ts.T(), data.User.GetEmail()) + assert.Empty(ts.T(), data.User.GetPhone()) + assert.True(ts.T(), data.User.IsAnonymous) + assert.Equal(ts.T(), models.JSONMap(models.JSONMap{"field": "foo"}), data.User.UserMetaData) +} + +func (ts *AnonymousTestSuite) TestConvertAnonymousUserToPermanent() { + ts.Config.External.AnonymousUsers.Enabled = true + ts.Config.Sms.TestOTP = map[string]string{"1234567890": "000000", "1234560000": "000000"} + // test OTPs still require setting up an sms provider + ts.Config.Sms.Provider = "twilio" + ts.Config.Sms.Twilio.AccountSid = "fake-sid" + ts.Config.Sms.Twilio.AuthToken = "fake-token" + ts.Config.Sms.Twilio.MessageServiceSid = "fake-message-service-sid" + + cases := []struct { + desc string + body map[string]interface{} + verificationType string + }{ + { + desc: "convert anonymous user to permanent user with email", + body: map[string]interface{}{ + "email": "test@example.com", + }, + verificationType: "email_change", + }, + { + desc: "convert anonymous user to permanent user with phone", + body: map[string]interface{}{ + "phone": "1234567890", + }, + verificationType: "phone_change", + }, + { + desc: "convert anonymous user to permanent user with email & password", + body: map[string]interface{}{ + "email": "test2@example.com", + "password": "test-password", + }, + verificationType: "email_change", + }, + { + desc: "convert anonymous user to permanent user with phone & password", + body: map[string]interface{}{ + "phone": "1234560000", + "password": "test-password", + }, + verificationType: "phone_change", + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{})) + + req := httptest.NewRequest(http.MethodPost, "/signup", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + signupResponse := &AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&signupResponse)) + + // Add email to anonymous user + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.body)) + + req = httptest.NewRequest(http.MethodPut, "/user", &buffer) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", signupResponse.Token)) + + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, 
w.Code) + + // Check if anonymous user is still anonymous + user, err := models.FindUserByID(ts.API.db, signupResponse.User.ID) + require.NoError(ts.T(), err) + require.NotEmpty(ts.T(), user) + require.True(ts.T(), user.IsAnonymous) + + // Check if user has a password set + if c.body["password"] != nil { + require.True(ts.T(), user.HasPassword()) + } + + switch c.verificationType { + case mail.EmailChangeVerification: + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "token_hash": user.EmailChangeTokenNew, + "type": c.verificationType, + })) + case phoneChangeVerification: + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "phone": user.PhoneChange, + "token": "000000", + "type": c.verificationType, + })) + } + + req = httptest.NewRequest(http.MethodPost, "/verify", &buffer) + req.Header.Set("Content-Type", "application/json") + + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + data := &AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + // User is a permanent user and not anonymous anymore + assert.Equal(ts.T(), signupResponse.User.ID, data.User.ID) + assert.Equal(ts.T(), ts.Config.JWT.Aud, data.User.Aud) + assert.False(ts.T(), data.User.IsAnonymous) + + // User should have an identity + assert.Len(ts.T(), data.User.Identities, 1) + + switch c.verificationType { + case mail.EmailChangeVerification: + assert.Equal(ts.T(), c.body["email"], data.User.GetEmail()) + assert.Equal(ts.T(), models.JSONMap(models.JSONMap{"provider": "email", "providers": []interface{}{"email"}}), data.User.AppMetaData) + assert.NotEmpty(ts.T(), data.User.EmailConfirmedAt) + case phoneChangeVerification: + assert.Equal(ts.T(), c.body["phone"], data.User.GetPhone()) + assert.Equal(ts.T(), models.JSONMap(models.JSONMap{"provider": "phone", "providers": []interface{}{"phone"}}), data.User.AppMetaData) + assert.NotEmpty(ts.T(), data.User.PhoneConfirmedAt) + } + }) + } +} + +func (ts *AnonymousTestSuite) TestRateLimitAnonymousSignups() { + var buffer bytes.Buffer + ts.Config.External.AnonymousUsers.Enabled = true + + // It rate limits after 30 requests + for i := 0; i < int(ts.Config.RateLimitAnonymousUsers); i++ { + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{})) + req := httptest.NewRequest(http.MethodPost, "http://localhost/signup", &buffer) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("My-Custom-Header", "1.2.3.4") + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusOK, w.Code) + } + + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{})) + req := httptest.NewRequest(http.MethodPost, "http://localhost/signup", &buffer) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("My-Custom-Header", "1.2.3.4") + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusTooManyRequests, w.Code) + + // It ignores X-Forwarded-For by default + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{})) + req.Header.Set("X-Forwarded-For", "1.1.1.1") + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusTooManyRequests, w.Code) + + // It doesn't rate limit a new value for the limited header + req.Header.Set("My-Custom-Header", "5.6.7.8") + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + 
assert.Equal(ts.T(), http.StatusBadRequest, w.Code) +} + +func (ts *AnonymousTestSuite) TestAdminUpdateAnonymousUser() { + claims := &AccessTokenClaims{ + Role: "supabase_admin", + } + adminJwt, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString([]byte(ts.Config.JWT.Secret)) + require.NoError(ts.T(), err) + + u1, err := models.NewUser("", "", "", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err) + u1.IsAnonymous = true + require.NoError(ts.T(), ts.API.db.Create(u1)) + + u2, err := models.NewUser("", "", "", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err) + u2.IsAnonymous = true + require.NoError(ts.T(), ts.API.db.Create(u2)) + + cases := []struct { + desc string + userId uuid.UUID + body map[string]interface{} + expected map[string]interface{} + expectedIdentities int + }{ + { + desc: "update anonymous user with email and email confirm true", + userId: u1.ID, + body: map[string]interface{}{ + "email": "foo@example.com", + "email_confirm": true, + }, + expected: map[string]interface{}{ + "email": "foo@example.com", + "is_anonymous": false, + }, + expectedIdentities: 1, + }, + { + desc: "update anonymous user with email and email confirm false", + userId: u2.ID, + body: map[string]interface{}{ + "email": "bar@example.com", + "email_confirm": false, + }, + expected: map[string]interface{}{ + "email": "bar@example.com", + "is_anonymous": true, + }, + expectedIdentities: 1, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.body)) + + req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("/admin/users/%s", c.userId), &buffer) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", adminJwt)) + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + var data models.User + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + require.NotNil(ts.T(), data) + require.Len(ts.T(), data.Identities, c.expectedIdentities) + + actual := map[string]interface{}{ + "email": data.GetEmail(), + "is_anonymous": data.IsAnonymous, + } + + require.Equal(ts.T(), c.expected, actual) + }) + } +} diff --git a/auth_v2.169.0/internal/api/api.go b/auth_v2.169.0/internal/api/api.go new file mode 100644 index 0000000..aafcff2 --- /dev/null +++ b/auth_v2.169.0/internal/api/api.go @@ -0,0 +1,318 @@ +package api + +import ( + "net/http" + "regexp" + "time" + + "github.com/rs/cors" + "github.com/sebest/xff" + "github.com/sirupsen/logrus" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/mailer" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/observability" + "github.com/supabase/auth/internal/storage" + "github.com/supabase/auth/internal/utilities" + "github.com/supabase/hibp" +) + +const ( + audHeaderName = "X-JWT-AUD" + defaultVersion = "unknown version" +) + +var bearerRegexp = regexp.MustCompile(`^(?:B|b)earer (\S+$)`) + +// API is the main REST API +type API struct { + handler http.Handler + db *storage.Connection + config *conf.GlobalConfiguration + version string + + hibpClient *hibp.PwnedClient + + // overrideTime can be used to override the clock used by handlers. Should only be used in tests! 
+ overrideTime func() time.Time + + limiterOpts *LimiterOptions +} + +func (a *API) Now() time.Time { + if a.overrideTime != nil { + return a.overrideTime() + } + + return time.Now() +} + +// NewAPI instantiates a new REST API +func NewAPI(globalConfig *conf.GlobalConfiguration, db *storage.Connection, opt ...Option) *API { + return NewAPIWithVersion(globalConfig, db, defaultVersion, opt...) +} + +func (a *API) deprecationNotices() { + config := a.config + + log := logrus.WithField("component", "api") + + if config.JWT.AdminGroupName != "" { + log.Warn("DEPRECATION NOTICE: GOTRUE_JWT_ADMIN_GROUP_NAME not supported by Supabase's GoTrue, will be removed soon") + } + + if config.JWT.DefaultGroupName != "" { + log.Warn("DEPRECATION NOTICE: GOTRUE_JWT_DEFAULT_GROUP_NAME not supported by Supabase's GoTrue, will be removed soon") + } +} + +// NewAPIWithVersion creates a new REST API using the specified version +func NewAPIWithVersion(globalConfig *conf.GlobalConfiguration, db *storage.Connection, version string, opt ...Option) *API { + api := &API{config: globalConfig, db: db, version: version} + + for _, o := range opt { + o.apply(api) + } + if api.limiterOpts == nil { + api.limiterOpts = NewLimiterOptions(globalConfig) + } + if api.config.Password.HIBP.Enabled { + httpClient := &http.Client{ + // all HIBP API requests should finish quickly to avoid + // unnecessary slowdowns + Timeout: 5 * time.Second, + } + + api.hibpClient = &hibp.PwnedClient{ + UserAgent: api.config.Password.HIBP.UserAgent, + HTTP: httpClient, + } + + if api.config.Password.HIBP.Bloom.Enabled { + cache := utilities.NewHIBPBloomCache(api.config.Password.HIBP.Bloom.Items, api.config.Password.HIBP.Bloom.FalsePositives) + api.hibpClient.Cache = cache + + logrus.Infof("Pwned passwords cache is %.2f KB", float64(cache.Cap())/(8*1024.0)) + } + } + + api.deprecationNotices() + + xffmw, _ := xff.Default() + logger := observability.NewStructuredLogger(logrus.StandardLogger(), globalConfig) + + r := newRouter() + r.UseBypass(observability.AddRequestID(globalConfig)) + r.UseBypass(logger) + r.UseBypass(xffmw.Handler) + r.UseBypass(recoverer) + + if globalConfig.API.MaxRequestDuration > 0 { + r.UseBypass(timeoutMiddleware(globalConfig.API.MaxRequestDuration)) + } + + // request tracing should be added only when tracing or metrics is enabled + if globalConfig.Tracing.Enabled || globalConfig.Metrics.Enabled { + r.UseBypass(observability.RequestTracing()) + } + + if globalConfig.DB.CleanupEnabled { + cleanup := models.NewCleanup(globalConfig) + r.UseBypass(api.databaseCleanup(cleanup)) + } + + r.Get("/health", api.HealthCheck) + r.Get("/.well-known/jwks.json", api.Jwks) + + r.Route("/callback", func(r *router) { + r.Use(api.isValidExternalHost) + r.Use(api.loadFlowState) + + r.Get("/", api.ExternalProviderCallback) + r.Post("/", api.ExternalProviderCallback) + }) + + r.Route("/", func(r *router) { + r.Use(api.isValidExternalHost) + + r.Get("/settings", api.Settings) + + r.Get("/authorize", api.ExternalProviderRedirect) + + r.With(api.requireAdminCredentials).Post("/invite", api.Invite) + r.With(api.verifyCaptcha).Route("/signup", func(r *router) { + // rate limit per hour + limitAnonymousSignIns := api.limiterOpts.AnonymousSignIns + limitSignups := api.limiterOpts.Signups + r.Post("/", func(w http.ResponseWriter, r *http.Request) error { + params := &SignupParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + if params.Email == "" && params.Phone == "" { + if !api.config.External.AnonymousUsers.Enabled { + 
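+					// neither an email nor a phone was supplied, so this is an
+					// anonymous sign-in request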
return unprocessableEntityError(ErrorCodeAnonymousProviderDisabled, "Anonymous sign-ins are disabled") + } + if _, err := api.limitHandler(limitAnonymousSignIns)(w, r); err != nil { + return err + } + return api.SignupAnonymously(w, r) + } + + // apply ip-based rate limiting on otps + if _, err := api.limitHandler(limitSignups)(w, r); err != nil { + return err + } + return api.Signup(w, r) + }) + }) + r.With(api.limitHandler(api.limiterOpts.Recover)). + With(api.verifyCaptcha).With(api.requireEmailProvider).Post("/recover", api.Recover) + + r.With(api.limitHandler(api.limiterOpts.Resend)). + With(api.verifyCaptcha).Post("/resend", api.Resend) + + r.With(api.limitHandler(api.limiterOpts.MagicLink)). + With(api.verifyCaptcha).Post("/magiclink", api.MagicLink) + + r.With(api.limitHandler(api.limiterOpts.Otp)). + With(api.verifyCaptcha).Post("/otp", api.Otp) + + r.With(api.limitHandler(api.limiterOpts.Token)). + With(api.verifyCaptcha).Post("/token", api.Token) + + r.With(api.limitHandler(api.limiterOpts.Verify)).Route("/verify", func(r *router) { + r.Get("/", api.Verify) + r.Post("/", api.Verify) + }) + + r.With(api.requireAuthentication).Post("/logout", api.Logout) + + r.With(api.requireAuthentication).Route("/reauthenticate", func(r *router) { + r.Get("/", api.Reauthenticate) + }) + + r.With(api.requireAuthentication).Route("/user", func(r *router) { + r.Get("/", api.UserGet) + r.With(api.limitHandler(api.limiterOpts.User)).Put("/", api.UserUpdate) + + r.Route("/identities", func(r *router) { + r.Use(api.requireManualLinkingEnabled) + r.Get("/authorize", api.LinkIdentity) + r.Delete("/{identity_id}", api.DeleteIdentity) + }) + }) + + r.With(api.requireAuthentication).Route("/factors", func(r *router) { + r.Use(api.requireNotAnonymous) + r.Post("/", api.EnrollFactor) + r.Route("/{factor_id}", func(r *router) { + r.Use(api.loadFactor) + + r.With(api.limitHandler(api.limiterOpts.FactorVerify)). + Post("/verify", api.VerifyFactor) + r.With(api.limitHandler(api.limiterOpts.FactorChallenge)). + Post("/challenge", api.ChallengeFactor) + r.Delete("/", api.UnenrollFactor) + + }) + }) + + r.Route("/sso", func(r *router) { + r.Use(api.requireSAMLEnabled) + r.With(api.limitHandler(api.limiterOpts.SSO)). + With(api.verifyCaptcha).Post("/", api.SingleSignOn) + + r.Route("/saml", func(r *router) { + r.Get("/metadata", api.SAMLMetadata) + + r.With(api.limitHandler(api.limiterOpts.SAMLAssertion)). 
+ Post("/acs", api.SamlAcs) + }) + }) + + r.Route("/admin", func(r *router) { + r.Use(api.requireAdminCredentials) + + r.Route("/audit", func(r *router) { + r.Get("/", api.adminAuditLog) + }) + + r.Route("/users", func(r *router) { + r.Get("/", api.adminUsers) + r.Post("/", api.adminUserCreate) + + r.Route("/{user_id}", func(r *router) { + r.Use(api.loadUser) + r.Route("/factors", func(r *router) { + r.Get("/", api.adminUserGetFactors) + r.Route("/{factor_id}", func(r *router) { + r.Use(api.loadFactor) + r.Delete("/", api.adminUserDeleteFactor) + r.Put("/", api.adminUserUpdateFactor) + }) + }) + + r.Get("/", api.adminUserGet) + r.Put("/", api.adminUserUpdate) + r.Delete("/", api.adminUserDelete) + }) + }) + + r.Post("/generate_link", api.adminGenerateLink) + + r.Route("/sso", func(r *router) { + r.Route("/providers", func(r *router) { + r.Get("/", api.adminSSOProvidersList) + r.Post("/", api.adminSSOProvidersCreate) + + r.Route("/{idp_id}", func(r *router) { + r.Use(api.loadSSOProvider) + + r.Get("/", api.adminSSOProvidersGet) + r.Put("/", api.adminSSOProvidersUpdate) + r.Delete("/", api.adminSSOProvidersDelete) + }) + }) + }) + + }) + }) + + corsHandler := cors.New(cors.Options{ + AllowedMethods: []string{http.MethodGet, http.MethodPost, http.MethodPut, http.MethodDelete}, + AllowedHeaders: globalConfig.CORS.AllAllowedHeaders([]string{"Accept", "Authorization", "Content-Type", "X-Client-IP", "X-Client-Info", audHeaderName, useCookieHeader, APIVersionHeaderName}), + ExposedHeaders: []string{"X-Total-Count", "Link", APIVersionHeaderName}, + AllowCredentials: true, + }) + + api.handler = corsHandler.Handler(r) + return api +} + +type HealthCheckResponse struct { + Version string `json:"version"` + Name string `json:"name"` + Description string `json:"description"` +} + +// HealthCheck endpoint indicates if the gotrue api service is available +func (a *API) HealthCheck(w http.ResponseWriter, r *http.Request) error { + return sendJSON(w, http.StatusOK, HealthCheckResponse{ + Version: a.version, + Name: "GoTrue", + Description: "GoTrue is a user registration and authentication API", + }) +} + +// Mailer returns NewMailer with the current tenant config +func (a *API) Mailer() mailer.Mailer { + config := a.config + return mailer.NewMailer(config) +} + +// ServeHTTP implements the http.Handler interface by passing the request along +// to its underlying Handler. +func (a *API) ServeHTTP(w http.ResponseWriter, r *http.Request) { + a.handler.ServeHTTP(w, r) +} diff --git a/auth_v2.169.0/internal/api/api_test.go b/auth_v2.169.0/internal/api/api_test.go new file mode 100644 index 0000000..a472be7 --- /dev/null +++ b/auth_v2.169.0/internal/api/api_test.go @@ -0,0 +1,57 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/crypto" + "github.com/supabase/auth/internal/storage" + "github.com/supabase/auth/internal/storage/test" +) + +const ( + apiTestVersion = "1" + apiTestConfig = "../../hack/test.env" +) + +func init() { + crypto.PasswordHashCost = crypto.QuickHashCost +} + +// setupAPIForTest creates a new API to run tests with. +// Using this function allows us to keep track of the database connection +// and cleaning up data between tests. 
+func setupAPIForTest() (*API, *conf.GlobalConfiguration, error) { + return setupAPIForTestWithCallback(nil) +} + +func setupAPIForTestWithCallback(cb func(*conf.GlobalConfiguration, *storage.Connection)) (*API, *conf.GlobalConfiguration, error) { + config, err := conf.LoadGlobal(apiTestConfig) + if err != nil { + return nil, nil, err + } + + if cb != nil { + cb(config, nil) + } + + conn, err := test.SetupDBConnection(config) + if err != nil { + return nil, nil, err + } + + if cb != nil { + cb(nil, conn) + } + + limiterOpts := NewLimiterOptions(config) + return NewAPIWithVersion(config, conn, apiTestVersion, limiterOpts), config, nil +} + +func TestEmailEnabledByDefault(t *testing.T) { + api, _, err := setupAPIForTest() + require.NoError(t, err) + + require.True(t, api.config.External.Email.Enabled) +} diff --git a/auth_v2.169.0/internal/api/apiversions.go b/auth_v2.169.0/internal/api/apiversions.go new file mode 100644 index 0000000..b5394a5 --- /dev/null +++ b/auth_v2.169.0/internal/api/apiversions.go @@ -0,0 +1,35 @@ +package api + +import ( + "time" +) + +const APIVersionHeaderName = "X-Supabase-Api-Version" + +type APIVersion = time.Time + +var ( + APIVersionInitial = time.Time{} + APIVersion20240101 = time.Date(2024, time.January, 1, 0, 0, 0, 0, time.UTC) +) + +func DetermineClosestAPIVersion(date string) (APIVersion, error) { + if date == "" { + return APIVersionInitial, nil + } + + parsed, err := time.ParseInLocation("2006-01-02", date, time.UTC) + if err != nil { + return APIVersionInitial, err + } + + if parsed.Compare(APIVersion20240101) >= 0 { + return APIVersion20240101, nil + } + + return APIVersionInitial, nil +} + +func FormatAPIVersion(apiVersion APIVersion) string { + return apiVersion.Format("2006-01-02") +} diff --git a/auth_v2.169.0/internal/api/apiversions_test.go b/auth_v2.169.0/internal/api/apiversions_test.go new file mode 100644 index 0000000..0a96221 --- /dev/null +++ b/auth_v2.169.0/internal/api/apiversions_test.go @@ -0,0 +1,29 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDetermineClosestAPIVersion(t *testing.T) { + version, err := DetermineClosestAPIVersion("") + require.NoError(t, err) + require.Equal(t, APIVersionInitial, version) + + version, err = DetermineClosestAPIVersion("Not a date") + require.Error(t, err) + require.Equal(t, APIVersionInitial, version) + + version, err = DetermineClosestAPIVersion("2023-12-31") + require.NoError(t, err) + require.Equal(t, APIVersionInitial, version) + + version, err = DetermineClosestAPIVersion("2024-01-01") + require.NoError(t, err) + require.Equal(t, APIVersion20240101, version) + + version, err = DetermineClosestAPIVersion("2024-01-02") + require.NoError(t, err) + require.Equal(t, APIVersion20240101, version) +} diff --git a/auth_v2.169.0/internal/api/audit.go b/auth_v2.169.0/internal/api/audit.go new file mode 100644 index 0000000..351a7d2 --- /dev/null +++ b/auth_v2.169.0/internal/api/audit.go @@ -0,0 +1,47 @@ +package api + +import ( + "net/http" + "strings" + + "github.com/supabase/auth/internal/models" +) + +var filterColumnMap = map[string][]string{ + "author": {"actor_username", "actor_name"}, + "action": {"action"}, + "type": {"log_type"}, +} + +func (a *API) adminAuditLog(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + + // aud := a.requestAud(ctx, r) + pageParams, err := paginate(r) + if err != nil { + return badRequestError(ErrorCodeValidationFailed, "Bad Pagination Parameters: %v", err) + } + + var 
col []string + var qval string + q := r.URL.Query().Get("query") + if q != "" { + var exists bool + qparts := strings.SplitN(q, ":", 2) + col, exists = filterColumnMap[qparts[0]] + if !exists || len(qparts) < 2 { + return badRequestError(ErrorCodeValidationFailed, "Invalid query scope: %s", q) + } + qval = qparts[1] + } + + logs, err := models.FindAuditLogEntries(db, col, qval, pageParams) + if err != nil { + return internalServerError("Error searching for audit logs").WithInternalError(err) + } + + addPaginationHeaders(w, r, pageParams) + + return sendJSON(w, http.StatusOK, logs) +} diff --git a/auth_v2.169.0/internal/api/audit_test.go b/auth_v2.169.0/internal/api/audit_test.go new file mode 100644 index 0000000..c8e992e --- /dev/null +++ b/auth_v2.169.0/internal/api/audit_test.go @@ -0,0 +1,139 @@ +package api + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + jwt "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" +) + +type AuditTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration + + token string +} + +func TestAudit(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := &AuditTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *AuditTestSuite) SetupTest() { + models.TruncateAll(ts.API.db) + ts.token = ts.makeSuperAdmin("") +} + +func (ts *AuditTestSuite) makeSuperAdmin(email string) string { + u, err := models.NewUser("", email, "test", ts.Config.JWT.Aud, map[string]interface{}{"full_name": "Test User"}) + require.NoError(ts.T(), err, "Error making new user") + + u.Role = "supabase_admin" + require.NoError(ts.T(), ts.API.db.Create(u)) + + session, err := models.NewSession(u.ID, nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.API.db.Create(session)) + + var token string + + req := httptest.NewRequest(http.MethodPost, "/token?grant_type=password", nil) + token, _, err = ts.API.generateAccessToken(req, ts.API.db, u, &session.ID, models.PasswordGrant) + require.NoError(ts.T(), err, "Error generating access token") + + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.Parse(token, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + require.NoError(ts.T(), err, "Error parsing token") + + return token +} + +func (ts *AuditTestSuite) TestAuditGet() { + ts.prepareDeleteEvent() + // CHECK FOR AUDIT LOG + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/admin/audit", nil) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + assert.Equal(ts.T(), "; rel=\"last\"", w.Header().Get("Link")) + assert.Equal(ts.T(), "1", w.Header().Get("X-Total-Count")) + + logs := []models.AuditLogEntry{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&logs)) + + require.Len(ts.T(), logs, 1) + require.Contains(ts.T(), logs[0].Payload, "actor_username") + assert.Equal(ts.T(), "supabase_admin", logs[0].Payload["actor_username"]) + traits, ok := logs[0].Payload["traits"].(map[string]interface{}) + require.True(ts.T(), ok) + require.Contains(ts.T(), traits, "user_email") + assert.Equal(ts.T(), "test-delete@example.com", traits["user_email"]) +} + 
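+// The /admin/audit endpoint accepts a "scope:value" query parameter, where
+// the scope must be one of the keys of filterColumnMap in audit.go (author,
+// action or type). A hedged usage sketch, reusing the suite fixtures above:
+//
+//	w := httptest.NewRecorder()
+//	req := httptest.NewRequest(http.MethodGet, "/admin/audit?query=action:user_deleted", nil)
+//	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token))
+//	ts.API.handler.ServeHTTP(w, req)
+//	// adminAuditLog splits the value on the first ":" and filters the
+//	// "action" column by "user_deleted".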
+func (ts *AuditTestSuite) TestAuditFilters() { + ts.prepareDeleteEvent() + + queries := []string{ + "/admin/audit?query=action:user_deleted", + "/admin/audit?query=type:team", + "/admin/audit?query=author:admin", + } + + for _, q := range queries { + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, q, nil) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + logs := []models.AuditLogEntry{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&logs)) + + require.Len(ts.T(), logs, 1) + require.Contains(ts.T(), logs[0].Payload, "actor_username") + assert.Equal(ts.T(), "supabase_admin", logs[0].Payload["actor_username"]) + traits, ok := logs[0].Payload["traits"].(map[string]interface{}) + require.True(ts.T(), ok) + require.Contains(ts.T(), traits, "user_email") + assert.Equal(ts.T(), "test-delete@example.com", traits["user_email"]) + } +} + +func (ts *AuditTestSuite) prepareDeleteEvent() { + // DELETE USER + u, err := models.NewUser("12345678", "test-delete@example.com", "test", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u), "Error creating user") + + // Setup request + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodDelete, fmt.Sprintf("/admin/users/%s", u.ID), nil) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) +} diff --git a/auth_v2.169.0/internal/api/auth.go b/auth_v2.169.0/internal/api/auth.go new file mode 100644 index 0000000..b03767f --- /dev/null +++ b/auth_v2.169.0/internal/api/auth.go @@ -0,0 +1,141 @@ +package api + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/gofrs/uuid" + jwt "github.com/golang-jwt/jwt/v5" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +// requireAuthentication checks incoming requests for tokens presented using the Authorization header +func (a *API) requireAuthentication(w http.ResponseWriter, r *http.Request) (context.Context, error) { + token, err := a.extractBearerToken(r) + if err != nil { + return nil, err + } + + ctx, err := a.parseJWTClaims(token, r) + if err != nil { + return ctx, err + } + + ctx, err = a.maybeLoadUserOrSession(ctx) + if err != nil { + return ctx, err + } + return ctx, err +} + +func (a *API) requireNotAnonymous(w http.ResponseWriter, r *http.Request) (context.Context, error) { + ctx := r.Context() + claims := getClaims(ctx) + if claims.IsAnonymous { + return nil, forbiddenError(ErrorCodeNoAuthorization, "Anonymous user not allowed to perform these actions") + } + return ctx, nil +} + +func (a *API) requireAdmin(ctx context.Context) (context.Context, error) { + // Find the administrative user + claims := getClaims(ctx) + if claims == nil { + return nil, forbiddenError(ErrorCodeBadJWT, "Invalid token") + } + + adminRoles := a.config.JWT.AdminRoles + + if isStringInSlice(claims.Role, adminRoles) { + // successful authentication + return withAdminUser(ctx, &models.User{Role: claims.Role, Email: storage.NullString(claims.Role)}), nil + } + + return nil, forbiddenError(ErrorCodeNotAdmin, "User not allowed").WithInternalMessage(fmt.Sprintf("this token needs to have one of the following roles: %v", strings.Join(adminRoles, ", "))) +} + +func (a *API) extractBearerToken(r *http.Request) 
(string, error) { + authHeader := r.Header.Get("Authorization") + matches := bearerRegexp.FindStringSubmatch(authHeader) + if len(matches) != 2 { + return "", httpError(http.StatusUnauthorized, ErrorCodeNoAuthorization, "This endpoint requires a Bearer token") + } + + return matches[1], nil +} + +func (a *API) parseJWTClaims(bearer string, r *http.Request) (context.Context, error) { + ctx := r.Context() + config := a.config + + p := jwt.NewParser(jwt.WithValidMethods(config.JWT.ValidMethods)) + token, err := p.ParseWithClaims(bearer, &AccessTokenClaims{}, func(token *jwt.Token) (interface{}, error) { + if kid, ok := token.Header["kid"]; ok { + if kidStr, ok := kid.(string); ok { + return conf.FindPublicKeyByKid(kidStr, &config.JWT) + } + } + if alg, ok := token.Header["alg"]; ok { + if alg == jwt.SigningMethodHS256.Name { + // preserve backward compatibility for cases where the kid is not set + return []byte(config.JWT.Secret), nil + } + } + return nil, fmt.Errorf("missing kid") + }) + if err != nil { + return nil, forbiddenError(ErrorCodeBadJWT, "invalid JWT: unable to parse or verify signature, %v", err).WithInternalError(err) + } + + return withToken(ctx, token), nil +} + +func (a *API) maybeLoadUserOrSession(ctx context.Context) (context.Context, error) { + db := a.db.WithContext(ctx) + claims := getClaims(ctx) + + if claims == nil { + return ctx, forbiddenError(ErrorCodeBadJWT, "invalid token: missing claims") + } + + if claims.Subject == "" { + return nil, forbiddenError(ErrorCodeBadJWT, "invalid claim: missing sub claim") + } + + var user *models.User + if claims.Subject != "" { + userId, err := uuid.FromString(claims.Subject) + if err != nil { + return ctx, badRequestError(ErrorCodeBadJWT, "invalid claim: sub claim must be a UUID").WithInternalError(err) + } + user, err = models.FindUserByID(db, userId) + if err != nil { + if models.IsNotFoundError(err) { + return ctx, forbiddenError(ErrorCodeUserNotFound, "User from sub claim in JWT does not exist") + } + return ctx, err + } + ctx = withUser(ctx, user) + } + + var session *models.Session + if claims.SessionId != "" && claims.SessionId != uuid.Nil.String() { + sessionId, err := uuid.FromString(claims.SessionId) + if err != nil { + return ctx, forbiddenError(ErrorCodeBadJWT, "invalid claim: session_id claim must be a UUID").WithInternalError(err) + } + session, err = models.FindSessionByID(db, sessionId, false) + if err != nil { + if models.IsNotFoundError(err) { + return ctx, forbiddenError(ErrorCodeSessionNotFound, "Session from session_id claim in JWT does not exist").WithInternalError(err).WithInternalMessage(fmt.Sprintf("session id (%s) doesn't exist", sessionId)) + } + return ctx, err + } + ctx = withSession(ctx, session) + } + return ctx, nil +} diff --git a/auth_v2.169.0/internal/api/auth_test.go b/auth_v2.169.0/internal/api/auth_test.go new file mode 100644 index 0000000..71afe66 --- /dev/null +++ b/auth_v2.169.0/internal/api/auth_test.go @@ -0,0 +1,284 @@ +package api + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gofrs/uuid" + jwt "github.com/golang-jwt/jwt/v5" + jwk "github.com/lestrrat-go/jwx/v2/jwk" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" +) + +type AuthTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration +} + +func TestAuth(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := 
&AuthTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + suite.Run(t, ts) +} + +func (ts *AuthTestSuite) SetupTest() { + models.TruncateAll(ts.API.db) + + // Create user + u, err := models.NewUser("", "test@example.com", "password", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating test user model") + require.NoError(ts.T(), ts.API.db.Create(u), "Error saving new test user") +} + +func (ts *AuthTestSuite) TestExtractBearerToken() { + userClaims := &AccessTokenClaims{ + Role: "authenticated", + } + userJwt, err := jwt.NewWithClaims(jwt.SigningMethodHS256, userClaims).SignedString([]byte(ts.Config.JWT.Secret)) + require.NoError(ts.T(), err) + req := httptest.NewRequest(http.MethodGet, "http://localhost", nil) + req.Header.Set("Authorization", "Bearer "+userJwt) + + token, err := ts.API.extractBearerToken(req) + require.NoError(ts.T(), err) + require.Equal(ts.T(), userJwt, token) +} + +func (ts *AuthTestSuite) TestParseJWTClaims() { + cases := []struct { + desc string + key map[string]interface{} + }{ + { + desc: "HMAC key", + key: map[string]interface{}{ + "kty": "oct", + "k": "S1LgKUjeqXDEolv9WPtjUpADVMHU_KYu8uRDrM-pDGg", + "kid": "ac50c3cc-9cf7-4fd6-a11f-fe066fd39118", + "key_ops": []string{"sign", "verify"}, + "alg": "HS256", + }, + }, + { + desc: "RSA key", + key: map[string]interface{}{ + "kty": "RSA", + "n": "2g0B_hMIx5ZPuTUtLRpRr0k314XniYm3AUFgR5FmTZIjrn7vLwsWij-2egGZeHa-y9ypAgB9Q-lQ3AlT7RMPiCIyLQI6TTC8k10NEnj8c0QZwENx1Qr8aBbuZbOP9Cz30EMWZSbzMbz7r8-3rp5wBRBtIPnLlbfZh_p0iBaJfB77-r_mvhOIFM4xS7ef3nkE96dnvbEN5a-HfjzDJIAt-LniUvzMWW2gQcmHiM4oeijE3PHesapLMt2JpsMhSRo8L7tysags9VMoyZ1GnpCdjtRwb_KpY9QTjV6lL8G5nsKFH7bhABYcpjDOvqkfT5nPXj6C7oCo6MPRirPWUTbq2w", + "e": "AQAB", + "d": "OOTj_DNjOxCRRLYHT5lqbt4f3_BkdZKlWYKBaKsbkmnrPYCJUDEIdJIjPrpkHPZ-2hp9TrRp-upJ2t_kMhujFdY2WWAXbkSlL5475vICjODcBzqR3RC8wzwYgBjWGtQQ5RpcIZCELBovYbRFLR7SA8BBeTU0VaBe9gf3l_qpbOT9QIl268uFdWndTjpehGLQRmAtR1snhvTha0b9nsBZsM_K-EfnoF7Q_lPsjwWDvIGpFXao8Ifaa_sFtQkHjHVBMW2Qgx3ZSrEva_brk7w0MNSYI7Nsmr56xFOpFRwZy0v8ZtgQZ4hXmUInRHIoQ2APeds9YmemojvJKVflt9pLIQ", + "p": "-o2hdQ5Z35cIS5APTVULj_BMoPJpgkuX-PSYC1SeBeff9K04kG5zrFMWJy_-27-ys4q754lpNwJdX2CjN1nb6qyn-uKP8B2oLayKs9ebkiOqvm3S2Xblvi_F8x6sOLba3lTYHK8G7U9aMB9U0mhAzzMFdw15XXusVFDvk-zxL28", + "q": "3sp-7HzZE_elKRmebjivcDhkXO2GrcN3EIqYbbXssHZFXJwVE9oc2CErGWa7QetOCr9C--ZuTmX0X3L--CoYr-hMB0dN8lcAhapr3aau-4i7vE3DWSUdcFSyi0BBDg8pWQWbxNyTXBuWeh1cnRBsLjCxAOVTF0y3_BnVR7mbBVU", + "dp": "DuYHGMfOrk3zz1J0pnuNIXT_iX6AqZ_HHKWmuN3CO8Wq-oimWWhH9pJGOfRPqk9-19BDFiSEniHE3ZwIeI0eV5kGsBNyzatlybl90e3bMVhvmb08EXRRevqqQaesQ_8Tiq7u3t3Fgqz6RuxGBfDvEaMOCyNA-T8WYzkg1eH8AX8", + "dq": "opOCK3CvuDJvA57-TdBvtaRxGJ78OLD6oceBlA29useTthDwEJyJj-4kVVTyMRhUyuLnLoro06zytvRjuxR9D2CkmmseJkn2x5OlQwnvhv4wgSj99H9xDBfCcntg_bFyqtO859tObVh0ZogmnTbuuoYtpEm0aLxDRmRTjxOSXEE", + "qi": "8skVE7BDASHXytKSWYbkxD0B3WpXic2rtnLgiMgasdSxul8XwcB-vjVSZprVrxkcmm6ZhszoxOlq8yylBmMvAnG_gEzTls_xapeuEXGYiGaTcpkCt1r-tBKcQkka2SayaWwAljsX4xSw-zKP2koUkEET_tIcbBOW1R4OWfRGqOI", + "kid": "0d24b26c-b3ec-4c02-acfd-d5a54d50b3a4", + "key_ops": []string{"sign", "verify"}, + "alg": "RS256", + }, + }, + { + desc: "EC key", + key: map[string]interface{}{ + "kty": "EC", + "x": "5wsOh-DrNPpm9KkuydtgGs_cv3oNvtR9OdXywt12aS4", + "y": "0y01ZbuH_VQjMEd8fcYaLdiv25EVJ5GOrb79dJJsqrM", + "crv": "P-256", + "d": "EDP4ReMMpAUcf82EF3JYvkm8C5hVAh258Rj6f3HTx7c", + "kid": "10646a77-f470-44a8-8400-2f988d9c9c1a", + "key_ops": []string{"sign", "verify"}, + "alg": "ES256", + }, + }, + { + desc: "Ed25519 key", + key: map[string]interface{}{ + "crv": 
"Ed25519", + "d": "jVpCLvOxatVkKe1MW9nFRn6Q8VVZPq5yziKU_Z0Yu-c", + "x": "YDkGdufJBQEPO6ylvd9IKfZlzvm9tOG5VCDpkJSSkiA", + "kty": "OKP", + "kid": "ec5e7a96-ea66-456c-826c-d8d6cb928c0f", + "key_ops": []string{"sign", "verify"}, + "alg": "EdDSA", + }, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + bytes, err := json.Marshal(c.key) + require.NoError(ts.T(), err) + privKey, err := jwk.ParseKey(bytes) + require.NoError(ts.T(), err) + pubKey, err := privKey.PublicKey() + require.NoError(ts.T(), err) + ts.Config.JWT.Keys = conf.JwtKeysDecoder{privKey.KeyID(): conf.JwkInfo{ + PublicKey: pubKey, + PrivateKey: privKey, + }} + ts.Config.JWT.ValidMethods = nil + require.NoError(ts.T(), ts.Config.ApplyDefaults()) + + userClaims := &AccessTokenClaims{ + Role: "authenticated", + } + + // get signing key and method from config + jwk, err := conf.GetSigningJwk(&ts.Config.JWT) + require.NoError(ts.T(), err) + signingMethod := conf.GetSigningAlg(jwk) + signingKey, err := conf.GetSigningKey(jwk) + require.NoError(ts.T(), err) + + userJwtToken := jwt.NewWithClaims(signingMethod, userClaims) + require.NoError(ts.T(), err) + userJwtToken.Header["kid"] = jwk.KeyID() + userJwt, err := userJwtToken.SignedString(signingKey) + require.NoError(ts.T(), err) + + req := httptest.NewRequest(http.MethodGet, "http://localhost", nil) + req.Header.Set("Authorization", "Bearer "+userJwt) + ctx, err := ts.API.parseJWTClaims(userJwt, req) + require.NoError(ts.T(), err) + + // check if token is stored in context + token := getToken(ctx) + require.Equal(ts.T(), userJwt, token.Raw) + }) + } +} + +func (ts *AuthTestSuite) TestMaybeLoadUserOrSession() { + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + s, err := models.NewSession(u.ID, nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.API.db.Create(s)) + + require.NoError(ts.T(), ts.API.db.Load(s)) + + cases := []struct { + Desc string + UserJwtClaims *AccessTokenClaims + ExpectedError error + ExpectedUser *models.User + ExpectedSession *models.Session + }{ + { + Desc: "Missing Subject Claim", + UserJwtClaims: &AccessTokenClaims{ + RegisteredClaims: jwt.RegisteredClaims{ + Subject: "", + }, + Role: "authenticated", + }, + ExpectedError: forbiddenError(ErrorCodeBadJWT, "invalid claim: missing sub claim"), + ExpectedUser: nil, + }, + { + Desc: "Valid Subject Claim", + UserJwtClaims: &AccessTokenClaims{ + RegisteredClaims: jwt.RegisteredClaims{ + Subject: u.ID.String(), + }, + Role: "authenticated", + }, + ExpectedError: nil, + ExpectedUser: u, + }, + { + Desc: "Invalid Subject Claim", + UserJwtClaims: &AccessTokenClaims{ + RegisteredClaims: jwt.RegisteredClaims{ + Subject: "invalid-subject-claim", + }, + Role: "authenticated", + }, + ExpectedError: badRequestError(ErrorCodeBadJWT, "invalid claim: sub claim must be a UUID"), + ExpectedUser: nil, + }, + { + Desc: "Empty Session ID Claim", + UserJwtClaims: &AccessTokenClaims{ + RegisteredClaims: jwt.RegisteredClaims{ + Subject: u.ID.String(), + }, + Role: "authenticated", + SessionId: "", + }, + ExpectedError: nil, + ExpectedUser: u, + }, + { + Desc: "Invalid Session ID Claim", + UserJwtClaims: &AccessTokenClaims{ + RegisteredClaims: jwt.RegisteredClaims{ + Subject: u.ID.String(), + }, + Role: "authenticated", + SessionId: uuid.Nil.String(), + }, + ExpectedError: nil, + ExpectedUser: u, + }, + { + Desc: "Valid Session ID Claim", + UserJwtClaims: &AccessTokenClaims{ + RegisteredClaims: jwt.RegisteredClaims{ + Subject: 
u.ID.String(), + }, + Role: "authenticated", + SessionId: s.ID.String(), + }, + ExpectedError: nil, + ExpectedUser: u, + ExpectedSession: s, + }, + { + Desc: "Session ID doesn't exist", + UserJwtClaims: &AccessTokenClaims{ + RegisteredClaims: jwt.RegisteredClaims{ + Subject: u.ID.String(), + }, + Role: "authenticated", + SessionId: "73bf9ee0-9e8c-453b-b484-09cb93e2f341", + }, + ExpectedError: forbiddenError(ErrorCodeSessionNotFound, "Session from session_id claim in JWT does not exist").WithInternalError(models.SessionNotFoundError{}).WithInternalMessage("session id (73bf9ee0-9e8c-453b-b484-09cb93e2f341) doesn't exist"), + ExpectedUser: u, + ExpectedSession: nil, + }, + } + + for _, c := range cases { + ts.Run(c.Desc, func() { + userJwt, err := jwt.NewWithClaims(jwt.SigningMethodHS256, c.UserJwtClaims).SignedString([]byte(ts.Config.JWT.Secret)) + require.NoError(ts.T(), err) + + req := httptest.NewRequest(http.MethodGet, "http://localhost", nil) + req.Header.Set("Authorization", "Bearer "+userJwt) + + ctx, err := ts.API.parseJWTClaims(userJwt, req) + require.NoError(ts.T(), err) + ctx, err = ts.API.maybeLoadUserOrSession(ctx) + if c.ExpectedError != nil { + require.Equal(ts.T(), c.ExpectedError.Error(), err.Error()) + } else { + require.Equal(ts.T(), c.ExpectedError, err) + } + require.Equal(ts.T(), c.ExpectedUser, getUser(ctx)) + require.Equal(ts.T(), c.ExpectedSession, getSession(ctx)) + }) + } +} diff --git a/auth_v2.169.0/internal/api/context.go b/auth_v2.169.0/internal/api/context.go new file mode 100644 index 0000000..3047f3d --- /dev/null +++ b/auth_v2.169.0/internal/api/context.go @@ -0,0 +1,243 @@ +package api + +import ( + "context" + "net/url" + + jwt "github.com/golang-jwt/jwt/v5" + "github.com/supabase/auth/internal/models" +) + +type contextKey string + +func (c contextKey) String() string { + return "gotrue api context key " + string(c) +} + +const ( + tokenKey = contextKey("jwt") + inviteTokenKey = contextKey("invite_token") + signatureKey = contextKey("signature") + externalProviderTypeKey = contextKey("external_provider_type") + userKey = contextKey("user") + targetUserKey = contextKey("target_user") + factorKey = contextKey("factor") + sessionKey = contextKey("session") + externalReferrerKey = contextKey("external_referrer") + functionHooksKey = contextKey("function_hooks") + adminUserKey = contextKey("admin_user") + oauthTokenKey = contextKey("oauth_token") // for OAuth1.0, also known as request token + oauthVerifierKey = contextKey("oauth_verifier") + ssoProviderKey = contextKey("sso_provider") + externalHostKey = contextKey("external_host") + flowStateKey = contextKey("flow_state_id") +) + +// withToken adds the JWT token to the context. +func withToken(ctx context.Context, token *jwt.Token) context.Context { + return context.WithValue(ctx, tokenKey, token) +} + +// getToken reads the JWT token from the context. +func getToken(ctx context.Context) *jwt.Token { + obj := ctx.Value(tokenKey) + if obj == nil { + return nil + } + + return obj.(*jwt.Token) +} + +func getClaims(ctx context.Context) *AccessTokenClaims { + token := getToken(ctx) + if token == nil { + return nil + } + return token.Claims.(*AccessTokenClaims) +} + +// withUser adds the user to the context. +func withUser(ctx context.Context, u *models.User) context.Context { + return context.WithValue(ctx, userKey, u) +} + +// withTargetUser adds the target user for linking to the context. 
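+// For illustration only (an editor's sketch, not upstream code), a manual
+// linking flow would stash the account to link against like so:
+//
+//	ctx := withTargetUser(r.Context(), targetUser)
+//	// ... and later recover it with getTargetUser(ctx)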
+func withTargetUser(ctx context.Context, u *models.User) context.Context {
+    return context.WithValue(ctx, targetUserKey, u)
+}
+
+// withFactor adds the factor to the context.
+func withFactor(ctx context.Context, f *models.Factor) context.Context {
+    return context.WithValue(ctx, factorKey, f)
+}
+
+// getUser reads the user from the context.
+func getUser(ctx context.Context) *models.User {
+    if ctx == nil {
+        return nil
+    }
+    obj := ctx.Value(userKey)
+    if obj == nil {
+        return nil
+    }
+    return obj.(*models.User)
+}
+
+// getTargetUser reads the target user from the context.
+func getTargetUser(ctx context.Context) *models.User {
+    if ctx == nil {
+        return nil
+    }
+    obj := ctx.Value(targetUserKey)
+    if obj == nil {
+        return nil
+    }
+    return obj.(*models.User)
+}
+
+// getFactor reads the factor from the context.
+func getFactor(ctx context.Context) *models.Factor {
+    obj := ctx.Value(factorKey)
+    if obj == nil {
+        return nil
+    }
+    return obj.(*models.Factor)
+}
+
+// withSession adds the session to the context.
+func withSession(ctx context.Context, s *models.Session) context.Context {
+    return context.WithValue(ctx, sessionKey, s)
+}
+
+// getSession reads the session from the context.
+func getSession(ctx context.Context) *models.Session {
+    if ctx == nil {
+        return nil
+    }
+    obj := ctx.Value(sessionKey)
+    if obj == nil {
+        return nil
+    }
+    return obj.(*models.Session)
+}
+
+// withSignature adds the provided signature to the context.
+func withSignature(ctx context.Context, id string) context.Context {
+    return context.WithValue(ctx, signatureKey, id)
+}
+
+func withInviteToken(ctx context.Context, token string) context.Context {
+    return context.WithValue(ctx, inviteTokenKey, token)
+}
+
+func withFlowStateID(ctx context.Context, FlowStateID string) context.Context {
+    return context.WithValue(ctx, flowStateKey, FlowStateID)
+}
+
+func getFlowStateID(ctx context.Context) string {
+    obj := ctx.Value(flowStateKey)
+    if obj == nil {
+        return ""
+    }
+    return obj.(string)
+}
+
+func getInviteToken(ctx context.Context) string {
+    obj := ctx.Value(inviteTokenKey)
+    if obj == nil {
+        return ""
+    }
+
+    return obj.(string)
+}
+
+// withExternalProviderType adds the external provider type to the context.
+func withExternalProviderType(ctx context.Context, id string) context.Context {
+    return context.WithValue(ctx, externalProviderTypeKey, id)
+}
+
+// getExternalProviderType reads the external provider type from the context.
+func getExternalProviderType(ctx context.Context) string {
+    obj := ctx.Value(externalProviderTypeKey)
+    if obj == nil {
+        return ""
+    }
+
+    return obj.(string)
+}
+
+func withExternalReferrer(ctx context.Context, token string) context.Context {
+    return context.WithValue(ctx, externalReferrerKey, token)
+}
+
+func getExternalReferrer(ctx context.Context) string {
+    obj := ctx.Value(externalReferrerKey)
+    if obj == nil {
+        return ""
+    }
+
+    return obj.(string)
+}
+
+// withAdminUser adds the admin user to the context.
+func withAdminUser(ctx context.Context, u *models.User) context.Context {
+    return context.WithValue(ctx, adminUserKey, u)
+}
+
+// getAdminUser reads the admin user from the context.
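+// For example (an editor's sketch, assuming requireAdmin has already
+// populated the context):
+//
+//	if admin := getAdminUser(ctx); admin != nil {
+//		// admin.Role is one of config.JWT.AdminRoles
+//	}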
+func getAdminUser(ctx context.Context) *models.User { + obj := ctx.Value(adminUserKey) + if obj == nil { + return nil + } + return obj.(*models.User) +} + +// withRequestToken adds the request token to the context +func withRequestToken(ctx context.Context, token string) context.Context { + return context.WithValue(ctx, oauthTokenKey, token) +} + +func getRequestToken(ctx context.Context) string { + obj := ctx.Value(oauthTokenKey) + if obj == nil { + return "" + } + return obj.(string) +} + +func withOAuthVerifier(ctx context.Context, token string) context.Context { + return context.WithValue(ctx, oauthVerifierKey, token) +} + +func getOAuthVerifier(ctx context.Context) string { + obj := ctx.Value(oauthVerifierKey) + if obj == nil { + return "" + } + return obj.(string) +} + +func withSSOProvider(ctx context.Context, provider *models.SSOProvider) context.Context { + return context.WithValue(ctx, ssoProviderKey, provider) +} + +func getSSOProvider(ctx context.Context) *models.SSOProvider { + obj := ctx.Value(ssoProviderKey) + if obj == nil { + return nil + } + return obj.(*models.SSOProvider) +} + +func withExternalHost(ctx context.Context, u *url.URL) context.Context { + return context.WithValue(ctx, externalHostKey, u) +} + +func getExternalHost(ctx context.Context) *url.URL { + obj := ctx.Value(externalHostKey) + if obj == nil { + return nil + } + return obj.(*url.URL) +} diff --git a/auth_v2.169.0/internal/api/errorcodes.go b/auth_v2.169.0/internal/api/errorcodes.go new file mode 100644 index 0000000..8f09901 --- /dev/null +++ b/auth_v2.169.0/internal/api/errorcodes.go @@ -0,0 +1,95 @@ +package api + +type ErrorCode = string + +const ( + // ErrorCodeUnknown should not be used directly, it only indicates a failure in the error handling system in such a way that an error code was not assigned properly. + ErrorCodeUnknown ErrorCode = "unknown" + + // ErrorCodeUnexpectedFailure signals an unexpected failure such as a 500 Internal Server Error. 
+ ErrorCodeUnexpectedFailure ErrorCode = "unexpected_failure" + + ErrorCodeValidationFailed ErrorCode = "validation_failed" + ErrorCodeBadJSON ErrorCode = "bad_json" + ErrorCodeEmailExists ErrorCode = "email_exists" + ErrorCodePhoneExists ErrorCode = "phone_exists" + ErrorCodeBadJWT ErrorCode = "bad_jwt" + ErrorCodeNotAdmin ErrorCode = "not_admin" + ErrorCodeNoAuthorization ErrorCode = "no_authorization" + ErrorCodeUserNotFound ErrorCode = "user_not_found" + ErrorCodeSessionNotFound ErrorCode = "session_not_found" + ErrorCodeSessionExpired ErrorCode = "session_expired" + ErrorCodeRefreshTokenNotFound ErrorCode = "refresh_token_not_found" + ErrorCodeRefreshTokenAlreadyUsed ErrorCode = "refresh_token_already_used" + ErrorCodeFlowStateNotFound ErrorCode = "flow_state_not_found" + ErrorCodeFlowStateExpired ErrorCode = "flow_state_expired" + ErrorCodeSignupDisabled ErrorCode = "signup_disabled" + ErrorCodeUserBanned ErrorCode = "user_banned" + ErrorCodeProviderEmailNeedsVerification ErrorCode = "provider_email_needs_verification" + ErrorCodeInviteNotFound ErrorCode = "invite_not_found" + ErrorCodeBadOAuthState ErrorCode = "bad_oauth_state" + ErrorCodeBadOAuthCallback ErrorCode = "bad_oauth_callback" + ErrorCodeOAuthProviderNotSupported ErrorCode = "oauth_provider_not_supported" + ErrorCodeUnexpectedAudience ErrorCode = "unexpected_audience" + ErrorCodeSingleIdentityNotDeletable ErrorCode = "single_identity_not_deletable" + ErrorCodeEmailConflictIdentityNotDeletable ErrorCode = "email_conflict_identity_not_deletable" + ErrorCodeIdentityAlreadyExists ErrorCode = "identity_already_exists" + ErrorCodeEmailProviderDisabled ErrorCode = "email_provider_disabled" + ErrorCodePhoneProviderDisabled ErrorCode = "phone_provider_disabled" + ErrorCodeTooManyEnrolledMFAFactors ErrorCode = "too_many_enrolled_mfa_factors" + ErrorCodeMFAFactorNameConflict ErrorCode = "mfa_factor_name_conflict" + ErrorCodeMFAFactorNotFound ErrorCode = "mfa_factor_not_found" + ErrorCodeMFAIPAddressMismatch ErrorCode = "mfa_ip_address_mismatch" + ErrorCodeMFAChallengeExpired ErrorCode = "mfa_challenge_expired" + ErrorCodeMFAVerificationFailed ErrorCode = "mfa_verification_failed" + ErrorCodeMFAVerificationRejected ErrorCode = "mfa_verification_rejected" + ErrorCodeInsufficientAAL ErrorCode = "insufficient_aal" + ErrorCodeCaptchaFailed ErrorCode = "captcha_failed" + ErrorCodeSAMLProviderDisabled ErrorCode = "saml_provider_disabled" + ErrorCodeManualLinkingDisabled ErrorCode = "manual_linking_disabled" + ErrorCodeSMSSendFailed ErrorCode = "sms_send_failed" + ErrorCodeEmailNotConfirmed ErrorCode = "email_not_confirmed" + ErrorCodePhoneNotConfirmed ErrorCode = "phone_not_confirmed" + ErrorCodeSAMLRelayStateNotFound ErrorCode = "saml_relay_state_not_found" + ErrorCodeSAMLRelayStateExpired ErrorCode = "saml_relay_state_expired" + ErrorCodeSAMLIdPNotFound ErrorCode = "saml_idp_not_found" + ErrorCodeSAMLAssertionNoUserID ErrorCode = "saml_assertion_no_user_id" + ErrorCodeSAMLAssertionNoEmail ErrorCode = "saml_assertion_no_email" + ErrorCodeUserAlreadyExists ErrorCode = "user_already_exists" + ErrorCodeSSOProviderNotFound ErrorCode = "sso_provider_not_found" + ErrorCodeSAMLMetadataFetchFailed ErrorCode = "saml_metadata_fetch_failed" + ErrorCodeSAMLIdPAlreadyExists ErrorCode = "saml_idp_already_exists" + ErrorCodeSSODomainAlreadyExists ErrorCode = "sso_domain_already_exists" + ErrorCodeSAMLEntityIDMismatch ErrorCode = "saml_entity_id_mismatch" + ErrorCodeConflict ErrorCode = "conflict" + ErrorCodeProviderDisabled ErrorCode = 
"provider_disabled" + ErrorCodeUserSSOManaged ErrorCode = "user_sso_managed" + ErrorCodeReauthenticationNeeded ErrorCode = "reauthentication_needed" + ErrorCodeSamePassword ErrorCode = "same_password" + ErrorCodeReauthenticationNotValid ErrorCode = "reauthentication_not_valid" + ErrorCodeOTPExpired ErrorCode = "otp_expired" + ErrorCodeOTPDisabled ErrorCode = "otp_disabled" + ErrorCodeIdentityNotFound ErrorCode = "identity_not_found" + ErrorCodeWeakPassword ErrorCode = "weak_password" + ErrorCodeOverRequestRateLimit ErrorCode = "over_request_rate_limit" + ErrorCodeOverEmailSendRateLimit ErrorCode = "over_email_send_rate_limit" + ErrorCodeOverSMSSendRateLimit ErrorCode = "over_sms_send_rate_limit" + ErrorCodeBadCodeVerifier ErrorCode = "bad_code_verifier" + ErrorCodeAnonymousProviderDisabled ErrorCode = "anonymous_provider_disabled" + ErrorCodeHookTimeout ErrorCode = "hook_timeout" + ErrorCodeHookTimeoutAfterRetry ErrorCode = "hook_timeout_after_retry" + ErrorCodeHookPayloadOverSizeLimit ErrorCode = "hook_payload_over_size_limit" + ErrorCodeHookPayloadInvalidContentType ErrorCode = "hook_payload_invalid_content_type" + ErrorCodeRequestTimeout ErrorCode = "request_timeout" + ErrorCodeMFAPhoneEnrollDisabled ErrorCode = "mfa_phone_enroll_not_enabled" + ErrorCodeMFAPhoneVerifyDisabled ErrorCode = "mfa_phone_verify_not_enabled" + ErrorCodeMFATOTPEnrollDisabled ErrorCode = "mfa_totp_enroll_not_enabled" + ErrorCodeMFATOTPVerifyDisabled ErrorCode = "mfa_totp_verify_not_enabled" + ErrorCodeMFAWebAuthnEnrollDisabled ErrorCode = "mfa_webauthn_enroll_not_enabled" + ErrorCodeMFAWebAuthnVerifyDisabled ErrorCode = "mfa_webauthn_verify_not_enabled" + ErrorCodeMFAVerifiedFactorExists ErrorCode = "mfa_verified_factor_exists" + //#nosec G101 -- Not a secret value. + ErrorCodeInvalidCredentials ErrorCode = "invalid_credentials" + ErrorCodeEmailAddressNotAuthorized ErrorCode = "email_address_not_authorized" + ErrorCodeEmailAddressInvalid ErrorCode = "email_address_invalid" +) diff --git a/auth_v2.169.0/internal/api/errors.go b/auth_v2.169.0/internal/api/errors.go new file mode 100644 index 0000000..0ce9512 --- /dev/null +++ b/auth_v2.169.0/internal/api/errors.go @@ -0,0 +1,330 @@ +package api + +import ( + "context" + "fmt" + "net/http" + "os" + "runtime/debug" + "time" + + "github.com/pkg/errors" + "github.com/supabase/auth/internal/observability" + "github.com/supabase/auth/internal/utilities" +) + +// Common error messages during signup flow +var ( + DuplicateEmailMsg = "A user with this email address has already been registered" + DuplicatePhoneMsg = "A user with this phone number has already been registered" + UserExistsError error = errors.New("user already exists") +) + +const InvalidChannelError = "Invalid channel, supported values are 'sms' or 'whatsapp'. 'whatsapp' is only supported if Twilio or Twilio Verify is used as the provider." 
+ +var oauthErrorMap = map[int]string{ + http.StatusBadRequest: "invalid_request", + http.StatusUnauthorized: "unauthorized_client", + http.StatusForbidden: "access_denied", + http.StatusInternalServerError: "server_error", + http.StatusServiceUnavailable: "temporarily_unavailable", +} + +// OAuthError is the JSON handler for OAuth2 error responses +type OAuthError struct { + Err string `json:"error"` + Description string `json:"error_description,omitempty"` + InternalError error `json:"-"` + InternalMessage string `json:"-"` +} + +func (e *OAuthError) Error() string { + if e.InternalMessage != "" { + return e.InternalMessage + } + return fmt.Sprintf("%s: %s", e.Err, e.Description) +} + +// WithInternalError adds internal error information to the error +func (e *OAuthError) WithInternalError(err error) *OAuthError { + e.InternalError = err + return e +} + +// WithInternalMessage adds internal message information to the error +func (e *OAuthError) WithInternalMessage(fmtString string, args ...interface{}) *OAuthError { + e.InternalMessage = fmt.Sprintf(fmtString, args...) + return e +} + +// Cause returns the root cause error +func (e *OAuthError) Cause() error { + if e.InternalError != nil { + return e.InternalError + } + return e +} + +func oauthError(err string, description string) *OAuthError { + return &OAuthError{Err: err, Description: description} +} + +func badRequestError(errorCode ErrorCode, fmtString string, args ...interface{}) *HTTPError { + return httpError(http.StatusBadRequest, errorCode, fmtString, args...) +} + +func internalServerError(fmtString string, args ...interface{}) *HTTPError { + return httpError(http.StatusInternalServerError, ErrorCodeUnexpectedFailure, fmtString, args...) +} + +func notFoundError(errorCode ErrorCode, fmtString string, args ...interface{}) *HTTPError { + return httpError(http.StatusNotFound, errorCode, fmtString, args...) +} + +func forbiddenError(errorCode ErrorCode, fmtString string, args ...interface{}) *HTTPError { + return httpError(http.StatusForbidden, errorCode, fmtString, args...) +} + +func unprocessableEntityError(errorCode ErrorCode, fmtString string, args ...interface{}) *HTTPError { + return httpError(http.StatusUnprocessableEntity, errorCode, fmtString, args...) +} + +func tooManyRequestsError(errorCode ErrorCode, fmtString string, args ...interface{}) *HTTPError { + return httpError(http.StatusTooManyRequests, errorCode, fmtString, args...) +} + +func conflictError(fmtString string, args ...interface{}) *HTTPError { + return httpError(http.StatusConflict, ErrorCodeConflict, fmtString, args...) +} + +// HTTPError is an error with a message and an HTTP status code. +type HTTPError struct { + HTTPStatus int `json:"code"` // do not rename the JSON tags! + ErrorCode string `json:"error_code,omitempty"` // do not rename the JSON tags! + Message string `json:"msg"` // do not rename the JSON tags! 
+ InternalError error `json:"-"` + InternalMessage string `json:"-"` + ErrorID string `json:"error_id,omitempty"` +} + +func (e *HTTPError) Error() string { + if e.InternalMessage != "" { + return e.InternalMessage + } + return fmt.Sprintf("%d: %s", e.HTTPStatus, e.Message) +} + +func (e *HTTPError) Is(target error) bool { + return e.Error() == target.Error() +} + +// Cause returns the root cause error +func (e *HTTPError) Cause() error { + if e.InternalError != nil { + return e.InternalError + } + return e +} + +// WithInternalError adds internal error information to the error +func (e *HTTPError) WithInternalError(err error) *HTTPError { + e.InternalError = err + return e +} + +// WithInternalMessage adds internal message information to the error +func (e *HTTPError) WithInternalMessage(fmtString string, args ...interface{}) *HTTPError { + e.InternalMessage = fmt.Sprintf(fmtString, args...) + return e +} + +func httpError(httpStatus int, errorCode ErrorCode, fmtString string, args ...interface{}) *HTTPError { + return &HTTPError{ + HTTPStatus: httpStatus, + ErrorCode: errorCode, + Message: fmt.Sprintf(fmtString, args...), + } +} + +// Recoverer is a middleware that recovers from panics, logs the panic (and a +// backtrace), and returns a HTTP 500 (Internal Server Error) status if +// possible. Recoverer prints a request ID if one is provided. +func recoverer(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + defer func() { + if rvr := recover(); rvr != nil { + logEntry := observability.GetLogEntry(r) + if logEntry != nil { + logEntry.Panic(rvr, debug.Stack()) + } else { + fmt.Fprintf(os.Stderr, "Panic: %+v\n", rvr) + debug.PrintStack() + } + + se := &HTTPError{ + HTTPStatus: http.StatusInternalServerError, + Message: http.StatusText(http.StatusInternalServerError), + } + HandleResponseError(se, w, r) + } + }() + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) +} + +// ErrorCause is an error interface that contains the method Cause() for returning root cause errors +type ErrorCause interface { + Cause() error +} + +type HTTPErrorResponse20240101 struct { + Code ErrorCode `json:"code"` + Message string `json:"message"` +} + +func HandleResponseError(err error, w http.ResponseWriter, r *http.Request) { + log := observability.GetLogEntry(r).Entry + errorID := utilities.GetRequestID(r.Context()) + + apiVersion, averr := DetermineClosestAPIVersion(r.Header.Get(APIVersionHeaderName)) + if averr != nil { + log.WithError(averr).Warn("Invalid version passed to " + APIVersionHeaderName + " header, defaulting to initial version") + } else if apiVersion != APIVersionInitial { + // Echo back the determined API version from the request + w.Header().Set(APIVersionHeaderName, FormatAPIVersion(apiVersion)) + } + + switch e := err.(type) { + case *WeakPasswordError: + if apiVersion.Compare(APIVersion20240101) >= 0 { + var output struct { + HTTPErrorResponse20240101 + Payload struct { + Reasons []string `json:"reasons,omitempty"` + } `json:"weak_password,omitempty"` + } + + output.Code = ErrorCodeWeakPassword + output.Message = e.Message + output.Payload.Reasons = e.Reasons + + if jsonErr := sendJSON(w, http.StatusUnprocessableEntity, output); jsonErr != nil && jsonErr != context.DeadlineExceeded { + log.WithError(jsonErr).Warn("Failed to send JSON on ResponseWriter") + } + + } else { + var output struct { + HTTPError + Payload struct { + Reasons []string `json:"reasons,omitempty"` + } `json:"weak_password,omitempty"` + } + + output.HTTPStatus = 
http.StatusUnprocessableEntity + output.ErrorCode = ErrorCodeWeakPassword + output.Message = e.Message + output.Payload.Reasons = e.Reasons + + w.Header().Set("x-sb-error-code", output.ErrorCode) + + if jsonErr := sendJSON(w, output.HTTPStatus, output); jsonErr != nil && jsonErr != context.DeadlineExceeded { + log.WithError(jsonErr).Warn("Failed to send JSON on ResponseWriter") + } + } + + case *HTTPError: + switch { + case e.HTTPStatus >= http.StatusInternalServerError: + e.ErrorID = errorID + // this will get us the stack trace too + log.WithError(e.Cause()).Error(e.Error()) + case e.HTTPStatus == http.StatusTooManyRequests: + log.WithError(e.Cause()).Warn(e.Error()) + default: + log.WithError(e.Cause()).Info(e.Error()) + } + + if e.ErrorCode != "" { + w.Header().Set("x-sb-error-code", e.ErrorCode) + } + + if apiVersion.Compare(APIVersion20240101) >= 0 { + resp := HTTPErrorResponse20240101{ + Code: e.ErrorCode, + Message: e.Message, + } + + if resp.Code == "" { + if e.HTTPStatus == http.StatusInternalServerError { + resp.Code = ErrorCodeUnexpectedFailure + } else { + resp.Code = ErrorCodeUnknown + } + } + + if jsonErr := sendJSON(w, e.HTTPStatus, resp); jsonErr != nil && jsonErr != context.DeadlineExceeded { + log.WithError(jsonErr).Warn("Failed to send JSON on ResponseWriter") + } + } else { + if e.ErrorCode == "" { + if e.HTTPStatus == http.StatusInternalServerError { + e.ErrorCode = ErrorCodeUnexpectedFailure + } else { + e.ErrorCode = ErrorCodeUnknown + } + } + + // Provide better error messages for certain user-triggered Postgres errors. + if pgErr := utilities.NewPostgresError(e.InternalError); pgErr != nil { + if jsonErr := sendJSON(w, pgErr.HttpStatusCode, pgErr); jsonErr != nil && jsonErr != context.DeadlineExceeded { + log.WithError(jsonErr).Warn("Failed to send JSON on ResponseWriter") + } + return + } + + if jsonErr := sendJSON(w, e.HTTPStatus, e); jsonErr != nil && jsonErr != context.DeadlineExceeded { + log.WithError(jsonErr).Warn("Failed to send JSON on ResponseWriter") + } + } + + case *OAuthError: + log.WithError(e.Cause()).Info(e.Error()) + if jsonErr := sendJSON(w, http.StatusBadRequest, e); jsonErr != nil && jsonErr != context.DeadlineExceeded { + log.WithError(jsonErr).Warn("Failed to send JSON on ResponseWriter") + } + + case ErrorCause: + HandleResponseError(e.Cause(), w, r) + + default: + log.WithError(e).Errorf("Unhandled server error: %s", e.Error()) + + if apiVersion.Compare(APIVersion20240101) >= 0 { + resp := HTTPErrorResponse20240101{ + Code: ErrorCodeUnexpectedFailure, + Message: "Unexpected failure, please check server logs for more information", + } + + if jsonErr := sendJSON(w, http.StatusInternalServerError, resp); jsonErr != nil && jsonErr != context.DeadlineExceeded { + log.WithError(jsonErr).Warn("Failed to send JSON on ResponseWriter") + } + } else { + httpError := HTTPError{ + HTTPStatus: http.StatusInternalServerError, + ErrorCode: ErrorCodeUnexpectedFailure, + Message: "Unexpected failure, please check server logs for more information", + } + + if jsonErr := sendJSON(w, http.StatusInternalServerError, httpError); jsonErr != nil && jsonErr != context.DeadlineExceeded { + log.WithError(jsonErr).Warn("Failed to send JSON on ResponseWriter") + } + } + } +} + +func generateFrequencyLimitErrorMessage(timeStamp *time.Time, maxFrequency time.Duration) string { + now := time.Now() + left := timeStamp.Add(maxFrequency).Sub(now) / time.Second + return fmt.Sprintf("For security purposes, you can only request this after %d seconds.", left) +} diff --git 
a/auth_v2.169.0/internal/api/errors_test.go b/auth_v2.169.0/internal/api/errors_test.go new file mode 100644 index 0000000..5524672 --- /dev/null +++ b/auth_v2.169.0/internal/api/errors_test.go @@ -0,0 +1,105 @@ +package api + +import ( + "bytes" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/observability" +) + +func TestHandleResponseErrorWithHTTPError(t *testing.T) { + examples := []struct { + HTTPError *HTTPError + APIVersion string + ExpectedBody string + }{ + { + HTTPError: badRequestError(ErrorCodeBadJSON, "Unable to parse JSON"), + APIVersion: "", + ExpectedBody: "{\"code\":400,\"error_code\":\"" + ErrorCodeBadJSON + "\",\"msg\":\"Unable to parse JSON\"}", + }, + { + HTTPError: badRequestError(ErrorCodeBadJSON, "Unable to parse JSON"), + APIVersion: "2023-12-31", + ExpectedBody: "{\"code\":400,\"error_code\":\"" + ErrorCodeBadJSON + "\",\"msg\":\"Unable to parse JSON\"}", + }, + { + HTTPError: badRequestError(ErrorCodeBadJSON, "Unable to parse JSON"), + APIVersion: "2024-01-01", + ExpectedBody: "{\"code\":\"" + ErrorCodeBadJSON + "\",\"message\":\"Unable to parse JSON\"}", + }, + { + HTTPError: &HTTPError{ + HTTPStatus: http.StatusBadRequest, + Message: "Uncoded failure", + }, + APIVersion: "2024-01-01", + ExpectedBody: "{\"code\":\"" + ErrorCodeUnknown + "\",\"message\":\"Uncoded failure\"}", + }, + { + HTTPError: &HTTPError{ + HTTPStatus: http.StatusInternalServerError, + Message: "Unexpected failure", + }, + APIVersion: "2024-01-01", + ExpectedBody: "{\"code\":\"" + ErrorCodeUnexpectedFailure + "\",\"message\":\"Unexpected failure\"}", + }, + } + + for _, example := range examples { + rec := httptest.NewRecorder() + req, err := http.NewRequest(http.MethodPost, "http://example.com", nil) + require.NoError(t, err) + + if example.APIVersion != "" { + req.Header.Set(APIVersionHeaderName, example.APIVersion) + } + + HandleResponseError(example.HTTPError, rec, req) + + require.Equal(t, example.HTTPError.HTTPStatus, rec.Code) + require.Equal(t, example.ExpectedBody, rec.Body.String()) + } +} + +func TestRecoverer(t *testing.T) { + var logBuffer bytes.Buffer + config, err := conf.LoadGlobal(apiTestConfig) + require.NoError(t, err) + require.NoError(t, observability.ConfigureLogging(&config.Logging)) + + // logrus should write to the buffer so we can check if the logs are output correctly + logrus.SetOutput(&logBuffer) + panicHandler := recoverer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + panic("test panic") + })) + + w := httptest.NewRecorder() + req, err := http.NewRequest(http.MethodPost, "http://example.com", nil) + require.NoError(t, err) + + panicHandler.ServeHTTP(w, req) + + require.Equal(t, http.StatusInternalServerError, w.Code) + + var data HTTPError + + // panic should return an internal server error + require.NoError(t, json.NewDecoder(w.Body).Decode(&data)) + require.Equal(t, ErrorCodeUnexpectedFailure, data.ErrorCode) + require.Equal(t, http.StatusInternalServerError, data.HTTPStatus) + require.Equal(t, "Internal Server Error", data.Message) + + // panic should log the error message internally + var logs map[string]interface{} + require.NoError(t, json.NewDecoder(&logBuffer).Decode(&logs)) + require.Equal(t, "request panicked", logs["msg"]) + require.Equal(t, "test panic", logs["panic"]) + require.NotEmpty(t, logs["stack"]) +} diff --git a/auth_v2.169.0/internal/api/external.go 
b/auth_v2.169.0/internal/api/external.go new file mode 100644 index 0000000..768343d --- /dev/null +++ b/auth_v2.169.0/internal/api/external.go @@ -0,0 +1,684 @@ +package api + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/fatih/structs" + "github.com/gofrs/uuid" + jwt "github.com/golang-jwt/jwt/v5" + "github.com/sirupsen/logrus" + "github.com/supabase/auth/internal/api/provider" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/observability" + "github.com/supabase/auth/internal/storage" + "github.com/supabase/auth/internal/utilities" + "golang.org/x/oauth2" +) + +// ExternalProviderClaims are the JWT claims sent as the state in the external oauth provider signup flow +type ExternalProviderClaims struct { + AuthMicroserviceClaims + Provider string `json:"provider"` + InviteToken string `json:"invite_token,omitempty"` + Referrer string `json:"referrer,omitempty"` + FlowStateID string `json:"flow_state_id"` + LinkingTargetID string `json:"linking_target_id,omitempty"` +} + +// ExternalProviderRedirect redirects the request to the oauth provider +func (a *API) ExternalProviderRedirect(w http.ResponseWriter, r *http.Request) error { + rurl, err := a.GetExternalProviderRedirectURL(w, r, nil) + if err != nil { + return err + } + http.Redirect(w, r, rurl, http.StatusFound) + return nil +} + +// GetExternalProviderRedirectURL returns the URL to start the oauth flow with the corresponding oauth provider +func (a *API) GetExternalProviderRedirectURL(w http.ResponseWriter, r *http.Request, linkingTargetUser *models.User) (string, error) { + ctx := r.Context() + db := a.db.WithContext(ctx) + config := a.config + + query := r.URL.Query() + providerType := query.Get("provider") + scopes := query.Get("scopes") + codeChallenge := query.Get("code_challenge") + codeChallengeMethod := query.Get("code_challenge_method") + + p, err := a.Provider(ctx, providerType, scopes) + if err != nil { + return "", badRequestError(ErrorCodeValidationFailed, "Unsupported provider: %+v", err).WithInternalError(err) + } + + inviteToken := query.Get("invite_token") + if inviteToken != "" { + _, userErr := models.FindUserByConfirmationToken(db, inviteToken) + if userErr != nil { + if models.IsNotFoundError(userErr) { + return "", notFoundError(ErrorCodeUserNotFound, "User identified by token not found") + } + return "", internalServerError("Database error finding user").WithInternalError(userErr) + } + } + + redirectURL := utilities.GetReferrer(r, config) + log := observability.GetLogEntry(r).Entry + log.WithField("provider", providerType).Info("Redirecting to external provider") + if err := validatePKCEParams(codeChallengeMethod, codeChallenge); err != nil { + return "", err + } + flowType := getFlowFromChallenge(codeChallenge) + + flowStateID := "" + if isPKCEFlow(flowType) { + flowState, err := generateFlowState(a.db, providerType, models.OAuth, codeChallengeMethod, codeChallenge, nil) + if err != nil { + return "", err + } + flowStateID = flowState.ID.String() + } + + claims := ExternalProviderClaims{ + AuthMicroserviceClaims: AuthMicroserviceClaims{ + RegisteredClaims: jwt.RegisteredClaims{ + ExpiresAt: jwt.NewNumericDate(time.Now().Add(5 * time.Minute)), + }, + SiteURL: config.SiteURL, + InstanceID: uuid.Nil.String(), + }, + Provider: providerType, + InviteToken: inviteToken, + Referrer: redirectURL, + FlowStateID: flowStateID, + } + + if linkingTargetUser != nil { + // this means that the user is 
performing manual linking + claims.LinkingTargetID = linkingTargetUser.ID.String() + } + + tokenString, err := signJwt(&config.JWT, claims) + if err != nil { + return "", internalServerError("Error creating state").WithInternalError(err) + } + + authUrlParams := make([]oauth2.AuthCodeOption, 0) + query.Del("scopes") + query.Del("provider") + query.Del("code_challenge") + query.Del("code_challenge_method") + for key := range query { + if key == "workos_provider" { + // See https://workos.com/docs/reference/sso/authorize/get + authUrlParams = append(authUrlParams, oauth2.SetAuthURLParam("provider", query.Get(key))) + } else { + authUrlParams = append(authUrlParams, oauth2.SetAuthURLParam(key, query.Get(key))) + } + } + + authURL := p.AuthCodeURL(tokenString, authUrlParams...) + + return authURL, nil +} + +// ExternalProviderCallback handles the callback endpoint in the external oauth provider flow +func (a *API) ExternalProviderCallback(w http.ResponseWriter, r *http.Request) error { + rurl := a.getExternalRedirectURL(r) + u, err := url.Parse(rurl) + if err != nil { + return err + } + redirectErrors(a.internalExternalProviderCallback, w, r, u) + return nil +} + +func (a *API) handleOAuthCallback(r *http.Request) (*OAuthProviderData, error) { + ctx := r.Context() + providerType := getExternalProviderType(ctx) + + var oAuthResponseData *OAuthProviderData + var err error + switch providerType { + case "twitter": + // future OAuth1.0 providers will use this method + oAuthResponseData, err = a.oAuth1Callback(ctx, providerType) + default: + oAuthResponseData, err = a.oAuthCallback(ctx, r, providerType) + } + if err != nil { + return nil, err + } + return oAuthResponseData, nil +} + +func (a *API) internalExternalProviderCallback(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + + var grantParams models.GrantParams + grantParams.FillGrantParams(r) + + providerType := getExternalProviderType(ctx) + data, err := a.handleOAuthCallback(r) + if err != nil { + return err + } + + userData := data.userData + if len(userData.Emails) <= 0 { + return internalServerError("Error getting user email from external provider") + } + userData.Metadata.EmailVerified = false + for _, email := range userData.Emails { + if email.Primary { + userData.Metadata.Email = email.Email + userData.Metadata.EmailVerified = email.Verified + break + } else { + userData.Metadata.Email = email.Email + userData.Metadata.EmailVerified = email.Verified + } + } + providerAccessToken := data.token + providerRefreshToken := data.refreshToken + + var flowState *models.FlowState + // if there's a non-empty FlowStateID we perform PKCE Flow + if flowStateID := getFlowStateID(ctx); flowStateID != "" { + flowState, err = models.FindFlowStateByID(a.db, flowStateID) + if models.IsNotFoundError(err) { + return unprocessableEntityError(ErrorCodeFlowStateNotFound, "Flow state not found").WithInternalError(err) + } else if err != nil { + return internalServerError("Failed to find flow state").WithInternalError(err) + } + + } + + var user *models.User + var token *AccessTokenResponse + err = db.Transaction(func(tx *storage.Connection) error { + var terr error + if targetUser := getTargetUser(ctx); targetUser != nil { + if user, terr = a.linkIdentityToUser(r, ctx, tx, userData, providerType); terr != nil { + return terr + } + } else if inviteToken := getInviteToken(ctx); inviteToken != "" { + if user, terr = a.processInvite(r, tx, userData, inviteToken, providerType); terr != nil { + return terr + } + } 
else { + if user, terr = a.createAccountFromExternalIdentity(tx, r, userData, providerType); terr != nil { + return terr + } + } + if flowState != nil { + // This means that the callback is using PKCE + flowState.ProviderAccessToken = providerAccessToken + flowState.ProviderRefreshToken = providerRefreshToken + flowState.UserID = &(user.ID) + issueTime := time.Now() + flowState.AuthCodeIssuedAt = &issueTime + + terr = tx.Update(flowState) + } else { + token, terr = a.issueRefreshToken(r, tx, user, models.OAuth, grantParams) + } + + if terr != nil { + return oauthError("server_error", terr.Error()) + } + return nil + }) + + if err != nil { + return err + } + + rurl := a.getExternalRedirectURL(r) + if flowState != nil { + // This means that the callback is using PKCE + // Set the flowState.AuthCode to the query param here + rurl, err = a.prepPKCERedirectURL(rurl, flowState.AuthCode) + if err != nil { + return err + } + } else if token != nil { + q := url.Values{} + q.Set("provider_token", providerAccessToken) + // Because not all providers give out a refresh token + // See corresponding OAuth2 spec: + if providerRefreshToken != "" { + q.Set("provider_refresh_token", providerRefreshToken) + } + + rurl = token.AsRedirectURL(rurl, q) + + } + + http.Redirect(w, r, rurl, http.StatusFound) + return nil +} + +func (a *API) createAccountFromExternalIdentity(tx *storage.Connection, r *http.Request, userData *provider.UserProvidedData, providerType string) (*models.User, error) { + ctx := r.Context() + aud := a.requestAud(ctx, r) + config := a.config + + var user *models.User + var identity *models.Identity + var identityData map[string]interface{} + if userData.Metadata != nil { + identityData = structs.Map(userData.Metadata) + } + + decision, terr := models.DetermineAccountLinking(tx, config, userData.Emails, aud, providerType, userData.Metadata.Subject) + if terr != nil { + return nil, terr + } + + switch decision.Decision { + case models.LinkAccount: + user = decision.User + + if identity, terr = a.createNewIdentity(tx, user, providerType, identityData); terr != nil { + return nil, terr + } + + if terr = user.UpdateUserMetaData(tx, identityData); terr != nil { + return nil, terr + } + + if terr = user.UpdateAppMetaDataProviders(tx); terr != nil { + return nil, terr + } + + case models.CreateAccount: + if config.DisableSignup { + return nil, unprocessableEntityError(ErrorCodeSignupDisabled, "Signups not allowed for this instance") + } + + params := &SignupParams{ + Provider: providerType, + Email: decision.CandidateEmail.Email, + Aud: aud, + Data: identityData, + } + + isSSOUser := false + if strings.HasPrefix(decision.LinkingDomain, "sso:") { + isSSOUser = true + } + + // because params above sets no password, this method is not + // computationally hard so it can be used within a database + // transaction + user, terr = params.ToUserModel(isSSOUser) + if terr != nil { + return nil, terr + } + + if user, terr = a.signupNewUser(tx, user); terr != nil { + return nil, terr + } + + if identity, terr = a.createNewIdentity(tx, user, providerType, identityData); terr != nil { + return nil, terr + } + user.Identities = append(user.Identities, *identity) + case models.AccountExists: + user = decision.User + identity = decision.Identities[0] + + identity.IdentityData = identityData + if terr = tx.UpdateOnly(identity, "identity_data", "last_sign_in_at"); terr != nil { + return nil, terr + } + if terr = user.UpdateUserMetaData(tx, identityData); terr != nil { + return nil, terr + } + if terr = 
user.UpdateAppMetaDataProviders(tx); terr != nil { + return nil, terr + } + + case models.MultipleAccounts: + return nil, internalServerError("Multiple accounts with the same email address in the same linking domain detected: %v", decision.LinkingDomain) + + default: + return nil, internalServerError("Unknown automatic linking decision: %v", decision.Decision) + } + + if user.IsBanned() { + return nil, forbiddenError(ErrorCodeUserBanned, "User is banned") + } + + if !user.IsConfirmed() { + // The user may have other unconfirmed email + password + // combination, phone or oauth identities. These identities + // need to be removed when a new oauth identity is being added + // to prevent pre-account takeover attacks from happening. + if terr = user.RemoveUnconfirmedIdentities(tx, identity); terr != nil { + return nil, internalServerError("Error updating user").WithInternalError(terr) + } + if decision.CandidateEmail.Verified || config.Mailer.Autoconfirm { + if terr := models.NewAuditLogEntry(r, tx, user, models.UserSignedUpAction, "", map[string]interface{}{ + "provider": providerType, + }); terr != nil { + return nil, terr + } + // fall through to auto-confirm and issue token + if terr = user.Confirm(tx); terr != nil { + return nil, internalServerError("Error updating user").WithInternalError(terr) + } + } else { + emailConfirmationSent := false + if decision.CandidateEmail.Email != "" { + if terr = a.sendConfirmation(r, tx, user, models.ImplicitFlow); terr != nil { + return nil, terr + } + emailConfirmationSent = true + } + if !config.Mailer.AllowUnverifiedEmailSignIns { + if emailConfirmationSent { + return nil, storage.NewCommitWithError(unprocessableEntityError(ErrorCodeProviderEmailNeedsVerification, fmt.Sprintf("Unverified email with %v. A confirmation email has been sent to your %v email", providerType, providerType))) + } + return nil, storage.NewCommitWithError(unprocessableEntityError(ErrorCodeProviderEmailNeedsVerification, fmt.Sprintf("Unverified email with %v. 
Verify the email with %v in order to sign in", providerType, providerType)))
+ }
+ }
+ } else {
+ if terr := models.NewAuditLogEntry(r, tx, user, models.LoginAction, "", map[string]interface{}{
+ "provider": providerType,
+ }); terr != nil {
+ return nil, terr
+ }
+ }
+
+ return user, nil
+}
+
+func (a *API) processInvite(r *http.Request, tx *storage.Connection, userData *provider.UserProvidedData, inviteToken, providerType string) (*models.User, error) {
+ user, err := models.FindUserByConfirmationToken(tx, inviteToken)
+ if err != nil {
+ if models.IsNotFoundError(err) {
+ return nil, notFoundError(ErrorCodeInviteNotFound, "Invite not found")
+ }
+ return nil, internalServerError("Database error finding user").WithInternalError(err)
+ }
+
+ var emailData *provider.Email
+ var emails []string
+ for i, e := range userData.Emails {
+ emails = append(emails, e.Email)
+ if user.GetEmail() == e.Email {
+ emailData = &userData.Emails[i]
+ break
+ }
+ }
+
+ if emailData == nil {
+ return nil, badRequestError(ErrorCodeValidationFailed, "Invited email does not match emails from external provider").WithInternalMessage("invited=%s external=%s", user.Email, strings.Join(emails, ", "))
+ }
+
+ var identityData map[string]interface{}
+ if userData.Metadata != nil {
+ identityData = structs.Map(userData.Metadata)
+ }
+ identity, err := a.createNewIdentity(tx, user, providerType, identityData)
+ if err != nil {
+ return nil, err
+ }
+ if err := user.UpdateAppMetaData(tx, map[string]interface{}{
+ "provider": providerType,
+ }); err != nil {
+ return nil, err
+ }
+ if err := user.UpdateAppMetaDataProviders(tx); err != nil {
+ return nil, err
+ }
+ if err := user.UpdateUserMetaData(tx, identityData); err != nil {
+ return nil, internalServerError("Database error updating user").WithInternalError(err)
+ }
+
+ if err := models.NewAuditLogEntry(r, tx, user, models.InviteAcceptedAction, "", map[string]interface{}{
+ "provider": providerType,
+ }); err != nil {
+ return nil, err
+ }
+
+ // an account with a previously unconfirmed email + password
+ // combination or phone may exist. So now that there is an
+ // OAuth identity bound to this user, and since they have not
+ // confirmed their email or phone, they are unaware that a
+ // potentially malicious door exists into their account; thus
+ // the password and phone need to be removed.
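+ // For example, an attacker could have pre-registered the invitee's
+ // address with a password that was never confirmed; dropping those
+ // unconfirmed identities here closes that door before the invite is
+ // confirmed below.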
+ if err := user.RemoveUnconfirmedIdentities(tx, identity); err != nil { + return nil, internalServerError("Error updating user").WithInternalError(err) + } + + // confirm because they were able to respond to invite email + if err := user.Confirm(tx); err != nil { + return nil, err + } + return user, nil +} + +func (a *API) loadExternalState(ctx context.Context, r *http.Request) (context.Context, error) { + var state string + switch r.Method { + case http.MethodPost: + state = r.FormValue("state") + default: + state = r.URL.Query().Get("state") + } + if state == "" { + return ctx, badRequestError(ErrorCodeBadOAuthCallback, "OAuth state parameter missing") + } + config := a.config + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods(config.JWT.ValidMethods)) + _, err := p.ParseWithClaims(state, &claims, func(token *jwt.Token) (interface{}, error) { + if kid, ok := token.Header["kid"]; ok { + if kidStr, ok := kid.(string); ok { + return conf.FindPublicKeyByKid(kidStr, &config.JWT) + } + } + if alg, ok := token.Header["alg"]; ok { + if alg == jwt.SigningMethodHS256.Name { + // preserve backward compatibility for cases where the kid is not set + return []byte(config.JWT.Secret), nil + } + } + return nil, fmt.Errorf("missing kid") + }) + if err != nil { + return ctx, badRequestError(ErrorCodeBadOAuthState, "OAuth callback with invalid state").WithInternalError(err) + } + if claims.Provider == "" { + return ctx, badRequestError(ErrorCodeBadOAuthState, "OAuth callback with invalid state (missing provider)") + } + if claims.InviteToken != "" { + ctx = withInviteToken(ctx, claims.InviteToken) + } + if claims.Referrer != "" { + ctx = withExternalReferrer(ctx, claims.Referrer) + } + if claims.FlowStateID != "" { + ctx = withFlowStateID(ctx, claims.FlowStateID) + } + if claims.LinkingTargetID != "" { + linkingTargetUserID, err := uuid.FromString(claims.LinkingTargetID) + if err != nil { + return nil, badRequestError(ErrorCodeBadOAuthState, "OAuth callback with invalid state (linking_target_id must be UUID)") + } + u, err := models.FindUserByID(a.db, linkingTargetUserID) + if err != nil { + if models.IsNotFoundError(err) { + return nil, unprocessableEntityError(ErrorCodeUserNotFound, "Linking target user not found") + } + return nil, internalServerError("Database error loading user").WithInternalError(err) + } + ctx = withTargetUser(ctx, u) + } + ctx = withExternalProviderType(ctx, claims.Provider) + return withSignature(ctx, state), nil +} + +// Provider returns a Provider interface for the given name. 
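+// Names are matched case-insensitively; unknown names yield an error.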
+func (a *API) Provider(ctx context.Context, name string, scopes string) (provider.Provider, error) { + config := a.config + name = strings.ToLower(name) + + switch name { + case "apple": + return provider.NewAppleProvider(ctx, config.External.Apple) + case "azure": + return provider.NewAzureProvider(config.External.Azure, scopes) + case "bitbucket": + return provider.NewBitbucketProvider(config.External.Bitbucket) + case "discord": + return provider.NewDiscordProvider(config.External.Discord, scopes) + case "facebook": + return provider.NewFacebookProvider(config.External.Facebook, scopes) + case "figma": + return provider.NewFigmaProvider(config.External.Figma, scopes) + case "fly": + return provider.NewFlyProvider(config.External.Fly, scopes) + case "github": + return provider.NewGithubProvider(config.External.Github, scopes) + case "gitlab": + return provider.NewGitlabProvider(config.External.Gitlab, scopes) + case "google": + return provider.NewGoogleProvider(ctx, config.External.Google, scopes) + case "kakao": + return provider.NewKakaoProvider(config.External.Kakao, scopes) + case "keycloak": + return provider.NewKeycloakProvider(config.External.Keycloak, scopes) + case "linkedin": + return provider.NewLinkedinProvider(config.External.Linkedin, scopes) + case "linkedin_oidc": + return provider.NewLinkedinOIDCProvider(config.External.LinkedinOIDC, scopes) + case "notion": + return provider.NewNotionProvider(config.External.Notion) + case "spotify": + return provider.NewSpotifyProvider(config.External.Spotify, scopes) + case "slack": + return provider.NewSlackProvider(config.External.Slack, scopes) + case "slack_oidc": + return provider.NewSlackOIDCProvider(config.External.SlackOIDC, scopes) + case "twitch": + return provider.NewTwitchProvider(config.External.Twitch, scopes) + case "twitter": + return provider.NewTwitterProvider(config.External.Twitter, scopes) + case "vercel_marketplace": + return provider.NewVercelMarketplaceProvider(config.External.VercelMarketplace, scopes) + case "workos": + return provider.NewWorkOSProvider(config.External.WorkOS) + case "zoom": + return provider.NewZoomProvider(config.External.Zoom) + default: + return nil, fmt.Errorf("Provider %s could not be found", name) + } +} + +func redirectErrors(handler apiHandler, w http.ResponseWriter, r *http.Request, u *url.URL) { + ctx := r.Context() + log := observability.GetLogEntry(r).Entry + errorID := utilities.GetRequestID(ctx) + err := handler(w, r) + if err != nil { + q := getErrorQueryString(err, errorID, log, u.Query()) + u.RawQuery = q.Encode() + + // TODO: deprecate returning error details in the query fragment + hq := url.Values{} + if q.Get("error") != "" { + hq.Set("error", q.Get("error")) + } + if q.Get("error_description") != "" { + hq.Set("error_description", q.Get("error_description")) + } + if q.Get("error_code") != "" { + hq.Set("error_code", q.Get("error_code")) + } + u.Fragment = hq.Encode() + http.Redirect(w, r, u.String(), http.StatusFound) + } +} + +func getErrorQueryString(err error, errorID string, log logrus.FieldLogger, q url.Values) *url.Values { + switch e := err.(type) { + case *HTTPError: + if e.ErrorCode == ErrorCodeSignupDisabled { + q.Set("error", "access_denied") + } else if e.ErrorCode == ErrorCodeUserBanned { + q.Set("error", "access_denied") + } else if e.ErrorCode == ErrorCodeProviderEmailNeedsVerification { + q.Set("error", "access_denied") + } else if str, ok := oauthErrorMap[e.HTTPStatus]; ok { + q.Set("error", str) + } else { + q.Set("error", "server_error") + } + if 
e.HTTPStatus >= http.StatusInternalServerError { + e.ErrorID = errorID + // this will get us the stack trace too + log.WithError(e.Cause()).Error(e.Error()) + } else { + log.WithError(e.Cause()).Info(e.Error()) + } + q.Set("error_description", e.Message) + q.Set("error_code", e.ErrorCode) + case *OAuthError: + q.Set("error", e.Err) + q.Set("error_description", e.Description) + log.WithError(e.Cause()).Info(e.Error()) + case ErrorCause: + return getErrorQueryString(e.Cause(), errorID, log, q) + default: + error_type, error_description := "server_error", err.Error() + + // Provide better error messages for certain user-triggered Postgres errors. + if pgErr := utilities.NewPostgresError(e); pgErr != nil { + error_description = pgErr.Message + if oauthErrorType, ok := oauthErrorMap[pgErr.HttpStatusCode]; ok { + error_type = oauthErrorType + } + } + + q.Set("error", error_type) + q.Set("error_description", error_description) + } + return &q +} + +func (a *API) getExternalRedirectURL(r *http.Request) string { + ctx := r.Context() + config := a.config + if config.External.RedirectURL != "" { + return config.External.RedirectURL + } + if er := getExternalReferrer(ctx); er != "" { + return er + } + return config.SiteURL +} + +func (a *API) createNewIdentity(tx *storage.Connection, user *models.User, providerType string, identityData map[string]interface{}) (*models.Identity, error) { + identity, err := models.NewIdentity(user, providerType, identityData) + if err != nil { + return nil, err + } + + if terr := tx.Create(identity); terr != nil { + return nil, internalServerError("Error creating identity").WithInternalError(terr) + } + + return identity, nil +} diff --git a/auth_v2.169.0/internal/api/external_apple_test.go b/auth_v2.169.0/internal/api/external_apple_test.go new file mode 100644 index 0000000..5a0b497 --- /dev/null +++ b/auth_v2.169.0/internal/api/external_apple_test.go @@ -0,0 +1,33 @@ +package api + +import ( + "net/http" + "net/http/httptest" + "net/url" + + jwt "github.com/golang-jwt/jwt/v5" +) + +func (ts *ExternalTestSuite) TestSignupExternalApple() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=apple", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.Apple.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Apple.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("email name", q.Get("scope")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("apple", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} diff --git a/auth_v2.169.0/internal/api/external_azure_test.go b/auth_v2.169.0/internal/api/external_azure_test.go new file mode 100644 index 0000000..aac124c --- /dev/null +++ b/auth_v2.169.0/internal/api/external_azure_test.go @@ -0,0 +1,269 @@ +package api + +import ( + "context" + "crypto" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "time" + + "github.com/coreos/go-oidc/v3/oidc" + jwt 
"github.com/golang-jwt/jwt/v5" + "github.com/supabase/auth/internal/api/provider" +) + +const ( + azureUser string = `{"name":"Azure Test","email":"azure@example.com","sub":"azuretestid"}` + azureUserNoEmail string = `{"name":"Azure Test","sub":"azuretestid"}` +) + +func idTokenPrivateKey() *rsa.PrivateKey { + // #nosec + der, err := base64.StdEncoding.DecodeString("MIIEpAIBAAKCAQEAvklrFDsVgbhs3DOQICMqm4xdFoi/MHj/T6XH8S7wXWd0roqdWVarwCLV4y3DILkLre4PzNK+hEY5NAnoAKrsCMyyCb4Wdl8HCdJk4ojDqAig+DJw67imqZoxJMFJyIhfMJhwVK1V8GRUPATn855rygLo7wThahMJeEHNiJr3TtV6Rf35KSs7DuyoWIUSjISYabQozKqIvpdUpTpSqjlOQvjdAxggRyycBZSgLzjWhsA8metnAMO48bX4bgiHLR6Kzu/dfPyEVPfgeYpA2ebIY6GzIUxVS0yX8+ExA6jeLCkuepjLHuz5XCJtd6zzGDXr1eX7nA6ZIeUNdFbWRDnPawIDAQABAoIBABH4Qvl1HvHSJc2hvPGcAJER71SKc2uzcYDnCfu30BEyDO3Sv0tJiQyq/YHnt26mqviw66MPH9jD/PDyIou1mHa4RfPvlJV3IeYGjWprOfbrYbAuq0VHec24dv2el0YtwreHHcyRVfVOtDm6yODTzCAWqEKyNktbIuDNbgiBgetayaJecDRoFMF9TOCeMCL92iZytzAr7fi+JWtLkRS/GZRIBjbr8LJ/ueYoCRmIx3MIw0WdPp7v2ZfeRTxP7LxJZ+MAsrq2pstmZYP7K0305e0bCJX1HexfXLs2Ul7u8zaxrXL8zw4/9+/GMsAeU3ffCVnGz/RKL5+T6iuz2RotjFECgYEA+Xk7DGwRXfDg9xba1GVFGeiC4nybqZw/RfZKcz/RRJWSHRJV/ps1avtbca3B19rjI6rewZMO1NWNv/tI2BdXP8vAKUnI9OHJZ+J/eZzmqDE6qu0v0ddRFUDzCMWE0j8BjrUdy44n4NQgopcv14u0iyr9tuhGO6YXn2SuuvEkZokCgYEAw0PNnT55kpkEhXSp7An2hdBJEub9ST7hS6Kcd8let62/qUZ/t5jWigSkWC1A2bMtH55+LgudIFjiehwVzRs7jym2j4jkKZGonyAX1l9IWgXwKl7Pn49lEQH5Yk6MhnXdyLGoFTzXiUyk/fKvgXX7jow1bD3j6sAc8P495I7TyVMCgYAHg6VJrH+har37805IE3zPWPeIRuSRaUlmnBKGAigVfsPV6FV6w8YKIOQSOn+aNtecnWr0Pa+2rXAFllYNXDaej06Mb9KDvcFJRcM9MIKqEkGIIHjOQ0QH9drcKsbjZk5vs/jfxrpgxULuYstoHKclgff+aGSlK02O2YOB0f2csQKBgQCEC/MdNiWCpKXxFg7fB3HF1i/Eb56zjKlQu7uyKeQ6tG3bLEisQNg8Z5034Apt7gRC0KyluMbeHB2z1BBOLu9dBill8X3SOqVcTpiwKKlF76QVEx622YLQOJSMDXBscYK0+KchDY74U3N0JEzZcI7YPCrYcxYRJy+rLVNvn8LK7wKBgQDE8THsZ589e10F0zDBvPK56o8PJnPeH71sgdM2Co4oLzBJ6g0rpJOKfcc03fLHsoJVOAya9WZeIy6K8+WVdcPTadR07S4p8/tcK1eguu5qlmCUOzswrTKAaJoIHO7cddQp3nySIqgYtkGdHKuvlQDMQkEKJS0meOm+vdeAG2rkaA==") + if err != nil { + panic(err) + } + + privateKey, err := x509.ParsePKCS1PrivateKey(der) + if err != nil { + panic(err) + } + + privateKey.E = 65537 + + return privateKey +} + +func setupAzureOverrideVerifiers() { + provider.OverrideVerifiers["https://login.microsoftonline.com/9188040d-6c67-4c5b-b112-36a304b66dad/oauth2/v2.0/authorize"] = func(ctx context.Context, config *oidc.Config) *oidc.IDTokenVerifier { + pk := idTokenPrivateKey() + + return oidc.NewVerifier( + provider.IssuerAzureMicrosoft, + &oidc.StaticKeySet{ + PublicKeys: []crypto.PublicKey{ + &pk.PublicKey, + }, + }, + config, + ) + } +} + +func mintIDToken(user string) string { + var idToken struct { + Issuer string `json:"iss"` + IssuedAt int `json:"iat"` + ExpiresAt int `json:"exp"` + Audience string `json:"aud"` + + Sub string `json:"sub,omitempty"` + Name string `json:"name,omitempty"` + Email string `json:"email,omitempty"` + XmsEdov any `json:"xms_edov,omitempty"` + } + + if err := json.Unmarshal([]byte(user), &idToken); err != nil { + panic(err) + } + + now := time.Now() + + idToken.Issuer = provider.IssuerAzureMicrosoft + idToken.IssuedAt = int(now.Unix()) + idToken.ExpiresAt = int(now.Unix() + 60*60) + idToken.Audience = "testclientid" + + header := base64.RawURLEncoding.EncodeToString([]byte(`{"typ":"JWT","alg":"RS256"}`)) + + data, err := json.Marshal(idToken) + if err != nil { + panic(err) + } + + payload := base64.RawURLEncoding.EncodeToString(data) + sum := sha256.Sum256([]byte(header + "." 
+ payload)) + + pk := idTokenPrivateKey() + sig, err := rsa.SignPKCS1v15(nil, pk, crypto.SHA256, sum[:]) + if err != nil { + panic(err) + } + + token := header + "." + payload + "." + base64.RawURLEncoding.EncodeToString(sig) + + return token +} + +func (ts *ExternalTestSuite) TestSignupExternalAzure() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=azure", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.Azure.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Azure.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("openid", q.Get("scope")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("azure", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func AzureTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, code string, user string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/oauth2/v2.0/token": + *tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Azure.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{"access_token":"azure_token","expires_in":100000,"id_token":%q}`, mintIDToken(user)) + default: + w.WriteHeader(500) + ts.Fail("unknown azure oauth call %s", r.URL.Path) + } + })) + + ts.Config.External.Azure.URL = server.URL + ts.Config.External.Azure.ApiURL = server.URL + + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalAzure_AuthorizationCode() { + setupAzureOverrideVerifiers() + + ts.Config.DisableSignup = false + tokenCount := 0 + code := "authcode" + server := AzureTestSignupSetup(ts, &tokenCount, code, azureUser) + defer server.Close() + + u := performAuthorization(ts, "azure", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, -1, "azure@example.com", "Azure Test", "azuretestid", "") +} + +func (ts *ExternalTestSuite) TestSignupExternalAzureDisableSignupErrorWhenNoUser() { + setupAzureOverrideVerifiers() + + ts.Config.DisableSignup = true + tokenCount := 0 + code := "authcode" + server := AzureTestSignupSetup(ts, &tokenCount, code, azureUser) + defer server.Close() + + u := performAuthorization(ts, "azure", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", "azure@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalAzureDisableSignupErrorWhenNoEmail() { + setupAzureOverrideVerifiers() + + ts.Config.DisableSignup = true + tokenCount := 0 + code := "authcode" + server := AzureTestSignupSetup(ts, &tokenCount, code, azureUserNoEmail) + defer server.Close() + + u := performAuthorization(ts, "azure", code, "") + + assertAuthorizationFailure(ts, u, "Error getting user email from external provider", "server_error", "azure@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalAzureDisableSignupSuccessWithPrimaryEmail() { + 
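// signups are disabled below, yet sign-in still succeeds: the user
+ // created here already exists, so the Azure identity is linked to it
+ // instead of creating a new account
+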
setupAzureOverrideVerifiers() + + ts.Config.DisableSignup = true + + ts.createUser("azuretestid", "azure@example.com", "Azure Test", "", "") + + tokenCount := 0 + code := "authcode" + server := AzureTestSignupSetup(ts, &tokenCount, code, azureUser) + defer server.Close() + + u := performAuthorization(ts, "azure", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, -1, "azure@example.com", "Azure Test", "azuretestid", "") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalAzureSuccessWhenMatchingToken() { + setupAzureOverrideVerifiers() + + // name should be populated from Azure API + ts.createUser("azuretestid", "azure@example.com", "", "", "invite_token") + + tokenCount := 0 + code := "authcode" + server := AzureTestSignupSetup(ts, &tokenCount, code, azureUser) + defer server.Close() + + u := performAuthorization(ts, "azure", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, -1, "azure@example.com", "Azure Test", "azuretestid", "") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalAzureErrorWhenNoMatchingToken() { + setupAzureOverrideVerifiers() + + tokenCount := 0 + code := "authcode" + azureUser := `{"name":"Azure Test","avatar":{"href":"http://example.com/avatar"}}` + server := AzureTestSignupSetup(ts, &tokenCount, code, azureUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "azure", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalAzureErrorWhenWrongToken() { + setupAzureOverrideVerifiers() + + ts.createUser("azuretestid", "azure@example.com", "", "", "invite_token") + + tokenCount := 0 + code := "authcode" + azureUser := `{"name":"Azure Test","avatar":{"href":"http://example.com/avatar"}}` + server := AzureTestSignupSetup(ts, &tokenCount, code, azureUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "azure", "wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalAzureErrorWhenEmailDoesntMatch() { + setupAzureOverrideVerifiers() + + ts.createUser("azuretestid", "azure@example.com", "", "", "invite_token") + + tokenCount := 0 + code := "authcode" + azureUser := `{"name":"Azure Test", "email":"other@example.com", "avatar":{"href":"http://example.com/avatar"}}` + server := AzureTestSignupSetup(ts, &tokenCount, code, azureUser) + defer server.Close() + + u := performAuthorization(ts, "azure", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from external provider", "invalid_request", "") +} diff --git a/auth_v2.169.0/internal/api/external_bitbucket_test.go b/auth_v2.169.0/internal/api/external_bitbucket_test.go new file mode 100644 index 0000000..66b3bd4 --- /dev/null +++ b/auth_v2.169.0/internal/api/external_bitbucket_test.go @@ -0,0 +1,195 @@ +package api + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + + jwt "github.com/golang-jwt/jwt/v5" +) + +const ( + bitbucketUser string = `{"uuid":"bitbucketTestId","display_name":"Bitbucket Test","avatar":{"href":"http://example.com/avatar"}}` +) + +func (ts *ExternalTestSuite) TestSignupExternalBitbucket() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=bitbucket", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + 
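// the authorize redirect should carry the standard OAuth2 query parameters
+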
ts.Equal(ts.Config.External.Bitbucket.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Bitbucket.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("account email", q.Get("scope")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("bitbucket", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func BitbucketTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, userCount *int, code string, user string, emails string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/site/oauth2/access_token": + *tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Bitbucket.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"access_token":"bitbucket_token","expires_in":100000}`) + case "/2.0/user": + *userCount++ + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, user) + case "/2.0/user/emails": + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, emails) + default: + w.WriteHeader(500) + ts.Fail("unknown bitbucket oauth call %s", r.URL.Path) + } + })) + + ts.Config.External.Bitbucket.URL = server.URL + + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalBitbucket_AuthorizationCode() { + ts.Config.DisableSignup = false + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `{"values":[{"email":"bitbucket@example.com","is_primary":true,"is_confirmed":true}]}` + server := BitbucketTestSignupSetup(ts, &tokenCount, &userCount, code, bitbucketUser, emails) + defer server.Close() + + u := performAuthorization(ts, "bitbucket", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "bitbucket@example.com", "Bitbucket Test", "bitbucketTestId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestSignupExternalBitbucketDisableSignupErrorWhenNoUser() { + ts.Config.DisableSignup = true + tokenCount, userCount := 0, 0 + code := "authcode" + bitbucketUser := `{"display_name":"Bitbucket Test","avatar":{"href":"http://example.com/avatar"}}` + emails := `{"values":[{"email":"bitbucket@example.com","is_primary":true,"is_confirmed":true}]}` + server := BitbucketTestSignupSetup(ts, &tokenCount, &userCount, code, bitbucketUser, emails) + defer server.Close() + + u := performAuthorization(ts, "bitbucket", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", "bitbucket@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalBitbucketDisableSignupErrorWhenNoEmail() { + ts.Config.DisableSignup = true + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `{"values":[{}]}` + server := BitbucketTestSignupSetup(ts, &tokenCount, &userCount, code, bitbucketUser, emails) + defer server.Close() + + u := performAuthorization(ts, "bitbucket", code, "") + + assertAuthorizationFailure(ts, u, "Error getting user email from external provider", "server_error", "bitbucket@example.com") + +} + +func (ts *ExternalTestSuite) TestSignupExternalBitbucketDisableSignupSuccessWithPrimaryEmail() { + 
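// as with Azure above, an already-registered user can still sign in
+ // while signups are disabled, via linking to the existing account
+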
ts.Config.DisableSignup = true + + ts.createUser("bitbucketTestId", "bitbucket@example.com", "Bitbucket Test", "http://example.com/avatar", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `{"values":[{"email":"bitbucket@example.com","is_primary":true,"is_confirmed":true}]}` + server := BitbucketTestSignupSetup(ts, &tokenCount, &userCount, code, bitbucketUser, emails) + defer server.Close() + + u := performAuthorization(ts, "bitbucket", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "bitbucket@example.com", "Bitbucket Test", "bitbucketTestId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestSignupExternalBitbucketDisableSignupSuccessWithSecondaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("bitbucketTestId", "secondary@example.com", "Bitbucket Test", "http://example.com/avatar", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `{"values":[{"email":"primary@example.com","is_primary":true,"is_confirmed":true},{"email":"secondary@example.com","is_primary":false,"is_confirmed":true}]}` + server := BitbucketTestSignupSetup(ts, &tokenCount, &userCount, code, bitbucketUser, emails) + defer server.Close() + + u := performAuthorization(ts, "bitbucket", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "secondary@example.com", "Bitbucket Test", "bitbucketTestId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalBitbucketSuccessWhenMatchingToken() { + // name and avatar should be populated from Bitbucket API + ts.createUser("bitbucketTestId", "bitbucket@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `{"values":[{"email":"bitbucket@example.com","is_primary":true,"is_confirmed":true}]}` + server := BitbucketTestSignupSetup(ts, &tokenCount, &userCount, code, bitbucketUser, emails) + defer server.Close() + + u := performAuthorization(ts, "bitbucket", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "bitbucket@example.com", "Bitbucket Test", "bitbucketTestId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalBitbucketErrorWhenNoMatchingToken() { + tokenCount, userCount := 0, 0 + code := "authcode" + bitbucketUser := `{"display_name":"Bitbucket Test","avatar":{"href":"http://example.com/avatar"}}` + emails := `{"values":[{"email":"bitbucket@example.com","is_primary":true,"is_confirmed":true}]}` + server := BitbucketTestSignupSetup(ts, &tokenCount, &userCount, code, bitbucketUser, emails) + defer server.Close() + + w := performAuthorizationRequest(ts, "bitbucket", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalBitbucketErrorWhenWrongToken() { + ts.createUser("bitbucketTestId", "bitbucket@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + bitbucketUser := `{"display_name":"Bitbucket Test","avatar":{"href":"http://example.com/avatar"}}` + emails := `{"values":[{"email":"bitbucket@example.com","is_primary":true,"is_confirmed":true}]}` + server := BitbucketTestSignupSetup(ts, &tokenCount, &userCount, code, bitbucketUser, emails) + defer server.Close() + + w := performAuthorizationRequest(ts, "bitbucket", "wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalBitbucketErrorWhenEmailDoesntMatch() { + ts.createUser("bitbucketTestId", 
"bitbucket@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `{"values":[{"email":"other@example.com","is_primary":true,"is_confirmed":true}]}` + server := BitbucketTestSignupSetup(ts, &tokenCount, &userCount, code, bitbucketUser, emails) + defer server.Close() + + u := performAuthorization(ts, "bitbucket", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from external provider", "invalid_request", "") +} diff --git a/auth_v2.169.0/internal/api/external_discord_test.go b/auth_v2.169.0/internal/api/external_discord_test.go new file mode 100644 index 0000000..7b6be8d --- /dev/null +++ b/auth_v2.169.0/internal/api/external_discord_test.go @@ -0,0 +1,167 @@ +package api + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + + jwt "github.com/golang-jwt/jwt/v5" +) + +const ( + discordUser string = `{"id":"discordTestId","avatar":"abc","email":"discord@example.com","username":"Discord Test","verified":true,"discriminator":"0001"}}` + discordUserWrongEmail string = `{"id":"discordTestId","avatar":"abc","email":"other@example.com","username":"Discord Test","verified":true}}` + discordUserNoEmail string = `{"id":"discordTestId","avatar":"abc","username":"Discord Test","verified":true}}` +) + +func (ts *ExternalTestSuite) TestSignupExternalDiscord() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=discord", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.Discord.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Discord.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("email identify", q.Get("scope")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("discord", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func DiscordTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, userCount *int, code string, user string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/oauth2/token": + *tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Discord.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"access_token":"discord_token","expires_in":100000}`) + case "/api/users/@me": + *userCount++ + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, user) + default: + w.WriteHeader(500) + ts.Fail("unknown discord oauth call %s", r.URL.Path) + } + })) + + ts.Config.External.Discord.URL = server.URL + + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalDiscord_AuthorizationCode() { + ts.Config.DisableSignup = false + tokenCount, userCount := 0, 0 + code := "authcode" + server := DiscordTestSignupSetup(ts, &tokenCount, &userCount, code, discordUser) + defer server.Close() + + u := performAuthorization(ts, "discord", code, "") + + 
assertAuthorizationSuccess(ts, u, tokenCount, userCount, "discord@example.com", "Discord Test", "discordTestId", "https://cdn.discordapp.com/avatars/discordTestId/abc.png") +} + +func (ts *ExternalTestSuite) TestSignupExternalDiscordDisableSignupErrorWhenNoUser() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + server := DiscordTestSignupSetup(ts, &tokenCount, &userCount, code, discordUser) + defer server.Close() + + u := performAuthorization(ts, "discord", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", "discord@example.com") +} +func (ts *ExternalTestSuite) TestSignupExternalDiscordDisableSignupErrorWhenEmptyEmail() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + server := DiscordTestSignupSetup(ts, &tokenCount, &userCount, code, discordUserNoEmail) + defer server.Close() + + u := performAuthorization(ts, "discord", code, "") + + assertAuthorizationFailure(ts, u, "Error getting user email from external provider", "server_error", "discord@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalDiscordDisableSignupSuccessWithPrimaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("discordTestId", "discord@example.com", "Discord Test", "https://cdn.discordapp.com/avatars/discordTestId/abc.png", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := DiscordTestSignupSetup(ts, &tokenCount, &userCount, code, discordUser) + defer server.Close() + + u := performAuthorization(ts, "discord", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "discord@example.com", "Discord Test", "discordTestId", "https://cdn.discordapp.com/avatars/discordTestId/abc.png") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalDiscordSuccessWhenMatchingToken() { + // name and avatar should be populated from Discord API + ts.createUser("discordTestId", "discord@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := DiscordTestSignupSetup(ts, &tokenCount, &userCount, code, discordUser) + defer server.Close() + + u := performAuthorization(ts, "discord", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "discord@example.com", "Discord Test", "discordTestId", "https://cdn.discordapp.com/avatars/discordTestId/abc.png") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalDiscordErrorWhenNoMatchingToken() { + tokenCount, userCount := 0, 0 + code := "authcode" + server := DiscordTestSignupSetup(ts, &tokenCount, &userCount, code, discordUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "discord", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalDiscordErrorWhenWrongToken() { + ts.createUser("discordTestId", "discord@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := DiscordTestSignupSetup(ts, &tokenCount, &userCount, code, discordUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "discord", "wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalDiscordErrorWhenEmailDoesntMatch() { + ts.createUser("discordTestId", "discord@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := DiscordTestSignupSetup(ts, &tokenCount, &userCount, code, 
discordUserWrongEmail) + defer server.Close() + + u := performAuthorization(ts, "discord", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from external provider", "invalid_request", "") +} diff --git a/auth_v2.169.0/internal/api/external_facebook_test.go b/auth_v2.169.0/internal/api/external_facebook_test.go new file mode 100644 index 0000000..c1864bb --- /dev/null +++ b/auth_v2.169.0/internal/api/external_facebook_test.go @@ -0,0 +1,167 @@ +package api + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + + jwt "github.com/golang-jwt/jwt/v5" +) + +const ( + facebookUser string = `{"id":"facebookTestId","name":"Facebook Test","first_name":"Facebook","last_name":"Test","email":"facebook@example.com","picture":{"data":{"url":"http://example.com/avatar"}}}}` + facebookUserWrongEmail string = `{"id":"facebookTestId","name":"Facebook Test","first_name":"Facebook","last_name":"Test","email":"other@example.com","picture":{"data":{"url":"http://example.com/avatar"}}}}` + facebookUserNoEmail string = `{"id":"facebookTestId","name":"Facebook Test","first_name":"Facebook","last_name":"Test","picture":{"data":{"url":"http://example.com/avatar"}}}}` +) + +func (ts *ExternalTestSuite) TestSignupExternalFacebook() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=facebook", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.Facebook.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Facebook.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("email", q.Get("scope")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("facebook", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func FacebookTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, userCount *int, code string, user string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/oauth/access_token": + *tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Facebook.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"access_token":"facebook_token","expires_in":100000}`) + case "/me": + *userCount++ + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, user) + default: + w.WriteHeader(500) + ts.Fail("unknown facebook oauth call %s", r.URL.Path) + } + })) + + ts.Config.External.Facebook.URL = server.URL + + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalFacebook_AuthorizationCode() { + ts.Config.DisableSignup = false + tokenCount, userCount := 0, 0 + code := "authcode" + server := FacebookTestSignupSetup(ts, &tokenCount, &userCount, code, facebookUser) + defer server.Close() + + u := performAuthorization(ts, "facebook", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "facebook@example.com", "Facebook Test", 
"facebookTestId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestSignupExternalFacebookDisableSignupErrorWhenNoUser() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + server := FacebookTestSignupSetup(ts, &tokenCount, &userCount, code, facebookUser) + defer server.Close() + + u := performAuthorization(ts, "facebook", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", "facebook@example.com") +} +func (ts *ExternalTestSuite) TestSignupExternalFacebookDisableSignupErrorWhenEmptyEmail() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + server := FacebookTestSignupSetup(ts, &tokenCount, &userCount, code, facebookUserNoEmail) + defer server.Close() + + u := performAuthorization(ts, "facebook", code, "") + + assertAuthorizationFailure(ts, u, "Error getting user email from external provider", "server_error", "facebook@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalFacebookDisableSignupSuccessWithPrimaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("facebookTestId", "facebook@example.com", "Facebook Test", "http://example.com/avatar", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := FacebookTestSignupSetup(ts, &tokenCount, &userCount, code, facebookUser) + defer server.Close() + + u := performAuthorization(ts, "facebook", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "facebook@example.com", "Facebook Test", "facebookTestId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalFacebookSuccessWhenMatchingToken() { + // name and avatar should be populated from Facebook API + ts.createUser("facebookTestId", "facebook@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := FacebookTestSignupSetup(ts, &tokenCount, &userCount, code, facebookUser) + defer server.Close() + + u := performAuthorization(ts, "facebook", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "facebook@example.com", "Facebook Test", "facebookTestId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalFacebookErrorWhenNoMatchingToken() { + tokenCount, userCount := 0, 0 + code := "authcode" + server := FacebookTestSignupSetup(ts, &tokenCount, &userCount, code, facebookUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "facebook", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalFacebookErrorWhenWrongToken() { + ts.createUser("facebookTestId", "facebook@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := FacebookTestSignupSetup(ts, &tokenCount, &userCount, code, facebookUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "facebook", "wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalFacebookErrorWhenEmailDoesntMatch() { + ts.createUser("facebookTestId", "facebook@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := FacebookTestSignupSetup(ts, &tokenCount, &userCount, code, facebookUserWrongEmail) + defer server.Close() + + u := performAuthorization(ts, "facebook", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from 
external provider", "invalid_request", "") +} diff --git a/auth_v2.169.0/internal/api/external_figma_test.go b/auth_v2.169.0/internal/api/external_figma_test.go new file mode 100644 index 0000000..6e119b9 --- /dev/null +++ b/auth_v2.169.0/internal/api/external_figma_test.go @@ -0,0 +1,264 @@ +package api + +import ( + "bytes" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "time" + + jwt "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/models" +) + +func (ts *ExternalTestSuite) TestSignupExternalFigma() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=figma", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.Figma.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Figma.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("files:read", q.Get("scope")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("figma", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func FigmaTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, userCount *int, code string, email string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/oauth/token": + *tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Figma.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"access_token":"figma_token","expires_in":100000,"refresh_token":"figma_token"}`) + case "/v1/me": + *userCount++ + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{"id":"figma-test-id","email":"%s","handle":"Figma Test","img_url":"http://example.com/avatar"}`, email) + default: + w.WriteHeader(500) + ts.Fail("unknown figma oauth call %s", r.URL.Path) + } + })) + + ts.Config.External.Figma.URL = server.URL + + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalFigma_AuthorizationCode() { + tokenCount, userCount := 0, 0 + code := "authcode" + email := "figma@example.com" + server := FigmaTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + u := performAuthorization(ts, "figma", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "figma@example.com", "Figma Test", "figma-test-id", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestSignupExternalFigma_PKCE() { + tokenCount, userCount := 0, 0 + code := "authcode" + + // for the plain challenge method, the code verifier == code challenge + // code challenge has to be between 43 - 128 chars for the plain challenge method + codeVerifier := "testtesttesttesttesttesttesttesttesttesttesttesttesttest" + + email := "figma@example.com" + server := FigmaTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + cases := []struct { + desc 
string + codeChallengeMethod string + }{ + { + desc: "SHA256", + codeChallengeMethod: "s256", + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + var codeChallenge string + if c.codeChallengeMethod == "s256" { + hashedCodeVerifier := sha256.Sum256([]byte(codeVerifier)) + codeChallenge = base64.RawURLEncoding.EncodeToString(hashedCodeVerifier[:]) + } else { + codeChallenge = codeVerifier + } + // Check for valid auth code returned + u := performPKCEAuthorization(ts, "figma", code, codeChallenge, c.codeChallengeMethod) + m, err := url.ParseQuery(u.RawQuery) + authCode := m["code"][0] + require.NoError(ts.T(), err) + require.NotEmpty(ts.T(), authCode) + + // Check for valid provider access token, mock does not return refresh token + user, err := models.FindUserByEmailAndAudience(ts.API.db, "figma@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + require.NotEmpty(ts.T(), user) + flowState, err := models.FindFlowStateByAuthCode(ts.API.db, authCode) + require.NoError(ts.T(), err) + require.Equal(ts.T(), "figma_token", flowState.ProviderAccessToken) + + // Exchange Auth Code for token + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "code_verifier": codeVerifier, + "auth_code": authCode, + })) + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=pkce", &buffer) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + // Validate that access token and provider tokens are present + data := AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + require.NotEmpty(ts.T(), data.Token) + require.NotEmpty(ts.T(), data.RefreshToken) + require.NotEmpty(ts.T(), data.ProviderAccessToken) + require.Equal(ts.T(), data.User.ID, user.ID) + }) + } +} + +func (ts *ExternalTestSuite) TestSignupExternalFigmaDisableSignupErrorWhenNoUser() { + ts.Config.DisableSignup = true + tokenCount, userCount := 0, 0 + code := "authcode" + email := "figma@example.com" + server := FigmaTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + u := performAuthorization(ts, "figma", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", "figma@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalFigmaDisableSignupErrorWhenEmptyEmail() { + ts.Config.DisableSignup = true + tokenCount, userCount := 0, 0 + code := "authcode" + email := "" + server := FigmaTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + u := performAuthorization(ts, "figma", code, "") + + assertAuthorizationFailure(ts, u, "Error getting user email from external provider", "server_error", "figma@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalFigmaDisableSignupSuccessWithPrimaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("figma-test-id", "figma@example.com", "Figma Test", "http://example.com/avatar", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + email := "figma@example.com" + server := FigmaTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + u := performAuthorization(ts, "figma", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "figma@example.com", "Figma Test", "figma-test-id", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) 
TestInviteTokenExternalFigmaSuccessWhenMatchingToken() { + // name and avatar should be populated from Figma API + ts.createUser("figma-test-id", "figma@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + email := "figma@example.com" + server := FigmaTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + u := performAuthorization(ts, "figma", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "figma@example.com", "Figma Test", "figma-test-id", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalFigmaErrorWhenNoMatchingToken() { + tokenCount, userCount := 0, 0 + code := "authcode" + email := "figma@example.com" + server := FigmaTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + w := performAuthorizationRequest(ts, "figma", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalFigmaErrorWhenWrongToken() { + ts.createUser("figma-test-id", "figma@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + email := "figma@example.com" + server := FigmaTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + w := performAuthorizationRequest(ts, "figma", "wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalFigmaErrorWhenEmailDoesntMatch() { + ts.createUser("figma-test-id", "figma@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + email := "other@example.com" + server := FigmaTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + u := performAuthorization(ts, "figma", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from external provider", "invalid_request", "") +} + +func (ts *ExternalTestSuite) TestSignupExternalFigmaErrorWhenUserBanned() { + tokenCount, userCount := 0, 0 + code := "authcode" + email := "figma@example.com" + + server := FigmaTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + u := performAuthorization(ts, "figma", code, "") + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "figma@example.com", "Figma Test", "figma-test-id", "http://example.com/avatar") + + user, err := models.FindUserByEmailAndAudience(ts.API.db, "figma@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + t := time.Now().Add(24 * time.Hour) + user.BannedUntil = &t + require.NoError(ts.T(), ts.API.db.UpdateOnly(user, "banned_until")) + + u = performAuthorization(ts, "figma", code, "") + assertAuthorizationFailure(ts, u, "User is banned", "access_denied", "") +} diff --git a/auth_v2.169.0/internal/api/external_fly_test.go b/auth_v2.169.0/internal/api/external_fly_test.go new file mode 100644 index 0000000..cf357c9 --- /dev/null +++ b/auth_v2.169.0/internal/api/external_fly_test.go @@ -0,0 +1,264 @@ +package api + +import ( + "bytes" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "time" + + jwt "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/models" +) + +func (ts *ExternalTestSuite) TestSignupExternalFly() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=fly", nil) + w := httptest.NewRecorder() + 
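// issue the authorize request and inspect the redirect to the provider
+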
ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.Fly.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Fly.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("read", q.Get("scope")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("fly", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func FlyTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, userCount *int, code string, email string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/oauth/token": + *tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Fly.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"access_token":"fly_token","expires_in":100000,"refresh_token":"fly_refresh_token"}`) + case "/oauth/token/info": + *userCount++ + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{"resource_owner_id":"test_resource_owner_id","scope":["read"],"expires_in":1111,"application":{"uid":"test_app_uid"},"created_at":1696003692,"user_id":"test_user_id","user_name":"test_user","email":"%s","organizations":[{"id":"test_org_id","role":"test"}]}`, email) + default: + w.WriteHeader(500) + ts.Fail("unknown fly oauth call %s", r.URL.Path) + } + })) + + ts.Config.External.Fly.URL = server.URL + + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalFly_AuthorizationCode() { + tokenCount, userCount := 0, 0 + code := "authcode" + email := "fly@example.com" + server := FlyTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + u := performAuthorization(ts, "fly", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "fly@example.com", "test_user", "test_user_id", "") +} + +func (ts *ExternalTestSuite) TestSignupExternalFly_PKCE() { + tokenCount, userCount := 0, 0 + code := "authcode" + + // for the plain challenge method, the code verifier == code challenge + // code challenge has to be between 43 - 128 chars for the plain challenge method + codeVerifier := "testtesttesttesttesttesttesttesttesttesttesttesttesttest" + + email := "fly@example.com" + server := FlyTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + cases := []struct { + desc string + codeChallengeMethod string + }{ + { + desc: "SHA256", + codeChallengeMethod: "s256", + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + var codeChallenge string + if c.codeChallengeMethod == "s256" { + hashedCodeVerifier := sha256.Sum256([]byte(codeVerifier)) + codeChallenge = base64.RawURLEncoding.EncodeToString(hashedCodeVerifier[:]) + } else { + codeChallenge = codeVerifier + } + // Check for valid auth code returned + u := performPKCEAuthorization(ts, "fly", code, codeChallenge, c.codeChallengeMethod) + m, err := url.ParseQuery(u.RawQuery) + authCode := m["code"][0] + require.NoError(ts.T(), err) + 
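// the PKCE redirect carries a one-time auth code, exchanged below
+ // against /token?grant_type=pkce for a session and the provider token
+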
require.NotEmpty(ts.T(), authCode) + + // Check for valid provider access token, mock does not return refresh token + user, err := models.FindUserByEmailAndAudience(ts.API.db, "fly@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + require.NotEmpty(ts.T(), user) + flowState, err := models.FindFlowStateByAuthCode(ts.API.db, authCode) + require.NoError(ts.T(), err) + require.Equal(ts.T(), "fly_token", flowState.ProviderAccessToken) + + // Exchange Auth Code for token + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "code_verifier": codeVerifier, + "auth_code": authCode, + })) + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=pkce", &buffer) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + // Validate that access token and provider tokens are present + data := AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + require.NotEmpty(ts.T(), data.Token) + require.NotEmpty(ts.T(), data.RefreshToken) + require.NotEmpty(ts.T(), data.ProviderAccessToken) + require.Equal(ts.T(), data.User.ID, user.ID) + }) + } +} + +func (ts *ExternalTestSuite) TestSignupExternalFlyDisableSignupErrorWhenNoUser() { + ts.Config.DisableSignup = true + tokenCount, userCount := 0, 0 + code := "authcode" + email := "fly@example.com" + server := FlyTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + u := performAuthorization(ts, "fly", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", email) +} + +func (ts *ExternalTestSuite) TestSignupExternalFlyDisableSignupErrorWhenEmptyEmail() { + ts.Config.DisableSignup = true + tokenCount, userCount := 0, 0 + code := "authcode" + email := "" + server := FlyTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + u := performAuthorization(ts, "fly", code, "") + + assertAuthorizationFailure(ts, u, "Error getting user email from external provider", "server_error", "fly@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalFlyDisableSignupSuccessWithPrimaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("test_user_id", "fly@example.com", "test_user", "", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + email := "fly@example.com" + server := FlyTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + u := performAuthorization(ts, "fly", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "fly@example.com", "test_user", "test_user_id", "") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalFlySuccessWhenMatchingToken() { + // name and avatar should be populated from fly API + ts.createUser("test_user_id", "fly@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + email := "fly@example.com" + server := FlyTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + u := performAuthorization(ts, "fly", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "fly@example.com", "test_user", "test_user_id", "") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalFlyErrorWhenNoMatchingToken() { + tokenCount, userCount := 0, 0 + code := "authcode" + email := "fly@example.com" + server := FlyTestSignupSetup(ts, &tokenCount, &userCount, code, 
email) + defer server.Close() + + w := performAuthorizationRequest(ts, "fly", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalFlyErrorWhenWrongToken() { + ts.createUser("test_user_id", "fly@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + email := "fly@example.com" + server := FlyTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + w := performAuthorizationRequest(ts, "fly", "wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalFlyErrorWhenEmailDoesntMatch() { + ts.createUser("test_user_id", "fly@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + email := "other@example.com" + server := FlyTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + u := performAuthorization(ts, "fly", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from external provider", "invalid_request", "") +} + +func (ts *ExternalTestSuite) TestSignupExternalFlyErrorWhenUserBanned() { + tokenCount, userCount := 0, 0 + code := "authcode" + email := "fly@example.com" + + server := FlyTestSignupSetup(ts, &tokenCount, &userCount, code, email) + defer server.Close() + + u := performAuthorization(ts, "fly", code, "") + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "fly@example.com", "test_user", "test_user_id", "") + + user, err := models.FindUserByEmailAndAudience(ts.API.db, "fly@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + t := time.Now().Add(24 * time.Hour) + user.BannedUntil = &t + require.NoError(ts.T(), ts.API.db.UpdateOnly(user, "banned_until")) + + u = performAuthorization(ts, "fly", code, "") + assertAuthorizationFailure(ts, u, "User is banned", "access_denied", "") +} diff --git a/auth_v2.169.0/internal/api/external_github_test.go b/auth_v2.169.0/internal/api/external_github_test.go new file mode 100644 index 0000000..7b9d31e --- /dev/null +++ b/auth_v2.169.0/internal/api/external_github_test.go @@ -0,0 +1,300 @@ +package api + +import ( + "bytes" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "time" + + jwt "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/models" +) + +func (ts *ExternalTestSuite) TestSignupExternalGithub() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=github", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.Github.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Github.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("user:email", q.Get("scope")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("github", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func GitHubTestSignupSetup(ts *ExternalTestSuite, tokenCount 
*int, userCount *int, code string, emails string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/login/oauth/access_token": + *tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Github.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"access_token":"github_token","expires_in":100000}`) + case "/api/v3/user": + *userCount++ + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"id":123, "name":"GitHub Test","avatar_url":"http://example.com/avatar"}`) + case "/api/v3/user/emails": + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, emails) + default: + w.WriteHeader(500) + ts.Fail("unknown github oauth call %s", r.URL.Path) + } + })) + + ts.Config.External.Github.URL = server.URL + + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalGitHub_AuthorizationCode() { + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"github@example.com", "primary": true, "verified": true}]` + server := GitHubTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "github", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "github@example.com", "GitHub Test", "123", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestSignupExternalGitHub_PKCE() { + tokenCount, userCount := 0, 0 + code := "authcode" + + // for the plain challenge method, the code verifier == code challenge + // code challenge has to be between 43 - 128 chars for the plain challenge method + codeVerifier := "testtesttesttesttesttesttesttesttesttesttesttesttesttest" + + emails := `[{"email":"github@example.com", "primary": true, "verified": true}]` + server := GitHubTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + cases := []struct { + desc string + codeChallengeMethod string + }{ + { + desc: "SHA256", + codeChallengeMethod: "s256", + }, + { + desc: "Plain", + codeChallengeMethod: "plain", + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + var codeChallenge string + if c.codeChallengeMethod == "s256" { + hashedCodeVerifier := sha256.Sum256([]byte(codeVerifier)) + codeChallenge = base64.RawURLEncoding.EncodeToString(hashedCodeVerifier[:]) + } else { + codeChallenge = codeVerifier + } + // Check for valid auth code returned + u := performPKCEAuthorization(ts, "github", code, codeChallenge, c.codeChallengeMethod) + m, err := url.ParseQuery(u.RawQuery) + authCode := m["code"][0] + require.NoError(ts.T(), err) + require.NotEmpty(ts.T(), authCode) + + // Check for valid provider access token, mock does not return refresh token + user, err := models.FindUserByEmailAndAudience(ts.API.db, "github@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + require.NotEmpty(ts.T(), user) + flowState, err := models.FindFlowStateByAuthCode(ts.API.db, authCode) + require.NoError(ts.T(), err) + require.Equal(ts.T(), "github_token", flowState.ProviderAccessToken) + + // Exchange Auth Code for token + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "code_verifier": codeVerifier, + "auth_code": authCode, + })) + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=pkce", &buffer) + 
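+ // Standard PKCE exchange (RFC 7636): the server is expected to re-derive the
+ // challenge from code_verifier using the stored challenge method and compare
+ // it against the challenge recorded with the flow state.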
req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + // Validate that access token and provider tokens are present + data := AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + require.NotEmpty(ts.T(), data.Token) + require.NotEmpty(ts.T(), data.RefreshToken) + require.NotEmpty(ts.T(), data.ProviderAccessToken) + require.Equal(ts.T(), data.User.ID, user.ID) + }) + } + +} + +func (ts *ExternalTestSuite) TestSignupExternalGitHubDisableSignupErrorWhenNoUser() { + ts.Config.DisableSignup = true + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"github@example.com", "primary": true, "verified": true}]` + server := GitHubTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "github", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", "github@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalGitHubDisableSignupErrorWhenEmptyEmail() { + ts.Config.DisableSignup = true + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"primary": true, "verified": true}]` + server := GitHubTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "github", code, "") + + assertAuthorizationFailure(ts, u, "Error getting user email from external provider", "server_error", "github@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalGitHubDisableSignupSuccessWithPrimaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("123", "github@example.com", "GitHub Test", "http://example.com/avatar", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"github@example.com", "primary": true, "verified": true}]` + server := GitHubTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "github", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "github@example.com", "GitHub Test", "123", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestSignupExternalGitHubDisableSignupSuccessWithNonPrimaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("123", "secondary@example.com", "GitHub Test", "http://example.com/avatar", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"primary@example.com", "primary": true, "verified": true},{"email":"secondary@example.com", "primary": false, "verified": true}]` + server := GitHubTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "github", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "secondary@example.com", "GitHub Test", "123", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalGitHubSuccessWhenMatchingToken() { + // name and avatar should be populated from GitHub API + ts.createUser("123", "github@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"github@example.com", "primary": true, "verified": true}]` + server := GitHubTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "github", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, 
"github@example.com", "GitHub Test", "123", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalGitHubErrorWhenNoMatchingToken() { + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"github@example.com", "primary": true, "verified": true}]` + server := GitHubTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + w := performAuthorizationRequest(ts, "github", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalGitHubErrorWhenWrongToken() { + ts.createUser("123", "github@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"github@example.com", "primary": true, "verified": true}]` + server := GitHubTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + w := performAuthorizationRequest(ts, "github", "wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalGitHubErrorWhenEmailDoesntMatch() { + ts.createUser("123", "github@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"other@example.com", "primary": true, "verified": true}]` + server := GitHubTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "github", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from external provider", "invalid_request", "") +} + +func (ts *ExternalTestSuite) TestSignupExternalGitHubErrorWhenVerifiedFalse() { + ts.Config.Mailer.AllowUnverifiedEmailSignIns = false + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"github@example.com", "primary": true, "verified": false}]` + server := GitHubTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "github", code, "") + + assertAuthorizationFailure(ts, u, "Unverified email with github. 
A confirmation email has been sent to your github email", "access_denied", "") +} + +func (ts *ExternalTestSuite) TestSignupExternalGitHubErrorWhenUserBanned() { + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"github@example.com", "primary": true, "verified": true}]` + server := GitHubTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "github", code, "") + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "github@example.com", "GitHub Test", "123", "http://example.com/avatar") + + user, err := models.FindUserByEmailAndAudience(ts.API.db, "github@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + t := time.Now().Add(24 * time.Hour) + user.BannedUntil = &t + require.NoError(ts.T(), ts.API.db.UpdateOnly(user, "banned_until")) + + u = performAuthorization(ts, "github", code, "") + assertAuthorizationFailure(ts, u, "User is banned", "access_denied", "") +} diff --git a/auth_v2.169.0/internal/api/external_gitlab_test.go b/auth_v2.169.0/internal/api/external_gitlab_test.go new file mode 100644 index 0000000..5a14a0a --- /dev/null +++ b/auth_v2.169.0/internal/api/external_gitlab_test.go @@ -0,0 +1,199 @@ +package api + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + + jwt "github.com/golang-jwt/jwt/v5" +) + +const ( + gitlabUser string = `{"id":123,"email":"gitlab@example.com","name":"GitLab Test","avatar_url":"http://example.com/avatar","confirmed_at":"2012-05-23T09:05:22Z"}` + gitlabUserWrongEmail string = `{"id":123,"email":"other@example.com","name":"GitLab Test","avatar_url":"http://example.com/avatar","confirmed_at":"2012-05-23T09:05:22Z"}` + gitlabUserNoEmail string = `{"id":123,"name":"Gitlab Test","avatar_url":"http://example.com/avatar"}` +) + +func (ts *ExternalTestSuite) TestSignupExternalGitlab() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=gitlab", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.Gitlab.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Gitlab.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("read_user", q.Get("scope")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("gitlab", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func GitlabTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, userCount *int, code string, user string, emails string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/oauth/token": + *tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Gitlab.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"access_token":"gitlab_token","expires_in":100000}`) + case "/api/v4/user": + *userCount++ + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, user) + case 
"/api/v4/user/emails": + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, emails) + default: + w.WriteHeader(500) + ts.Fail("unknown gitlab oauth call %s", r.URL.Path) + } + })) + + ts.Config.External.Gitlab.URL = server.URL + + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalGitlab_AuthorizationCode() { + // additional emails from GitLab don't return confirm status + ts.Config.Mailer.Autoconfirm = true + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"id":1,"email":"gitlab@example.com"}]` + server := GitlabTestSignupSetup(ts, &tokenCount, &userCount, code, gitlabUser, emails) + defer server.Close() + + u := performAuthorization(ts, "gitlab", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "gitlab@example.com", "GitLab Test", "123", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestSignupExternalGitLabDisableSignupErrorWhenNoUser() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"id":1,"email":"gitlab@example.com"}]` + server := GitlabTestSignupSetup(ts, &tokenCount, &userCount, code, gitlabUser, emails) + defer server.Close() + + u := performAuthorization(ts, "gitlab", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", "gitlab@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalGitLabDisableSignupErrorWhenEmptyEmail() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[]` + server := GitlabTestSignupSetup(ts, &tokenCount, &userCount, code, gitlabUserNoEmail, emails) + defer server.Close() + + u := performAuthorization(ts, "gitlab", code, "") + + assertAuthorizationFailure(ts, u, "Error getting user email from external provider", "server_error", "gitlab@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalGitLabDisableSignupSuccessWithPrimaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("123", "gitlab@example.com", "GitLab Test", "http://example.com/avatar", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := "[]" + server := GitlabTestSignupSetup(ts, &tokenCount, &userCount, code, gitlabUser, emails) + defer server.Close() + + u := performAuthorization(ts, "gitlab", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "gitlab@example.com", "GitLab Test", "123", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestSignupExternalGitLabDisableSignupSuccessWithSecondaryEmail() { + // additional emails from GitLab don't return confirm status + ts.Config.Mailer.Autoconfirm = true + ts.Config.DisableSignup = true + + ts.createUser("123", "secondary@example.com", "GitLab Test", "http://example.com/avatar", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"id":1,"email":"secondary@example.com"}]` + server := GitlabTestSignupSetup(ts, &tokenCount, &userCount, code, gitlabUser, emails) + defer server.Close() + + u := performAuthorization(ts, "gitlab", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "secondary@example.com", "GitLab Test", "123", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalGitLabSuccessWhenMatchingToken() { + // name and avatar should be populated from GitLab API + ts.createUser("123", "gitlab@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := "[]" + server := GitlabTestSignupSetup(ts, 
&tokenCount, &userCount, code, gitlabUser, emails) + defer server.Close() + + u := performAuthorization(ts, "gitlab", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "gitlab@example.com", "GitLab Test", "123", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalGitLabErrorWhenNoMatchingToken() { + tokenCount, userCount := 0, 0 + code := "authcode" + emails := "[]" + server := GitlabTestSignupSetup(ts, &tokenCount, &userCount, code, gitlabUser, emails) + defer server.Close() + + w := performAuthorizationRequest(ts, "gitlab", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalGitLabErrorWhenWrongToken() { + ts.createUser("123", "gitlab@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := "[]" + server := GitlabTestSignupSetup(ts, &tokenCount, &userCount, code, gitlabUser, emails) + defer server.Close() + + w := performAuthorizationRequest(ts, "gitlab", "wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalGitLabErrorWhenEmailDoesntMatch() { + ts.createUser("123", "gitlab@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := "[]" + server := GitlabTestSignupSetup(ts, &tokenCount, &userCount, code, gitlabUserWrongEmail, emails) + defer server.Close() + + u := performAuthorization(ts, "gitlab", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from external provider", "invalid_request", "") +} diff --git a/auth_v2.169.0/internal/api/external_google_test.go b/auth_v2.169.0/internal/api/external_google_test.go new file mode 100644 index 0000000..7b3b6d1 --- /dev/null +++ b/auth_v2.169.0/internal/api/external_google_test.go @@ -0,0 +1,181 @@ +package api + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + + jwt "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/api/provider" +) + +const ( + googleUser string = `{"id":"googleTestId","name":"Google Test","picture":"http://example.com/avatar","email":"google@example.com","verified_email":true}}` + googleUserWrongEmail string = `{"id":"googleTestId","name":"Google Test","picture":"http://example.com/avatar","email":"other@example.com","verified_email":true}}` + googleUserNoEmail string = `{"id":"googleTestId","name":"Google Test","picture":"http://example.com/avatar","verified_email":false}}` +) + +func (ts *ExternalTestSuite) TestSignupExternalGoogle() { + provider.ResetGoogleProvider() + + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=google", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.Google.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Google.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("email profile", q.Get("scope")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil 
+ }) + ts.Require().NoError(err) + + ts.Equal("google", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func GoogleTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, userCount *int, code string, user string) *httptest.Server { + provider.ResetGoogleProvider() + + var server *httptest.Server + server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/.well-known/openid-configuration": + w.Header().Add("Content-Type", "application/json") + require.NoError(ts.T(), json.NewEncoder(w).Encode(map[string]any{ + "issuer": server.URL, + "token_endpoint": server.URL + "/o/oauth2/token", + })) + case "/o/oauth2/token": + *tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Google.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"access_token":"google_token","expires_in":100000}`) + case "/userinfo/v2/me": + *userCount++ + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, user) + default: + w.WriteHeader(500) + ts.Fail("unknown google oauth call %s", r.URL.Path) + } + })) + + provider.OverrideGoogleProvider(server.URL, server.URL+"/userinfo/v2/me") + + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalGoogle_AuthorizationCode() { + ts.Config.DisableSignup = false + tokenCount, userCount := 0, 0 + code := "authcode" + server := GoogleTestSignupSetup(ts, &tokenCount, &userCount, code, googleUser) + defer server.Close() + + u := performAuthorization(ts, "google", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "google@example.com", "Google Test", "googleTestId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestSignupExternalGoogleDisableSignupErrorWhenNoUser() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + server := GoogleTestSignupSetup(ts, &tokenCount, &userCount, code, googleUser) + defer server.Close() + + u := performAuthorization(ts, "google", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", "google@example.com") +} +func (ts *ExternalTestSuite) TestSignupExternalGoogleDisableSignupErrorWhenEmptyEmail() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + server := GoogleTestSignupSetup(ts, &tokenCount, &userCount, code, googleUserNoEmail) + defer server.Close() + + u := performAuthorization(ts, "google", code, "") + + assertAuthorizationFailure(ts, u, "Error getting user email from external provider", "server_error", "google@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalGoogleDisableSignupSuccessWithPrimaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("googleTestId", "google@example.com", "Google Test", "http://example.com/avatar", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := GoogleTestSignupSetup(ts, &tokenCount, &userCount, code, googleUser) + defer server.Close() + + u := performAuthorization(ts, "google", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "google@example.com", "Google Test", "googleTestId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalGoogleSuccessWhenMatchingToken() { + // name and avatar should be populated from Google API + ts.createUser("googleTestId", "google@example.com", "", "", "invite_token") 
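+ // The invited user starts with an empty name and avatar; a successful
+ // authorization is expected to backfill both from the mocked Google
+ // userinfo payload.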
+ + tokenCount, userCount := 0, 0 + code := "authcode" + server := GoogleTestSignupSetup(ts, &tokenCount, &userCount, code, googleUser) + defer server.Close() + + u := performAuthorization(ts, "google", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "google@example.com", "Google Test", "googleTestId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalGoogleErrorWhenNoMatchingToken() { + tokenCount, userCount := 0, 0 + code := "authcode" + server := GoogleTestSignupSetup(ts, &tokenCount, &userCount, code, googleUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "google", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalGoogleErrorWhenWrongToken() { + ts.createUser("googleTestId", "google@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := GoogleTestSignupSetup(ts, &tokenCount, &userCount, code, googleUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "google", "wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalGoogleErrorWhenEmailDoesntMatch() { + ts.createUser("googleTestId", "google@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := GoogleTestSignupSetup(ts, &tokenCount, &userCount, code, googleUserWrongEmail) + defer server.Close() + + u := performAuthorization(ts, "google", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from external provider", "invalid_request", "") +} diff --git a/auth_v2.169.0/internal/api/external_kakao_test.go b/auth_v2.169.0/internal/api/external_kakao_test.go new file mode 100644 index 0000000..729f723 --- /dev/null +++ b/auth_v2.169.0/internal/api/external_kakao_test.go @@ -0,0 +1,238 @@ +package api + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "time" + + jwt "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/api/provider" + "github.com/supabase/auth/internal/models" +) + +func (ts *ExternalTestSuite) TestSignupExternalKakao() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=kakao", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.Kakao.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Kakao.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("kakao", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func KakaoTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, userCount *int, code string, emails string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/oauth/token": + *tokenCount++ + ts.Equal(code, r.FormValue("code")) + 
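+ // The mock asserts the exact exchange parameters the auth server is expected
+ // to send: the original code, the authorization_code grant type, and the
+ // configured redirect URI.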
ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Kakao.RedirectURI, r.FormValue("redirect_uri")) + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"access_token":"kakao_token","expires_in":100000}`) + case "/v2/user/me": + *userCount++ + var emailList []provider.Email + if err := json.Unmarshal([]byte(emails), &emailList); err != nil { + ts.Fail("Invalid email json %s", emails) + } + + var email *provider.Email + + for i, e := range emailList { + if len(e.Email) > 0 { + email = &emailList[i] + break + } + } + + w.Header().Add("Content-Type", "application/json") + if email != nil { + fmt.Fprintf(w, ` + { + "id":123, + "kakao_account": { + "profile": { + "nickname":"Kakao Test", + "profile_image_url":"http://example.com/avatar" + }, + "email": "%v", + "is_email_valid": %v, + "is_email_verified": %v + } + }`, email.Email, email.Verified, email.Verified) + } else { + fmt.Fprint(w, ` + { + "id":123, + "kakao_account": { + "profile": { + "nickname":"Kakao Test", + "profile_image_url":"http://example.com/avatar" + } + } + }`) + } + default: + w.WriteHeader(500) + ts.Fail("unknown kakao oauth call %s", r.URL.Path) + } + })) + ts.Config.External.Kakao.URL = server.URL + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalKakao_AuthorizationCode() { + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"kakao@example.com", "primary": true, "verified": true}]` + server := KakaoTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + u := performAuthorization(ts, "kakao", code, "") + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "kakao@example.com", "Kakao Test", "123", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestSignupExternalKakaoDisableSignupErrorWhenNoUser() { + ts.Config.DisableSignup = true + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"kakao@example.com", "primary": true, "verified": true}]` + server := KakaoTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "kakao", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", "kakao@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalKakaoDisableSignupErrorWhenEmptyEmail() { + ts.Config.DisableSignup = true + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"primary": true, "verified": true}]` + server := KakaoTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "kakao", code, "") + + assertAuthorizationFailure(ts, u, "Error getting user email from external provider", "server_error", "kakao@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalKakaoDisableSignupSuccessWithPrimaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("123", "kakao@example.com", "Kakao Test", "http://example.com/avatar", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"kakao@example.com", "primary": true, "verified": true}]` + server := KakaoTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "kakao", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "kakao@example.com", "Kakao Test", "123", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalKakaoSuccessWhenMatchingToken() { + // name and avatar should be 
populated from Kakao API + ts.createUser("123", "kakao@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"kakao@example.com", "primary": true, "verified": true}]` + server := KakaoTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "kakao", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "kakao@example.com", "Kakao Test", "123", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalKakaoErrorWhenNoMatchingToken() { + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"kakao@example.com", "primary": true, "verified": true}]` + server := KakaoTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + w := performAuthorizationRequest(ts, "kakao", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalKakaoErrorWhenWrongToken() { + ts.createUser("123", "kakao@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"kakao@example.com", "primary": true, "verified": true}]` + server := KakaoTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + w := performAuthorizationRequest(ts, "kakao", "wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalKakaoErrorWhenEmailDoesntMatch() { + ts.createUser("123", "kakao@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"other@example.com", "primary": true, "verified": true}]` + server := KakaoTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "kakao", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from external provider", "invalid_request", "") +} + +func (ts *ExternalTestSuite) TestSignupExternalKakaoErrorWhenVerifiedFalse() { + ts.Config.Mailer.AllowUnverifiedEmailSignIns = false + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"kakao@example.com", "primary": true, "verified": false}]` + server := KakaoTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "kakao", code, "") + + assertAuthorizationFailure(ts, u, "Unverified email with kakao. 
A confirmation email has been sent to your kakao email", "access_denied", "") +} + +func (ts *ExternalTestSuite) TestSignupExternalKakaoErrorWhenUserBanned() { + tokenCount, userCount := 0, 0 + code := "authcode" + emails := `[{"email":"kakao@example.com", "primary": true, "verified": true}]` + server := KakaoTestSignupSetup(ts, &tokenCount, &userCount, code, emails) + defer server.Close() + + u := performAuthorization(ts, "kakao", code, "") + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "kakao@example.com", "Kakao Test", "123", "http://example.com/avatar") + + user, err := models.FindUserByEmailAndAudience(ts.API.db, "kakao@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + t := time.Now().Add(24 * time.Hour) + user.BannedUntil = &t + require.NoError(ts.T(), ts.API.db.UpdateOnly(user, "banned_until")) + + u = performAuthorization(ts, "kakao", code, "") + assertAuthorizationFailure(ts, u, "User is banned", "access_denied", "") +} diff --git a/auth_v2.169.0/internal/api/external_keycloak_test.go b/auth_v2.169.0/internal/api/external_keycloak_test.go new file mode 100644 index 0000000..a0952ea --- /dev/null +++ b/auth_v2.169.0/internal/api/external_keycloak_test.go @@ -0,0 +1,182 @@ +package api + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + + jwt "github.com/golang-jwt/jwt/v5" +) + +const ( + keycloakUser string = `{"sub": "keycloaktestid", "name": "Keycloak Test", "email": "keycloak@example.com", "preferred_username": "keycloak", "email_verified": true}` + keycloakUserNoEmail string = `{"sub": "keycloaktestid", "name": "Keycloak Test", "preferred_username": "keycloak", "email_verified": false}` +) + +func (ts *ExternalTestSuite) TestSignupExternalKeycloak() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=keycloak", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.Keycloak.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Keycloak.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("profile email", q.Get("scope")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("keycloak", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func KeycloakTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, userCount *int, code string, user string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/protocol/openid-connect/token": + *tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Keycloak.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"access_token":"keycloak_token","expires_in":100000}`) + case "/protocol/openid-connect/userinfo": + *userCount++ + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, user) + default: + w.WriteHeader(500) + ts.Fail("unknown keycloak oauth call %s", r.URL.Path) + } + })) + + 
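+ // Keycloak has no default base URL, so each test wires the provider to the
+ // mock server; TestSignupExternalKeycloakWithoutURLSetup clears it again to
+ // exercise the 400 path.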
ts.Config.External.Keycloak.URL = server.URL + + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalKeycloakWithoutURLSetup() { + ts.createUser("keycloaktestid", "keycloak@example.com", "Keycloak Test", "", "") + tokenCount, userCount := 0, 0 + code := "authcode" + server := KeycloakTestSignupSetup(ts, &tokenCount, &userCount, code, keycloakUser) + ts.Config.External.Keycloak.URL = "" + defer server.Close() + + w := performAuthorizationRequest(ts, "keycloak", code) + ts.Equal(w.Code, http.StatusBadRequest) +} + +func (ts *ExternalTestSuite) TestSignupExternalKeycloak_AuthorizationCode() { + ts.Config.DisableSignup = false + ts.createUser("keycloaktestid", "keycloak@example.com", "Keycloak Test", "", "") + tokenCount, userCount := 0, 0 + code := "authcode" + server := KeycloakTestSignupSetup(ts, &tokenCount, &userCount, code, keycloakUser) + defer server.Close() + + u := performAuthorization(ts, "keycloak", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "keycloak@example.com", "Keycloak Test", "keycloaktestid", "") +} + +func (ts *ExternalTestSuite) TestSignupExternalKeycloakDisableSignupErrorWhenNoUser() { + ts.Config.DisableSignup = true + tokenCount, userCount := 0, 0 + code := "authcode" + server := KeycloakTestSignupSetup(ts, &tokenCount, &userCount, code, keycloakUser) + defer server.Close() + + u := performAuthorization(ts, "keycloak", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", "keycloak@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalKeycloakDisableSignupErrorWhenNoEmail() { + ts.Config.DisableSignup = true + tokenCount, userCount := 0, 0 + code := "authcode" + server := KeycloakTestSignupSetup(ts, &tokenCount, &userCount, code, keycloakUserNoEmail) + defer server.Close() + + u := performAuthorization(ts, "keycloak", code, "") + + assertAuthorizationFailure(ts, u, "Error getting user email from external provider", "server_error", "keycloak@example.com") + +} + +func (ts *ExternalTestSuite) TestSignupExternalKeycloakDisableSignupSuccessWithPrimaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("keycloaktestid", "keycloak@example.com", "Keycloak Test", "", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := KeycloakTestSignupSetup(ts, &tokenCount, &userCount, code, keycloakUser) + defer server.Close() + + u := performAuthorization(ts, "keycloak", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "keycloak@example.com", "Keycloak Test", "keycloaktestid", "") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalKeycloakSuccessWhenMatchingToken() { + // name and avatar should be populated from Keycloak API + ts.createUser("keycloaktestid", "keycloak@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := KeycloakTestSignupSetup(ts, &tokenCount, &userCount, code, keycloakUser) + defer server.Close() + + u := performAuthorization(ts, "keycloak", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "keycloak@example.com", "Keycloak Test", "keycloaktestid", "") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalKeycloakErrorWhenNoMatchingToken() { + tokenCount, userCount := 0, 0 + code := "authcode" + keycloakUser := `{"name":"Keycloak Test","avatar":{"href":"http://example.com/avatar"}}` + server := KeycloakTestSignupSetup(ts, &tokenCount, &userCount, code, keycloakUser) + defer server.Close() + + w := 
performAuthorizationRequest(ts, "keycloak", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalKeycloakErrorWhenWrongToken() { + ts.createUser("keycloaktestid", "keycloak@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + keycloakUser := `{"name":"Keycloak Test","avatar":{"href":"http://example.com/avatar"}}` + server := KeycloakTestSignupSetup(ts, &tokenCount, &userCount, code, keycloakUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "keycloak", "wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalKeycloakErrorWhenEmailDoesntMatch() { + ts.createUser("keycloaktestid", "keycloak@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + keycloakUser := `{"name":"Keycloak Test", "email":"other@example.com", "avatar":{"href":"http://example.com/avatar"}}` + server := KeycloakTestSignupSetup(ts, &tokenCount, &userCount, code, keycloakUser) + defer server.Close() + + u := performAuthorization(ts, "keycloak", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from external provider", "invalid_request", "") +} diff --git a/auth_v2.169.0/internal/api/external_linkedin_test.go b/auth_v2.169.0/internal/api/external_linkedin_test.go new file mode 100644 index 0000000..fe49932 --- /dev/null +++ b/auth_v2.169.0/internal/api/external_linkedin_test.go @@ -0,0 +1,170 @@ +package api + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + + jwt "github.com/golang-jwt/jwt/v5" +) + +const ( + linkedinUser string = `{"id":"linkedinTestId","firstName":{"localized":{"en_US":"Linkedin"},"preferredLocale":{"country":"US","language":"en"}},"lastName":{"localized":{"en_US":"Test"},"preferredLocale":{"country":"US","language":"en"}},"profilePicture":{"displayImage~":{"elements":[{"identifiers":[{"identifier":"http://example.com/avatar"}]}]}}}` + linkedinUserNoProfilePic string = `{"id":"linkedinTestId","firstName":{"localized":{"en_US":"Linkedin"},"preferredLocale":{"country":"US","language":"en"}},"lastName":{"localized":{"en_US":"Test"},"preferredLocale":{"country":"US","language":"en"}},"profilePicture":{"displayImage~":{"elements":[]}}}` + linkedinEmail string = `{"elements": [{"handle": "","handle~": {"emailAddress": "linkedin@example.com"}}]}` + linkedinWrongEmail string = `{"elements": [{"handle": "","handle~": {"emailAddress": "other@example.com"}}]}` +) + +func (ts *ExternalTestSuite) TestSignupExternalLinkedin() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=linkedin", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.Linkedin.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Linkedin.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("r_emailaddress r_liteprofile", q.Get("scope")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("linkedin", 
claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func LinkedinTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, userCount *int, code string, user string, email string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/oauth/v2/accessToken": + *tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Linkedin.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"access_token":"linkedin_token","expires_in":100000}`) + case "/v2/me": + *userCount++ + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, user) + case "/v2/emailAddress": + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, email) + default: + w.WriteHeader(500) + ts.Fail("unknown linkedin oauth call %s", r.URL.Path) + } + })) + + ts.Config.External.Linkedin.URL = server.URL + + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalLinkedin_AuthorizationCode() { + ts.Config.DisableSignup = false + tokenCount, userCount := 0, 0 + code := "authcode" + server := LinkedinTestSignupSetup(ts, &tokenCount, &userCount, code, linkedinUser, linkedinEmail) + defer server.Close() + + u := performAuthorization(ts, "linkedin", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "linkedin@example.com", "Linkedin Test", "linkedinTestId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestSignupExternalLinkedinDisableSignupErrorWhenNoUser() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + server := LinkedinTestSignupSetup(ts, &tokenCount, &userCount, code, linkedinUser, linkedinEmail) + defer server.Close() + + u := performAuthorization(ts, "linkedin", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", "linkedin@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalLinkedinDisableSignupSuccessWithPrimaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("linkedinTestId", "linkedin@example.com", "Linkedin Test", "http://example.com/avatar", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := LinkedinTestSignupSetup(ts, &tokenCount, &userCount, code, linkedinUser, linkedinEmail) + defer server.Close() + + u := performAuthorization(ts, "linkedin", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "linkedin@example.com", "Linkedin Test", "linkedinTestId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalLinkedinSuccessWhenMatchingToken() { + // name and avatar should be populated from Linkedin API + ts.createUser("linkedinTestId", "linkedin@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := LinkedinTestSignupSetup(ts, &tokenCount, &userCount, code, linkedinUser, linkedinEmail) + defer server.Close() + + u := performAuthorization(ts, "linkedin", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "linkedin@example.com", "Linkedin Test", "linkedinTestId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalLinkedinErrorWhenNoMatchingToken() { + tokenCount, userCount := 0, 0 + code := "authcode" + server := LinkedinTestSignupSetup(ts, &tokenCount, &userCount, code, linkedinUser, linkedinEmail) + defer 
server.Close() + + w := performAuthorizationRequest(ts, "linkedin", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalLinkedinErrorWhenWrongToken() { + ts.createUser("linkedinTestId", "linkedin@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := LinkedinTestSignupSetup(ts, &tokenCount, &userCount, code, linkedinUser, linkedinEmail) + defer server.Close() + + w := performAuthorizationRequest(ts, "linkedin", "wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalLinkedinErrorWhenEmailDoesntMatch() { + ts.createUser("linkedinTestId", "linkedin@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := LinkedinTestSignupSetup(ts, &tokenCount, &userCount, code, linkedinUser, linkedinWrongEmail) + defer server.Close() + + u := performAuthorization(ts, "linkedin", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from external provider", "invalid_request", "") +} + +func (ts *ExternalTestSuite) TestSignupExternalLinkedin_MissingProfilePic() { + tokenCount, userCount := 0, 0 + code := "authcode" + server := LinkedinTestSignupSetup(ts, &tokenCount, &userCount, code, linkedinUserNoProfilePic, linkedinEmail) + defer server.Close() + + u := performAuthorization(ts, "linkedin", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "linkedin@example.com", "Linkedin Test", "linkedinTestId", "") +} diff --git a/auth_v2.169.0/internal/api/external_notion_test.go b/auth_v2.169.0/internal/api/external_notion_test.go new file mode 100644 index 0000000..268e449 --- /dev/null +++ b/auth_v2.169.0/internal/api/external_notion_test.go @@ -0,0 +1,170 @@ +package api + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + + jwt "github.com/golang-jwt/jwt/v5" +) + +const ( + notionUser string = `{"bot":{"owner":{"user":{"id":"notionTestId","name":"Notion Test","avatar_url":"http://example.com/avatar","person":{"email":"notion@example.com"},"verified_email":true}}}}` + notionUserWrongEmail string = `{"bot":{"owner":{"user":{"id":"notionTestId","name":"Notion Test","avatar_url":"http://example.com/avatar","person":{"email":"other@example.com"},"verified_email":true}}}}` + notionUserNoEmail string = `{"bot":{"owner":{"user":{"id":"notionTestId","name":"Notion Test","avatar_url":"http://example.com/avatar","verified_email":true}}}}` +) + +func (ts *ExternalTestSuite) TestSignupExternalNotion() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=notion", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.Notion.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Notion.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("notion", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func 
NotionTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, userCount *int, code string, user string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/v1/oauth/token": + *tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Notion.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"access_token":"notion_token","expires_in":100000}`) + case "/v1/users/me": + *userCount++ + ts.Contains(r.Header, "Authorization") + ts.Contains(r.Header, "Notion-Version") + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, user) + default: + w.WriteHeader(500) + ts.Fail("unknown notion oauth call %s", r.URL.Path) + } + })) + + ts.Config.External.Notion.URL = server.URL + + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalNotion_AuthorizationCode() { + ts.Config.DisableSignup = false + tokenCount, userCount := 0, 0 + code := "authcode" + server := NotionTestSignupSetup(ts, &tokenCount, &userCount, code, notionUser) + defer server.Close() + + u := performAuthorization(ts, "notion", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "notion@example.com", "Notion Test", "notionTestId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestSignupExternalNotionDisableSignupErrorWhenNoUser() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + server := NotionTestSignupSetup(ts, &tokenCount, &userCount, code, notionUser) + defer server.Close() + + u := performAuthorization(ts, "notion", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", "notion@example.com") +} +func (ts *ExternalTestSuite) TestSignupExternalNotionDisableSignupErrorWhenEmptyEmail() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + server := NotionTestSignupSetup(ts, &tokenCount, &userCount, code, notionUserNoEmail) + defer server.Close() + + u := performAuthorization(ts, "notion", code, "") + + assertAuthorizationFailure(ts, u, "Error getting user email from external provider", "server_error", "notion@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalNotionDisableSignupSuccessWithPrimaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("notionTestId", "notion@example.com", "Notion Test", "http://example.com/avatar", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := NotionTestSignupSetup(ts, &tokenCount, &userCount, code, notionUser) + defer server.Close() + + u := performAuthorization(ts, "notion", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "notion@example.com", "Notion Test", "notionTestId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalNotionSuccessWhenMatchingToken() { + // name and avatar should be populated from Notion API + ts.createUser("notionTestId", "notion@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := NotionTestSignupSetup(ts, &tokenCount, &userCount, code, notionUser) + defer server.Close() + + u := performAuthorization(ts, "notion", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "notion@example.com", "Notion Test", "notionTestId", 
"http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalNotionErrorWhenNoMatchingToken() { + tokenCount, userCount := 0, 0 + code := "authcode" + server := NotionTestSignupSetup(ts, &tokenCount, &userCount, code, notionUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "notion", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalNotionErrorWhenWrongToken() { + ts.createUser("notionTestId", "notion@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := NotionTestSignupSetup(ts, &tokenCount, &userCount, code, notionUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "notion", "wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalNotionErrorWhenEmailDoesntMatch() { + ts.createUser("notionTestId", "notion@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := NotionTestSignupSetup(ts, &tokenCount, &userCount, code, notionUserWrongEmail) + defer server.Close() + + u := performAuthorization(ts, "notion", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from external provider", "invalid_request", "") +} diff --git a/auth_v2.169.0/internal/api/external_oauth.go b/auth_v2.169.0/internal/api/external_oauth.go new file mode 100644 index 0000000..cb098e3 --- /dev/null +++ b/auth_v2.169.0/internal/api/external_oauth.go @@ -0,0 +1,155 @@ +package api + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/mrjones/oauth" + "github.com/sirupsen/logrus" + "github.com/supabase/auth/internal/api/provider" + "github.com/supabase/auth/internal/observability" + "github.com/supabase/auth/internal/utilities" +) + +// OAuthProviderData contains the userData and token returned by the oauth provider +type OAuthProviderData struct { + userData *provider.UserProvidedData + token string + refreshToken string + code string +} + +// loadFlowState parses the `state` query parameter as a JWS payload, +// extracting the provider requested +func (a *API) loadFlowState(w http.ResponseWriter, r *http.Request) (context.Context, error) { + ctx := r.Context() + oauthToken := r.URL.Query().Get("oauth_token") + if oauthToken != "" { + ctx = withRequestToken(ctx, oauthToken) + } + oauthVerifier := r.URL.Query().Get("oauth_verifier") + if oauthVerifier != "" { + ctx = withOAuthVerifier(ctx, oauthVerifier) + } + + var err error + ctx, err = a.loadExternalState(ctx, r) + if err != nil { + u, uerr := url.ParseRequestURI(a.config.SiteURL) + if uerr != nil { + return ctx, internalServerError("site url is improperly formatted").WithInternalError(uerr) + } + + q := getErrorQueryString(err, utilities.GetRequestID(ctx), observability.GetLogEntry(r).Entry, u.Query()) + u.RawQuery = q.Encode() + + http.Redirect(w, r, u.String(), http.StatusSeeOther) + } + return ctx, err +} + +func (a *API) oAuthCallback(ctx context.Context, r *http.Request, providerType string) (*OAuthProviderData, error) { + var rq url.Values + if err := r.ParseForm(); r.Method == http.MethodPost && err == nil { + rq = r.Form + } else { + rq = r.URL.Query() + } + + extError := rq.Get("error") + if extError != "" { + return nil, oauthError(extError, rq.Get("error_description")) + } + + oauthCode := rq.Get("code") + if oauthCode == "" { + return nil, badRequestError(ErrorCodeBadOAuthCallback, "OAuth callback 
with missing authorization code") + } + + oAuthProvider, err := a.OAuthProvider(ctx, providerType) + if err != nil { + return nil, badRequestError(ErrorCodeOAuthProviderNotSupported, "Unsupported provider: %+v", err).WithInternalError(err) + } + + log := observability.GetLogEntry(r).Entry + log.WithFields(logrus.Fields{ + "provider": providerType, + "code": oauthCode, + }).Debug("Exchanging oauth code") + + token, err := oAuthProvider.GetOAuthToken(oauthCode) + if err != nil { + return nil, internalServerError("Unable to exchange external code: %s", oauthCode).WithInternalError(err) + } + + userData, err := oAuthProvider.GetUserData(ctx, token) + if err != nil { + return nil, internalServerError("Error getting user profile from external provider").WithInternalError(err) + } + + switch externalProvider := oAuthProvider.(type) { + case *provider.AppleProvider: + // apple only returns user info the first time + oauthUser := rq.Get("user") + if oauthUser != "" { + err := externalProvider.ParseUser(oauthUser, userData) + if err != nil { + return nil, err + } + } + } + + return &OAuthProviderData{ + userData: userData, + token: token.AccessToken, + refreshToken: token.RefreshToken, + code: oauthCode, + }, nil +} + +func (a *API) oAuth1Callback(ctx context.Context, providerType string) (*OAuthProviderData, error) { + oAuthProvider, err := a.OAuthProvider(ctx, providerType) + if err != nil { + return nil, badRequestError(ErrorCodeOAuthProviderNotSupported, "Unsupported provider: %+v", err).WithInternalError(err) + } + oauthToken := getRequestToken(ctx) + oauthVerifier := getOAuthVerifier(ctx) + var accessToken *oauth.AccessToken + var userData *provider.UserProvidedData + if twitterProvider, ok := oAuthProvider.(*provider.TwitterProvider); ok { + accessToken, err = twitterProvider.Consumer.AuthorizeToken(&oauth.RequestToken{ + Token: oauthToken, + }, oauthVerifier) + if err != nil { + return nil, internalServerError("Unable to retrieve access token").WithInternalError(err) + } + userData, err = twitterProvider.FetchUserData(ctx, accessToken) + if err != nil { + return nil, internalServerError("Error getting user email from external provider").WithInternalError(err) + } + } + + return &OAuthProviderData{ + userData: userData, + token: accessToken.Token, + refreshToken: "", + }, nil + +} + +// OAuthProvider returns the corresponding oauth provider as an OAuthProvider interface
+func (a *API) OAuthProvider(ctx context.Context, name string) (provider.OAuthProvider, error) { + providerCandidate, err := a.Provider(ctx, name, "") + if err != nil { + return nil, err + } + + switch p := providerCandidate.(type) { + case provider.OAuthProvider: + return p, nil + default: + return nil, fmt.Errorf("Provider %v cannot be used for OAuth", name) + } +} diff --git a/auth_v2.169.0/internal/api/external_slack_oidc_test.go b/auth_v2.169.0/internal/api/external_slack_oidc_test.go new file mode 100644 index 0000000..acd2e78 --- /dev/null +++ b/auth_v2.169.0/internal/api/external_slack_oidc_test.go @@ -0,0 +1,33 @@ +package api + +import ( + "net/http" + "net/http/httptest" + "net/url" + + jwt "github.com/golang-jwt/jwt/v5" +) + +func (ts *ExternalTestSuite) TestSignupExternalSlackOIDC() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=slack_oidc", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := 
u.Query() + ts.Equal(ts.Config.External.Slack.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Slack.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("profile email openid", q.Get("scope")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("slack_oidc", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} diff --git a/auth_v2.169.0/internal/api/external_test.go b/auth_v2.169.0/internal/api/external_test.go new file mode 100644 index 0000000..bef89d7 --- /dev/null +++ b/auth_v2.169.0/internal/api/external_test.go @@ -0,0 +1,254 @@ +package api + +import ( + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" +) + +type ExternalTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration +} + +func TestExternal(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := &ExternalTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *ExternalTestSuite) SetupTest() { + ts.Config.DisableSignup = false + ts.Config.Mailer.Autoconfirm = false + + models.TruncateAll(ts.API.db) +} + +func (ts *ExternalTestSuite) createUser(providerId string, email string, name string, avatar string, confirmationToken string) (*models.User, error) { + // Cleanup existing user, if they already exist + if u, _ := models.FindUserByEmailAndAudience(ts.API.db, email, ts.Config.JWT.Aud); u != nil { + require.NoError(ts.T(), ts.API.db.Destroy(u), "Error deleting user") + } + + userData := map[string]interface{}{"provider_id": providerId, "full_name": name} + if avatar != "" { + userData["avatar_url"] = avatar + } + u, err := models.NewUser("", email, "test", ts.Config.JWT.Aud, userData) + + if confirmationToken != "" { + u.ConfirmationToken = confirmationToken + } + ts.Require().NoError(err, "Error making new user") + ts.Require().NoError(ts.API.db.Create(u), "Error creating user") + + if confirmationToken != "" { + ts.Require().NoError(models.CreateOneTimeToken(ts.API.db, u.ID, email, u.ConfirmationToken, models.ConfirmationToken), "Error creating one-time confirmation/invite token") + } + + i, err := models.NewIdentity(u, "email", map[string]interface{}{ + "sub": u.ID.String(), + "email": email, + }) + ts.Require().NoError(err) + ts.Require().NoError(ts.API.db.Create(i), "Error creating identity") + + return u, err +} + +func performAuthorizationRequest(ts *ExternalTestSuite, provider string, inviteToken string) *httptest.ResponseRecorder { + authorizeURL := "http://localhost/authorize?provider=" + provider + if inviteToken != "" { + authorizeURL = authorizeURL + "&invite_token=" + inviteToken + } + + req := httptest.NewRequest(http.MethodGet, authorizeURL, nil) + req.Header.Set("Referer", "https://example.netlify.com/admin") + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + + return w +} + +func performPKCEAuthorizationRequest(ts *ExternalTestSuite, provider, codeChallenge, codeChallengeMethod string) *httptest.ResponseRecorder { + authorizeURL := "http://localhost/authorize?provider=" + provider + if 
codeChallenge != "" { + authorizeURL = authorizeURL + "&code_challenge=" + codeChallenge + "&code_challenge_method=" + codeChallengeMethod + } + + req := httptest.NewRequest(http.MethodGet, authorizeURL, nil) + req.Header.Set("Referer", "https://example.supabase.com/admin") + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + return w +} + +func performPKCEAuthorization(ts *ExternalTestSuite, provider, code, codeChallenge, codeChallengeMethod string) *url.URL { + w := performPKCEAuthorizationRequest(ts, provider, codeChallenge, codeChallengeMethod) + ts.Require().Equal(http.StatusFound, w.Code) + // Get code and state from the redirect + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + state := q.Get("state") + testURL, err := url.Parse("http://localhost/callback") + ts.Require().NoError(err) + v := testURL.Query() + v.Set("code", code) + v.Set("state", state) + testURL.RawQuery = v.Encode() + // Use the code to get a token + req := httptest.NewRequest(http.MethodGet, testURL.String(), nil) + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err = url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + + return u + +} + +func performAuthorization(ts *ExternalTestSuite, provider string, code string, inviteToken string) *url.URL { + w := performAuthorizationRequest(ts, provider, inviteToken) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + state := q.Get("state") + + // auth server callback + testURL, err := url.Parse("http://localhost/callback") + ts.Require().NoError(err) + v := testURL.Query() + v.Set("code", code) + v.Set("state", state) + testURL.RawQuery = v.Encode() + req := httptest.NewRequest(http.MethodGet, testURL.String(), nil) + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err = url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + ts.Require().Equal("/admin", u.Path) + + return u +} + +func assertAuthorizationSuccess(ts *ExternalTestSuite, u *url.URL, tokenCount int, userCount int, email string, name string, providerId string, avatar string) { + // ensure redirect has #access_token=... 
+ v, err := url.ParseQuery(u.RawQuery) + ts.Require().NoError(err) + ts.Require().Empty(v.Get("error_description")) + ts.Require().Empty(v.Get("error")) + + v, err = url.ParseQuery(u.Fragment) + ts.Require().NoError(err) + ts.NotEmpty(v.Get("access_token")) + ts.NotEmpty(v.Get("refresh_token")) + ts.NotEmpty(v.Get("expires_in")) + ts.Equal("bearer", v.Get("token_type")) + + ts.Equal(1, tokenCount) + if userCount > -1 { + ts.Equal(1, userCount) + } + + // ensure user has been created with metadata + user, err := models.FindUserByEmailAndAudience(ts.API.db, email, ts.Config.JWT.Aud) + ts.Require().NoError(err) + ts.Equal(providerId, user.UserMetaData["provider_id"]) + ts.Equal(name, user.UserMetaData["full_name"]) + if avatar == "" { + ts.Equal(nil, user.UserMetaData["avatar_url"]) + } else { + ts.Equal(avatar, user.UserMetaData["avatar_url"]) + } +} + +func assertAuthorizationFailure(ts *ExternalTestSuite, u *url.URL, errorDescription string, errorType string, email string) { + // ensure new sign ups error + v, err := url.ParseQuery(u.RawQuery) + ts.Require().NoError(err) + ts.Require().Equal(errorDescription, v.Get("error_description")) + ts.Require().Equal(errorType, v.Get("error")) + + v, err = url.ParseQuery(u.Fragment) + ts.Require().NoError(err) + ts.Empty(v.Get("access_token")) + ts.Empty(v.Get("refresh_token")) + ts.Empty(v.Get("expires_in")) + ts.Empty(v.Get("token_type")) + + // ensure user is nil + user, err := models.FindUserByEmailAndAudience(ts.API.db, email, ts.Config.JWT.Aud) + ts.Require().Error(err, "User not found") + ts.Require().Nil(user) +} + +// TestSignupExternalUnsupported tests API /authorize for an unsupported external provider +func (ts *ExternalTestSuite) TestSignupExternalUnsupported() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=external", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Equal(w.Code, http.StatusBadRequest) +} + +func (ts *ExternalTestSuite) TestRedirectErrorsShouldPreserveParams() { + // Request with invalid external provider + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=external", nil) + w := httptest.NewRecorder() + cases := []struct { + Desc string + RedirectURL string + QueryParams []string + ErrorMessage string + }{ + { + Desc: "Should preserve redirect query params on error", + RedirectURL: "http://example.com/path?paramforpreservation=value2", + QueryParams: []string{"paramforpreservation"}, + ErrorMessage: "invalid_request", + }, + { + Desc: "Error param should be overwritten", + RedirectURL: "http://example.com/path?error=abc", + QueryParams: []string{"error"}, + ErrorMessage: "invalid_request", + }, + } + for _, c := range cases { + parsedURL, err := url.Parse(c.RedirectURL) + require.Equal(ts.T(), err, nil) + + redirectErrors(ts.API.internalExternalProviderCallback, w, req, parsedURL) + + parsedParams, err := url.ParseQuery(parsedURL.RawQuery) + require.Equal(ts.T(), err, nil) + + // An error and description should be returned + expectedQueryParams := append(c.QueryParams, "error", "error_description") + + for _, expectedQueryParam := range expectedQueryParams { + val, exists := parsedParams[expectedQueryParam] + require.True(ts.T(), exists) + if expectedQueryParam == "error" { + require.Equal(ts.T(), val[0], c.ErrorMessage) + } + } + } +} diff --git a/auth_v2.169.0/internal/api/external_twitch_test.go b/auth_v2.169.0/internal/api/external_twitch_test.go new file mode 100644 index 0000000..694a5ff --- /dev/null +++ 
b/auth_v2.169.0/internal/api/external_twitch_test.go @@ -0,0 +1,171 @@ +package api + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + + jwt "github.com/golang-jwt/jwt/v5" +) + +const ( + twitchUser string = `{"data":[{"id":"twitchTestId","login":"Twitch user","display_name":"Twitch user","type":"","broadcaster_type":"","description":"","profile_image_url":"https://s.gravatar.com/avatar/23463b99b62a72f26ed677cc556c44e8","offline_image_url":"","email":"twitch@example.com"}]}` + twitchUserWrongEmail string = `{"data":[{"id":"twitchTestId","login":"Twitch user","display_name":"Twitch user","type":"","broadcaster_type":"","description":"","profile_image_url":"https://s.gravatar.com/avatar/23463b99b62a72f26ed677cc556c44e8","offline_image_url":"","email":"other@example.com"}]}` +) + +func (ts *ExternalTestSuite) TestSignupExternalTwitch() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=twitch", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.Twitch.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Twitch.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("user:read:email", q.Get("scope")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("twitch", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func TwitchTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, userCount *int, code string, user string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/oauth2/token": + *tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Twitch.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"access_token":"Twitch_token","expires_in":100000}`) + case "/helix/users": + *userCount++ + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, user) + default: + w.WriteHeader(500) + ts.Fail("unknown Twitch oauth call %s", r.URL.Path) + } + })) + + ts.Config.External.Twitch.URL = server.URL + + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalTwitch_AuthorizationCode() { + ts.Config.DisableSignup = false + tokenCount, userCount := 0, 0 + code := "authcode" + server := TwitchTestSignupSetup(ts, &tokenCount, &userCount, code, twitchUser) + defer server.Close() + + u := performAuthorization(ts, "twitch", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "twitch@example.com", "Twitch user", "twitchTestId", "https://s.gravatar.com/avatar/23463b99b62a72f26ed677cc556c44e8") +} + +func (ts *ExternalTestSuite) TestSignupExternalTwitchDisableSignupErrorWhenNoUser() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + TwitchUser := `{"data":[{"id":"1","login":"Twitch user","display_name":"Twitch 
user","type":"","broadcaster_type":"","description":"","profile_image_url":"https://s.gravatar.com/avatar/23463b99b62a72f26ed677cc556c44e8","offline_image_url":"","email":"twitch@example.com"}]}` + server := TwitchTestSignupSetup(ts, &tokenCount, &userCount, code, TwitchUser) + defer server.Close() + + u := performAuthorization(ts, "twitch", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", "twitch@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalTwitchDisableSignupErrorWhenEmptyEmail() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + TwitchUser := `{"data":[{"id":"1","login":"Twitch user","display_name":"Twitch user","type":"","broadcaster_type":"","description":"","profile_image_url":"https://s.gravatar.com/avatar/23463b99b62a72f26ed677cc556c44e8","offline_image_url":""}]}` + server := TwitchTestSignupSetup(ts, &tokenCount, &userCount, code, TwitchUser) + defer server.Close() + + u := performAuthorization(ts, "twitch", code, "") + + assertAuthorizationFailure(ts, u, "Error getting user email from external provider", "server_error", "twitch@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalTwitchDisableSignupSuccessWithPrimaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("twitchTestId", "twitch@example.com", "Twitch user", "https://s.gravatar.com/avatar/23463b99b62a72f26ed677cc556c44e8", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := TwitchTestSignupSetup(ts, &tokenCount, &userCount, code, twitchUser) + defer server.Close() + + u := performAuthorization(ts, "twitch", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "twitch@example.com", "Twitch user", "twitchTestId", "https://s.gravatar.com/avatar/23463b99b62a72f26ed677cc556c44e8") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalTwitchSuccessWhenMatchingToken() { + // name and avatar should be populated from Twitch API + ts.createUser("twitchTestId", "twitch@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + TwitchUser := `{"data":[{"id":"twitchTestId","login":"Twitch Test","display_name":"Twitch Test","type":"","broadcaster_type":"","description":"","profile_image_url":"https://s.gravatar.com/avatar/23463b99b62a72f26ed677cc556c44e8","offline_image_url":"","email":"twitch@example.com"}]}` + server := TwitchTestSignupSetup(ts, &tokenCount, &userCount, code, TwitchUser) + defer server.Close() + + u := performAuthorization(ts, "twitch", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "twitch@example.com", "Twitch Test", "twitchTestId", "https://s.gravatar.com/avatar/23463b99b62a72f26ed677cc556c44e8") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalTwitchErrorWhenNoMatchingToken() { + tokenCount, userCount := 0, 0 + code := "authcode" + TwitchUser := `{"data":[{"id":"1","login":"Twitch user","display_name":"Twitch user","type":"","broadcaster_type":"","description":"","profile_image_url":"https://s.gravatar.com/avatar/23463b99b62a72f26ed677cc556c44e8","offline_image_url":"","email":"twitch@example.com"}]}` + server := TwitchTestSignupSetup(ts, &tokenCount, &userCount, code, TwitchUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "twitch", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalTwitchErrorWhenWrongToken() { + ts.createUser("twitchTestId", 
"twitch@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := TwitchTestSignupSetup(ts, &tokenCount, &userCount, code, twitchUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "twitch", "wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalTwitchErrorWhenEmailDoesntMatch() { + ts.createUser("twitchTestId", "twitch@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := TwitchTestSignupSetup(ts, &tokenCount, &userCount, code, twitchUserWrongEmail) + defer server.Close() + + u := performAuthorization(ts, "twitch", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from external provider", "invalid_request", "") +} diff --git a/auth_v2.169.0/internal/api/external_twitter_test.go b/auth_v2.169.0/internal/api/external_twitter_test.go new file mode 100644 index 0000000..d90d5d3 --- /dev/null +++ b/auth_v2.169.0/internal/api/external_twitter_test.go @@ -0,0 +1,42 @@ +package api + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" +) + +func (ts *ExternalTestSuite) TestSignupExternalTwitter() { + server := TwitterTestSignupSetup(ts, nil, nil, "", "") + defer server.Close() + + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=twitter", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + + // Twitter uses OAuth1.0 protocol which only returns an oauth_token on the redirect + q := u.Query() + ts.Equal("twitter_oauth_token", q.Get("oauth_token")) +} + +func TwitterTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, userCount *int, code string, user string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/oauth/request_token": + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, "oauth_token=twitter_oauth_token&oauth_token_secret=twitter_oauth_token_secret&oauth_callback_confirmed=true") + default: + w.WriteHeader(500) + ts.Fail("unknown google oauth call %s", r.URL.Path) + } + })) + + ts.Config.External.Twitter.URL = server.URL + + return server +} diff --git a/auth_v2.169.0/internal/api/external_workos_test.go b/auth_v2.169.0/internal/api/external_workos_test.go new file mode 100644 index 0000000..eedd5b0 --- /dev/null +++ b/auth_v2.169.0/internal/api/external_workos_test.go @@ -0,0 +1,221 @@ +package api + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + + jwt "github.com/golang-jwt/jwt/v5" +) + +const ( + workosUser string = `{"id":"test_prof_workos","first_name":"John","last_name":"Doe","email":"workos@example.com","connection_id":"test_conn_1","organization_id":"test_org_1","connection_type":"test","idp_id":"test_idp_1","object": "profile","raw_attributes": {}}` + workosUserWrongEmail string = `{"id":"test_prof_workos","first_name":"John","last_name":"Doe","email":"other@example.com","connection_id":"test_conn_1","organization_id":"test_org_1","connection_type":"test","idp_id":"test_idp_1","object": "profile","raw_attributes": {}}` + workosUserNoEmail string = 
`{"id":"test_prof_workos","first_name":"John","last_name":"Doe","connection_id":"test_conn_1","organization_id":"test_org_1","connection_type":"test","idp_id":"test_idp_1","object": "profile","raw_attributes": {}}` +) + +func (ts *ExternalTestSuite) TestSignupExternalWorkOSWithConnection() { + connection := "test_connection_id" + req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://localhost/authorize?provider=workos&connection=%s", connection), nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.WorkOS.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.WorkOS.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("", q.Get("scope")) + ts.Equal(connection, q.Get("connection")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("workos", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func (ts *ExternalTestSuite) TestSignupExternalWorkOSWithOrganization() { + organization := "test_organization_id" + req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://localhost/authorize?provider=workos&organization=%s", organization), nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.WorkOS.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.WorkOS.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("", q.Get("scope")) + ts.Equal(organization, q.Get("organization")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("workos", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func (ts *ExternalTestSuite) TestSignupExternalWorkOSWithProvider() { + provider := "test_provider" + req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://localhost/authorize?provider=workos&workos_provider=%s", provider), nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.WorkOS.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.WorkOS.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + ts.Equal("", q.Get("scope")) + ts.Equal(provider, q.Get("provider")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + 
ts.Equal("workos", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func WorkosTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, userCount *int, code string, user string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/sso/token": + // WorkOS returns the user data along with the token. + *tokenCount++ + *userCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.WorkOS.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{"access_token":"workos_token","expires_in":100000,"profile":%s}`, user) + default: + fmt.Printf("%s", r.URL.Path) + w.WriteHeader(500) + ts.Fail("unknown workos oauth call %s", r.URL.Path) + } + })) + + ts.Config.External.WorkOS.URL = server.URL + + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalWorkosAuthorizationCode() { + ts.Config.DisableSignup = false + + tokenCount, userCount := 0, 0 + code := "authcode" + server := WorkosTestSignupSetup(ts, &tokenCount, &userCount, code, workosUser) + defer server.Close() + + u := performAuthorization(ts, "workos", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "workos@example.com", "John Doe", "test_prof_workos", "") +} + +func (ts *ExternalTestSuite) TestSignupExternalWorkosDisableSignupErrorWhenNoUser() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + server := WorkosTestSignupSetup(ts, &tokenCount, &userCount, code, workosUser) + defer server.Close() + + u := performAuthorization(ts, "workos", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", "workos@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalWorkosDisableSignupErrorWhenEmptyEmail() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + server := WorkosTestSignupSetup(ts, &tokenCount, &userCount, code, workosUserNoEmail) + defer server.Close() + + u := performAuthorization(ts, "workos", code, "") + + assertAuthorizationFailure(ts, u, "Error getting user email from external provider", "server_error", "workos@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalWorkosDisableSignupSuccessWithPrimaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("test_prof_workos", "workos@example.com", "John Doe", "", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := WorkosTestSignupSetup(ts, &tokenCount, &userCount, code, workosUser) + defer server.Close() + + u := performAuthorization(ts, "workos", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "workos@example.com", "John Doe", "test_prof_workos", "") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalWorkosSuccessWhenMatchingToken() { + ts.createUser("test_prof_workos", "workos@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := WorkosTestSignupSetup(ts, &tokenCount, &userCount, code, workosUser) + defer server.Close() + + u := performAuthorization(ts, "workos", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "workos@example.com", "John Doe", "test_prof_workos", "") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalWorkosErrorWhenNoMatchingToken() { + tokenCount, userCount := 0, 0 + code := "authcode" + server 
:= WorkosTestSignupSetup(ts, &tokenCount, &userCount, code, workosUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "workos", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalWorkosErrorWhenWrongToken() { + ts.createUser("test_prof_workos", "workos@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := WorkosTestSignupSetup(ts, &tokenCount, &userCount, code, workosUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "workos", "wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalWorkosErrorWhenEmailDoesntMatch() { + ts.createUser("test_prof_workos", "workos@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := WorkosTestSignupSetup(ts, &tokenCount, &userCount, code, workosUserWrongEmail) + defer server.Close() + + u := performAuthorization(ts, "workos", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from external provider", "invalid_request", "") +} diff --git a/auth_v2.169.0/internal/api/external_zoom_test.go b/auth_v2.169.0/internal/api/external_zoom_test.go new file mode 100644 index 0000000..ea3f15c --- /dev/null +++ b/auth_v2.169.0/internal/api/external_zoom_test.go @@ -0,0 +1,167 @@ +package api + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + + jwt "github.com/golang-jwt/jwt/v5" +) + +const ( + zoomUser string = `{"id":"zoomUserId","first_name":"John","last_name": "Doe","email": "zoom@example.com","verified": 1,"pic_url":"http://example.com/avatar"}` + zoomUserWrongEmail string = `{"id":"zoomUserId","first_name":"John","last_name": "Doe","email": "other@example.com","verified": 1,"pic_url":"http://example.com/avatar"}` + zoomUserNoEmail string = `{"id":"zoomUserId","first_name":"John","last_name": "Doe","verified": 1,"pic_url":"http://example.com/avatar"}` +) + +func (ts *ExternalTestSuite) TestSignupExternalZoom() { + req := httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=zoom", nil) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + ts.Equal(ts.Config.External.Zoom.RedirectURI, q.Get("redirect_uri")) + ts.Equal(ts.Config.External.Zoom.ClientID, []string{q.Get("client_id")}) + ts.Equal("code", q.Get("response_type")) + + claims := ExternalProviderClaims{} + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.ParseWithClaims(q.Get("state"), &claims, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + ts.Require().NoError(err) + + ts.Equal("zoom", claims.Provider) + ts.Equal(ts.Config.SiteURL, claims.SiteURL) +} + +func ZoomTestSignupSetup(ts *ExternalTestSuite, tokenCount *int, userCount *int, code string, user string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/oauth/token": + *tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Zoom.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, 
`{"access_token":"zoom_token","expires_in":100000}`) + case "/v2/users/me": + *userCount++ + ts.Contains(r.Header, "Authorization") + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, user) + default: + w.WriteHeader(500) + ts.Fail("unknown zoom oauth call %s", r.URL.Path) + } + })) + + ts.Config.External.Zoom.URL = server.URL + + return server +} + +func (ts *ExternalTestSuite) TestSignupExternalZoomAuthorizationCode() { + ts.Config.DisableSignup = false + tokenCount, userCount := 0, 0 + code := "authcode" + server := ZoomTestSignupSetup(ts, &tokenCount, &userCount, code, zoomUser) + defer server.Close() + + u := performAuthorization(ts, "zoom", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "zoom@example.com", "John Doe", "zoomUserId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestSignupExternalZoomDisableSignupErrorWhenNoUser() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + server := ZoomTestSignupSetup(ts, &tokenCount, &userCount, code, zoomUser) + defer server.Close() + + u := performAuthorization(ts, "zoom", code, "") + + assertAuthorizationFailure(ts, u, "Signups not allowed for this instance", "access_denied", "zoom@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalZoomDisableSignupErrorWhenEmptyEmail() { + ts.Config.DisableSignup = true + + tokenCount, userCount := 0, 0 + code := "authcode" + server := ZoomTestSignupSetup(ts, &tokenCount, &userCount, code, zoomUserNoEmail) + defer server.Close() + + u := performAuthorization(ts, "zoom", code, "") + + assertAuthorizationFailure(ts, u, "Error getting user email from external provider", "server_error", "zoom@example.com") +} + +func (ts *ExternalTestSuite) TestSignupExternalZoomDisableSignupSuccessWithPrimaryEmail() { + ts.Config.DisableSignup = true + + ts.createUser("zoomUserId", "zoom@example.com", "John Doe", "http://example.com/avatar", "") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := ZoomTestSignupSetup(ts, &tokenCount, &userCount, code, zoomUser) + defer server.Close() + + u := performAuthorization(ts, "zoom", code, "") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "zoom@example.com", "John Doe", "zoomUserId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalZoomSuccessWhenMatchingToken() { + ts.createUser("zoomUserId", "zoom@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := ZoomTestSignupSetup(ts, &tokenCount, &userCount, code, zoomUser) + defer server.Close() + + u := performAuthorization(ts, "zoom", code, "invite_token") + + assertAuthorizationSuccess(ts, u, tokenCount, userCount, "zoom@example.com", "John Doe", "zoomUserId", "http://example.com/avatar") +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalZoomErrorWhenNoMatchingToken() { + tokenCount, userCount := 0, 0 + code := "authcode" + server := ZoomTestSignupSetup(ts, &tokenCount, &userCount, code, zoomUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "zoom", "invite_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalZoomErrorWhenWrongToken() { + ts.createUser("zoomUserId", "zoom@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := ZoomTestSignupSetup(ts, &tokenCount, &userCount, code, zoomUser) + defer server.Close() + + w := performAuthorizationRequest(ts, "zoom", 
"wrong_token") + ts.Require().Equal(http.StatusNotFound, w.Code) +} + +func (ts *ExternalTestSuite) TestInviteTokenExternalZoomErrorWhenEmailDoesntMatch() { + ts.createUser("zoomUserId", "zoom@example.com", "", "", "invite_token") + + tokenCount, userCount := 0, 0 + code := "authcode" + server := ZoomTestSignupSetup(ts, &tokenCount, &userCount, code, zoomUserWrongEmail) + defer server.Close() + + u := performAuthorization(ts, "zoom", code, "invite_token") + + assertAuthorizationFailure(ts, u, "Invited email does not match emails from external provider", "invalid_request", "") +} diff --git a/auth_v2.169.0/internal/api/helpers.go b/auth_v2.169.0/internal/api/helpers.go new file mode 100644 index 0000000..8a9f326 --- /dev/null +++ b/auth_v2.169.0/internal/api/helpers.go @@ -0,0 +1,103 @@ +package api + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "github.com/pkg/errors" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/security" + "github.com/supabase/auth/internal/utilities" +) + +func sendJSON(w http.ResponseWriter, status int, obj interface{}) error { + w.Header().Set("Content-Type", "application/json") + b, err := json.Marshal(obj) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("Error encoding json response: %v", obj)) + } + w.WriteHeader(status) + _, err = w.Write(b) + return err +} + +func isAdmin(u *models.User, config *conf.GlobalConfiguration) bool { + return config.JWT.Aud == u.Aud && u.HasRole(config.JWT.AdminGroupName) +} + +func (a *API) requestAud(ctx context.Context, r *http.Request) string { + config := a.config + // First check for an audience in the header + if aud := r.Header.Get(audHeaderName); aud != "" { + return aud + } + + // Then check the token + claims := getClaims(ctx) + + if claims != nil { + aud, _ := claims.GetAudience() + if len(aud) != 0 && aud[0] != "" { + return aud[0] + } + } + + // Finally, return the default if none of the above methods are successful + return config.JWT.Aud +} + +func isStringInSlice(checkValue string, list []string) bool { + for _, val := range list { + if val == checkValue { + return true + } + } + return false +} + +type RequestParams interface { + AdminUserParams | + CreateSSOProviderParams | + EnrollFactorParams | + GenerateLinkParams | + IdTokenGrantParams | + InviteParams | + OtpParams | + PKCEGrantParams | + PasswordGrantParams | + RecoverParams | + RefreshTokenGrantParams | + ResendConfirmationParams | + SignupParams | + SingleSignOnParams | + SmsParams | + UserUpdateParams | + VerifyFactorParams | + VerifyParams | + adminUserUpdateFactorParams | + adminUserDeleteParams | + security.GotrueRequest | + ChallengeFactorParams | + struct { + Email string `json:"email"` + Phone string `json:"phone"` + } | + struct { + Email string `json:"email"` + } +} + +// retrieveRequestParams is a generic method that unmarshals the request body into the params struct provided +func retrieveRequestParams[A RequestParams](r *http.Request, params *A) error { + body, err := utilities.GetBodyBytes(r) + if err != nil { + return internalServerError("Could not read body into byte slice").WithInternalError(err) + } + if err := json.Unmarshal(body, params); err != nil { + return badRequestError(ErrorCodeBadJSON, "Could not parse request body as JSON: %v", err) + } + return nil +} diff --git a/auth_v2.169.0/internal/api/helpers_test.go b/auth_v2.169.0/internal/api/helpers_test.go new file mode 100644 index 0000000..29070e8 --- /dev/null +++ 
b/auth_v2.169.0/internal/api/helpers_test.go @@ -0,0 +1,151 @@ +package api + +import ( + "fmt" + "net/http" + "net/http/httptest" + "strconv" + "testing" + + "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/conf" +) + +func TestIsValidCodeChallenge(t *testing.T) { + cases := []struct { + challenge string + isValid bool + expectedError error + }{ + { + challenge: "invalid", + isValid: false, + expectedError: badRequestError(ErrorCodeValidationFailed, "code challenge has to be between %v and %v characters", MinCodeChallengeLength, MaxCodeChallengeLength), + }, + { + challenge: "codechallengecontainsinvalidcharacterslike@$^&*", + isValid: false, + expectedError: badRequestError(ErrorCodeValidationFailed, "code challenge can only contain alphanumeric characters, hyphens, periods, underscores and tildes"), + }, + { + challenge: "validchallengevalidchallengevalidchallengevalidchallenge", + isValid: true, + expectedError: nil, + }, + } + + for _, c := range cases { + valid, err := isValidCodeChallenge(c.challenge) + require.Equal(t, c.isValid, valid) + require.Equal(t, c.expectedError, err) + } +} + +func TestIsValidPKCEParams(t *testing.T) { + cases := []struct { + challengeMethod string + challenge string + expected error + }{ + { + challengeMethod: "", + challenge: "", + expected: nil, + }, + { + challengeMethod: "test", + challenge: "testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttest", + expected: nil, + }, + { + challengeMethod: "test", + challenge: "", + expected: badRequestError(ErrorCodeValidationFailed, InvalidPKCEParamsErrorMessage), + }, + { + challengeMethod: "", + challenge: "test", + expected: badRequestError(ErrorCodeValidationFailed, InvalidPKCEParamsErrorMessage), + }, + } + + for i, c := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + err := validatePKCEParams(c.challengeMethod, c.challenge) + require.Equal(t, c.expected, err) + }) + } +} + +func TestRequestAud(ts *testing.T) { + mockAPI := API{ + config: &conf.GlobalConfiguration{ + JWT: conf.JWTConfiguration{ + Aud: "authenticated", + Secret: "test-secret", + }, + }, + } + + cases := []struct { + desc string + headers map[string]string + payload map[string]interface{} + expectedAud string + }{ + { + desc: "Valid audience slice", + headers: map[string]string{ + audHeaderName: "my_custom_aud", + }, + payload: map[string]interface{}{ + "aud": "authenticated", + }, + expectedAud: "my_custom_aud", + }, + { + desc: "Valid custom audience", + payload: map[string]interface{}{ + "aud": "my_custom_aud", + }, + expectedAud: "my_custom_aud", + }, + { + desc: "Invalid audience", + payload: map[string]interface{}{ + "aud": "", + }, + expectedAud: mockAPI.config.JWT.Aud, + }, + { + desc: "Missing audience", + payload: map[string]interface{}{ + "sub": "d6044b6e-b0ec-4efe-a055-0d2d6ff1dbd8", + }, + expectedAud: mockAPI.config.JWT.Aud, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func(t *testing.T) { + claims := jwt.MapClaims(c.payload) + token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + signed, err := token.SignedString([]byte(mockAPI.config.JWT.Secret)) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodGet, "/", nil) + req.Header.Set("Authorization", fmt.Sprintf("Bearer: %s", signed)) + for k, v := range c.headers { + req.Header.Set(k, v) + } + + // set the token in the request context for requestAud + ctx, err := mockAPI.parseJWTClaims(signed, req) + require.NoError(t, err) + aud := mockAPI.requestAud(ctx, req) + 
require.Equal(t, c.expectedAud, aud) + }) + } + +} diff --git a/auth_v2.169.0/internal/api/hooks.go b/auth_v2.169.0/internal/api/hooks.go new file mode 100644 index 0000000..2cf99cd --- /dev/null +++ b/auth_v2.169.0/internal/api/hooks.go @@ -0,0 +1,405 @@ +package api + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "mime" + "net" + "net/http" + "strings" + "time" + + "github.com/gofrs/uuid" + "github.com/sirupsen/logrus" + standardwebhooks "github.com/standard-webhooks/standard-webhooks/libraries/go" + + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/hooks" + "github.com/supabase/auth/internal/observability" + "github.com/supabase/auth/internal/storage" +) + +const ( + DefaultHTTPHookTimeout = 5 * time.Second + DefaultHTTPHookRetries = 3 + HTTPHookBackoffDuration = 2 * time.Second + PayloadLimit = 200 * 1024 // 200KB +) + +func (a *API) runPostgresHook(ctx context.Context, tx *storage.Connection, hookConfig conf.ExtensibilityPointConfiguration, input, output any) ([]byte, error) { + db := a.db.WithContext(ctx) + + request, err := json.Marshal(input) + if err != nil { + panic(err) + } + + var response []byte + invokeHookFunc := func(tx *storage.Connection) error { + // We rely on Postgres timeouts to ensure the function doesn't overrun + if terr := tx.RawQuery(fmt.Sprintf("set local statement_timeout TO '%d';", hooks.DefaultTimeout)).Exec(); terr != nil { + return terr + } + + if terr := tx.RawQuery(fmt.Sprintf("select %s(?);", hookConfig.HookName), request).First(&response); terr != nil { + return terr + } + + // reset the timeout + if terr := tx.RawQuery("set local statement_timeout TO default;").Exec(); terr != nil { + return terr + } + + return nil + } + + if tx != nil { + if err := invokeHookFunc(tx); err != nil { + return nil, err + } + } else { + if err := db.Transaction(invokeHookFunc); err != nil { + return nil, err + } + } + + if err := json.Unmarshal(response, output); err != nil { + return response, err + } + + return response, nil +} + +func (a *API) runHTTPHook(r *http.Request, hookConfig conf.ExtensibilityPointConfiguration, input any) ([]byte, error) { + ctx := r.Context() + client := http.Client{ + Timeout: DefaultHTTPHookTimeout, + } + ctx, cancel := context.WithTimeout(ctx, DefaultHTTPHookTimeout) + defer cancel() + + log := observability.GetLogEntry(r).Entry + requestURL := hookConfig.URI + hookLog := log.WithFields(logrus.Fields{ + "component": "auth_hook", + "url": requestURL, + }) + + inputPayload, err := json.Marshal(input) + if err != nil { + return nil, err + } + for i := 0; i < DefaultHTTPHookRetries; i++ { + if i == 0 { + hookLog.Debugf("invocation attempt: %d", i) + } else { + hookLog.Infof("invocation attempt: %d", i) + } + msgID := uuid.Must(uuid.NewV4()) + currentTime := time.Now() + signatureList, err := generateSignatures(hookConfig.HTTPHookSecrets, msgID, currentTime, inputPayload) + if err != nil { + return nil, err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, requestURL, bytes.NewBuffer(inputPayload)) + if err != nil { + panic("Failed to make request object") + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("webhook-id", msgID.String()) + req.Header.Set("webhook-timestamp", fmt.Sprintf("%d", currentTime.Unix())) + req.Header.Set("webhook-signature", strings.Join(signatureList, ", ")) + // By default, Go Client sets encoding to gzip, which does not carry a content length header. 
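+ // Requesting the identity encoding keeps the response uncompressed, so it normally carries a Content-Length and the size check below sees the payload exactly as sent.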
+ req.Header.Set("Accept-Encoding", "identity") + + rsp, err := client.Do(req) + if err != nil && errors.Is(err, context.DeadlineExceeded) { + return nil, unprocessableEntityError(ErrorCodeHookTimeout, fmt.Sprintf("Failed to reach hook within maximum time of %f seconds", DefaultHTTPHookTimeout.Seconds())) + + } else if err != nil { + if terr, ok := err.(net.Error); ok && terr.Timeout() || i < DefaultHTTPHookRetries-1 { + hookLog.Errorf("Request timed out for attempt %d with err %s", i, err) + time.Sleep(HTTPHookBackoffDuration) + continue + } else if i == DefaultHTTPHookRetries-1 { + return nil, unprocessableEntityError(ErrorCodeHookTimeoutAfterRetry, "Failed to reach hook after maximum retries") + } else { + return nil, internalServerError("Failed to trigger auth hook, error making HTTP request").WithInternalError(err) + } + } + + defer rsp.Body.Close() + + switch rsp.StatusCode { + case http.StatusOK, http.StatusNoContent, http.StatusAccepted: + // Header.Get is case insensitive + contentType := rsp.Header.Get("Content-Type") + if contentType == "" { + return nil, badRequestError(ErrorCodeHookPayloadInvalidContentType, "Invalid Content-Type: Missing Content-Type header") + } + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + return nil, badRequestError(ErrorCodeHookPayloadInvalidContentType, fmt.Sprintf("Invalid Content-Type header: %s", err.Error())) + } + if mediaType != "application/json" { + return nil, badRequestError(ErrorCodeHookPayloadInvalidContentType, "Invalid JSON response. Received content-type: "+contentType) + } + if rsp.Body == nil { + return nil, nil + } + limitedReader := io.LimitedReader{R: rsp.Body, N: PayloadLimit} + body, err := io.ReadAll(&limitedReader) + if err != nil { + return nil, err + } + if limitedReader.N <= 0 { + // check if the response body still has excess bytes to be read + if n, _ := rsp.Body.Read(make([]byte, 1)); n > 0 { + return nil, unprocessableEntityError(ErrorCodeHookPayloadOverSizeLimit, fmt.Sprintf("Payload size exceeded size limit of %d bytes", PayloadLimit)) + } + } + return body, nil + case http.StatusTooManyRequests, http.StatusServiceUnavailable: + retryAfterHeader := rsp.Header.Get("retry-after") + // Check for truthy values to allow for flexibility to switch to time duration + if retryAfterHeader != "" { + continue + } + return nil, internalServerError("Service currently unavailable due to hook") + case http.StatusBadRequest: + return nil, internalServerError("Invalid payload sent to hook") + case http.StatusUnauthorized: + return nil, internalServerError("Hook requires authorization token") + default: + return nil, internalServerError("Unexpected status code returned from hook: %d", rsp.StatusCode) + } + } + return nil, nil +} + +// invokePostgresHook invokes the hook code. conn can be nil, in which case a new +// transaction is opened. If calling invokeHook within a transaction, always +// pass the current transaction, as pool-exhaustion deadlocks are very easy to +// trigger. 
+func (a *API) invokeHook(conn *storage.Connection, r *http.Request, input, output any) error { + var err error + var response []byte + + switch input.(type) { + case *hooks.SendSMSInput: + hookOutput, ok := output.(*hooks.SendSMSOutput) + if !ok { + panic("output should be *hooks.SendSMSOutput") + } + if response, err = a.runHook(r, conn, a.config.Hook.SendSMS, input, output); err != nil { + return err + } + if err := json.Unmarshal(response, hookOutput); err != nil { + return internalServerError("Error unmarshaling Send SMS output.").WithInternalError(err) + } + if hookOutput.IsError() { + httpCode := hookOutput.HookError.HTTPCode + + if httpCode == 0 { + httpCode = http.StatusInternalServerError + } + httpError := &HTTPError{ + HTTPStatus: httpCode, + Message: hookOutput.HookError.Message, + } + return httpError.WithInternalError(&hookOutput.HookError) + } + return nil + case *hooks.SendEmailInput: + hookOutput, ok := output.(*hooks.SendEmailOutput) + if !ok { + panic("output should be *hooks.SendEmailOutput") + } + if response, err = a.runHook(r, conn, a.config.Hook.SendEmail, input, output); err != nil { + return err + } + if err := json.Unmarshal(response, hookOutput); err != nil { + return internalServerError("Error unmarshaling Send Email output.").WithInternalError(err) + } + if hookOutput.IsError() { + httpCode := hookOutput.HookError.HTTPCode + + if httpCode == 0 { + httpCode = http.StatusInternalServerError + } + + httpError := &HTTPError{ + HTTPStatus: httpCode, + Message: hookOutput.HookError.Message, + } + + return httpError.WithInternalError(&hookOutput.HookError) + } + return nil + case *hooks.MFAVerificationAttemptInput: + hookOutput, ok := output.(*hooks.MFAVerificationAttemptOutput) + if !ok { + panic("output should be *hooks.MFAVerificationAttemptOutput") + } + if response, err = a.runHook(r, conn, a.config.Hook.MFAVerificationAttempt, input, output); err != nil { + return err + } + if err := json.Unmarshal(response, hookOutput); err != nil { + return internalServerError("Error unmarshaling MFA Verification Attempt output.").WithInternalError(err) + } + if hookOutput.IsError() { + httpCode := hookOutput.HookError.HTTPCode + + if httpCode == 0 { + httpCode = http.StatusInternalServerError + } + + httpError := &HTTPError{ + HTTPStatus: httpCode, + Message: hookOutput.HookError.Message, + } + + return httpError.WithInternalError(&hookOutput.HookError) + } + return nil + case *hooks.PasswordVerificationAttemptInput: + hookOutput, ok := output.(*hooks.PasswordVerificationAttemptOutput) + if !ok { + panic("output should be *hooks.PasswordVerificationAttemptOutput") + } + + if response, err = a.runHook(r, conn, a.config.Hook.PasswordVerificationAttempt, input, output); err != nil { + return err + } + if err := json.Unmarshal(response, hookOutput); err != nil { + return internalServerError("Error unmarshaling Password Verification Attempt output.").WithInternalError(err) + } + if hookOutput.IsError() { + httpCode := hookOutput.HookError.HTTPCode + + if httpCode == 0 { + httpCode = http.StatusInternalServerError + } + + httpError := &HTTPError{ + HTTPStatus: httpCode, + Message: hookOutput.HookError.Message, + } + + return httpError.WithInternalError(&hookOutput.HookError) + } + + return nil + case *hooks.CustomAccessTokenInput: + hookOutput, ok := output.(*hooks.CustomAccessTokenOutput) + if !ok { + panic("output should be *hooks.CustomAccessTokenOutput") + } + if response, err = a.runHook(r, conn, a.config.Hook.CustomAccessToken, input, output); err != nil { + return err + } 
+ if err := json.Unmarshal(response, hookOutput); err != nil { + return internalServerError("Error unmarshaling Custom Access Token output.").WithInternalError(err) + } + + if hookOutput.IsError() { + httpCode := hookOutput.HookError.HTTPCode + + if httpCode == 0 { + httpCode = http.StatusInternalServerError + } + + httpError := &HTTPError{ + HTTPStatus: httpCode, + Message: hookOutput.HookError.Message, + } + + return httpError.WithInternalError(&hookOutput.HookError) + } + if err := validateTokenClaims(hookOutput.Claims); err != nil { + httpCode := hookOutput.HookError.HTTPCode + + if httpCode == 0 { + httpCode = http.StatusInternalServerError + } + httpError := &HTTPError{ + HTTPStatus: httpCode, + Message: err.Error(), + } + + return httpError + } + return nil + } + return nil +} + +func (a *API) runHook(r *http.Request, conn *storage.Connection, hookConfig conf.ExtensibilityPointConfiguration, input, output any) ([]byte, error) { + ctx := r.Context() + + logEntry := observability.GetLogEntry(r) + hookStart := time.Now() + + var response []byte + var err error + + switch { + case strings.HasPrefix(hookConfig.URI, "http:") || strings.HasPrefix(hookConfig.URI, "https:"): + response, err = a.runHTTPHook(r, hookConfig, input) + case strings.HasPrefix(hookConfig.URI, "pg-functions:"): + response, err = a.runPostgresHook(ctx, conn, hookConfig, input, output) + default: + return nil, fmt.Errorf("unsupported protocol: %q only postgres hooks and HTTPS functions are supported at the moment", hookConfig.URI) + } + + duration := time.Since(hookStart) + + if err != nil { + logEntry.Entry.WithFields(logrus.Fields{ + "action": "run_hook", + "hook": hookConfig.URI, + "success": false, + "duration": duration.Microseconds(), + }).WithError(err).Warn("Hook errored out") + + return nil, internalServerError("Error running hook URI: %v", hookConfig.URI).WithInternalError(err) + } + + logEntry.Entry.WithFields(logrus.Fields{ + "action": "run_hook", + "hook": hookConfig.URI, + "success": true, + "duration": duration.Microseconds(), + }).WithError(err).Info("Hook ran successfully") + + return response, nil +} + +func generateSignatures(secrets []string, msgID uuid.UUID, currentTime time.Time, inputPayload []byte) ([]string, error) { + SymmetricSignaturePrefix := "v1," + // TODO(joel): Handle asymmetric case once library has been upgraded + var signatureList []string + for _, secret := range secrets { + if strings.HasPrefix(secret, SymmetricSignaturePrefix) { + trimmedSecret := strings.TrimPrefix(secret, SymmetricSignaturePrefix) + wh, err := standardwebhooks.NewWebhook(trimmedSecret) + if err != nil { + return nil, err + } + signature, err := wh.Sign(msgID.String(), currentTime, inputPayload) + if err != nil { + return nil, err + } + signatureList = append(signatureList, signature) + } else { + return nil, errors.New("invalid signature format") + } + } + return signatureList, nil +} diff --git a/auth_v2.169.0/internal/api/hooks_test.go b/auth_v2.169.0/internal/api/hooks_test.go new file mode 100644 index 0000000..c78ce5f --- /dev/null +++ b/auth_v2.169.0/internal/api/hooks_test.go @@ -0,0 +1,287 @@ +package api + +import ( + "encoding/json" + "net/http" + "testing" + + "net/http/httptest" + + "github.com/pkg/errors" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/hooks" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" + + 
"gopkg.in/h2non/gock.v1" +) + +var handleApiRequest func(*http.Request) (*http.Response, error) + +type HooksTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration + TestUser *models.User +} + +type MockHttpClient struct { + mock.Mock +} + +func (m *MockHttpClient) Do(req *http.Request) (*http.Response, error) { + return handleApiRequest(req) +} + +func TestHooks(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := &HooksTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *HooksTestSuite) SetupTest() { + models.TruncateAll(ts.API.db) + u, err := models.NewUser("123456789", "testemail@gmail.com", "securetestpassword", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating test user model") + require.NoError(ts.T(), ts.API.db.Create(u), "Error saving new test user") + ts.TestUser = u +} + +func (ts *HooksTestSuite) TestRunHTTPHook() { + // setup mock requests for hooks + defer gock.OffAll() + + input := hooks.SendSMSInput{ + User: ts.TestUser, + SMS: hooks.SMS{ + OTP: "123456", + }, + } + testURL := "http://localhost:54321/functions/v1/custom-sms-sender" + ts.Config.Hook.SendSMS.URI = testURL + + unsuccessfulResponse := hooks.AuthHookError{ + HTTPCode: http.StatusUnprocessableEntity, + Message: "test error", + } + + testCases := []struct { + description string + expectError bool + mockResponse hooks.AuthHookError + }{ + { + description: "Hook returns success", + expectError: false, + mockResponse: hooks.AuthHookError{}, + }, + { + description: "Hook returns error", + expectError: true, + mockResponse: unsuccessfulResponse, + }, + } + + gock.New(ts.Config.Hook.SendSMS.URI). + Post("/"). + MatchType("json"). + Reply(http.StatusOK). + JSON(hooks.SendSMSOutput{}) + + gock.New(ts.Config.Hook.SendSMS.URI). + Post("/"). + MatchType("json"). + Reply(http.StatusUnprocessableEntity). + JSON(hooks.SendSMSOutput{HookError: unsuccessfulResponse}) + + for _, tc := range testCases { + ts.Run(tc.description, func() { + req, _ := http.NewRequest("POST", ts.Config.Hook.SendSMS.URI, nil) + body, err := ts.API.runHTTPHook(req, ts.Config.Hook.SendSMS, &input) + + if !tc.expectError { + require.NoError(ts.T(), err) + } else { + require.Error(ts.T(), err) + if body != nil { + var output hooks.SendSMSOutput + require.NoError(ts.T(), json.Unmarshal(body, &output)) + require.Equal(ts.T(), unsuccessfulResponse.HTTPCode, output.HookError.HTTPCode) + require.Equal(ts.T(), unsuccessfulResponse.Message, output.HookError.Message) + } + } + }) + } + require.True(ts.T(), gock.IsDone()) +} + +func (ts *HooksTestSuite) TestShouldRetryWithRetryAfterHeader() { + defer gock.OffAll() + + input := hooks.SendSMSInput{ + User: ts.TestUser, + SMS: hooks.SMS{ + OTP: "123456", + }, + } + testURL := "http://localhost:54321/functions/v1/custom-sms-sender" + ts.Config.Hook.SendSMS.URI = testURL + + gock.New(testURL). + Post("/"). + MatchType("json"). + Reply(http.StatusTooManyRequests). + SetHeader("retry-after", "true").SetHeader("content-type", "application/json") + + // Simulate an additional response for the retry attempt + gock.New(testURL). + Post("/"). + MatchType("json"). + Reply(http.StatusOK). 
+ JSON(hooks.SendSMSOutput{}).SetHeader("content-type", "application/json") + + // Simulate the original HTTP request which triggered the hook + req, err := http.NewRequest("POST", "http://localhost:9998/otp", nil) + require.NoError(ts.T(), err) + + body, err := ts.API.runHTTPHook(req, ts.Config.Hook.SendSMS, &input) + require.NoError(ts.T(), err) + + var output hooks.SendSMSOutput + err = json.Unmarshal(body, &output) + require.NoError(ts.T(), err, "Unmarshal should not fail") + + // Ensure that all expected HTTP interactions (mocks) have been called + require.True(ts.T(), gock.IsDone(), "Expected all mocks to have been called including retry") +} + +func (ts *HooksTestSuite) TestShouldReturnErrorForNonJSONContentType() { + defer gock.OffAll() + + input := hooks.SendSMSInput{ + User: ts.TestUser, + SMS: hooks.SMS{ + OTP: "123456", + }, + } + testURL := "http://localhost:54321/functions/v1/custom-sms-sender" + ts.Config.Hook.SendSMS.URI = testURL + + gock.New(testURL). + Post("/"). + MatchType("json"). + Reply(http.StatusOK). + SetHeader("content-type", "text/plain") + + req, err := http.NewRequest("POST", "http://localhost:9999/otp", nil) + require.NoError(ts.T(), err) + + _, err = ts.API.runHTTPHook(req, ts.Config.Hook.SendSMS, &input) + require.Error(ts.T(), err, "Expected an error due to wrong content type") + require.Contains(ts.T(), err.Error(), "Invalid JSON response.") + + require.True(ts.T(), gock.IsDone(), "Expected all mocks to have been called") +} + +func (ts *HooksTestSuite) TestInvokeHookIntegration() { + // We use the Send Email Hook as illustration + defer gock.OffAll() + hookFunctionSQL := ` + create or replace function invoke_test(input jsonb) + returns json as $$ + begin + return input; + end; $$ language plpgsql;` + require.NoError(ts.T(), ts.API.db.RawQuery(hookFunctionSQL).Exec()) + + testHTTPUri := "http://myauthservice.com/signup" + testHTTPSUri := "https://myauthservice.com/signup" + testPGUri := "pg-functions://postgres/auth/invoke_test" + successOutput := map[string]interface{}{} + authEndpoint := "https://app.myapp.com/otp" + gock.New(testHTTPUri). + Post("/"). + MatchType("json"). + Reply(http.StatusOK). + JSON(successOutput).SetHeader("content-type", "application/json") + + gock.New(testHTTPSUri). + Post("/"). + MatchType("json"). + Reply(http.StatusOK). 
+ JSON(successOutput).SetHeader("content-type", "application/json") + + tests := []struct { + description string + conn *storage.Connection + request *http.Request + input any + output any + uri string + expectedError error + }{ + { + description: "HTTP endpoint success", + conn: nil, + request: httptest.NewRequest("POST", authEndpoint, nil), + input: &hooks.SendEmailInput{}, + output: &hooks.SendEmailOutput{}, + uri: testHTTPUri, + }, + { + description: "HTTPS endpoint success", + conn: nil, + request: httptest.NewRequest("POST", authEndpoint, nil), + input: &hooks.SendEmailInput{}, + output: &hooks.SendEmailOutput{}, + uri: testHTTPSUri, + }, + { + description: "PostgreSQL function success", + conn: ts.API.db, + request: httptest.NewRequest("POST", authEndpoint, nil), + input: &hooks.SendEmailInput{}, + output: &hooks.SendEmailOutput{}, + uri: testPGUri, + }, + { + description: "Unsupported protocol error", + conn: nil, + request: httptest.NewRequest("POST", authEndpoint, nil), + input: &hooks.SendEmailInput{}, + output: &hooks.SendEmailOutput{}, + uri: "ftp://example.com/path", + expectedError: errors.New("unsupported protocol: \"ftp://example.com/path\" only postgres hooks and HTTPS functions are supported at the moment"), + }, + } + + var err error + for _, tc := range tests { + // Set up hook config + ts.Config.Hook.SendEmail.Enabled = true + ts.Config.Hook.SendEmail.URI = tc.uri + require.NoError(ts.T(), ts.Config.Hook.SendEmail.PopulateExtensibilityPoint()) + + ts.Run(tc.description, func() { + err = ts.API.invokeHook(tc.conn, tc.request, tc.input, tc.output) + if tc.expectedError != nil { + require.EqualError(ts.T(), err, tc.expectedError.Error()) + } else { + require.NoError(ts.T(), err) + } + }) + + } + // Ensure that all expected HTTP interactions (mocks) have been called + require.True(ts.T(), gock.IsDone(), "Expected all mocks to have been called including retry") +} diff --git a/auth_v2.169.0/internal/api/identity.go b/auth_v2.169.0/internal/api/identity.go new file mode 100644 index 0000000..53cef86 --- /dev/null +++ b/auth_v2.169.0/internal/api/identity.go @@ -0,0 +1,155 @@ +package api + +import ( + "context" + "net/http" + + "github.com/fatih/structs" + "github.com/go-chi/chi/v5" + "github.com/gofrs/uuid" + "github.com/supabase/auth/internal/api/provider" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +func (a *API) DeleteIdentity(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + + claims := getClaims(ctx) + if claims == nil { + return internalServerError("Could not read claims") + } + + identityID, err := uuid.FromString(chi.URLParam(r, "identity_id")) + if err != nil { + return notFoundError(ErrorCodeValidationFailed, "identity_id must be an UUID") + } + + aud := a.requestAud(ctx, r) + audienceFromClaims, _ := claims.GetAudience() + if len(audienceFromClaims) == 0 || aud != audienceFromClaims[0] { + return forbiddenError(ErrorCodeUnexpectedAudience, "Token audience doesn't match request audience") + } + + user := getUser(ctx) + if len(user.Identities) <= 1 { + return unprocessableEntityError(ErrorCodeSingleIdentityNotDeletable, "User must have at least 1 identity after unlinking") + } + var identityToBeDeleted *models.Identity + for i := range user.Identities { + identity := user.Identities[i] + if identity.ID == identityID { + identityToBeDeleted = &identity + break + } + } + if identityToBeDeleted == nil { + return unprocessableEntityError(ErrorCodeIdentityNotFound, "Identity doesn't exist") + } + 
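+ // Everything below runs in a single transaction: the audit log entry,
+ // the identity deletion, and the follow-up user updates either all
+ // commit or all roll back together.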
+ err = a.db.Transaction(func(tx *storage.Connection) error { + if terr := models.NewAuditLogEntry(r, tx, user, models.IdentityUnlinkAction, "", map[string]interface{}{ + "identity_id": identityToBeDeleted.ID, + "provider": identityToBeDeleted.Provider, + "provider_id": identityToBeDeleted.ProviderID, + }); terr != nil { + return internalServerError("Error recording audit log entry").WithInternalError(terr) + } + if terr := tx.Destroy(identityToBeDeleted); terr != nil { + return internalServerError("Database error deleting identity").WithInternalError(terr) + } + + switch identityToBeDeleted.Provider { + case "phone": + user.PhoneConfirmedAt = nil + if terr := user.SetPhone(tx, ""); terr != nil { + return internalServerError("Database error updating user phone").WithInternalError(terr) + } + if terr := tx.UpdateOnly(user, "phone_confirmed_at"); terr != nil { + return internalServerError("Database error updating user phone").WithInternalError(terr) + } + default: + if terr := user.UpdateUserEmailFromIdentities(tx); terr != nil { + if models.IsUniqueConstraintViolatedError(terr) { + return unprocessableEntityError(ErrorCodeEmailConflictIdentityNotDeletable, "Unable to unlink identity due to email conflict").WithInternalError(terr) + } + return internalServerError("Database error updating user email").WithInternalError(terr) + } + } + if terr := user.UpdateAppMetaDataProviders(tx); terr != nil { + return internalServerError("Database error updating user providers").WithInternalError(terr) + } + return nil + }) + if err != nil { + return err + } + + return sendJSON(w, http.StatusOK, map[string]interface{}{}) +} + +func (a *API) LinkIdentity(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + user := getUser(ctx) + rurl, err := a.GetExternalProviderRedirectURL(w, r, user) + if err != nil { + return err + } + skipHTTPRedirect := r.URL.Query().Get("skip_http_redirect") == "true" + if skipHTTPRedirect { + return sendJSON(w, http.StatusOK, map[string]interface{}{ + "url": rurl, + }) + } + http.Redirect(w, r, rurl, http.StatusFound) + return nil +} + +func (a *API) linkIdentityToUser(r *http.Request, ctx context.Context, tx *storage.Connection, userData *provider.UserProvidedData, providerType string) (*models.User, error) { + targetUser := getTargetUser(ctx) + identity, terr := models.FindIdentityByIdAndProvider(tx, userData.Metadata.Subject, providerType) + if terr != nil { + if !models.IsNotFoundError(terr) { + return nil, internalServerError("Database error finding identity for linking").WithInternalError(terr) + } + } + if identity != nil { + if identity.UserID == targetUser.ID { + return nil, unprocessableEntityError(ErrorCodeIdentityAlreadyExists, "Identity is already linked") + } + return nil, unprocessableEntityError(ErrorCodeIdentityAlreadyExists, "Identity is already linked to another user") + } + if _, terr := a.createNewIdentity(tx, targetUser, providerType, structs.Map(userData.Metadata)); terr != nil { + return nil, terr + } + + if targetUser.GetEmail() == "" { + if terr := targetUser.UpdateUserEmailFromIdentities(tx); terr != nil { + if models.IsUniqueConstraintViolatedError(terr) { + return nil, badRequestError(ErrorCodeEmailExists, DuplicateEmailMsg) + } + return nil, terr + } + if !userData.Metadata.EmailVerified { + if terr := a.sendConfirmation(r, tx, targetUser, models.ImplicitFlow); terr != nil { + return nil, terr + } + return nil, storage.NewCommitWithError(unprocessableEntityError(ErrorCodeEmailNotConfirmed, "Unverified email with %v. 
A confirmation email has been sent to your %v email", providerType, providerType)) + } + if terr := targetUser.Confirm(tx); terr != nil { + return nil, terr + } + + if targetUser.IsAnonymous { + targetUser.IsAnonymous = false + if terr := tx.UpdateOnly(targetUser, "is_anonymous"); terr != nil { + return nil, terr + } + } + } + + if terr := targetUser.UpdateAppMetaDataProviders(tx); terr != nil { + return nil, terr + } + return targetUser, nil +} diff --git a/auth_v2.169.0/internal/api/identity_test.go b/auth_v2.169.0/internal/api/identity_test.go new file mode 100644 index 0000000..999559e --- /dev/null +++ b/auth_v2.169.0/internal/api/identity_test.go @@ -0,0 +1,227 @@ +package api + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gofrs/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/api/provider" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" +) + +type IdentityTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration +} + +func TestIdentity(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + ts := &IdentityTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + suite.Run(t, ts) +} + +func (ts *IdentityTestSuite) SetupTest() { + models.TruncateAll(ts.API.db) + + // Create user + u, err := models.NewUser("", "one@example.com", "password", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating test user model") + require.NoError(ts.T(), ts.API.db.Create(u), "Error saving new test user") + require.NoError(ts.T(), u.Confirm(ts.API.db)) + + // Create identity + i, err := models.NewIdentity(u, "email", map[string]interface{}{ + "sub": u.ID.String(), + "email": u.GetEmail(), + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.API.db.Create(i)) + + // Create user with 2 identities + u, err = models.NewUser("123456789", "two@example.com", "password", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating test user model") + require.NoError(ts.T(), ts.API.db.Create(u), "Error saving new test user") + require.NoError(ts.T(), u.Confirm(ts.API.db)) + require.NoError(ts.T(), u.ConfirmPhone(ts.API.db)) + + i, err = models.NewIdentity(u, "email", map[string]interface{}{ + "sub": u.ID.String(), + "email": u.GetEmail(), + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.API.db.Create(i)) + + i2, err := models.NewIdentity(u, "phone", map[string]interface{}{ + "sub": u.ID.String(), + "phone": u.GetPhone(), + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.API.db.Create(i2)) +} + +func (ts *IdentityTestSuite) TestLinkIdentityToUser() { + u, err := models.FindUserByEmailAndAudience(ts.API.db, "one@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + ctx := withTargetUser(context.Background(), u) + + // link a valid identity + testValidUserData := &provider.UserProvidedData{ + Metadata: &provider.Claims{ + Subject: "test_subject", + }, + } + // request is just used as a placeholder in the function + r := httptest.NewRequest(http.MethodGet, "/identities", nil) + u, err = ts.API.linkIdentityToUser(r, ctx, ts.API.db, testValidUserData, "test") + require.NoError(ts.T(), err) + + // load associated identities for the user + ts.API.db.Load(u, "Identities") + require.Len(ts.T(), u.Identities, 2) + require.Equal(ts.T(), u.AppMetaData["provider"], "email") + require.Equal(ts.T(), 
u.AppMetaData["providers"], []string{"email", "test"}) + + // link an already existing identity + testExistingUserData := &provider.UserProvidedData{ + Metadata: &provider.Claims{ + Subject: u.ID.String(), + }, + } + u, err = ts.API.linkIdentityToUser(r, ctx, ts.API.db, testExistingUserData, "email") + require.ErrorIs(ts.T(), err, unprocessableEntityError(ErrorCodeIdentityAlreadyExists, "Identity is already linked")) + require.Nil(ts.T(), u) +} + +func (ts *IdentityTestSuite) TestUnlinkIdentityError() { + ts.Config.Security.ManualLinkingEnabled = true + userWithOneIdentity, err := models.FindUserByEmailAndAudience(ts.API.db, "one@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + userWithTwoIdentities, err := models.FindUserByEmailAndAudience(ts.API.db, "two@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + cases := []struct { + desc string + user *models.User + identityId uuid.UUID + expectedError *HTTPError + }{ + { + desc: "User must have at least 1 identity after unlinking", + user: userWithOneIdentity, + identityId: userWithOneIdentity.Identities[0].ID, + expectedError: unprocessableEntityError(ErrorCodeSingleIdentityNotDeletable, "User must have at least 1 identity after unlinking"), + }, + { + desc: "Identity doesn't exist", + user: userWithTwoIdentities, + identityId: uuid.Must(uuid.NewV4()), + expectedError: unprocessableEntityError(ErrorCodeIdentityNotFound, "Identity doesn't exist"), + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + token := ts.generateAccessTokenAndSession(c.user) + req, err := http.NewRequest(http.MethodDelete, fmt.Sprintf("/user/identities/%s", c.identityId), nil) + require.NoError(ts.T(), err) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), c.expectedError.HTTPStatus, w.Code) + + var data HTTPError + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + require.Equal(ts.T(), c.expectedError.Message, data.Message) + }) + } +} + +func (ts *IdentityTestSuite) TestUnlinkIdentity() { + ts.Config.Security.ManualLinkingEnabled = true + + // we want to test 2 cases here: unlinking a phone identity and email identity from a user + cases := []struct { + desc string + // the provider to be unlinked + provider string + // the remaining provider that should be linked to the user + providerRemaining string + }{ + { + desc: "Unlink phone identity successfully", + provider: "phone", + providerRemaining: "email", + }, + { + desc: "Unlink email identity successfully", + provider: "email", + providerRemaining: "phone", + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + // teardown and reset the state of the db to prevent running into errors + ts.SetupTest() + u, err := models.FindUserByEmailAndAudience(ts.API.db, "two@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + identity, err := models.FindIdentityByIdAndProvider(ts.API.db, u.ID.String(), c.provider) + require.NoError(ts.T(), err) + + token := ts.generateAccessTokenAndSession(u) + req, err := http.NewRequest(http.MethodDelete, fmt.Sprintf("/user/identities/%s", identity.ID), nil) + require.NoError(ts.T(), err) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + // sanity checks + u, err = models.FindUserByID(ts.API.db, u.ID) + require.NoError(ts.T(), err) + require.Len(ts.T(), u.Identities, 
1) + require.Equal(ts.T(), u.Identities[0].Provider, c.providerRemaining) + + // conditional checks depending on the provider that was unlinked + switch c.provider { + case "phone": + require.Equal(ts.T(), "", u.GetPhone()) + require.Nil(ts.T(), u.PhoneConfirmedAt) + case "email": + require.Equal(ts.T(), "", u.GetEmail()) + require.Nil(ts.T(), u.EmailConfirmedAt) + } + + // user still has a phone / email identity linked so it should not be unconfirmed + require.NotNil(ts.T(), u.ConfirmedAt) + }) + } + +} + +func (ts *IdentityTestSuite) generateAccessTokenAndSession(u *models.User) string { + s, err := models.NewSession(u.ID, nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.API.db.Create(s)) + + req := httptest.NewRequest(http.MethodPost, "/token?grant_type=password", nil) + token, _, err := ts.API.generateAccessToken(req, ts.API.db, u, &s.ID, models.PasswordGrant) + require.NoError(ts.T(), err) + return token + +} diff --git a/auth_v2.169.0/internal/api/invite.go b/auth_v2.169.0/internal/api/invite.go new file mode 100644 index 0000000..f0260dd --- /dev/null +++ b/auth_v2.169.0/internal/api/invite.go @@ -0,0 +1,92 @@ +package api + +import ( + "net/http" + + "github.com/fatih/structs" + "github.com/supabase/auth/internal/api/provider" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +// InviteParams are the parameters the Signup endpoint accepts +type InviteParams struct { + Email string `json:"email"` + Data map[string]interface{} `json:"data"` +} + +// Invite is the endpoint for inviting a new user +func (a *API) Invite(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + adminUser := getAdminUser(ctx) + params := &InviteParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + + var err error + params.Email, err = a.validateEmail(params.Email) + if err != nil { + return err + } + + aud := a.requestAud(ctx, r) + user, err := models.FindUserByEmailAndAudience(db, params.Email, aud) + if err != nil && !models.IsNotFoundError(err) { + return internalServerError("Database error finding user").WithInternalError(err) + } + + err = db.Transaction(func(tx *storage.Connection) error { + if user != nil { + if user.IsConfirmed() { + return unprocessableEntityError(ErrorCodeEmailExists, DuplicateEmailMsg) + } + } else { + signupParams := SignupParams{ + Email: params.Email, + Data: params.Data, + Aud: aud, + Provider: "email", + } + + // because params above sets no password, this method + // is not computationally hard so it can be used within + // a database transaction + user, err = signupParams.ToUserModel(false /* <- isSSOUser */) + if err != nil { + return err + } + + user, err = a.signupNewUser(tx, user) + if err != nil { + return err + } + identity, err := a.createNewIdentity(tx, user, "email", structs.Map(provider.Claims{ + Subject: user.ID.String(), + Email: user.GetEmail(), + })) + if err != nil { + return err + } + user.Identities = []models.Identity{*identity} + } + + if terr := models.NewAuditLogEntry(r, tx, adminUser, models.UserInvitedAction, "", map[string]interface{}{ + "user_id": user.ID, + "user_email": user.Email, + }); terr != nil { + return terr + } + + if err := a.sendInvite(r, tx, user); err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + + return sendJSON(w, http.StatusOK, user) +} diff --git a/auth_v2.169.0/internal/api/invite_test.go b/auth_v2.169.0/internal/api/invite_test.go new file mode 100644 
index 0000000..ff0baca --- /dev/null +++ b/auth_v2.169.0/internal/api/invite_test.go @@ -0,0 +1,404 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + jwt "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/crypto" + "github.com/supabase/auth/internal/models" +) + +type InviteTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration + + token string +} + +func TestInvite(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := &InviteTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *InviteTestSuite) SetupTest() { + models.TruncateAll(ts.API.db) + + // Setup response recorder with super admin privileges + ts.token = ts.makeSuperAdmin("") +} + +func (ts *InviteTestSuite) makeSuperAdmin(email string) string { + // Cleanup existing user, if they already exist + if u, _ := models.FindUserByEmailAndAudience(ts.API.db, email, ts.Config.JWT.Aud); u != nil { + require.NoError(ts.T(), ts.API.db.Destroy(u), "Error deleting user") + } + + u, err := models.NewUser("123456789", email, "test", ts.Config.JWT.Aud, map[string]interface{}{"full_name": "Test User"}) + require.NoError(ts.T(), err, "Error making new user") + require.NoError(ts.T(), ts.API.db.Create(u)) + + u.Role = "supabase_admin" + + var token string + + session, err := models.NewSession(u.ID, nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.API.db.Create(session)) + + req := httptest.NewRequest(http.MethodPost, "/invite", nil) + token, _, err = ts.API.generateAccessToken(req, ts.API.db, u, &session.ID, models.Invite) + + require.NoError(ts.T(), err, "Error generating access token") + + p := jwt.NewParser(jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Name})) + _, err = p.Parse(token, func(token *jwt.Token) (interface{}, error) { + return []byte(ts.Config.JWT.Secret), nil + }) + require.NoError(ts.T(), err, "Error parsing token") + + return token +} + +func (ts *InviteTestSuite) TestInvite() { + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test@example.com", + "data": map[string]interface{}{ + "a": 1, + }, + })) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/invite", &buffer) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + // Setup response recorder + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusOK, w.Code) +} + +func (ts *InviteTestSuite) TestInviteAfterSignupShouldNotReturnSensitiveFields() { + // To allow us to send signup and invite request in succession + ts.Config.SMTP.MaxFrequency = 5 + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test@example.com", + "data": map[string]interface{}{ + "a": 1, + }, + })) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/invite", &buffer) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + // Setup response recorder + w := httptest.NewRecorder() + + 
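+ // First invite the address as an admin, then sign the same address up
+ // directly; the signup response further down must not leak the invited
+ // user's identities or metadata.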
ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusOK, w.Code) + + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test@example.com", + "password": "test123", + "data": map[string]interface{}{ + "a": 1, + }, + })) + + // Setup request + req = httptest.NewRequest(http.MethodPost, "/signup", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup response recorder + x := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(x, req) + + require.Equal(ts.T(), http.StatusOK, x.Code) + + data := models.User{} + require.NoError(ts.T(), json.NewDecoder(x.Body).Decode(&data)) + // Sensitive fields + require.Equal(ts.T(), 0, len(data.Identities)) + require.Equal(ts.T(), 0, len(data.UserMetaData)) +} + +func (ts *InviteTestSuite) TestInvite_WithoutAccess() { + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test@example.com", + "data": map[string]interface{}{ + "a": 1, + }, + })) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/invite", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup response recorder + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusUnauthorized, w.Code) // 401 OK because the invite request above has no Authorization header +} + +func (ts *InviteTestSuite) TestVerifyInvite() { + cases := []struct { + desc string + email string + requestBody map[string]interface{} + expected int + }{ + { + "Verify invite with password", + "test@example.com", + map[string]interface{}{ + "email": "test@example.com", + "type": "invite", + "token": "asdf", + "password": "testing", + }, + http.StatusOK, + }, + { + "Verify invite with no password", + "test1@example.com", + map[string]interface{}{ + "email": "test1@example.com", + "type": "invite", + "token": "asdf", + }, + http.StatusOK, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + user, err := models.NewUser("", c.email, "", ts.Config.JWT.Aud, nil) + now := time.Now() + user.InvitedAt = &now + user.ConfirmationSentAt = &now + user.EncryptedPassword = nil + user.ConfirmationToken = crypto.GenerateTokenHash(c.email, c.requestBody["token"].(string)) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.API.db.Create(user)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, user.ID, user.GetEmail(), user.ConfirmationToken, models.ConfirmationToken)) + + // Find test user + _, err = models.FindUserByEmailAndAudience(ts.API.db, c.email, ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.requestBody)) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/verify", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup response recorder + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + assert.Equal(ts.T(), c.expected, w.Code, w.Body.String()) + }) + } +} + +func (ts *InviteTestSuite) TestInviteExternalGitlab() { + tokenCount, userCount := 0, 0 + code := "authcode" + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/oauth/token": + tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Gitlab.RedirectURI, 
r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"access_token":"gitlab_token","expires_in":100000}`) + case "/api/v4/user": + userCount++ + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"name":"Gitlab Test","email":"gitlab@example.com","avatar_url":"http://example.com/avatar","confirmed_at": "2020-01-01T00:00:00.000Z"}`) + case "/api/v4/user/emails": + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `[]`) + default: + w.WriteHeader(http.StatusInternalServerError) + ts.Fail("unknown gitlab oauth call %s", r.URL.Path) + } + })) + defer server.Close() + ts.Config.External.Gitlab.URL = server.URL + + // invite user + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(InviteParams{ + Email: "gitlab@example.com", + })) + req := httptest.NewRequest(http.MethodPost, "http://localhost/invite", &buffer) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusOK, w.Code) + + // Find test user + user, err := models.FindUserByEmailAndAudience(ts.API.db, "gitlab@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + // get redirect url w/ state + req = httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=gitlab&invite_token="+user.ConfirmationToken, nil) + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + state := q.Get("state") + + // auth server callback + testURL, err := url.Parse("http://localhost/callback") + ts.Require().NoError(err) + v := testURL.Query() + v.Set("code", code) + v.Set("state", state) + testURL.RawQuery = v.Encode() + req = httptest.NewRequest(http.MethodGet, testURL.String(), nil) + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err = url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + + // ensure redirect has #access_token=... 
+ v, err = url.ParseQuery(u.Fragment) + ts.Require().NoError(err) + ts.Require().Empty(v.Get("error_description")) + ts.Require().Empty(v.Get("error")) + + ts.NotEmpty(v.Get("access_token")) + ts.NotEmpty(v.Get("refresh_token")) + ts.NotEmpty(v.Get("expires_in")) + ts.Equal("bearer", v.Get("token_type")) + + ts.Equal(1, tokenCount) + ts.Equal(1, userCount) + + // ensure user has been created with metadata + user, err = models.FindUserByEmailAndAudience(ts.API.db, "gitlab@example.com", ts.Config.JWT.Aud) + ts.Require().NoError(err) + ts.Equal("Gitlab Test", user.UserMetaData["full_name"]) + ts.Equal("http://example.com/avatar", user.UserMetaData["avatar_url"]) + ts.Equal("gitlab", user.AppMetaData["provider"]) + ts.Equal([]interface{}{"gitlab"}, user.AppMetaData["providers"]) +} + +func (ts *InviteTestSuite) TestInviteExternalGitlab_MismatchedEmails() { + tokenCount, userCount := 0, 0 + code := "authcode" + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/oauth/token": + tokenCount++ + ts.Equal(code, r.FormValue("code")) + ts.Equal("authorization_code", r.FormValue("grant_type")) + ts.Equal(ts.Config.External.Gitlab.RedirectURI, r.FormValue("redirect_uri")) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"access_token":"gitlab_token","expires_in":100000}`) + case "/api/v4/user": + userCount++ + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `{"name":"Gitlab Test","email":"gitlab+mismatch@example.com","avatar_url":"http://example.com/avatar","confirmed_at": "2020-01-01T00:00:00.000Z"}`) + case "/api/v4/user/emails": + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, `[]`) + default: + w.WriteHeader(500) + ts.Fail("unknown gitlab oauth call %s", r.URL.Path) + } + })) + defer server.Close() + ts.Config.External.Gitlab.URL = server.URL + + // invite user + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(InviteParams{ + Email: "gitlab@example.com", + })) + req := httptest.NewRequest(http.MethodPost, "http://localhost/invite", &buffer) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusOK, w.Code) + + // Find test user + user, err := models.FindUserByEmailAndAudience(ts.API.db, "gitlab@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + // get redirect url w/ state + req = httptest.NewRequest(http.MethodGet, "http://localhost/authorize?provider=gitlab&invite_token="+user.ConfirmationToken, nil) + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err := url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + q := u.Query() + state := q.Get("state") + + // auth server callback + testURL, err := url.Parse("http://localhost/callback") + ts.Require().NoError(err) + v := testURL.Query() + v.Set("code", code) + v.Set("state", state) + testURL.RawQuery = v.Encode() + req = httptest.NewRequest(http.MethodGet, testURL.String(), nil) + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + ts.Require().Equal(http.StatusFound, w.Code) + u, err = url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + + // ensure redirect has #access_token=... 
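+ // Unlike the success case above, a mismatched email produces an error
+ // redirect with details in the query string, not a token fragment, so
+ // u.RawQuery is parsed instead.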
+ v, err = url.ParseQuery(u.RawQuery) + ts.Require().NoError(err, u.RawQuery) + ts.Require().NotEmpty(v.Get("error_description")) + ts.Require().Equal("invalid_request", v.Get("error")) +} diff --git a/auth_v2.169.0/internal/api/jwks.go b/auth_v2.169.0/internal/api/jwks.go new file mode 100644 index 0000000..b8304d2 --- /dev/null +++ b/auth_v2.169.0/internal/api/jwks.go @@ -0,0 +1,61 @@ +package api + +import ( + "net/http" + + jwt "github.com/golang-jwt/jwt/v5" + "github.com/lestrrat-go/jwx/v2/jwa" + jwk "github.com/lestrrat-go/jwx/v2/jwk" + "github.com/supabase/auth/internal/conf" +) + +type JwksResponse struct { + Keys []jwk.Key `json:"keys"` +} + +func (a *API) Jwks(w http.ResponseWriter, r *http.Request) error { + config := a.config + resp := JwksResponse{ + Keys: []jwk.Key{}, + } + + for _, key := range config.JWT.Keys { + // don't expose hmac jwk in endpoint + if key.PublicKey == nil || key.PublicKey.KeyType() == jwa.OctetSeq { + continue + } + resp.Keys = append(resp.Keys, key.PublicKey) + } + + w.Header().Set("Cache-Control", "public, max-age=600") + return sendJSON(w, http.StatusOK, resp) +} + +func signJwt(config *conf.JWTConfiguration, claims jwt.Claims) (string, error) { + signingJwk, err := conf.GetSigningJwk(config) + if err != nil { + return "", err + } + signingMethod := conf.GetSigningAlg(signingJwk) + token := jwt.NewWithClaims(signingMethod, claims) + if token.Header == nil { + token.Header = make(map[string]interface{}) + } + + if _, ok := token.Header["kid"]; !ok { + if kid := signingJwk.KeyID(); kid != "" { + token.Header["kid"] = kid + } + } + // this serializes the aud claim to a string + jwt.MarshalSingleStringAsArray = false + signingKey, err := conf.GetSigningKey(signingJwk) + if err != nil { + return "", err + } + signed, err := token.SignedString(signingKey) + if err != nil { + return "", err + } + return signed, nil +} diff --git a/auth_v2.169.0/internal/api/jwks_test.go b/auth_v2.169.0/internal/api/jwks_test.go new file mode 100644 index 0000000..786d343 --- /dev/null +++ b/auth_v2.169.0/internal/api/jwks_test.go @@ -0,0 +1,79 @@ +package api + +import ( + "crypto/rand" + "crypto/rsa" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/lestrrat-go/jwx/v2/jwk" + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/conf" +) + +func TestJwks(t *testing.T) { + // generate RSA key pair for testing + rsaPrivateKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err) + rsaJwkPrivate, err := jwk.FromRaw(rsaPrivateKey) + require.NoError(t, err) + rsaJwkPublic, err := rsaJwkPrivate.PublicKey() + require.NoError(t, err) + kid := rsaJwkPublic.KeyID() + + cases := []struct { + desc string + config conf.JWTConfiguration + expectedLen int + }{ + { + desc: "hmac key should not be returned", + config: conf.JWTConfiguration{ + Aud: "authenticated", + Secret: "test-secret", + }, + expectedLen: 0, + }, + { + desc: "rsa public key returned", + config: conf.JWTConfiguration{ + Aud: "authenticated", + Secret: "test-secret", + Keys: conf.JwtKeysDecoder{ + kid: conf.JwkInfo{ + PublicKey: rsaJwkPublic, + PrivateKey: rsaJwkPrivate, + }, + }, + }, + expectedLen: 1, + }, + } + + for _, c := range cases { + t.Run(c.desc, func(t *testing.T) { + mockAPI, _, err := setupAPIForTest() + require.NoError(t, err) + mockAPI.config.JWT = c.config + + req := httptest.NewRequest(http.MethodGet, "/.well-known/jwks.json", nil) + w := httptest.NewRecorder() + mockAPI.handler.ServeHTTP(w, req) + require.Equal(t, http.StatusOK, w.Code) + 
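+ // Decode generically, then round-trip each returned key through
+ // jwk.ParseKey so it can be compared against the configured public key.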
+ var data map[string]interface{} + require.NoError(t, json.NewDecoder(w.Body).Decode(&data)) + require.Len(t, data["keys"], c.expectedLen) + + for _, key := range data["keys"].([]interface{}) { + bytes, err := json.Marshal(key) + require.NoError(t, err) + actualKey, err := jwk.ParseKey(bytes) + require.NoError(t, err) + require.Equal(t, c.config.Keys[kid].PublicKey, actualKey) + } + }) + } +} diff --git a/auth_v2.169.0/internal/api/logout.go b/auth_v2.169.0/internal/api/logout.go new file mode 100644 index 0000000..8afec6a --- /dev/null +++ b/auth_v2.169.0/internal/api/logout.go @@ -0,0 +1,73 @@ +package api + +import ( + "fmt" + "net/http" + + "github.com/sirupsen/logrus" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +type LogoutBehavior string + +const ( + LogoutGlobal LogoutBehavior = "global" + LogoutLocal LogoutBehavior = "local" + LogoutOthers LogoutBehavior = "others" +) + +// Logout is the endpoint for logging out a user and thereby revoking any refresh tokens +func (a *API) Logout(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + scope := LogoutGlobal + + if r.URL.Query() != nil { + switch r.URL.Query().Get("scope") { + case "", "global": + scope = LogoutGlobal + + case "local": + scope = LogoutLocal + + case "others": + scope = LogoutOthers + + default: + return badRequestError(ErrorCodeValidationFailed, fmt.Sprintf("Unsupported logout scope %q", r.URL.Query().Get("scope"))) + } + } + + s := getSession(ctx) + u := getUser(ctx) + + err := db.Transaction(func(tx *storage.Connection) error { + if terr := models.NewAuditLogEntry(r, tx, u, models.LogoutAction, "", nil); terr != nil { + return terr + } + + if s == nil { + logrus.Infof("user has an empty session_id claim: %s", u.ID) + } else { + //exhaustive:ignore Default case is handled below. 
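+ // LogoutLocal and LogoutOthers return early; LogoutGlobal falls through
+ // to models.Logout below, which revokes every session the user has.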
+ switch scope { + case LogoutLocal: + return models.LogoutSession(tx, s.ID) + + case LogoutOthers: + return models.LogoutAllExceptMe(tx, s.ID, u.ID) + } + } + + // default mode, log out everywhere + return models.Logout(tx, u.ID) + }) + if err != nil { + return internalServerError("Error logging out user").WithInternalError(err) + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} diff --git a/auth_v2.169.0/internal/api/logout_test.go b/auth_v2.169.0/internal/api/logout_test.go new file mode 100644 index 0000000..b1a0fdb --- /dev/null +++ b/auth_v2.169.0/internal/api/logout_test.go @@ -0,0 +1,75 @@ +package api + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" +) + +type LogoutTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration + token string +} + +func TestLogout(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := &LogoutTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *LogoutTestSuite) SetupTest() { + models.TruncateAll(ts.API.db) + + u, err := models.NewUser("", "test@example.com", "password", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating test user model") + require.NoError(ts.T(), ts.API.db.Create(u), "Error saving new test user") + + // generate access token to use for logout + var t string + s, err := models.NewSession(u.ID, nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.API.db.Create(s)) + + req := httptest.NewRequest(http.MethodPost, "/token?grant_type=password", nil) + t, _, err = ts.API.generateAccessToken(req, ts.API.db, u, &s.ID, models.PasswordGrant) + require.NoError(ts.T(), err) + ts.token = t +} + +func (ts *LogoutTestSuite) TestLogoutSuccess() { + for _, scope := range []string{"", "global", "local", "others"} { + ts.SetupTest() + + reqURL, err := url.ParseRequestURI("http://localhost/logout") + require.NoError(ts.T(), err) + + if scope != "" { + query := reqURL.Query() + query.Set("scope", scope) + reqURL.RawQuery = query.Encode() + } + + req := httptest.NewRequest(http.MethodPost, reqURL.String(), nil) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.token)) + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusNoContent, w.Code) + } +} diff --git a/auth_v2.169.0/internal/api/magic_link.go b/auth_v2.169.0/internal/api/magic_link.go new file mode 100644 index 0000000..57b0a7d --- /dev/null +++ b/auth_v2.169.0/internal/api/magic_link.go @@ -0,0 +1,164 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + + "github.com/supabase/auth/internal/crypto" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +// MagicLinkParams holds the parameters for a magic link request +type MagicLinkParams struct { + Email string `json:"email"` + Data map[string]interface{} `json:"data"` + CodeChallengeMethod string `json:"code_challenge_method"` + CodeChallenge string `json:"code_challenge"` +} + +func (p *MagicLinkParams) Validate(a *API) error { + if p.Email == "" { + return unprocessableEntityError(ErrorCodeValidationFailed, "Password recovery requires an email") + } + var err error + p.Email, err = a.validateEmail(p.Email) + if err != nil { + return err + } + if err := 
validatePKCEParams(p.CodeChallengeMethod, p.CodeChallenge); err != nil { + return err + } + return nil +} + +// MagicLink sends a recovery email +func (a *API) MagicLink(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + config := a.config + + if !config.External.Email.Enabled { + return unprocessableEntityError(ErrorCodeEmailProviderDisabled, "Email logins are disabled") + } + + if !config.External.Email.MagicLinkEnabled { + return unprocessableEntityError(ErrorCodeEmailProviderDisabled, "Login with magic link is disabled") + } + + params := &MagicLinkParams{} + jsonDecoder := json.NewDecoder(r.Body) + err := jsonDecoder.Decode(params) + if err != nil { + return badRequestError(ErrorCodeBadJSON, "Could not read verification params: %v", err).WithInternalError(err) + } + + if err := params.Validate(a); err != nil { + return err + } + + if params.Data == nil { + params.Data = make(map[string]interface{}) + } + + flowType := getFlowFromChallenge(params.CodeChallenge) + + var isNewUser bool + aud := a.requestAud(ctx, r) + user, err := models.FindUserByEmailAndAudience(db, params.Email, aud) + if err != nil { + if models.IsNotFoundError(err) { + isNewUser = true + } else { + return internalServerError("Database error finding user").WithInternalError(err) + } + } + if user != nil { + isNewUser = !user.IsConfirmed() + } + if isNewUser { + // User either doesn't exist or hasn't completed the signup process. + // Sign them up with temporary password. + password := crypto.GeneratePassword(config.Password.RequiredCharacters, 33) + + signUpParams := &SignupParams{ + Email: params.Email, + Password: password, + Data: params.Data, + CodeChallengeMethod: params.CodeChallengeMethod, + CodeChallenge: params.CodeChallenge, + } + newBodyContent, err := json.Marshal(signUpParams) + if err != nil { + // SignupParams must always be marshallable + panic(fmt.Errorf("failed to marshal SignupParams: %w", err)) + } + r.Body = io.NopCloser(strings.NewReader(string(newBodyContent))) + r.ContentLength = int64(len(string(newBodyContent))) + + fakeResponse := &responseStub{} + if config.Mailer.Autoconfirm { + // signups are autoconfirmed, send magic link after signup + if err := a.Signup(fakeResponse, r); err != nil { + return err + } + newBodyContent := &SignupParams{ + Email: params.Email, + Data: params.Data, + CodeChallengeMethod: params.CodeChallengeMethod, + CodeChallenge: params.CodeChallenge, + } + metadata, err := json.Marshal(newBodyContent) + if err != nil { + // SignupParams must always be marshallable + panic(fmt.Errorf("failed to marshal SignupParams: %w", err)) + } + r.Body = io.NopCloser(bytes.NewReader(metadata)) + return a.MagicLink(w, r) + } + // otherwise confirmation email already contains 'magic link' + if err := a.Signup(fakeResponse, r); err != nil { + return err + } + + return sendJSON(w, http.StatusOK, make(map[string]string)) + } + + if isPKCEFlow(flowType) { + if _, err = generateFlowState(a.db, models.MagicLink.String(), models.MagicLink, params.CodeChallengeMethod, params.CodeChallenge, &user.ID); err != nil { + return err + } + } + + err = db.Transaction(func(tx *storage.Connection) error { + if terr := models.NewAuditLogEntry(r, tx, user, models.UserRecoveryRequestedAction, "", nil); terr != nil { + return terr + } + return a.sendMagicLink(r, tx, user, flowType) + }) + if err != nil { + return err + } + + return sendJSON(w, http.StatusOK, make(map[string]string)) +} + +// responseStub only implement http responsewriter for ignoring +// 
incoming data from methods where it passed +type responseStub struct { +} + +func (rw *responseStub) Header() http.Header { + return http.Header{} +} + +func (rw *responseStub) Write(data []byte) (int, error) { + return 1, nil +} + +func (rw *responseStub) WriteHeader(statusCode int) { +} diff --git a/auth_v2.169.0/internal/api/mail.go b/auth_v2.169.0/internal/api/mail.go new file mode 100644 index 0000000..f2ea69b --- /dev/null +++ b/auth_v2.169.0/internal/api/mail.go @@ -0,0 +1,685 @@ +package api + +import ( + "net/http" + "regexp" + "strings" + "time" + + "github.com/supabase/auth/internal/hooks" + mail "github.com/supabase/auth/internal/mailer" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + + "github.com/badoux/checkmail" + "github.com/fatih/structs" + "github.com/pkg/errors" + "github.com/sethvargo/go-password/password" + "github.com/supabase/auth/internal/api/provider" + "github.com/supabase/auth/internal/crypto" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" + "github.com/supabase/auth/internal/utilities" +) + +var ( + EmailRateLimitExceeded error = errors.New("email rate limit exceeded") +) + +type GenerateLinkParams struct { + Type string `json:"type"` + Email string `json:"email"` + NewEmail string `json:"new_email"` + Password string `json:"password"` + Data map[string]interface{} `json:"data"` + RedirectTo string `json:"redirect_to"` +} + +type GenerateLinkResponse struct { + models.User + ActionLink string `json:"action_link"` + EmailOtp string `json:"email_otp"` + HashedToken string `json:"hashed_token"` + VerificationType string `json:"verification_type"` + RedirectTo string `json:"redirect_to"` +} + +func (a *API) adminGenerateLink(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + config := a.config + mailer := a.Mailer() + adminUser := getAdminUser(ctx) + params := &GenerateLinkParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + + var err error + params.Email, err = a.validateEmail(params.Email) + if err != nil { + return err + } + referrer := utilities.GetReferrer(r, config) + if utilities.IsRedirectURLValid(config, params.RedirectTo) { + referrer = params.RedirectTo + } + + aud := a.requestAud(ctx, r) + user, err := models.FindUserByEmailAndAudience(db, params.Email, aud) + if err != nil { + if models.IsNotFoundError(err) { + switch params.Type { + case mail.MagicLinkVerification: + params.Type = mail.SignupVerification + params.Password, err = password.Generate(64, 10, 1, false, true) + if err != nil { + // password generation must always succeed + panic(err) + } + case mail.RecoveryVerification, mail.EmailChangeCurrentVerification, mail.EmailChangeNewVerification: + return notFoundError(ErrorCodeUserNotFound, "User with this email not found") + } + } else { + return internalServerError("Database error finding user").WithInternalError(err) + } + } + + var url string + now := time.Now() + otp := crypto.GenerateOtp(config.Mailer.OtpLength) + + hashedToken := crypto.GenerateTokenHash(params.Email, otp) + + var signupUser *models.User + if params.Type == mail.SignupVerification && user == nil { + signupParams := &SignupParams{ + Email: params.Email, + Password: params.Password, + Data: params.Data, + Provider: "email", + Aud: aud, + } + + if err := a.validateSignupParams(ctx, signupParams); err != nil { + return err + } + + signupUser, err = signupParams.ToUserModel(false /* <- isSSOUser */) + if err != nil { + 
return err + } + } + + err = db.Transaction(func(tx *storage.Connection) error { + var terr error + switch params.Type { + case mail.MagicLinkVerification, mail.RecoveryVerification: + if terr = models.NewAuditLogEntry(r, tx, user, models.UserRecoveryRequestedAction, "", nil); terr != nil { + return terr + } + user.RecoveryToken = hashedToken + user.RecoverySentAt = &now + terr = tx.UpdateOnly(user, "recovery_token", "recovery_sent_at") + if terr != nil { + terr = errors.Wrap(terr, "Database error updating user for recovery") + return terr + } + + terr = models.CreateOneTimeToken(tx, user.ID, user.GetEmail(), user.RecoveryToken, models.RecoveryToken) + if terr != nil { + terr = errors.Wrap(terr, "Database error creating recovery token in admin") + return terr + } + case mail.InviteVerification: + if user != nil { + if user.IsConfirmed() { + return unprocessableEntityError(ErrorCodeEmailExists, DuplicateEmailMsg) + } + } else { + signupParams := &SignupParams{ + Email: params.Email, + Data: params.Data, + Provider: "email", + Aud: aud, + } + + // because params above sets no password, this + // method is not computationally hard so it can + // be used within a database transaction + user, terr = signupParams.ToUserModel(false /* <- isSSOUser */) + if terr != nil { + return terr + } + + user, terr = a.signupNewUser(tx, user) + if terr != nil { + return terr + } + identity, terr := a.createNewIdentity(tx, user, "email", structs.Map(provider.Claims{ + Subject: user.ID.String(), + Email: user.GetEmail(), + })) + if terr != nil { + return terr + } + user.Identities = []models.Identity{*identity} + } + if terr = models.NewAuditLogEntry(r, tx, adminUser, models.UserInvitedAction, "", map[string]interface{}{ + "user_id": user.ID, + "user_email": user.Email, + }); terr != nil { + return terr + } + user.ConfirmationToken = hashedToken + user.ConfirmationSentAt = &now + user.InvitedAt = &now + terr = tx.UpdateOnly(user, "confirmation_token", "confirmation_sent_at", "invited_at") + if terr != nil { + terr = errors.Wrap(terr, "Database error updating user for invite") + return terr + } + terr = models.CreateOneTimeToken(tx, user.ID, user.GetEmail(), user.ConfirmationToken, models.ConfirmationToken) + if terr != nil { + terr = errors.Wrap(terr, "Database error creating confirmation token for invite in admin") + return terr + } + case mail.SignupVerification: + if user != nil { + if user.IsConfirmed() { + return unprocessableEntityError(ErrorCodeEmailExists, DuplicateEmailMsg) + } + if err := user.UpdateUserMetaData(tx, params.Data); err != nil { + return internalServerError("Database error updating user").WithInternalError(err) + } + } else { + // you should never use SignupParams with + // password here to generate a new user, use + // signupUser which is a model generated from + // SignupParams above + user, terr = a.signupNewUser(tx, signupUser) + if terr != nil { + return terr + } + identity, terr := a.createNewIdentity(tx, user, "email", structs.Map(provider.Claims{ + Subject: user.ID.String(), + Email: user.GetEmail(), + })) + if terr != nil { + return terr + } + user.Identities = []models.Identity{*identity} + } + user.ConfirmationToken = hashedToken + user.ConfirmationSentAt = &now + terr = tx.UpdateOnly(user, "confirmation_token", "confirmation_sent_at") + if terr != nil { + terr = errors.Wrap(terr, "Database error updating user for confirmation") + return terr + } + terr = models.CreateOneTimeToken(tx, user.ID, user.GetEmail(), user.ConfirmationToken, models.ConfirmationToken) + if terr != nil 
{ + terr = errors.Wrap(terr, "Database error creating confirmation token for signup in admin") + return terr + } + case mail.EmailChangeCurrentVerification, mail.EmailChangeNewVerification: + if !config.Mailer.SecureEmailChangeEnabled && params.Type == "email_change_current" { + return badRequestError(ErrorCodeValidationFailed, "Enable secure email change to generate link for current email") + } + params.NewEmail, terr = a.validateEmail(params.NewEmail) + if terr != nil { + return terr + } + if duplicateUser, terr := models.IsDuplicatedEmail(tx, params.NewEmail, user.Aud, user); terr != nil { + return internalServerError("Database error checking email").WithInternalError(terr) + } else if duplicateUser != nil { + return unprocessableEntityError(ErrorCodeEmailExists, DuplicateEmailMsg) + } + now := time.Now() + user.EmailChangeSentAt = &now + user.EmailChange = params.NewEmail + user.EmailChangeConfirmStatus = zeroConfirmation + if params.Type == "email_change_current" { + user.EmailChangeTokenCurrent = hashedToken + } else if params.Type == "email_change_new" { + user.EmailChangeTokenNew = crypto.GenerateTokenHash(params.NewEmail, otp) + } + terr = tx.UpdateOnly(user, "email_change_token_current", "email_change_token_new", "email_change", "email_change_sent_at", "email_change_confirm_status") + if terr != nil { + terr = errors.Wrap(terr, "Database error updating user for email change") + return terr + } + if user.EmailChangeTokenCurrent != "" { + terr = models.CreateOneTimeToken(tx, user.ID, user.GetEmail(), user.EmailChangeTokenCurrent, models.EmailChangeTokenCurrent) + if terr != nil { + terr = errors.Wrap(terr, "Database error creating email change token current in admin") + return terr + } + } + if user.EmailChangeTokenNew != "" { + terr = models.CreateOneTimeToken(tx, user.ID, user.EmailChange, user.EmailChangeTokenNew, models.EmailChangeTokenNew) + if terr != nil { + terr = errors.Wrap(terr, "Database error creating email change token new in admin") + return terr + } + } + default: + return badRequestError(ErrorCodeValidationFailed, "Invalid email action link type requested: %v", params.Type) + } + + if terr != nil { + return terr + } + + externalURL := getExternalHost(ctx) + url, terr = mailer.GetEmailActionLink(user, params.Type, referrer, externalURL) + if terr != nil { + return terr + } + return nil + }) + + if err != nil { + return err + } + + resp := GenerateLinkResponse{ + User: *user, + ActionLink: url, + EmailOtp: otp, + HashedToken: hashedToken, + VerificationType: params.Type, + RedirectTo: referrer, + } + + return sendJSON(w, http.StatusOK, resp) +} + +func (a *API) sendConfirmation(r *http.Request, tx *storage.Connection, u *models.User, flowType models.FlowType) error { + var err error + + config := a.config + maxFrequency := config.SMTP.MaxFrequency + otpLength := config.Mailer.OtpLength + + if err = validateSentWithinFrequencyLimit(u.ConfirmationSentAt, maxFrequency); err != nil { + return err + } + oldToken := u.ConfirmationToken + otp := crypto.GenerateOtp(otpLength) + + token := crypto.GenerateTokenHash(u.GetEmail(), otp) + u.ConfirmationToken = addFlowPrefixToToken(token, flowType) + now := time.Now() + if err = a.sendEmail(r, tx, u, mail.SignupVerification, otp, "", u.ConfirmationToken); err != nil { + u.ConfirmationToken = oldToken + if errors.Is(err, EmailRateLimitExceeded) { + return tooManyRequestsError(ErrorCodeOverEmailSendRateLimit, EmailRateLimitExceeded.Error()) + } else if herr, ok := err.(*HTTPError); ok { + return herr + } + return 
internalServerError("Error sending confirmation email").WithInternalError(err) + } + u.ConfirmationSentAt = &now + if err := tx.UpdateOnly(u, "confirmation_token", "confirmation_sent_at"); err != nil { + return internalServerError("Error sending confirmation email").WithInternalError(errors.Wrap(err, "Database error updating user for confirmation")) + } + + if err := models.CreateOneTimeToken(tx, u.ID, u.GetEmail(), u.ConfirmationToken, models.ConfirmationToken); err != nil { + return internalServerError("Error sending confirmation email").WithInternalError(errors.Wrap(err, "Database error creating confirmation token")) + } + + return nil +} + +func (a *API) sendInvite(r *http.Request, tx *storage.Connection, u *models.User) error { + config := a.config + otpLength := config.Mailer.OtpLength + var err error + oldToken := u.ConfirmationToken + otp := crypto.GenerateOtp(otpLength) + + u.ConfirmationToken = crypto.GenerateTokenHash(u.GetEmail(), otp) + now := time.Now() + if err = a.sendEmail(r, tx, u, mail.InviteVerification, otp, "", u.ConfirmationToken); err != nil { + u.ConfirmationToken = oldToken + if errors.Is(err, EmailRateLimitExceeded) { + return tooManyRequestsError(ErrorCodeOverEmailSendRateLimit, EmailRateLimitExceeded.Error()) + } else if herr, ok := err.(*HTTPError); ok { + return herr + } + return internalServerError("Error sending invite email").WithInternalError(err) + } + u.InvitedAt = &now + u.ConfirmationSentAt = &now + err = tx.UpdateOnly(u, "confirmation_token", "confirmation_sent_at", "invited_at") + if err != nil { + return internalServerError("Error inviting user").WithInternalError(errors.Wrap(err, "Database error updating user for invite")) + } + + err = models.CreateOneTimeToken(tx, u.ID, u.GetEmail(), u.ConfirmationToken, models.ConfirmationToken) + if err != nil { + return internalServerError("Error inviting user").WithInternalError(errors.Wrap(err, "Database error creating confirmation token for invite")) + } + + return nil +} + +func (a *API) sendPasswordRecovery(r *http.Request, tx *storage.Connection, u *models.User, flowType models.FlowType) error { + config := a.config + otpLength := config.Mailer.OtpLength + + if err := validateSentWithinFrequencyLimit(u.RecoverySentAt, config.SMTP.MaxFrequency); err != nil { + return err + } + + oldToken := u.RecoveryToken + otp := crypto.GenerateOtp(otpLength) + + token := crypto.GenerateTokenHash(u.GetEmail(), otp) + u.RecoveryToken = addFlowPrefixToToken(token, flowType) + now := time.Now() + if err := a.sendEmail(r, tx, u, mail.RecoveryVerification, otp, "", u.RecoveryToken); err != nil { + u.RecoveryToken = oldToken + if errors.Is(err, EmailRateLimitExceeded) { + return tooManyRequestsError(ErrorCodeOverEmailSendRateLimit, EmailRateLimitExceeded.Error()) + } else if herr, ok := err.(*HTTPError); ok { + return herr + } + return internalServerError("Error sending recovery email").WithInternalError(err) + } + u.RecoverySentAt = &now + + if err := tx.UpdateOnly(u, "recovery_token", "recovery_sent_at"); err != nil { + return internalServerError("Error sending recovery email").WithInternalError(errors.Wrap(err, "Database error updating user for recovery")) + } + + if err := models.CreateOneTimeToken(tx, u.ID, u.GetEmail(), u.RecoveryToken, models.RecoveryToken); err != nil { + return internalServerError("Error sending recovery email").WithInternalError(errors.Wrap(err, "Database error creating recovery token")) + } + + return nil +} + +func (a *API) sendReauthenticationOtp(r *http.Request, tx *storage.Connection, u 
*models.User) error { + config := a.config + maxFrequency := config.SMTP.MaxFrequency + otpLength := config.Mailer.OtpLength + + if err := validateSentWithinFrequencyLimit(u.ReauthenticationSentAt, maxFrequency); err != nil { + return err + } + + oldToken := u.ReauthenticationToken + otp := crypto.GenerateOtp(otpLength) + + u.ReauthenticationToken = crypto.GenerateTokenHash(u.GetEmail(), otp) + now := time.Now() + + if err := a.sendEmail(r, tx, u, mail.ReauthenticationVerification, otp, "", u.ReauthenticationToken); err != nil { + u.ReauthenticationToken = oldToken + if errors.Is(err, EmailRateLimitExceeded) { + return tooManyRequestsError(ErrorCodeOverEmailSendRateLimit, EmailRateLimitExceeded.Error()) + } else if herr, ok := err.(*HTTPError); ok { + return herr + } + return internalServerError("Error sending reauthentication email").WithInternalError(err) + } + u.ReauthenticationSentAt = &now + if err := tx.UpdateOnly(u, "reauthentication_token", "reauthentication_sent_at"); err != nil { + return internalServerError("Error sending reauthentication email").WithInternalError(errors.Wrap(err, "Database error updating user for reauthentication")) + } + + if err := models.CreateOneTimeToken(tx, u.ID, u.GetEmail(), u.ReauthenticationToken, models.ReauthenticationToken); err != nil { + return internalServerError("Error sending reauthentication email").WithInternalError(errors.Wrap(err, "Database error creating reauthentication token")) + } + + return nil +} + +func (a *API) sendMagicLink(r *http.Request, tx *storage.Connection, u *models.User, flowType models.FlowType) error { + var err error + config := a.config + otpLength := config.Mailer.OtpLength + + // since Magic Link is just a recovery with a different template and behaviour + // around new users we will reuse the recovery db timer to prevent potential abuse + if err := validateSentWithinFrequencyLimit(u.RecoverySentAt, config.SMTP.MaxFrequency); err != nil { + return err + } + + oldToken := u.RecoveryToken + otp := crypto.GenerateOtp(otpLength) + + token := crypto.GenerateTokenHash(u.GetEmail(), otp) + u.RecoveryToken = addFlowPrefixToToken(token, flowType) + + now := time.Now() + if err = a.sendEmail(r, tx, u, mail.MagicLinkVerification, otp, "", u.RecoveryToken); err != nil { + u.RecoveryToken = oldToken + if errors.Is(err, EmailRateLimitExceeded) { + return tooManyRequestsError(ErrorCodeOverEmailSendRateLimit, EmailRateLimitExceeded.Error()) + } else if herr, ok := err.(*HTTPError); ok { + return herr + } + return internalServerError("Error sending magic link email").WithInternalError(err) + } + u.RecoverySentAt = &now + if err := tx.UpdateOnly(u, "recovery_token", "recovery_sent_at"); err != nil { + return internalServerError("Error sending magic link email").WithInternalError(errors.Wrap(err, "Database error updating user for recovery")) + } + + if err := models.CreateOneTimeToken(tx, u.ID, u.GetEmail(), u.RecoveryToken, models.RecoveryToken); err != nil { + return internalServerError("Error sending magic link email").WithInternalError(errors.Wrap(err, "Database error creating recovery token")) + } + + return nil +} + +// sendEmailChange sends out an email change token to the new email. 
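// A standalone sketch of the double-confirmation bookkeeping that
// sendEmailChange below drives: with SecureEmailChangeEnabled, one OTP goes
// to the current address and one to the new address, and the change only
// completes once both are verified (the service tracks this with
// email_change_confirm_status, reset to zeroConfirmation here). The type and
// method names in this sketch are illustrative, not the service's API.

package main

import "fmt"

type emailChange struct {
	confirmedCurrent bool // token sent to the existing address was verified
	confirmedNew     bool // token sent to the new address was verified
}

// done reports whether the change may be applied under the given policy.
func (e *emailChange) done(secureChange bool) bool {
	if !secureChange {
		return e.confirmedNew // single confirmation when secure change is off
	}
	return e.confirmedCurrent && e.confirmedNew
}

func main() {
	ec := emailChange{confirmedNew: true}
	fmt.Println(ec.done(true))  // false: still waiting on the current address
	fmt.Println(ec.done(false)) // true: one token suffices
}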
+func (a *API) sendEmailChange(r *http.Request, tx *storage.Connection, u *models.User, email string, flowType models.FlowType) error { + config := a.config + otpLength := config.Mailer.OtpLength + + if err := validateSentWithinFrequencyLimit(u.EmailChangeSentAt, config.SMTP.MaxFrequency); err != nil { + return err + } + + otpNew := crypto.GenerateOtp(otpLength) + + u.EmailChange = email + token := crypto.GenerateTokenHash(u.EmailChange, otpNew) + u.EmailChangeTokenNew = addFlowPrefixToToken(token, flowType) + + otpCurrent := "" + if config.Mailer.SecureEmailChangeEnabled && u.GetEmail() != "" { + otpCurrent = crypto.GenerateOtp(otpLength) + + currentToken := crypto.GenerateTokenHash(u.GetEmail(), otpCurrent) + u.EmailChangeTokenCurrent = addFlowPrefixToToken(currentToken, flowType) + } + + u.EmailChangeConfirmStatus = zeroConfirmation + now := time.Now() + + if err := a.sendEmail(r, tx, u, mail.EmailChangeVerification, otpCurrent, otpNew, u.EmailChangeTokenNew); err != nil { + if errors.Is(err, EmailRateLimitExceeded) { + return tooManyRequestsError(ErrorCodeOverEmailSendRateLimit, EmailRateLimitExceeded.Error()) + } else if herr, ok := err.(*HTTPError); ok { + return herr + } + return internalServerError("Error sending email change email").WithInternalError(err) + } + + u.EmailChangeSentAt = &now + if err := tx.UpdateOnly( + u, + "email_change_token_current", + "email_change_token_new", + "email_change", + "email_change_sent_at", + "email_change_confirm_status", + ); err != nil { + return internalServerError("Error sending email change email").WithInternalError(errors.Wrap(err, "Database error updating user for email change")) + } + + if u.EmailChangeTokenCurrent != "" { + if err := models.CreateOneTimeToken(tx, u.ID, u.GetEmail(), u.EmailChangeTokenCurrent, models.EmailChangeTokenCurrent); err != nil { + return internalServerError("Error sending email change email").WithInternalError(errors.Wrap(err, "Database error creating email change token current")) + } + } + + if u.EmailChangeTokenNew != "" { + if err := models.CreateOneTimeToken(tx, u.ID, u.EmailChange, u.EmailChangeTokenNew, models.EmailChangeTokenNew); err != nil { + return internalServerError("Error sending email change email").WithInternalError(errors.Wrap(err, "Database error creating email change token new")) + } + } + + return nil +} + +func (a *API) validateEmail(email string) (string, error) { + if email == "" { + return "", badRequestError(ErrorCodeValidationFailed, "An email address is required") + } + if len(email) > 255 { + return "", badRequestError(ErrorCodeValidationFailed, "An email address is too long") + } + if err := checkmail.ValidateFormat(email); err != nil { + return "", badRequestError(ErrorCodeValidationFailed, "Unable to validate email address: "+err.Error()) + } + + return strings.ToLower(email), nil +} + +func validateSentWithinFrequencyLimit(sentAt *time.Time, frequency time.Duration) error { + if sentAt != nil && sentAt.Add(frequency).After(time.Now()) { + return tooManyRequestsError(ErrorCodeOverEmailSendRateLimit, generateFrequencyLimitErrorMessage(sentAt, frequency)) + } + return nil +} + +var emailLabelPattern = regexp.MustCompile("[+][^@]+@") + +func (a *API) checkEmailAddressAuthorization(email string) bool { + if len(a.config.External.Email.AuthorizedAddresses) > 0 { + // allow labelled emails when authorization rules are in place + normalized := emailLabelPattern.ReplaceAllString(email, "@") + + for _, authorizedAddress := range a.config.External.Email.AuthorizedAddresses { + if 
strings.EqualFold(normalized, authorizedAddress) { + return true + } + } + + return false + } + + return true +} + +func (a *API) sendEmail(r *http.Request, tx *storage.Connection, u *models.User, emailActionType, otp, otpNew, tokenHashWithPrefix string) error { + ctx := r.Context() + config := a.config + referrerURL := utilities.GetReferrer(r, config) + externalURL := getExternalHost(ctx) + + if emailActionType != mail.EmailChangeVerification { + if u.GetEmail() != "" && !a.checkEmailAddressAuthorization(u.GetEmail()) { + return badRequestError(ErrorCodeEmailAddressNotAuthorized, "Email address %q cannot be used as it is not authorized", u.GetEmail()) + } + } else { + // first check that the user can update their address to the + // new one in u.EmailChange + if u.EmailChange != "" && !a.checkEmailAddressAuthorization(u.EmailChange) { + return badRequestError(ErrorCodeEmailAddressNotAuthorized, "Email address %q cannot be used as it is not authorized", u.EmailChange) + } + + // if secure email change is enabled, check that the user + // account (which could have been created before the authorized + // address authorization restriction was enabled) can even + // receive the confirmation message to the existing address + if config.Mailer.SecureEmailChangeEnabled && u.GetEmail() != "" && !a.checkEmailAddressAuthorization(u.GetEmail()) { + return badRequestError(ErrorCodeEmailAddressNotAuthorized, "Email address %q cannot be used as it is not authorized", u.GetEmail()) + } + } + + // if the number of events is set to zero, we immediately apply rate limits. + if config.RateLimitEmailSent.Events == 0 { + emailRateLimitCounter.Add( + ctx, + 1, + metric.WithAttributeSet(attribute.NewSet(attribute.String("path", r.URL.Path))), + ) + return EmailRateLimitExceeded + } + + // TODO(km): Deprecate this behaviour - rate limits should still be applied to autoconfirm + if !config.Mailer.Autoconfirm { + // apply rate limiting before the email is sent out + if ok := a.limiterOpts.Email.Allow(); !ok { + emailRateLimitCounter.Add( + ctx, + 1, + metric.WithAttributeSet(attribute.NewSet(attribute.String("path", r.URL.Path))), + ) + return EmailRateLimitExceeded + } + } + + if config.Hook.SendEmail.Enabled { + // When secure email change is disabled, we place the token for the new email on emailData.Token + if emailActionType == mail.EmailChangeVerification && !config.Mailer.SecureEmailChangeEnabled && u.GetEmail() != "" { + otp = otpNew + } + + emailData := mail.EmailData{ + Token: otp, + EmailActionType: emailActionType, + RedirectTo: referrerURL, + SiteURL: externalURL.String(), + TokenHash: tokenHashWithPrefix, + } + if emailActionType == mail.EmailChangeVerification && config.Mailer.SecureEmailChangeEnabled && u.GetEmail() != "" { + emailData.TokenNew = otpNew + emailData.TokenHashNew = u.EmailChangeTokenCurrent + } + input := hooks.SendEmailInput{ + User: u, + EmailData: emailData, + } + output := hooks.SendEmailOutput{} + return a.invokeHook(tx, r, &input, &output) + } + + mr := a.Mailer() + var err error + switch emailActionType { + case mail.SignupVerification: + err = mr.ConfirmationMail(r, u, otp, referrerURL, externalURL) + case mail.MagicLinkVerification: + err = mr.MagicLinkMail(r, u, otp, referrerURL, externalURL) + case mail.ReauthenticationVerification: + err = mr.ReauthenticateMail(r, u, otp) + case mail.RecoveryVerification: + err = mr.RecoveryMail(r, u, otp, referrerURL, externalURL) + case mail.InviteVerification: + err = mr.InviteMail(r, u, otp, referrerURL, externalURL) + case 
mail.EmailChangeVerification: + err = mr.EmailChangeMail(r, u, otpNew, otp, referrerURL, externalURL) + default: + err = errors.New("invalid email action type") + } + + switch { + case errors.Is(err, mail.ErrInvalidEmailAddress), + errors.Is(err, mail.ErrInvalidEmailFormat), + errors.Is(err, mail.ErrInvalidEmailDNS): + return badRequestError( + ErrorCodeEmailAddressInvalid, + "Email address %q is invalid", + u.GetEmail()) + default: + return err + } +} diff --git a/auth_v2.169.0/internal/api/mail_test.go b/auth_v2.169.0/internal/api/mail_test.go new file mode 100644 index 0000000..87fa946 --- /dev/null +++ b/auth_v2.169.0/internal/api/mail_test.go @@ -0,0 +1,256 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/gobwas/glob" + "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/crypto" + "github.com/supabase/auth/internal/models" +) + +type MailTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration +} + +func TestMail(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := &MailTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *MailTestSuite) SetupTest() { + models.TruncateAll(ts.API.db) + + ts.Config.Mailer.SecureEmailChangeEnabled = true + + // Create User + u, err := models.NewUser("12345678", "test@example.com", "password", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating new user model") + require.NoError(ts.T(), ts.API.db.Create(u), "Error saving new user") +} + +func (ts *MailTestSuite) TestValidateEmail() { + cases := []struct { + desc string + email string + expectedEmail string + expectedError error + }{ + { + desc: "valid email", + email: "test@example.com", + expectedEmail: "test@example.com", + expectedError: nil, + }, + { + desc: "email should be normalized", + email: "TEST@EXAMPLE.COM", + expectedEmail: "test@example.com", + expectedError: nil, + }, + { + desc: "empty email should return error", + email: "", + expectedEmail: "", + expectedError: badRequestError(ErrorCodeValidationFailed, "An email address is required"), + }, + { + desc: "email length exceeds 255 characters", + // email has 256 characters + email: "testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest@example.com", + expectedEmail: "", + expectedError: badRequestError(ErrorCodeValidationFailed, "An email address is too long"), + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + email, err := ts.API.validateEmail(c.email) + require.Equal(ts.T(), c.expectedError, err) + require.Equal(ts.T(), c.expectedEmail, email) + }) + } +} + +func (ts *MailTestSuite) TestGenerateLink() { + // create admin jwt + claims := &AccessTokenClaims{ + Role: "supabase_admin", + } + token, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString([]byte(ts.Config.JWT.Secret)) + require.NoError(ts.T(), err, "Error generating admin jwt") + + ts.setURIAllowListMap("http://localhost:8000/**") + // create test cases + cases := []struct { + Desc string + Body GenerateLinkParams + ExpectedCode int + ExpectedResponse map[string]interface{} + }{ + { + Desc: "Generate signup 
link for new user", + Body: GenerateLinkParams{ + Email: "new_user@example.com", + Password: "secret123", + Type: "signup", + }, + ExpectedCode: http.StatusOK, + ExpectedResponse: map[string]interface{}{ + "redirect_to": ts.Config.SiteURL, + }, + }, + { + Desc: "Generate signup link for existing user", + Body: GenerateLinkParams{ + Email: "test@example.com", + Password: "secret123", + Type: "signup", + }, + ExpectedCode: http.StatusOK, + ExpectedResponse: map[string]interface{}{ + "redirect_to": ts.Config.SiteURL, + }, + }, + { + Desc: "Generate signup link with custom redirect url", + Body: GenerateLinkParams{ + Email: "test@example.com", + Password: "secret123", + Type: "signup", + RedirectTo: "http://localhost:8000/welcome", + }, + ExpectedCode: http.StatusOK, + ExpectedResponse: map[string]interface{}{ + "redirect_to": "http://localhost:8000/welcome", + }, + }, + { + Desc: "Generate magic link", + Body: GenerateLinkParams{ + Email: "test@example.com", + Type: "magiclink", + }, + ExpectedCode: http.StatusOK, + ExpectedResponse: map[string]interface{}{ + "redirect_to": ts.Config.SiteURL, + }, + }, + { + Desc: "Generate invite link", + Body: GenerateLinkParams{ + Email: "test@example.com", + Type: "invite", + }, + ExpectedCode: http.StatusOK, + ExpectedResponse: map[string]interface{}{ + "redirect_to": ts.Config.SiteURL, + }, + }, + { + Desc: "Generate recovery link", + Body: GenerateLinkParams{ + Email: "test@example.com", + Type: "recovery", + }, + ExpectedCode: http.StatusOK, + ExpectedResponse: map[string]interface{}{ + "redirect_to": ts.Config.SiteURL, + }, + }, + { + Desc: "Generate email change link", + Body: GenerateLinkParams{ + Email: "test@example.com", + NewEmail: "new@example.com", + Type: "email_change_current", + }, + ExpectedCode: http.StatusOK, + ExpectedResponse: map[string]interface{}{ + "redirect_to": ts.Config.SiteURL, + }, + }, + { + Desc: "Generate email change link", + Body: GenerateLinkParams{ + Email: "test@example.com", + NewEmail: "new@example.com", + Type: "email_change_new", + }, + ExpectedCode: http.StatusOK, + ExpectedResponse: map[string]interface{}{ + "redirect_to": ts.Config.SiteURL, + }, + }, + } + + customDomainUrl, err := url.ParseRequestURI("https://example.gotrue.com") + require.NoError(ts.T(), err) + + originalHosts := ts.API.config.Mailer.ExternalHosts + ts.API.config.Mailer.ExternalHosts = []string{ + "example.gotrue.com", + } + + for _, c := range cases { + ts.Run(c.Desc, func() { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.Body)) + req := httptest.NewRequest(http.MethodPost, customDomainUrl.String()+"/admin/generate_link", &buffer) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + require.Equal(ts.T(), c.ExpectedCode, w.Code) + + data := make(map[string]interface{}) + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + require.Contains(ts.T(), data, "action_link") + require.Contains(ts.T(), data, "email_otp") + require.Contains(ts.T(), data, "hashed_token") + require.Contains(ts.T(), data, "redirect_to") + require.Equal(ts.T(), c.Body.Type, data["verification_type"]) + + // check if redirect_to is correct + require.Equal(ts.T(), c.ExpectedResponse["redirect_to"], data["redirect_to"]) + + // check if hashed_token matches hash function of email and the raw otp + require.Equal(ts.T(), crypto.GenerateTokenHash(c.Body.Email, data["email_otp"].(string)), data["hashed_token"]) + + // check if the host used in 
the email link matches the initial request host + u, err := url.ParseRequestURI(data["action_link"].(string)) + require.NoError(ts.T(), err) + require.Equal(ts.T(), req.Host, u.Host) + }) + } + + ts.API.config.Mailer.ExternalHosts = originalHosts +} + +func (ts *MailTestSuite) setURIAllowListMap(uris ...string) { + for _, uri := range uris { + g := glob.MustCompile(uri, '.', '/') + ts.Config.URIAllowListMap[uri] = g + } +} diff --git a/auth_v2.169.0/internal/api/mfa.go b/auth_v2.169.0/internal/api/mfa.go new file mode 100644 index 0000000..4ac2b9b --- /dev/null +++ b/auth_v2.169.0/internal/api/mfa.go @@ -0,0 +1,1029 @@ +package api + +import ( + "bytes" + "crypto/subtle" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/aaronarduino/goqrsvg" + svg "github.com/ajstarks/svgo" + "github.com/boombuler/barcode/qr" + wbnprotocol "github.com/go-webauthn/webauthn/protocol" + "github.com/go-webauthn/webauthn/webauthn" + "github.com/gofrs/uuid" + "github.com/pquerna/otp" + "github.com/pquerna/otp/totp" + "github.com/supabase/auth/internal/api/sms_provider" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/crypto" + "github.com/supabase/auth/internal/hooks" + "github.com/supabase/auth/internal/metering" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" + "github.com/supabase/auth/internal/utilities" +) + +const DefaultQRSize = 3 + +type EnrollFactorParams struct { + FriendlyName string `json:"friendly_name"` + FactorType string `json:"factor_type"` + Issuer string `json:"issuer"` + Phone string `json:"phone"` +} + +type TOTPObject struct { + QRCode string `json:"qr_code,omitempty"` + Secret string `json:"secret,omitempty"` + URI string `json:"uri,omitempty"` +} + +type EnrollFactorResponse struct { + ID uuid.UUID `json:"id"` + Type string `json:"type"` + FriendlyName string `json:"friendly_name"` + TOTP *TOTPObject `json:"totp,omitempty"` + Phone string `json:"phone,omitempty"` +} + +type ChallengeFactorParams struct { + Channel string `json:"channel"` + WebAuthn *WebAuthnParams `json:"web_authn,omitempty"` +} + +type VerifyFactorParams struct { + ChallengeID uuid.UUID `json:"challenge_id"` + Code string `json:"code"` + WebAuthn *WebAuthnParams `json:"web_authn,omitempty"` +} + +type ChallengeFactorResponse struct { + ID uuid.UUID `json:"id"` + Type string `json:"type"` + ExpiresAt int64 `json:"expires_at,omitempty"` + CredentialRequestOptions *wbnprotocol.CredentialAssertion `json:"credential_request_options,omitempty"` + CredentialCreationOptions *wbnprotocol.CredentialCreation `json:"credential_creation_options,omitempty"` +} + +type UnenrollFactorResponse struct { + ID uuid.UUID `json:"id"` +} + +type WebAuthnParams struct { + RPID string `json:"rp_id,omitempty"` + // Can encode multiple origins as comma separated values like: "origin1,origin2" + RPOrigins string `json:"rp_origins,omitempty"` + AssertionResponse json.RawMessage `json:"assertion_response,omitempty"` + CreationResponse json.RawMessage `json:"creation_response,omitempty"` +} + +func (w *WebAuthnParams) GetRPOrigins() []string { + if w.RPOrigins == "" { + return nil + } + return strings.Split(w.RPOrigins, ",") +} + +func (w *WebAuthnParams) ToConfig() (*webauthn.WebAuthn, error) { + if w.RPID == "" { + return nil, fmt.Errorf("webAuthn RP ID cannot be empty") + } + + origins := w.GetRPOrigins() + if len(origins) == 0 { + return nil, fmt.Errorf("webAuthn RP Origins cannot be empty") + } + + var validOrigins []string + var 
invalidOrigins []string + + for _, origin := range origins { + parsedURL, err := url.Parse(origin) + if err != nil || (parsedURL.Scheme != "https" && !(parsedURL.Scheme == "http" && parsedURL.Hostname() == "localhost")) || parsedURL.Host == "" { + invalidOrigins = append(invalidOrigins, origin) + } else { + validOrigins = append(validOrigins, origin) + } + } + + if len(invalidOrigins) > 0 { + return nil, fmt.Errorf("invalid RP origins: %s", strings.Join(invalidOrigins, ", ")) + } + + wconfig := &webauthn.Config{ + // DisplayName is optional in the spec but required to be non-empty by the library, so we use the RPID as a placeholder. + RPDisplayName: w.RPID, + RPID: w.RPID, + RPOrigins: validOrigins, + } + + return webauthn.New(wconfig) +} + +const ( + QRCodeGenerationErrorMessage = "Error generating QR Code" +) + +func validateFactors(db *storage.Connection, user *models.User, newFactorName string, config *conf.GlobalConfiguration, session *models.Session) error { + if err := models.DeleteExpiredFactors(db, config.MFA.FactorExpiryDuration); err != nil { + return err + } + if err := db.Load(user, "Factors"); err != nil { + return err + } + factorCount := len(user.Factors) + numVerifiedFactors := 0 + + for _, factor := range user.Factors { + if factor.FriendlyName == newFactorName { + return unprocessableEntityError( + ErrorCodeMFAFactorNameConflict, + fmt.Sprintf("A factor with the friendly name %q for this user already exists", newFactorName), + ) + } + if factor.IsVerified() { + numVerifiedFactors++ + } + } + + if factorCount >= int(config.MFA.MaxEnrolledFactors) { + return unprocessableEntityError(ErrorCodeTooManyEnrolledMFAFactors, "Maximum number of enrolled factors reached, unenroll to continue") + } + + if numVerifiedFactors >= config.MFA.MaxVerifiedFactors { + return unprocessableEntityError(ErrorCodeTooManyEnrolledMFAFactors, "Maximum number of verified factors reached, unenroll to continue") + } + + if numVerifiedFactors > 0 && session != nil && !session.IsAAL2() { + return forbiddenError(ErrorCodeInsufficientAAL, "AAL2 required to enroll a new factor") + } + + return nil +} + +func (a *API) enrollPhoneFactor(w http.ResponseWriter, r *http.Request, params *EnrollFactorParams) error { + ctx := r.Context() + user := getUser(ctx) + session := getSession(ctx) + db := a.db.WithContext(ctx) + if params.Phone == "" { + return badRequestError(ErrorCodeValidationFailed, "Phone number required to enroll Phone factor") + } + + phone, err := validatePhone(params.Phone) + if err != nil { + return badRequestError(ErrorCodeValidationFailed, "Invalid phone number format (E.164 required)") + } + + var factorsToDelete []models.Factor + for _, factor := range user.Factors { + if factor.IsPhoneFactor() && factor.Phone.String() == phone { + if factor.IsVerified() { + return unprocessableEntityError( + ErrorCodeMFAVerifiedFactorExists, + "A verified phone factor already exists, unenroll the existing factor to continue", + ) + } else if factor.IsUnverified() { + factorsToDelete = append(factorsToDelete, factor) + } + } + } + + if err := db.Destroy(&factorsToDelete); err != nil { + return internalServerError("Database error deleting unverified phone factors").WithInternalError(err) + } + + if err := validateFactors(db, user, params.FriendlyName, a.config, session); err != nil { + return err + } + + factor := models.NewPhoneFactor(user, phone, params.FriendlyName) + err = db.Transaction(func(tx *storage.Connection) error { + if terr := tx.Create(factor); terr != nil { + return terr + } + if terr := 
models.NewAuditLogEntry(r, tx, user, models.EnrollFactorAction, r.RemoteAddr, map[string]interface{}{ + "factor_id": factor.ID, + "factor_type": factor.FactorType, + }); terr != nil { + return terr + } + return nil + }) + if err != nil { + return err + } + return sendJSON(w, http.StatusOK, &EnrollFactorResponse{ + ID: factor.ID, + Type: models.Phone, + FriendlyName: factor.FriendlyName, + Phone: params.Phone, + }) +} + +func (a *API) enrollWebAuthnFactor(w http.ResponseWriter, r *http.Request, params *EnrollFactorParams) error { + ctx := r.Context() + user := getUser(ctx) + session := getSession(ctx) + db := a.db.WithContext(ctx) + + if err := validateFactors(db, user, params.FriendlyName, a.config, session); err != nil { + return err + } + + factor := models.NewWebAuthnFactor(user, params.FriendlyName) + err := db.Transaction(func(tx *storage.Connection) error { + if terr := tx.Create(factor); terr != nil { + return terr + } + if terr := models.NewAuditLogEntry(r, tx, user, models.EnrollFactorAction, r.RemoteAddr, map[string]interface{}{ + "factor_id": factor.ID, + "factor_type": factor.FactorType, + }); terr != nil { + return terr + } + return nil + }) + if err != nil { + return err + } + return sendJSON(w, http.StatusOK, &EnrollFactorResponse{ + ID: factor.ID, + Type: models.WebAuthn, + FriendlyName: factor.FriendlyName, + }) +} + +func (a *API) enrollTOTPFactor(w http.ResponseWriter, r *http.Request, params *EnrollFactorParams) error { + ctx := r.Context() + user := getUser(ctx) + db := a.db.WithContext(ctx) + config := a.config + session := getSession(ctx) + issuer := "" + if params.Issuer == "" { + u, err := url.ParseRequestURI(config.SiteURL) + if err != nil { + return internalServerError("site url is improperly formatted") + } + issuer = u.Host + } else { + issuer = params.Issuer + } + + if err := validateFactors(db, user, params.FriendlyName, config, session); err != nil { + return err + } + + var factor *models.Factor + var buf bytes.Buffer + var key *otp.Key + key, err := totp.Generate(totp.GenerateOpts{ + Issuer: issuer, + AccountName: user.GetEmail(), + }) + if err != nil { + return internalServerError(QRCodeGenerationErrorMessage).WithInternalError(err) + } + + svgData := svg.New(&buf) + qrCode, _ := qr.Encode(key.String(), qr.H, qr.Auto) + qs := goqrsvg.NewQrSVG(qrCode, DefaultQRSize) + qs.StartQrSVG(svgData) + if err = qs.WriteQrSVG(svgData); err != nil { + return internalServerError(QRCodeGenerationErrorMessage).WithInternalError(err) + } + svgData.End() + + factor = models.NewTOTPFactor(user, params.FriendlyName) + if err := factor.SetSecret(key.Secret(), config.Security.DBEncryption.Encrypt, config.Security.DBEncryption.EncryptionKeyID, config.Security.DBEncryption.EncryptionKey); err != nil { + return err + } + + err = db.Transaction(func(tx *storage.Connection) error { + if terr := tx.Create(factor); terr != nil { + return terr + } + + if terr := models.NewAuditLogEntry(r, tx, user, models.EnrollFactorAction, r.RemoteAddr, map[string]interface{}{ + "factor_id": factor.ID, + }); terr != nil { + return terr + } + return nil + }) + if err != nil { + return err + } + return sendJSON(w, http.StatusOK, &EnrollFactorResponse{ + ID: factor.ID, + Type: models.TOTP, + FriendlyName: factor.FriendlyName, + TOTP: &TOTPObject{ + // See: https://css-tricks.com/probably-dont-base64-svg/ + QRCode: buf.String(), + Secret: key.Secret(), + URI: key.URL(), + }, + }) +} + +func (a *API) EnrollFactor(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + user := getUser(ctx) 
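// The TOTP parameters this package checks codes against (verifyTOTPFactor
// later in this file uses period 30, skew 1, six digits, SHA-1) can be
// exercised end to end with the same pquerna/otp library imported above.
// A standalone sketch; the issuer and account values are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/pquerna/otp"
	"github.com/pquerna/otp/totp"
)

func main() {
	// Enrollment side: generate a shared secret, as enrollTOTPFactor does.
	key, err := totp.Generate(totp.GenerateOpts{
		Issuer:      "example.com",
		AccountName: "user@example.com",
	})
	if err != nil {
		panic(err)
	}

	// Authenticator-app side: derive the current six-digit code.
	code, err := totp.GenerateCode(key.Secret(), time.Now())
	if err != nil {
		panic(err)
	}

	// Verification side: mirror the ValidateOpts used by verifyTOTPFactor.
	valid, err := totp.ValidateCustom(code, key.Secret(), time.Now().UTC(), totp.ValidateOpts{
		Period:    30,
		Skew:      1,
		Digits:    otp.DigitsSix,
		Algorithm: otp.AlgorithmSHA1,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("code:", code, "valid:", valid)
}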
+ session := getSession(ctx) + config := a.config + + if session == nil || user == nil { + return internalServerError("A valid session and a registered user are required to enroll a factor") + } + params := &EnrollFactorParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + + switch params.FactorType { + case models.Phone: + if !config.MFA.Phone.EnrollEnabled { + return unprocessableEntityError(ErrorCodeMFAPhoneEnrollDisabled, "MFA enroll is disabled for Phone") + } + return a.enrollPhoneFactor(w, r, params) + case models.TOTP: + if !config.MFA.TOTP.EnrollEnabled { + return unprocessableEntityError(ErrorCodeMFATOTPEnrollDisabled, "MFA enroll is disabled for TOTP") + } + return a.enrollTOTPFactor(w, r, params) + case models.WebAuthn: + if !config.MFA.WebAuthn.EnrollEnabled { + return unprocessableEntityError(ErrorCodeMFAWebAuthnEnrollDisabled, "MFA enroll is disabled for WebAuthn") + } + return a.enrollWebAuthnFactor(w, r, params) + default: + return badRequestError(ErrorCodeValidationFailed, "factor_type needs to be totp, phone, or webauthn") + } + +} + +func (a *API) challengePhoneFactor(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + config := a.config + db := a.db.WithContext(ctx) + user := getUser(ctx) + factor := getFactor(ctx) + ipAddress := utilities.GetIPAddress(r) + params := &ChallengeFactorParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + channel := params.Channel + if channel == "" { + channel = sms_provider.SMSProvider + } + if !sms_provider.IsValidMessageChannel(channel, config) { + return badRequestError(ErrorCodeValidationFailed, InvalidChannelError) + } + + if factor.IsPhoneFactor() && factor.LastChallengedAt != nil { + if !factor.LastChallengedAt.Add(config.MFA.Phone.MaxFrequency).Before(time.Now()) { + return tooManyRequestsError(ErrorCodeOverSMSSendRateLimit, generateFrequencyLimitErrorMessage(factor.LastChallengedAt, config.MFA.Phone.MaxFrequency)) + } + } + + otp := crypto.GenerateOtp(config.MFA.Phone.OtpLength) + + challenge, err := factor.CreatePhoneChallenge(ipAddress, otp, config.Security.DBEncryption.Encrypt, config.Security.DBEncryption.EncryptionKeyID, config.Security.DBEncryption.EncryptionKey) + if err != nil { + return internalServerError("error creating SMS Challenge") + } + + message, err := generateSMSFromTemplate(config.MFA.Phone.SMSTemplate, otp) + if err != nil { + return internalServerError("error generating sms template").WithInternalError(err) + } + + if config.Hook.SendSMS.Enabled { + input := hooks.SendSMSInput{ + User: user, + SMS: hooks.SMS{ + OTP: otp, + SMSType: "mfa", + }, + } + output := hooks.SendSMSOutput{} + err := a.invokeHook(a.db, r, &input, &output) + if err != nil { + return internalServerError("error invoking hook") + } + } else { + smsProvider, err := sms_provider.GetSmsProvider(*config) + if err != nil { + return internalServerError("Failed to get SMS provider").WithInternalError(err) + } + // We omit messageID for now, can consider reinstating if there are requests. 
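// generateSMSFromTemplate is defined elsewhere in this package; a minimal
// equivalent built on text/template might look like the sketch below. The
// {{ .Code }} template variable mirrors the convention used by GoTrue's SMS
// templates and should be treated as an assumption here.

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// renderSMS renders an OTP into a user-configured message template.
func renderSMS(tpl, otp string) (string, error) {
	t, err := template.New("sms").Parse(tpl)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if err := t.Execute(&buf, struct{ Code string }{Code: otp}); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	msg, err := renderSMS("Your verification code is {{ .Code }}", "123456")
	if err != nil {
		panic(err)
	}
	fmt.Println(msg) // Your verification code is 123456
}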
+ if _, err = smsProvider.SendMessage(factor.Phone.String(), message, channel, otp); err != nil { + return internalServerError("error sending message").WithInternalError(err) + } + } + if err := db.Transaction(func(tx *storage.Connection) error { + if terr := factor.WriteChallengeToDatabase(tx, challenge); terr != nil { + return terr + } + + if terr := models.NewAuditLogEntry(r, tx, user, models.CreateChallengeAction, r.RemoteAddr, map[string]interface{}{ + "factor_id": factor.ID, + "factor_status": factor.Status, + }); terr != nil { + return terr + } + return nil + }); err != nil { + return err + } + return sendJSON(w, http.StatusOK, &ChallengeFactorResponse{ + ID: challenge.ID, + Type: factor.FactorType, + ExpiresAt: challenge.GetExpiryTime(config.MFA.ChallengeExpiryDuration).Unix(), + }) +} + +func (a *API) challengeTOTPFactor(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + config := a.config + db := a.db.WithContext(ctx) + + user := getUser(ctx) + factor := getFactor(ctx) + ipAddress := utilities.GetIPAddress(r) + + challenge := factor.CreateChallenge(ipAddress) + + if err := db.Transaction(func(tx *storage.Connection) error { + if terr := factor.WriteChallengeToDatabase(tx, challenge); terr != nil { + return terr + } + if terr := models.NewAuditLogEntry(r, tx, user, models.CreateChallengeAction, r.RemoteAddr, map[string]interface{}{ + "factor_id": factor.ID, + "factor_status": factor.Status, + }); terr != nil { + return terr + } + return nil + }); err != nil { + return err + } + + return sendJSON(w, http.StatusOK, &ChallengeFactorResponse{ + ID: challenge.ID, + Type: factor.FactorType, + ExpiresAt: challenge.GetExpiryTime(config.MFA.ChallengeExpiryDuration).Unix(), + }) +} + +func (a *API) challengeWebAuthnFactor(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + config := a.config + + user := getUser(ctx) + factor := getFactor(ctx) + ipAddress := utilities.GetIPAddress(r) + + params := &ChallengeFactorParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + if params.WebAuthn == nil { + return badRequestError(ErrorCodeValidationFailed, "web_authn config required") + } + webAuthn, err := params.WebAuthn.ToConfig() + if err != nil { + return err + } + var response *ChallengeFactorResponse + var ws *models.WebAuthnSessionData + var challenge *models.Challenge + if factor.IsUnverified() { + options, session, err := webAuthn.BeginRegistration(user) + if err != nil { + return internalServerError("Failed to generate WebAuthn registration data").WithInternalError(err) + } + ws = &models.WebAuthnSessionData{ + SessionData: session, + } + challenge = ws.ToChallenge(factor.ID, ipAddress) + + response = &ChallengeFactorResponse{ + CredentialCreationOptions: options, + Type: factor.FactorType, + ID: challenge.ID, + } + + } else if factor.IsVerified() { + options, session, err := webAuthn.BeginLogin(user) + if err != nil { + return err + } + ws = &models.WebAuthnSessionData{ + SessionData: session, + } + challenge = ws.ToChallenge(factor.ID, ipAddress) + response = &ChallengeFactorResponse{ + CredentialRequestOptions: options, + Type: factor.FactorType, + ID: challenge.ID, + } + + } + + if err := factor.WriteChallengeToDatabase(db, challenge); err != nil { + return err + } + response.ExpiresAt = challenge.GetExpiryTime(config.MFA.ChallengeExpiryDuration).Unix() + + return sendJSON(w, http.StatusOK, response) + +} + +func (a *API) validateChallenge(r *http.Request, db *storage.Connection, factor 
*models.Factor, challengeID uuid.UUID) (*models.Challenge, error) { + config := a.config + currentIP := utilities.GetIPAddress(r) + + challenge, err := factor.FindChallengeByID(db, challengeID) + if err != nil { + if models.IsNotFoundError(err) { + return nil, unprocessableEntityError(ErrorCodeMFAFactorNotFound, "MFA factor with the provided challenge ID not found") + } + return nil, internalServerError("Database error finding Challenge").WithInternalError(err) + } + + if challenge.VerifiedAt != nil || challenge.IPAddress != currentIP { + return nil, unprocessableEntityError(ErrorCodeMFAIPAddressMismatch, "Challenge and verify IP addresses mismatch.") + } + + if challenge.HasExpired(config.MFA.ChallengeExpiryDuration) { + if err := db.Destroy(challenge); err != nil { + return nil, internalServerError("Database error deleting challenge").WithInternalError(err) + } + return nil, unprocessableEntityError(ErrorCodeMFAChallengeExpired, "MFA challenge %v has expired, verify against another challenge or create a new challenge.", challenge.ID) + } + + return challenge, nil +} + +func (a *API) ChallengeFactor(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + config := a.config + factor := getFactor(ctx) + + switch factor.FactorType { + case models.Phone: + if !config.MFA.Phone.VerifyEnabled { + return unprocessableEntityError(ErrorCodeMFAPhoneVerifyDisabled, "MFA verification is disabled for Phone") + } + return a.challengePhoneFactor(w, r) + + case models.TOTP: + if !config.MFA.TOTP.VerifyEnabled { + return unprocessableEntityError(ErrorCodeMFATOTPVerifyDisabled, "MFA verification is disabled for TOTP") + } + return a.challengeTOTPFactor(w, r) + case models.WebAuthn: + if !config.MFA.WebAuthn.VerifyEnabled { + return unprocessableEntityError(ErrorCodeMFAWebAuthnVerifyDisabled, "MFA verification is disabled for WebAuthn") + } + return a.challengeWebAuthnFactor(w, r) + default: + return badRequestError(ErrorCodeValidationFailed, "factor_type needs to be totp, phone, or webauthn") + } + +} + +func (a *API) verifyTOTPFactor(w http.ResponseWriter, r *http.Request, params *VerifyFactorParams) error { + var err error + ctx := r.Context() + user := getUser(ctx) + factor := getFactor(ctx) + config := a.config + db := a.db.WithContext(ctx) + + challenge, err := a.validateChallenge(r, db, factor, params.ChallengeID) + if err != nil { + return err + } + + secret, shouldReEncrypt, err := factor.GetSecret(config.Security.DBEncryption.DecryptionKeys, config.Security.DBEncryption.Encrypt, config.Security.DBEncryption.EncryptionKeyID) + if err != nil { + return internalServerError("Database error verifying MFA TOTP secret").WithInternalError(err) + } + + valid, verr := totp.ValidateCustom(params.Code, secret, time.Now().UTC(), totp.ValidateOpts{ + Period: 30, + Skew: 1, + Digits: otp.DigitsSix, + Algorithm: otp.AlgorithmSHA1, + }) + + if config.Hook.MFAVerificationAttempt.Enabled { + input := hooks.MFAVerificationAttemptInput{ + UserID: user.ID, + FactorID: factor.ID, + Valid: valid, + } + + output := hooks.MFAVerificationAttemptOutput{} + err := a.invokeHook(nil, r, &input, &output) + if err != nil { + return err + } + + if output.Decision == hooks.HookRejection { + if err := models.Logout(db, user.ID); err != nil { + return err + } + + if output.Message == "" { + output.Message = hooks.DefaultMFAHookRejectionMessage + } + + return forbiddenError(ErrorCodeMFAVerificationRejected, output.Message) + } + } + if !valid { + if shouldReEncrypt && config.Security.DBEncryption.Encrypt { + if err := 
factor.SetSecret(secret, true, config.Security.DBEncryption.EncryptionKeyID, config.Security.DBEncryption.EncryptionKey); err != nil { + return err + } + + if err := db.UpdateOnly(factor, "secret"); err != nil { + return err + } + } + return unprocessableEntityError(ErrorCodeMFAVerificationFailed, "Invalid TOTP code entered").WithInternalError(verr) + } + + var token *AccessTokenResponse + + err = db.Transaction(func(tx *storage.Connection) error { + var terr error + if terr = models.NewAuditLogEntry(r, tx, user, models.VerifyFactorAction, r.RemoteAddr, map[string]interface{}{ + "factor_id": factor.ID, + "challenge_id": challenge.ID, + "factor_type": factor.FactorType, + }); terr != nil { + return terr + } + if terr = challenge.Verify(tx); terr != nil { + return terr + } + if !factor.IsVerified() { + if terr = factor.UpdateStatus(tx, models.FactorStateVerified); terr != nil { + return terr + } + } + if shouldReEncrypt && config.Security.DBEncryption.Encrypt { + es, terr := crypto.NewEncryptedString(factor.ID.String(), []byte(secret), config.Security.DBEncryption.EncryptionKeyID, config.Security.DBEncryption.EncryptionKey) + if terr != nil { + return terr + } + + factor.Secret = es.String() + if terr := tx.UpdateOnly(factor, "secret"); terr != nil { + return terr + } + } + user, terr = models.FindUserByID(tx, user.ID) + if terr != nil { + return terr + } + + token, terr = a.updateMFASessionAndClaims(r, tx, user, models.TOTPSignIn, models.GrantParams{ + FactorID: &factor.ID, + }) + if terr != nil { + return terr + } + if terr = models.InvalidateSessionsWithAALLessThan(tx, user.ID, models.AAL2.String()); terr != nil { + return internalServerError("Failed to update sessions. %s", terr) + } + if terr = models.DeleteUnverifiedFactors(tx, user, factor.FactorType); terr != nil { + return internalServerError("Error removing unverified factors. 
%s", terr) + } + return nil + }) + if err != nil { + return err + } + metering.RecordLogin(string(models.MFACodeLoginAction), user.ID) + + return sendJSON(w, http.StatusOK, token) + +} + +func (a *API) verifyPhoneFactor(w http.ResponseWriter, r *http.Request, params *VerifyFactorParams) error { + ctx := r.Context() + config := a.config + user := getUser(ctx) + factor := getFactor(ctx) + db := a.db.WithContext(ctx) + currentIP := utilities.GetIPAddress(r) + + challenge, err := a.validateChallenge(r, db, factor, params.ChallengeID) + if err != nil { + return err + } + + if challenge.VerifiedAt != nil || challenge.IPAddress != currentIP { + return unprocessableEntityError(ErrorCodeMFAIPAddressMismatch, "Challenge and verify IP addresses mismatch") + } + + if challenge.HasExpired(config.MFA.ChallengeExpiryDuration) { + if err := db.Destroy(challenge); err != nil { + return internalServerError("Database error deleting challenge").WithInternalError(err) + } + return unprocessableEntityError(ErrorCodeMFAChallengeExpired, "MFA challenge %v has expired, verify against another challenge or create a new challenge.", challenge.ID) + } + var valid bool + var otpCode string + var shouldReEncrypt bool + if config.Sms.IsTwilioVerifyProvider() { + smsProvider, err := sms_provider.GetSmsProvider(*config) + if err != nil { + return internalServerError("Failed to get SMS provider").WithInternalError(err) + } + if err := smsProvider.VerifyOTP(factor.Phone.String(), params.Code); err != nil { + return forbiddenError(ErrorCodeOTPExpired, "Token has expired or is invalid").WithInternalError(err) + } + valid = true + } else { + otpCode, shouldReEncrypt, err = challenge.GetOtpCode(config.Security.DBEncryption.DecryptionKeys, config.Security.DBEncryption.Encrypt, config.Security.DBEncryption.EncryptionKeyID) + if err != nil { + return internalServerError("Database error verifying MFA TOTP secret").WithInternalError(err) + } + valid = subtle.ConstantTimeCompare([]byte(otpCode), []byte(params.Code)) == 1 + } + if config.Hook.MFAVerificationAttempt.Enabled { + input := hooks.MFAVerificationAttemptInput{ + UserID: user.ID, + FactorID: factor.ID, + FactorType: factor.FactorType, + Valid: valid, + } + + output := hooks.MFAVerificationAttemptOutput{} + err := a.invokeHook(nil, r, &input, &output) + if err != nil { + return err + } + + if output.Decision == hooks.HookRejection { + if err := models.Logout(db, user.ID); err != nil { + return err + } + + if output.Message == "" { + output.Message = hooks.DefaultMFAHookRejectionMessage + } + + return forbiddenError(ErrorCodeMFAVerificationRejected, output.Message) + } + } + if !valid { + if shouldReEncrypt && config.Security.DBEncryption.Encrypt { + if err := challenge.SetOtpCode(otpCode, true, config.Security.DBEncryption.EncryptionKeyID, config.Security.DBEncryption.EncryptionKey); err != nil { + return err + } + + if err := db.UpdateOnly(challenge, "otp_code"); err != nil { + return err + } + } + return unprocessableEntityError(ErrorCodeMFAVerificationFailed, "Invalid MFA Phone code entered") + } + + var token *AccessTokenResponse + + err = db.Transaction(func(tx *storage.Connection) error { + var terr error + if terr = models.NewAuditLogEntry(r, tx, user, models.VerifyFactorAction, r.RemoteAddr, map[string]interface{}{ + "factor_id": factor.ID, + "challenge_id": challenge.ID, + "factor_type": factor.FactorType, + }); terr != nil { + return terr + } + if terr = challenge.Verify(tx); terr != nil { + return terr + } + if !factor.IsVerified() { + if terr = 
factor.UpdateStatus(tx, models.FactorStateVerified); terr != nil { + return terr + } + } + user, terr = models.FindUserByID(tx, user.ID) + if terr != nil { + return terr + } + + token, terr = a.updateMFASessionAndClaims(r, tx, user, models.MFAPhone, models.GrantParams{ + FactorID: &factor.ID, + }) + if terr != nil { + return terr + } + if terr = models.InvalidateSessionsWithAALLessThan(tx, user.ID, models.AAL2.String()); terr != nil { + return internalServerError("Failed to update sessions. %s", terr) + } + if terr = models.DeleteUnverifiedFactors(tx, user, factor.FactorType); terr != nil { + return internalServerError("Error removing unverified factors. %s", terr) + } + return nil + }) + if err != nil { + return err + } + metering.RecordLogin(string(models.MFACodeLoginAction), user.ID) + + return sendJSON(w, http.StatusOK, token) +} + +func (a *API) verifyWebAuthnFactor(w http.ResponseWriter, r *http.Request, params *VerifyFactorParams) error { + ctx := r.Context() + user := getUser(ctx) + factor := getFactor(ctx) + db := a.db.WithContext(ctx) + + var webAuthn *webauthn.WebAuthn + var credential *webauthn.Credential + var err error + + switch { + case params.WebAuthn == nil: + return badRequestError(ErrorCodeValidationFailed, "WebAuthn config required") + case factor.IsVerified() && params.WebAuthn.AssertionResponse == nil: + return badRequestError(ErrorCodeValidationFailed, "assertion_response required to login") + case factor.IsUnverified() && params.WebAuthn.CreationResponse == nil: + return badRequestError(ErrorCodeValidationFailed, "creation_response required to enroll") + default: + webAuthn, err = params.WebAuthn.ToConfig() + if err != nil { + return err + } + } + + challenge, err := a.validateChallenge(r, db, factor, params.ChallengeID) + if err != nil { + return err + } + webAuthnSession := *challenge.WebAuthnSessionData.SessionData + // Once the challenge is validated, we consume the challenge + if err := db.Destroy(challenge); err != nil { + return internalServerError("Database error deleting challenge").WithInternalError(err) + } + + if factor.IsUnverified() { + parsedResponse, err := wbnprotocol.ParseCredentialCreationResponseBody(bytes.NewReader(params.WebAuthn.CreationResponse)) + if err != nil { + return badRequestError(ErrorCodeValidationFailed, "Invalid credential_creation_response") + } + credential, err = webAuthn.CreateCredential(user, webAuthnSession, parsedResponse) + if err != nil { + return err + } + + } else if factor.IsVerified() { + parsedResponse, err := wbnprotocol.ParseCredentialRequestResponseBody(bytes.NewReader(params.WebAuthn.AssertionResponse)) + if err != nil { + return badRequestError(ErrorCodeValidationFailed, "Invalid credential_request_response") + } + credential, err = webAuthn.ValidateLogin(user, webAuthnSession, parsedResponse) + if err != nil { + return internalServerError("Failed to validate WebAuthn MFA response").WithInternalError(err) + } + } + var token *AccessTokenResponse + err = db.Transaction(func(tx *storage.Connection) error { + var terr error + if terr = models.NewAuditLogEntry(r, tx, user, models.VerifyFactorAction, r.RemoteAddr, map[string]interface{}{ + "factor_id": factor.ID, + "challenge_id": challenge.ID, + "factor_type": factor.FactorType, + }); terr != nil { + return terr + } + // Challenge verification not needed as the challenge is destroyed on use + if !factor.IsVerified() { + if terr = factor.UpdateStatus(tx, models.FactorStateVerified); terr != nil { + return terr + } + if terr = factor.SaveWebAuthnCredential(tx, 
credential); terr != nil { + return terr + } + } + user, terr = models.FindUserByID(tx, user.ID) + if terr != nil { + return terr + } + token, terr = a.updateMFASessionAndClaims(r, tx, user, models.MFAWebAuthn, models.GrantParams{ + FactorID: &factor.ID, + }) + if terr != nil { + return terr + } + if terr = models.InvalidateSessionsWithAALLessThan(tx, user.ID, models.AAL2.String()); terr != nil { + return internalServerError("Failed to update session").WithInternalError(terr) + } + if terr = models.DeleteUnverifiedFactors(tx, user, models.WebAuthn); terr != nil { + return internalServerError("Failed to remove unverified MFA WebAuthn factors").WithInternalError(terr) + } + return nil + }) + if err != nil { + return err + } + metering.RecordLogin(string(models.MFACodeLoginAction), user.ID) + + return sendJSON(w, http.StatusOK, token) +} + +func (a *API) VerifyFactor(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + factor := getFactor(ctx) + config := a.config + + params := &VerifyFactorParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + if params.Code == "" && factor.FactorType != models.WebAuthn { + return badRequestError(ErrorCodeValidationFailed, "Code needs to be non-empty") + } + + switch factor.FactorType { + case models.Phone: + if !config.MFA.Phone.VerifyEnabled { + return unprocessableEntityError(ErrorCodeMFAPhoneVerifyDisabled, "MFA verification is disabled for Phone") + } + + return a.verifyPhoneFactor(w, r, params) + case models.TOTP: + if !config.MFA.TOTP.VerifyEnabled { + return unprocessableEntityError(ErrorCodeMFATOTPVerifyDisabled, "MFA verification is disabled for TOTP") + } + return a.verifyTOTPFactor(w, r, params) + case models.WebAuthn: + if !config.MFA.WebAuthn.VerifyEnabled { + return unprocessableEntityError(ErrorCodeMFAWebAuthnEnrollDisabled, "MFA verification is disabled for WebAuthn") + } + return a.verifyWebAuthnFactor(w, r, params) + default: + return badRequestError(ErrorCodeValidationFailed, "factor_type needs to be totp, phone, or webauthn") + } + +} + +func (a *API) UnenrollFactor(w http.ResponseWriter, r *http.Request) error { + var err error + ctx := r.Context() + user := getUser(ctx) + factor := getFactor(ctx) + session := getSession(ctx) + db := a.db.WithContext(ctx) + + if factor == nil || session == nil || user == nil { + return internalServerError("A valid session and factor are required to unenroll a factor") + } + + if factor.IsVerified() && !session.IsAAL2() { + return unprocessableEntityError(ErrorCodeInsufficientAAL, "AAL2 required to unenroll verified factor") + } + + err = db.Transaction(func(tx *storage.Connection) error { + var terr error + if terr := tx.Destroy(factor); terr != nil { + return terr + } + if terr = models.NewAuditLogEntry(r, tx, user, models.UnenrollFactorAction, r.RemoteAddr, map[string]interface{}{ + "factor_id": factor.ID, + "factor_status": factor.Status, + "session_id": session.ID, + }); terr != nil { + return terr + } + if terr = factor.DowngradeSessionsToAAL1(tx); terr != nil { + return terr + } + return nil + }) + if err != nil { + return err + } + + return sendJSON(w, http.StatusOK, &UnenrollFactorResponse{ + ID: factor.ID, + }) +} diff --git a/auth_v2.169.0/internal/api/mfa_test.go b/auth_v2.169.0/internal/api/mfa_test.go new file mode 100644 index 0000000..653f38f --- /dev/null +++ b/auth_v2.169.0/internal/api/mfa_test.go @@ -0,0 +1,1011 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + 
"time" + + "github.com/gofrs/uuid" + + "github.com/pquerna/otp" + "github.com/supabase/auth/internal/api/sms_provider" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/crypto" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/utilities" + + "github.com/pquerna/otp/totp" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type MFATestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration + TestDomain string + TestEmail string + TestOTPKey *otp.Key + TestPassword string + TestUser *models.User + TestSession *models.Session + TestSecondarySession *models.Session +} + +func TestMFA(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + ts := &MFATestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + suite.Run(t, ts) +} + +func (ts *MFATestSuite) SetupTest() { + models.TruncateAll(ts.API.db) + + ts.TestEmail = "test@example.com" + ts.TestPassword = "password" + // Create user + u, err := models.NewUser("123456789", ts.TestEmail, ts.TestPassword, ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating test user model") + require.NoError(ts.T(), ts.API.db.Create(u), "Error saving new test user") + // Create Factor + f := models.NewTOTPFactor(u, "test_factor") + require.NoError(ts.T(), f.SetSecret("secretkey", ts.Config.Security.DBEncryption.Encrypt, ts.Config.Security.DBEncryption.EncryptionKeyID, ts.Config.Security.DBEncryption.EncryptionKey)) + require.NoError(ts.T(), ts.API.db.Create(f), "Error saving new test factor") + // Create corresponding session + s, err := models.NewSession(u.ID, &f.ID) + require.NoError(ts.T(), err, "Error creating test session") + require.NoError(ts.T(), ts.API.db.Create(s), "Error saving test session") + + u, err = models.FindUserByEmailAndAudience(ts.API.db, ts.TestEmail, ts.Config.JWT.Aud) + ts.Require().NoError(err) + + ts.TestUser = u + ts.TestSession = s + + secondarySession, err := models.NewSession(ts.TestUser.ID, &f.ID) + require.NoError(ts.T(), err, "Error creating test session") + require.NoError(ts.T(), ts.API.db.Create(secondarySession), "Error saving test session") + + ts.TestSecondarySession = secondarySession + + // Generate TOTP related settings + testDomain := strings.Split(ts.TestEmail, "@")[1] + ts.TestDomain = testDomain + + // By default MFA Phone is disabled + ts.Config.MFA.Phone.EnrollEnabled = true + ts.Config.MFA.Phone.VerifyEnabled = true + + ts.Config.MFA.WebAuthn.EnrollEnabled = true + ts.Config.MFA.WebAuthn.VerifyEnabled = true + + key, err := totp.Generate(totp.GenerateOpts{ + Issuer: ts.TestDomain, + AccountName: ts.TestEmail, + }) + require.NoError(ts.T(), err) + ts.TestOTPKey = key + +} + +func (ts *MFATestSuite) generateAAL1Token(user *models.User, sessionId *uuid.UUID) string { + // Not an actual path. 
Dummy request to simulate a signup request that we can use in generateAccessToken
+	req := httptest.NewRequest(http.MethodPost, "/factors", nil)
+	token, _, err := ts.API.generateAccessToken(req, ts.API.db, user, sessionId, models.TOTPSignIn)
+	require.NoError(ts.T(), err, "Error generating access token")
+	return token
+}
+
+func (ts *MFATestSuite) TestEnrollFactor() {
+	testFriendlyName := "bob"
+	alternativeFriendlyName := "john"
+
+	token := ts.generateAAL1Token(ts.TestUser, &ts.TestSession.ID)
+
+	var cases = []struct {
+		desc         string
+		friendlyName string
+		factorType   string
+		issuer       string
+		phone        string
+		expectedCode int
+	}{
+		{
+			desc:         "TOTP: No issuer",
+			friendlyName: alternativeFriendlyName,
+			factorType:   models.TOTP,
+			issuer:       "",
+			phone:        "",
+			expectedCode: http.StatusOK,
+		},
+		{
+			desc:         "Invalid factor type",
+			friendlyName: testFriendlyName,
+			factorType:   "invalid_factor",
+			issuer:       ts.TestDomain,
+			phone:        "",
+			expectedCode: http.StatusBadRequest,
+		},
+		{
+			desc:         "TOTP: Factor has friendly name",
+			friendlyName: testFriendlyName,
+			factorType:   models.TOTP,
+			issuer:       ts.TestDomain,
+			phone:        "",
+			expectedCode: http.StatusOK,
+		},
+		{
+			desc:         "TOTP: Enrolling without friendly name",
+			friendlyName: "",
+			factorType:   models.TOTP,
+			issuer:       ts.TestDomain,
+			phone:        "",
+			expectedCode: http.StatusOK,
+		},
+		{
+			desc:         "Phone: Enroll with friendly name",
+			friendlyName: "phone_factor",
+			factorType:   models.Phone,
+			phone:        "+12345677889",
+			expectedCode: http.StatusOK,
+		},
+		{
+			desc:         "Phone: Enroll with invalid phone number",
+			friendlyName: "phone_factor",
+			factorType:   models.Phone,
+			phone:        "+1",
+			expectedCode: http.StatusBadRequest,
+		},
+		{
+			desc:         "Phone: Enroll without phone number should return error",
+			friendlyName: "phone_factor_fail",
+			factorType:   models.Phone,
+			phone:        "",
+			expectedCode: http.StatusBadRequest,
+		},
+		{
+			desc:         "WebAuthn: Enroll with friendly name",
+			friendlyName: "webauthn_factor",
+			factorType:   models.WebAuthn,
+			expectedCode: http.StatusOK,
+		},
+	}
+	for _, c := range cases {
+		ts.Run(c.desc, func() {
+			w := performEnrollFlow(ts, token, c.friendlyName, c.factorType, c.issuer, c.phone, c.expectedCode)
+			enrollResp := EnrollFactorResponse{}
+			require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&enrollResp))
+
+			if c.expectedCode == http.StatusOK {
+				addedFactor, err := models.FindFactorByFactorID(ts.API.db, enrollResp.ID)
+				require.NoError(ts.T(), err)
+				require.False(ts.T(), addedFactor.IsVerified())
+
+				if c.friendlyName != "" {
+					require.Equal(ts.T(), c.friendlyName, addedFactor.FriendlyName)
+				}
+
+				if c.factorType == models.TOTP {
+					qrCode := enrollResp.TOTP.QRCode
+					hasSVGStartAndEnd := strings.Contains(qrCode, "<svg") && strings.Contains(qrCode, "</svg>")
+					require.True(ts.T(), hasSVGStartAndEnd)
+					require.Equal(ts.T(), c.friendlyName, enrollResp.FriendlyName)
+				}
+			}
+
+		})
+	}
+}
+
+func (ts *MFATestSuite) TestDuplicateEnrollPhoneFactor() {
+	testPhoneNumber := "+12345677889"
+	altPhoneNumber := "+987412444444"
+	friendlyName := "phone_factor"
+	altFriendlyName := "alt_phone_factor"
+	token := ts.generateAAL1Token(ts.TestUser, &ts.TestSession.ID)
+
+	var cases = []struct {
+		desc                    string
+		earlierFactorName       string
+		laterFactorName         string
+		phone                   string
+		secondPhone             string
+		expectedCode            int
+		expectedNumberOfFactors int
+	}{
+		{
+			desc:              "Phone: Only the latest factor should persist when enrolling two unverified phone factors with the same number",
+			earlierFactorName: friendlyName,
+			laterFactorName:   altFriendlyName,
+			phone:             testPhoneNumber,
+			secondPhone:             testPhoneNumber,
+			expectedNumberOfFactors: 1,
+		},
+
+		{
+			desc:                    "Phone: Both factors should persist when enrolling two different unverified numbers",
+			earlierFactorName:       friendlyName,
+			laterFactorName:         altFriendlyName,
+			phone:                   testPhoneNumber,
+			secondPhone:             altPhoneNumber,
+			expectedNumberOfFactors: 2,
+		},
+	}
+
+	for _, c := range cases {
+		ts.Run(c.desc, func() {
+			// Delete all test factors to start from clean slate
+			require.NoError(ts.T(), ts.API.db.Destroy(ts.TestUser.Factors))
+			_ = performEnrollFlow(ts, token, c.earlierFactorName, models.Phone, ts.TestDomain, c.phone, http.StatusOK)
+
+			w := performEnrollFlow(ts, token, c.laterFactorName, models.Phone, ts.TestDomain, c.secondPhone, http.StatusOK)
+			enrollResp := EnrollFactorResponse{}
+			require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&enrollResp))
+
+			laterFactor, err := models.FindFactorByFactorID(ts.API.db, enrollResp.ID)
+			require.NoError(ts.T(), err)
+			require.False(ts.T(), laterFactor.IsVerified())
+
+			require.NoError(ts.T(), ts.API.db.Eager("Factors").Find(ts.TestUser, ts.TestUser.ID))
+			require.Equal(ts.T(), len(ts.TestUser.Factors), c.expectedNumberOfFactors)
+
+		})
+	}
+}
+
+func (ts *MFATestSuite) TestDuplicateEnrollPhoneFactorWithVerified() {
+	testPhoneNumber := "+12345677889"
+	friendlyName := "phone_factor"
+	altFriendlyName := "alt_phone_factor"
+	token := ts.generateAAL1Token(ts.TestUser, &ts.TestSession.ID)
+
+	ts.Run("Phone: Enrolling a factor with the same number as an existing verified phone factor should result in an error", func() {
+		require.NoError(ts.T(), ts.API.db.Destroy(ts.TestUser.Factors))
+
+		// Setup verified factor
+		w := performEnrollFlow(ts, token, friendlyName, models.Phone, ts.TestDomain, testPhoneNumber, http.StatusOK)
+		enrollResp := EnrollFactorResponse{}
+		require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&enrollResp))
+		firstFactor, err := models.FindFactorByFactorID(ts.API.db, enrollResp.ID)
+		require.NoError(ts.T(), err)
+		require.NoError(ts.T(), firstFactor.UpdateStatus(ts.API.db, models.FactorStateVerified))
+
+		expectedStatusCode := http.StatusUnprocessableEntity
+		_ = performEnrollFlow(ts, token, altFriendlyName, models.Phone, ts.TestDomain, testPhoneNumber, expectedStatusCode)
+
+		require.NoError(ts.T(), ts.API.db.Eager("Factors").Find(ts.TestUser, ts.TestUser.ID))
+		require.Equal(ts.T(), len(ts.TestUser.Factors), 1)
+	})
+}
+
+func (ts *MFATestSuite) TestDuplicateTOTPEnrollsReturnExpectedMessage() {
+	friendlyName := "mary"
+	issuer := "https://issuer.com"
+	token := ts.generateAAL1Token(ts.TestUser, &ts.TestSession.ID)
+	_ = performEnrollFlow(ts, token, friendlyName, models.TOTP, issuer, "", http.StatusOK)
+	response := performEnrollFlow(ts, token, friendlyName, models.TOTP, issuer, "", http.StatusUnprocessableEntity)
+
+	var errorResponse HTTPError
+	err := json.NewDecoder(response.Body).Decode(&errorResponse)
+	require.NoError(ts.T(), err)
+
+	require.Contains(ts.T(), errorResponse.ErrorCode, ErrorCodeMFAFactorNameConflict)
+}
+
+func (ts *MFATestSuite) TestAAL2RequiredToUpdatePasswordAfterEnrollment() {
+	resp := performTestSignupAndVerify(ts, ts.TestEmail, ts.TestPassword, true /* <- requireStatusOK */)
+	accessTokenResp := &AccessTokenResponse{}
+	require.NoError(ts.T(), json.NewDecoder(resp.Body).Decode(&accessTokenResp))
+
+	var w *httptest.ResponseRecorder
+	var buffer bytes.Buffer
+	token := accessTokenResp.Token
+	// Update Password to new password
+	newPassword := "newpass"
+	require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{
+		
"password": newPassword, + })) + + req := httptest.NewRequest(http.MethodPut, "http://localhost/user", &buffer) + req.Header.Set("Content-Type", "application/json") + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + // Logout + reqURL := "http://localhost/logout" + req = httptest.NewRequest(http.MethodPost, reqURL, nil) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + w = httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusNoContent, w.Code) + + // Get AAL1 token + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": ts.TestEmail, + "password": newPassword, + })) + + req = httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=password", &buffer) + req.Header.Set("Content-Type", "application/json") + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + session1 := AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&session1)) + + // Update Password again, this should fail + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "password": ts.TestPassword, + })) + + req = httptest.NewRequest(http.MethodPut, "http://localhost/user", &buffer) + req.Header.Set("Content-Type", "application/json") + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", session1.Token)) + + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusUnauthorized, w.Code) + +} + +func (ts *MFATestSuite) TestMultipleEnrollsCleanupExpiredFactors() { + // All factors are deleted when a subsequent enroll is made + ts.API.config.MFA.FactorExpiryDuration = 0 * time.Second + // Verified factor should not be deleted (Factor 1) + resp := performTestSignupAndVerify(ts, ts.TestEmail, ts.TestPassword, true /* <- requireStatusOK */) + numFactors := 5 + accessTokenResp := &AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(resp.Body).Decode(&accessTokenResp)) + + var w *httptest.ResponseRecorder + token := accessTokenResp.Token + for i := 0; i < numFactors; i++ { + w = performEnrollFlow(ts, token, "first-name", models.TOTP, "https://issuer.com", "", http.StatusOK) + } + + enrollResp := EnrollFactorResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&enrollResp)) + + // Make a challenge so last, unverified factor isn't deleted on next enroll (Factor 2) + _ = performChallengeFlow(ts, enrollResp.ID, token) + + // Enroll another Factor (Factor 3) + _ = performEnrollFlow(ts, token, "second-name", models.TOTP, "https://issuer.com", "", http.StatusOK) + require.NoError(ts.T(), ts.API.db.Eager("Factors").Find(ts.TestUser, ts.TestUser.ID)) + require.Equal(ts.T(), 3, len(ts.TestUser.Factors)) +} + +func (ts *MFATestSuite) TestChallengeTOTPFactor() { + // Test Factor is a TOTP Factor + f := ts.TestUser.Factors[0] + token := ts.generateAAL1Token(ts.TestUser, &ts.TestSession.ID) + w := performChallengeFlow(ts, f.ID, token) + challengeResp := ChallengeFactorResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&challengeResp)) + + require.Equal(ts.T(), http.StatusOK, w.Code) + require.Equal(ts.T(), challengeResp.Type, models.TOTP) + +} + +func (ts *MFATestSuite) TestChallengeSMSFactor() { + // Challenge should still work with phone provider disabled + ts.Config.External.Phone.Enabled = false + 
ts.Config.Hook.SendSMS.Enabled = true + ts.Config.Hook.SendSMS.URI = "pg-functions://postgres/auth/send_sms_mfa_mock" + + ts.Config.MFA.Phone.MaxFrequency = 0 * time.Second + + require.NoError(ts.T(), ts.Config.Hook.SendSMS.PopulateExtensibilityPoint()) + require.NoError(ts.T(), ts.API.db.RawQuery(` + create or replace function send_sms_mfa_mock(input jsonb) + returns json as $$ + begin + return input; + end; $$ language plpgsql;`).Exec()) + + phone := "+1234567" + friendlyName := "testchallengesmsfactor" + + f := models.NewPhoneFactor(ts.TestUser, phone, friendlyName) + require.NoError(ts.T(), ts.API.db.Create(f), "Error creating new SMS factor") + token := ts.generateAAL1Token(ts.TestUser, &ts.TestSession.ID) + + var cases = []struct { + desc string + channel string + expectedCode int + }{ + { + desc: "SMS Channel", + channel: sms_provider.SMSProvider, + expectedCode: http.StatusOK, + }, + { + desc: "WhatsApp Channel", + channel: sms_provider.WhatsappProvider, + expectedCode: http.StatusOK, + }, + } + + for _, tc := range cases { + ts.Run(tc.desc, func() { + w := performSMSChallengeFlow(ts, f.ID, token, tc.channel) + challengeResp := ChallengeFactorResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&challengeResp)) + require.Equal(ts.T(), challengeResp.Type, models.Phone) + require.Equal(ts.T(), tc.expectedCode, w.Code, tc.desc) + }) + } +} + +func (ts *MFATestSuite) TestMFAVerifyFactor() { + cases := []struct { + desc string + validChallenge bool + validCode bool + factorType string + expectedHTTPCode int + }{ + { + desc: "Invalid: Valid code and expired challenge", + validChallenge: false, + validCode: true, + factorType: models.TOTP, + expectedHTTPCode: http.StatusUnprocessableEntity, + }, + { + desc: "Invalid: Invalid code and valid challenge", + validChallenge: true, + validCode: false, + factorType: models.TOTP, + expectedHTTPCode: http.StatusUnprocessableEntity, + }, + { + desc: "Valid /verify request", + validChallenge: true, + validCode: true, + factorType: models.TOTP, + expectedHTTPCode: http.StatusOK, + }, + { + desc: "Invalid: Valid code and expired challenge (SMS)", + validChallenge: false, + validCode: true, + factorType: models.Phone, + expectedHTTPCode: http.StatusUnprocessableEntity, + }, + { + desc: "Invalid: Invalid code and valid challenge (SMS)", + validChallenge: true, + validCode: false, + factorType: models.Phone, + expectedHTTPCode: http.StatusUnprocessableEntity, + }, + { + desc: "Valid /verify request (SMS)", + validChallenge: true, + validCode: true, + factorType: models.Phone, + expectedHTTPCode: http.StatusOK, + }, + } + for _, v := range cases { + ts.Run(v.desc, func() { + // Authenticate users and set secret + var buffer bytes.Buffer + r, err := models.GrantAuthenticatedUser(ts.API.db, ts.TestUser, models.GrantParams{}) + require.NoError(ts.T(), err) + token := ts.generateAAL1Token(ts.TestUser, r.SessionId) + var f *models.Factor + var sharedSecret string + + if v.factorType == models.TOTP { + friendlyName := uuid.Must(uuid.NewV4()).String() + f = models.NewTOTPFactor(ts.TestUser, friendlyName) + sharedSecret = ts.TestOTPKey.Secret() + f.Secret = sharedSecret + require.NoError(ts.T(), ts.API.db.Create(f), "Error updating new test factor") + } else if v.factorType == models.Phone { + friendlyName := uuid.Must(uuid.NewV4()).String() + numDigits := 10 + otp := crypto.GenerateOtp(numDigits) + require.NoError(ts.T(), err) + phone := fmt.Sprintf("+%s", otp) + f = models.NewPhoneFactor(ts.TestUser, phone, friendlyName) + require.NoError(ts.T(), 
ts.API.db.Create(f), "Error creating new SMS factor") + } + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, fmt.Sprintf("/factors/%s/verify", f.ID), &buffer) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + var c *models.Challenge + var code string + if v.factorType == models.TOTP { + c = f.CreateChallenge(utilities.GetIPAddress(req)) + // Verify TOTP code + code, err = totp.GenerateCode(sharedSecret, time.Now().UTC()) + require.NoError(ts.T(), err) + } else if v.factorType == models.Phone { + code = "123456" + c, err = f.CreatePhoneChallenge(utilities.GetIPAddress(req), code, ts.Config.Security.DBEncryption.Encrypt, ts.Config.Security.DBEncryption.EncryptionKeyID, ts.Config.Security.DBEncryption.EncryptionKey) + require.NoError(ts.T(), err) + } + + if !v.validCode && v.factorType == models.TOTP { + code, err = totp.GenerateCode(sharedSecret, time.Now().UTC().Add(-1*time.Minute*time.Duration(1))) + require.NoError(ts.T(), err) + + } else if !v.validCode && v.factorType == models.Phone { + invalidSuffix := "1" + code += invalidSuffix + } + + require.NoError(ts.T(), ts.API.db.Create(c), "Error saving new test challenge") + if !v.validChallenge { + // Set challenge creation so that it has expired in present time. + newCreatedAt := time.Now().UTC().Add(-1 * time.Second * time.Duration(ts.Config.MFA.ChallengeExpiryDuration+1)) + // created_at is managed by buffalo(ORM) needs to be raw query to be updated + err := ts.API.db.RawQuery("UPDATE auth.mfa_challenges SET created_at = ? WHERE factor_id = ?", newCreatedAt, f.ID).Exec() + require.NoError(ts.T(), err, "Error updating new test challenge") + } + + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "challenge_id": c.ID, + "code": code, + })) + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), v.expectedHTTPCode, w.Code) + + if v.expectedHTTPCode == http.StatusOK { + // Ensure alternate session has been deleted + _, err = models.FindSessionByID(ts.API.db, ts.TestSecondarySession.ID, false) + require.EqualError(ts.T(), err, models.SessionNotFoundError{}.Error()) + } + if !v.validChallenge { + // Ensure invalid challenges are deleted + _, err := f.FindChallengeByID(ts.API.db, c.ID) + require.EqualError(ts.T(), err, models.ChallengeNotFoundError{}.Error()) + } + }) + } +} + +func (ts *MFATestSuite) TestUnenrollVerifiedFactor() { + cases := []struct { + desc string + isAAL2 bool + expectedHTTPCode int + }{ + { + desc: "Verified Factor: AAL1", + isAAL2: false, + expectedHTTPCode: http.StatusUnprocessableEntity, + }, + { + desc: "Verified Factor: AAL2, Success", + isAAL2: true, + expectedHTTPCode: http.StatusOK, + }, + } + for _, v := range cases { + ts.Run(v.desc, func() { + var buffer bytes.Buffer + + // Create Session to test behaviour which downgrades other sessions + f := ts.TestUser.Factors[0] + require.NoError(ts.T(), f.UpdateStatus(ts.API.db, models.FactorStateVerified)) + if v.isAAL2 { + ts.TestSession.UpdateAALAndAssociatedFactor(ts.API.db, models.AAL2, &f.ID) + } + token := ts.generateAAL1Token(ts.TestUser, &ts.TestSession.ID) + w := ServeAuthenticatedRequest(ts, http.MethodDelete, fmt.Sprintf("/factors/%s", f.ID), token, buffer) + require.Equal(ts.T(), v.expectedHTTPCode, w.Code) + + if v.expectedHTTPCode == http.StatusOK { + _, err := models.FindFactorByFactorID(ts.API.db, f.ID) + require.EqualError(ts.T(), err, models.FactorNotFoundError{}.Error()) + session, _ := models.FindSessionByID(ts.API.db, ts.TestSecondarySession.ID, false) + 
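+				// Removing the verified factor should downgrade the remaining
+				// session to AAL1 and detach the factor from it.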
require.Equal(ts.T(), models.AAL1.String(), session.GetAAL()) + require.Nil(ts.T(), session.FactorID) + + } + }) + } + +} + +func (ts *MFATestSuite) TestUnenrollUnverifiedFactor() { + var buffer bytes.Buffer + f := ts.TestUser.Factors[0] + + token := ts.generateAAL1Token(ts.TestUser, &ts.TestSession.ID) + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "factor_id": f.ID, + })) + + w := ServeAuthenticatedRequest(ts, http.MethodDelete, fmt.Sprintf("/factors/%s", f.ID), token, buffer) + require.Equal(ts.T(), http.StatusOK, w.Code) + + _, err := models.FindFactorByFactorID(ts.API.db, f.ID) + require.EqualError(ts.T(), err, models.FactorNotFoundError{}.Error()) + session, _ := models.FindSessionByID(ts.API.db, ts.TestSecondarySession.ID, false) + require.Equal(ts.T(), models.AAL1.String(), session.GetAAL()) + require.Nil(ts.T(), session.FactorID) + +} + +// Integration Tests +func (ts *MFATestSuite) TestSessionsMaintainAALOnRefresh() { + ts.Config.Security.RefreshTokenRotationEnabled = true + resp := performTestSignupAndVerify(ts, ts.TestEmail, ts.TestPassword, true /* <- requireStatusOK */) + accessTokenResp := &AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(resp.Body).Decode(&accessTokenResp)) + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": accessTokenResp.RefreshToken, + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=refresh_token", &buffer) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + data := &AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + ctx, err := ts.API.parseJWTClaims(data.Token, req) + require.NoError(ts.T(), err) + ctx, err = ts.API.maybeLoadUserOrSession(ctx) + require.NoError(ts.T(), err) + require.True(ts.T(), getSession(ctx).IsAAL2()) +} + +// Performing MFA Verification followed by a sign in should return an AAL1 session and an AAL2 session +func (ts *MFATestSuite) TestMFAFollowedByPasswordSignIn() { + ts.Config.Security.RefreshTokenRotationEnabled = true + resp := performTestSignupAndVerify(ts, ts.TestEmail, ts.TestPassword, true /* <- requireStatusOK */) + accessTokenResp := &AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(resp.Body).Decode(&accessTokenResp)) + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": ts.TestEmail, + "password": ts.TestPassword, + })) + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=password", &buffer) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + data := &AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + ctx, err := ts.API.parseJWTClaims(data.Token, req) + require.NoError(ts.T(), err) + + ctx, err = ts.API.maybeLoadUserOrSession(ctx) + require.NoError(ts.T(), err) + + require.Equal(ts.T(), models.AAL1.String(), getSession(ctx).GetAAL()) + session, err := models.FindSessionByUserID(ts.API.db, accessTokenResp.User.ID) + require.NoError(ts.T(), err) + require.True(ts.T(), session.IsAAL2()) +} + +func (ts *MFATestSuite) TestChallengeWebAuthnFactor() { + factor := models.NewWebAuthnFactor(ts.TestUser, "WebAuthnfactor") + validWebAuthnConfiguration 
:= &WebAuthnParams{ + RPID: "localhost", + RPOrigins: "http://localhost:3000", + } + require.NoError(ts.T(), ts.API.db.Create(factor), "Error saving new test factor") + token := ts.generateAAL1Token(ts.TestUser, &ts.TestSession.ID) + w := performChallengeWebAuthnFlow(ts, factor.ID, token, validWebAuthnConfiguration) + require.Equal(ts.T(), http.StatusOK, w.Code) +} + +func performChallengeWebAuthnFlow(ts *MFATestSuite, factorID uuid.UUID, token string, webauthn *WebAuthnParams) *httptest.ResponseRecorder { + var buffer bytes.Buffer + err := json.NewEncoder(&buffer).Encode(ChallengeFactorParams{WebAuthn: webauthn}) + require.NoError(ts.T(), err) + w := ServeAuthenticatedRequest(ts, http.MethodPost, fmt.Sprintf("http://localhost/factors/%s/challenge", factorID), token, buffer) + require.Equal(ts.T(), http.StatusOK, w.Code) + return w +} + +func (ts *MFATestSuite) TestChallengeFactorNotOwnedByUser() { + var buffer bytes.Buffer + email := "nomfaenabled@test.com" + password := "testpassword" + signUpResp := signUp(ts, email, password) + + friendlyName := "testfactor" + phoneNumber := "+1234567" + + otherUsersPhoneFactor := models.NewPhoneFactor(ts.TestUser, phoneNumber, friendlyName) + require.NoError(ts.T(), ts.API.db.Create(otherUsersPhoneFactor), "Error creating factor") + + w := ServeAuthenticatedRequest(ts, http.MethodPost, fmt.Sprintf("http://localhost/factors/%s/challenge", otherUsersPhoneFactor.ID), signUpResp.Token, buffer) + + expectedError := notFoundError(ErrorCodeMFAFactorNotFound, "Factor not found") + + var data HTTPError + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + require.Equal(ts.T(), expectedError.ErrorCode, data.ErrorCode) + require.Equal(ts.T(), http.StatusNotFound, w.Code) + +} + +func signUp(ts *MFATestSuite, email, password string) (signUpResp AccessTokenResponse) { + ts.API.config.Mailer.Autoconfirm = true + var buffer bytes.Buffer + + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": email, + "password": password, + })) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/signup", &buffer) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + data := AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + return data +} + +func performTestSignupAndVerify(ts *MFATestSuite, email, password string, requireStatusOK bool) *httptest.ResponseRecorder { + signUpResp := signUp(ts, email, password) + resp := performEnrollAndVerify(ts, signUpResp.Token, requireStatusOK) + + return resp + +} + +func performEnrollFlow(ts *MFATestSuite, token, friendlyName, factorType, issuer string, phone string, expectedCode int) *httptest.ResponseRecorder { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(EnrollFactorParams{FriendlyName: friendlyName, FactorType: factorType, Issuer: issuer, Phone: phone})) + w := ServeAuthenticatedRequest(ts, http.MethodPost, "http://localhost/factors/", token, buffer) + require.Equal(ts.T(), expectedCode, w.Code) + return w +} + +func ServeAuthenticatedRequest(ts *MFATestSuite, method, path, token string, buffer bytes.Buffer) *httptest.ResponseRecorder { + w := httptest.NewRecorder() + req := httptest.NewRequest(method, path, &buffer) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + req.Header.Set("Content-Type", "application/json") + + ts.API.handler.ServeHTTP(w, req) 
+	return w
+}
+
+func performVerifyFlow(ts *MFATestSuite, challengeID, factorID uuid.UUID, token string, requireStatusOK bool) *httptest.ResponseRecorder {
+	var buffer bytes.Buffer
+
+	factor, err := models.FindFactorByFactorID(ts.API.db, factorID)
+	require.NoError(ts.T(), err)
+	require.NotNil(ts.T(), factor)
+
+	totpSecret := factor.Secret
+
+	if es := crypto.ParseEncryptedString(factor.Secret); es != nil {
+		secret, err := es.Decrypt(factor.ID.String(), ts.API.config.Security.DBEncryption.DecryptionKeys)
+		require.NoError(ts.T(), err)
+		require.NotNil(ts.T(), secret)
+
+		totpSecret = string(secret)
+	}
+
+	code, err := totp.GenerateCode(totpSecret, time.Now().UTC())
+	require.NoError(ts.T(), err)
+
+	require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{
+		"challenge_id": challengeID,
+		"code":         code,
+	}))
+
+	y := ServeAuthenticatedRequest(ts, http.MethodPost, fmt.Sprintf("/factors/%s/verify", factorID), token, buffer)
+
+	if requireStatusOK {
+		require.Equal(ts.T(), http.StatusOK, y.Code)
+	}
+	return y
+}
+
+func performChallengeFlow(ts *MFATestSuite, factorID uuid.UUID, token string) *httptest.ResponseRecorder {
+	var buffer bytes.Buffer
+	w := ServeAuthenticatedRequest(ts, http.MethodPost, fmt.Sprintf("http://localhost/factors/%s/challenge", factorID), token, buffer)
+	require.Equal(ts.T(), http.StatusOK, w.Code)
+	return w
+
+}
+
+func performSMSChallengeFlow(ts *MFATestSuite, factorID uuid.UUID, token, channel string) *httptest.ResponseRecorder {
+	params := ChallengeFactorParams{
+		Channel: channel,
+	}
+	var buffer bytes.Buffer
+	require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(params))
+
+	w := ServeAuthenticatedRequest(ts, http.MethodPost, fmt.Sprintf("http://localhost/factors/%s/challenge", factorID), token, buffer)
+	require.Equal(ts.T(), http.StatusOK, w.Code)
+	return w
+
+}
+
+func performEnrollAndVerify(ts *MFATestSuite, token string, requireStatusOK bool) *httptest.ResponseRecorder {
+	w := performEnrollFlow(ts, token, "", models.TOTP, ts.TestDomain, "", http.StatusOK)
+	enrollResp := EnrollFactorResponse{}
+	require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&enrollResp))
+	factorID := enrollResp.ID
+
+	// Challenge
+	w = performChallengeFlow(ts, factorID, token)
+
+	challengeResp := EnrollFactorResponse{}
+	require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&challengeResp))
+	challengeID := challengeResp.ID
+
+	// Verify
+	y := performVerifyFlow(ts, challengeID, factorID, token, requireStatusOK)
+
+	return y
+}
+
+func (ts *MFATestSuite) TestVerificationHooks() {
+	type verificationHookTestCase struct {
+		desc                string
+		enabled             bool
+		uri                 string
+		hookFunctionSQL     string
+		emailSuffix         string
+		expectToken         bool
+		expectedCode        int
+		cleanupHookFunction string
+	}
+	cases := []verificationHookTestCase{
+		{
+			desc:    "Default Success",
+			enabled: true,
+			uri:     "pg-functions://postgres/auth/verification_hook",
+			hookFunctionSQL: `
+		create or replace function verification_hook(input jsonb)
+		returns json as $$
+		begin
+			return json_build_object('decision', 'continue');
+		end; $$ language plpgsql;`,
+			emailSuffix:         "success",
+			expectToken:         true,
+			expectedCode:        http.StatusOK,
+			cleanupHookFunction: "verification_hook(input jsonb)",
+		},
+		{
+			desc:    "Error",
+			enabled: true,
+			uri:     "pg-functions://postgres/auth/test_verification_hook_error",
+			hookFunctionSQL: `
+		create or replace function test_verification_hook_error(input jsonb)
+		returns json as $$
+		begin
+			RAISE 
EXCEPTION 'Intentional Error for Testing'; + end; $$ language plpgsql;`, + emailSuffix: "error", + expectToken: false, + expectedCode: http.StatusInternalServerError, + cleanupHookFunction: "test_verification_hook_error(input jsonb)", + }, + { + desc: "Reject - Enabled", + enabled: true, + uri: "pg-functions://postgres/auth/verification_hook_reject", + hookFunctionSQL: ` + create or replace function verification_hook_reject(input jsonb) + returns json as $$ + begin + return json_build_object( + 'decision', 'reject', + 'message', 'authentication attempt rejected' + ); + end; $$ language plpgsql;`, + emailSuffix: "reject_enabled", + expectToken: false, + expectedCode: http.StatusForbidden, + cleanupHookFunction: "verification_hook_reject(input jsonb)", + }, + { + desc: "Reject - Disabled", + enabled: false, + uri: "pg-functions://postgres/auth/verification_hook_reject", + hookFunctionSQL: ` + create or replace function verification_hook_reject(input jsonb) + returns json as $$ + begin + return json_build_object( + 'decision', 'reject', + 'message', 'authentication attempt rejected' + ); + end; $$ language plpgsql;`, + emailSuffix: "reject_disabled", + expectToken: true, + expectedCode: http.StatusOK, + cleanupHookFunction: "verification_hook_reject(input jsonb)", + }, + { + desc: "Timeout", + enabled: true, + uri: "pg-functions://postgres/auth/test_verification_hook_timeout", + hookFunctionSQL: ` + create or replace function test_verification_hook_timeout(input jsonb) + returns json as $$ + begin + PERFORM pg_sleep(3); + return json_build_object( + 'decision', 'continue' + ); + end; $$ language plpgsql;`, + emailSuffix: "timeout", + expectToken: false, + expectedCode: http.StatusInternalServerError, + cleanupHookFunction: "test_verification_hook_timeout(input jsonb)", + }, + } + + for _, c := range cases { + ts.T().Run(c.desc, func(t *testing.T) { + ts.Config.Hook.MFAVerificationAttempt.Enabled = c.enabled + ts.Config.Hook.MFAVerificationAttempt.URI = c.uri + require.NoError(ts.T(), ts.Config.Hook.MFAVerificationAttempt.PopulateExtensibilityPoint()) + + err := ts.API.db.RawQuery(c.hookFunctionSQL).Exec() + require.NoError(t, err) + + email := fmt.Sprintf("testemail_%s@gmail.com", c.emailSuffix) + password := "testpassword" + resp := performTestSignupAndVerify(ts, email, password, c.expectToken) + require.Equal(ts.T(), c.expectedCode, resp.Code) + accessTokenResp := &AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(resp.Body).Decode(&accessTokenResp)) + + if c.expectToken { + require.NotEqual(t, "", accessTokenResp.Token) + } else { + require.Equal(t, "", accessTokenResp.Token) + } + + cleanupHook(ts, c.cleanupHookFunction) + }) + } +} + +func cleanupHook(ts *MFATestSuite, hookName string) { + cleanupHookSQL := fmt.Sprintf("drop function if exists %s", hookName) + err := ts.API.db.RawQuery(cleanupHookSQL).Exec() + require.NoError(ts.T(), err) +} diff --git a/auth_v2.169.0/internal/api/middleware.go b/auth_v2.169.0/internal/api/middleware.go new file mode 100644 index 0000000..d387936 --- /dev/null +++ b/auth_v2.169.0/internal/api/middleware.go @@ -0,0 +1,401 @@ +package api + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" + + chimiddleware "github.com/go-chi/chi/v5/middleware" + "github.com/sirupsen/logrus" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/observability" + "github.com/supabase/auth/internal/security" + "github.com/supabase/auth/internal/utilities" + + 
"github.com/didip/tollbooth/v5" + "github.com/didip/tollbooth/v5/limiter" + jwt "github.com/golang-jwt/jwt/v5" +) + +type FunctionHooks map[string][]string + +type AuthMicroserviceClaims struct { + jwt.RegisteredClaims + SiteURL string `json:"site_url"` + InstanceID string `json:"id"` + FunctionHooks FunctionHooks `json:"function_hooks"` +} + +func (f *FunctionHooks) UnmarshalJSON(b []byte) error { + var raw map[string][]string + err := json.Unmarshal(b, &raw) + if err == nil { + *f = FunctionHooks(raw) + return nil + } + // If unmarshaling into map[string][]string fails, try legacy format. + var legacy map[string]string + err = json.Unmarshal(b, &legacy) + if err != nil { + return err + } + if *f == nil { + *f = make(FunctionHooks) + } + for event, hook := range legacy { + (*f)[event] = []string{hook} + } + return nil +} + +var emailRateLimitCounter = observability.ObtainMetricCounter("gotrue_email_rate_limit_counter", "Number of times an email rate limit has been triggered") + +func (a *API) limitHandler(lmt *limiter.Limiter) middlewareHandler { + return func(w http.ResponseWriter, req *http.Request) (context.Context, error) { + c := req.Context() + + if limitHeader := a.config.RateLimitHeader; limitHeader != "" { + key := req.Header.Get(limitHeader) + + if key == "" { + log := observability.GetLogEntry(req).Entry + log.WithField("header", limitHeader).Warn("request does not have a value for the rate limiting header, rate limiting is not applied") + return c, nil + } else { + err := tollbooth.LimitByKeys(lmt, []string{key}) + if err != nil { + return c, tooManyRequestsError(ErrorCodeOverRequestRateLimit, "Request rate limit reached") + } + } + } + return c, nil + } +} + +func (a *API) requireAdminCredentials(w http.ResponseWriter, req *http.Request) (context.Context, error) { + t, err := a.extractBearerToken(req) + if err != nil || t == "" { + return nil, err + } + + ctx, err := a.parseJWTClaims(t, req) + if err != nil { + return nil, err + } + + return a.requireAdmin(ctx) +} + +func (a *API) requireEmailProvider(w http.ResponseWriter, req *http.Request) (context.Context, error) { + ctx := req.Context() + config := a.config + + if !config.External.Email.Enabled { + return nil, badRequestError(ErrorCodeEmailProviderDisabled, "Email logins are disabled") + } + + return ctx, nil +} + +func (a *API) verifyCaptcha(w http.ResponseWriter, req *http.Request) (context.Context, error) { + ctx := req.Context() + config := a.config + + if !config.Security.Captcha.Enabled { + return ctx, nil + } + if _, err := a.requireAdminCredentials(w, req); err == nil { + // skip captcha validation if authorization header contains an admin role + return ctx, nil + } + if shouldIgnore := isIgnoreCaptchaRoute(req); shouldIgnore { + return ctx, nil + } + + body := &security.GotrueRequest{} + if err := retrieveRequestParams(req, body); err != nil { + return nil, err + } + + verificationResult, err := security.VerifyRequest(body, utilities.GetIPAddress(req), strings.TrimSpace(config.Security.Captcha.Secret), config.Security.Captcha.Provider) + if err != nil { + return nil, internalServerError("captcha verification process failed").WithInternalError(err) + } + + if !verificationResult.Success { + return nil, badRequestError(ErrorCodeCaptchaFailed, "captcha protection: request disallowed (%s)", strings.Join(verificationResult.ErrorCodes, ", ")) + } + + return ctx, nil +} + +func isIgnoreCaptchaRoute(req *http.Request) bool { + // captcha shouldn't be enabled on the following grant_types + // id_token, refresh_token, 
pkce
+	if req.URL.Path == "/token" && req.FormValue("grant_type") != "password" {
+		return true
+	}
+	return false
+}
+
+func (a *API) isValidExternalHost(w http.ResponseWriter, req *http.Request) (context.Context, error) {
+	ctx := req.Context()
+	config := a.config
+
+	xForwardedHost := req.Header.Get("X-Forwarded-Host")
+	xForwardedProto := req.Header.Get("X-Forwarded-Proto")
+	reqHost := req.URL.Hostname()
+
+	if len(config.Mailer.ExternalHosts) > 0 {
+		// this server is configured to accept multiple external hosts, validate the host from the X-Forwarded-Host or Host headers
+
+		hostname := ""
+		protocol := "https"
+
+		if xForwardedHost != "" {
+			for _, host := range config.Mailer.ExternalHosts {
+				if host == xForwardedHost {
+					hostname = host
+					break
+				}
+			}
+		} else if reqHost != "" {
+			for _, host := range config.Mailer.ExternalHosts {
+				if host == reqHost {
+					hostname = host
+					break
+				}
+			}
+		}
+
+		if hostname != "" {
+			if hostname == "localhost" {
+				// allow the use of HTTP only if the accepted hostname was localhost
+				if xForwardedProto == "http" || req.URL.Scheme == "http" {
+					protocol = "http"
+				}
+			}
+
+			externalHostURL, err := url.ParseRequestURI(fmt.Sprintf("%s://%s", protocol, hostname))
+			if err != nil {
+				return ctx, err
+			}
+
+			return withExternalHost(ctx, externalHostURL), nil
+		}
+	}
+
+	if xForwardedHost != "" || reqHost != "" {
+		// host has been provided to the request, but it hasn't been
+		// added to the allow list, raise a log message
+		// in Supabase platform the X-Forwarded-Host and full request
+		// URL are likely sanitized before they reach the server
+
+		fields := make(logrus.Fields)
+
+		if xForwardedHost != "" {
+			fields["x_forwarded_host"] = xForwardedHost
+		}
+
+		if xForwardedProto != "" {
+			fields["x_forwarded_proto"] = xForwardedProto
+		}
+
+		if reqHost != "" {
+			fields["request_url_host"] = reqHost
+
+			if req.URL.Scheme != "" {
+				fields["request_url_scheme"] = req.URL.Scheme
+			}
+		}
+
+		logrus.WithFields(fields).Info("Request received external host in X-Forwarded-Host or Host headers, but the values have not been added to GOTRUE_MAILER_EXTERNAL_HOSTS and will not be used. 
To suppress this message add the host, or sanitize the headers before the request reaches Auth.") + } + + // either the provided external hosts don't match the allow list, or + // the server is not configured to accept multiple hosts -- use the + // configured external URL instead + + externalHostURL, err := url.ParseRequestURI(config.API.ExternalURL) + if err != nil { + return ctx, err + } + + return withExternalHost(ctx, externalHostURL), nil +} + +func (a *API) requireSAMLEnabled(w http.ResponseWriter, req *http.Request) (context.Context, error) { + ctx := req.Context() + if !a.config.SAML.Enabled { + return nil, notFoundError(ErrorCodeSAMLProviderDisabled, "SAML 2.0 is disabled") + } + return ctx, nil +} + +func (a *API) requireManualLinkingEnabled(w http.ResponseWriter, req *http.Request) (context.Context, error) { + ctx := req.Context() + if !a.config.Security.ManualLinkingEnabled { + return nil, notFoundError(ErrorCodeManualLinkingDisabled, "Manual linking is disabled") + } + return ctx, nil +} + +func (a *API) databaseCleanup(cleanup models.Cleaner) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + wrappedResp := chimiddleware.NewWrapResponseWriter(w, r.ProtoMajor) + next.ServeHTTP(wrappedResp, r) + switch r.Method { + case http.MethodPost, http.MethodPut, http.MethodPatch, http.MethodDelete: + if (wrappedResp.Status() / 100) != 2 { + // don't do any cleanups for non-2xx responses + return + } + // continue + default: + return + } + + db := a.db.WithContext(r.Context()) + log := observability.GetLogEntry(r).Entry + + affectedRows, err := cleanup.Clean(db) + if err != nil { + log.WithError(err).WithField("affected_rows", affectedRows).Warn("database cleanup failed") + } else if affectedRows > 0 { + log.WithField("affected_rows", affectedRows).Debug("cleaned up expired or stale rows") + } + }) + } +} + +// timeoutResponseWriter is a http.ResponseWriter that queues up a response +// body to be sent if the serving completes before the context has exceeded its +// deadline. 
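+// Writes are buffered in memory and the headers are snapshotted when
+// WriteHeader is first called; nothing reaches the client until finallyWrite
+// flushes the buffer, so a request that exceeds its deadline can be answered
+// with a 504 instead of a partially written response.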
+type timeoutResponseWriter struct { + sync.Mutex + + header http.Header + wroteHeader bool + snapHeader http.Header // snapshot of the header at the time WriteHeader was called + statusCode int + buf bytes.Buffer +} + +func (t *timeoutResponseWriter) Header() http.Header { + t.Lock() + defer t.Unlock() + + return t.header +} + +func (t *timeoutResponseWriter) Write(bytes []byte) (int, error) { + t.Lock() + defer t.Unlock() + + if !t.wroteHeader { + t.writeHeaderLocked(http.StatusOK) + } + + return t.buf.Write(bytes) +} + +func (t *timeoutResponseWriter) WriteHeader(statusCode int) { + t.Lock() + defer t.Unlock() + + t.writeHeaderLocked(statusCode) +} + +func (t *timeoutResponseWriter) writeHeaderLocked(statusCode int) { + if t.wroteHeader { + // ignore multiple calls to WriteHeader + // once WriteHeader has been called once, a snapshot of the header map is taken + // and saved in snapHeader to be used in finallyWrite + return + } + + t.statusCode = statusCode + t.wroteHeader = true + t.snapHeader = t.header.Clone() +} + +func (t *timeoutResponseWriter) finallyWrite(w http.ResponseWriter) { + t.Lock() + defer t.Unlock() + + dst := w.Header() + for k, vv := range t.snapHeader { + dst[k] = vv + } + + if !t.wroteHeader { + t.statusCode = http.StatusOK + } + + w.WriteHeader(t.statusCode) + if _, err := w.Write(t.buf.Bytes()); err != nil { + logrus.WithError(err).Warn("Write failed") + } +} + +func timeoutMiddleware(timeout time.Duration) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx, cancel := context.WithTimeout(r.Context(), timeout) + defer cancel() + + timeoutWriter := &timeoutResponseWriter{ + header: make(http.Header), + } + + panicChan := make(chan any, 1) + serverDone := make(chan struct{}) + go func() { + defer func() { + if p := recover(); p != nil { + panicChan <- p + } + }() + + next.ServeHTTP(timeoutWriter, r.WithContext(ctx)) + close(serverDone) + }() + + select { + case p := <-panicChan: + panic(p) + + case <-serverDone: + timeoutWriter.finallyWrite(w) + + case <-ctx.Done(): + err := ctx.Err() + + if err == context.DeadlineExceeded { + httpError := &HTTPError{ + HTTPStatus: http.StatusGatewayTimeout, + ErrorCode: ErrorCodeRequestTimeout, + Message: "Processing this request timed out, please retry after a moment.", + } + + httpError = httpError.WithInternalError(err) + + HandleResponseError(httpError, w, r) + } else { + // unrecognized context error, so we should wait for the server to finish + // and write out the response + <-serverDone + + timeoutWriter.finallyWrite(w) + } + } + }) + } +} diff --git a/auth_v2.169.0/internal/api/middleware_test.go b/auth_v2.169.0/internal/api/middleware_test.go new file mode 100644 index 0000000..98dd6a8 --- /dev/null +++ b/auth_v2.169.0/internal/api/middleware_test.go @@ -0,0 +1,510 @@ +package api + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/didip/tollbooth/v5" + "github.com/didip/tollbooth/v5/limiter" + jwt "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/storage" +) + +const ( + HCaptchaSecret string = "0x0000000000000000000000000000000000000000" + CaptchaResponse string = "10000000-aaaa-bbbb-cccc-000000000001" + 
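+	// These are the publicly documented always-pass test credentials: the
+	// hCaptcha dummy secret and response token above, and Cloudflare
+	// Turnstile's dummy secret below.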
TurnstileCaptchaSecret string = "1x0000000000000000000000000000000AA" +) + +type MiddlewareTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration +} + +func TestMiddlewareFunctions(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := &MiddlewareTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *MiddlewareTestSuite) TestVerifyCaptchaValid() { + ts.Config.Security.Captcha.Enabled = true + + adminClaims := &AccessTokenClaims{ + Role: "supabase_admin", + } + adminJwt, err := jwt.NewWithClaims(jwt.SigningMethodHS256, adminClaims).SignedString([]byte(ts.Config.JWT.Secret)) + require.NoError(ts.T(), err) + cases := []struct { + desc string + adminJwt string + captcha_token string + captcha_provider string + }{ + { + "Valid captcha response", + "", + CaptchaResponse, + "hcaptcha", + }, + { + "Valid captcha response", + "", + CaptchaResponse, + "turnstile", + }, + { + "Ignore captcha if admin role is present", + adminJwt, + "", + "hcaptcha", + }, + { + "Ignore captcha if admin role is present", + adminJwt, + "", + "turnstile", + }, + } + for _, c := range cases { + ts.Config.Security.Captcha.Provider = c.captcha_provider + if c.captcha_provider == "turnstile" { + ts.Config.Security.Captcha.Secret = TurnstileCaptchaSecret + } else if c.captcha_provider == "hcaptcha" { + ts.Config.Security.Captcha.Secret = HCaptchaSecret + } + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test@example.com", + "password": "secret", + "gotrue_meta_security": map[string]interface{}{ + "captcha_token": c.captcha_token, + }, + })) + req := httptest.NewRequest(http.MethodPost, "http://localhost", &buffer) + req.Header.Set("Content-Type", "application/json") + if c.adminJwt != "" { + req.Header.Set("Authorization", "Bearer "+c.adminJwt) + } + + beforeCtx := context.Background() + req = req.WithContext(beforeCtx) + + w := httptest.NewRecorder() + + afterCtx, err := ts.API.verifyCaptcha(w, req) + require.NoError(ts.T(), err) + + body, err := io.ReadAll(req.Body) + require.NoError(ts.T(), err) + + // re-initialize buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test@example.com", + "password": "secret", + "gotrue_meta_security": map[string]interface{}{ + "captcha_token": c.captcha_token, + }, + })) + + // check if body is the same + require.Equal(ts.T(), body, buffer.Bytes()) + require.Equal(ts.T(), afterCtx, beforeCtx) + } +} + +func (ts *MiddlewareTestSuite) TestVerifyCaptchaInvalid() { + cases := []struct { + desc string + captchaConf *conf.CaptchaConfiguration + expectedCode int + expectedMsg string + }{ + { + "Captcha validation failed", + &conf.CaptchaConfiguration{ + Enabled: true, + Provider: "hcaptcha", + Secret: "test", + }, + http.StatusBadRequest, + "captcha protection: request disallowed (not-using-dummy-secret)", + }, + { + "Captcha validation failed", + &conf.CaptchaConfiguration{ + Enabled: true, + Provider: "turnstile", + Secret: "anothertest", + }, + http.StatusBadRequest, + "captcha protection: request disallowed (invalid-input-secret)", + }, + } + for _, c := range cases { + ts.Run(c.desc, func() { + ts.Config.Security.Captcha = *c.captchaConf + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test@example.com", + "password": "secret", + "gotrue_meta_security": map[string]interface{}{ + 
"captcha_token": CaptchaResponse, + }, + })) + req := httptest.NewRequest(http.MethodPost, "http://localhost", &buffer) + req.Header.Set("Content-Type", "application/json") + + req = req.WithContext(context.Background()) + + w := httptest.NewRecorder() + + _, err := ts.API.verifyCaptcha(w, req) + require.Equal(ts.T(), c.expectedCode, err.(*HTTPError).HTTPStatus) + require.Equal(ts.T(), c.expectedMsg, err.(*HTTPError).Message) + }) + } +} + +func (ts *MiddlewareTestSuite) TestIsValidExternalHost() { + cases := []struct { + desc string + externalHosts []string + + requestURL string + headers http.Header + + expectedURL string + }{ + { + desc: "no defined external hosts, no headers, no absolute request URL", + requestURL: "/some-path", + expectedURL: ts.API.config.API.ExternalURL, + }, + + { + desc: "no defined external hosts, unauthorized X-Forwarded-Host without any external hosts", + headers: http.Header{ + "X-Forwarded-Host": []string{ + "external-host.com", + }, + }, + requestURL: "/some-path", + expectedURL: ts.API.config.API.ExternalURL, + }, + + { + desc: "defined external hosts, unauthorized X-Forwarded-Host", + externalHosts: []string{"authorized-host.com"}, + headers: http.Header{ + "X-Forwarded-Proto": []string{"https"}, + "X-Forwarded-Host": []string{ + "external-host.com", + }, + }, + requestURL: "/some-path", + expectedURL: ts.API.config.API.ExternalURL, + }, + + { + desc: "no defined external hosts, unauthorized Host", + requestURL: "https://external-host.com/some-path", + expectedURL: ts.API.config.API.ExternalURL, + }, + + { + desc: "defined external hosts, unauthorized Host", + externalHosts: []string{"authorized-host.com"}, + requestURL: "https://external-host.com/some-path", + expectedURL: ts.API.config.API.ExternalURL, + }, + + { + desc: "defined external hosts, authorized X-Forwarded-Host", + externalHosts: []string{"authorized-host.com"}, + headers: http.Header{ + "X-Forwarded-Proto": []string{"http"}, // this should be ignored and default to HTTPS + "X-Forwarded-Host": []string{ + "authorized-host.com", + }, + }, + requestURL: "https://X-Forwarded-Host-takes-precedence.com/some-path", + expectedURL: "https://authorized-host.com", + }, + + { + desc: "defined external hosts, authorized Host", + externalHosts: []string{"authorized-host.com"}, + requestURL: "https://authorized-host.com/some-path", + expectedURL: "https://authorized-host.com", + }, + + { + desc: "defined external hosts, authorized X-Forwarded-Host", + externalHosts: []string{"authorized-host.com"}, + headers: http.Header{ + "X-Forwarded-Proto": []string{"http"}, // this should be ignored and default to HTTPS + "X-Forwarded-Host": []string{ + "authorized-host.com", + }, + }, + requestURL: "https://X-Forwarded-Host-takes-precedence.com/some-path", + expectedURL: "https://authorized-host.com", + }, + + { + desc: "defined external hosts, authorized localhost in X-Forwarded-Host with HTTP", + externalHosts: []string{"localhost"}, + headers: http.Header{ + "X-Forwarded-Proto": []string{"http"}, + "X-Forwarded-Host": []string{ + "localhost", + }, + }, + requestURL: "/some-path", + expectedURL: "http://localhost", + }, + + { + desc: "defined external hosts, authorized localhost in Host with HTTP", + externalHosts: []string{"localhost"}, + requestURL: "http://localhost:3000/some-path", + expectedURL: "http://localhost", + }, + } + + require.NotEmpty(ts.T(), ts.API.config.API.ExternalURL) + + for _, c := range cases { + ts.Run(c.desc, func() { + req := httptest.NewRequest(http.MethodPost, c.requestURL, nil) + if 
+			if c.headers != nil {
+				req.Header = c.headers
+			}
+
+			originalHosts := ts.API.config.Mailer.ExternalHosts
+			ts.API.config.Mailer.ExternalHosts = c.externalHosts
+
+			w := httptest.NewRecorder()
+			ctx, err := ts.API.isValidExternalHost(w, req)
+
+			ts.API.config.Mailer.ExternalHosts = originalHosts
+
+			require.NoError(ts.T(), err)
+
+			externalURL := getExternalHost(ctx)
+			require.Equal(ts.T(), c.expectedURL, externalURL.String())
+		})
+	}
+}
+
+func (ts *MiddlewareTestSuite) TestRequireSAMLEnabled() {
+	cases := []struct {
+		desc        string
+		isEnabled   bool
+		expectedErr error
+	}{
+		{
+			desc:        "SAML not enabled",
+			isEnabled:   false,
+			expectedErr: notFoundError(ErrorCodeSAMLProviderDisabled, "SAML 2.0 is disabled"),
+		},
+		{
+			desc:        "SAML enabled",
+			isEnabled:   true,
+			expectedErr: nil,
+		},
+	}
+
+	for _, c := range cases {
+		ts.Run(c.desc, func() {
+			ts.Config.SAML.Enabled = c.isEnabled
+			req := httptest.NewRequest("GET", "http://localhost", nil)
+			w := httptest.NewRecorder()
+
+			_, err := ts.API.requireSAMLEnabled(w, req)
+			require.Equal(ts.T(), c.expectedErr, err)
+		})
+	}
+}
+
+func TestFunctionHooksUnmarshalJSON(t *testing.T) {
+	tests := []struct {
+		in string
+		ok bool
+	}{
+		{`{ "signup" : "identity-signup" }`, true},
+		{`{ "signup" : ["identity-signup"] }`, true},
+		{`{ "signup" : {"foo" : "bar"} }`, false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.in, func(t *testing.T) {
+			var f FunctionHooks
+			err := json.Unmarshal([]byte(tt.in), &f)
+			if tt.ok {
+				assert.NoError(t, err)
+				assert.Equal(t, FunctionHooks{"signup": {"identity-signup"}}, f)
+			} else {
+				assert.Error(t, err)
+			}
+		})
+	}
+}
+
+func (ts *MiddlewareTestSuite) TestTimeoutMiddleware() {
+	ts.Config.API.MaxRequestDuration = 5 * time.Microsecond
+	req := httptest.NewRequest(http.MethodGet, "http://localhost", nil)
+	w := httptest.NewRecorder()
+
+	timeoutHandler := timeoutMiddleware(ts.Config.API.MaxRequestDuration)
+
+	slowHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Sleep for 1 second to simulate a slow handler which should trigger the timeout
+		time.Sleep(1 * time.Second)
+		ts.API.handler.ServeHTTP(w, r)
+	})
+	timeoutHandler(slowHandler).ServeHTTP(w, req)
+	assert.Equal(ts.T(), http.StatusGatewayTimeout, w.Code)
+
+	var data map[string]interface{}
+	require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data))
+	require.Equal(ts.T(), ErrorCodeRequestTimeout, data["error_code"])
+	require.Equal(ts.T(), float64(504), data["code"])
+	require.NotNil(ts.T(), data["msg"])
+}
+
+func TestTimeoutResponseWriter(t *testing.T) {
+	// timeoutResponseWriter should exhibit a similar behavior as http.ResponseWriter
+	req := httptest.NewRequest(http.MethodGet, "http://localhost", nil)
+	w1 := httptest.NewRecorder()
+	w2 := httptest.NewRecorder()
+
+	timeoutHandler := timeoutMiddleware(time.Second * 10)
+
+	redirectHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// tries to redirect twice
+		http.Redirect(w, r, "http://localhost:3001/#message=first_message", http.StatusSeeOther)
+
+		// overwrites the first
+		http.Redirect(w, r, "http://localhost:3001/second", http.StatusSeeOther)
+	})
+	timeoutHandler(redirectHandler).ServeHTTP(w1, req)
+	redirectHandler.ServeHTTP(w2, req)
+
+	require.Equal(t, w1.Result(), w2.Result())
+}
+
+func (ts *MiddlewareTestSuite) TestLimitHandler() {
+	ts.Config.RateLimitHeader = "X-Rate-Limit"
+	lmt := tollbooth.NewLimiter(5, &limiter.ExpirableOptions{
+		DefaultExpirationTTL: time.Hour,
+	})
+
+	okHandler := http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + b, _ := json.Marshal(map[string]interface{}{"message": "ok"}) + w.Write(b) + }) + + for i := 0; i < 5; i++ { + req := httptest.NewRequest(http.MethodGet, "http://localhost", nil) + req.Header.Add(ts.Config.RateLimitHeader, "0.0.0.0") + w := httptest.NewRecorder() + ts.API.limitHandler(lmt).handler(okHandler).ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + var data map[string]interface{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + require.Equal(ts.T(), "ok", data["message"]) + } + + // 6th request should fail and return a rate limit exceeded error + req := httptest.NewRequest(http.MethodGet, "http://localhost", nil) + req.Header.Add(ts.Config.RateLimitHeader, "0.0.0.0") + w := httptest.NewRecorder() + ts.API.limitHandler(lmt).handler(okHandler).ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusTooManyRequests, w.Code) +} + +type MockCleanup struct { + mock.Mock +} + +func (m *MockCleanup) Clean(db *storage.Connection) (int, error) { + m.Called(db) + return 0, nil +} + +func (ts *MiddlewareTestSuite) TestDatabaseCleanup() { + testHandler := func(statusCode int) http.HandlerFunc { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(statusCode) + b, _ := json.Marshal(map[string]interface{}{"message": "ok"}) + w.Write(b) + }) + } + + cases := []struct { + desc string + statusCode int + method string + }{ + { + desc: "Run cleanup successfully", + statusCode: http.StatusOK, + method: http.MethodPost, + }, + { + desc: "Skip cleanup if GET", + statusCode: http.StatusOK, + method: http.MethodGet, + }, + { + desc: "Skip cleanup if 3xx", + statusCode: http.StatusSeeOther, + method: http.MethodPost, + }, + { + desc: "Skip cleanup if 4xx", + statusCode: http.StatusBadRequest, + method: http.MethodPost, + }, + { + desc: "Skip cleanup if 5xx", + statusCode: http.StatusInternalServerError, + method: http.MethodPost, + }, + } + + mockCleanup := new(MockCleanup) + mockCleanup.On("Clean", mock.Anything).Return(0, nil) + for _, c := range cases { + ts.Run(c.desc, func() { + req := httptest.NewRequest(c.method, "http://localhost", nil) + w := httptest.NewRecorder() + ts.API.databaseCleanup(mockCleanup)(testHandler(c.statusCode)).ServeHTTP(w, req) + require.Equal(ts.T(), c.statusCode, w.Code) + }) + } + mockCleanup.AssertNumberOfCalls(ts.T(), "Clean", 1) +} diff --git a/auth_v2.169.0/internal/api/opentelemetry-tracer_test.go b/auth_v2.169.0/internal/api/opentelemetry-tracer_test.go new file mode 100644 index 0000000..4aeddce --- /dev/null +++ b/auth_v2.169.0/internal/api/opentelemetry-tracer_test.go @@ -0,0 +1,93 @@ +package api + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/storage" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" + semconv "go.opentelemetry.io/otel/semconv/v1.25.0" +) + +type OpenTelemetryTracerTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration +} + +func TestOpenTelemetryTracer(t *testing.T) { + api, config, err := setupAPIForTestWithCallback(func(config *conf.GlobalConfiguration, conn *storage.Connection) { + if config != nil { + config.Tracing.Enabled = true +
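// use the OpenTelemetry exporter; TestOpenTelemetryTracer_Spans below swaps + // in an in-memory span exporter via otel.SetTracerProvider to capture spans +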
config.Tracing.Exporter = conf.OpenTelemetryTracing + } + }) + + require.NoError(t, err) + + ts := &OpenTelemetryTracerTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func getAttribute(attributes []attribute.KeyValue, key attribute.Key) *attribute.Value { + for _, value := range attributes { + if value.Key == key { + return &value.Value + } + } + + return nil +} + +func (ts *OpenTelemetryTracerTestSuite) TestOpenTelemetryTracer_Spans() { + exporter := tracetest.NewInMemoryExporter() + bsp := sdktrace.NewSimpleSpanProcessor(exporter) + traceProvider := sdktrace.NewTracerProvider( + sdktrace.WithSampler(sdktrace.AlwaysSample()), + sdktrace.WithSpanProcessor(bsp), + ) + otel.SetTracerProvider(traceProvider) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "http://localhost/something1", nil) + req.Header.Set("User-Agent", "whatever") + ts.API.handler.ServeHTTP(w, req) + + req = httptest.NewRequest(http.MethodGet, "http://localhost/something2", nil) + req.Header.Set("User-Agent", "whatever") + ts.API.handler.ServeHTTP(w, req) + + spanStubs := exporter.GetSpans() + spans := spanStubs.Snapshots() + + if assert.Equal(ts.T(), 2, len(spans)) { + attributes1 := spans[0].Attributes() + method1 := getAttribute(attributes1, semconv.HTTPMethodKey) + assert.Equal(ts.T(), "POST", method1.AsString()) + url1 := getAttribute(attributes1, semconv.HTTPTargetKey) + assert.Equal(ts.T(), "/something1", url1.AsString()) + statusCode1 := getAttribute(attributes1, semconv.HTTPStatusCodeKey) + assert.Equal(ts.T(), int64(404), statusCode1.AsInt64()) + + attributes2 := spans[1].Attributes() + method2 := getAttribute(attributes2, semconv.HTTPMethodKey) + assert.Equal(ts.T(), "GET", method2.AsString()) + url2 := getAttribute(attributes2, semconv.HTTPTargetKey) + assert.Equal(ts.T(), "/something2", url2.AsString()) + statusCode2 := getAttribute(attributes2, semconv.HTTPStatusCodeKey) + assert.Equal(ts.T(), int64(404), statusCode2.AsInt64()) + } +} diff --git a/auth_v2.169.0/internal/api/options.go b/auth_v2.169.0/internal/api/options.go new file mode 100644 index 0000000..9053c2f --- /dev/null +++ b/auth_v2.169.0/internal/api/options.go @@ -0,0 +1,102 @@ +package api + +import ( + "time" + + "github.com/didip/tollbooth/v5" + "github.com/didip/tollbooth/v5/limiter" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/ratelimit" +) + +type Option interface { + apply(*API) +} + +type LimiterOptions struct { + Email ratelimit.Limiter + Phone ratelimit.Limiter + + Signups *limiter.Limiter + AnonymousSignIns *limiter.Limiter + Recover *limiter.Limiter + Resend *limiter.Limiter + MagicLink *limiter.Limiter + Otp *limiter.Limiter + Token *limiter.Limiter + Verify *limiter.Limiter + User *limiter.Limiter + FactorVerify *limiter.Limiter + FactorChallenge *limiter.Limiter + SSO *limiter.Limiter + SAMLAssertion *limiter.Limiter +} + +func (lo *LimiterOptions) apply(a *API) { a.limiterOpts = lo } + +func NewLimiterOptions(gc *conf.GlobalConfiguration) *LimiterOptions { + o := &LimiterOptions{} + + o.Email = ratelimit.New(gc.RateLimitEmailSent) + o.Phone = ratelimit.New(gc.RateLimitSmsSent) + + o.AnonymousSignIns = tollbooth.NewLimiter(gc.RateLimitAnonymousUsers/(60*60), + &limiter.ExpirableOptions{ + DefaultExpirationTTL: time.Hour, + }).SetBurst(int(gc.RateLimitAnonymousUsers)).SetMethods([]string{"POST"}) + + o.Token = tollbooth.NewLimiter(gc.RateLimitTokenRefresh/(60*5), + &limiter.ExpirableOptions{ + DefaultExpirationTTL: 
time.Hour, + }).SetBurst(30) + + o.Verify = tollbooth.NewLimiter(gc.RateLimitVerify/(60*5), + &limiter.ExpirableOptions{ + DefaultExpirationTTL: time.Hour, + }).SetBurst(30) + + o.User = tollbooth.NewLimiter(gc.RateLimitOtp/(60*5), + &limiter.ExpirableOptions{ + DefaultExpirationTTL: time.Hour, + }).SetBurst(30) + + o.FactorVerify = tollbooth.NewLimiter(gc.MFA.RateLimitChallengeAndVerify/60, + &limiter.ExpirableOptions{ + DefaultExpirationTTL: time.Minute, + }).SetBurst(30) + + o.FactorChallenge = tollbooth.NewLimiter(gc.MFA.RateLimitChallengeAndVerify/60, + &limiter.ExpirableOptions{ + DefaultExpirationTTL: time.Minute, + }).SetBurst(30) + + o.SSO = tollbooth.NewLimiter(gc.RateLimitSso/(60*5), + &limiter.ExpirableOptions{ + DefaultExpirationTTL: time.Hour, + }).SetBurst(30) + + o.SAMLAssertion = tollbooth.NewLimiter(gc.SAML.RateLimitAssertion/(60*5), + &limiter.ExpirableOptions{ + DefaultExpirationTTL: time.Hour, + }).SetBurst(30) + + o.Signups = tollbooth.NewLimiter(gc.RateLimitOtp/(60*5), + &limiter.ExpirableOptions{ + DefaultExpirationTTL: time.Hour, + }).SetBurst(30) + + // These all use the OTP limit per 5 min with 1hour ttl and burst of 30. + o.Recover = newLimiterPer5mOver1h(gc.RateLimitOtp) + o.Resend = newLimiterPer5mOver1h(gc.RateLimitOtp) + o.MagicLink = newLimiterPer5mOver1h(gc.RateLimitOtp) + o.Otp = newLimiterPer5mOver1h(gc.RateLimitOtp) + return o +} + +func newLimiterPer5mOver1h(rate float64) *limiter.Limiter { + freq := rate / (60 * 5) + lim := tollbooth.NewLimiter(freq, &limiter.ExpirableOptions{ + DefaultExpirationTTL: time.Hour, + }).SetBurst(30) + return lim +} diff --git a/auth_v2.169.0/internal/api/options_test.go b/auth_v2.169.0/internal/api/options_test.go new file mode 100644 index 0000000..c4c1d16 --- /dev/null +++ b/auth_v2.169.0/internal/api/options_test.go @@ -0,0 +1,30 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/supabase/auth/internal/conf" +) + +func TestNewLimiterOptions(t *testing.T) { + cfg := &conf.GlobalConfiguration{} + cfg.ApplyDefaults() + + rl := NewLimiterOptions(cfg) + assert.NotNil(t, rl.Email) + assert.NotNil(t, rl.Phone) + assert.NotNil(t, rl.Signups) + assert.NotNil(t, rl.AnonymousSignIns) + assert.NotNil(t, rl.Recover) + assert.NotNil(t, rl.Resend) + assert.NotNil(t, rl.MagicLink) + assert.NotNil(t, rl.Otp) + assert.NotNil(t, rl.Token) + assert.NotNil(t, rl.Verify) + assert.NotNil(t, rl.User) + assert.NotNil(t, rl.FactorVerify) + assert.NotNil(t, rl.FactorChallenge) + assert.NotNil(t, rl.SSO) + assert.NotNil(t, rl.SAMLAssertion) +} diff --git a/auth_v2.169.0/internal/api/otp.go b/auth_v2.169.0/internal/api/otp.go new file mode 100644 index 0000000..1821da3 --- /dev/null +++ b/auth_v2.169.0/internal/api/otp.go @@ -0,0 +1,237 @@ +package api + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + + "github.com/sethvargo/go-password/password" + "github.com/supabase/auth/internal/api/sms_provider" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +// OtpParams contains the request body params for the otp endpoint +type OtpParams struct { + Email string `json:"email"` + Phone string `json:"phone"` + CreateUser bool `json:"create_user"` + Data map[string]interface{} `json:"data"` + Channel string `json:"channel"` + CodeChallengeMethod string `json:"code_challenge_method"` + CodeChallenge string `json:"code_challenge"` +} + +// SmsParams contains the request body params for sms otp +type SmsParams struct 
{ + Phone string `json:"phone"` + Channel string `json:"channel"` + Data map[string]interface{} `json:"data"` + CodeChallengeMethod string `json:"code_challenge_method"` + CodeChallenge string `json:"code_challenge"` +} + +func (p *OtpParams) Validate() error { + if p.Email != "" && p.Phone != "" { + return badRequestError(ErrorCodeValidationFailed, "Only an email address or phone number should be provided") + } + if p.Email != "" && p.Channel != "" { + return badRequestError(ErrorCodeValidationFailed, "Channel should only be specified with Phone OTP") + } + if err := validatePKCEParams(p.CodeChallengeMethod, p.CodeChallenge); err != nil { + return err + } + return nil +} + +func (p *SmsParams) Validate(config *conf.GlobalConfiguration) error { + var err error + p.Phone, err = validatePhone(p.Phone) + if err != nil { + return err + } + if !sms_provider.IsValidMessageChannel(p.Channel, config) { + return badRequestError(ErrorCodeValidationFailed, InvalidChannelError) + } + return nil +} + +// Otp returns the MagicLink or SmsOtp handler based on the request body params +func (a *API) Otp(w http.ResponseWriter, r *http.Request) error { + params := &OtpParams{ + CreateUser: true, + } + + if err := retrieveRequestParams(r, params); err != nil { + return err + } + + if err := params.Validate(); err != nil { + return err + } + if params.Data == nil { + params.Data = make(map[string]interface{}) + } + + if ok, err := a.shouldCreateUser(r, params); err != nil { + // check the error first so validation failures are not masked as OTPDisabled + return err + } else if !ok { + return unprocessableEntityError(ErrorCodeOTPDisabled, "Signups not allowed for otp") + } + + if params.Email != "" { + return a.MagicLink(w, r) + } else if params.Phone != "" { + return a.SmsOtp(w, r) + } + + return badRequestError(ErrorCodeValidationFailed, "One of email or phone must be set") +} + +type SmsOtpResponse struct { + MessageID string `json:"message_id,omitempty"` +} + +// SmsOtp sends the user an otp via sms +func (a *API) SmsOtp(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + config := a.config + + if !config.External.Phone.Enabled { + return badRequestError(ErrorCodePhoneProviderDisabled, "Unsupported phone provider") + } + var err error + + params := &SmsParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + + // For backwards compatibility, we default to SMS if params Channel is not specified + if params.Phone != "" && params.Channel == "" { + params.Channel = sms_provider.SMSProvider + } + + if err := params.Validate(config); err != nil { + return err + } + + var isNewUser bool + aud := a.requestAud(ctx, r) + user, err := models.FindUserByPhoneAndAudience(db, params.Phone, aud) + if err != nil { + if models.IsNotFoundError(err) { + isNewUser = true + } else { + return internalServerError("Database error finding user").WithInternalError(err) + } + } + if user != nil { + isNewUser = !user.IsPhoneConfirmed() + } + if isNewUser { + // User either doesn't exist or hasn't completed the signup process. + // Sign them up with a temporary password.
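+ // password.Generate(length, numDigits, numSymbols, noUpper, allowRepeat): + // a 64-char throwaway password with 10 digits, 1 symbol, mixed case and + // repeated characters allowed (sethvargo/go-password).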
+ password, err := password.Generate(64, 10, 1, false, true) + if err != nil { + return internalServerError("error creating user").WithInternalError(err) + } + + signUpParams := &SignupParams{ + Phone: params.Phone, + Password: password, + Data: params.Data, + Channel: params.Channel, + } + newBodyContent, err := json.Marshal(signUpParams) + if err != nil { + // SignupParams must be marshallable + panic(err) + } + r.Body = io.NopCloser(bytes.NewReader(newBodyContent)) + + fakeResponse := &responseStub{} + + if config.Sms.Autoconfirm { + // signups are autoconfirmed, send otp after signup + if err := a.Signup(fakeResponse, r); err != nil { + return err + } + + signUpParams := &SignupParams{ + Phone: params.Phone, + Channel: params.Channel, + } + newBodyContent, err := json.Marshal(signUpParams) + if err != nil { + // SignupParams must be marshallable + panic(err) + } + r.Body = io.NopCloser(bytes.NewReader(newBodyContent)) + return a.SmsOtp(w, r) + } + + if err := a.Signup(fakeResponse, r); err != nil { + return err + } + return sendJSON(w, http.StatusOK, make(map[string]string)) + } + + messageID := "" + err = db.Transaction(func(tx *storage.Connection) error { + if err := models.NewAuditLogEntry(r, tx, user, models.UserRecoveryRequestedAction, "", map[string]interface{}{ + "channel": params.Channel, + }); err != nil { + return err + } + mID, serr := a.sendPhoneConfirmation(r, tx, user, params.Phone, phoneConfirmationOtp, params.Channel) + if serr != nil { + return serr + } + messageID = mID + return nil + }) + + if err != nil { + return err + } + + return sendJSON(w, http.StatusOK, SmsOtpResponse{ + MessageID: messageID, + }) +} + +func (a *API) shouldCreateUser(r *http.Request, params *OtpParams) (bool, error) { + ctx := r.Context() + db := a.db.WithContext(ctx) + + if !params.CreateUser { + ctx := r.Context() + aud := a.requestAud(ctx, r) + var err error + if params.Email != "" { + params.Email, err = a.validateEmail(params.Email) + if err != nil { + return false, err + } + _, err = models.FindUserByEmailAndAudience(db, params.Email, aud) + } else if params.Phone != "" { + params.Phone, err = validatePhone(params.Phone) + if err != nil { + return false, err + } + _, err = models.FindUserByPhoneAndAudience(db, params.Phone, aud) + } + + if err != nil { + if models.IsNotFoundError(err) { + return false, nil + } + // propagate unexpected lookup errors instead of silently proceeding + return false, err + } + } + return true, nil +} diff --git a/auth_v2.169.0/internal/api/otp_test.go b/auth_v2.169.0/internal/api/otp_test.go new file mode 100644 index 0000000..c72fbc3 --- /dev/null +++ b/auth_v2.169.0/internal/api/otp_test.go @@ -0,0 +1,311 @@ +package api + +import ( + "bytes" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" +) + +type OtpTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration +} + +func TestOtp(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := &OtpTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *OtpTestSuite) SetupTest() { + models.TruncateAll(ts.API.db) +} + +func (ts *OtpTestSuite) TestOtpPKCE() { + ts.Config.External.Phone.Enabled = true + testCodeChallenge := "testtesttesttesttesttesttestteststeststesttesttesttest" + + var buffer bytes.Buffer + cases := []struct { + desc string + params OtpParams + expected struct { + code int
+ response map[string]interface{} + } + }{ + { + desc: "Test (PKCE) Success Magiclink Otp", + params: OtpParams{ + Email: "test@example.com", + CreateUser: true, + CodeChallengeMethod: "s256", + CodeChallenge: testCodeChallenge, + }, + expected: struct { + code int + response map[string]interface{} + }{ + http.StatusOK, + make(map[string]interface{}), + }, + }, + { + desc: "Test (PKCE) Failure, no code challenge", + params: OtpParams{ + Email: "test@example.com", + CreateUser: true, + CodeChallengeMethod: "s256", + }, + expected: struct { + code int + response map[string]interface{} + }{ + http.StatusBadRequest, + map[string]interface{}{ + "code": float64(http.StatusBadRequest), + "error_code": ErrorCodeValidationFailed, + "msg": "PKCE flow requires code_challenge_method and code_challenge", + }, + }, + }, + { + desc: "Test (PKCE) Failure, no code challenge method", + params: OtpParams{ + Email: "test@example.com", + CreateUser: true, + CodeChallenge: testCodeChallenge, + }, + expected: struct { + code int + response map[string]interface{} + }{ + http.StatusBadRequest, + map[string]interface{}{ + "code": float64(http.StatusBadRequest), + "error_code": ErrorCodeValidationFailed, + "msg": "PKCE flow requires code_challenge_method and code_challenge", + }, + }, + }, + { + desc: "Test (PKCE) Success, phone with valid params", + params: OtpParams{ + Phone: "123456789", + CreateUser: true, + CodeChallengeMethod: "s256", + CodeChallenge: testCodeChallenge, + }, + expected: struct { + code int + response map[string]interface{} + }{ + http.StatusInternalServerError, + map[string]interface{}{ + "code": float64(http.StatusInternalServerError), + "msg": "Unable to get SMS provider", + }, + }, + }, + } + for _, c := range cases { + ts.Run(c.desc, func() { + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.params)) + + req := httptest.NewRequest(http.MethodPost, "/otp", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + + require.Equal(ts.T(), c.expected.code, w.Code) + data := make(map[string]interface{}) + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + }) + } +} + +func (ts *OtpTestSuite) TestOtp() { + // Configured to allow testing of invalid channel params + ts.Config.External.Phone.Enabled = true + cases := []struct { + desc string + params OtpParams + expected struct { + code int + response map[string]interface{} + } + }{ + { + desc: "Test Success Magiclink Otp", + params: OtpParams{ + Email: "test@example.com", + CreateUser: true, + Data: map[string]interface{}{ + "somedata": "metadata", + }, + }, + expected: struct { + code int + response map[string]interface{} + }{ + http.StatusOK, + make(map[string]interface{}), + }, + }, + { + desc: "Test Failure Pass Both Email & Phone", + params: OtpParams{ + Email: "test@example.com", + Phone: "123456789", + CreateUser: true, + }, + expected: struct { + code int + response map[string]interface{} + }{ + http.StatusBadRequest, + map[string]interface{}{ + "code": float64(http.StatusBadRequest), + "error_code": ErrorCodeValidationFailed, + "msg": "Only an email address or phone number should be provided", + }, + }, + }, + { + desc: "Test Failure invalid channel param", + params: OtpParams{ + Phone: "123456789", + Channel: "invalidchannel", + CreateUser: true, + }, + expected: struct { + code int + response map[string]interface{} + }{ + http.StatusBadRequest, + map[string]interface{}{ + "code": float64(http.StatusBadRequest), + "error_code": 
ErrorCodeValidationFailed, + "msg": InvalidChannelError, + }, + }, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.params)) + + req := httptest.NewRequest(http.MethodPost, "/otp", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + require.Equal(ts.T(), c.expected.code, w.Code) + + data := make(map[string]interface{}) + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + // response body should match the expected response for this case + assert.Equal(ts.T(), c.expected.response, data) + }) + } +} + +func (ts *OtpTestSuite) TestNoSignupsForOtp() { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "newuser@example.com", + "create_user": false, + })) + + req := httptest.NewRequest(http.MethodPost, "/otp", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + require.Equal(ts.T(), http.StatusUnprocessableEntity, w.Code) + + data := make(map[string]interface{}) + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + // response should describe the disabled-signup error + assert.Equal(ts.T(), map[string]interface{}{ + "code": float64(http.StatusUnprocessableEntity), + "error_code": ErrorCodeOTPDisabled, + "msg": "Signups not allowed for otp", + }, data) +} + +func (ts *OtpTestSuite) TestSubsequentOtp() { + ts.Config.SMTP.MaxFrequency = 0 + userEmail := "foo@example.com" + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": userEmail, + })) + + req := httptest.NewRequest(http.MethodPost, "/otp", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + require.Equal(ts.T(), http.StatusOK, w.Code) + + newUser, err := models.FindUserByEmailAndAudience(ts.API.db, userEmail, ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + require.NotEmpty(ts.T(), newUser.ConfirmationToken) + require.NotEmpty(ts.T(), newUser.ConfirmationSentAt) + require.Empty(ts.T(), newUser.RecoveryToken) + require.Empty(ts.T(), newUser.RecoverySentAt) + require.Empty(ts.T(), newUser.EmailConfirmedAt) + + // since the signup process hasn't been completed, + // subsequent requests for another magiclink should not create a recovery token + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": userEmail, + })) + + req = httptest.NewRequest(http.MethodPost, "/otp", &buffer) + req.Header.Set("Content-Type", "application/json") + + w = httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + require.Equal(ts.T(), http.StatusOK, w.Code) + + user, err := models.FindUserByEmailAndAudience(ts.API.db, userEmail, ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + require.NotEmpty(ts.T(), user.ConfirmationToken) + require.NotEmpty(ts.T(), user.ConfirmationSentAt) + require.Empty(ts.T(), user.RecoveryToken) + require.Empty(ts.T(), user.RecoverySentAt) + require.Empty(ts.T(), user.EmailConfirmedAt) +} diff --git a/auth_v2.169.0/internal/api/pagination.go b/auth_v2.169.0/internal/api/pagination.go new file mode 100644 index 0000000..386f403 --- /dev/null +++ b/auth_v2.169.0/internal/api/pagination.go @@ -0,0 +1,64 @@ +package api + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + + "github.com/supabase/auth/internal/models" +) + +const defaultPerPage = 50 +
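+// calculateTotalPages returns total/perPage rounded up; for example, 101 + // records at the default perPage of 50 come out to 3 pages (2 full, 1 partial).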
+func calculateTotalPages(perPage, total uint64) uint64 { + pages := total / perPage + if total%perPage > 0 { + return pages + 1 + } + return pages +} + +func addPaginationHeaders(w http.ResponseWriter, r *http.Request, p *models.Pagination) { + totalPages := calculateTotalPages(p.PerPage, p.Count) + u, _ := url.ParseRequestURI(r.URL.String()) + query := u.Query() + header := "" + if totalPages > p.Page { + query.Set("page", fmt.Sprintf("%v", p.Page+1)) + u.RawQuery = query.Encode() + header += "<" + u.String() + ">; rel=\"next\", " + } + query.Set("page", fmt.Sprintf("%v", totalPages)) + u.RawQuery = query.Encode() + header += "<" + u.String() + ">; rel=\"last\"" + + w.Header().Add("Link", header) + w.Header().Add("X-Total-Count", fmt.Sprintf("%v", p.Count)) +} + +func paginate(r *http.Request) (*models.Pagination, error) { + params := r.URL.Query() + queryPage := params.Get("page") + queryPerPage := params.Get("per_page") + var page uint64 = 1 + var perPage uint64 = defaultPerPage + var err error + if queryPage != "" { + page, err = strconv.ParseUint(queryPage, 10, 64) + if err != nil { + return nil, err + } + } + if queryPerPage != "" { + perPage, err = strconv.ParseUint(queryPerPage, 10, 64) + if err != nil { + return nil, err + } + } + + return &models.Pagination{ + Page: page, + PerPage: perPage, + }, nil +} diff --git a/auth_v2.169.0/internal/api/password.go b/auth_v2.169.0/internal/api/password.go new file mode 100644 index 0000000..73de368 --- /dev/null +++ b/auth_v2.169.0/internal/api/password.go @@ -0,0 +1,73 @@ +package api + +import ( + "context" + "fmt" + "strings" + + "github.com/sirupsen/logrus" +) + +// BCrypt hashed passwords have a 72 byte limit +const MaxPasswordLength = 72 + +// WeakPasswordError encodes an error that a password does not meet strength +// requirements. It is handled specially in errors.go as it gets transformed to +// a HTTPError with a special weak_password field that encodes the Reasons +// slice.
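+// Reasons holds machine-readable causes; checkPasswordStrength below emits + // "length", "characters" and "pwned".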
+type WeakPasswordError struct { + Message string `json:"message,omitempty"` + Reasons []string `json:"reasons,omitempty"` +} + +func (e *WeakPasswordError) Error() string { + return e.Message +} + +func (a *API) checkPasswordStrength(ctx context.Context, password string) error { + config := a.config + + if len(password) > MaxPasswordLength { + return badRequestError(ErrorCodeValidationFailed, fmt.Sprintf("Password cannot be longer than %v characters", MaxPasswordLength)) + } + + var messages, reasons []string + + if len(password) < config.Password.MinLength { + reasons = append(reasons, "length") + messages = append(messages, fmt.Sprintf("Password should be at least %d characters.", config.Password.MinLength)) + } + + for _, characterSet := range config.Password.RequiredCharacters { + if characterSet != "" && !strings.ContainsAny(password, characterSet) { + reasons = append(reasons, "characters") + + messages = append(messages, fmt.Sprintf("Password should contain at least one character of each: %s.", strings.Join(config.Password.RequiredCharacters, ", "))) + + break + } + } + + if config.Password.HIBP.Enabled { + pwned, err := a.hibpClient.Check(ctx, password) + if err != nil { + if config.Password.HIBP.FailClosed { + return internalServerError("Unable to perform password strength check with HaveIBeenPwned.org.").WithInternalError(err) + } else { + logrus.WithError(err).Warn("Unable to perform password strength check with HaveIBeenPwned.org, pwned passwords are being allowed") + } + } else if pwned { + reasons = append(reasons, "pwned") + messages = append(messages, "Password is known to be weak and easy to guess, please choose a different one.") + } + } + + if len(reasons) > 0 { + return &WeakPasswordError{ + Message: strings.Join(messages, " "), + Reasons: reasons, + } + } + + return nil +} diff --git a/auth_v2.169.0/internal/api/password_test.go b/auth_v2.169.0/internal/api/password_test.go new file mode 100644 index 0000000..f95f6f6 --- /dev/null +++ b/auth_v2.169.0/internal/api/password_test.go @@ -0,0 +1,117 @@ +package api + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/conf" +) + +func TestPasswordStrengthChecks(t *testing.T) { + examples := []struct { + MinLength int + RequiredCharacters []string + + Password string + Reasons []string + }{ + { + MinLength: 6, + Password: "12345", + Reasons: []string{ + "length", + }, + }, + { + MinLength: 6, + RequiredCharacters: []string{ + "a", + "b", + "c", + }, + Password: "123", + Reasons: []string{ + "length", + "characters", + }, + }, + { + MinLength: 6, + RequiredCharacters: []string{ + "a", + "b", + "c", + }, + Password: "a123", + Reasons: []string{ + "length", + "characters", + }, + }, + { + MinLength: 6, + RequiredCharacters: []string{ + "a", + "b", + "c", + }, + Password: "ab123", + Reasons: []string{ + "length", + "characters", + }, + }, + { + MinLength: 6, + RequiredCharacters: []string{ + "a", + "b", + "c", + }, + Password: "c123", + Reasons: []string{ + "length", + "characters", + }, + }, + { + MinLength: 6, + RequiredCharacters: []string{ + "a", + "b", + "c", + }, + Password: "abc123", + Reasons: nil, + }, + { + MinLength: 6, + RequiredCharacters: []string{}, + Password: "zZgXb5gzyCNrV36qwbOSbKVQsVJd28mC1TwRpeB0y6sFNICJyjD6bILKJMsjyKDzBdaY5tmi8zY9BWJYmt3vULLmyafjIDLYjy8qhETu0mS2jj1uQBgSAzJn9Zjm8EFa", + Reasons: nil, + }, + } + + for i, example := range examples { + api := &API{ + config: &conf.GlobalConfiguration{ + Password: conf.PasswordConfiguration{ + 
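// exercise only the password policy fields relevant to each example +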
MinLength: example.MinLength, + RequiredCharacters: conf.PasswordRequiredCharacters(example.RequiredCharacters), + }, + }, + } + + err := api.checkPasswordStrength(context.Background(), example.Password) + + switch e := err.(type) { + case *WeakPasswordError: + require.Equal(t, e.Reasons, example.Reasons, "Example %d failed with wrong reasons", i) + case *HTTPError: + require.Equal(t, e.ErrorCode, ErrorCodeValidationFailed, "Example %d failed with wrong error code", i) + default: + require.NoError(t, err, "Example %d failed with error", i) + } + } +} diff --git a/auth_v2.169.0/internal/api/phone.go b/auth_v2.169.0/internal/api/phone.go new file mode 100644 index 0000000..5033888 --- /dev/null +++ b/auth_v2.169.0/internal/api/phone.go @@ -0,0 +1,169 @@ +package api + +import ( + "bytes" + "net/http" + "regexp" + "strings" + "text/template" + "time" + + "github.com/supabase/auth/internal/hooks" + + "github.com/pkg/errors" + "github.com/supabase/auth/internal/api/sms_provider" + "github.com/supabase/auth/internal/crypto" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +var e164Format = regexp.MustCompile("^[1-9][0-9]{1,14}$") + +const ( + phoneConfirmationOtp = "confirmation" + phoneReauthenticationOtp = "reauthentication" +) + +func validatePhone(phone string) (string, error) { + phone = formatPhoneNumber(phone) + if isValid := validateE164Format(phone); !isValid { + return "", badRequestError(ErrorCodeValidationFailed, "Invalid phone number format (E.164 required)") + } + return phone, nil +} + +// validateE164Format checks if phone number follows the E.164 format +func validateE164Format(phone string) bool { + return e164Format.MatchString(phone) +} + +// formatPhoneNumber removes "+" and whitespaces in a phone number +func formatPhoneNumber(phone string) string { + return strings.ReplaceAll(strings.TrimPrefix(phone, "+"), " ", "") +} + +// sendPhoneConfirmation sends an otp to the user's phone number +func (a *API) sendPhoneConfirmation(r *http.Request, tx *storage.Connection, user *models.User, phone, otpType string, channel string) (string, error) { + config := a.config + + var token *string + var sentAt *time.Time + + includeFields := []string{} + switch otpType { + case phoneChangeVerification: + token = &user.PhoneChangeToken + sentAt = user.PhoneChangeSentAt + user.PhoneChange = phone + includeFields = append(includeFields, "phone_change", "phone_change_token", "phone_change_sent_at") + case phoneConfirmationOtp: + token = &user.ConfirmationToken + sentAt = user.ConfirmationSentAt + includeFields = append(includeFields, "confirmation_token", "confirmation_sent_at") + case phoneReauthenticationOtp: + token = &user.ReauthenticationToken + sentAt = user.ReauthenticationSentAt + includeFields = append(includeFields, "reauthentication_token", "reauthentication_sent_at") + default: + return "", internalServerError("invalid otp type") + } + + // intentionally keeping this before the test OTP, so that the behavior + // of regular and test OTPs is similar + if sentAt != nil && !sentAt.Add(config.Sms.MaxFrequency).Before(time.Now()) { + return "", tooManyRequestsError(ErrorCodeOverSMSSendRateLimit, generateFrequencyLimitErrorMessage(sentAt, config.Sms.MaxFrequency)) + } + + now := time.Now() + + var otp, messageID string + + if testOTP, ok := config.Sms.GetTestOTP(phone, now); ok { + otp = testOTP + messageID = "test-otp" + } + + // not using test OTPs + if otp == "" { + // TODO(km): Deprecate this behaviour - rate limits should still be applied 
to autoconfirm + if !config.Sms.Autoconfirm { + // apply rate limiting before the sms is sent out + if ok := a.limiterOpts.Phone.Allow(); !ok { + return "", tooManyRequestsError(ErrorCodeOverSMSSendRateLimit, "SMS rate limit exceeded") + } + } + otp = crypto.GenerateOtp(config.Sms.OtpLength) + + if config.Hook.SendSMS.Enabled { + input := hooks.SendSMSInput{ + User: user, + SMS: hooks.SMS{ + OTP: otp, + }, + } + output := hooks.SendSMSOutput{} + err := a.invokeHook(tx, r, &input, &output) + if err != nil { + return "", err + } + } else { + smsProvider, err := sms_provider.GetSmsProvider(*config) + if err != nil { + return "", internalServerError("Unable to get SMS provider").WithInternalError(err) + } + message, err := generateSMSFromTemplate(config.Sms.SMSTemplate, otp) + if err != nil { + return "", internalServerError("error generating sms template").WithInternalError(err) + } + // assign to the outer messageID (":=" here would shadow it and drop the ID) + messageID, err = smsProvider.SendMessage(phone, message, channel, otp) + if err != nil { + return messageID, unprocessableEntityError(ErrorCodeSMSSendFailed, "Error sending %s OTP to provider: %v", otpType, err) + } + } + } + + *token = crypto.GenerateTokenHash(phone, otp) + + switch otpType { + case phoneConfirmationOtp: + user.ConfirmationSentAt = &now + case phoneChangeVerification: + user.PhoneChangeSentAt = &now + case phoneReauthenticationOtp: + user.ReauthenticationSentAt = &now + } + + if err := tx.UpdateOnly(user, includeFields...); err != nil { + return messageID, errors.Wrap(err, "Database error updating user for phone") + } + + var ottErr error + switch otpType { + case phoneConfirmationOtp: + if err := models.CreateOneTimeToken(tx, user.ID, user.GetPhone(), user.ConfirmationToken, models.ConfirmationToken); err != nil { + ottErr = errors.Wrap(err, "Database error creating confirmation token for phone") + } + case phoneChangeVerification: + if err := models.CreateOneTimeToken(tx, user.ID, user.PhoneChange, user.PhoneChangeToken, models.PhoneChangeToken); err != nil { + ottErr = errors.Wrap(err, "Database error creating phone change token") + } + case phoneReauthenticationOtp: + if err := models.CreateOneTimeToken(tx, user.ID, user.GetPhone(), user.ReauthenticationToken, models.ReauthenticationToken); err != nil { + ottErr = errors.Wrap(err, "Database error creating reauthentication token for phone") + } + } + if ottErr != nil { + return messageID, internalServerError("error creating one time token").WithInternalError(ottErr) + } + return messageID, nil +} + +func generateSMSFromTemplate(SMSTemplate *template.Template, otp string) (string, error) { + var message bytes.Buffer + if err := SMSTemplate.Execute(&message, struct { + Code string + }{Code: otp}); err != nil { + return "", err + } + return message.String(), nil +} diff --git a/auth_v2.169.0/internal/api/phone_test.go b/auth_v2.169.0/internal/api/phone_test.go new file mode 100644 index 0000000..adc50f1 --- /dev/null +++ b/auth_v2.169.0/internal/api/phone_test.go @@ -0,0 +1,443 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/api/sms_provider" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" +) + +type PhoneTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration +} + +type TestSmsProvider struct { + mock.Mock +
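// SentMessages counts SendMessage calls so tests can assert whether an SMS + // was actually dispatched (it stays 0 when test OTPs short-circuit sending).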
+ SentMessages int +} + +func (t *TestSmsProvider) SendMessage(phone, message, channel, otp string) (string, error) { + t.SentMessages += 1 + return "", nil +} +func (t *TestSmsProvider) VerifyOTP(phone, otp string) error { + return nil +} + +func TestPhone(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := &PhoneTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *PhoneTestSuite) SetupTest() { + models.TruncateAll(ts.API.db) + + // Create user + u, err := models.NewUser("123456789", "", "password", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating test user model") + require.NoError(ts.T(), ts.API.db.Create(u), "Error saving new test user") +} + +func (ts *PhoneTestSuite) TestValidateE164Format() { + isValid := validateE164Format("0123456789") + assert.Equal(ts.T(), false, isValid) +} + +func (ts *PhoneTestSuite) TestFormatPhoneNumber() { + actual := formatPhoneNumber("+1 23456789 ") + assert.Equal(ts.T(), "123456789", actual) +} + +func doTestSendPhoneConfirmation(ts *PhoneTestSuite, useTestOTP bool) { + u, err := models.FindUserByPhoneAndAudience(ts.API.db, "123456789", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + req, err := http.NewRequest("POST", "http://localhost:9998/otp", nil) + require.NoError(ts.T(), err) + cases := []struct { + desc string + otpType string + expected error + }{ + { + desc: "send confirmation otp", + otpType: phoneConfirmationOtp, + expected: nil, + }, + { + desc: "send phone_change otp", + otpType: phoneChangeVerification, + expected: nil, + }, + { + desc: "send reauthentication otp", + otpType: phoneReauthenticationOtp, + expected: nil, + }, + { + desc: "send invalid otp type", + otpType: "invalid otp type", + expected: internalServerError("invalid otp type"), + }, + } + + if useTestOTP { + ts.API.config.Sms.TestOTP = map[string]string{ + "123456789": "123456", + } + } else { + ts.API.config.Sms.TestOTP = nil + } + + for _, c := range cases { + ts.Run(c.desc, func() { + provider := &TestSmsProvider{} + sms_provider.MockProvider = provider + + _, err = ts.API.sendPhoneConfirmation(req, ts.API.db, u, "123456789", c.otpType, sms_provider.SMSProvider) + require.Equal(ts.T(), c.expected, err) + u, err = models.FindUserByPhoneAndAudience(ts.API.db, "123456789", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + if c.expected == nil { + if useTestOTP { + require.Equal(ts.T(), provider.SentMessages, 0) + } else { + require.Equal(ts.T(), provider.SentMessages, 1) + } + } + + switch c.otpType { + case phoneConfirmationOtp: + require.NotEmpty(ts.T(), u.ConfirmationToken) + require.NotEmpty(ts.T(), u.ConfirmationSentAt) + case phoneChangeVerification: + require.NotEmpty(ts.T(), u.PhoneChangeToken) + require.NotEmpty(ts.T(), u.PhoneChangeSentAt) + case phoneReauthenticationOtp: + require.NotEmpty(ts.T(), u.ReauthenticationToken) + require.NotEmpty(ts.T(), u.ReauthenticationSentAt) + default: + } + }) + } + // Reset at end of test + ts.API.config.Sms.TestOTP = nil + +} + +func (ts *PhoneTestSuite) TestSendPhoneConfirmation() { + doTestSendPhoneConfirmation(ts, false) +} + +func (ts *PhoneTestSuite) TestSendPhoneConfirmationWithTestOTP() { + doTestSendPhoneConfirmation(ts, true) +} + +func (ts *PhoneTestSuite) TestMissingSmsProviderConfig() { + u, err := models.FindUserByPhoneAndAudience(ts.API.db, "123456789", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + now := time.Now() + u.PhoneConfirmedAt = &now + require.NoError(ts.T(), ts.API.db.Update(u), "Error
updating new test user") + + s, err := models.NewSession(u.ID, nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.API.db.Create(s)) + + req := httptest.NewRequest(http.MethodPost, "/token?grant_type=password", nil) + token, _, err := ts.API.generateAccessToken(req, ts.API.db, u, &s.ID, models.PasswordGrant) + require.NoError(ts.T(), err) + + cases := []struct { + desc string + endpoint string + method string + header string + body map[string]string + expected map[string]interface{} + }{ + { + desc: "Signup", + endpoint: "/signup", + method: http.MethodPost, + header: "", + body: map[string]string{ + "phone": "1234567890", + "password": "testpassword", + }, + expected: map[string]interface{}{ + "code": http.StatusInternalServerError, + "message": "Unable to get SMS provider", + }, + }, + { + desc: "Sms OTP", + endpoint: "/otp", + method: http.MethodPost, + header: "", + body: map[string]string{ + "phone": "123456789", + }, + expected: map[string]interface{}{ + "code": http.StatusInternalServerError, + "message": "Unable to get SMS provider", + }, + }, + { + desc: "Phone change", + endpoint: "/user", + method: http.MethodPut, + header: token, + body: map[string]string{ + "phone": "111111111", + }, + expected: map[string]interface{}{ + "code": http.StatusInternalServerError, + "message": "Unable to get SMS provider", + }, + }, + { + desc: "Reauthenticate", + endpoint: "/reauthenticate", + method: http.MethodGet, + header: "", + body: nil, + expected: map[string]interface{}{ + "code": http.StatusInternalServerError, + "message": "Unable to get SMS provider", + }, + }, + } + + smsProviders := []string{"twilio", "messagebird", "textlocal", "vonage"} + ts.Config.External.Phone.Enabled = true + ts.Config.Sms.Twilio.AccountSid = "" + ts.Config.Sms.Messagebird.AccessKey = "" + ts.Config.Sms.Textlocal.ApiKey = "" + ts.Config.Sms.Vonage.ApiKey = "" + + for _, c := range cases { + for _, provider := range smsProviders { + ts.Config.Sms.Provider = provider + desc := fmt.Sprintf("[%v] %v", provider, c.desc) + ts.Run(desc, func() { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.body)) + + req := httptest.NewRequest(c.method, "http://localhost"+c.endpoint, &buffer) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), c.expected["code"], w.Code) + + body := w.Body.String() + require.True(ts.T(), + strings.Contains(body, "Unable to get SMS provider") || + strings.Contains(body, "Error finding SMS provider") || + strings.Contains(body, "Failed to get SMS provider"), + "unexpected body message %q", body, + ) + }) + } + } +} +func (ts *PhoneTestSuite) TestSendSMSHook() { + u, err := models.FindUserByPhoneAndAudience(ts.API.db, "123456789", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + now := time.Now() + u.PhoneConfirmedAt = &now + require.NoError(ts.T(), ts.API.db.Update(u), "Error updating new test user") + + s, err := models.NewSession(u.ID, nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.API.db.Create(s)) + + req := httptest.NewRequest(http.MethodPost, "/token?grant_type=password", nil) + token, _, err := ts.API.generateAccessToken(req, ts.API.db, u, &s.ID, models.PasswordGrant) + require.NoError(ts.T(), err) + + // We setup a job table to enqueue SMS requests to send. 
Similar in spirit to the pg_boss postgres extension + createJobsTableSQL := `CREATE TABLE job_queue ( + id serial PRIMARY KEY, + job_type text, + payload jsonb, + status text DEFAULT 'pending', -- Possible values: 'pending', 'processing', 'completed', 'failed' + created_at timestamp without time zone DEFAULT NOW() + );` + require.NoError(ts.T(), ts.API.db.RawQuery(createJobsTableSQL).Exec()) + + type sendSMSHookTestCase struct { + desc string + uri string + endpoint string + method string + header string + body map[string]string + hookFunctionSQL string + expectedCode int + expectToken bool + hookFunctionIdentifier string + } + cases := []sendSMSHookTestCase{ + { + desc: "Phone signup using Hook", + endpoint: "/signup", + method: http.MethodPost, + uri: "pg-functions://postgres/auth/send_sms_signup", + hookFunctionSQL: ` + create or replace function send_sms_signup(input jsonb) + returns json as $$ + begin + insert into job_queue(job_type, payload) + values ('sms_signup', input); + return input; + end; $$ language plpgsql;`, + header: "", + body: map[string]string{ + "phone": "1234567890", + "password": "testpassword", + }, + expectedCode: http.StatusOK, + hookFunctionIdentifier: "send_sms_signup(input jsonb)", + }, + { + desc: "SMS OTP sign in using hook", + endpoint: "/otp", + method: http.MethodPost, + uri: "pg-functions://postgres/auth/send_sms_otp", + hookFunctionSQL: ` + create or replace function send_sms_otp(input jsonb) + returns json as $$ + begin + insert into job_queue(job_type, payload) + values ('sms_signup', input); + return input; + end; $$ language plpgsql;`, + header: "", + body: map[string]string{ + "phone": "123456789", + }, + expectToken: false, + expectedCode: http.StatusOK, + hookFunctionIdentifier: "send_sms_otp(input jsonb)", + }, + { + desc: "Phone Change", + endpoint: "/user", + method: http.MethodPut, + uri: "pg-functions://postgres/auth/send_sms_phone_change", + hookFunctionSQL: ` + create or replace function send_sms_phone_change(input jsonb) + returns json as $$ + begin + insert into job_queue(job_type, payload) + values ('phone_change', input); + return input; + end; $$ language plpgsql;`, + header: token, + body: map[string]string{ + "phone": "111111111", + }, + expectToken: true, + expectedCode: http.StatusOK, + hookFunctionIdentifier: "send_sms_phone_change(input jsonb)", + }, + { + desc: "Reauthenticate", + endpoint: "/reauthenticate", + method: http.MethodGet, + uri: "pg-functions://postgres/auth/reauthenticate", + hookFunctionSQL: ` + create or replace function reauthenticate(input jsonb) + returns json as $$ + begin + return input; + end; $$ language plpgsql;`, + header: "", + body: nil, + expectToken: true, + expectedCode: http.StatusOK, + hookFunctionIdentifier: "reauthenticate(input jsonb)", + }, + { + desc: "SMS OTP Hook (Error)", + endpoint: "/otp", + method: http.MethodPost, + uri: "pg-functions://postgres/auth/send_sms_otp_failure", + hookFunctionSQL: ` + create or replace function send_sms_otp_failure(input jsonb) + returns json as $$ + begin + RAISE EXCEPTION 'Intentional Error for Testing'; + end; $$ language plpgsql;`, + header: "", + body: map[string]string{ + "phone": "123456789", + }, + expectToken: false, + expectedCode: http.StatusInternalServerError, + hookFunctionIdentifier: "send_sms_otp_failure(input jsonb)", + }, + } + + for _, c := range cases { + ts.T().Run(c.desc, func(t *testing.T) { + + ts.Config.External.Phone.Enabled = true + ts.Config.Hook.SendSMS.Enabled = true + ts.Config.Hook.SendSMS.URI = c.uri + // Disable FrequencyLimit to
allow back to back sending + ts.Config.Sms.MaxFrequency = 0 * time.Second + require.NoError(ts.T(), ts.Config.Hook.SendSMS.PopulateExtensibilityPoint()) + + require.NoError(t, ts.API.db.RawQuery(c.hookFunctionSQL).Exec()) + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.body)) + req := httptest.NewRequest(c.method, "http://localhost"+c.endpoint, &buffer) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + + require.Equal(t, c.expectedCode, w.Code, "Unexpected HTTP status code") + + // Delete the function and reset env + cleanupHookSQL := fmt.Sprintf("drop function if exists %s", ts.Config.Hook.SendSMS.HookName) + require.NoError(t, ts.API.db.RawQuery(cleanupHookSQL).Exec()) + ts.Config.Hook.SendSMS.Enabled = false + ts.Config.Sms.MaxFrequency = 1 * time.Second + }) + } + + // Cleanup + deleteJobsTableSQL := `drop table if exists job_queue` + require.NoError(ts.T(), ts.API.db.RawQuery(deleteJobsTableSQL).Exec()) + +} diff --git a/auth_v2.169.0/internal/api/pkce.go b/auth_v2.169.0/internal/api/pkce.go new file mode 100644 index 0000000..5ac7566 --- /dev/null +++ b/auth_v2.169.0/internal/api/pkce.go @@ -0,0 +1,98 @@ +package api + +import ( + "regexp" + + "github.com/gofrs/uuid" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +const ( + PKCEPrefix = "pkce_" + MinCodeChallengeLength = 43 + MaxCodeChallengeLength = 128 + InvalidPKCEParamsErrorMessage = "PKCE flow requires code_challenge_method and code_challenge" +) + +var codeChallengePattern = regexp.MustCompile("^[a-zA-Z._~0-9-]+$") + +func isValidCodeChallenge(codeChallenge string) (bool, error) { + // See RFC 7636 Section 4.2: https://www.rfc-editor.org/rfc/rfc7636#section-4.2 + switch codeChallengeLength := len(codeChallenge); { + case codeChallengeLength < MinCodeChallengeLength, codeChallengeLength > MaxCodeChallengeLength: + return false, badRequestError(ErrorCodeValidationFailed, "code challenge has to be between %v and %v characters", MinCodeChallengeLength, MaxCodeChallengeLength) + case !codeChallengePattern.MatchString(codeChallenge): + return false, badRequestError(ErrorCodeValidationFailed, "code challenge can only contain alphanumeric characters, hyphens, periods, underscores and tildes") + default: + return true, nil + } +} + +func addFlowPrefixToToken(token string, flowType models.FlowType) string { + if isPKCEFlow(flowType) { + return flowType.String() + "_" + token + } else if isImplicitFlow(flowType) { + return token + } + return token +} + +func issueAuthCode(tx *storage.Connection, user *models.User, authenticationMethod models.AuthenticationMethod) (string, error) { + flowState, err := models.FindFlowStateByUserID(tx, user.ID.String(), authenticationMethod) + if err != nil && models.IsNotFoundError(err) { + return "", unprocessableEntityError(ErrorCodeFlowStateNotFound, "No valid flow state found for user.") + } else if err != nil { + return "", err + } + if err := flowState.RecordAuthCodeIssuedAtTime(tx); err != nil { + return "", err + } + + return flowState.AuthCode, nil +} + +func isPKCEFlow(flowType models.FlowType) bool { + return flowType == models.PKCEFlow +} + +func isImplicitFlow(flowType models.FlowType) bool { + return flowType == models.ImplicitFlow +} + +func validatePKCEParams(codeChallengeMethod, codeChallenge string) error { + switch true { + case (codeChallenge == "") != 
(codeChallengeMethod == ""): + return badRequestError(ErrorCodeValidationFailed, InvalidPKCEParamsErrorMessage) + case codeChallenge != "": + if valid, err := isValidCodeChallenge(codeChallenge); !valid { + return err + } + default: + // if both params are empty, just return nil + return nil + } + return nil +} + +func getFlowFromChallenge(codeChallenge string) models.FlowType { + if codeChallenge != "" { + return models.PKCEFlow + } else { + return models.ImplicitFlow + } +} + +// Should only be used with Auth Code of PKCE Flows +func generateFlowState(tx *storage.Connection, providerType string, authenticationMethod models.AuthenticationMethod, codeChallengeMethodParam string, codeChallenge string, userID *uuid.UUID) (*models.FlowState, error) { + codeChallengeMethod, err := models.ParseCodeChallengeMethod(codeChallengeMethodParam) + if err != nil { + return nil, err + } + flowState := models.NewFlowState(providerType, codeChallenge, codeChallengeMethod, authenticationMethod, userID) + if err := tx.Create(flowState); err != nil { + return nil, err + } + return flowState, nil + +} diff --git a/auth_v2.169.0/internal/api/provider/apple.go b/auth_v2.169.0/internal/api/provider/apple.go new file mode 100644 index 0000000..508eaf1 --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/apple.go @@ -0,0 +1,144 @@ +package provider + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + "strings" + + "github.com/coreos/go-oidc/v3/oidc" + "github.com/sirupsen/logrus" + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +const IssuerApple = "https://appleid.apple.com" + +// AppleProvider stores the custom config for apple provider +type AppleProvider struct { + *oauth2.Config + oidc *oidc.Provider +} + +type IsPrivateEmail bool + +// Apple returns an is_private_email field that could be a string or boolean value so we need to implement a custom unmarshaler +// https://developer.apple.com/documentation/sign_in_with_apple/sign_in_with_apple_rest_api/authenticating_users_with_sign_in_with_apple +func (b *IsPrivateEmail) UnmarshalJSON(data []byte) error { + var boolVal bool + if err := json.Unmarshal(data, &boolVal); err == nil { + *b = IsPrivateEmail(boolVal) + return nil + } + + // ignore the error and try to unmarshal as a string + var strVal string + if err := json.Unmarshal(data, &strVal); err != nil { + return err + } + + var err error + boolVal, err = strconv.ParseBool(strVal) + if err != nil { + return err + } + + *b = IsPrivateEmail(boolVal) + return nil +} + +type appleName struct { + FirstName string `json:"firstName"` + LastName string `json:"lastName"` +} + +type appleUser struct { + Name appleName `json:"name"` + Email string `json:"email"` +} + +// NewAppleProvider creates a Apple account provider. 
+func NewAppleProvider(ctx context.Context, ext conf.OAuthProviderConfiguration) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + if ext.URL != "" { + logrus.Warn("Apple OAuth provider has URL config set which is ignored (check GOTRUE_EXTERNAL_APPLE_URL)") + } + + oidcProvider, err := oidc.NewProvider(ctx, IssuerApple) + if err != nil { + return nil, err + } + + return &AppleProvider{ + Config: &oauth2.Config{ + ClientID: ext.ClientID[0], + ClientSecret: ext.Secret, + Endpoint: oidcProvider.Endpoint(), + Scopes: []string{ + "email", + "name", + }, + RedirectURL: ext.RedirectURI, + }, + oidc: oidcProvider, + }, nil +} + +// GetOAuthToken returns the apple provider access token +func (p AppleProvider) GetOAuthToken(code string) (*oauth2.Token, error) { + opts := []oauth2.AuthCodeOption{ + oauth2.SetAuthURLParam("client_id", p.ClientID), + oauth2.SetAuthURLParam("secret", p.ClientSecret), + } + return p.Exchange(context.Background(), code, opts...) +} + +func (p AppleProvider) AuthCodeURL(state string, args ...oauth2.AuthCodeOption) string { + opts := make([]oauth2.AuthCodeOption, 0, 1) + opts = append(opts, oauth2.SetAuthURLParam("response_mode", "form_post")) + authURL := p.Config.AuthCodeURL(state, opts...) + if authURL != "" { + // on successful parse, re-encode '+' in the query as %20 + if u, err := url.Parse(authURL); err == nil { + u.RawQuery = strings.ReplaceAll(u.RawQuery, "+", "%20") + authURL = u.String() + } + } + return authURL +} + +// GetUserData returns the user data fetched from the apple provider +func (p AppleProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + idToken := tok.Extra("id_token") + if tok.AccessToken == "" || idToken == nil { + // Apple returns user data only the first time + return &UserProvidedData{}, nil + } + + _, data, err := ParseIDToken(ctx, p.oidc, &oidc.Config{ + ClientID: p.ClientID, + }, idToken.(string), ParseIDTokenOptions{ + AccessToken: tok.AccessToken, + }) + if err != nil { + return nil, err + } + + return data, nil +} + +// ParseUser parses the apple user's info +func (p AppleProvider) ParseUser(data string, userData *UserProvidedData) error { + u := &appleUser{} + err := json.Unmarshal([]byte(data), u) + if err != nil { + return err + } + + userData.Metadata.Name = strings.TrimSpace(u.Name.FirstName + " " + u.Name.LastName) + userData.Metadata.FullName = strings.TrimSpace(u.Name.FirstName + " " + u.Name.LastName) + return nil +} diff --git a/auth_v2.169.0/internal/api/provider/azure.go b/auth_v2.169.0/internal/api/provider/azure.go new file mode 100644 index 0000000..4a341f4 --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/azure.go @@ -0,0 +1,164 @@ +package provider + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "regexp" + "strings" + + "github.com/coreos/go-oidc/v3/oidc" + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +const IssuerAzureCommon = "https://login.microsoftonline.com/common/v2.0" +const IssuerAzureOrganizations = "https://login.microsoftonline.com/organizations/v2.0" + +// IssuerAzureMicrosoft is the OIDC issuer for microsoft.com accounts: +// https://learn.microsoft.com/en-us/azure/active-directory/develop/id-token-claims-reference#payload-claims +const IssuerAzureMicrosoft = "https://login.microsoftonline.com/9188040d-6c67-4c5b-b112-36a304b66dad/v2.0" + +const ( + defaultAzureAuthBase = "login.microsoftonline.com/common" +) + +type azureProvider struct { + *oauth2.Config + + // ExpectedIssuer contains the OIDC issuer that should be expected
+	// the authorize flow completes. For example, when using the "common"
+	// endpoint the authorization flow will end with an ID token that
+	// contains any issuer. In this case, ExpectedIssuer is an empty
+	// string, because any issuer is allowed. But if a developer sets up a
+	// tenant-specific authorization endpoint, then we must ensure that the
+	// ID token received is issued by that specific issuer, and so
+	// ExpectedIssuer contains the issuer URL of that tenant.
+	ExpectedIssuer string
+}
+
+var azureIssuerRegexp = regexp.MustCompile("^https://login[.]microsoftonline[.]com/([^/]+)/v2[.]0/?$")
+var azureCIAMIssuerRegexp = regexp.MustCompile("^https://[a-z0-9-]+[.]ciamlogin[.]com/([^/]+)/v2[.]0/?$")
+
+func IsAzureIssuer(issuer string) bool {
+	return azureIssuerRegexp.MatchString(issuer)
+}
+
+func IsAzureCIAMIssuer(issuer string) bool {
+	return azureCIAMIssuerRegexp.MatchString(issuer)
+}
+
+// NewAzureProvider creates an Azure account provider.
+func NewAzureProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) {
+	if err := ext.ValidateOAuth(); err != nil {
+		return nil, err
+	}
+
+	oauthScopes := []string{"openid"}
+
+	if scopes != "" {
+		oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...)
+	}
+
+	authHost := chooseHost(ext.URL, defaultAzureAuthBase)
+	expectedIssuer := ""
+
+	if ext.URL != "" {
+		expectedIssuer = authHost + "/v2.0"
+
+		// keep the expected issuer only when it looks like a real Azure
+		// or CIAM issuer; no URL can match both patterns, so the two
+		// checks must be combined with &&
+		if (!IsAzureIssuer(expectedIssuer) && !IsAzureCIAMIssuer(expectedIssuer)) || expectedIssuer == IssuerAzureCommon || expectedIssuer == IssuerAzureOrganizations {
+			// in tests, the URL is a local server which should not
+			// be the expected issuer
+			// also, IssuerAzure (common) never actually issues any
+			// ID tokens so it needs to be ignored
+			expectedIssuer = ""
+		}
+	}
+
+	return &azureProvider{
+		Config: &oauth2.Config{
+			ClientID:     ext.ClientID[0],
+			ClientSecret: ext.Secret,
+			Endpoint: oauth2.Endpoint{
+				AuthURL:  authHost + "/oauth2/v2.0/authorize",
+				TokenURL: authHost + "/oauth2/v2.0/token",
+			},
+			RedirectURL: ext.RedirectURI,
+			Scopes:      oauthScopes,
+		},
+		ExpectedIssuer: expectedIssuer,
+	}, nil
+}
+
+func (g azureProvider) GetOAuthToken(code string) (*oauth2.Token, error) {
+	return g.Exchange(context.Background(), code)
+}
+
+func DetectAzureIDTokenIssuer(ctx context.Context, idToken string) (string, error) {
+	var payload struct {
+		Issuer string `json:"iss"`
+	}
+
+	parts := strings.Split(idToken, ".")
+	if len(parts) != 3 {
+		return "", fmt.Errorf("azure: invalid ID token")
+	}
+
+	payloadBytes, err := base64.RawURLEncoding.DecodeString(parts[1])
+	if err != nil {
+		return "", fmt.Errorf("azure: invalid ID token %w", err)
+	}
+
+	if err := json.Unmarshal(payloadBytes, &payload); err != nil {
+		return "", fmt.Errorf("azure: invalid ID token %w", err)
+	}
+
+	return payload.Issuer, nil
+}
+
+func (g azureProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) {
+	idToken := tok.Extra("id_token")
+
+	if idToken != nil {
+		issuer, err := DetectAzureIDTokenIssuer(ctx, idToken.(string))
+		if err != nil {
+			return nil, err
+		}
+
+		// Allow plain Azure issuers; when the expected issuer is a CIAM
+		// issuer, also allow matching CIAM issuers to pass.
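+		// Decision sketch (editorial summary of the check below):
+		//
+		//	issuer kind     | ExpectedIssuer is CIAM | result
+		//	regular Azure   | (any)                  | continue
+		//	CIAM            | yes                    | continue
+		//	CIAM            | no                     | rejected
+		//	anything else   | (any)                  | rejected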
+		if !IsAzureIssuer(issuer) && !(IsAzureCIAMIssuer(g.ExpectedIssuer) && IsAzureCIAMIssuer(issuer)) {
+			return nil, fmt.Errorf("azure: ID token issuer not valid %q", issuer)
+		}
+
+		if g.ExpectedIssuer != "" && issuer != g.ExpectedIssuer {
+			// Since ExpectedIssuer was set, the developer has set up
+			// GoTrue to use a tenant-specific authorization endpoint,
+			// which in turn means that only that tenant's ID tokens
+			// will be accepted.
+			return nil, fmt.Errorf("azure: ID token issuer %q does not match expected issuer %q", issuer, g.ExpectedIssuer)
+		}
+
+		provider, err := oidc.NewProvider(ctx, issuer)
+		if err != nil {
+			return nil, err
+		}
+
+		_, data, err := ParseIDToken(ctx, provider, &oidc.Config{
+			ClientID: g.ClientID,
+		}, idToken.(string), ParseIDTokenOptions{
+			AccessToken: tok.AccessToken,
+		})
+		if err != nil {
+			return nil, err
+		}
+
+		return data, nil
+	}
+
+	// Only ID tokens are supported; the UserInfo endpoint has a history of
+	// being less secure.
+	return nil, fmt.Errorf("azure: no OIDC ID token present in response")
+}
diff --git a/auth_v2.169.0/internal/api/provider/azure_test.go b/auth_v2.169.0/internal/api/provider/azure_test.go
new file mode 100644
index 0000000..316cb08
--- /dev/null
+++ b/auth_v2.169.0/internal/api/provider/azure_test.go
@@ -0,0 +1,29 @@
+package provider
+
+import "testing"
+
+func TestIsAzureIssuer(t *testing.T) {
+	positiveExamples := []string{
+		"https://login.microsoftonline.com/9188040d-6c67-4c5b-b112-36a304b66dad/v2.0",
+		"https://login.microsoftonline.com/9188040d-6c67-4c5b-b112-36a304b66dad/v2.0/",
+		"https://login.microsoftonline.com/common/v2.0",
+	}
+
+	negativeExamples := []string{
+		"http://login.microsoftonline.com/9188040d-6c67-4c5b-b112-36a304b66dad/v2.0",
+		"https://login.microsoftonline.com/9188040d-6c67-4c5b-b112-36a304b66dad/v2.0?something=else",
+		"https://login.microsoftonline.com/9188040d-6c67-4c5b-b112-36a304b66dad/v2.0/extra",
+	}
+
+	for _, example := range positiveExamples {
+		if !IsAzureIssuer(example) {
+			t.Errorf("Example %q should be treated as a valid Azure issuer", example)
+		}
+	}
+
+	for _, example := range negativeExamples {
+		if IsAzureIssuer(example) {
+			t.Errorf("Example %q should be treated as not a valid Azure issuer", example)
+		}
+	}
+}
diff --git a/auth_v2.169.0/internal/api/provider/bitbucket.go b/auth_v2.169.0/internal/api/provider/bitbucket.go
new file mode 100644
index 0000000..e5fae5c
--- /dev/null
+++ b/auth_v2.169.0/internal/api/provider/bitbucket.go
@@ -0,0 +1,104 @@
+package provider
+
+import (
+	"context"
+
+	"github.com/supabase/auth/internal/conf"
+	"golang.org/x/oauth2"
+)
+
+const (
+	defaultBitbucketAuthBase = "bitbucket.org"
+	defaultBitbucketAPIBase  = "api.bitbucket.org"
+)
+
+type bitbucketProvider struct {
+	*oauth2.Config
+	APIPath string
+}
+
+type bitbucketUser struct {
+	Name   string `json:"display_name"`
+	ID     string `json:"uuid"`
+	Avatar struct {
+		Href string `json:"href"`
+	} `json:"avatar"`
+}
+
+type bitbucketEmail struct {
+	Email    string `json:"email"`
+	Primary  bool   `json:"is_primary"`
+	Verified bool   `json:"is_confirmed"`
+}
+
+type bitbucketEmails struct {
+	Values []bitbucketEmail `json:"values"`
+}
+
+// NewBitbucketProvider creates a Bitbucket account provider.
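+//
+// There is no separate API-host setting: both hosts derive from ext.URL via
+// chooseHost, falling back to bitbucket.org and api.bitbucket.org (with a
+// /2.0 suffix for the API) when no URL is configured.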
+func NewBitbucketProvider(ext conf.OAuthProviderConfiguration) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + authHost := chooseHost(ext.URL, defaultBitbucketAuthBase) + apiPath := chooseHost(ext.URL, defaultBitbucketAPIBase) + "/2.0" + + return &bitbucketProvider{ + Config: &oauth2.Config{ + ClientID: ext.ClientID[0], + ClientSecret: ext.Secret, + Endpoint: oauth2.Endpoint{ + AuthURL: authHost + "/site/oauth2/authorize", + TokenURL: authHost + "/site/oauth2/access_token", + }, + RedirectURL: ext.RedirectURI, + Scopes: []string{"account", "email"}, + }, + APIPath: apiPath, + }, nil +} + +func (g bitbucketProvider) GetOAuthToken(code string) (*oauth2.Token, error) { + return g.Exchange(context.Background(), code) +} + +func (g bitbucketProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + var u bitbucketUser + if err := makeRequest(ctx, tok, g.Config, g.APIPath+"/user", &u); err != nil { + return nil, err + } + + data := &UserProvidedData{} + + var emails bitbucketEmails + if err := makeRequest(ctx, tok, g.Config, g.APIPath+"/user/emails", &emails); err != nil { + return nil, err + } + + if len(emails.Values) > 0 { + for _, e := range emails.Values { + if e.Email != "" { + data.Emails = append(data.Emails, Email{ + Email: e.Email, + Verified: e.Verified, + Primary: e.Primary, + }) + } + } + } + + data.Metadata = &Claims{ + Issuer: g.APIPath, + Subject: u.ID, + Name: u.Name, + Picture: u.Avatar.Href, + + // To be deprecated + AvatarURL: u.Avatar.Href, + FullName: u.Name, + ProviderId: u.ID, + } + + return data, nil +} diff --git a/auth_v2.169.0/internal/api/provider/discord.go b/auth_v2.169.0/internal/api/provider/discord.go new file mode 100644 index 0000000..50d413b --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/discord.go @@ -0,0 +1,120 @@ +package provider + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +const ( + defaultDiscordAPIBase = "discord.com" +) + +type discordProvider struct { + *oauth2.Config + APIPath string +} + +type discordUser struct { + Avatar string `json:"avatar"` + Discriminator string `json:"discriminator"` + Email string `json:"email"` + ID string `json:"id"` + Name string `json:"username"` + GlobalName string `json:"global_name"` + Verified bool `json:"verified"` +} + +// NewDiscordProvider creates a Discord account provider. +func NewDiscordProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + apiPath := chooseHost(ext.URL, defaultDiscordAPIBase) + "/api" + + oauthScopes := []string{ + "email", + "identify", + } + + if scopes != "" { + oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...) 
+ } + + return &discordProvider{ + Config: &oauth2.Config{ + ClientID: ext.ClientID[0], + ClientSecret: ext.Secret, + Endpoint: oauth2.Endpoint{ + AuthURL: apiPath + "/oauth2/authorize", + TokenURL: apiPath + "/oauth2/token", + }, + Scopes: oauthScopes, + RedirectURL: ext.RedirectURI, + }, + APIPath: apiPath, + }, nil +} + +func (g discordProvider) GetOAuthToken(code string) (*oauth2.Token, error) { + return g.Exchange(context.Background(), code) +} + +func (g discordProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + var u discordUser + if err := makeRequest(ctx, tok, g.Config, g.APIPath+"/users/@me", &u); err != nil { + return nil, err + } + + data := &UserProvidedData{} + if u.Email != "" { + data.Emails = []Email{{ + Email: u.Email, + Verified: u.Verified, + Primary: true, + }} + } + + var avatarURL string + extension := "png" + if u.Avatar == "" { + if intDiscriminator, err := strconv.Atoi(u.Discriminator); err != nil { + return nil, err + } else { + // https://discord.com/developers/docs/reference#image-formatting-cdn-endpoints: + // In the case of the Default User Avatar endpoint, the value for + // user_discriminator in the path should be the user's discriminator modulo 5 + avatarURL = fmt.Sprintf("https://cdn.discordapp.com/embed/avatars/%d.%s", intDiscriminator%5, extension) + } + } else { + // https://discord.com/developers/docs/reference#image-formatting: + // "In the case of endpoints that support GIFs, the hash will begin with a_ + // if it is available in GIF format." + if strings.HasPrefix(u.Avatar, "a_") { + extension = "gif" + } + avatarURL = fmt.Sprintf("https://cdn.discordapp.com/avatars/%s/%s.%s", u.ID, u.Avatar, extension) + } + + data.Metadata = &Claims{ + Issuer: g.APIPath, + Subject: u.ID, + Name: fmt.Sprintf("%v#%v", u.Name, u.Discriminator), + Picture: avatarURL, + CustomClaims: map[string]interface{}{ + "global_name": u.GlobalName, + }, + + // To be deprecated + AvatarURL: avatarURL, + FullName: u.Name, + ProviderId: u.ID, + } + + return data, nil +} diff --git a/auth_v2.169.0/internal/api/provider/errors.go b/auth_v2.169.0/internal/api/provider/errors.go new file mode 100644 index 0000000..67a20ea --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/errors.go @@ -0,0 +1,49 @@ +package provider + +import "fmt" + +type HTTPError struct { + Code int `json:"code"` + Message string `json:"msg"` + InternalError error `json:"-"` + InternalMessage string `json:"-"` + ErrorID string `json:"error_id,omitempty"` +} + +func (e *HTTPError) Error() string { + if e.InternalMessage != "" { + return e.InternalMessage + } + return fmt.Sprintf("%d: %s", e.Code, e.Message) +} + +func (e *HTTPError) Is(target error) bool { + return e.Error() == target.Error() +} + +// Cause returns the root cause error +func (e *HTTPError) Cause() error { + if e.InternalError != nil { + return e.InternalError + } + return e +} + +// WithInternalError adds internal error information to the error +func (e *HTTPError) WithInternalError(err error) *HTTPError { + e.InternalError = err + return e +} + +// WithInternalMessage adds internal message information to the error +func (e *HTTPError) WithInternalMessage(fmtString string, args ...interface{}) *HTTPError { + e.InternalMessage = fmt.Sprintf(fmtString, args...) 
+ return e +} + +func httpError(code int, fmtString string, args ...interface{}) *HTTPError { + return &HTTPError{ + Code: code, + Message: fmt.Sprintf(fmtString, args...), + } +} diff --git a/auth_v2.169.0/internal/api/provider/facebook.go b/auth_v2.169.0/internal/api/provider/facebook.go new file mode 100644 index 0000000..e73c419 --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/facebook.go @@ -0,0 +1,112 @@ +package provider + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "strings" + + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +const IssuerFacebook = "https://www.facebook.com" + +const ( + defaultFacebookAuthBase = "www.facebook.com" + defaultFacebookTokenBase = "graph.facebook.com" //#nosec G101 -- Not a secret value. + defaultFacebookAPIBase = "graph.facebook.com" +) + +type facebookProvider struct { + *oauth2.Config + ProfileURL string +} + +type facebookUser struct { + ID string `json:"id"` + Email string `json:"email"` + FirstName string `json:"first_name"` + LastName string `json:"last_name"` + Alias string `json:"name"` + Avatar struct { + Data struct { + URL string `json:"url"` + } `json:"data"` + } `json:"picture"` +} + +// NewFacebookProvider creates a Facebook account provider. +func NewFacebookProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + authHost := chooseHost(ext.URL, defaultFacebookAuthBase) + tokenHost := chooseHost(ext.URL, defaultFacebookTokenBase) + profileURL := chooseHost(ext.URL, defaultFacebookAPIBase) + "/me?fields=email,first_name,last_name,name,picture" + + oauthScopes := []string{ + "email", + } + + if scopes != "" { + oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...) 
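+		// Note: the configured scopes string is split on "," only, so
+		// entries are not whitespace-trimmed; a value like
+		// "email, public_profile" would request a scope with a leading space.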
+ } + + return &facebookProvider{ + Config: &oauth2.Config{ + ClientID: ext.ClientID[0], + ClientSecret: ext.Secret, + RedirectURL: ext.RedirectURI, + Endpoint: oauth2.Endpoint{ + AuthURL: authHost + "/dialog/oauth", + TokenURL: tokenHost + "/oauth/access_token", + }, + Scopes: oauthScopes, + }, + ProfileURL: profileURL, + }, nil +} + +func (p facebookProvider) GetOAuthToken(code string) (*oauth2.Token, error) { + return p.Exchange(context.Background(), code) +} + +func (p facebookProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + hash := hmac.New(sha256.New, []byte(p.Config.ClientSecret)) + hash.Write([]byte(tok.AccessToken)) + appsecretProof := hex.EncodeToString(hash.Sum(nil)) + + var u facebookUser + url := p.ProfileURL + "&appsecret_proof=" + appsecretProof + if err := makeRequest(ctx, tok, p.Config, url, &u); err != nil { + return nil, err + } + + data := &UserProvidedData{} + if u.Email != "" { + data.Emails = []Email{{ + Email: u.Email, + Verified: true, + Primary: true, + }} + } + + data.Metadata = &Claims{ + Issuer: p.ProfileURL, + Subject: u.ID, + Name: strings.TrimSpace(u.FirstName + " " + u.LastName), + NickName: u.Alias, + Picture: u.Avatar.Data.URL, + + // To be deprecated + Slug: u.Alias, + AvatarURL: u.Avatar.Data.URL, + FullName: strings.TrimSpace(u.FirstName + " " + u.LastName), + ProviderId: u.ID, + } + + return data, nil +} diff --git a/auth_v2.169.0/internal/api/provider/figma.go b/auth_v2.169.0/internal/api/provider/figma.go new file mode 100644 index 0000000..ba812da --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/figma.go @@ -0,0 +1,95 @@ +package provider + +import ( + "context" + "strings" + + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +// Figma +// Reference: https://www.figma.com/developers/api#oauth2 + +const ( + defaultFigmaAuthBase = "www.figma.com" + defaultFigmaAPIBase = "api.figma.com" +) + +type figmaProvider struct { + *oauth2.Config + APIHost string +} + +type figmaUser struct { + ID string `json:"id"` + Email string `json:"email"` + Name string `json:"handle"` + AvatarURL string `json:"img_url"` +} + +// NewFigmaProvider creates a Figma account provider. +func NewFigmaProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + authHost := chooseHost(ext.URL, defaultFigmaAuthBase) + apiHost := chooseHost(ext.URL, defaultFigmaAPIBase) + + oauthScopes := []string{ + "files:read", + } + + if scopes != "" { + oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...) 
+ } + + return &figmaProvider{ + Config: &oauth2.Config{ + ClientID: ext.ClientID[0], + ClientSecret: ext.Secret, + Endpoint: oauth2.Endpoint{ + AuthURL: authHost + "/oauth", + TokenURL: authHost + "/api/oauth/token", + }, + RedirectURL: ext.RedirectURI, + Scopes: oauthScopes, + }, + APIHost: apiHost, + }, nil +} + +func (p figmaProvider) GetOAuthToken(code string) (*oauth2.Token, error) { + return p.Exchange(context.Background(), code) +} + +func (p figmaProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + var u figmaUser + if err := makeRequest(ctx, tok, p.Config, p.APIHost+"/v1/me", &u); err != nil { + return nil, err + } + + data := &UserProvidedData{} + if u.Email != "" { + data.Emails = []Email{{ + Email: u.Email, + Verified: true, + Primary: true, + }} + } + + data.Metadata = &Claims{ + Issuer: p.APIHost, + Subject: u.ID, + Name: u.Name, + Email: u.Email, + EmailVerified: true, + + // To be deprecated + AvatarURL: u.AvatarURL, + FullName: u.Name, + ProviderId: u.ID, + } + return data, nil +} diff --git a/auth_v2.169.0/internal/api/provider/fly.go b/auth_v2.169.0/internal/api/provider/fly.go new file mode 100644 index 0000000..d933752 --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/fly.go @@ -0,0 +1,103 @@ +package provider + +import ( + "context" + "strings" + + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +const ( + defaultFlyAPIBase = "api.fly.io" +) + +type flyProvider struct { + *oauth2.Config + APIPath string +} + +type flyUser struct { + ResourceOwnerID string `json:"resource_owner_id"` + UserID string `json:"user_id"` + UserName string `json:"user_name"` + Email string `json:"email"` + Organizations []struct { + ID string `json:"id"` + Role string `json:"role"` + } `json:"organizations"` + Scope []string `json:"scope"` + Application map[string]string `json:"application"` + ExpiresIn int `json:"expires_in"` + CreatedAt int `json:"created_at"` +} + +// NewFlyProvider creates a Fly oauth provider. +func NewFlyProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + authHost := chooseHost(ext.URL, defaultFlyAPIBase) + + // Fly only provides the "read" scope. + // https://fly.io/docs/reference/extensions_api/#single-sign-on-flow + oauthScopes := []string{ + "read", + } + + if scopes != "" { + oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...) 
+ } + + return &flyProvider{ + Config: &oauth2.Config{ + ClientID: ext.ClientID[0], + ClientSecret: ext.Secret, + Endpoint: oauth2.Endpoint{ + AuthURL: authHost + "/oauth/authorize", + TokenURL: authHost + "/oauth/token", + }, + RedirectURL: ext.RedirectURI, + Scopes: oauthScopes, + }, + APIPath: authHost, + }, nil +} + +func (p flyProvider) GetOAuthToken(code string) (*oauth2.Token, error) { + return p.Exchange(context.Background(), code) +} + +func (p flyProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + var u flyUser + if err := makeRequest(ctx, tok, p.Config, p.APIPath+"/oauth/token/info", &u); err != nil { + return nil, err + } + + data := &UserProvidedData{} + if u.Email != "" { + data.Emails = []Email{{ + Email: u.Email, + Verified: true, + Primary: true, + }} + } + + data.Metadata = &Claims{ + Issuer: p.APIPath, + Subject: u.UserID, + FullName: u.UserName, + Email: u.Email, + EmailVerified: true, + ProviderId: u.UserID, + CustomClaims: map[string]interface{}{ + "resource_owner_id": u.ResourceOwnerID, + "organizations": u.Organizations, + "application": u.Application, + "scope": u.Scope, + "created_at": u.CreatedAt, + }, + } + return data, nil +} diff --git a/auth_v2.169.0/internal/api/provider/github.go b/auth_v2.169.0/internal/api/provider/github.go new file mode 100644 index 0000000..0da3e88 --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/github.go @@ -0,0 +1,110 @@ +package provider + +import ( + "context" + "strconv" + "strings" + + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +// Github + +const ( + defaultGitHubAuthBase = "github.com" + defaultGitHubAPIBase = "api.github.com" +) + +type githubProvider struct { + *oauth2.Config + APIHost string +} + +type githubUser struct { + ID int `json:"id"` + UserName string `json:"login"` + Email string `json:"email"` + Name string `json:"name"` + AvatarURL string `json:"avatar_url"` +} + +type githubUserEmail struct { + Email string `json:"email"` + Primary bool `json:"primary"` + Verified bool `json:"verified"` +} + +// NewGithubProvider creates a Github account provider. +func NewGithubProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + authHost := chooseHost(ext.URL, defaultGitHubAuthBase) + apiHost := chooseHost(ext.URL, defaultGitHubAPIBase) + if !strings.HasSuffix(apiHost, defaultGitHubAPIBase) { + apiHost += "/api/v3" + } + + oauthScopes := []string{ + "user:email", + } + + if scopes != "" { + oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...) 
+ } + + return &githubProvider{ + Config: &oauth2.Config{ + ClientID: ext.ClientID[0], + ClientSecret: ext.Secret, + Endpoint: oauth2.Endpoint{ + AuthURL: authHost + "/login/oauth/authorize", + TokenURL: authHost + "/login/oauth/access_token", + }, + RedirectURL: ext.RedirectURI, + Scopes: oauthScopes, + }, + APIHost: apiHost, + }, nil +} + +func (g githubProvider) GetOAuthToken(code string) (*oauth2.Token, error) { + return g.Exchange(context.Background(), code) +} + +func (g githubProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + var u githubUser + if err := makeRequest(ctx, tok, g.Config, g.APIHost+"/user", &u); err != nil { + return nil, err + } + + data := &UserProvidedData{ + Metadata: &Claims{ + Issuer: g.APIHost, + Subject: strconv.Itoa(u.ID), + Name: u.Name, + PreferredUsername: u.UserName, + + // To be deprecated + AvatarURL: u.AvatarURL, + FullName: u.Name, + ProviderId: strconv.Itoa(u.ID), + UserNameKey: u.UserName, + }, + } + + var emails []*githubUserEmail + if err := makeRequest(ctx, tok, g.Config, g.APIHost+"/user/emails", &emails); err != nil { + return nil, err + } + + for _, e := range emails { + if e.Email != "" { + data.Emails = append(data.Emails, Email{Email: e.Email, Verified: e.Verified, Primary: e.Primary}) + } + } + + return data, nil +} diff --git a/auth_v2.169.0/internal/api/provider/gitlab.go b/auth_v2.169.0/internal/api/provider/gitlab.go new file mode 100644 index 0000000..4b5d70c --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/gitlab.go @@ -0,0 +1,107 @@ +package provider + +import ( + "context" + "strconv" + "strings" + + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +// Gitlab + +const defaultGitLabAuthBase = "gitlab.com" + +type gitlabProvider struct { + *oauth2.Config + Host string +} + +type gitlabUser struct { + Email string `json:"email"` + Name string `json:"name"` + AvatarURL string `json:"avatar_url"` + ConfirmedAt string `json:"confirmed_at"` + ID int `json:"id"` +} + +type gitlabUserEmail struct { + ID int `json:"id"` + Email string `json:"email"` +} + +// NewGitlabProvider creates a Gitlab account provider. +func NewGitlabProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + oauthScopes := []string{ + "read_user", + } + + if scopes != "" { + oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...) 
+ } + + host := chooseHost(ext.URL, defaultGitLabAuthBase) + return &gitlabProvider{ + Config: &oauth2.Config{ + ClientID: ext.ClientID[0], + ClientSecret: ext.Secret, + Endpoint: oauth2.Endpoint{ + AuthURL: host + "/oauth/authorize", + TokenURL: host + "/oauth/token", + }, + RedirectURL: ext.RedirectURI, + Scopes: oauthScopes, + }, + Host: host, + }, nil +} + +func (g gitlabProvider) GetOAuthToken(code string) (*oauth2.Token, error) { + return g.Exchange(context.Background(), code) +} + +func (g gitlabProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + var u gitlabUser + + if err := makeRequest(ctx, tok, g.Config, g.Host+"/api/v4/user", &u); err != nil { + return nil, err + } + + data := &UserProvidedData{} + + var emails []*gitlabUserEmail + if err := makeRequest(ctx, tok, g.Config, g.Host+"/api/v4/user/emails", &emails); err != nil { + return nil, err + } + + for _, e := range emails { + // additional emails from GitLab don't return confirm status + if e.Email != "" { + data.Emails = append(data.Emails, Email{Email: e.Email, Verified: false, Primary: false}) + } + } + + if u.Email != "" { + verified := u.ConfirmedAt != "" + data.Emails = append(data.Emails, Email{Email: u.Email, Verified: verified, Primary: true}) + } + + data.Metadata = &Claims{ + Issuer: g.Host, + Subject: strconv.Itoa(u.ID), + Name: u.Name, + Picture: u.AvatarURL, + + // To be deprecated + AvatarURL: u.AvatarURL, + FullName: u.Name, + ProviderId: strconv.Itoa(u.ID), + } + + return data, nil +} diff --git a/auth_v2.169.0/internal/api/provider/google.go b/auth_v2.169.0/internal/api/provider/google.go new file mode 100644 index 0000000..03b76ae --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/google.go @@ -0,0 +1,144 @@ +package provider + +import ( + "context" + "strings" + + "github.com/coreos/go-oidc/v3/oidc" + "github.com/sirupsen/logrus" + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +type googleUser struct { + ID string `json:"id"` + Subject string `json:"sub"` + Issuer string `json:"iss"` + Name string `json:"name"` + AvatarURL string `json:"picture"` + Email string `json:"email"` + VerifiedEmail bool `json:"verified_email"` + EmailVerified bool `json:"email_verified"` + HostedDomain string `json:"hd"` +} + +func (u googleUser) IsEmailVerified() bool { + return u.VerifiedEmail || u.EmailVerified +} + +const IssuerGoogle = "https://accounts.google.com" + +var internalIssuerGoogle = IssuerGoogle + +type googleProvider struct { + *oauth2.Config + + oidc *oidc.Provider +} + +// NewGoogleProvider creates a Google OAuth2 identity provider. +func NewGoogleProvider(ctx context.Context, ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + if ext.URL != "" { + logrus.Warn("Google OAuth provider has URL config set which is ignored (check GOTRUE_EXTERNAL_GOOGLE_URL)") + } + + oauthScopes := []string{ + "email", + "profile", + } + + if scopes != "" { + oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...) 
+ } + + oidcProvider, err := oidc.NewProvider(ctx, internalIssuerGoogle) + if err != nil { + return nil, err + } + + return &googleProvider{ + Config: &oauth2.Config{ + ClientID: ext.ClientID[0], + ClientSecret: ext.Secret, + Endpoint: oidcProvider.Endpoint(), + Scopes: oauthScopes, + RedirectURL: ext.RedirectURI, + }, + oidc: oidcProvider, + }, nil +} + +func (g googleProvider) GetOAuthToken(code string) (*oauth2.Token, error) { + return g.Exchange(context.Background(), code) +} + +const UserInfoEndpointGoogle = "https://www.googleapis.com/userinfo/v2/me" + +var internalUserInfoEndpointGoogle = UserInfoEndpointGoogle + +func (g googleProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + if idToken := tok.Extra("id_token"); idToken != nil { + _, data, err := ParseIDToken(ctx, g.oidc, &oidc.Config{ + ClientID: g.Config.ClientID, + }, idToken.(string), ParseIDTokenOptions{ + AccessToken: tok.AccessToken, + }) + if err != nil { + return nil, err + } + + return data, nil + } + + // This whole section offers legacy support in case the Google OAuth2 + // flow does not return an ID Token for the user, which appears to + // always be the case. + logrus.Info("Using Google OAuth2 user info endpoint, an ID token was not returned by Google") + + var u googleUser + if err := makeRequest(ctx, tok, g.Config, internalUserInfoEndpointGoogle, &u); err != nil { + return nil, err + } + + var data UserProvidedData + + if u.Email != "" { + data.Emails = append(data.Emails, Email{ + Email: u.Email, + Verified: u.IsEmailVerified(), + Primary: true, + }) + } + + data.Metadata = &Claims{ + Issuer: internalUserInfoEndpointGoogle, + Subject: u.ID, + Name: u.Name, + Picture: u.AvatarURL, + Email: u.Email, + EmailVerified: u.IsEmailVerified(), + + // To be deprecated + AvatarURL: u.AvatarURL, + FullName: u.Name, + ProviderId: u.ID, + } + + return &data, nil +} + +// ResetGoogleProvider should only be used in tests! +func ResetGoogleProvider() { + internalIssuerGoogle = IssuerGoogle + internalUserInfoEndpointGoogle = UserInfoEndpointGoogle +} + +// OverrideGoogleProvider should only be used in tests! 
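+//
+// An illustrative test setup (sketch only; assumes a stub server that also
+// serves the OIDC discovery document):
+//
+//	srv := httptest.NewServer(stubGoogleHandler) // hypothetical handler
+//	OverrideGoogleProvider(srv.URL, srv.URL+"/userinfo")
+//	defer ResetGoogleProvider()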
+func OverrideGoogleProvider(issuer, userInfo string) { + internalIssuerGoogle = issuer + internalUserInfoEndpointGoogle = userInfo +} diff --git a/auth_v2.169.0/internal/api/provider/kakao.go b/auth_v2.169.0/internal/api/provider/kakao.go new file mode 100644 index 0000000..2482b97 --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/kakao.go @@ -0,0 +1,107 @@ +package provider + +import ( + "context" + "strconv" + "strings" + + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +const ( + defaultKakaoAuthBase = "kauth.kakao.com" + defaultKakaoAPIBase = "kapi.kakao.com" + IssuerKakao = "https://kauth.kakao.com" +) + +type kakaoProvider struct { + *oauth2.Config + APIHost string +} + +type kakaoUser struct { + ID int `json:"id"` + Account struct { + Profile struct { + Name string `json:"nickname"` + ProfileImageURL string `json:"profile_image_url"` + } `json:"profile"` + Email string `json:"email"` + EmailValid bool `json:"is_email_valid"` + EmailVerified bool `json:"is_email_verified"` + } `json:"kakao_account"` +} + +func (p kakaoProvider) GetOAuthToken(code string) (*oauth2.Token, error) { + return p.Exchange(context.Background(), code) +} + +func (p kakaoProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + var u kakaoUser + + if err := makeRequest(ctx, tok, p.Config, p.APIHost+"/v2/user/me", &u); err != nil { + return nil, err + } + + data := &UserProvidedData{} + + if u.Account.Email != "" { + data.Emails = []Email{ + { + Email: u.Account.Email, + Verified: u.Account.EmailVerified && u.Account.EmailValid, + Primary: true, + }, + } + } + + data.Metadata = &Claims{ + Issuer: p.APIHost, + Subject: strconv.Itoa(u.ID), + + Name: u.Account.Profile.Name, + PreferredUsername: u.Account.Profile.Name, + + // To be deprecated + AvatarURL: u.Account.Profile.ProfileImageURL, + FullName: u.Account.Profile.Name, + ProviderId: strconv.Itoa(u.ID), + UserNameKey: u.Account.Profile.Name, + } + return data, nil +} + +func NewKakaoProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + authHost := chooseHost(ext.URL, defaultKakaoAuthBase) + apiHost := chooseHost(ext.URL, defaultKakaoAPIBase) + + oauthScopes := []string{ + "account_email", + "profile_image", + "profile_nickname", + } + + if scopes != "" { + oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...) + } + + return &kakaoProvider{ + Config: &oauth2.Config{ + ClientID: ext.ClientID[0], + ClientSecret: ext.Secret, + Endpoint: oauth2.Endpoint{ + AuthStyle: oauth2.AuthStyleInParams, + AuthURL: authHost + "/oauth/authorize", + TokenURL: authHost + "/oauth/token", + }, + RedirectURL: ext.RedirectURI, + Scopes: oauthScopes, + }, + APIHost: apiHost, + }, nil +} diff --git a/auth_v2.169.0/internal/api/provider/keycloak.go b/auth_v2.169.0/internal/api/provider/keycloak.go new file mode 100644 index 0000000..39ccec5 --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/keycloak.go @@ -0,0 +1,98 @@ +package provider + +import ( + "context" + "errors" + "strings" + + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +// Keycloak +type keycloakProvider struct { + *oauth2.Config + Host string +} + +type keycloakUser struct { + Name string `json:"name"` + Sub string `json:"sub"` + Email string `json:"email"` + EmailVerified bool `json:"email_verified"` +} + +// NewKeycloakProvider creates a Keycloak account provider. 
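+//
+// ext.URL must point at the realm's base URL, e.g. a (hypothetical) value
+// like https://keycloak.example.com/realms/myrealm; a single trailing slash
+// is trimmed before the /protocol/openid-connect/* paths are appended.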
+func NewKeycloakProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + oauthScopes := []string{ + "profile", + "email", + } + + if scopes != "" { + oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...) + } + + if ext.URL == "" { + return nil, errors.New("unable to find URL for the Keycloak provider") + } + + extURLlen := len(ext.URL) + if ext.URL[extURLlen-1] == '/' { + ext.URL = ext.URL[:extURLlen-1] + } + + return &keycloakProvider{ + Config: &oauth2.Config{ + ClientID: ext.ClientID[0], + ClientSecret: ext.Secret, + Endpoint: oauth2.Endpoint{ + AuthURL: ext.URL + "/protocol/openid-connect/auth", + TokenURL: ext.URL + "/protocol/openid-connect/token", + }, + RedirectURL: ext.RedirectURI, + Scopes: oauthScopes, + }, + Host: ext.URL, + }, nil +} + +func (g keycloakProvider) GetOAuthToken(code string) (*oauth2.Token, error) { + return g.Exchange(context.Background(), code) +} + +func (g keycloakProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + var u keycloakUser + + if err := makeRequest(ctx, tok, g.Config, g.Host+"/protocol/openid-connect/userinfo", &u); err != nil { + return nil, err + } + + data := &UserProvidedData{} + if u.Email != "" { + data.Emails = []Email{{ + Email: u.Email, + Verified: u.EmailVerified, + Primary: true, + }} + } + + data.Metadata = &Claims{ + Issuer: g.Host, + Subject: u.Sub, + Name: u.Name, + Email: u.Email, + EmailVerified: u.EmailVerified, + + // To be deprecated + FullName: u.Name, + ProviderId: u.Sub, + } + + return data, nil + +} diff --git a/auth_v2.169.0/internal/api/provider/linkedin.go b/auth_v2.169.0/internal/api/provider/linkedin.go new file mode 100644 index 0000000..bc33515 --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/linkedin.go @@ -0,0 +1,149 @@ +package provider + +import ( + "context" + "strings" + + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +const ( + defaultLinkedinAPIBase = "api.linkedin.com" +) + +type linkedinProvider struct { + *oauth2.Config + APIPath string + UserInfoURL string + UserEmailUrl string +} + +// See https://docs.microsoft.com/en-us/linkedin/consumer/integrations/self-serve/sign-in-with-linkedin?context=linkedin/consumer/context +// for retrieving a member's profile. This requires the r_liteprofile scope. +type linkedinUser struct { + ID string `json:"id"` + FirstName linkedinName `json:"firstName"` + LastName linkedinName `json:"lastName"` + AvatarURL struct { + DisplayImage struct { + Elements []struct { + Identifiers []struct { + Identifier string `json:"identifier"` + } `json:"identifiers"` + } `json:"elements"` + } `json:"displayImage~"` + } `json:"profilePicture"` +} + +func (u *linkedinUser) getAvatarUrl() string { + avatarURL := "" + if len(u.AvatarURL.DisplayImage.Elements) > 0 { + avatarURL = u.AvatarURL.DisplayImage.Elements[0].Identifiers[0].Identifier + } + return avatarURL +} + +type linkedinName struct { + Localized interface{} `json:"localized"` + PreferredLocale linkedinLocale `json:"preferredLocale"` +} + +type linkedinLocale struct { + Country string `json:"country"` + Language string `json:"language"` +} + +// See https://docs.microsoft.com/en-us/linkedin/consumer/integrations/self-serve/sign-in-with-linkedin?context=linkedin/consumer/context#retrieving-member-email-address +// for retrieving a member email address. This requires the r_email_address scope. 
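+//
+// The response body is shaped roughly like this (abridged sketch inferred
+// from the struct tags below):
+//
+//	{"elements":[{"handle":"urn:li:emailAddress:1","handle~":{"emailAddress":"user@example.com"}}]}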
+type linkedinElements struct {
+	Elements []struct {
+		Handle      string `json:"handle"`
+		HandleTilde struct {
+			EmailAddress string `json:"emailAddress"`
+		} `json:"handle~"`
+	} `json:"elements"`
+}
+
+// NewLinkedinProvider creates a Linkedin account provider.
+func NewLinkedinProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) {
+	if err := ext.ValidateOAuth(); err != nil {
+		return nil, err
+	}
+
+	apiPath := chooseHost(ext.URL, defaultLinkedinAPIBase)
+
+	oauthScopes := []string{
+		"r_emailaddress",
+		"r_liteprofile",
+	}
+
+	if scopes != "" {
+		oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...)
+	}
+
+	return &linkedinProvider{
+		Config: &oauth2.Config{
+			ClientID:     ext.ClientID[0],
+			ClientSecret: ext.Secret,
+			Endpoint: oauth2.Endpoint{
+				AuthURL:  apiPath + "/oauth/v2/authorization",
+				TokenURL: apiPath + "/oauth/v2/accessToken",
+			},
+			Scopes:      oauthScopes,
+			RedirectURL: ext.RedirectURI,
+		},
+		APIPath: apiPath,
+	}, nil
+}
+
+func (g linkedinProvider) GetOAuthToken(code string) (*oauth2.Token, error) {
+	return g.Exchange(context.Background(), code)
+}
+
+func GetName(name linkedinName) string {
+	key := name.PreferredLocale.Language + "_" + name.PreferredLocale.Country
+	myMap := name.Localized.(map[string]interface{})
+	return myMap[key].(string)
+}
+
+func (g linkedinProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) {
+	var u linkedinUser
+	if err := makeRequest(ctx, tok, g.Config, g.APIPath+"/v2/me?projection=(id,firstName,lastName,profilePicture(displayImage~:playableStreams))", &u); err != nil {
+		return nil, err
+	}
+
+	var e linkedinElements
+	// Note: Use primary contact api for handling phone numbers
+	if err := makeRequest(ctx, tok, g.Config, g.APIPath+"/v2/emailAddress?q=members&projection=(elements*(handle~))", &e); err != nil {
+		return nil, err
+	}
+
+	data := &UserProvidedData{}
+
+	// guard against an empty elements array to avoid an index-out-of-range
+	// panic when LinkedIn returns no email addresses
+	primaryEmail := ""
+	if len(e.Elements) > 0 {
+		primaryEmail = e.Elements[0].HandleTilde.EmailAddress
+	}
+
+	if primaryEmail != "" {
+		// linkedin only returns the primary email which is verified for the r_emailaddress scope.
+		data.Emails = []Email{{
+			Email:    primaryEmail,
+			Primary:  true,
+			Verified: true,
+		}}
+	}
+
+	avatarURL := u.getAvatarUrl()
+
+	data.Metadata = &Claims{
+		Issuer:        g.APIPath,
+		Subject:       u.ID,
+		Name:          strings.TrimSpace(GetName(u.FirstName) + " " + GetName(u.LastName)),
+		Picture:       avatarURL,
+		Email:         primaryEmail,
+		EmailVerified: primaryEmail != "",
+
+		// To be deprecated
+		AvatarURL:  avatarURL,
+		FullName:   strings.TrimSpace(GetName(u.FirstName) + " " + GetName(u.LastName)),
+		ProviderId: u.ID,
+	}
+	return data, nil
+}
diff --git a/auth_v2.169.0/internal/api/provider/linkedin_oidc.go b/auth_v2.169.0/internal/api/provider/linkedin_oidc.go
new file mode 100644
index 0000000..a5d94fa
--- /dev/null
+++ b/auth_v2.169.0/internal/api/provider/linkedin_oidc.go
@@ -0,0 +1,81 @@
+package provider
+
+import (
+	"context"
+	"strings"
+
+	"github.com/coreos/go-oidc/v3/oidc"
+	"github.com/supabase/auth/internal/conf"
+	"golang.org/x/oauth2"
+)
+
+const (
+	defaultLinkedinOIDCAPIBase = "api.linkedin.com"
+	IssuerLinkedin             = "https://www.linkedin.com/oauth"
+)
+
+type linkedinOIDCProvider struct {
+	*oauth2.Config
+	oidc    *oidc.Provider
+	APIPath string
+}
+
+// NewLinkedinOIDCProvider creates a Linkedin account provider via OIDC.
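+//
+// Unlike the legacy LinkedIn provider above, user data is read from the OIDC
+// ID token issued by https://www.linkedin.com/oauth rather than from the
+// /v2/me and /v2/emailAddress REST endpoints.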
+func NewLinkedinOIDCProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + apiPath := chooseHost(ext.URL, defaultLinkedinOIDCAPIBase) + + oauthScopes := []string{ + "openid", + "email", + "profile", + } + + if scopes != "" { + oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...) + } + + oidcProvider, err := oidc.NewProvider(context.Background(), IssuerLinkedin) + if err != nil { + return nil, err + } + + return &linkedinOIDCProvider{ + oidc: oidcProvider, + Config: &oauth2.Config{ + ClientID: ext.ClientID[0], + ClientSecret: ext.Secret, + Endpoint: oauth2.Endpoint{ + AuthURL: apiPath + "/oauth/v2/authorization", + TokenURL: apiPath + "/oauth/v2/accessToken", + }, + Scopes: oauthScopes, + RedirectURL: ext.RedirectURI, + }, + APIPath: apiPath, + }, nil +} + +func (g linkedinOIDCProvider) GetOAuthToken(code string) (*oauth2.Token, error) { + return g.Exchange(context.Background(), code) +} + +func (g linkedinOIDCProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + idToken := tok.Extra("id_token") + if tok.AccessToken == "" || idToken == nil { + return &UserProvidedData{}, nil + } + + _, data, err := ParseIDToken(ctx, g.oidc, &oidc.Config{ + ClientID: g.ClientID, + }, idToken.(string), ParseIDTokenOptions{ + AccessToken: tok.AccessToken, + }) + if err != nil { + return nil, err + } + return data, nil +} diff --git a/auth_v2.169.0/internal/api/provider/notion.go b/auth_v2.169.0/internal/api/provider/notion.go new file mode 100644 index 0000000..f8d0ee7 --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/notion.go @@ -0,0 +1,121 @@ +package provider + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/utilities" + "golang.org/x/oauth2" +) + +const ( + defaultNotionApiBase = "api.notion.com" + notionApiVersion = "2021-08-16" +) + +type notionProvider struct { + *oauth2.Config + APIPath string +} + +type notionUser struct { + Bot struct { + Owner struct { + User struct { + ID string `json:"id"` + Name string `json:"name"` + AvatarURL string `json:"avatar_url"` + Person struct { + Email string `json:"email"` + } `json:"person"` + } `json:"user"` + } `json:"owner"` + } `json:"bot"` +} + +// NewNotionProvider creates a Notion account provider. 
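+//
+// Note that GetUserData below performs the HTTP request itself rather than
+// going through makeRequest, because Notion requires the Notion-Version
+// header on API calls.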
+func NewNotionProvider(ext conf.OAuthProviderConfiguration) (OAuthProvider, error) {
+	if err := ext.ValidateOAuth(); err != nil {
+		return nil, err
+	}
+
+	authHost := chooseHost(ext.URL, defaultNotionApiBase)
+
+	return &notionProvider{
+		Config: &oauth2.Config{
+			ClientID:     ext.ClientID[0],
+			ClientSecret: ext.Secret,
+			Endpoint: oauth2.Endpoint{
+				AuthURL:  authHost + "/v1/oauth/authorize",
+				TokenURL: authHost + "/v1/oauth/token",
+			},
+			RedirectURL: ext.RedirectURI,
+		},
+		APIPath: authHost,
+	}, nil
+}
+
+func (g notionProvider) GetOAuthToken(code string) (*oauth2.Token, error) {
+	return g.Exchange(context.Background(), code)
+}
+
+func (g notionProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) {
+	var u notionUser
+
+	// Perform the HTTP request ourselves because we need to set the
+	// Notion-Version header
+	req, err := http.NewRequest("GET", g.APIPath+"/v1/users/me", nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// set headers
+	req.Header.Set("Notion-Version", notionApiVersion)
+	req.Header.Set("Authorization", "Bearer "+tok.AccessToken)
+
+	client := &http.Client{Timeout: defaultTimeout}
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer utilities.SafeClose(resp.Body)
+
+	if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
+		return nil, fmt.Errorf("a %v error occurred while retrieving the user from notion", resp.StatusCode)
+	}
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(body, &u)
+	if err != nil {
+		return nil, err
+	}
+
+	data := &UserProvidedData{}
+	if u.Bot.Owner.User.Person.Email != "" {
+		data.Emails = []Email{{
+			Email:    u.Bot.Owner.User.Person.Email,
+			Verified: true, // Notion doesn't provide data on whether the email is verified.
+			Primary:  true,
+		}}
+	}
+
+	data.Metadata = &Claims{
+		Issuer:  g.APIPath,
+		Subject: u.Bot.Owner.User.ID,
+		Name:    u.Bot.Owner.User.Name,
+		Picture: u.Bot.Owner.User.AvatarURL,
+
+		// To be deprecated
+		AvatarURL:  u.Bot.Owner.User.AvatarURL,
+		FullName:   u.Bot.Owner.User.Name,
+		ProviderId: u.Bot.Owner.User.ID,
+	}
+	return data, nil
+}
diff --git a/auth_v2.169.0/internal/api/provider/oidc.go b/auth_v2.169.0/internal/api/provider/oidc.go
new file mode 100644
index 0000000..51c88e6
--- /dev/null
+++ b/auth_v2.169.0/internal/api/provider/oidc.go
@@ -0,0 +1,410 @@
+package provider
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/coreos/go-oidc/v3/oidc"
+	"github.com/golang-jwt/jwt/v5"
+)
+
+type ParseIDTokenOptions struct {
+	SkipAccessTokenCheck bool
+	AccessToken          string
+}
+
+// OverrideVerifiers can be used to set a custom verifier for an OIDC provider
+// (identified by the provider's Endpoint().AuthURL string). Should only be
+// used in tests.
+var OverrideVerifiers = make(map[string]func(context.Context, *oidc.Config) *oidc.IDTokenVerifier)
+
+// OverrideClock can be used to set a custom clock function to be used when
+// parsing ID tokens. Should only be used in tests.
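+//
+// For example, a test validating a captured ID token can pin the clock to
+// just after the token's iat claim (the value shown comes from the test file):
+//
+//	OverrideClock = func() time.Time { return time.Unix(1686659933, 0) }
+//	defer func() { OverrideClock = nil }()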
+var OverrideClock func() time.Time + +func ParseIDToken(ctx context.Context, provider *oidc.Provider, config *oidc.Config, idToken string, options ParseIDTokenOptions) (*oidc.IDToken, *UserProvidedData, error) { + if config == nil { + config = &oidc.Config{ + // aud claim check to be performed by other flows + SkipClientIDCheck: true, + } + } + + if OverrideClock != nil { + clonedConfig := *config + clonedConfig.Now = OverrideClock + config = &clonedConfig + } + + verifier := provider.VerifierContext(ctx, config) + overrideVerifier, ok := OverrideVerifiers[provider.Endpoint().AuthURL] + if ok && overrideVerifier != nil { + verifier = overrideVerifier(ctx, config) + } + + token, err := verifier.Verify(ctx, idToken) + if err != nil { + return nil, nil, err + } + + var data *UserProvidedData + + switch token.Issuer { + case IssuerGoogle: + token, data, err = parseGoogleIDToken(token) + case IssuerApple: + token, data, err = parseAppleIDToken(token) + case IssuerLinkedin: + token, data, err = parseLinkedinIDToken(token) + case IssuerKakao: + token, data, err = parseKakaoIDToken(token) + case IssuerVercelMarketplace: + token, data, err = parseVercelMarketplaceIDToken(token) + default: + if IsAzureIssuer(token.Issuer) { + token, data, err = parseAzureIDToken(token) + } else { + token, data, err = parseGenericIDToken(token) + } + } + + if err != nil { + return nil, nil, err + } + + if !options.SkipAccessTokenCheck && token.AccessTokenHash != "" { + if err := token.VerifyAccessToken(options.AccessToken); err != nil { + return nil, nil, err + } + } + + return token, data, nil +} + +func parseGoogleIDToken(token *oidc.IDToken) (*oidc.IDToken, *UserProvidedData, error) { + var claims googleUser + if err := token.Claims(&claims); err != nil { + return nil, nil, err + } + + var data UserProvidedData + + if claims.Email != "" { + data.Emails = append(data.Emails, Email{ + Email: claims.Email, + Verified: claims.IsEmailVerified(), + Primary: true, + }) + } + + data.Metadata = &Claims{ + Issuer: claims.Issuer, + Subject: claims.Subject, + Name: claims.Name, + Picture: claims.AvatarURL, + + // To be deprecated + AvatarURL: claims.AvatarURL, + FullName: claims.Name, + ProviderId: claims.Subject, + } + + if claims.HostedDomain != "" { + data.Metadata.CustomClaims = map[string]any{ + "hd": claims.HostedDomain, + } + } + + return token, &data, nil +} + +type AppleIDTokenClaims struct { + jwt.RegisteredClaims + + Email string `json:"email"` + + AuthTime *float64 `json:"auth_time"` + IsPrivateEmail *IsPrivateEmail `json:"is_private_email"` +} + +func parseAppleIDToken(token *oidc.IDToken) (*oidc.IDToken, *UserProvidedData, error) { + var claims AppleIDTokenClaims + if err := token.Claims(&claims); err != nil { + return nil, nil, err + } + + var data UserProvidedData + + data.Emails = append(data.Emails, Email{ + Email: claims.Email, + Verified: true, + Primary: true, + }) + + data.Metadata = &Claims{ + Issuer: token.Issuer, + Subject: token.Subject, + ProviderId: token.Subject, + CustomClaims: make(map[string]any), + } + + if claims.IsPrivateEmail != nil { + data.Metadata.CustomClaims["is_private_email"] = *claims.IsPrivateEmail + } + + if claims.AuthTime != nil { + data.Metadata.CustomClaims["auth_time"] = *claims.AuthTime + } + + if len(data.Metadata.CustomClaims) < 1 { + data.Metadata.CustomClaims = nil + } + + return token, &data, nil +} + +type LinkedinIDTokenClaims struct { + jwt.RegisteredClaims + + Email string `json:"email"` + EmailVerified string `json:"email_verified"` + FamilyName string 
`json:"family_name"`
+	GivenName  string `json:"given_name"`
+	Locale     string `json:"locale"`
+	Picture    string `json:"picture"`
+}
+
+func parseLinkedinIDToken(token *oidc.IDToken) (*oidc.IDToken, *UserProvidedData, error) {
+	var claims LinkedinIDTokenClaims
+	if err := token.Claims(&claims); err != nil {
+		return nil, nil, err
+	}
+
+	var data UserProvidedData
+	emailVerified, err := strconv.ParseBool(claims.EmailVerified)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if claims.Email != "" {
+		data.Emails = append(data.Emails, Email{
+			Email:    claims.Email,
+			Verified: emailVerified,
+			Primary:  true,
+		})
+	}
+
+	data.Metadata = &Claims{
+		Issuer:     token.Issuer,
+		Subject:    token.Subject,
+		Name:       strings.TrimSpace(claims.GivenName + " " + claims.FamilyName),
+		GivenName:  claims.GivenName,
+		FamilyName: claims.FamilyName,
+		Locale:     claims.Locale,
+		Picture:    claims.Picture,
+		ProviderId: token.Subject,
+	}
+
+	return token, &data, nil
+}
+
+type AzureIDTokenClaims struct {
+	jwt.RegisteredClaims
+
+	Email                              string `json:"email"`
+	Name                               string `json:"name"`
+	PreferredUsername                  string `json:"preferred_username"`
+	XMicrosoftEmailDomainOwnerVerified any    `json:"xms_edov"`
+}
+
+func (c *AzureIDTokenClaims) IsEmailVerified() bool {
+	emailVerified := false
+
+	edov := c.XMicrosoftEmailDomainOwnerVerified
+
+	// The email is regarded as verified only when an email is present and
+	// either xms_edov is absent (see the nil case below) or xms_edov is
+	// truthy.
+	// https://learn.microsoft.com/en-us/azure/active-directory/develop/migrate-off-email-claim-authorization#using-the-xms_edov-optional-claim-to-determine-email-verification-status-and-migrate-users
+	if edov == nil {
+		// An email is provided, but xms_edov is not -- probably not
+		// configured, so we must assume the email is verified as Azure
+		// will only send out a potentially unverified email address in
+		// single-tenant apps.
+		emailVerified = c.Email != ""
+	} else {
+		edovBool := false
+
+		// Azure can't be trusted with how they encode the xms_edov
+		// claim. Sometimes it's "xms_edov": "1", sometimes "xms_edov": true.
+		switch v := edov.(type) {
+		case bool:
+			edovBool = v
+
+		case string:
+			edovBool = v == "1" || v == "true"
+
+		default:
+			edovBool = false
+		}
+
+		emailVerified = c.Email != "" && edovBool
+	}
+
+	return emailVerified
+}
+
+// removeAzureClaimsFromCustomClaims contains the list of claims to be removed
+// from the CustomClaims map.
See: +// https://learn.microsoft.com/en-us/azure/active-directory/develop/id-token-claims-reference +var removeAzureClaimsFromCustomClaims = []string{ + "aud", + "iss", + "iat", + "nbf", + "exp", + "c_hash", + "at_hash", + "aio", + "nonce", + "rh", + "uti", + "jti", + "ver", + "sub", + "name", + "preferred_username", +} + +func parseAzureIDToken(token *oidc.IDToken) (*oidc.IDToken, *UserProvidedData, error) { + var data UserProvidedData + + var azureClaims AzureIDTokenClaims + if err := token.Claims(&azureClaims); err != nil { + return nil, nil, err + } + + data.Metadata = &Claims{ + Issuer: token.Issuer, + Subject: token.Subject, + ProviderId: token.Subject, + PreferredUsername: azureClaims.PreferredUsername, + FullName: azureClaims.Name, + CustomClaims: make(map[string]any), + } + + if azureClaims.Email != "" { + data.Emails = []Email{{ + Email: azureClaims.Email, + Verified: azureClaims.IsEmailVerified(), + Primary: true, + }} + } + + if err := token.Claims(&data.Metadata.CustomClaims); err != nil { + return nil, nil, err + } + + if data.Metadata.CustomClaims != nil { + for _, claim := range removeAzureClaimsFromCustomClaims { + delete(data.Metadata.CustomClaims, claim) + } + } + + return token, &data, nil +} + +type KakaoIDTokenClaims struct { + jwt.RegisteredClaims + + Email string `json:"email"` + Nickname string `json:"nickname"` + Picture string `json:"picture"` +} + +func parseKakaoIDToken(token *oidc.IDToken) (*oidc.IDToken, *UserProvidedData, error) { + var claims KakaoIDTokenClaims + + if err := token.Claims(&claims); err != nil { + return nil, nil, err + } + + var data UserProvidedData + + if claims.Email != "" { + data.Emails = append(data.Emails, Email{ + Email: claims.Email, + Verified: true, + Primary: true, + }) + } + + data.Metadata = &Claims{ + Issuer: token.Issuer, + Subject: token.Subject, + Name: claims.Nickname, + PreferredUsername: claims.Nickname, + ProviderId: token.Subject, + Picture: claims.Picture, + } + + return token, &data, nil +} + +type VercelMarketplaceIDTokenClaims struct { + jwt.RegisteredClaims + + UserEmail string `json:"user_email"` + UserName string `json:"user_name"` + UserAvatarUrl string `json:"user_avatar_url"` +} + +func parseVercelMarketplaceIDToken(token *oidc.IDToken) (*oidc.IDToken, *UserProvidedData, error) { + var claims VercelMarketplaceIDTokenClaims + + if err := token.Claims(&claims); err != nil { + return nil, nil, err + } + + var data UserProvidedData + + data.Emails = append(data.Emails, Email{ + Email: claims.UserEmail, + Verified: true, + Primary: true, + }) + + data.Metadata = &Claims{ + Issuer: token.Issuer, + Subject: token.Subject, + ProviderId: token.Subject, + Name: claims.UserName, + Picture: claims.UserAvatarUrl, + } + + return token, &data, nil +} + +func parseGenericIDToken(token *oidc.IDToken) (*oidc.IDToken, *UserProvidedData, error) { + var data UserProvidedData + + if err := token.Claims(&data.Metadata); err != nil { + return nil, nil, err + } + + if data.Metadata.Email != "" { + data.Emails = append(data.Emails, Email{ + Email: data.Metadata.Email, + Verified: data.Metadata.EmailVerified, + Primary: true, + }) + } + + if len(data.Emails) <= 0 { + return nil, nil, fmt.Errorf("provider: Generic OIDC ID token from issuer %q must contain an email address", token.Issuer) + } + + return token, &data, nil +} diff --git a/auth_v2.169.0/internal/api/provider/oidc_test.go b/auth_v2.169.0/internal/api/provider/oidc_test.go new file mode 100644 index 0000000..e088cd4 --- /dev/null +++ 
b/auth_v2.169.0/internal/api/provider/oidc_test.go @@ -0,0 +1,185 @@ +package provider + +import ( + "context" + "crypto" + "crypto/rsa" + "encoding/base64" + "math/big" + "testing" + "time" + + "github.com/coreos/go-oidc/v3/oidc" + "github.com/stretchr/testify/require" +) + +type realIDToken struct { + AccessToken string + IDToken string + Time time.Time + Email string + Verifier func(context.Context, *oidc.Config) *oidc.IDTokenVerifier +} + +func googleIDTokenVerifier(ctx context.Context, config *oidc.Config) *oidc.IDTokenVerifier { + keyBytes, err := base64.RawURLEncoding.DecodeString("pP-rCe4jkKX6mq8yP1GcBZcxJzmxKWicHHor1S3Q49u6Oe-bQsk5NsK5mdR7Y7liGV9n0ikXSM42dYKQdxbhKA-7--fFon5isJoHr4fIwL2CCwVm5QWlK37q6PiH2_F1M0hRorHfkCb4nI56ZvfygvuOH4LIS82OzIgmsYbeEfwDRpeMSxWKwlpa3pX3GZ6jG7FgzJGBvmBkagpgsa2JZdyU4gEGMOkHdSzi5Ii-6RGfFLhhI1OMxC9P2JaU5yjMN2pikfFIq_dbpm75yNUGpWJNVywtrlNvvJfA74UMN_lVCAaSR0A03BUMg6ljB65gFllpKF224uWBA8tpjngwKQ") + if err != nil { + panic(err) + } + + n := big.NewInt(0) + n.SetBytes(keyBytes) + + publicKey := &rsa.PublicKey{ + N: n, + E: 65537, + } + + return oidc.NewVerifier( + "https://accounts.google.com", + &oidc.StaticKeySet{ + PublicKeys: []crypto.PublicKey{publicKey}, + }, + config, + ) +} + +func azureIDTokenVerifier(ctx context.Context, config *oidc.Config) *oidc.IDTokenVerifier { + keyBytes, err := base64.RawURLEncoding.DecodeString("1djHqyNclRpJWtHCnkP5QWvDxozCTG_ZDnkEmudpcxjnYrVL4RVIwdNCBLAStg8Dob5OUyAlHcRFMCqGTW4HA6kHgIxyfiFsYCBDMHWd2-61N1cAS6S9SdXlWXkBQgU0Qj6q_yFYTRS7J-zI_jMLRQAlpowfDFM1vSTBIci7kqynV6pPOz4jMaDQevmSscEs-jz7e8YXAiiVpN588oBQ0jzQaTTx90WjgRP23mn8mPyabj8gcR3gLwKLsBUhlp1oZj7FopGp8z8LHuueJB_q_LOUa_gAozZ0lfoJxFimXgpgEK7GNVdMRsMH3mIl0A5oYN8f29RFwbG0rNO5ZQ1YWQ") + if err != nil { + panic(err) + } + + n := big.NewInt(0) + n.SetBytes(keyBytes) + + publicKey := &rsa.PublicKey{ + N: n, + E: 65537, + } + + return oidc.NewVerifier( + IssuerAzureMicrosoft, + &oidc.StaticKeySet{ + PublicKeys: []crypto.PublicKey{publicKey}, + }, + config, + ) +} + +var realIDTokens map[string]realIDToken = map[string]realIDToken{ + IssuerGoogle: { + AccessToken: "ya29.a0AWY7CklOn4TehiT4kA6osNP6e-pHErOY8X53T2oUe7Oqqwc3-uIJpoEgoZCUogewBuNWr-JFT2FK9s0E0oRSFtAfu0-uIDckBj5ca1pxnk0-zPkPZouqoIyl0AlIpQjIUEuyuQTYUay99kRajbHcFCR1VMbNcQaCgYKAQESARESFQG1tDrp1joUHupV5Rn8-nWDpKkmMw0165", + IDToken: "eyJhbGciOiJSUzI1NiIsImtpZCI6Ijg1YmE5MzEzZmQ3YTdkNGFmYTg0ODg0YWJjYzg0MDMwMDQzNjMxODAiLCJ0eXAiOiJKV1QifQ.eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhenAiOiI5MTQ2NjY0MjA3NS03OWNwaWs4aWNxYzU4NjY5bjdtaXY5NjZsYmFwOTNhMi5hcHBzLmdvb2dsZXVzZXJjb250ZW50LmNvbSIsImF1ZCI6IjkxNDY2NjQyMDc1LTc5Y3BpazhpY3FjNTg2NjluN21pdjk2NmxiYXA5M2EyLmFwcHMuZ29vZ2xldXNlcmNvbnRlbnQuY29tIiwic3ViIjoiMTAzNzgzMTkwMTI2NDM5NzUxMjY5IiwiaGQiOiJzdXBhYmFzZS5pbyIsImVtYWlsIjoic3RvamFuQHN1cGFiYXNlLmlvIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsImF0X2hhc2giOiJlcGVWV244VmxWa28zd195Unk3UDZRIiwibmFtZSI6IlN0b2phbiBEaW1pdHJvdnNraSIsInBpY3R1cmUiOiJodHRwczovL2xoMy5nb29nbGV1c2VyY29udGVudC5jb20vYS9BQWNIVHRka0dhWjVlcGtqT1dxSEF1UUV4N2cwRlBCeXJiQ2ZNUjVNTk5kYz1zOTYtYyIsImdpdmVuX25hbWUiOiJTdG9qYW4iLCJmYW1pbHlfbmFtZSI6IkRpbWl0cm92c2tpIiwibG9jYWxlIjoiZW4tR0IiLCJpYXQiOjE2ODY2NTk5MzIsImV4cCI6MTY4NjY2MzUzMn0.nKAN9BFSxvavXYfWX4fZHREYY_3O4uOFRFq1KU1NNrBOMq_CPpM8c8PV7ZhKQvGCjBthSjtxGWbcqT0ByA7RdpNW6kj5UpFxEPdhenZ-eO1FwiEVIC8uZpiX6J3Nr7fAqi1P0DVeB3Zr_GrtkS9MDhZNb3hE5NDkvjCulwP4gRBC-5Pn_aRJRESxYkr_naKiSSmVilkmNVjZO4orq6KuYlvWHKHZIRiUI1akt0gVr5GxsEpd_duzUU30yVSPiq8l6fgxvJn2hT0MHa77wo3hvlP0NyAoSE7Nh4tRSowB0Qq7_byDMUmNWfXh-Qqa2M6ywuJ-_3LTLNUJH-cwdm2tNQ", + Time: 
time.Unix(1686659933, 0), // 1 sec after iat + Verifier: googleIDTokenVerifier, + }, + IssuerAzureMicrosoft: { + AccessToken: "access-token", + Time: time.Unix(1697277774, 0), // 1 sec after iat + IDToken: "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IlhvdVhMWVExVGlwNW9kWWFqaUN0RlZnVmFFcyJ9.eyJ2ZXIiOiIyLjAiLCJpc3MiOiJodHRwczovL2xvZ2luLm1pY3Jvc29mdG9ubGluZS5jb20vOTE4ODA0MGQtNmM2Ny00YzViLWIxMTItMzZhMzA0YjY2ZGFkL3YyLjAiLCJzdWIiOiJBQUFBQUFBQUFBQUFBQUFBQUFBQUFCWkRuRDkxOTBfc2wxcTZwenZlRHZNIiwiYXVkIjoiYTBkOGY5NzItNTRhYy00YWJmLTkxNGMtNTIyMDE0YzQwMjJhIiwiZXhwIjoxNjk3MzY0NDczLCJpYXQiOjE2OTcyNzc3NzMsIm5iZiI6MTY5NzI3Nzc3MywiZW1haWwiOiJzZGltaXRyb3Zza2lAZ21haWwuY29tIiwidGlkIjoiOTE4ODA0MGQtNmM2Ny00YzViLWIxMTItMzZhMzA0YjY2ZGFkIiwieG1zX2Vkb3YiOiIxIiwiYWlvIjoiRHBQV3lZSnRJcUl5OHpyVjROIUlIdGtFa09BMDhPS29lZ1RkYmZQUEVPYmxtYk9ESFQ0cGJVcVI1cExraENyWWZ6bUgzb3A1RzN5RGp2M0tNZ0Rad29lQ1FjKmVueldyb21iQ3BuKkR6OEpQOGMxU3pEVG1TbGp4U3U3UnVLTXNZSjRvS1lDazFBSVcqUUNUTmlMWkpUKlN3WWZQcjZBTW9IejFEZ3pBZEFkbk9uWiFHNUNFeEtQalBxcHRuVmpUZlEkJCJ9.CskICxOaeqd4SkiPdWEHJKZVdhAdgzM5SN7K7FYi0dguQH1-v6XTetDIoEsBn0GZoozXjbG2GgkFcVhhBvNA0ZrDIr4KcjfnJ5-7rwX3AtxdQ3umrHRlGu3jlmbDOtWzPWNMLLRXfR1Mm3pHEUvlzqmk3Ffh4TuAmXID-fb-Xmfuuv1k0UsZ5mlr_3ybTPVZk-Lj0bqkR1L5Zzt4HjgfpchRryJ3Y24b4dDsSjg7mgE_5JivgjhtVef5OnqYhKUF1DTy2pFysFO_eRliK6qjouYeZnQOJnWHP1MgpySAOQ3sVcwvE4P9g7V3QouxByZPv-g99N1K4GwZrtdm46gtTQ", + Verifier: azureIDTokenVerifier, + }, +} + +func TestParseIDToken(t *testing.T) { + defer func() { + OverrideVerifiers = make(map[string]func(context.Context, *oidc.Config) *oidc.IDTokenVerifier) + OverrideClock = nil + }() + + // note that this test can fail if/when the issuers rotate their + // signing keys (which happens rarely if ever) + // then you should obtain new ID tokens and update this test + for issuer, token := range realIDTokens { + oidcProvider, err := oidc.NewProvider(context.Background(), issuer) + require.NoError(t, err) + + OverrideVerifiers[oidcProvider.Endpoint().AuthURL] = token.Verifier + + _, user, err := ParseIDToken(context.Background(), oidcProvider, &oidc.Config{ + SkipClientIDCheck: true, + Now: func() time.Time { + return token.Time + }, + }, token.IDToken, ParseIDTokenOptions{ + AccessToken: token.AccessToken, + }) + require.NoError(t, err) + + require.NotEmpty(t, user.Emails[0].Email) + require.Equal(t, user.Emails[0].Verified, true) + } +} + +func TestAzureIDTokenClaimsIsEmailVerified(t *testing.T) { + positiveExamples := []AzureIDTokenClaims{ + { + Email: "test@example.com", + XMicrosoftEmailDomainOwnerVerified: nil, + }, + { + Email: "test@example.com", + XMicrosoftEmailDomainOwnerVerified: true, + }, + { + Email: "test@example.com", + XMicrosoftEmailDomainOwnerVerified: "1", + }, + { + Email: "test@example.com", + XMicrosoftEmailDomainOwnerVerified: "true", + }, + } + + negativeExamples := []AzureIDTokenClaims{ + { + Email: "", + XMicrosoftEmailDomainOwnerVerified: true, + }, + { + Email: "test@example.com", + XMicrosoftEmailDomainOwnerVerified: false, + }, + { + Email: "test@example.com", + XMicrosoftEmailDomainOwnerVerified: "0", + }, + { + Email: "test@example.com", + XMicrosoftEmailDomainOwnerVerified: "false", + }, + { + Email: "test@example.com", + XMicrosoftEmailDomainOwnerVerified: float32(0), + }, + { + Email: "test@example.com", + XMicrosoftEmailDomainOwnerVerified: float64(0), + }, + { + Email: "test@example.com", + XMicrosoftEmailDomainOwnerVerified: int(0), + }, + { + Email: "test@example.com", + XMicrosoftEmailDomainOwnerVerified: int32(0), + }, + { + Email: "test@example.com", + 
XMicrosoftEmailDomainOwnerVerified: int64(0), + }, + } + + for i, example := range positiveExamples { + if !example.IsEmailVerified() { + t.Errorf("positive example %v reports negative result", i) + } + } + + for i, example := range negativeExamples { + if example.IsEmailVerified() { + t.Errorf("negative example %v reports positive result", i) + } + } +} diff --git a/auth_v2.169.0/internal/api/provider/provider.go b/auth_v2.169.0/internal/api/provider/provider.go new file mode 100644 index 0000000..857e882 --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/provider.go @@ -0,0 +1,128 @@ +package provider + +import ( + "bytes" + "context" + "encoding/json" + "io" + "log" + "net/http" + "os" + "time" + + "github.com/supabase/auth/internal/utilities" + "golang.org/x/oauth2" +) + +var defaultTimeout time.Duration = time.Second * 10 + +func init() { + timeoutStr := os.Getenv("GOTRUE_INTERNAL_HTTP_TIMEOUT") + if timeoutStr != "" { + if timeout, err := time.ParseDuration(timeoutStr); err != nil { + log.Fatalf("error loading GOTRUE_INTERNAL_HTTP_TIMEOUT: %v", err.Error()) + } else if timeout != 0 { + defaultTimeout = timeout + } + } +} + +type Claims struct { + // Reserved claims + Issuer string `json:"iss,omitempty" structs:"iss,omitempty"` + Subject string `json:"sub,omitempty" structs:"sub,omitempty"` + Aud string `json:"aud,omitempty" structs:"aud,omitempty"` + Iat float64 `json:"iat,omitempty" structs:"iat,omitempty"` + Exp float64 `json:"exp,omitempty" structs:"exp,omitempty"` + + // Default profile claims + Name string `json:"name,omitempty" structs:"name,omitempty"` + FamilyName string `json:"family_name,omitempty" structs:"family_name,omitempty"` + GivenName string `json:"given_name,omitempty" structs:"given_name,omitempty"` + MiddleName string `json:"middle_name,omitempty" structs:"middle_name,omitempty"` + NickName string `json:"nickname,omitempty" structs:"nickname,omitempty"` + PreferredUsername string `json:"preferred_username,omitempty" structs:"preferred_username,omitempty"` + Profile string `json:"profile,omitempty" structs:"profile,omitempty"` + Picture string `json:"picture,omitempty" structs:"picture,omitempty"` + Website string `json:"website,omitempty" structs:"website,omitempty"` + Gender string `json:"gender,omitempty" structs:"gender,omitempty"` + Birthdate string `json:"birthdate,omitempty" structs:"birthdate,omitempty"` + ZoneInfo string `json:"zoneinfo,omitempty" structs:"zoneinfo,omitempty"` + Locale string `json:"locale,omitempty" structs:"locale,omitempty"` + UpdatedAt string `json:"updated_at,omitempty" structs:"updated_at,omitempty"` + Email string `json:"email,omitempty" structs:"email,omitempty"` + EmailVerified bool `json:"email_verified,omitempty" structs:"email_verified"` + Phone string `json:"phone,omitempty" structs:"phone,omitempty"` + PhoneVerified bool `json:"phone_verified,omitempty" structs:"phone_verified"` + + // Custom profile claims that are provider specific + CustomClaims map[string]interface{} `json:"custom_claims,omitempty" structs:"custom_claims,omitempty"` + + // TODO: Deprecate in next major release + FullName string `json:"full_name,omitempty" structs:"full_name,omitempty"` + AvatarURL string `json:"avatar_url,omitempty" structs:"avatar_url,omitempty"` + Slug string `json:"slug,omitempty" structs:"slug,omitempty"` + ProviderId string `json:"provider_id,omitempty" structs:"provider_id,omitempty"` + UserNameKey string `json:"user_name,omitempty" structs:"user_name,omitempty"` +} + +// Email is a struct that provides information on whether 
an email is verified or is the primary email address +type Email struct { + Email string + Verified bool + Primary bool +} + +// UserProvidedData is a struct that contains the user's data returned from the oauth provider +type UserProvidedData struct { + Emails []Email + Metadata *Claims +} + +// Provider is an interface for interacting with external account providers +type Provider interface { + AuthCodeURL(string, ...oauth2.AuthCodeOption) string +} + +// OAuthProvider specifies additional methods needed for providers using OAuth +type OAuthProvider interface { + AuthCodeURL(string, ...oauth2.AuthCodeOption) string + GetUserData(context.Context, *oauth2.Token) (*UserProvidedData, error) + GetOAuthToken(string) (*oauth2.Token, error) +} + +func chooseHost(base, defaultHost string) string { + if base == "" { + return "https://" + defaultHost + } + + baseLen := len(base) + if base[baseLen-1] == '/' { + return base[:baseLen-1] + } + + return base +} + +func makeRequest(ctx context.Context, tok *oauth2.Token, g *oauth2.Config, url string, dst interface{}) error { + client := g.Client(ctx, tok) + client.Timeout = defaultTimeout + res, err := client.Get(url) + if err != nil { + return err + } + defer utilities.SafeClose(res.Body) + + bodyBytes, _ := io.ReadAll(res.Body) + res.Body = io.NopCloser(bytes.NewBuffer(bodyBytes)) + + if res.StatusCode < http.StatusOK || res.StatusCode >= http.StatusMultipleChoices { + return httpError(res.StatusCode, string(bodyBytes)) + } + + if err := json.NewDecoder(res.Body).Decode(dst); err != nil { + return err + } + + return nil +} diff --git a/auth_v2.169.0/internal/api/provider/slack.go b/auth_v2.169.0/internal/api/provider/slack.go new file mode 100644 index 0000000..40377b0 --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/slack.go @@ -0,0 +1,94 @@ +package provider + +import ( + "context" + "strings" + + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +const defaultSlackApiBase = "slack.com" + +type slackProvider struct { + *oauth2.Config + APIPath string +} + +type slackUser struct { + ID string `json:"https://slack.com/user_id"` + Email string `json:"email"` + Name string `json:"name"` + AvatarURL string `json:"picture"` + TeamID string `json:"https://slack.com/team_id"` +} + +// NewSlackProvider creates a Slack account provider with Legacy Slack OAuth. +func NewSlackProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + apiPath := chooseHost(ext.URL, defaultSlackApiBase) + "/api" + authPath := chooseHost(ext.URL, defaultSlackApiBase) + "/oauth" + + oauthScopes := []string{ + "profile", + "email", + "openid", + } + + if scopes != "" { + oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...) 
+ }
+
+ return &slackProvider{
+ Config: &oauth2.Config{
+ ClientID: ext.ClientID[0],
+ ClientSecret: ext.Secret,
+ Endpoint: oauth2.Endpoint{
+ AuthURL: authPath + "/authorize",
+ TokenURL: apiPath + "/oauth.access",
+ },
+ Scopes: oauthScopes,
+ RedirectURL: ext.RedirectURI,
+ },
+ APIPath: apiPath,
+ }, nil
+}
+
+func (g slackProvider) GetOAuthToken(code string) (*oauth2.Token, error) {
+ return g.Exchange(context.Background(), code)
+}
+
+func (g slackProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) {
+ var u slackUser
+ if err := makeRequest(ctx, tok, g.Config, g.APIPath+"/openid.connect.userInfo", &u); err != nil {
+ return nil, err
+ }
+
+ data := &UserProvidedData{}
+ if u.Email != "" {
+ data.Emails = []Email{{
+ Email: u.Email,
+ Verified: true, // Slack doesn't provide data on whether the email is verified.
+ Primary: true,
+ }}
+ }
+
+ data.Metadata = &Claims{
+ Issuer: g.APIPath,
+ Subject: u.ID,
+ Name: u.Name,
+ Picture: u.AvatarURL,
+ CustomClaims: map[string]interface{}{
+ "https://slack.com/team_id": u.TeamID,
+ },
+
+ // To be deprecated
+ AvatarURL: u.AvatarURL,
+ FullName: u.Name,
+ ProviderId: u.ID,
+ }
+ return data, nil
+}
diff --git a/auth_v2.169.0/internal/api/provider/slack_oidc.go b/auth_v2.169.0/internal/api/provider/slack_oidc.go
new file mode 100644
index 0000000..3c7a5eb
--- /dev/null
+++ b/auth_v2.169.0/internal/api/provider/slack_oidc.go
@@ -0,0 +1,99 @@
+package provider
+
+import (
+ "context"
+ "strings"
+
+ "github.com/supabase/auth/internal/conf"
+ "golang.org/x/oauth2"
+)
+
+const defaultSlackOIDCApiBase = "slack.com"
+
+type slackOIDCProvider struct {
+ *oauth2.Config
+ APIPath string
+}
+
+type slackOIDCUser struct {
+ ID string `json:"https://slack.com/user_id"`
+ TeamID string `json:"https://slack.com/team_id"`
+ Email string `json:"email"`
+ EmailVerified bool `json:"email_verified"`
+ Name string `json:"name"`
+ AvatarURL string `json:"picture"`
+}
+
+// NewSlackOIDCProvider creates a Slack account provider with Sign in with Slack.
+func NewSlackOIDCProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) {
+ if err := ext.ValidateOAuth(); err != nil {
+ return nil, err
+ }
+
+ apiPath := chooseHost(ext.URL, defaultSlackOIDCApiBase) + "/api"
+ authPath := chooseHost(ext.URL, defaultSlackOIDCApiBase) + "/openid"
+
+ // these are required scopes for slack's OIDC flow
+ // see https://api.slack.com/authentication/sign-in-with-slack#implementation
+ oauthScopes := []string{
+ "profile",
+ "email",
+ "openid",
+ }
+
+ if scopes != "" {
+ oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...)
+ }
+
+ return &slackOIDCProvider{
+ Config: &oauth2.Config{
+ ClientID: ext.ClientID[0],
+ ClientSecret: ext.Secret,
+ Endpoint: oauth2.Endpoint{
+ AuthURL: authPath + "/connect/authorize",
+ TokenURL: apiPath + "/openid.connect.token",
+ },
+ Scopes: oauthScopes,
+ RedirectURL: ext.RedirectURI,
+ },
+ APIPath: apiPath,
+ }, nil
+}
+
+func (g slackOIDCProvider) GetOAuthToken(code string) (*oauth2.Token, error) {
+ return g.Exchange(context.Background(), code)
+}
+
+func (g slackOIDCProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) {
+ var u slackOIDCUser
+ if err := makeRequest(ctx, tok, g.Config, g.APIPath+"/openid.connect.userInfo", &u); err != nil {
+ return nil, err
+ }
+
+ data := &UserProvidedData{}
+ if u.Email != "" {
+ data.Emails = []Email{{
+ Email: u.Email,
+ // email_verified is returned as part of the response
+ // see: https://api.slack.com/authentication/sign-in-with-slack#response
+ Verified: u.EmailVerified,
+ Primary: true,
+ }}
+ }
+
+ data.Metadata = &Claims{
+ Issuer: g.APIPath,
+ Subject: u.ID,
+ Name: u.Name,
+ Picture: u.AvatarURL,
+ CustomClaims: map[string]interface{}{
+ "https://slack.com/team_id": u.TeamID,
+ },
+
+ // To be deprecated
+ AvatarURL: u.AvatarURL,
+ FullName: u.Name,
+ ProviderId: u.ID,
+ }
+ return data, nil
+}
diff --git a/auth_v2.169.0/internal/api/provider/spotify.go b/auth_v2.169.0/internal/api/provider/spotify.go
new file mode 100644
index 0000000..e6d2f38
--- /dev/null
+++ b/auth_v2.169.0/internal/api/provider/spotify.go
@@ -0,0 +1,114 @@
+package provider
+
+import (
+ "context"
+ "strings"
+
+ "github.com/supabase/auth/internal/conf"
+ "golang.org/x/oauth2"
+)
+
+const (
+ defaultSpotifyAPIBase = "api.spotify.com/v1" // Used to get user data
+ defaultSpotifyAuthBase = "accounts.spotify.com" // Used for OAuth flow
+)
+
+type spotifyProvider struct {
+ *oauth2.Config
+ APIPath string
+}
+
+type spotifyUser struct {
+ DisplayName string `json:"display_name"`
+ Avatars []spotifyUserImage `json:"images"`
+ Email string `json:"email"`
+ ID string `json:"id"`
+}
+
+type spotifyUserImage struct {
+ Url string `json:"url"`
+ Height int `json:"height"`
+ Width int `json:"width"`
+}
+
+// NewSpotifyProvider creates a Spotify account provider.
+func NewSpotifyProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) {
+ if err := ext.ValidateOAuth(); err != nil {
+ return nil, err
+ }
+
+ apiPath := chooseHost(ext.URL, defaultSpotifyAPIBase)
+ authPath := chooseHost(ext.URL, defaultSpotifyAuthBase)
+
+ oauthScopes := []string{
+ "user-read-email",
+ }
+
+ if scopes != "" {
+ oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...)
+ }
+
+ return &spotifyProvider{
+ Config: &oauth2.Config{
+ ClientID: ext.ClientID[0],
+ ClientSecret: ext.Secret,
+ Endpoint: oauth2.Endpoint{
+ AuthURL: authPath + "/authorize",
+ TokenURL: authPath + "/api/token",
+ },
+ Scopes: oauthScopes,
+ RedirectURL: ext.RedirectURI,
+ },
+ APIPath: apiPath,
+ }, nil
+}
+
+func (g spotifyProvider) GetOAuthToken(code string) (*oauth2.Token, error) {
+ return g.Exchange(context.Background(), code)
+}
+
+func (g spotifyProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) {
+ var u spotifyUser
+ if err := makeRequest(ctx, tok, g.Config, g.APIPath+"/me", &u); err != nil {
+ return nil, err
+ }
+
+ data := &UserProvidedData{}
+ if u.Email != "" {
+ data.Emails = []Email{{
+ Email: u.Email,
+ // Spotify doesn't provide data on whether the user's email is verified.
+ // https://developer.spotify.com/documentation/web-api/reference/get-current-users-profile + Verified: false, + Primary: true, + }} + } + + var avatarURL string + + // Spotify returns a list of avatars, we want to use the largest one + if len(u.Avatars) >= 1 { + largestAvatar := u.Avatars[0] + + for _, avatar := range u.Avatars { + if avatar.Height*avatar.Width > largestAvatar.Height*largestAvatar.Width { + largestAvatar = avatar + } + } + + avatarURL = largestAvatar.Url + } + + data.Metadata = &Claims{ + Issuer: g.APIPath, + Subject: u.ID, + Name: u.DisplayName, + Picture: avatarURL, + + // To be deprecated + AvatarURL: avatarURL, + FullName: u.DisplayName, + ProviderId: u.ID, + } + return data, nil +} diff --git a/auth_v2.169.0/internal/api/provider/twitch.go b/auth_v2.169.0/internal/api/provider/twitch.go new file mode 100644 index 0000000..defb198 --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/twitch.go @@ -0,0 +1,154 @@ +package provider + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/utilities" + "golang.org/x/oauth2" +) + +// Twitch + +const ( + defaultTwitchAuthBase = "id.twitch.tv" + defaultTwitchAPIBase = "api.twitch.tv" +) + +type twitchProvider struct { + *oauth2.Config + APIHost string +} + +type twitchUsers struct { + Data []struct { + ID string `json:"id"` + Login string `json:"login"` + DisplayName string `json:"display_name"` + Type string `json:"type"` + BroadcasterType string `json:"broadcaster_type"` + Description string `json:"description"` + ProfileImageURL string `json:"profile_image_url"` + OfflineImageURL string `json:"offline_image_url"` + ViewCount int `json:"view_count"` + Email string `json:"email"` + CreatedAt time.Time `json:"created_at"` + } `json:"data"` +} + +// NewTwitchProvider creates a Twitch account provider. +func NewTwitchProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + apiHost := chooseHost(ext.URL, defaultTwitchAPIBase) + authHost := chooseHost(ext.URL, defaultTwitchAuthBase) + + oauthScopes := []string{ + "user:read:email", + } + + if scopes != "" { + oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...) 
+ }
+
+ return &twitchProvider{
+ Config: &oauth2.Config{
+ ClientID: ext.ClientID[0],
+ ClientSecret: ext.Secret,
+ Endpoint: oauth2.Endpoint{
+ AuthURL: authHost + "/oauth2/authorize",
+ TokenURL: authHost + "/oauth2/token",
+ },
+ RedirectURL: ext.RedirectURI,
+ Scopes: oauthScopes,
+ },
+ APIHost: apiHost,
+ }, nil
+}
+
+func (t twitchProvider) GetOAuthToken(code string) (*oauth2.Token, error) {
+ return t.Exchange(context.Background(), code)
+}
+
+func (t twitchProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) {
+ var u twitchUsers
+
+ // Perform the HTTP request ourselves because we need to set the Client-Id header
+ req, err := http.NewRequest("GET", t.APIHost+"/helix/users", nil)
+
+ if err != nil {
+ return nil, err
+ }
+
+ // set headers
+ req.Header.Set("Client-Id", t.Config.ClientID)
+ req.Header.Set("Authorization", "Bearer "+tok.AccessToken)
+
+ client := &http.Client{Timeout: defaultTimeout}
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer utilities.SafeClose(resp.Body)
+
+ if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
+ return nil, fmt.Errorf("a %v error occurred with retrieving user from twitch", resp.StatusCode)
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ err = json.Unmarshal(body, &u)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(u.Data) == 0 {
+ return nil, errors.New("unable to find user with twitch provider")
+ }
+
+ user := u.Data[0]
+
+ data := &UserProvidedData{}
+ if user.Email != "" {
+ data.Emails = []Email{{
+ Email: user.Email,
+ Verified: true,
+ Primary: true,
+ }}
+ }
+
+ data.Metadata = &Claims{
+ Issuer: t.APIHost,
+ Subject: user.ID,
+ Picture: user.ProfileImageURL,
+ Name: user.Login,
+ NickName: user.DisplayName,
+ CustomClaims: map[string]interface{}{
+ "broadcaster_type": user.BroadcasterType,
+ "description": user.Description,
+ "type": user.Type,
+ "offline_image_url": user.OfflineImageURL,
+ "view_count": user.ViewCount,
+ },
+
+ // To be deprecated
+ Slug: user.DisplayName,
+ AvatarURL: user.ProfileImageURL,
+ FullName: user.Login,
+ ProviderId: user.ID,
+ }
+
+ return data, nil
+}
diff --git a/auth_v2.169.0/internal/api/provider/twitter.go b/auth_v2.169.0/internal/api/provider/twitter.go
new file mode 100644
index 0000000..8dc5a4c
--- /dev/null
+++ b/auth_v2.169.0/internal/api/provider/twitter.go
@@ -0,0 +1,155 @@
+package provider
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ "github.com/mrjones/oauth"
+ "github.com/supabase/auth/internal/conf"
+ "github.com/supabase/auth/internal/utilities"
+ "golang.org/x/oauth2"
+)
+
+const (
+ defaultTwitterAPIBase = "api.twitter.com"
+ requestURL = "/oauth/request_token"
+ authenticateURL = "/oauth/authenticate"
+ tokenURL = "/oauth/access_token" //#nosec G101 -- Not a secret value.
+ endpointProfile = "/1.1/account/verify_credentials.json"
+)
+
+// TwitterProvider stores the custom config for the Twitter provider
+type TwitterProvider struct {
+ ClientKey string
+ Secret string
+ CallbackURL string
+ AuthURL string
+ RequestToken *oauth.RequestToken
+ OauthVerifier string
+ Consumer *oauth.Consumer
+ UserInfoURL string
+}
+
+type twitterUser struct {
+ UserName string `json:"screen_name"`
+ Name string `json:"name"`
+ AvatarURL string `json:"profile_image_url_https"`
+ Email string `json:"email"`
+ ID string `json:"id_str"`
+}
+
+// NewTwitterProvider creates a Twitter account provider.
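+//
+// Hypothetical usage sketch (added for illustration, not part of the
+// original source; values are placeholders, only the configuration fields
+// this file actually reads are shown, and ValidateOAuth may require more):
+//
+//	p, err := NewTwitterProvider(conf.OAuthProviderConfiguration{
+//	    ClientID:    []string{"consumer-key"},
+//	    Secret:      "consumer-secret",
+//	    RedirectURI: "https://example.com/auth/v1/callback",
+//	}, "")
+//	if err != nil {
+//	    // handle configuration error
+//	}
+//	// OAuth 1.0a: AuthCodeURL also fetches and stores the request token.
+//	redirectTo := p.AuthCodeURL("state")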
+func NewTwitterProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + authHost := chooseHost(ext.URL, defaultTwitterAPIBase) + p := &TwitterProvider{ + ClientKey: ext.ClientID[0], + Secret: ext.Secret, + CallbackURL: ext.RedirectURI, + UserInfoURL: authHost + endpointProfile, + } + p.Consumer = newConsumer(p, authHost) + return p, nil +} + +// GetOAuthToken is a stub method for OAuthProvider interface, unused in OAuth1.0 protocol +func (t TwitterProvider) GetOAuthToken(_ string) (*oauth2.Token, error) { + return &oauth2.Token{}, nil +} + +// GetUserData is a stub method for OAuthProvider interface, unused in OAuth1.0 protocol +func (t TwitterProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + return &UserProvidedData{}, nil +} + +// FetchUserData retrieves the user's data from the twitter provider +func (t TwitterProvider) FetchUserData(ctx context.Context, tok *oauth.AccessToken) (*UserProvidedData, error) { + var u twitterUser + resp, err := t.Consumer.Get( + t.UserInfoURL, + map[string]string{"include_entities": "false", "skip_status": "true", "include_email": "true"}, + tok, + ) + if err != nil { + return nil, err + } + defer utilities.SafeClose(resp.Body) + if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices { + return &UserProvidedData{}, fmt.Errorf("a %v error occurred with retrieving user from twitter", resp.StatusCode) + } + bits, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + _ = json.NewDecoder(bytes.NewReader(bits)).Decode(&u) + + data := &UserProvidedData{} + if u.Email != "" { + data.Emails = []Email{{ + Email: u.Email, + Verified: true, + Primary: true, + }} + } + + data.Metadata = &Claims{ + Issuer: t.UserInfoURL, + Subject: u.ID, + Name: u.Name, + Picture: u.AvatarURL, + PreferredUsername: u.UserName, + + // To be deprecated + UserNameKey: u.UserName, + FullName: u.Name, + AvatarURL: u.AvatarURL, + ProviderId: u.ID, + } + + return data, nil +} + +// AuthCodeURL fetches the request token from the twitter provider +func (t *TwitterProvider) AuthCodeURL(state string, args ...oauth2.AuthCodeOption) string { + // we do nothing with the state here as the state is passed in the requestURL step + requestToken, url, err := t.Consumer.GetRequestTokenAndUrl(t.CallbackURL + "?state=" + state) + if err != nil { + return "" + } + t.RequestToken = requestToken + t.AuthURL = url + return t.AuthURL +} + +func newConsumer(provider *TwitterProvider, authHost string) *oauth.Consumer { + c := oauth.NewConsumer( + provider.ClientKey, + provider.Secret, + oauth.ServiceProvider{ + RequestTokenUrl: authHost + requestURL, + AuthorizeTokenUrl: authHost + authenticateURL, + AccessTokenUrl: authHost + tokenURL, + }, + ) + return c +} + +// Marshal encodes the twitter request token +func (t TwitterProvider) Marshal() string { + b, _ := json.Marshal(t.RequestToken) + return string(b) +} + +// Unmarshal decodes the twitter request token +func (t TwitterProvider) Unmarshal(data string) (*oauth.RequestToken, error) { + requestToken := &oauth.RequestToken{} + err := json.NewDecoder(strings.NewReader(data)).Decode(requestToken) + return requestToken, err +} diff --git a/auth_v2.169.0/internal/api/provider/vercel_marketplace.go b/auth_v2.169.0/internal/api/provider/vercel_marketplace.go new file mode 100644 index 0000000..ba76a74 --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/vercel_marketplace.go @@ -0,0 
+1,78 @@ +package provider + +import ( + "context" + "errors" + "strings" + + "github.com/coreos/go-oidc/v3/oidc" + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +const ( + defaultVercelMarketplaceAPIBase = "api.vercel.com" + IssuerVercelMarketplace = "https://marketplace.vercel.com" +) + +type vercelMarketplaceProvider struct { + *oauth2.Config + oidc *oidc.Provider + APIPath string +} + +// NewVercelMarketplaceProvider creates a VercelMarketplace account provider via OIDC. +func NewVercelMarketplaceProvider(ext conf.OAuthProviderConfiguration, scopes string) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + apiPath := chooseHost(ext.URL, defaultVercelMarketplaceAPIBase) + + oauthScopes := []string{} + + if scopes != "" { + oauthScopes = append(oauthScopes, strings.Split(scopes, ",")...) + } + + oidcProvider, err := oidc.NewProvider(context.Background(), IssuerVercelMarketplace) + if err != nil { + return nil, err + } + + return &vercelMarketplaceProvider{ + oidc: oidcProvider, + Config: &oauth2.Config{ + ClientID: ext.ClientID[0], + ClientSecret: ext.Secret, + Endpoint: oauth2.Endpoint{ + AuthURL: apiPath + "/oauth/v2/authorization", + TokenURL: apiPath + "/oauth/v2/accessToken", + }, + Scopes: oauthScopes, + RedirectURL: ext.RedirectURI, + }, + APIPath: apiPath, + }, nil +} + +func (g vercelMarketplaceProvider) GetOAuthToken(code string) (*oauth2.Token, error) { + return g.Exchange(context.Background(), code) +} + +func (g vercelMarketplaceProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + idToken := tok.Extra("id_token") + if tok.AccessToken == "" || idToken == nil { + return nil, errors.New("vercel_marketplace: no OIDC ID token present in response") + } + + _, data, err := ParseIDToken(ctx, g.oidc, &oidc.Config{ + ClientID: g.ClientID, + }, idToken.(string), ParseIDTokenOptions{ + AccessToken: tok.AccessToken, + }) + if err != nil { + return nil, err + } + return data, nil +} diff --git a/auth_v2.169.0/internal/api/provider/workos.go b/auth_v2.169.0/internal/api/provider/workos.go new file mode 100644 index 0000000..75cafa2 --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/workos.go @@ -0,0 +1,98 @@ +package provider + +import ( + "context" + "strings" + + "github.com/mitchellh/mapstructure" + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +const ( + defaultWorkOSAPIBase = "api.workos.com" +) + +type workosProvider struct { + *oauth2.Config + APIPath string +} + +// See https://workos.com/docs/reference/sso/profile. +type workosUser struct { + ID string `mapstructure:"id"` + ConnectionID string `mapstructure:"connection_id"` + OrganizationID string `mapstructure:"organization_id"` + ConnectionType string `mapstructure:"connection_type"` + Email string `mapstructure:"email"` + FirstName string `mapstructure:"first_name"` + LastName string `mapstructure:"last_name"` + Object string `mapstructure:"object"` + IdpID string `mapstructure:"idp_id"` + RawAttributes map[string]interface{} `mapstructure:"raw_attributes"` +} + +// NewWorkOSProvider creates a WorkOS account provider. 
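+//
+// Hypothetical usage sketch (added for illustration; placeholder values,
+// and ValidateOAuth may require further fields). WorkOS SSO needs no extra
+// scopes, so the constructor takes only the provider configuration:
+//
+//	p, err := NewWorkOSProvider(conf.OAuthProviderConfiguration{
+//	    ClientID:    []string{"client_123"},
+//	    Secret:      "sk_test_123",
+//	    RedirectURI: "https://example.com/auth/v1/callback",
+//	})
+//	if err != nil {
+//	    // handle configuration error
+//	}
+//	tok, err := p.GetOAuthToken(code)     // exchange the callback code
+//	data, err := p.GetUserData(ctx, tok)  // profile arrives with the token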
+func NewWorkOSProvider(ext conf.OAuthProviderConfiguration) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + apiPath := chooseHost(ext.URL, defaultWorkOSAPIBase) + + return &workosProvider{ + Config: &oauth2.Config{ + ClientID: ext.ClientID[0], + ClientSecret: ext.Secret, + Endpoint: oauth2.Endpoint{ + AuthURL: apiPath + "/sso/authorize", + TokenURL: apiPath + "/sso/token", + }, + RedirectURL: ext.RedirectURI, + }, + APIPath: apiPath, + }, nil +} + +func (g workosProvider) GetOAuthToken(code string) (*oauth2.Token, error) { + return g.Exchange(context.Background(), code) +} + +func (g workosProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + if tok.AccessToken == "" { + return &UserProvidedData{}, nil + } + + // WorkOS API returns the user's profile data along with the OAuth2 token, so + // we can just convert from `map[string]interface{}` to `workosUser` without + // an additional network request. + var u workosUser + err := mapstructure.Decode(tok.Extra("profile"), &u) + if err != nil { + return nil, err + } + + data := &UserProvidedData{} + if u.Email != "" { + data.Emails = []Email{{ + Email: u.Email, + Verified: true, + Primary: true, + }} + } + + data.Metadata = &Claims{ + Issuer: g.APIPath, + Subject: u.ID, + Name: strings.TrimSpace(u.FirstName + " " + u.LastName), + CustomClaims: map[string]interface{}{ + "connection_id": u.ConnectionID, + "organization_id": u.OrganizationID, + }, + + // To be deprecated + FullName: strings.TrimSpace(u.FirstName + " " + u.LastName), + ProviderId: u.ID, + } + + return data, nil +} diff --git a/auth_v2.169.0/internal/api/provider/zoom.go b/auth_v2.169.0/internal/api/provider/zoom.go new file mode 100644 index 0000000..8e2e9fa --- /dev/null +++ b/auth_v2.169.0/internal/api/provider/zoom.go @@ -0,0 +1,91 @@ +package provider + +import ( + "context" + "strings" + + "github.com/supabase/auth/internal/conf" + "golang.org/x/oauth2" +) + +const ( + defaultZoomAuthBase = "zoom.us" + defaultZoomAPIBase = "api.zoom.us" +) + +type zoomProvider struct { + *oauth2.Config + APIPath string +} + +type zoomUser struct { + ID string `json:"id"` + FirstName string `json:"first_name"` + LastName string `json:"last_name"` + Email string `json:"email"` + EmailVerified int `json:"verified"` + LoginType string `json:"login_type"` + AvatarURL string `json:"pic_url"` +} + +// NewZoomProvider creates a Zoom account provider. 
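+//
+// Hypothetical usage sketch (added for illustration; placeholder values,
+// and ValidateOAuth may require further fields):
+//
+//	p, err := NewZoomProvider(conf.OAuthProviderConfiguration{
+//	    ClientID:    []string{"zoom-client-id"},
+//	    Secret:      "zoom-client-secret",
+//	    RedirectURI: "https://example.com/auth/v1/callback",
+//	})
+//	if err != nil {
+//	    // handle configuration error
+//	}
+//	authorizeURL := p.AuthCodeURL("state") // send the user to Zoom's consent page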
+func NewZoomProvider(ext conf.OAuthProviderConfiguration) (OAuthProvider, error) { + if err := ext.ValidateOAuth(); err != nil { + return nil, err + } + + apiPath := chooseHost(ext.URL, defaultZoomAPIBase) + "/v2" + authPath := chooseHost(ext.URL, defaultZoomAuthBase) + "/oauth" + + return &zoomProvider{ + Config: &oauth2.Config{ + ClientID: ext.ClientID[0], + ClientSecret: ext.Secret, + Endpoint: oauth2.Endpoint{ + AuthURL: authPath + "/authorize", + TokenURL: authPath + "/token", + }, + RedirectURL: ext.RedirectURI, + }, + APIPath: apiPath, + }, nil +} + +func (g zoomProvider) GetOAuthToken(code string) (*oauth2.Token, error) { + return g.Exchange(context.Background(), code) +} + +func (g zoomProvider) GetUserData(ctx context.Context, tok *oauth2.Token) (*UserProvidedData, error) { + var u zoomUser + if err := makeRequest(ctx, tok, g.Config, g.APIPath+"/users/me", &u); err != nil { + return nil, err + } + + data := &UserProvidedData{} + if u.Email != "" { + email := Email{} + email.Email = u.Email + email.Primary = true + // A login_type of "100" refers to email-based logins, not oauth. + // A user is verified (type 1) only if they received an email when their profile was created and confirmed the link. + // A zoom user will only be sent an email confirmation link if they signed up using their zoom work email and not oauth. + // See: https://devforum.zoom.us/t/how-to-determine-if-a-zoom-user-actually-owns-their-email-address/44430 + if u.LoginType != "100" || u.EmailVerified != 0 { + email.Verified = true + } + data.Emails = []Email{email} + } + + data.Metadata = &Claims{ + Issuer: g.APIPath, + Subject: u.ID, + Name: strings.TrimSpace(u.FirstName + " " + u.LastName), + Picture: u.AvatarURL, + + // To be deprecated + AvatarURL: u.AvatarURL, + FullName: strings.TrimSpace(u.FirstName + " " + u.LastName), + ProviderId: u.ID, + } + return data, nil +} diff --git a/auth_v2.169.0/internal/api/reauthenticate.go b/auth_v2.169.0/internal/api/reauthenticate.go new file mode 100644 index 0000000..5146ae4 --- /dev/null +++ b/auth_v2.169.0/internal/api/reauthenticate.go @@ -0,0 +1,97 @@ +package api + +import ( + "net/http" + + "github.com/supabase/auth/internal/api/sms_provider" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/crypto" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +const InvalidNonceMessage = "Nonce has expired or is invalid" + +// Reauthenticate sends a reauthentication otp to either the user's email or phone +func (a *API) Reauthenticate(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + + user := getUser(ctx) + email, phone := user.GetEmail(), user.GetPhone() + + if email == "" && phone == "" { + return badRequestError(ErrorCodeValidationFailed, "Reauthentication requires the user to have an email or a phone number") + } + + if email != "" { + if !user.IsConfirmed() { + return unprocessableEntityError(ErrorCodeEmailNotConfirmed, "Please verify your email first.") + } + } else if phone != "" { + if !user.IsPhoneConfirmed() { + return unprocessableEntityError(ErrorCodePhoneNotConfirmed, "Please verify your phone first.") + } + } + + messageID := "" + err := db.Transaction(func(tx *storage.Connection) error { + if terr := models.NewAuditLogEntry(r, tx, user, models.UserReauthenticateAction, "", nil); terr != nil { + return terr + } + if email != "" { + return a.sendReauthenticationOtp(r, tx, user) + } else if phone != "" { + mID, err := 
a.sendPhoneConfirmation(r, tx, user, phone, phoneReauthenticationOtp, sms_provider.SMSProvider) + if err != nil { + return err + } + + messageID = mID + } + return nil + }) + if err != nil { + return err + } + + ret := map[string]any{} + if messageID != "" { + ret["message_id"] = messageID + + } + + return sendJSON(w, http.StatusOK, ret) +} + +// verifyReauthentication checks if the nonce provided is valid +func (a *API) verifyReauthentication(nonce string, tx *storage.Connection, config *conf.GlobalConfiguration, user *models.User) error { + if user.ReauthenticationToken == "" || user.ReauthenticationSentAt == nil { + return unprocessableEntityError(ErrorCodeReauthenticationNotValid, InvalidNonceMessage) + } + var isValid bool + if user.GetEmail() != "" { + tokenHash := crypto.GenerateTokenHash(user.GetEmail(), nonce) + isValid = isOtpValid(tokenHash, user.ReauthenticationToken, user.ReauthenticationSentAt, config.Mailer.OtpExp) + } else if user.GetPhone() != "" { + if config.Sms.IsTwilioVerifyProvider() { + smsProvider, _ := sms_provider.GetSmsProvider(*config) + if err := smsProvider.(*sms_provider.TwilioVerifyProvider).VerifyOTP(string(user.Phone), nonce); err != nil { + return forbiddenError(ErrorCodeOTPExpired, "Token has expired or is invalid").WithInternalError(err) + } + return nil + } else { + tokenHash := crypto.GenerateTokenHash(user.GetPhone(), nonce) + isValid = isOtpValid(tokenHash, user.ReauthenticationToken, user.ReauthenticationSentAt, config.Sms.OtpExp) + } + } else { + return unprocessableEntityError(ErrorCodeReauthenticationNotValid, "Reauthentication requires an email or a phone number") + } + if !isValid { + return unprocessableEntityError(ErrorCodeReauthenticationNotValid, InvalidNonceMessage) + } + if err := user.ConfirmReauthentication(tx); err != nil { + return internalServerError("Error during reauthentication").WithInternalError(err) + } + return nil +} diff --git a/auth_v2.169.0/internal/api/recover.go b/auth_v2.169.0/internal/api/recover.go new file mode 100644 index 0000000..7c03c32 --- /dev/null +++ b/auth_v2.169.0/internal/api/recover.go @@ -0,0 +1,73 @@ +package api + +import ( + "net/http" + + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +// RecoverParams holds the parameters for a password recovery request +type RecoverParams struct { + Email string `json:"email"` + CodeChallenge string `json:"code_challenge"` + CodeChallengeMethod string `json:"code_challenge_method"` +} + +func (p *RecoverParams) Validate(a *API) error { + if p.Email == "" { + return badRequestError(ErrorCodeValidationFailed, "Password recovery requires an email") + } + var err error + if p.Email, err = a.validateEmail(p.Email); err != nil { + return err + } + if err := validatePKCEParams(p.CodeChallengeMethod, p.CodeChallenge); err != nil { + return err + } + return nil +} + +// Recover sends a recovery email +func (a *API) Recover(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + params := &RecoverParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + + flowType := getFlowFromChallenge(params.CodeChallenge) + if err := params.Validate(a); err != nil { + return err + } + + var user *models.User + var err error + aud := a.requestAud(ctx, r) + + user, err = models.FindUserByEmailAndAudience(db, params.Email, aud) + if err != nil { + if models.IsNotFoundError(err) { + return sendJSON(w, http.StatusOK, map[string]string{}) + } + return 
internalServerError("Unable to process request").WithInternalError(err) + } + if isPKCEFlow(flowType) { + if _, err := generateFlowState(db, models.Recovery.String(), models.Recovery, params.CodeChallengeMethod, params.CodeChallenge, &(user.ID)); err != nil { + return err + } + } + + err = db.Transaction(func(tx *storage.Connection) error { + if terr := models.NewAuditLogEntry(r, tx, user, models.UserRecoveryRequestedAction, "", nil); terr != nil { + return terr + } + return a.sendPasswordRecovery(r, tx, user, flowType) + }) + if err != nil { + return err + } + + return sendJSON(w, http.StatusOK, map[string]string{}) +} diff --git a/auth_v2.169.0/internal/api/recover_test.go b/auth_v2.169.0/internal/api/recover_test.go new file mode 100644 index 0000000..a7e655c --- /dev/null +++ b/auth_v2.169.0/internal/api/recover_test.go @@ -0,0 +1,153 @@ +package api + +import ( + "bytes" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" +) + +type RecoverTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration +} + +func TestRecover(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := &RecoverTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *RecoverTestSuite) SetupTest() { + models.TruncateAll(ts.API.db) + + // Create user + u, err := models.NewUser("", "test@example.com", "password", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating test user model") + require.NoError(ts.T(), ts.API.db.Create(u), "Error saving new test user") +} + +func (ts *RecoverTestSuite) TestRecover_FirstRecovery() { + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + u.RecoverySentAt = &time.Time{} + require.NoError(ts.T(), ts.API.db.Update(u)) + + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test@example.com", + })) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/recover", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup response recorder + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusOK, w.Code) + + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + assert.WithinDuration(ts.T(), time.Now(), *u.RecoverySentAt, 1*time.Second) +} + +func (ts *RecoverTestSuite) TestRecover_NoEmailSent() { + recoveryTime := time.Now().UTC().Add(-59 * time.Second) + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + u.RecoverySentAt = &recoveryTime + require.NoError(ts.T(), ts.API.db.Update(u)) + + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test@example.com", + })) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/recover", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup response recorder + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusTooManyRequests, 
w.Code) + + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + // ensure it did not send a new email + u1 := recoveryTime.Round(time.Second).Unix() + u2 := u.RecoverySentAt.Round(time.Second).Unix() + assert.Equal(ts.T(), u1, u2) +} + +func (ts *RecoverTestSuite) TestRecover_NewEmailSent() { + recoveryTime := time.Now().UTC().Add(-20 * time.Minute) + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + u.RecoverySentAt = &recoveryTime + require.NoError(ts.T(), ts.API.db.Update(u)) + + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test@example.com", + })) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/recover", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup response recorder + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusOK, w.Code) + + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + // ensure it sent a new email + assert.WithinDuration(ts.T(), time.Now(), *u.RecoverySentAt, 1*time.Second) +} + +func (ts *RecoverTestSuite) TestRecover_NoSideChannelLeak() { + email := "doesntexist@example.com" + + _, err := models.FindUserByEmailAndAudience(ts.API.db, email, ts.Config.JWT.Aud) + require.True(ts.T(), models.IsNotFoundError(err), "User with email %s does exist", email) + + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": email, + })) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/recover", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup response recorder + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusOK, w.Code) +} diff --git a/auth_v2.169.0/internal/api/resend.go b/auth_v2.169.0/internal/api/resend.go new file mode 100644 index 0000000..2c30536 --- /dev/null +++ b/auth_v2.169.0/internal/api/resend.go @@ -0,0 +1,154 @@ +package api + +import ( + "net/http" + + "github.com/supabase/auth/internal/api/sms_provider" + mail "github.com/supabase/auth/internal/mailer" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +// ResendConfirmationParams holds the parameters for a resend request +type ResendConfirmationParams struct { + Type string `json:"type"` + Email string `json:"email"` + Phone string `json:"phone"` +} + +func (p *ResendConfirmationParams) Validate(a *API) error { + config := a.config + + switch p.Type { + case mail.SignupVerification, mail.EmailChangeVerification, smsVerification, phoneChangeVerification: + break + default: + // type does not match one of the above + return badRequestError(ErrorCodeValidationFailed, "Missing one of these types: signup, email_change, sms, phone_change") + + } + if p.Email == "" && p.Type == mail.SignupVerification { + return badRequestError(ErrorCodeValidationFailed, "Type provided requires an email address") + } + if p.Phone == "" && p.Type == smsVerification { + return badRequestError(ErrorCodeValidationFailed, "Type provided requires a phone number") + } + + var err error + if p.Email != "" && p.Phone != "" { + return badRequestError(ErrorCodeValidationFailed, "Only an email 
address or phone number should be provided.")
+ } else if p.Email != "" {
+ if !config.External.Email.Enabled {
+ return badRequestError(ErrorCodeEmailProviderDisabled, "Email logins are disabled")
+ }
+ p.Email, err = a.validateEmail(p.Email)
+ if err != nil {
+ return err
+ }
+ } else if p.Phone != "" {
+ if !config.External.Phone.Enabled {
+ return badRequestError(ErrorCodePhoneProviderDisabled, "Phone logins are disabled")
+ }
+ p.Phone, err = validatePhone(p.Phone)
+ if err != nil {
+ return err
+ }
+ } else {
+ // both email and phone are empty
+ return badRequestError(ErrorCodeValidationFailed, "Missing email address or phone number")
+ }
+ return nil
+}
+
+// Resend sends the signup confirmation, sms, email change or phone change verification again
+func (a *API) Resend(w http.ResponseWriter, r *http.Request) error {
+ ctx := r.Context()
+ db := a.db.WithContext(ctx)
+ params := &ResendConfirmationParams{}
+ if err := retrieveRequestParams(r, params); err != nil {
+ return err
+ }
+
+ if err := params.Validate(a); err != nil {
+ return err
+ }
+
+ var user *models.User
+ var err error
+ aud := a.requestAud(ctx, r)
+ if params.Email != "" {
+ user, err = models.FindUserByEmailAndAudience(db, params.Email, aud)
+ } else if params.Phone != "" {
+ user, err = models.FindUserByPhoneAndAudience(db, params.Phone, aud)
+ }
+
+ if err != nil {
+ if models.IsNotFoundError(err) {
+ return sendJSON(w, http.StatusOK, map[string]string{})
+ }
+ return internalServerError("Unable to process request").WithInternalError(err)
+ }
+
+ switch params.Type {
+ case mail.SignupVerification:
+ if user.IsConfirmed() {
+ // if the user's email is confirmed already, we don't need to send a confirmation email again
+ return sendJSON(w, http.StatusOK, map[string]string{})
+ }
+ case smsVerification:
+ if user.IsPhoneConfirmed() {
+ // if the user's phone is confirmed already, we don't need to send a confirmation sms again
+ return sendJSON(w, http.StatusOK, map[string]string{})
+ }
+ case mail.EmailChangeVerification:
+ // do not resend if user doesn't have a new email address
+ if user.EmailChange == "" {
+ return sendJSON(w, http.StatusOK, map[string]string{})
+ }
+ case phoneChangeVerification:
+ // do not resend if user doesn't have a new phone number
+ if user.PhoneChange == "" {
+ return sendJSON(w, http.StatusOK, map[string]string{})
+ }
+ }
+
+ messageID := ""
+ err = db.Transaction(func(tx *storage.Connection) error {
+ switch params.Type {
+ case mail.SignupVerification:
+ if terr := models.NewAuditLogEntry(r, tx, user, models.UserConfirmationRequestedAction, "", nil); terr != nil {
+ return terr
+ }
+ // PKCE not implemented yet
+ return a.sendConfirmation(r, tx, user, models.ImplicitFlow)
+ case smsVerification:
+ if terr := models.NewAuditLogEntry(r, tx, user, models.UserRecoveryRequestedAction, "", nil); terr != nil {
+ return terr
+ }
+ mID, terr := a.sendPhoneConfirmation(r, tx, user, params.Phone, phoneConfirmationOtp, sms_provider.SMSProvider)
+ if terr != nil {
+ return terr
+ }
+ messageID = mID
+ case mail.EmailChangeVerification:
+ return a.sendEmailChange(r, tx, user, user.EmailChange, models.ImplicitFlow)
+ case phoneChangeVerification:
+ mID, terr := a.sendPhoneConfirmation(r, tx, user, user.PhoneChange, phoneChangeVerification, sms_provider.SMSProvider)
+ if terr != nil {
+ return terr
+ }
+ messageID = mID
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ ret := map[string]any{}
+ if messageID != "" {
+ ret["message_id"] = messageID
+ }
+
+ return sendJSON(w, http.StatusOK, ret)
+}
diff --git
a/auth_v2.169.0/internal/api/resend_test.go b/auth_v2.169.0/internal/api/resend_test.go
new file mode 100644
index 0000000..83c58c4
--- /dev/null
+++ b/auth_v2.169.0/internal/api/resend_test.go
@@ -0,0 +1,217 @@
+package api
+
+import (
+ "bytes"
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+ "github.com/supabase/auth/internal/conf"
+ mail "github.com/supabase/auth/internal/mailer"
+ "github.com/supabase/auth/internal/models"
+)
+
+type ResendTestSuite struct {
+ suite.Suite
+ API *API
+ Config *conf.GlobalConfiguration
+}
+
+func TestResend(t *testing.T) {
+ api, config, err := setupAPIForTest()
+ require.NoError(t, err)
+
+ ts := &ResendTestSuite{
+ API: api,
+ Config: config,
+ }
+ defer api.db.Close()
+
+ suite.Run(t, ts)
+}
+
+func (ts *ResendTestSuite) SetupTest() {
+ models.TruncateAll(ts.API.db)
+}
+
+func (ts *ResendTestSuite) TestResendValidation() {
+ cases := []struct {
+ desc string
+ params map[string]interface{}
+ expected map[string]interface{}
+ }{
+ {
+ desc: "Invalid type",
+ params: map[string]interface{}{
+ "type": "invalid",
+ "email": "foo@example.com",
+ },
+ expected: map[string]interface{}{
+ "code": http.StatusBadRequest,
+ "message": "Missing one of these types: signup, email_change, sms, phone_change",
+ },
+ },
+ {
+ desc: "Type & email mismatch",
+ params: map[string]interface{}{
+ "type": "sms",
+ "email": "foo@example.com",
+ },
+ expected: map[string]interface{}{
+ "code": http.StatusBadRequest,
+ "message": "Type provided requires a phone number",
+ },
+ },
+ {
+ desc: "Phone & email change type",
+ params: map[string]interface{}{
+ "type": "email_change",
+ "phone": "+123456789",
+ },
+ expected: map[string]interface{}{
+ "code": http.StatusOK,
+ "message": nil,
+ },
+ },
+ {
+ desc: "Email & phone number provided",
+ params: map[string]interface{}{
+ "type": "email_change",
+ "phone": "+123456789",
+ "email": "foo@example.com",
+ },
+ expected: map[string]interface{}{
+ "code": http.StatusBadRequest,
+ "message": "Only an email address or phone number should be provided.",
+ },
+ },
+ }
+ for _, c := range cases {
+ ts.Run(c.desc, func() {
+ var buffer bytes.Buffer
+ require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.params))
+ req := httptest.NewRequest(http.MethodPost, "http://localhost/resend", &buffer)
+ req.Header.Set("Content-Type", "application/json")
+
+ w := httptest.NewRecorder()
+ ts.API.handler.ServeHTTP(w, req)
+ require.Equal(ts.T(), c.expected["code"], w.Code)
+
+ data := make(map[string]interface{})
+ require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data))
+ require.Equal(ts.T(), c.expected["message"], data["msg"])
+ })
+ }
+
+}
+
+func (ts *ResendTestSuite) TestResendSuccess() {
+ // Create user
+ u, err := models.NewUser("123456789", "foo@example.com", "password", ts.Config.JWT.Aud, nil)
+ require.NoError(ts.T(), err, "Error creating test user model")
+
+ // Avoid max freq limit error
+ now := time.Now().Add(-1 * time.Minute)
+
+ // Enable phone login for phone-related tests
+ ts.Config.External.Phone.Enabled = true
+ // Disable secure email change
+ ts.Config.Mailer.SecureEmailChangeEnabled = false
+
+ u.ConfirmationToken = "123456"
+ u.ConfirmationSentAt = &now
+ u.EmailChange = "bar@example.com"
+ u.EmailChangeSentAt = &now
+ u.EmailChangeTokenNew = "123456"
+ require.NoError(ts.T(), ts.API.db.Create(u), "Error saving new test user")
+ require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID,
u.GetEmail(), u.ConfirmationToken, models.ConfirmationToken)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.EmailChange, u.EmailChangeTokenNew, models.EmailChangeTokenNew)) + + phoneUser, err := models.NewUser("1234567890", "", "password", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating test user model") + phoneUser.EmailChange = "bar@example.com" + phoneUser.EmailChangeSentAt = &now + phoneUser.EmailChangeTokenNew = "123456" + require.NoError(ts.T(), ts.API.db.Create(phoneUser), "Error saving new test user") + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, phoneUser.ID, phoneUser.EmailChange, phoneUser.EmailChangeTokenNew, models.EmailChangeTokenNew)) + + emailUser, err := models.NewUser("", "bar@example.com", "password", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating test user model") + phoneUser.PhoneChange = "1234567890" + phoneUser.PhoneChangeSentAt = &now + phoneUser.PhoneChangeToken = "123456" + require.NoError(ts.T(), ts.API.db.Create(emailUser), "Error saving new test user") + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, phoneUser.ID, phoneUser.PhoneChange, phoneUser.PhoneChangeToken, models.PhoneChangeToken)) + + cases := []struct { + desc string + params map[string]interface{} + // expected map[string]interface{} + user *models.User + }{ + { + desc: "Resend signup confirmation", + params: map[string]interface{}{ + "type": "signup", + "email": u.GetEmail(), + }, + user: u, + }, + { + desc: "Resend email change", + params: map[string]interface{}{ + "type": "email_change", + "email": u.GetEmail(), + }, + user: u, + }, + { + desc: "Resend email change for phone user", + params: map[string]interface{}{ + "type": "email_change", + "phone": phoneUser.GetPhone(), + }, + user: phoneUser, + }, + { + desc: "Resend phone change for email user", + params: map[string]interface{}{ + "type": "phone_change", + "email": emailUser.GetEmail(), + }, + user: emailUser, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.params)) + req := httptest.NewRequest(http.MethodPost, "http://localhost/resend", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + switch c.params["type"] { + case mail.SignupVerification, mail.EmailChangeVerification: + dbUser, err := models.FindUserByID(ts.API.db, c.user.ID) + require.NoError(ts.T(), err) + require.NotEmpty(ts.T(), dbUser) + + if c.params["type"] == mail.SignupVerification { + require.NotEqual(ts.T(), dbUser.ConfirmationToken, c.user.ConfirmationToken) + require.NotEqual(ts.T(), dbUser.ConfirmationSentAt, c.user.ConfirmationSentAt) + } else if c.params["type"] == mail.EmailChangeVerification { + require.NotEqual(ts.T(), dbUser.EmailChangeTokenNew, c.user.EmailChangeTokenNew) + require.NotEqual(ts.T(), dbUser.EmailChangeSentAt, c.user.EmailChangeSentAt) + } + } + }) + } +} diff --git a/auth_v2.169.0/internal/api/router.go b/auth_v2.169.0/internal/api/router.go new file mode 100644 index 0000000..1feb66d --- /dev/null +++ b/auth_v2.169.0/internal/api/router.go @@ -0,0 +1,92 @@ +package api + +import ( + "context" + "net/http" + + "github.com/go-chi/chi/v5" +) + +func newRouter() *router { + return &router{chi.NewRouter()} +} + +type router struct { + chi chi.Router +} + +func (r *router) Route(pattern string, fn func(*router)) { + 
r.chi.Route(pattern, func(c chi.Router) { + fn(&router{c}) + }) +} + +func (r *router) Get(pattern string, fn apiHandler) { + r.chi.Get(pattern, handler(fn)) +} +func (r *router) Post(pattern string, fn apiHandler) { + r.chi.Post(pattern, handler(fn)) +} +func (r *router) Put(pattern string, fn apiHandler) { + r.chi.Put(pattern, handler(fn)) +} +func (r *router) Delete(pattern string, fn apiHandler) { + r.chi.Delete(pattern, handler(fn)) +} + +func (r *router) With(fn middlewareHandler) *router { + c := r.chi.With(middleware(fn)) + return &router{c} +} + +func (r *router) WithBypass(fn func(next http.Handler) http.Handler) *router { + c := r.chi.With(fn) + return &router{c} +} + +func (r *router) Use(fn middlewareHandler) { + r.chi.Use(middleware(fn)) +} +func (r *router) UseBypass(fn func(next http.Handler) http.Handler) { + r.chi.Use(fn) +} + +func (r *router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + r.chi.ServeHTTP(w, req) +} + +type apiHandler func(w http.ResponseWriter, r *http.Request) error + +func handler(fn apiHandler) http.HandlerFunc { + return fn.serve +} + +func (h apiHandler) serve(w http.ResponseWriter, r *http.Request) { + if err := h(w, r); err != nil { + HandleResponseError(err, w, r) + } +} + +type middlewareHandler func(w http.ResponseWriter, r *http.Request) (context.Context, error) + +func (m middlewareHandler) handler(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + m.serve(next, w, r) + }) +} + +func (m middlewareHandler) serve(next http.Handler, w http.ResponseWriter, r *http.Request) { + ctx, err := m(w, r) + if err != nil { + HandleResponseError(err, w, r) + return + } + if ctx != nil { + r = r.WithContext(ctx) + } + next.ServeHTTP(w, r) +} + +func middleware(fn middlewareHandler) func(http.Handler) http.Handler { + return fn.handler +} diff --git a/auth_v2.169.0/internal/api/saml.go b/auth_v2.169.0/internal/api/saml.go new file mode 100644 index 0000000..f32d443 --- /dev/null +++ b/auth_v2.169.0/internal/api/saml.go @@ -0,0 +1,113 @@ +package api + +import ( + "encoding/xml" + "net/http" + "net/url" + "strings" + "time" + + "github.com/crewjam/saml" + "github.com/crewjam/saml/samlsp" +) + +// getSAMLServiceProvider generates a new service provider object with the +// (optionally) provided descriptor (metadata) for the identity provider. +func (a *API) getSAMLServiceProvider(identityProvider *saml.EntityDescriptor, idpInitiated bool) *saml.ServiceProvider { + var externalURL *url.URL + + if a.config.SAML.ExternalURL != "" { + url, err := url.ParseRequestURI(a.config.SAML.ExternalURL) + if err != nil { + // this should not fail as a.config should have been validated using #Validate() + panic(err) + } + + externalURL = url + } else { + url, err := url.ParseRequestURI(a.config.API.ExternalURL) + if err != nil { + // this should not fail as a.config should have been validated using #Validate() + panic(err) + } + + externalURL = url + } + + if !strings.HasSuffix(externalURL.Path, "/") { + externalURL.Path += "/" + } + + externalURL.Path += "sso/" + + provider := samlsp.DefaultServiceProvider(samlsp.Options{ + URL: *externalURL, + Key: a.config.SAML.RSAPrivateKey, + Certificate: a.config.SAML.Certificate, + SignRequest: true, + AllowIDPInitiated: idpInitiated, + IDPMetadata: identityProvider, + }) + + provider.AuthnNameIDFormat = saml.PersistentNameIDFormat + + return &provider +} + +// SAMLMetadata serves GoTrue's SAML Service Provider metadata file. 
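+//
+// Illustrative request/response behaviour (a sketch inferred from the code
+// below, not normative documentation; paths follow the /sso/ prefix the
+// service provider is configured with):
+//
+//	GET <external-url>/sso/saml/metadata
+//	    -> 200, Content-Type: application/xml, cacheable for 10 minutes
+//	GET <external-url>/sso/saml/metadata?download=true
+//	    -> same XML as an attachment, with ValidUntil set 5 years out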
+func (a *API) SAMLMetadata(w http.ResponseWriter, r *http.Request) error { + serviceProvider := a.getSAMLServiceProvider(nil, true) + + metadata := serviceProvider.Metadata() + + if r.FormValue("download") == "true" { + // 5 year expiration, comparable to what GSuite does + metadata.ValidUntil = time.Now().UTC().AddDate(5, 0, 0) + } + + for i := range metadata.SPSSODescriptors { + // we set this to false since the IdP initiated flow can only + // sign the Assertion, and not the full Request + // unfortunately this is hardcoded in the crewjam library if + // signatures (instead of encryption) are supported + // https://github.com/crewjam/saml/blob/v0.4.8/service_provider.go#L217 + metadata.SPSSODescriptors[i].AuthnRequestsSigned = nil + + // advertize the requested NameID formats (either persistent or email address) + metadata.SPSSODescriptors[i].NameIDFormats = []saml.NameIDFormat{ + saml.EmailAddressNameIDFormat, + saml.PersistentNameIDFormat, + } + } + + for i := range metadata.SPSSODescriptors { + spd := &metadata.SPSSODescriptors[i] + + var keyDescriptors []saml.KeyDescriptor + + for _, kd := range spd.KeyDescriptors { + // only advertize key as usable for encryption if allowed + if kd.Use == "signing" || (a.config.SAML.AllowEncryptedAssertions && kd.Use == "encryption") { + keyDescriptors = append(keyDescriptors, kd) + } + } + + spd.KeyDescriptors = keyDescriptors + } + + metadataXML, err := xml.Marshal(metadata) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/xml") + w.Header().Set("Cache-Control", "public, max-age=600") // cache at CDN for 10 minutes + + if r.FormValue("download") == "true" { + w.Header().Set("Content-Disposition", "attachment; filename=\"metadata.xml\"") + } + + _, err = w.Write(metadataXML) + + return err +} diff --git a/auth_v2.169.0/internal/api/saml_test.go b/auth_v2.169.0/internal/api/saml_test.go new file mode 100644 index 0000000..a290fb2 --- /dev/null +++ b/auth_v2.169.0/internal/api/saml_test.go @@ -0,0 +1,59 @@ +package api + +import ( + tst "testing" + "time" + + "encoding/xml" + "net/http" + "net/http/httptest" + + "github.com/crewjam/saml" + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/conf" +) + +func TestSAMLMetadataWithAPI(t *tst.T) { + config, err := conf.LoadGlobal(apiTestConfig) + require.NoError(t, err) + config.API.ExternalURL = "https://projectref.supabase.co/auth/v1/" + config.SAML.Enabled = true + config.SAML.PrivateKey = 
"MIIEowIBAAKCAQEAszrVveMQcSsa0Y+zN1ZFb19cRS0jn4UgIHTprW2tVBmO2PABzjY3XFCfx6vPirMAPWBYpsKmXrvm1tr0A6DZYmA8YmJd937VUQ67fa6DMyppBYTjNgGEkEhmKuszvF3MARsIKCGtZqUrmS7UG4404wYxVppnr2EYm3RGtHlkYsXu20MBqSDXP47bQP+PkJqC3BuNGk3xt5UHl2FSFpTHelkI6lBynw16B+lUT1F96SERNDaMqi/TRsZdGe5mB/29ngC/QBMpEbRBLNRir5iUevKS7Pn4aph9Qjaxx/97siktK210FJT23KjHpgcUfjoQ6BgPBTLtEeQdRyDuc/CgfwIDAQABAoIBAGYDWOEpupQPSsZ4mjMnAYJwrp4ZISuMpEqVAORbhspVeb70bLKonT4IDcmiexCg7cQBcLQKGpPVM4CbQ0RFazXZPMVq470ZDeWDEyhoCfk3bGtdxc1Zc9CDxNMs6FeQs6r1beEZug6weG5J/yRn/qYxQife3qEuDMl+lzfl2EN3HYVOSnBmdt50dxRuX26iW3nqqbMRqYn9OHuJ1LvRRfYeyVKqgC5vgt/6Tf7DAJwGe0dD7q08byHV8DBZ0pnMVU0bYpf1GTgMibgjnLjK//EVWafFHtN+RXcjzGmyJrk3+7ZyPUpzpDjO21kpzUQLrpEkkBRnmg6bwHnSrBr8avECgYEA3pq1PTCAOuLQoIm1CWR9/dhkbJQiKTJevlWV8slXQLR50P0WvI2RdFuSxlWmA4xZej8s4e7iD3MYye6SBsQHygOVGc4efvvEZV8/XTlDdyj7iLVGhnEmu2r7AFKzy8cOvXx0QcLg+zNd7vxZv/8D3Qj9Jje2LjLHKM5n/dZ3RzUCgYEAzh5Lo2anc4WN8faLGt7rPkGQF+7/18ImQE11joHWa3LzAEy7FbeOGpE/vhOv5umq5M/KlWFIRahMEQv4RusieHWI19ZLIP+JwQFxWxS+cPp3xOiGcquSAZnlyVSxZ//dlVgaZq2o2MfrxECcovRlaknl2csyf+HjFFwKlNxHm2MCgYAr//R3BdEy0oZeVRndo2lr9YvUEmu2LOihQpWDCd0fQw0ZDA2kc28eysL2RROte95r1XTvq6IvX5a0w11FzRWlDpQ4J4/LlcQ6LVt+98SoFwew+/PWuyLmxLycUbyMOOpm9eSc4wJJZNvaUzMCSkvfMtmm5jgyZYMMQ9A2Ul/9SQKBgB9mfh9mhBwVPIqgBJETZMMXOdxrjI5SBYHGSyJqpT+5Q0vIZLfqPrvNZOiQFzwWXPJ+tV4Mc/YorW3rZOdo6tdvEGnRO6DLTTEaByrY/io3/gcBZXoSqSuVRmxleqFdWWRnB56c1hwwWLqNHU+1671FhL6pNghFYVK4suP6qu4BAoGBAMk+VipXcIlD67mfGrET/xDqiWWBZtgTzTMjTpODhDY1GZck1eb4CQMP5j5V3gFJ4cSgWDJvnWg8rcz0unz/q4aeMGl1rah5WNDWj1QKWMS6vJhMHM/rqN1WHWR0ZnV83svYgtg0zDnQKlLujqW4JmGXLMU7ur6a+e6lpa1fvLsP" + config.API.MaxRequestDuration = 5 * time.Second + + require.NoError(t, config.ApplyDefaults()) + require.NoError(t, config.SAML.PopulateFields(config.API.ExternalURL)) + + require.NotNil(t, config.SAML.Certificate) + + api := NewAPI(config, nil) + + // Setup request + req := httptest.NewRequest(http.MethodGet, "http://localhost/sso/saml/metadata", nil) + + w := httptest.NewRecorder() + api.handler.ServeHTTP(w, req) + require.Equal(t, w.Code, http.StatusOK) + + metadata := saml.EntityDescriptor{} + require.NoError(t, xml.Unmarshal(w.Body.Bytes(), &metadata)) + + require.Equal(t, metadata.EntityID, "https://projectref.supabase.co/auth/v1/sso/saml/metadata") + require.Equal(t, len(metadata.SPSSODescriptors), 1) + + require.Nil(t, metadata.SPSSODescriptors[0].AuthnRequestsSigned) + require.True(t, *(metadata.SPSSODescriptors[0].WantAssertionsSigned)) + + require.Equal(t, len(metadata.SPSSODescriptors[0].AssertionConsumerServices), 2) + require.Equal(t, metadata.SPSSODescriptors[0].AssertionConsumerServices[0].Location, "https://projectref.supabase.co/auth/v1/sso/saml/acs") + require.Equal(t, metadata.SPSSODescriptors[0].AssertionConsumerServices[1].Location, "https://projectref.supabase.co/auth/v1/sso/saml/acs") + require.Equal(t, len(metadata.SPSSODescriptors[0].SingleLogoutServices), 1) + require.Equal(t, metadata.SPSSODescriptors[0].SingleLogoutServices[0].Location, "https://projectref.supabase.co/auth/v1/sso/saml/slo") + + require.Equal(t, len(metadata.SPSSODescriptors[0].KeyDescriptors), 1) + require.Equal(t, metadata.SPSSODescriptors[0].KeyDescriptors[0].Use, "signing") + + require.Equal(t, len(metadata.SPSSODescriptors[0].NameIDFormats), 2) + require.Equal(t, metadata.SPSSODescriptors[0].NameIDFormats[0], saml.EmailAddressNameIDFormat) + require.Equal(t, metadata.SPSSODescriptors[0].NameIDFormats[1], saml.PersistentNameIDFormat) +} diff --git a/auth_v2.169.0/internal/api/samlacs.go b/auth_v2.169.0/internal/api/samlacs.go new 
file mode 100644 index 0000000..8627f93 --- /dev/null +++ b/auth_v2.169.0/internal/api/samlacs.go @@ -0,0 +1,327 @@ +package api + +import ( + "context" + "encoding/base64" + "encoding/json" + "encoding/xml" + "net/http" + "net/url" + "time" + + "github.com/crewjam/saml" + "github.com/fatih/structs" + "github.com/gofrs/uuid" + "github.com/supabase/auth/internal/api/provider" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/observability" + "github.com/supabase/auth/internal/storage" + "github.com/supabase/auth/internal/utilities" +) + +func (a *API) samlDestroyRelayState(ctx context.Context, relayState *models.SAMLRelayState) error { + db := a.db.WithContext(ctx) + + // It's OK to destroy the RelayState, as a user will + // likely initiate a completely new login flow, instead + // of reusing the same one. + + return db.Transaction(func(tx *storage.Connection) error { + return tx.Destroy(relayState) + }) +} + +func IsSAMLMetadataStale(idpMetadata *saml.EntityDescriptor, samlProvider models.SAMLProvider) bool { + now := time.Now() + + hasValidityExpired := !idpMetadata.ValidUntil.IsZero() && now.After(idpMetadata.ValidUntil) + hasCacheDurationExceeded := idpMetadata.CacheDuration != 0 && now.After(samlProvider.UpdatedAt.Add(idpMetadata.CacheDuration)) + + // if metadata XML does not publish validity or caching information, update once in 24 hours + needsForceUpdate := idpMetadata.ValidUntil.IsZero() && idpMetadata.CacheDuration == 0 && now.After(samlProvider.UpdatedAt.Add(24*time.Hour)) + + return hasValidityExpired || hasCacheDurationExceeded || needsForceUpdate +} + +func (a *API) SamlAcs(w http.ResponseWriter, r *http.Request) error { + if err := a.handleSamlAcs(w, r); err != nil { + u, uerr := url.Parse(a.config.SiteURL) + if uerr != nil { + return internalServerError("site url is improperly formatted").WithInternalError(err) + } + + q := getErrorQueryString(err, utilities.GetRequestID(r.Context()), observability.GetLogEntry(r).Entry, u.Query()) + u.RawQuery = q.Encode() + http.Redirect(w, r, u.String(), http.StatusSeeOther) + } + return nil +} + +// handleSamlAcs implements the main Assertion Consumer Service endpoint behavior. +func (a *API) handleSamlAcs(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + + db := a.db.WithContext(ctx) + config := a.config + log := observability.GetLogEntry(r).Entry + + relayStateValue := r.FormValue("RelayState") + relayStateUUID := uuid.FromStringOrNil(relayStateValue) + relayStateURL, _ := url.ParseRequestURI(relayStateValue) + + entityId := "" + initiatedBy := "" + redirectTo := "" + var requestIds []string + + var flowState *models.FlowState + if relayStateUUID != uuid.Nil { + // relay state is a valid UUID, therefore this is likely an SP-initiated flow + + relayState, err := models.FindSAMLRelayStateByID(db, relayStateUUID) + if models.IsNotFoundError(err) { + return notFoundError(ErrorCodeSAMLRelayStateNotFound, "SAML RelayState does not exist, try logging in again?") + } else if err != nil { + return err + } + + if time.Since(relayState.CreatedAt) >= a.config.SAML.RelayStateValidityPeriod { + if err := a.samlDestroyRelayState(ctx, relayState); err != nil { + return internalServerError("SAML RelayState has expired and destroying it failed. Try logging in again?").WithInternalError(err) + } + + return unprocessableEntityError(ErrorCodeSAMLRelayStateExpired, "SAML RelayState has expired. 
Try logging in again?") + } + + // TODO: add abuse detection to bind the RelayState UUID with a + // HTTP-Only cookie + + ssoProvider, err := models.FindSSOProviderByID(db, relayState.SSOProviderID) + if err != nil { + return internalServerError("Unable to find SSO Provider from SAML RelayState") + } + + initiatedBy = "sp" + entityId = ssoProvider.SAMLProvider.EntityID + redirectTo = relayState.RedirectTo + requestIds = append(requestIds, relayState.RequestID) + if relayState.FlowState != nil { + flowState = relayState.FlowState + } + + if err := a.samlDestroyRelayState(ctx, relayState); err != nil { + return err + } + } else if relayStateValue == "" || relayStateURL != nil { + // RelayState may be a URL in which case it's the URL where the + // IdP is telling us to redirect the user to + + if r.FormValue("SAMLart") != "" { + // SAML Artifact responses are possible only when + // RelayState can be used to identify the Identity + // Provider. + return badRequestError(ErrorCodeValidationFailed, "SAML Artifact response can only be used with SP initiated flow") + } + + samlResponse := r.FormValue("SAMLResponse") + if samlResponse == "" { + return badRequestError(ErrorCodeValidationFailed, "SAMLResponse is missing") + } + + responseXML, err := base64.StdEncoding.DecodeString(samlResponse) + if err != nil { + return badRequestError(ErrorCodeValidationFailed, "SAMLResponse is not a valid Base64 string") + } + + var peekResponse saml.Response + err = xml.Unmarshal(responseXML, &peekResponse) + if err != nil { + return badRequestError(ErrorCodeValidationFailed, "SAMLResponse is not a valid XML SAML assertion").WithInternalError(err) + } + + initiatedBy = "idp" + entityId = peekResponse.Issuer.Value + redirectTo = relayStateValue + } else { + // RelayState can't be identified, so SAML flow can't continue + return badRequestError(ErrorCodeValidationFailed, "SAML RelayState is not a valid UUID or URL") + } + + ssoProvider, err := models.FindSAMLProviderByEntityID(db, entityId) + if models.IsNotFoundError(err) { + return notFoundError(ErrorCodeSAMLIdPNotFound, "A SAML connection has not been established with this Identity Provider") + } else if err != nil { + return err + } + + idpMetadata, err := ssoProvider.SAMLProvider.EntityDescriptor() + if err != nil { + return err + } + + samlMetadataModified := false + + if ssoProvider.SAMLProvider.MetadataURL == nil { + if !idpMetadata.ValidUntil.IsZero() && time.Until(idpMetadata.ValidUntil) <= (30*24*60)*time.Second { + logentry := log.WithField("sso_provider_id", ssoProvider.ID.String()) + logentry = logentry.WithField("expires_in", time.Until(idpMetadata.ValidUntil).String()) + logentry = logentry.WithField("valid_until", idpMetadata.ValidUntil) + logentry = logentry.WithField("saml_entity_id", ssoProvider.SAMLProvider.EntityID) + + logentry.Warn("SAML Metadata for identity provider will expire soon! 
Update its metadata_xml!") + } + } else if *ssoProvider.SAMLProvider.MetadataURL != "" && IsSAMLMetadataStale(idpMetadata, ssoProvider.SAMLProvider) { + rawMetadata, err := fetchSAMLMetadata(ctx, *ssoProvider.SAMLProvider.MetadataURL) + if err != nil { + // Fail silently but raise warning and continue with existing metadata + logentry := log.WithField("sso_provider_id", ssoProvider.ID.String()) + logentry = logentry.WithField("expires_in", time.Until(idpMetadata.ValidUntil).String()) + logentry = logentry.WithField("valid_until", idpMetadata.ValidUntil) + logentry = logentry.WithError(err) + logentry.Warn("SAML Metadata could not be retrieved, continuing with existing metadata") + } else { + ssoProvider.SAMLProvider.MetadataXML = string(rawMetadata) + samlMetadataModified = true + } + } + + serviceProvider := a.getSAMLServiceProvider(idpMetadata, initiatedBy == "idp") + spAssertion, err := serviceProvider.ParseResponse(r, requestIds) + if err != nil { + if ire, ok := err.(*saml.InvalidResponseError); ok { + return badRequestError(ErrorCodeValidationFailed, "SAML Assertion is not valid %s", ire.Response).WithInternalError(ire.PrivateErr) + } + + return badRequestError(ErrorCodeValidationFailed, "SAML Assertion is not valid").WithInternalError(err) + } + + assertion := SAMLAssertion{ + spAssertion, + } + + userID := assertion.UserID() + if userID == "" { + return badRequestError(ErrorCodeSAMLAssertionNoUserID, "SAML Assertion did not contain a persistent Subject Identifier attribute or Subject NameID uniquely identifying this user") + } + + claims := assertion.Process(ssoProvider.SAMLProvider.AttributeMapping) + + email, ok := claims["email"].(string) + if !ok || email == "" { + // mapping does not identify the email attribute, try to figure it out + email = assertion.Email() + } + + if email == "" { + return badRequestError(ErrorCodeSAMLAssertionNoEmail, "SAML Assertion does not contain an email address") + } else { + claims["email"] = email + } + + jsonClaims, err := json.Marshal(claims) + if err != nil { + return internalServerError("Mapped claims from provider could not be serialized into JSON").WithInternalError(err) + } + + providerClaims := &provider.Claims{} + if err := json.Unmarshal(jsonClaims, providerClaims); err != nil { + return internalServerError("Mapped claims from provider could not be deserialized from JSON").WithInternalError(err) + } + + providerClaims.Subject = userID + providerClaims.Issuer = ssoProvider.SAMLProvider.EntityID + providerClaims.Email = email + providerClaims.EmailVerified = true + + providerClaimsMap := structs.Map(providerClaims) + + // remove all of the parsed claims, so that the rest can go into CustomClaims + for key := range providerClaimsMap { + delete(claims, key) + } + + providerClaims.CustomClaims = claims + + var userProvidedData provider.UserProvidedData + + userProvidedData.Emails = append(userProvidedData.Emails, provider.Email{ + Email: email, + Verified: true, + Primary: true, + }) + + // userProvidedData.Provider.Type = "saml" + // userProvidedData.Provider.ID = ssoProvider.ID.String() + // userProvidedData.Provider.SAMLEntityID = ssoProvider.SAMLProvider.EntityID + // userProvidedData.Provider.SAMLInitiatedBy = initiatedBy + + userProvidedData.Metadata = providerClaims + + // TODO: below + // refreshTokenParams.SSOProviderID = ssoProvider.ID + // refreshTokenParams.InitiatedByProvider = initiatedBy == "idp" + // refreshTokenParams.NotBefore = assertion.NotBefore() + // refreshTokenParams.NotAfter = assertion.NotAfter() + + notAfter := 
assertion.NotAfter() + + var grantParams models.GrantParams + + grantParams.FillGrantParams(r) + + if !notAfter.IsZero() { + grantParams.SessionNotAfter = ¬After + } + + var token *AccessTokenResponse + if samlMetadataModified { + if err := db.UpdateColumns(&ssoProvider.SAMLProvider, "metadata_xml", "updated_at"); err != nil { + return err + } + } + + if err := db.Transaction(func(tx *storage.Connection) error { + var terr error + var user *models.User + + // accounts potentially created via SAML can contain non-unique email addresses in the auth.users table + if user, terr = a.createAccountFromExternalIdentity(tx, r, &userProvidedData, "sso:"+ssoProvider.ID.String()); terr != nil { + return terr + } + if flowState != nil { + // This means that the callback is using PKCE + flowState.UserID = &(user.ID) + if terr := tx.Update(flowState); terr != nil { + return terr + } + } + + token, terr = a.issueRefreshToken(r, tx, user, models.SSOSAML, grantParams) + + if terr != nil { + return internalServerError("Unable to issue refresh token from SAML Assertion").WithInternalError(terr) + } + + return nil + }); err != nil { + return err + } + + if !utilities.IsRedirectURLValid(config, redirectTo) { + redirectTo = config.SiteURL + } + if flowState != nil { + // This means that the callback is using PKCE + // Set the flowState.AuthCode to the query param here + redirectTo, err = a.prepPKCERedirectURL(redirectTo, flowState.AuthCode) + if err != nil { + return err + } + http.Redirect(w, r, redirectTo, http.StatusFound) + return nil + + } + http.Redirect(w, r, token.AsRedirectURL(redirectTo, url.Values{}), http.StatusFound) + + return nil +} diff --git a/auth_v2.169.0/internal/api/samlassertion.go b/auth_v2.169.0/internal/api/samlassertion.go new file mode 100644 index 0000000..fdf9323 --- /dev/null +++ b/auth_v2.169.0/internal/api/samlassertion.go @@ -0,0 +1,188 @@ +package api + +import ( + "strings" + "time" + + "github.com/crewjam/saml" + "github.com/supabase/auth/internal/models" +) + +type SAMLAssertion struct { + *saml.Assertion +} + +const ( + SAMLSubjectIDAttributeName = "urn:oasis:names:tc:SAML:attribute:subject-id" +) + +// Attribute returns the first matching attribute value in the attribute +// statements where name equals the official SAML attribute Name or +// FriendlyName. Returns nil if such an attribute can't be found. +func (a *SAMLAssertion) Attribute(name string) []saml.AttributeValue { + var values []saml.AttributeValue + + for _, stmt := range a.AttributeStatements { + for _, attr := range stmt.Attributes { + if strings.EqualFold(attr.Name, name) || strings.EqualFold(attr.FriendlyName, name) { + values = append(values, attr.Values...) + } + } + } + + return values +} + +// UserID returns the best choice for a persistent user identifier on the +// Identity Provider side. Don't assume the format of the string returned, as +// it's Identity Provider specific. +func (a *SAMLAssertion) UserID() string { + // First we look up the SAMLSubjectIDAttributeName in the attribute + // section of the assertion, as this is the preferred way to + // persistently identify users in SAML 2.0. + // See: https://docs.oasis-open.org/security/saml-subject-id-attr/v1.0/cs01/saml-subject-id-attr-v1.0-cs01.html#_Toc536097226 + values := a.Attribute(SAMLSubjectIDAttributeName) + if len(values) > 0 { + return values[0].Value + } + + // Otherwise, fall back to the SubjectID value. 
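+	// The subject-id attribute takes precedence because a Subject NameID may
+	// be transient; in that case SubjectID reports isPersistent == false and
+	// no stable user identifier can be derived.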
+ subjectID, isPersistent := a.SubjectID() + if !isPersistent { + return "" + } + + return subjectID +} + +// SubjectID returns the user identifier present in the Subject section of +// the SAML assertion. Note that this way of identifying the Subject is +// generally superseded by the SAMLSubjectIDAttributeName assertion attribute, +// though it must be present in all assertions. It can have a few formats, of which +// the most important are: saml.EmailAddressNameIDFormat (meaning the user ID +// is an email address), saml.PersistentNameIDFormat (the user ID is an opaque +// string that does not change with each assertion, e.g. UUID), +// saml.TransientNameIDFormat (the user ID changes with each assertion -- can't +// be used to identify a user). The returned boolean indicates whether the user ID +// is persistent. If it's an email address, it's lowercased just in case. +func (a *SAMLAssertion) SubjectID() (string, bool) { + if a.Subject == nil { + return "", false + } + + if a.Subject.NameID == nil { + return "", false + } + + if a.Subject.NameID.Value == "" { + return "", false + } + + if a.Subject.NameID.Format == string(saml.EmailAddressNameIDFormat) { + return strings.ToLower(strings.TrimSpace(a.Subject.NameID.Value)), true + } + + // all other NameID formats are regarded as persistent + isPersistent := a.Subject.NameID.Format != string(saml.TransientNameIDFormat) + + return a.Subject.NameID.Value, isPersistent +} + +// Email returns the best guess for an email address. +func (a *SAMLAssertion) Email() string { + attributeNames := []string{ + "urn:oid:0.9.2342.19200300.100.1.3", + "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress", + "http://schemas.xmlsoap.org/claims/EmailAddress", + "mail", + "Mail", + "email", + } + + for _, name := range attributeNames { + for _, attr := range a.Attribute(name) { + if attr.Value != "" { + return attr.Value + } + } + } + + if a.Subject.NameID.Format == string(saml.EmailAddressNameIDFormat) { + return a.Subject.NameID.Value + } + + return "" +} + +// Process processes this assertion according to the SAMLAttributeMapping. Never returns nil. +func (a *SAMLAssertion) Process(mapping models.SAMLAttributeMapping) map[string]interface{} { + ret := make(map[string]interface{}) + + for key, mapper := range mapping.Keys { + names := []string{} + if mapper.Name != "" { + names = append(names, mapper.Name) + } + names = append(names, mapper.Names...) + + setKey := false + + for _, name := range names { + for _, attr := range a.Attribute(name) { + if attr.Value != "" { + setKey = true + + if mapper.Array { + if ret[key] == nil { + ret[key] = []string{} + } + + ret[key] = append(ret[key].([]string), attr.Value) + } else { + ret[key] = attr.Value + break + } + } + } + + if setKey { + break + } + } + + if !setKey && mapper.Default != nil { + ret[key] = mapper.Default + } + } + + return ret +} + +// NotBefore extracts the time before which this assertion should not be +// considered valid. +func (a *SAMLAssertion) NotBefore() time.Time { + if a.Conditions != nil && !a.Conditions.NotBefore.IsZero() { + return a.Conditions.NotBefore.UTC() + } + + return time.Time{} +} + +// NotAfter extracts the time at or after which this assertion should no +// longer be considered valid. 
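+// It returns the first non-zero SessionNotOnOrAfter found among the
+// assertion's AuthnStatements, or the zero time.Time when none is present
+// (in which case the session is created without a SAML-imposed expiry).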
+func (a *SAMLAssertion) NotAfter() time.Time { + var notOnOrAfter time.Time + + for _, statement := range a.AuthnStatements { + if statement.SessionNotOnOrAfter == nil { + continue + } + + notOnOrAfter = *statement.SessionNotOnOrAfter + if !notOnOrAfter.IsZero() { + break + } + } + + return notOnOrAfter +} diff --git a/auth_v2.169.0/internal/api/samlassertion_test.go b/auth_v2.169.0/internal/api/samlassertion_test.go new file mode 100644 index 0000000..b7461b2 --- /dev/null +++ b/auth_v2.169.0/internal/api/samlassertion_test.go @@ -0,0 +1,347 @@ +package api + +import ( + tst "testing" + + "encoding/xml" + + "github.com/crewjam/saml" + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/models" +) + +func TestSAMLAssertionUserID(t *tst.T) { + type spec struct { + xml string + userID string + } + + examples := []spec{ + { + xml: ` + + https://example.com/saml + + + transient-name-id + + + + + + + http://localhost:9999/saml/metadata + + + + + + urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport + + + + + +`, + userID: "", + }, + { + xml: ` + + https://example.com/saml + + + persistent-name-id + + + + + + + http://localhost:9999/saml/metadata + + + + + + urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport + + + + + +`, + userID: "persistent-name-id", + }, + { + xml: ` + + https://example.com/saml + + + name-id@example.com + + + + + + + http://localhost:9999/saml/metadata + + + + + + urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport + + + + + +`, + userID: "name-id@example.com", + }, + { + xml: ` + + https://example.com/saml + + + name-id@example.com + + + + + + + http://localhost:9999/saml/metadata + + + + + + urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport + + + + + subject-id + + + +`, + userID: "subject-id", + }, + } + + for i, example := range examples { + rawAssertion := saml.Assertion{} + require.NoError(t, xml.Unmarshal([]byte(example.xml), &rawAssertion)) + + assertion := SAMLAssertion{ + &rawAssertion, + } + + userID := assertion.UserID() + + require.Equal(t, userID, example.userID, "example %d had different user ID", i) + } +} + +func TestSAMLAssertionProcessing(t *tst.T) { + type spec struct { + desc string + xml string + mapping models.SAMLAttributeMapping + expected map[string]interface{} + } + + examples := []spec{ + { + desc: "valid attribute and mapping", + xml: ` + + + + someone@example.com + + + + `, + mapping: models.SAMLAttributeMapping{ + Keys: map[string]models.SAMLAttribute{ + "email": { + Name: "mail", + }, + }, + }, + expected: map[string]interface{}{ + "email": "someone@example.com", + }, + }, + { + desc: "valid attributes, use first attribute found in Names", + xml: ` + + + + old-soap@example.com + + + soap@example.com + + + + `, + mapping: models.SAMLAttributeMapping{ + Keys: map[string]models.SAMLAttribute{ + "email": { + Names: []string{ + "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress", + "http://schemas.xmlsoap.org/claims/EmailAddress", + }, + }, + }, + }, + expected: map[string]interface{}{ + "email": "soap@example.com", + }, + }, + { + desc: "valid groups attribute", + xml: ` + + + + group1 + group2 + + + soap@example.com + + + + `, + mapping: models.SAMLAttributeMapping{ + Keys: map[string]models.SAMLAttribute{ + "email": { + Names: []string{ + "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress", + "http://schemas.xmlsoap.org/claims/EmailAddress", + }, + }, + "groups": { + Name: "groups", + Array: true, + }, + }, + }, + expected: 
map[string]interface{}{ + "email": "soap@example.com", + "groups": []string{ + "group1", + "group2", + }, + }, + }, + { + desc: "missing attribute use default value", + xml: ` + + + + someone@example.com + + + +`, + mapping: models.SAMLAttributeMapping{ + Keys: map[string]models.SAMLAttribute{ + "email": { + Name: "email", + }, + "role": { + Default: "member", + }, + }, + }, + expected: map[string]interface{}{ + "email": "someone@example.com", + "role": "member", + }, + }, + { + desc: "use default value even if attribute exists but is not specified in mapping", + xml: ` + + + + someone@example.com + + + admin + + + + `, + mapping: models.SAMLAttributeMapping{ + Keys: map[string]models.SAMLAttribute{ + "email": { + Name: "mail", + }, + "role": { + Default: "member", + }, + }, + }, + expected: map[string]interface{}{ + "email": "someone@example.com", + "role": "member", + }, + }, + { + desc: "use value in XML when attribute exists and is specified in mapping", + xml: ` + + + + someone@example.com + + + admin + + + + `, + mapping: models.SAMLAttributeMapping{ + Keys: map[string]models.SAMLAttribute{ + "email": { + Name: "mail", + }, + "role": { + Name: "role", + Default: "member", + }, + }, + }, + expected: map[string]interface{}{ + "email": "someone@example.com", + "role": "admin", + }, + }, + } + + for i, example := range examples { + t.Run(example.desc, func(t *tst.T) { + rawAssertion := saml.Assertion{} + require.NoError(t, xml.Unmarshal([]byte(example.xml), &rawAssertion)) + + assertion := SAMLAssertion{ + &rawAssertion, + } + + result := assertion.Process(example.mapping) + require.Equal(t, example.expected, result, "example %d had different processing", i) + }) + } +} diff --git a/auth_v2.169.0/internal/api/settings.go b/auth_v2.169.0/internal/api/settings.go new file mode 100644 index 0000000..bc2f386 --- /dev/null +++ b/auth_v2.169.0/internal/api/settings.go @@ -0,0 +1,79 @@ +package api + +import "net/http" + +type ProviderSettings struct { + AnonymousUsers bool `json:"anonymous_users"` + Apple bool `json:"apple"` + Azure bool `json:"azure"` + Bitbucket bool `json:"bitbucket"` + Discord bool `json:"discord"` + Facebook bool `json:"facebook"` + Figma bool `json:"figma"` + Fly bool `json:"fly"` + GitHub bool `json:"github"` + GitLab bool `json:"gitlab"` + Google bool `json:"google"` + Keycloak bool `json:"keycloak"` + Kakao bool `json:"kakao"` + Linkedin bool `json:"linkedin"` + LinkedinOIDC bool `json:"linkedin_oidc"` + Notion bool `json:"notion"` + Spotify bool `json:"spotify"` + Slack bool `json:"slack"` + SlackOIDC bool `json:"slack_oidc"` + WorkOS bool `json:"workos"` + Twitch bool `json:"twitch"` + Twitter bool `json:"twitter"` + Email bool `json:"email"` + Phone bool `json:"phone"` + Zoom bool `json:"zoom"` +} + +type Settings struct { + ExternalProviders ProviderSettings `json:"external"` + DisableSignup bool `json:"disable_signup"` + MailerAutoconfirm bool `json:"mailer_autoconfirm"` + PhoneAutoconfirm bool `json:"phone_autoconfirm"` + SmsProvider string `json:"sms_provider"` + SAMLEnabled bool `json:"saml_enabled"` +} + +func (a *API) Settings(w http.ResponseWriter, r *http.Request) error { + config := a.config + + return sendJSON(w, http.StatusOK, &Settings{ + ExternalProviders: ProviderSettings{ + AnonymousUsers: config.External.AnonymousUsers.Enabled, + Apple: config.External.Apple.Enabled, + Azure: config.External.Azure.Enabled, + Bitbucket: config.External.Bitbucket.Enabled, + Discord: config.External.Discord.Enabled, + Facebook: config.External.Facebook.Enabled, + 
Figma: config.External.Figma.Enabled, + Fly: config.External.Fly.Enabled, + GitHub: config.External.Github.Enabled, + GitLab: config.External.Gitlab.Enabled, + Google: config.External.Google.Enabled, + Kakao: config.External.Kakao.Enabled, + Keycloak: config.External.Keycloak.Enabled, + Linkedin: config.External.Linkedin.Enabled, + LinkedinOIDC: config.External.LinkedinOIDC.Enabled, + Notion: config.External.Notion.Enabled, + Spotify: config.External.Spotify.Enabled, + Slack: config.External.Slack.Enabled, + SlackOIDC: config.External.SlackOIDC.Enabled, + Twitch: config.External.Twitch.Enabled, + Twitter: config.External.Twitter.Enabled, + WorkOS: config.External.WorkOS.Enabled, + Email: config.External.Email.Enabled, + Phone: config.External.Phone.Enabled, + Zoom: config.External.Zoom.Enabled, + }, + DisableSignup: config.DisableSignup, + MailerAutoconfirm: config.Mailer.Autoconfirm, + PhoneAutoconfirm: config.Sms.Autoconfirm, + SmsProvider: config.Sms.Provider, + SAMLEnabled: config.SAML.Enabled, + }) +} diff --git a/auth_v2.169.0/internal/api/settings_test.go b/auth_v2.169.0/internal/api/settings_test.go new file mode 100644 index 0000000..767bcf7 --- /dev/null +++ b/auth_v2.169.0/internal/api/settings_test.go @@ -0,0 +1,73 @@ +package api + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSettings_DefaultProviders(t *testing.T) { + api, _, err := setupAPIForTest() + require.NoError(t, err) + + // Setup request + req := httptest.NewRequest(http.MethodGet, "http://localhost/settings", nil) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + api.handler.ServeHTTP(w, req) + require.Equal(t, w.Code, http.StatusOK) + resp := Settings{} + require.NoError(t, json.NewDecoder(w.Body).Decode(&resp)) + + p := resp.ExternalProviders + + require.False(t, p.Phone) + require.True(t, p.Email) + require.True(t, p.Azure) + require.True(t, p.Bitbucket) + require.True(t, p.Discord) + require.True(t, p.Facebook) + require.True(t, p.Notion) + require.True(t, p.Spotify) + require.True(t, p.Slack) + require.True(t, p.SlackOIDC) + require.True(t, p.Google) + require.True(t, p.Kakao) + require.True(t, p.Keycloak) + require.True(t, p.Linkedin) + require.True(t, p.LinkedinOIDC) + require.True(t, p.GitHub) + require.True(t, p.GitLab) + require.True(t, p.Twitch) + require.True(t, p.WorkOS) + require.True(t, p.Zoom) + +} + +func TestSettings_EmailDisabled(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + config.External.Email.Enabled = false + + // Setup request + req := httptest.NewRequest(http.MethodGet, "http://localhost/settings", nil) + req.Header.Set("Content-Type", "application/json") + + ctx := context.Background() + req = req.WithContext(ctx) + + w := httptest.NewRecorder() + api.handler.ServeHTTP(w, req) + require.Equal(t, w.Code, http.StatusOK) + resp := Settings{} + require.NoError(t, json.NewDecoder(w.Body).Decode(&resp)) + + p := resp.ExternalProviders + require.False(t, p.Email) +} diff --git a/auth_v2.169.0/internal/api/signup.go b/auth_v2.169.0/internal/api/signup.go new file mode 100644 index 0000000..1c74da6 --- /dev/null +++ b/auth_v2.169.0/internal/api/signup.go @@ -0,0 +1,390 @@ +package api + +import ( + "context" + "net/http" + "time" + + "github.com/fatih/structs" + "github.com/gofrs/uuid" + "github.com/pkg/errors" + "github.com/supabase/auth/internal/api/provider" + "github.com/supabase/auth/internal/api/sms_provider" + 
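+	// metering records password-based logins for usage accounting; see the
+	// metering.RecordLogin call at the end of Signup.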
"github.com/supabase/auth/internal/metering" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +// SignupParams are the parameters the Signup endpoint accepts +type SignupParams struct { + Email string `json:"email"` + Phone string `json:"phone"` + Password string `json:"password"` + Data map[string]interface{} `json:"data"` + Provider string `json:"-"` + Aud string `json:"-"` + Channel string `json:"channel"` + CodeChallengeMethod string `json:"code_challenge_method"` + CodeChallenge string `json:"code_challenge"` +} + +func (a *API) validateSignupParams(ctx context.Context, p *SignupParams) error { + config := a.config + + if p.Password == "" { + return badRequestError(ErrorCodeValidationFailed, "Signup requires a valid password") + } + + if err := a.checkPasswordStrength(ctx, p.Password); err != nil { + return err + } + if p.Email != "" && p.Phone != "" { + return badRequestError(ErrorCodeValidationFailed, "Only an email address or phone number should be provided on signup.") + } + if p.Provider == "phone" && !sms_provider.IsValidMessageChannel(p.Channel, config) { + return badRequestError(ErrorCodeValidationFailed, InvalidChannelError) + } + // PKCE not needed as phone signups already return access token in body + if p.Phone != "" && p.CodeChallenge != "" { + return badRequestError(ErrorCodeValidationFailed, "PKCE not supported for phone signups") + } + if err := validatePKCEParams(p.CodeChallengeMethod, p.CodeChallenge); err != nil { + return err + } + + return nil +} + +func (p *SignupParams) ConfigureDefaults() { + if p.Email != "" { + p.Provider = "email" + } else if p.Phone != "" { + p.Provider = "phone" + } + if p.Data == nil { + p.Data = make(map[string]interface{}) + } + + // For backwards compatibility, we default to SMS if params Channel is not specified + if p.Phone != "" && p.Channel == "" { + p.Channel = sms_provider.SMSProvider + } +} + +func (params *SignupParams) ToUserModel(isSSOUser bool) (user *models.User, err error) { + switch params.Provider { + case "email": + user, err = models.NewUser("", params.Email, params.Password, params.Aud, params.Data) + case "phone": + user, err = models.NewUser(params.Phone, "", params.Password, params.Aud, params.Data) + case "anonymous": + user, err = models.NewUser("", "", "", params.Aud, params.Data) + user.IsAnonymous = true + default: + // handles external provider case + user, err = models.NewUser("", params.Email, params.Password, params.Aud, params.Data) + } + if err != nil { + err = internalServerError("Database error creating user").WithInternalError(err) + return + } + user.IsSSOUser = isSSOUser + if user.AppMetaData == nil { + user.AppMetaData = make(map[string]interface{}) + } + + user.Identities = make([]models.Identity, 0) + + if params.Provider != "anonymous" { + // TODO: Deprecate "provider" field + user.AppMetaData["provider"] = params.Provider + + user.AppMetaData["providers"] = []string{params.Provider} + } + + return user, nil +} + +// Signup is the endpoint for registering a new user +func (a *API) Signup(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + config := a.config + db := a.db.WithContext(ctx) + + if config.DisableSignup { + return unprocessableEntityError(ErrorCodeSignupDisabled, "Signups not allowed for this instance") + } + + params := &SignupParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + + params.ConfigureDefaults() + + if err := a.validateSignupParams(ctx, params); err != nil { + return err + 
} + + var err error + flowType := getFlowFromChallenge(params.CodeChallenge) + + var user *models.User + var grantParams models.GrantParams + + grantParams.FillGrantParams(r) + + params.Aud = a.requestAud(ctx, r) + + switch params.Provider { + case "email": + if !config.External.Email.Enabled { + return badRequestError(ErrorCodeEmailProviderDisabled, "Email signups are disabled") + } + params.Email, err = a.validateEmail(params.Email) + if err != nil { + return err + } + user, err = models.IsDuplicatedEmail(db, params.Email, params.Aud, nil) + case "phone": + if !config.External.Phone.Enabled { + return badRequestError(ErrorCodePhoneProviderDisabled, "Phone signups are disabled") + } + params.Phone, err = validatePhone(params.Phone) + if err != nil { + return err + } + user, err = models.FindUserByPhoneAndAudience(db, params.Phone, params.Aud) + default: + msg := "" + if config.External.Email.Enabled && config.External.Phone.Enabled { + msg = "Sign up only available with email or phone provider" + } else if config.External.Email.Enabled { + msg = "Sign up only available with email provider" + } else if config.External.Phone.Enabled { + msg = "Sign up only available with phone provider" + } else { + msg = "Sign up with this provider not possible" + } + + return badRequestError(ErrorCodeValidationFailed, msg) + } + + if err != nil && !models.IsNotFoundError(err) { + return internalServerError("Database error finding user").WithInternalError(err) + } + + var signupUser *models.User + if user == nil { + // always call this outside of a database transaction as this method + // can be computationally hard and block due to password hashing + signupUser, err = params.ToUserModel(false /* <- isSSOUser */) + if err != nil { + return err + } + } + + err = db.Transaction(func(tx *storage.Connection) error { + var terr error + if user != nil { + if (params.Provider == "email" && user.IsConfirmed()) || (params.Provider == "phone" && user.IsPhoneConfirmed()) { + return UserExistsError + } + // do not update the user because we can't be sure of their claimed identity + } else { + user, terr = a.signupNewUser(tx, signupUser) + if terr != nil { + return terr + } + } + identity, terr := models.FindIdentityByIdAndProvider(tx, user.ID.String(), params.Provider) + if terr != nil { + if !models.IsNotFoundError(terr) { + return terr + } + identityData := structs.Map(provider.Claims{ + Subject: user.ID.String(), + Email: user.GetEmail(), + }) + for k, v := range params.Data { + if _, ok := identityData[k]; !ok { + identityData[k] = v + } + } + identity, terr = a.createNewIdentity(tx, user, params.Provider, identityData) + if terr != nil { + return terr + } + if terr := user.RemoveUnconfirmedIdentities(tx, identity); terr != nil { + return terr + } + } + user.Identities = []models.Identity{*identity} + + if params.Provider == "email" && !user.IsConfirmed() { + if config.Mailer.Autoconfirm { + if terr = models.NewAuditLogEntry(r, tx, user, models.UserSignedUpAction, "", map[string]interface{}{ + "provider": params.Provider, + }); terr != nil { + return terr + } + if terr = user.Confirm(tx); terr != nil { + return internalServerError("Database error updating user").WithInternalError(terr) + } + } else { + if terr = models.NewAuditLogEntry(r, tx, user, models.UserConfirmationRequestedAction, "", map[string]interface{}{ + "provider": params.Provider, + }); terr != nil { + return terr + } + if isPKCEFlow(flowType) { + _, terr := generateFlowState(tx, params.Provider, models.EmailSignup, params.CodeChallengeMethod, 
params.CodeChallenge, &user.ID) + if terr != nil { + return terr + } + } + if terr = a.sendConfirmation(r, tx, user, flowType); terr != nil { + return terr + } + } + } else if params.Provider == "phone" && !user.IsPhoneConfirmed() { + if config.Sms.Autoconfirm { + if terr = models.NewAuditLogEntry(r, tx, user, models.UserSignedUpAction, "", map[string]interface{}{ + "provider": params.Provider, + "channel": params.Channel, + }); terr != nil { + return terr + } + if terr = user.ConfirmPhone(tx); terr != nil { + return internalServerError("Database error updating user").WithInternalError(terr) + } + } else { + if terr = models.NewAuditLogEntry(r, tx, user, models.UserConfirmationRequestedAction, "", map[string]interface{}{ + "provider": params.Provider, + }); terr != nil { + return terr + } + if _, terr := a.sendPhoneConfirmation(r, tx, user, params.Phone, phoneConfirmationOtp, params.Channel); terr != nil { + return terr + } + } + } + + return nil + }) + + if err != nil { + if errors.Is(err, UserExistsError) { + err = db.Transaction(func(tx *storage.Connection) error { + if terr := models.NewAuditLogEntry(r, tx, user, models.UserRepeatedSignUpAction, "", map[string]interface{}{ + "provider": params.Provider, + }); terr != nil { + return terr + } + return nil + }) + if err != nil { + return err + } + if config.Mailer.Autoconfirm || config.Sms.Autoconfirm { + return unprocessableEntityError(ErrorCodeUserAlreadyExists, "User already registered") + } + sanitizedUser, err := sanitizeUser(user, params) + if err != nil { + return err + } + return sendJSON(w, http.StatusOK, sanitizedUser) + } + return err + } + + // handles case where Mailer.Autoconfirm is true or Phone.Autoconfirm is true + if user.IsConfirmed() || user.IsPhoneConfirmed() { + var token *AccessTokenResponse + err = db.Transaction(func(tx *storage.Connection) error { + var terr error + if terr = models.NewAuditLogEntry(r, tx, user, models.LoginAction, "", map[string]interface{}{ + "provider": params.Provider, + }); terr != nil { + return terr + } + token, terr = a.issueRefreshToken(r, tx, user, models.PasswordGrant, grantParams) + + if terr != nil { + return terr + } + return nil + }) + if err != nil { + return err + } + metering.RecordLogin("password", user.ID) + return sendJSON(w, http.StatusOK, token) + } + if user.HasBeenInvited() { + // Remove sensitive fields + user.UserMetaData = map[string]interface{}{} + user.Identities = []models.Identity{} + } + return sendJSON(w, http.StatusOK, user) +} + +// sanitizeUser removes all user sensitive information from the user object +// Should be used whenever we want to prevent information about whether a user is registered or not from leaking +func sanitizeUser(u *models.User, params *SignupParams) (*models.User, error) { + now := time.Now() + + u.ID = uuid.Must(uuid.NewV4()) + + u.Role, u.EmailChange = "", "" + u.CreatedAt, u.UpdatedAt, u.ConfirmationSentAt = now, now, &now + u.LastSignInAt, u.ConfirmedAt, u.EmailChangeSentAt, u.EmailConfirmedAt, u.PhoneConfirmedAt = nil, nil, nil, nil, nil + u.Identities = make([]models.Identity, 0) + u.UserMetaData = params.Data + u.Aud = params.Aud + + // sanitize app_metadata + u.AppMetaData = map[string]interface{}{ + "provider": params.Provider, + "providers": []string{params.Provider}, + } + + // sanitize param fields + switch params.Provider { + case "email": + u.Phone = "" + case "phone": + u.Email = "" + default: + u.Phone, u.Email = "", "" + } + + return u, nil +} + +func (a *API) signupNewUser(conn *storage.Connection, user *models.User) 
(*models.User, error) { + config := a.config + + err := conn.Transaction(func(tx *storage.Connection) error { + var terr error + if terr = tx.Create(user); terr != nil { + return internalServerError("Database error saving new user").WithInternalError(terr) + } + if terr = user.SetRole(tx, config.JWT.DefaultGroupName); terr != nil { + return internalServerError("Database error updating user").WithInternalError(terr) + } + return nil + }) + if err != nil { + return nil, err + } + + // there may be triggers or generated column values in the database that will modify the + // user data as it is being inserted. thus we load the user object + // again to fetch those changes. + if err := conn.Reload(user); err != nil { + return nil, internalServerError("Database error loading user after sign-up").WithInternalError(err) + } + + return user, nil +} diff --git a/auth_v2.169.0/internal/api/signup_test.go b/auth_v2.169.0/internal/api/signup_test.go new file mode 100644 index 0000000..3f47832 --- /dev/null +++ b/auth_v2.169.0/internal/api/signup_test.go @@ -0,0 +1,153 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + mail "github.com/supabase/auth/internal/mailer" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" +) + +type SignupTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration +} + +func TestSignup(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := &SignupTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *SignupTestSuite) SetupTest() { + models.TruncateAll(ts.API.db) +} + +// TestSignup tests API /signup route +func (ts *SignupTestSuite) TestSignup() { + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test@example.com", + "password": "test123", + "data": map[string]interface{}{ + "a": 1, + }, + })) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "/signup", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup response recorder + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + require.Equal(ts.T(), http.StatusOK, w.Code) + + data := models.User{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + assert.Equal(ts.T(), "test@example.com", data.GetEmail()) + assert.Equal(ts.T(), ts.Config.JWT.Aud, data.Aud) + assert.Equal(ts.T(), 1.0, data.UserMetaData["a"]) + assert.Equal(ts.T(), "email", data.AppMetaData["provider"]) + assert.Equal(ts.T(), []interface{}{"email"}, data.AppMetaData["providers"]) +} + +// TestSignupTwice checks to make sure the same email cannot be registered twice +func (ts *SignupTestSuite) TestSignupTwice() { + // Request body + var buffer bytes.Buffer + + encode := func() { + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test1@example.com", + "password": "test123", + "data": map[string]interface{}{ + "a": 1, + }, + })) + } + + encode() + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/signup", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup response recorder + w := httptest.NewRecorder() + y := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(y, req) + 
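+	// The first request above registered the user; confirm them directly in
+	// the database so the second signup below exercises the repeated-signup
+	// (sanitized response) path.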
u, err := models.FindUserByEmailAndAudience(ts.API.db, "test1@example.com", ts.Config.JWT.Aud) + if err == nil { + require.NoError(ts.T(), u.Confirm(ts.API.db)) + } + + encode() + ts.API.handler.ServeHTTP(w, req) + + data := models.User{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + require.Equal(ts.T(), http.StatusOK, w.Code) + + assert.NotEqual(ts.T(), u.ID, data.ID) + assert.Equal(ts.T(), "test1@example.com", data.GetEmail()) + assert.Equal(ts.T(), ts.Config.JWT.Aud, data.Aud) + assert.Equal(ts.T(), 1.0, data.UserMetaData["a"]) + assert.Equal(ts.T(), "email", data.AppMetaData["provider"]) + assert.Equal(ts.T(), []interface{}{"email"}, data.AppMetaData["providers"]) +} + +func (ts *SignupTestSuite) TestVerifySignup() { + user, err := models.NewUser("123456789", "test@example.com", "testing", ts.Config.JWT.Aud, nil) + user.ConfirmationToken = "asdf3" + now := time.Now() + user.ConfirmationSentAt = &now + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.API.db.Create(user)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, user.ID, user.GetEmail(), user.ConfirmationToken, models.ConfirmationToken)) + + // Find test user + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + // Setup request + reqUrl := fmt.Sprintf("http://localhost/verify?type=%s&token=%s", mail.SignupVerification, u.ConfirmationToken) + req := httptest.NewRequest(http.MethodGet, reqUrl, nil) + + // Setup response recorder + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusSeeOther, w.Code) + + urlVal, err := url.Parse(w.Result().Header.Get("Location")) + require.NoError(ts.T(), err) + v, err := url.ParseQuery(urlVal.Fragment) + require.NoError(ts.T(), err) + require.NotEmpty(ts.T(), v.Get("access_token")) + require.NotEmpty(ts.T(), v.Get("expires_in")) + require.NotEmpty(ts.T(), v.Get("refresh_token")) +} diff --git a/auth_v2.169.0/internal/api/sms_provider/messagebird.go b/auth_v2.169.0/internal/api/sms_provider/messagebird.go new file mode 100644 index 0000000..05f7939 --- /dev/null +++ b/auth_v2.169.0/internal/api/sms_provider/messagebird.go @@ -0,0 +1,115 @@ +package sms_provider + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/utilities" +) + +const ( + defaultMessagebirdApiBase = "https://rest.messagebird.com" +) + +type MessagebirdProvider struct { + Config *conf.MessagebirdProviderConfiguration + APIPath string +} + +type MessagebirdResponseRecipients struct { + TotalSentCount int `json:"totalSentCount"` +} + +type MessagebirdResponse struct { + ID string `json:"id"` + Recipients MessagebirdResponseRecipients `json:"recipients"` +} + +type MessagebirdError struct { + Code int `json:"code"` + Description string `json:"description"` + Parameter string `json:"parameter"` +} + +type MessagebirdErrResponse struct { + Errors []MessagebirdError `json:"errors"` +} + +func (t MessagebirdErrResponse) Error() string { + return t.Errors[0].Description +} + +// Creates a SmsProvider with the Messagebird Config +func NewMessagebirdProvider(config conf.MessagebirdProviderConfiguration) (SmsProvider, error) { + if err := config.Validate(); err != nil { + return nil, err + } + + apiPath := defaultMessagebirdApiBase + "/messages" + return &MessagebirdProvider{ + Config: &config, + APIPath: apiPath, + }, nil +} + +func (t *MessagebirdProvider) 
SendMessage(phone, message, channel, otp string) (string, error) { + switch channel { + case SMSProvider: + return t.SendSms(phone, message) + default: + return "", fmt.Errorf("channel type %q is not supported for Messagebird", channel) + } +} + +// Send an SMS containing the OTP with Messagebird's API +func (t *MessagebirdProvider) SendSms(phone string, message string) (string, error) { + body := url.Values{ + "originator": {t.Config.Originator}, + "body": {message}, + "recipients": {phone}, + "type": {"sms"}, + "datacoding": {"unicode"}, + } + + client := &http.Client{Timeout: defaultTimeout} + r, err := http.NewRequest("POST", t.APIPath, strings.NewReader(body.Encode())) + if err != nil { + return "", err + } + r.Header.Add("Content-Type", "application/x-www-form-urlencoded") + r.Header.Add("Authorization", "AccessKey "+t.Config.AccessKey) + res, err := client.Do(r) + if err != nil { + return "", err + } + + if res.StatusCode == http.StatusBadRequest || res.StatusCode == http.StatusForbidden || res.StatusCode == http.StatusUnauthorized || res.StatusCode == http.StatusUnprocessableEntity { + resp := &MessagebirdErrResponse{} + if err := json.NewDecoder(res.Body).Decode(resp); err != nil { + return "", err + } + return "", resp + } + defer utilities.SafeClose(res.Body) + + // validate sms status + resp := &MessagebirdResponse{} + derr := json.NewDecoder(res.Body).Decode(resp) + if derr != nil { + return "", derr + } + + if resp.Recipients.TotalSentCount == 0 { + return "", fmt.Errorf("messagebird error: total sent count is 0") + } + + return resp.ID, nil +} + +func (t *MessagebirdProvider) VerifyOTP(phone, code string) error { + return fmt.Errorf("VerifyOTP is not supported for Messagebird") +} diff --git a/auth_v2.169.0/internal/api/sms_provider/sms_provider.go b/auth_v2.169.0/internal/api/sms_provider/sms_provider.go new file mode 100644 index 0000000..103db4f --- /dev/null +++ b/auth_v2.169.0/internal/api/sms_provider/sms_provider.go @@ -0,0 +1,70 @@ +package sms_provider + +import ( + "fmt" + "log" + "os" + "time" + + "github.com/supabase/auth/internal/conf" +) + +// overrides the SmsProvider set to always return the mock provider +var MockProvider SmsProvider = nil + +var defaultTimeout time.Duration = time.Second * 10 + +const SMSProvider = "sms" +const WhatsappProvider = "whatsapp" + +func init() { + timeoutStr := os.Getenv("GOTRUE_INTERNAL_HTTP_TIMEOUT") + if timeoutStr != "" { + if timeout, err := time.ParseDuration(timeoutStr); err != nil { + log.Fatalf("error loading GOTRUE_INTERNAL_HTTP_TIMEOUT: %v", err.Error()) + } else if timeout != 0 { + defaultTimeout = timeout + } + } +} + +type SmsProvider interface { + SendMessage(phone, message, channel, otp string) (string, error) + VerifyOTP(phone, token string) error +} + +func GetSmsProvider(config conf.GlobalConfiguration) (SmsProvider, error) { + if MockProvider != nil { + return MockProvider, nil + } + + switch name := config.Sms.Provider; name { + case "twilio": + return NewTwilioProvider(config.Sms.Twilio) + case "messagebird": + return NewMessagebirdProvider(config.Sms.Messagebird) + case "textlocal": + return NewTextlocalProvider(config.Sms.Textlocal) + case "vonage": + return NewVonageProvider(config.Sms.Vonage) + case "twilio_verify": + return NewTwilioVerifyProvider(config.Sms.TwilioVerify) + default: + return nil, fmt.Errorf("sms Provider %s could not be found", name) + } +} + +func IsValidMessageChannel(channel string, config *conf.GlobalConfiguration) bool { + if config.Hook.SendSMS.Enabled { + // channel doesn't 
matter if SMS hook is enabled + return true + } + switch channel { + case SMSProvider: + return true + case WhatsappProvider: + return config.Sms.Provider == "twilio" || config.Sms.Provider == "twilio_verify" + default: + return false + } +} diff --git a/auth_v2.169.0/internal/api/sms_provider/sms_provider_test.go b/auth_v2.169.0/internal/api/sms_provider/sms_provider_test.go new file mode 100644 index 0000000..e5b5216 --- /dev/null +++ b/auth_v2.169.0/internal/api/sms_provider/sms_provider_test.go @@ -0,0 +1,287 @@ +package sms_provider + +import ( + "encoding/base64" + "fmt" + "net/http" + "net/url" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "gopkg.in/h2non/gock.v1" +) + +var handleApiRequest func(*http.Request) (*http.Response, error) + +type SmsProviderTestSuite struct { + suite.Suite + Config *conf.GlobalConfiguration +} + +type MockHttpClient struct { + mock.Mock +} + +func (m *MockHttpClient) Do(req *http.Request) (*http.Response, error) { + return handleApiRequest(req) +} + +func TestSmsProvider(t *testing.T) { + ts := &SmsProviderTestSuite{ + Config: &conf.GlobalConfiguration{ + Sms: conf.SmsProviderConfiguration{ + Twilio: conf.TwilioProviderConfiguration{ + AccountSid: "test_account_sid", + AuthToken: "test_auth_token", + MessageServiceSid: "test_message_service_id", + }, + TwilioVerify: conf.TwilioVerifyProviderConfiguration{ + AccountSid: "test_account_sid", + AuthToken: "test_auth_token", + MessageServiceSid: "test_message_service_id", + }, + Messagebird: conf.MessagebirdProviderConfiguration{ + AccessKey: "test_access_key", + Originator: "test_originator", + }, + Vonage: conf.VonageProviderConfiguration{ + ApiKey: "test_api_key", + ApiSecret: "test_api_secret", + From: "test_from", + }, + Textlocal: conf.TextlocalProviderConfiguration{ + ApiKey: "test_api_key", + Sender: "test_sender", + }, + }, + }, + } + suite.Run(t, ts) +} + +func (ts *SmsProviderTestSuite) TestTwilioSendSms() { + defer gock.Off() + provider, err := NewTwilioProvider(ts.Config.Sms.Twilio) + require.NoError(ts.T(), err) + + twilioProvider, ok := provider.(*TwilioProvider) + require.Equal(ts.T(), true, ok) + + phone := "123456789" + message := "This is the sms code: 123456" + + body := url.Values{ + "To": {"+" + phone}, + "Channel": {"sms"}, + "From": {twilioProvider.Config.MessageServiceSid}, + "Body": {message}, + } + + cases := []struct { + Desc string + TwilioResponse *gock.Response + ExpectedError error + OTP string + }{ + { + Desc: "Successfully sent sms", + TwilioResponse: gock.New(twilioProvider.APIPath).Post(""). + MatchHeader("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(twilioProvider.Config.AccountSid+":"+twilioProvider.Config.AuthToken))). + MatchType("url").BodyString(body.Encode()). + Reply(200).JSON(SmsStatus{ + To: "+" + phone, + From: twilioProvider.Config.MessageServiceSid, + Status: "sent", + Body: message, + MessageSID: "abcdef", + }), + OTP: "123456", + ExpectedError: nil, + }, + { + Desc: "Sms status is failed / undelivered", + TwilioResponse: gock.New(twilioProvider.APIPath).Post(""). + MatchHeader("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(twilioProvider.Config.AccountSid+":"+twilioProvider.Config.AuthToken))). + MatchType("url").BodyString(body.Encode()). 
+ Reply(200).JSON(SmsStatus{ + ErrorMessage: "failed to send sms", + ErrorCode: "401", + Status: "failed", + MessageSID: "abcdef", + }), + ExpectedError: fmt.Errorf("twilio error: %v %v for message %v", "failed to send sms", "401", "abcdef"), + }, + { + Desc: "Non-2xx status code returned", + TwilioResponse: gock.New(twilioProvider.APIPath).Post(""). + MatchHeader("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(twilioProvider.Config.AccountSid+":"+twilioProvider.Config.AuthToken))). + MatchType("url").BodyString(body.Encode()). + Reply(500).JSON(twilioErrResponse{ + Code: 500, + Message: "Internal server error", + MoreInfo: "error", + Status: 500, + }), + OTP: "123456", + ExpectedError: &twilioErrResponse{ + Code: 500, + Message: "Internal server error", + MoreInfo: "error", + Status: 500, + }, + }, + } + + for _, c := range cases { + ts.Run(c.Desc, func() { + _, err = twilioProvider.SendSms(phone, message, SMSProvider, c.OTP) + require.Equal(ts.T(), c.ExpectedError, err) + }) + } +} + +func (ts *SmsProviderTestSuite) TestMessagebirdSendSms() { + defer gock.Off() + provider, err := NewMessagebirdProvider(ts.Config.Sms.Messagebird) + require.NoError(ts.T(), err) + + messagebirdProvider, ok := provider.(*MessagebirdProvider) + require.Equal(ts.T(), true, ok) + + phone := "123456789" + message := "This is the sms code: 123456" + body := url.Values{ + "originator": {messagebirdProvider.Config.Originator}, + "body": {message}, + "recipients": {phone}, + "type": {"sms"}, + "datacoding": {"unicode"}, + } + gock.New(messagebirdProvider.APIPath).Post("").MatchHeader("Authorization", "AccessKey "+messagebirdProvider.Config.AccessKey).MatchType("url").BodyString(body.Encode()).Reply(200).JSON(MessagebirdResponse{ + Recipients: MessagebirdResponseRecipients{ + TotalSentCount: 1, + }, + }) + + _, err = messagebirdProvider.SendSms(phone, message) + require.NoError(ts.T(), err) +} + +func (ts *SmsProviderTestSuite) TestVonageSendSms() { + defer gock.Off() + provider, err := NewVonageProvider(ts.Config.Sms.Vonage) + require.NoError(ts.T(), err) + + vonageProvider, ok := provider.(*VonageProvider) + require.Equal(ts.T(), true, ok) + + phone := "123456789" + message := "This is the sms code: 123456" + + body := url.Values{ + "from": {vonageProvider.Config.From}, + "to": {phone}, + "text": {message}, + "api_key": {vonageProvider.Config.ApiKey}, + "api_secret": {vonageProvider.Config.ApiSecret}, + } + + gock.New(vonageProvider.APIPath).Post("").MatchType("url").BodyString(body.Encode()).Reply(200).JSON(VonageResponse{ + Messages: []VonageResponseMessage{ + {Status: "0"}, + }, + }) + + _, err = vonageProvider.SendSms(phone, message) + require.NoError(ts.T(), err) +} + +func (ts *SmsProviderTestSuite) TestTextLocalSendSms() { + defer gock.Off() + provider, err := NewTextlocalProvider(ts.Config.Sms.Textlocal) + require.NoError(ts.T(), err) + + textlocalProvider, ok := provider.(*TextlocalProvider) + require.Equal(ts.T(), true, ok) + + phone := "123456789" + message := "This is the sms code: 123456" + body := url.Values{ + "sender": {textlocalProvider.Config.Sender}, + "apikey": {textlocalProvider.Config.ApiKey}, + "message": {message}, + "numbers": {phone}, + } + + gock.New(textlocalProvider.APIPath).Post("").MatchType("url").BodyString(body.Encode()).Reply(200).JSON(TextlocalResponse{ + Status: "success", + Errors: []TextlocalError{}, + }) + + _, err = textlocalProvider.SendSms(phone, message) + require.NoError(ts.T(), err) +} +func (ts *SmsProviderTestSuite) TestTwilioVerifySendSms() { + 
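// Twilio Verify generates and delivers the OTP on Twilio's side, so unlike
+	// the Programmable Messaging test above, the request body here carries only
+	// the destination and channel; there is no message body to match.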
defer gock.Off() + provider, err := NewTwilioVerifyProvider(ts.Config.Sms.TwilioVerify) + require.NoError(ts.T(), err) + + twilioVerifyProvider, ok := provider.(*TwilioVerifyProvider) + require.Equal(ts.T(), true, ok) + + phone := "123456789" + message := "This is the sms code: 123456" + + body := url.Values{ + "To": {"+" + phone}, + "Channel": {"sms"}, + } + + cases := []struct { + Desc string + TwilioResponse *gock.Response + ExpectedError error + }{ + { + Desc: "Successfully sent sms", + TwilioResponse: gock.New(twilioVerifyProvider.APIPath).Post(""). + MatchHeader("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(twilioVerifyProvider.Config.AccountSid+":"+twilioVerifyProvider.Config.AuthToken))). + MatchType("url").BodyString(body.Encode()). + Reply(200).JSON(SmsStatus{ + To: "+" + phone, + From: twilioVerifyProvider.Config.MessageServiceSid, + Status: "sent", + Body: message, + }), + ExpectedError: nil, + }, + { + Desc: "Non-2xx status code returned", + TwilioResponse: gock.New(twilioVerifyProvider.APIPath).Post(""). + MatchHeader("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(twilioVerifyProvider.Config.AccountSid+":"+twilioVerifyProvider.Config.AuthToken))). + MatchType("url").BodyString(body.Encode()). + Reply(500).JSON(twilioErrResponse{ + Code: 500, + Message: "Internal server error", + MoreInfo: "error", + Status: 500, + }), + ExpectedError: &twilioErrResponse{ + Code: 500, + Message: "Internal server error", + MoreInfo: "error", + Status: 500, + }, + }, + } + + for _, c := range cases { + ts.Run(c.Desc, func() { + _, err = twilioVerifyProvider.SendSms(phone, message, SMSProvider) + require.Equal(ts.T(), c.ExpectedError, err) + }) + } +} diff --git a/auth_v2.169.0/internal/api/sms_provider/textlocal.go b/auth_v2.169.0/internal/api/sms_provider/textlocal.go new file mode 100644 index 0000000..ef07a6f --- /dev/null +++ b/auth_v2.169.0/internal/api/sms_provider/textlocal.go @@ -0,0 +1,107 @@ +package sms_provider + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/utilities" +) + +const ( + defaultTextLocalApiBase = "https://api.textlocal.in" + textLocalTemplateErrorCode = 80 +) + +type TextlocalProvider struct { + Config *conf.TextlocalProviderConfiguration + APIPath string +} + +type TextlocalError struct { + Code int `json:"code"` + Message string `json:"message"` +} + +type TextlocalResponse struct { + Status string `json:"status"` + Errors []TextlocalError `json:"errors"` + Messages []TextlocalMessage `json:"messages"` +} + +type TextlocalMessage struct { + MessageID string `json:"id"` +} + +// Creates a SmsProvider with the Textlocal Config +func NewTextlocalProvider(config conf.TextlocalProviderConfiguration) (SmsProvider, error) { + if err := config.Validate(); err != nil { + return nil, err + } + + apiPath := defaultTextLocalApiBase + "/send" + return &TextlocalProvider{ + Config: &config, + APIPath: apiPath, + }, nil +} + +func (t *TextlocalProvider) SendMessage(phone, message, channel, otp string) (string, error) { + switch channel { + case SMSProvider: + return t.SendSms(phone, message) + default: + return "", fmt.Errorf("channel type %q is not supported for TextLocal", channel) + } +} + +// Send an SMS containing the OTP with Textlocal's API +func (t *TextlocalProvider) SendSms(phone string, message string) (string, error) { + body := url.Values{ + "sender": {t.Config.Sender}, + "apikey": {t.Config.ApiKey}, + "message": 
{message},
+		"numbers": {phone},
+	}
+
+	client := &http.Client{Timeout: defaultTimeout}
+	r, err := http.NewRequest("POST", t.APIPath, strings.NewReader(body.Encode()))
+	if err != nil {
+		return "", err
+	}
+
+	r.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+	res, err := client.Do(r)
+	if err != nil {
+		return "", err
+	}
+	defer utilities.SafeClose(res.Body)
+
+	resp := &TextlocalResponse{}
+	derr := json.NewDecoder(res.Body).Decode(resp)
+	if derr != nil {
+		return "", derr
+	}
+
+	messageID := ""
+
+	if resp.Status != "success" {
+		if len(resp.Messages) > 0 {
+			messageID = resp.Messages[0].MessageID
+		}
+
+		// Guard against an empty errors array so we don't index out of range
+		// when Textlocal reports a failure without error details.
+		if len(resp.Errors) == 0 {
+			return messageID, fmt.Errorf("textlocal error: request failed with status %q", resp.Status)
+		}
+
+		if resp.Errors[0].Code == textLocalTemplateErrorCode {
+			return messageID, fmt.Errorf("textlocal error: %v (code: %v) template message: %s", resp.Errors[0].Message, resp.Errors[0].Code, message)
+		}
+
+		return messageID, fmt.Errorf("textlocal error: %v (code: %v) message %s", resp.Errors[0].Message, resp.Errors[0].Code, messageID)
+	}
+
+	return messageID, nil
+}
+
+func (t *TextlocalProvider) VerifyOTP(phone, code string) error {
+	return fmt.Errorf("VerifyOTP is not supported for Textlocal")
+}
diff --git a/auth_v2.169.0/internal/api/sms_provider/twilio.go b/auth_v2.169.0/internal/api/sms_provider/twilio.go
new file mode 100644
index 0000000..3536c2f
--- /dev/null
+++ b/auth_v2.169.0/internal/api/sms_provider/twilio.go
@@ -0,0 +1,141 @@
+package sms_provider
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+
+	"github.com/supabase/auth/internal/conf"
+	"github.com/supabase/auth/internal/utilities"
+)
+
+const (
+	defaultTwilioApiBase = "https://api.twilio.com"
+	apiVersion           = "2010-04-01"
+)
+
+type TwilioProvider struct {
+	Config  *conf.TwilioProviderConfiguration
+	APIPath string
+}
+
+var isPhoneNumber = regexp.MustCompile("^[1-9][0-9]{1,14}$")
+
+// formatPhoneNumber removes "+" and whitespaces in a phone number
+func formatPhoneNumber(phone string) string {
+	return strings.ReplaceAll(strings.TrimPrefix(phone, "+"), " ", "")
+}
+
+type SmsStatus struct {
+	To           string `json:"to"`
+	From         string `json:"from"`
+	MessageSID   string `json:"sid"`
+	Status       string `json:"status"`
+	ErrorCode    string `json:"error_code"`
+	ErrorMessage string `json:"error_message"`
+	Body         string `json:"body"`
+}
+
+type twilioErrResponse struct {
+	Code     int    `json:"code"`
+	Message  string `json:"message"`
+	MoreInfo string `json:"more_info"`
+	Status   int    `json:"status"`
+}
+
+func (t twilioErrResponse) Error() string {
+	return fmt.Sprintf("%s More information: %s", t.Message, t.MoreInfo)
+}
+
+// Creates a SmsProvider with the Twilio Config
+func NewTwilioProvider(config conf.TwilioProviderConfiguration) (SmsProvider, error) {
+	if err := config.Validate(); err != nil {
+		return nil, err
+	}
+
+	apiPath := defaultTwilioApiBase + "/" + apiVersion + "/" + "Accounts" + "/" + config.AccountSid + "/Messages.json"
+	return &TwilioProvider{
+		Config:  &config,
+		APIPath: apiPath,
+	}, nil
+}
+
+func (t *TwilioProvider) SendMessage(phone, message, channel, otp string) (string, error) {
+	switch channel {
+	case SMSProvider, WhatsappProvider:
+		return t.SendSms(phone, message, channel, otp)
+	default:
+		return "", fmt.Errorf("channel type %q is not supported for Twilio", channel)
+	}
+}
+
+// Send an SMS containing the OTP with Twilio's API
+func (t *TwilioProvider) SendSms(phone, message, channel, otp string) (string, error) {
+	sender := t.Config.MessageServiceSid
+	receiver := "+" + phone
+	body := url.Values{
+		"To": {receiver}, //
twilio api requires "+" extension to be included + "Channel": {channel}, + "From": {sender}, + "Body": {message}, + } + if channel == WhatsappProvider { + receiver = channel + ":" + receiver + if isPhoneNumber.MatchString(formatPhoneNumber(sender)) { + sender = channel + ":" + sender + } + + // Programmable Messaging (WhatsApp) takes in different set of inputs + body = url.Values{ + "To": {receiver}, // twilio api requires "+" extension to be included + "Channel": {channel}, + "From": {sender}, + } + // For backward compatibility with old API. + if t.Config.ContentSid != "" { + // Used to substitute OTP. See https://www.twilio.com/docs/content/whatsappauthentication for more details + contentVariables := fmt.Sprintf(`{"1": "%s"}`, otp) + body.Set("ContentSid", t.Config.ContentSid) + body.Set("ContentVariables", contentVariables) + } else { + body.Set("Body", message) + } + } + client := &http.Client{Timeout: defaultTimeout} + r, err := http.NewRequest("POST", t.APIPath, strings.NewReader(body.Encode())) + if err != nil { + return "", err + } + r.Header.Add("Content-Type", "application/x-www-form-urlencoded") + r.SetBasicAuth(t.Config.AccountSid, t.Config.AuthToken) + res, err := client.Do(r) + if err != nil { + return "", err + } + defer utilities.SafeClose(res.Body) + if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated { + resp := &twilioErrResponse{} + if err := json.NewDecoder(res.Body).Decode(resp); err != nil { + return "", err + } + return "", resp + } + // validate sms status + resp := &SmsStatus{} + derr := json.NewDecoder(res.Body).Decode(resp) + if derr != nil { + return "", derr + } + + if resp.Status == "failed" || resp.Status == "undelivered" { + return resp.MessageSID, fmt.Errorf("twilio error: %v %v for message %s", resp.ErrorMessage, resp.ErrorCode, resp.MessageSID) + } + + return resp.MessageSID, nil +} +func (t *TwilioProvider) VerifyOTP(phone, code string) error { + return fmt.Errorf("VerifyOTP is not supported for Twilio") +} diff --git a/auth_v2.169.0/internal/api/sms_provider/twilio_verify.go b/auth_v2.169.0/internal/api/sms_provider/twilio_verify.go new file mode 100644 index 0000000..8ec5463 --- /dev/null +++ b/auth_v2.169.0/internal/api/sms_provider/twilio_verify.go @@ -0,0 +1,139 @@ +package sms_provider + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/utilities" +) + +const ( + verifyServiceApiBase = "https://verify.twilio.com/v2/Services/" +) + +type TwilioVerifyProvider struct { + Config *conf.TwilioVerifyProviderConfiguration + APIPath string +} + +type VerificationResponse struct { + To string `json:"to"` + Status string `json:"status"` + Channel string `json:"channel"` + Valid bool `json:"valid"` + VerificationSID string `json:"sid"` + ErrorCode string `json:"error_code"` + ErrorMessage string `json:"error_message"` +} + +// See: https://www.twilio.com/docs/verify/api/verification-check +type VerificationCheckResponse struct { + To string `json:"to"` + Status string `json:"status"` + Channel string `json:"channel"` + Valid bool `json:"valid"` + ErrorCode string `json:"error_code"` + ErrorMessage string `json:"error_message"` +} + +// Creates a SmsProvider with the Twilio Config +func NewTwilioVerifyProvider(config conf.TwilioVerifyProviderConfiguration) (SmsProvider, error) { + if err := config.Validate(); err != nil { + return nil, err + } + apiPath := verifyServiceApiBase + config.MessageServiceSid + "/Verifications" + + return 
&TwilioVerifyProvider{ + Config: &config, + APIPath: apiPath, + }, nil +} + +func (t *TwilioVerifyProvider) SendMessage(phone, message, channel, otp string) (string, error) { + switch channel { + case SMSProvider, WhatsappProvider: + return t.SendSms(phone, message, channel) + default: + return "", fmt.Errorf("channel type %q is not supported for Twilio", channel) + } +} + +// Send an SMS containing the OTP with Twilio's API +func (t *TwilioVerifyProvider) SendSms(phone, message, channel string) (string, error) { + // Unlike Programmable Messaging, Verify does not require a prefix for channel + receiver := "+" + phone + body := url.Values{ + "To": {receiver}, + "Channel": {channel}, + } + client := &http.Client{Timeout: defaultTimeout} + r, err := http.NewRequest("POST", t.APIPath, strings.NewReader(body.Encode())) + if err != nil { + return "", err + } + r.Header.Add("Content-Type", "application/x-www-form-urlencoded") + r.SetBasicAuth(t.Config.AccountSid, t.Config.AuthToken) + res, err := client.Do(r) + if err != nil { + return "", err + } + defer utilities.SafeClose(res.Body) + if !(res.StatusCode == http.StatusOK || res.StatusCode == http.StatusCreated) { + resp := &twilioErrResponse{} + if err := json.NewDecoder(res.Body).Decode(resp); err != nil { + return "", err + } + return "", resp + } + + resp := &VerificationResponse{} + derr := json.NewDecoder(res.Body).Decode(resp) + if derr != nil { + return "", derr + } + return resp.VerificationSID, nil +} + +func (t *TwilioVerifyProvider) VerifyOTP(phone, code string) error { + verifyPath := verifyServiceApiBase + t.Config.MessageServiceSid + "/VerificationCheck" + receiver := "+" + phone + + body := url.Values{ + "To": {receiver}, // twilio api requires "+" extension to be included + "Code": {code}, + } + client := &http.Client{Timeout: defaultTimeout} + r, err := http.NewRequest("POST", verifyPath, strings.NewReader(body.Encode())) + if err != nil { + return err + } + r.Header.Add("Content-Type", "application/x-www-form-urlencoded") + r.SetBasicAuth(t.Config.AccountSid, t.Config.AuthToken) + res, err := client.Do(r) + if err != nil { + return err + } + defer utilities.SafeClose(res.Body) + if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated { + resp := &twilioErrResponse{} + if err := json.NewDecoder(res.Body).Decode(resp); err != nil { + return err + } + return resp + } + resp := &VerificationCheckResponse{} + derr := json.NewDecoder(res.Body).Decode(resp) + if derr != nil { + return derr + } + + if resp.Status != "approved" || !resp.Valid { + return fmt.Errorf("twilio verification error: %v %v", resp.ErrorMessage, resp.Status) + } + + return nil +} diff --git a/auth_v2.169.0/internal/api/sms_provider/vonage.go b/auth_v2.169.0/internal/api/sms_provider/vonage.go new file mode 100644 index 0000000..4b9fd5b --- /dev/null +++ b/auth_v2.169.0/internal/api/sms_provider/vonage.go @@ -0,0 +1,105 @@ +package sms_provider + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/utilities" + "golang.org/x/exp/utf8string" +) + +const ( + defaultVonageApiBase = "https://rest.nexmo.com" +) + +type VonageProvider struct { + Config *conf.VonageProviderConfiguration + APIPath string +} + +type VonageResponseMessage struct { + MessageID string `json:"message-id"` + Status string `json:"status"` + ErrorText string `json:"error-text"` +} + +type VonageResponse struct { + Messages []VonageResponseMessage `json:"messages"` 
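+	// Vonage returns one entry per message part. A Status of "0" means the
+	// part was accepted; any other value carries an ErrorText (see the checks
+	// in SendSms below).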
+} + +// Creates a SmsProvider with the Vonage Config +func NewVonageProvider(config conf.VonageProviderConfiguration) (SmsProvider, error) { + if err := config.Validate(); err != nil { + return nil, err + } + + apiPath := defaultVonageApiBase + "/sms/json" + return &VonageProvider{ + Config: &config, + APIPath: apiPath, + }, nil +} + +func (t *VonageProvider) SendMessage(phone, message, channel, otp string) (string, error) { + switch channel { + case SMSProvider: + return t.SendSms(phone, message) + default: + return "", fmt.Errorf("channel type %q is not supported for Vonage", channel) + } +} + +// Send an SMS containing the OTP with Vonage's API +func (t *VonageProvider) SendSms(phone string, message string) (string, error) { + body := url.Values{ + "from": {t.Config.From}, + "to": {phone}, + "text": {message}, + "api_key": {t.Config.ApiKey}, + "api_secret": {t.Config.ApiSecret}, + } + + isMessageContainUnicode := !utf8string.NewString(message).IsASCII() + if isMessageContainUnicode { + body.Set("type", "unicode") + } + + client := &http.Client{Timeout: defaultTimeout} + r, err := http.NewRequest("POST", t.APIPath, strings.NewReader(body.Encode())) + if err != nil { + return "", err + } + + r.Header.Add("Content-Type", "application/x-www-form-urlencoded") + res, err := client.Do(r) + if err != nil { + return "", err + } + defer utilities.SafeClose(res.Body) + + resp := &VonageResponse{} + derr := json.NewDecoder(res.Body).Decode(resp) + if derr != nil { + return "", derr + } + + if len(resp.Messages) <= 0 { + return "", errors.New("vonage error: Internal Error") + } + + // A status of zero indicates success; a non-zero value means something went wrong. + if resp.Messages[0].Status != "0" { + return resp.Messages[0].MessageID, fmt.Errorf("vonage error: %v (status: %v) for message %s", resp.Messages[0].ErrorText, resp.Messages[0].Status, resp.Messages[0].MessageID) + } + + return resp.Messages[0].MessageID, nil +} + +func (t *VonageProvider) VerifyOTP(phone, code string) error { + return fmt.Errorf("VerifyOTP is not supported for Vonage") +} diff --git a/auth_v2.169.0/internal/api/sorting.go b/auth_v2.169.0/internal/api/sorting.go new file mode 100644 index 0000000..f951d95 --- /dev/null +++ b/auth_v2.169.0/internal/api/sorting.go @@ -0,0 +1,41 @@ +package api + +import ( + "fmt" + "net/http" + "strings" + + "github.com/supabase/auth/internal/models" +) + +func sort(r *http.Request, allowedFields map[string]bool, defaultSort []models.SortField) (*models.SortParams, error) { + sortParams := &models.SortParams{ + Fields: defaultSort, + } + urlParams := r.URL.Query() + if values, exists := urlParams["sort"]; exists && len(values) > 0 { + sortParams.Fields = []models.SortField{} + for _, value := range values { + parts := strings.SplitN(value, " ", 2) + field := parts[0] + if _, ok := allowedFields[field]; !ok { + return nil, fmt.Errorf("bad field for sort '%v'", field) + } + + dir := models.Descending + if len(parts) > 1 { + switch strings.ToUpper(parts[1]) { + case string(models.Ascending): + dir = models.Ascending + case string(models.Descending): + dir = models.Descending + default: + return nil, fmt.Errorf("bad direction for sort '%v', only 'asc' and 'desc' allowed", parts[1]) + } + } + sortParams.Fields = append(sortParams.Fields, models.SortField{Name: field, Dir: dir}) + } + } + + return sortParams, nil +} diff --git a/auth_v2.169.0/internal/api/sso.go b/auth_v2.169.0/internal/api/sso.go new file mode 100644 index 0000000..1003407 --- /dev/null +++ b/auth_v2.169.0/internal/api/sso.go 
@@ -0,0 +1,147 @@ +package api + +import ( + "net/http" + + "github.com/crewjam/saml" + "github.com/gofrs/uuid" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +type SingleSignOnParams struct { + ProviderID uuid.UUID `json:"provider_id"` + Domain string `json:"domain"` + RedirectTo string `json:"redirect_to"` + SkipHTTPRedirect *bool `json:"skip_http_redirect"` + CodeChallenge string `json:"code_challenge"` + CodeChallengeMethod string `json:"code_challenge_method"` +} + +type SingleSignOnResponse struct { + URL string `json:"url"` +} + +func (p *SingleSignOnParams) validate() (bool, error) { + hasProviderID := p.ProviderID != uuid.Nil + hasDomain := p.Domain != "" + + if hasProviderID && hasDomain { + return hasProviderID, badRequestError(ErrorCodeValidationFailed, "Only one of provider_id or domain supported") + } else if !hasProviderID && !hasDomain { + return hasProviderID, badRequestError(ErrorCodeValidationFailed, "A provider_id or domain needs to be provided") + } + + return hasProviderID, nil +} + +// SingleSignOn handles the single-sign-on flow for a provided SSO domain or provider. +func (a *API) SingleSignOn(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + + params := &SingleSignOnParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + + var err error + hasProviderID := false + + if hasProviderID, err = params.validate(); err != nil { + return err + } + codeChallengeMethod := params.CodeChallengeMethod + codeChallenge := params.CodeChallenge + + if err := validatePKCEParams(codeChallengeMethod, codeChallenge); err != nil { + return err + } + flowType := getFlowFromChallenge(params.CodeChallenge) + var flowStateID *uuid.UUID + flowStateID = nil + if isPKCEFlow(flowType) { + flowState, err := generateFlowState(db, models.SSOSAML.String(), models.SSOSAML, codeChallengeMethod, codeChallenge, nil) + if err != nil { + return err + } + flowStateID = &flowState.ID + } + + var ssoProvider *models.SSOProvider + + if hasProviderID { + ssoProvider, err = models.FindSSOProviderByID(db, params.ProviderID) + if models.IsNotFoundError(err) { + return notFoundError(ErrorCodeSSOProviderNotFound, "No such SSO provider") + } else if err != nil { + return internalServerError("Unable to find SSO provider by ID").WithInternalError(err) + } + } else { + ssoProvider, err = models.FindSSOProviderByDomain(db, params.Domain) + if models.IsNotFoundError(err) { + return notFoundError(ErrorCodeSSOProviderNotFound, "No SSO provider assigned for this domain") + } else if err != nil { + return internalServerError("Unable to find SSO provider by domain").WithInternalError(err) + } + } + + entityDescriptor, err := ssoProvider.SAMLProvider.EntityDescriptor() + if err != nil { + return internalServerError("Error parsing SAML Metadata for SAML provider").WithInternalError(err) + } + + serviceProvider := a.getSAMLServiceProvider(entityDescriptor, false /* <- idpInitiated */) + + authnRequest, err := serviceProvider.MakeAuthenticationRequest( + serviceProvider.GetSSOBindingLocation(saml.HTTPRedirectBinding), + saml.HTTPRedirectBinding, + saml.HTTPPostBinding, + ) + if err != nil { + return internalServerError("Error creating SAML Authentication Request").WithInternalError(err) + } + + // Some IdPs do not support the use of the `persistent` NameID format, + // and require a different format to be sent to work. 
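+	// For example (illustrative value), a provider stored with NameIDFormat set
+	// to "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress" has that
+	// format copied into the AuthnRequest below in place of the library default.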
+	if ssoProvider.SAMLProvider.NameIDFormat != nil {
+		authnRequest.NameIDPolicy.Format = ssoProvider.SAMLProvider.NameIDFormat
+	}
+
+	relayState := models.SAMLRelayState{
+		SSOProviderID: ssoProvider.ID,
+		RequestID:     authnRequest.ID,
+		RedirectTo:    params.RedirectTo,
+		FlowStateID:   flowStateID,
+	}
+
+	if err := db.Transaction(func(tx *storage.Connection) error {
+		if terr := tx.Create(&relayState); terr != nil {
+			return internalServerError("Error creating SAML relay state from sign up").WithInternalError(terr)
+		}
+
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	ssoRedirectURL, err := authnRequest.Redirect(relayState.ID.String(), serviceProvider)
+	if err != nil {
+		return internalServerError("Error creating SAML authentication request redirect URL").WithInternalError(err)
+	}
+
+	skipHTTPRedirect := false
+
+	if params.SkipHTTPRedirect != nil {
+		skipHTTPRedirect = *params.SkipHTTPRedirect
+	}
+
+	if skipHTTPRedirect {
+		return sendJSON(w, http.StatusOK, SingleSignOnResponse{
+			URL: ssoRedirectURL.String(),
+		})
+	}
+
+	http.Redirect(w, r, ssoRedirectURL.String(), http.StatusSeeOther)
+	return nil
+}
diff --git a/auth_v2.169.0/internal/api/sso_test.go b/auth_v2.169.0/internal/api/sso_test.go
new file mode 100644
index 0000000..bae1beb
--- /dev/null
+++ b/auth_v2.169.0/internal/api/sso_test.go
@@ -0,0 +1,752 @@
+package api
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"testing"
+	"time"
+
+	jwt "github.com/golang-jwt/jwt/v5"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+	"github.com/supabase/auth/internal/conf"
+	"github.com/supabase/auth/internal/models"
+)
+
+const dateInPast = "2001-02-03T04:05:06.789"
+const dateInFarFuture = "2999-02-03T04:05:06.789"
+const oneHour = "PT1H"
+
+type SSOTestSuite struct {
+	suite.Suite
+	API      *API
+	Config   *conf.GlobalConfiguration
+	AdminJWT string
+}
+
+func TestSSO(t *testing.T) {
+	api, config, err := setupAPIForTest()
+	require.NoError(t, err)
+
+	ts := &SSOTestSuite{
+		API:    api,
+		Config: config,
+	}
+	defer api.db.Close()
+
+	if config.SAML.Enabled {
+		suite.Run(t, ts)
+	}
+}
+
+func (ts *SSOTestSuite) SetupTest() {
+	models.TruncateAll(ts.API.db)
+
+	claims := &AccessTokenClaims{
+		Role: "supabase_admin",
+	}
+	token, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString([]byte(ts.Config.JWT.Secret))
+	require.NoError(ts.T(), err, "Error generating admin jwt")
+
+	ts.AdminJWT = token
+}
+
+func (ts *SSOTestSuite) TestNonAdminJWT() {
+	// TODO
+}
+
+func (ts *SSOTestSuite) TestAdminListEmptySSOProviders() {
+	req := httptest.NewRequest(http.MethodGet, "http://localhost/admin/sso/providers", nil)
+	req.Header.Set("Authorization", "Bearer "+ts.AdminJWT)
+	w := httptest.NewRecorder()
+
+	ts.API.handler.ServeHTTP(w, req)
+
+	require.Equal(ts.T(), http.StatusOK, w.Code)
+
+	body, err := io.ReadAll(w.Body)
+	require.NoError(ts.T(), err)
+
+	var result struct {
+		Items     []interface{} `json:"items"`
+		NextToken string        `json:"next_token"`
+	}
+
+	require.NoError(ts.T(), json.Unmarshal(body, &result))
+	require.Equal(ts.T(), len(result.Items), 0)
+	require.Equal(ts.T(), result.NextToken, "")
+}
+
+func (ts *SSOTestSuite) TestAdminGetSSOProviderNotExist() {
+	examples := []struct {
+		URL string
+	}{
+		{
+			URL: "http://localhost/admin/sso/providers/not-a-uuid",
+		},
+		{
+			URL: "http://localhost/admin/sso/providers/677477db-3f51-4038-bc05-c6bb9bdc3c32",
+		},
+	}
+
+	for _, example := range examples {
+		req := httptest.NewRequest(http.MethodGet,
example.URL, nil)
+		req.Header.Set("Authorization", "Bearer "+ts.AdminJWT)
+		w := httptest.NewRecorder()
+
+		ts.API.handler.ServeHTTP(w, req)
+
+		require.Equal(ts.T(), http.StatusNotFound, w.Code)
+	}
+}
+
+func configurableSAMLIDPMetadata(entityID, validUntil, cacheDuration string) string {
+	return fmt.Sprintf(`<?xml version="1.0" encoding="UTF-8"?>
+<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" entityID="%s" validUntil="%s" cacheDuration="%s">
+  <md:IDPSSODescriptor WantAuthnRequestsSigned="false" protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
+    <md:KeyDescriptor use="signing">
+      <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
+        <ds:X509Data>
+          <ds:X509Certificate>MIIDdDCCAlygAwIBAgIGAYKSjRZiMA0GCSqGSIb3DQEBCwUAMHsxFDASBgNVBAoTC0dvb2dsZSBJ
+bmMuMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MQ8wDQYDVQQDEwZHb29nbGUxGDAWBgNVBAsTD0dv
+b2dsZSBGb3IgV29yazELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWEwHhcNMjIwODEy
+MTQ1NDU1WhcNMjcwODExMTQ1NDU1WjB7MRQwEgYDVQQKEwtHb29nbGUgSW5jLjEWMBQGA1UEBxMN
+TW91bnRhaW4gVmlldzEPMA0GA1UEAxMGR29vZ2xlMRgwFgYDVQQLEw9Hb29nbGUgRm9yIFdvcmsx
+CzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAlncFzErcnZm7ZWO71NZStnCIAoYNKf6Uw3LPLzcvk0YrA/eBC3PVDHSfahi+apGO
+Ytdq7IQUvBdto3rJTvP49fjyO0WLbAbiPC+dILt2Gx9kttxpSp99Bf+8ObL/fTy5Y2oHbJBfBX1V
+qfDQIY0fcej3AndFYUOE0gZXyeSbnROB8W1PzHxOc7rq1mlas0rvyja7AK4gwXjIwyIGsFDmHnve
+buqWOYMzOT9oD+iQq9BWYVHkXGZn0BXzKtnw9w8I3IxQdndUoCl95pYRIvdl1b0dWdO9cXtSsTkL
+kAa8B/mCQcF4W2M3t/yKtrcLcRTALg3/Hc+Xz+3BpY/fSDk1SwIDAQABMA0GCSqGSIb3DQEBCwUA
+A4IBAQCER02WLf6bKwTGVD/3VTntetIiETuPs46Dum8blbsg+2BYdAHIQcB9cLuMRosIw0nYj54m
+SfiyfoWGcx3CkMup1MtKyWu+SqDHl9Bpf+GFLG0ngKD/zB6xwpv/TCi+g/FBYe2TvzD6B1V0z7Vs
+Xf+Gc2TWBKmCuKf/g2AUt7IQLpOaqxuJVoZjp4sEMov6d3FnaoHQEd0lg+XmnYfLNtwe3QRSU0BD
+x6lVV4kXi0x0n198/gkjnA85rPZoZ6dmqHtkcM0Gabgg6KEE5ubSDlWDsdv27uANceCZAoxd1+in
+4/KqqkhynnbJs7Op5ZX8cckiHGGTGHNb35kys/XukuCo</ds:X509Certificate>
+        </ds:X509Data>
+      </ds:KeyInfo>
+    </md:KeyDescriptor>
+    <md:NameIDFormat>urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress</md:NameIDFormat>
+    <md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="%s"/>
+    <md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Location="%s"/>
+  </md:IDPSSODescriptor>
+</md:EntityDescriptor>`, entityID, validUntil, cacheDuration, entityID, entityID)
+}
+
+func (ts *SSOTestSuite) TestIsStaleSAMLMetadata() {
+	// https://en.wikipedia.org/wiki/ISO_8601
+	currentTime := time.Now()
+	currentTimeAsISO8601 := currentTime.UTC().Format("2006-01-02T15:04:05Z07:00")
+	examples := []struct {
+		Description           string
+		Metadata              []byte
+		IsStale               bool
+		CacheDurationExceeded bool
+	}{
+		{
+			Description:           "Metadata is valid and within cache duration",
+			Metadata:              []byte(configurableSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-B", dateInFarFuture, oneHour)),
+			IsStale:               false,
+			CacheDurationExceeded: false,
+		},
+		{
+			Description:           "Metadata is valid but is a minute past cache duration",
+			Metadata:              []byte(configurableSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-B", currentTimeAsISO8601, oneHour)),
+			IsStale:               true,
+			CacheDurationExceeded: true,
+		},
+		{
+			Description:           "Metadata is invalid but within cache duration",
+			Metadata:              []byte(configurableSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-B", dateInPast, oneHour)),
+			IsStale:               true,
+			CacheDurationExceeded: false,
+		},
+	}
+
+	for _, example := range examples {
+		metadata, err := parseSAMLMetadata(example.Metadata)
+		require.NoError(ts.T(), err)
+		provider := models.SAMLProvider{
+			EntityID:    metadata.EntityID,
+			MetadataXML: string(example.Metadata),
+			UpdatedAt:   currentTime,
+		}
+		if example.CacheDurationExceeded {
+			provider.UpdatedAt = currentTime.Add(-time.Minute * 59)
+		}
+
+		require.Equal(ts.T(), example.IsStale, IsSAMLMetadataStale(metadata, provider))
+	}
+}
+
+func validSAMLIDPMetadata(entityID string) string {
+	return fmt.Sprintf(`<?xml version="1.0" encoding="UTF-8"?>
+<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" entityID="%s">
+  <md:IDPSSODescriptor WantAuthnRequestsSigned="false" protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
+    <md:KeyDescriptor use="signing">
+      <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
+        <ds:X509Data>
+          <ds:X509Certificate>MIIDdDCCAlygAwIBAgIGAYKSjRZiMA0GCSqGSIb3DQEBCwUAMHsxFDASBgNVBAoTC0dvb2dsZSBJ
+bmMuMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MQ8wDQYDVQQDEwZHb29nbGUxGDAWBgNVBAsTD0dv
+b2dsZSBGb3IgV29yazELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWEwHhcNMjIwODEy
+MTQ1NDU1WhcNMjcwODExMTQ1NDU1WjB7MRQwEgYDVQQKEwtHb29nbGUgSW5jLjEWMBQGA1UEBxMN
+TW91bnRhaW4gVmlldzEPMA0GA1UEAxMGR29vZ2xlMRgwFgYDVQQLEw9Hb29nbGUgRm9yIFdvcmsx
+CzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAlncFzErcnZm7ZWO71NZStnCIAoYNKf6Uw3LPLzcvk0YrA/eBC3PVDHSfahi+apGO
+Ytdq7IQUvBdto3rJTvP49fjyO0WLbAbiPC+dILt2Gx9kttxpSp99Bf+8ObL/fTy5Y2oHbJBfBX1V
+qfDQIY0fcej3AndFYUOE0gZXyeSbnROB8W1PzHxOc7rq1mlas0rvyja7AK4gwXjIwyIGsFDmHnve
+buqWOYMzOT9oD+iQq9BWYVHkXGZn0BXzKtnw9w8I3IxQdndUoCl95pYRIvdl1b0dWdO9cXtSsTkL
+kAa8B/mCQcF4W2M3t/yKtrcLcRTALg3/Hc+Xz+3BpY/fSDk1SwIDAQABMA0GCSqGSIb3DQEBCwUA
+A4IBAQCER02WLf6bKwTGVD/3VTntetIiETuPs46Dum8blbsg+2BYdAHIQcB9cLuMRosIw0nYj54m
+SfiyfoWGcx3CkMup1MtKyWu+SqDHl9Bpf+GFLG0ngKD/zB6xwpv/TCi+g/FBYe2TvzD6B1V0z7Vs
+Xf+Gc2TWBKmCuKf/g2AUt7IQLpOaqxuJVoZjp4sEMov6d3FnaoHQEd0lg+XmnYfLNtwe3QRSU0BD
+x6lVV4kXi0x0n198/gkjnA85rPZoZ6dmqHtkcM0Gabgg6KEE5ubSDlWDsdv27uANceCZAoxd1+in
+4/KqqkhynnbJs7Op5ZX8cckiHGGTGHNb35kys/XukuCo</ds:X509Certificate>
+        </ds:X509Data>
+      </ds:KeyInfo>
+    </md:KeyDescriptor>
+    <md:NameIDFormat>urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress</md:NameIDFormat>
+    <md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="%s"/>
+    <md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Location="%s"/>
+  </md:IDPSSODescriptor>
+</md:EntityDescriptor>`, entityID, entityID, entityID)
+}
+
+func (ts *SSOTestSuite) TestAdminCreateSSOProvider() {
+	examples := []struct {
+		StatusCode int
+		Request    map[string]interface{}
+	}{
+		{
+			StatusCode: http.StatusBadRequest,
+			Request:    map[string]interface{}{},
+		},
+		{
+			StatusCode: http.StatusBadRequest,
+			Request: map[string]interface{}{
+				"type": "saml",
+			},
+		},
+		{
+			StatusCode: http.StatusBadRequest,
+			Request: map[string]interface{}{
+				"type": "oidc",
+			},
+		},
+		{
+			StatusCode: http.StatusCreated,
+			Request: map[string]interface{}{
+				"type":         "saml",
+				"metadata_xml": validSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-A"),
+			},
+		},
+		{
+			StatusCode: http.StatusCreated,
+			Request: map[string]interface{}{
+				"type":         "saml",
+				"metadata_xml": validSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-B"),
+			},
+		},
+		{
+			StatusCode: http.StatusCreated,
+			Request: map[string]interface{}{
+				"type":         "saml",
+				"metadata_xml": validSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-DUPLICATE"),
+			},
+		},
+		{
+			StatusCode: http.StatusCreated,
+			Request: map[string]interface{}{
+				"type":         "saml",
+				"metadata_xml": validSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-WITH-ATTRIBUTE-MAPPING"),
+				"attribute_mapping": map[string]interface{}{
+					"keys": map[string]interface{}{
+						"username": map[string]interface{}{
+							"name": "mail",
+						},
+					},
+				},
+			},
+		},
+		{
+			StatusCode: http.StatusUnprocessableEntity,
+			Request: map[string]interface{}{
+				"type":         "saml",
+				"metadata_xml": validSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-DUPLICATE"),
+			},
+		},
+		{
+			StatusCode: http.StatusCreated,
+			Request: map[string]interface{}{
+				"type":         "saml",
+				"metadata_xml": validSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-WITH-DOMAIN-A"),
+				"domains": []string{
+					"example.com",
+				},
+			},
+		},
+		{
+			StatusCode: http.StatusBadRequest,
+			Request: map[string]interface{}{
+				"type":         "saml",
+				"metadata_xml": validSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-WITH-DOMAIN-B"),
+				"domains": []string{
+					"example.com",
+				},
+			},
+		},
+		{
+			StatusCode: http.StatusBadRequest,
+			Request: map[string]interface{}{
+				"type":         "saml",
+				"metadata_url": "https://accounts.google.com/o/saml2?idpid=EXAMPLE-WITH-METADATA-URL-TOO",
+				"metadata_xml": validSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-WITH-METADATA-URL-TOO"),
+			},
+		},
+		{
+			StatusCode:
http.StatusBadRequest, + Request: map[string]interface{}{ + "type": "saml", + "metadata_url": "http://accounts.google.com/o/saml2?idpid=EXAMPLE-WITH-METADATA-OVER-HTTP", + }, + }, + { + StatusCode: http.StatusBadRequest, + Request: map[string]interface{}{ + "type": "saml", + "metadata_url": "https://accounts.google.com\\o/saml2?idpid=EXAMPLE-WITH-INVALID-METADATA-URL", + }, + }, + // TODO: add example with metadata_url + } + + for i, example := range examples { + body, err := json.Marshal(example.Request) + require.NoError(ts.T(), err) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/admin/sso/providers", bytes.NewBuffer(body)) + req.Header.Set("Authorization", "Bearer "+ts.AdminJWT) + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + response, err := io.ReadAll(w.Body) + require.NoError(ts.T(), err) + + require.Equal(ts.T(), example.StatusCode, w.Code, "Example %d failed with body %q", i, response) + + if example.StatusCode != http.StatusCreated { + continue + } + + // now check if the provider can be queried (GET) + var provider struct { + ID string `json:"id"` + } + + require.NoError(ts.T(), json.Unmarshal(response, &provider)) + + req = httptest.NewRequest(http.MethodGet, "http://localhost/admin/sso/providers/"+provider.ID, nil) + req.Header.Set("Authorization", "Bearer "+ts.AdminJWT) + w = httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + response, err = io.ReadAll(w.Body) + require.NoError(ts.T(), err) + + require.Equal(ts.T(), http.StatusOK, w.Code) + + originalProviderID := provider.ID + provider.ID = "" + + require.NoError(ts.T(), json.Unmarshal(response, &provider)) + require.Equal(ts.T(), provider.ID, originalProviderID) + + // now check if the provider can be queried (List) + var providers struct { + Items []struct { + ID string `json:"id"` + } `json:"items"` + } + + req = httptest.NewRequest(http.MethodGet, "http://localhost/admin/sso/providers", nil) + req.Header.Set("Authorization", "Bearer "+ts.AdminJWT) + w = httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + response, err = io.ReadAll(w.Body) + require.NoError(ts.T(), err) + + require.NoError(ts.T(), json.Unmarshal(response, &providers)) + + contained := false + for _, listProvider := range providers.Items { + if listProvider.ID == provider.ID { + contained = true + break + } + } + + require.True(ts.T(), contained) + } +} + +func (ts *SSOTestSuite) TestAdminUpdateSSOProvider() { + providers := []struct { + ID string + Request map[string]interface{} + }{ + { + Request: map[string]interface{}{ + "type": "saml", + "metadata_xml": validSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-A"), + }, + }, + { + Request: map[string]interface{}{ + "type": "saml", + "metadata_xml": validSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-C"), + "domains": []string{ + "example.com", + }, + }, + }, + } + + for i, example := range providers { + body, err := json.Marshal(example.Request) + require.NoError(ts.T(), err) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/admin/sso/providers", bytes.NewBuffer(body)) + req.Header.Set("Authorization", "Bearer "+ts.AdminJWT) + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + response, err := io.ReadAll(w.Body) + require.NoError(ts.T(), err) + + var payload struct { + ID string `json:"id"` + } + + require.NoError(ts.T(), json.Unmarshal(response, &payload)) + + providers[i].ID = payload.ID + } + + examples := []struct { + ID string + Status int + Request 
map[string]interface{} + }{ + { + ID: providers[0].ID, + Status: http.StatusBadRequest, // changing entity ID + Request: map[string]interface{}{ + "metadata_xml": validSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-B"), + }, + }, + { + ID: providers[0].ID, + Status: http.StatusBadRequest, // domain already exists + Request: map[string]interface{}{ + "domains": []string{ + "example.com", + }, + }, + }, + { + ID: providers[1].ID, + Status: http.StatusOK, + Request: map[string]interface{}{ + "domains": []string{ + "example.com", + "example.org", + }, + }, + }, + { + ID: providers[1].ID, + Status: http.StatusOK, + Request: map[string]interface{}{ + "attribute_mapping": map[string]interface{}{ + "keys": map[string]interface{}{ + "username": map[string]interface{}{ + "name": "mail", + }, + }, + }, + }, + }, + } + + for _, example := range examples { + body, err := json.Marshal(example.Request) + require.NoError(ts.T(), err) + + req := httptest.NewRequest(http.MethodPut, "http://localhost/admin/sso/providers/"+example.ID, bytes.NewBuffer(body)) + req.Header.Set("Authorization", "Bearer "+ts.AdminJWT) + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + require.Equal(ts.T(), w.Code, example.Status) + } +} + +func (ts *SSOTestSuite) TestAdminDeleteSSOProvider() { + providers := []struct { + ID string + Request map[string]interface{} + }{ + { + Request: map[string]interface{}{ + "type": "saml", + "metadata_xml": validSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-A"), + }, + }, + } + + for i, example := range providers { + body, err := json.Marshal(example.Request) + require.NoError(ts.T(), err) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/admin/sso/providers", bytes.NewBuffer(body)) + req.Header.Set("Authorization", "Bearer "+ts.AdminJWT) + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + response, err := io.ReadAll(w.Body) + require.NoError(ts.T(), err) + + var payload struct { + ID string `json:"id"` + } + + require.NoError(ts.T(), json.Unmarshal(response, &payload)) + + providers[i].ID = payload.ID + } + + examples := []struct { + ID string + Status int + }{ + { + ID: providers[0].ID, + Status: http.StatusOK, + }, + } + + for _, example := range examples { + req := httptest.NewRequest(http.MethodDelete, "http://localhost/admin/sso/providers/"+example.ID, nil) + req.Header.Set("Authorization", "Bearer "+ts.AdminJWT) + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + require.Equal(ts.T(), w.Code, example.Status) + } + + check := []struct { + ID string + }{ + { + ID: providers[0].ID, + }, + } + + for _, example := range check { + req := httptest.NewRequest(http.MethodGet, "http://localhost/admin/sso/providers/"+example.ID, nil) + req.Header.Set("Authorization", "Bearer "+ts.AdminJWT) + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + require.Equal(ts.T(), http.StatusNotFound, w.Code) + } +} + +func (ts *SSOTestSuite) TestSingleSignOn() { + providers := []struct { + ID string + Request map[string]interface{} + }{ + { + // creates a SAML provider (EXAMPLE-A) + // does not have a domain mapping + Request: map[string]interface{}{ + "type": "saml", + "metadata_xml": validSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-A"), + }, + }, + { + // creates a SAML provider (EXAMPLE-B) + // does have a domain mapping on example.com + Request: map[string]interface{}{ + "type": "saml", + "domains": []string{ + "example.com", + }, + "metadata_xml": 
validSAMLIDPMetadata("https://accounts.google.com/o/saml2?idpid=EXAMPLE-B"), + }, + }, + } + + for i, example := range providers { + body, err := json.Marshal(example.Request) + require.NoError(ts.T(), err) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/admin/sso/providers", bytes.NewBuffer(body)) + req.Header.Set("Authorization", "Bearer "+ts.AdminJWT) + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + response, err := io.ReadAll(w.Body) + require.NoError(ts.T(), err) + + var payload struct { + ID string `json:"id"` + } + + require.NoError(ts.T(), json.Unmarshal(response, &payload)) + + providers[i].ID = payload.ID + } + + examples := []struct { + Code int + Request map[string]interface{} + URL string + }{ + { + // call /sso with provider_id (EXAMPLE-A) + // should be successful and redirect to the EXAMPLE-A SSO URL + Request: map[string]interface{}{ + "provider_id": providers[0].ID, + }, + Code: http.StatusSeeOther, + URL: "https://accounts.google.com/o/saml2?idpid=EXAMPLE-A", + }, + { + // call /sso with provider_id (EXAMPLE-A) and SSO PKCE + // should be successful and redirect to the EXAMPLE-A SSO URL + Request: map[string]interface{}{ + "provider_id": providers[0].ID, + "code_challenge": "vby3iMQ4XUuycKkEyNsYHXshPql1Dod7Ebey2iXTXm4", + "code_challenge_method": "s256", + }, + Code: http.StatusSeeOther, + URL: "https://accounts.google.com/o/saml2?idpid=EXAMPLE-A", + }, + { + // call /sso with domain=example.com (provider=EXAMPLE-B) + // should be successful and redirect to the EXAMPLE-B SSO URL + Request: map[string]interface{}{ + "domain": "example.com", + }, + Code: http.StatusSeeOther, + URL: "https://accounts.google.com/o/saml2?idpid=EXAMPLE-B", + }, + { + // call /sso with domain=example.com (provider=EXAMPLE-B) + // should be successful and redirect to the EXAMPLE-B SSO URL + Request: map[string]interface{}{ + "domain": "example.com", + "skip_http_redirect": true, + }, + Code: http.StatusOK, + URL: "https://accounts.google.com/o/saml2?idpid=EXAMPLE-B", + }, + { + // call /sso with domain=example.org (no such provider) + // should be unsuccessful with 404 + Request: map[string]interface{}{ + "domain": "example.org", + }, + Code: http.StatusNotFound, + }, + { + // call /sso with a provider_id= (no such provider) + // should be unsuccessful with 404 + Request: map[string]interface{}{ + "provider_id": "14d906bf-9bd5-4734-b7d1-3904e240610e", + }, + Code: http.StatusNotFound, + }, + } + + for _, example := range examples { + body, err := json.Marshal(example.Request) + require.NoError(ts.T(), err) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/sso", bytes.NewBuffer(body)) + // no authorization header intentional, this is a login endpoint + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + require.Equal(ts.T(), w.Code, example.Code) + + locationURLString := "" + + if example.Code == http.StatusSeeOther { + locationURLString = w.Header().Get("Location") + } else if example.Code == http.StatusOK { + var response struct { + URL string `json:"url"` + } + + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&response)) + + require.NotEmpty(ts.T(), response.URL) + + locationURLString = response.URL + } else { + continue + } + + locationURL, err := url.ParseRequestURI(locationURLString) + require.NoError(ts.T(), err) + + locationQuery, err := url.ParseQuery(locationURL.RawQuery) + + require.NoError(ts.T(), err) + + samlQueryParams := []string{ + "SAMLRequest", + "RelayState", + "SigAlg", + "Signature", + } + + for 
_, param := range samlQueryParams { + require.True(ts.T(), locationQuery.Has(param)) + } + + for _, param := range samlQueryParams { + locationQuery.Del(param) + } + + locationURL.RawQuery = locationQuery.Encode() + + require.Equal(ts.T(), locationURL.String(), example.URL) + } +} + +func TestSSOCreateParamsValidation(t *testing.T) { + // TODO +} diff --git a/auth_v2.169.0/internal/api/ssoadmin.go b/auth_v2.169.0/internal/api/ssoadmin.go new file mode 100644 index 0000000..20fd8b9 --- /dev/null +++ b/auth_v2.169.0/internal/api/ssoadmin.go @@ -0,0 +1,421 @@ +package api + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" + "unicode/utf8" + + "github.com/crewjam/saml" + "github.com/crewjam/saml/samlsp" + "github.com/go-chi/chi/v5" + "github.com/gofrs/uuid" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/observability" + "github.com/supabase/auth/internal/storage" + "github.com/supabase/auth/internal/utilities" +) + +// loadSSOProvider looks for an idp_id parameter in the URL route and loads the SSO provider +// with that ID (or resource ID) and adds it to the context. +func (a *API) loadSSOProvider(w http.ResponseWriter, r *http.Request) (context.Context, error) { + ctx := r.Context() + db := a.db.WithContext(ctx) + + idpParam := chi.URLParam(r, "idp_id") + + idpID, err := uuid.FromString(idpParam) + if err != nil { + // idpParam is not UUIDv4 + return nil, notFoundError(ErrorCodeSSOProviderNotFound, "SSO Identity Provider not found") + } + + // idpParam is a UUIDv4 + provider, err := models.FindSSOProviderByID(db, idpID) + if err != nil { + if models.IsNotFoundError(err) { + return nil, notFoundError(ErrorCodeSSOProviderNotFound, "SSO Identity Provider not found") + } else { + return nil, internalServerError("Database error finding SSO Identity Provider").WithInternalError(err) + } + } + + observability.LogEntrySetField(r, "sso_provider_id", provider.ID.String()) + + return withSSOProvider(r.Context(), provider), nil +} + +// adminSSOProvidersList lists all SAML SSO Identity Providers in the system. Does +// not deal with pagination at this time. 
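+//
+// As a sketch (illustrative request only; the admin JWT must carry an admin
+// role, as in the test suite above), the endpoint is exercised like this:
+//
+//	GET /admin/sso/providers HTTP/1.1
+//	Authorization: Bearer <admin JWT>
+//
+//	=> 200 {"items": [...]}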
+func (a *API) adminSSOProvidersList(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + + providers, err := models.FindAllSAMLProviders(db) + if err != nil { + return err + } + + for i := range providers { + // remove metadata XML so that the returned JSON is not ginormous + providers[i].SAMLProvider.MetadataXML = "" + } + + return sendJSON(w, http.StatusOK, map[string]interface{}{ + "items": providers, + }) +} + +type CreateSSOProviderParams struct { + Type string `json:"type"` + + MetadataURL string `json:"metadata_url"` + MetadataXML string `json:"metadata_xml"` + Domains []string `json:"domains"` + AttributeMapping models.SAMLAttributeMapping `json:"attribute_mapping"` + NameIDFormat string `json:"name_id_format"` +} + +func (p *CreateSSOProviderParams) validate(forUpdate bool) error { + if !forUpdate && p.Type != "saml" { + return badRequestError(ErrorCodeValidationFailed, "Only 'saml' supported for SSO provider type") + } else if p.MetadataURL != "" && p.MetadataXML != "" { + return badRequestError(ErrorCodeValidationFailed, "Only one of metadata_xml or metadata_url needs to be set") + } else if !forUpdate && p.MetadataURL == "" && p.MetadataXML == "" { + return badRequestError(ErrorCodeValidationFailed, "Either metadata_xml or metadata_url must be set") + } else if p.MetadataURL != "" { + metadataURL, err := url.ParseRequestURI(p.MetadataURL) + if err != nil { + return badRequestError(ErrorCodeValidationFailed, "metadata_url is not a valid URL") + } + + if metadataURL.Scheme != "https" { + return badRequestError(ErrorCodeValidationFailed, "metadata_url is not a HTTPS URL") + } + } + + switch p.NameIDFormat { + case "", + string(saml.PersistentNameIDFormat), + string(saml.EmailAddressNameIDFormat), + string(saml.TransientNameIDFormat), + string(saml.UnspecifiedNameIDFormat): + // it's valid + + default: + return badRequestError(ErrorCodeValidationFailed, "name_id_format must be unspecified or one of %v", strings.Join([]string{ + string(saml.PersistentNameIDFormat), + string(saml.EmailAddressNameIDFormat), + string(saml.TransientNameIDFormat), + string(saml.UnspecifiedNameIDFormat), + }, ", ")) + } + + return nil +} + +func (p *CreateSSOProviderParams) metadata(ctx context.Context) ([]byte, *saml.EntityDescriptor, error) { + var rawMetadata []byte + var err error + + if p.MetadataXML != "" { + rawMetadata = []byte(p.MetadataXML) + } else if p.MetadataURL != "" { + rawMetadata, err = fetchSAMLMetadata(ctx, p.MetadataURL) + if err != nil { + return nil, nil, err + } + } else { + // impossible situation if you called validate() prior + return nil, nil, nil + } + + metadata, err := parseSAMLMetadata(rawMetadata) + if err != nil { + return nil, nil, err + } + + return rawMetadata, metadata, nil +} + +func parseSAMLMetadata(rawMetadata []byte) (*saml.EntityDescriptor, error) { + if !utf8.Valid(rawMetadata) { + return nil, badRequestError(ErrorCodeValidationFailed, "SAML Metadata XML contains invalid UTF-8 characters, which are not supported at this time") + } + + metadata, err := samlsp.ParseMetadata(rawMetadata) + if err != nil { + return nil, err + } + + if metadata.EntityID == "" { + return nil, badRequestError(ErrorCodeValidationFailed, "SAML Metadata does not contain an EntityID") + } + + if len(metadata.IDPSSODescriptors) < 1 { + return nil, badRequestError(ErrorCodeValidationFailed, "SAML Metadata does not contain any IDPSSODescriptor") + } + + if len(metadata.IDPSSODescriptors) > 1 { + return nil, 
badRequestError(ErrorCodeValidationFailed, "SAML Metadata contains multiple IDPSSODescriptors") + } + + return metadata, nil +} + +func fetchSAMLMetadata(ctx context.Context, url string) ([]byte, error) { + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, internalServerError("Unable to create a request to metadata_url").WithInternalError(err) + } + + req = req.WithContext(ctx) + + req.Header.Set("Accept", "application/xml;charset=UTF-8") + req.Header.Set("Accept-Charset", "UTF-8") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + + defer utilities.SafeClose(resp.Body) + if resp.StatusCode != http.StatusOK { + return nil, badRequestError(ErrorCodeSAMLMetadataFetchFailed, "HTTP %v error fetching SAML Metadata from URL '%s'", resp.StatusCode, url) + } + + data, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return data, nil +} + +// adminSSOProvidersCreate creates a new SAML Identity Provider in the system. +func (a *API) adminSSOProvidersCreate(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + + params := &CreateSSOProviderParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + + if err := params.validate(false /* <- forUpdate */); err != nil { + return err + } + + rawMetadata, metadata, err := params.metadata(ctx) + if err != nil { + return err + } + + existingProvider, err := models.FindSAMLProviderByEntityID(db, metadata.EntityID) + if err != nil && !models.IsNotFoundError(err) { + return err + } + if existingProvider != nil { + return unprocessableEntityError(ErrorCodeSAMLIdPAlreadyExists, "SAML Identity Provider with this EntityID (%s) already exists", metadata.EntityID) + } + + provider := &models.SSOProvider{ + // TODO handle Name, Description, Attribute Mapping + SAMLProvider: models.SAMLProvider{ + EntityID: metadata.EntityID, + MetadataXML: string(rawMetadata), + }, + } + + if params.MetadataURL != "" { + provider.SAMLProvider.MetadataURL = ¶ms.MetadataURL + } + + if params.NameIDFormat != "" { + provider.SAMLProvider.NameIDFormat = ¶ms.NameIDFormat + } + + provider.SAMLProvider.AttributeMapping = params.AttributeMapping + + for _, domain := range params.Domains { + existingProvider, err := models.FindSSOProviderByDomain(db, domain) + if err != nil && !models.IsNotFoundError(err) { + return err + } + if existingProvider != nil { + return badRequestError(ErrorCodeSSODomainAlreadyExists, "SSO Domain '%s' is already assigned to an SSO identity provider (%s)", domain, existingProvider.ID.String()) + } + + provider.SSODomains = append(provider.SSODomains, models.SSODomain{ + Domain: domain, + }) + } + + if err := db.Transaction(func(tx *storage.Connection) error { + if terr := tx.Eager().Create(provider); terr != nil { + return terr + } + + return tx.Eager().Load(provider) + }); err != nil { + return err + } + + return sendJSON(w, http.StatusCreated, provider) +} + +// adminSSOProvidersGet returns an existing SAML Identity Provider in the system. +func (a *API) adminSSOProvidersGet(w http.ResponseWriter, r *http.Request) error { + provider := getSSOProvider(r.Context()) + + return sendJSON(w, http.StatusOK, provider) +} + +// adminSSOProvidersUpdate updates a provider with the provided diff values. 
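+// Fields omitted from the request body are left untouched; for example
+// (illustrative payload, mirroring TestAdminUpdateSSOProvider above):
+//
+//	PUT /admin/sso/providers/{idp_id}
+//	{"domains": ["example.com", "example.org"]}
+//
+// replaces the domain mapping while keeping the stored metadata and attribute
+// mapping as they are.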
+func (a *API) adminSSOProvidersUpdate(w http.ResponseWriter, r *http.Request) error {
+	ctx := r.Context()
+	db := a.db.WithContext(ctx)
+
+	params := &CreateSSOProviderParams{}
+	if err := retrieveRequestParams(r, params); err != nil {
+		return err
+	}
+
+	if err := params.validate(true /* <- forUpdate */); err != nil {
+		return err
+	}
+
+	modified := false
+	updateSAMLProvider := false
+
+	provider := getSSOProvider(ctx)
+
+	if params.MetadataXML != "" || params.MetadataURL != "" {
+		// metadata is being updated
+		rawMetadata, metadata, err := params.metadata(ctx)
+		if err != nil {
+			return err
+		}
+
+		if provider.SAMLProvider.EntityID != metadata.EntityID {
+			return badRequestError(ErrorCodeSAMLEntityIDMismatch, "SAML Metadata can be updated only if the EntityID matches for the provider; expected '%s' but got '%s'", provider.SAMLProvider.EntityID, metadata.EntityID)
+		}
+
+		if params.MetadataURL != "" {
+			provider.SAMLProvider.MetadataURL = &params.MetadataURL
+		}
+
+		provider.SAMLProvider.MetadataXML = string(rawMetadata)
+		updateSAMLProvider = true
+		modified = true
+	}
+
+	// domains are only "updated" when params.Domains is not nil; if it
+	// is nil (as opposed to `[]`), the caller does not want the domains
+	// modified
+	updateDomains := params.Domains != nil
+
+	var createDomains, deleteDomains []models.SSODomain
+	keepDomains := make(map[string]bool)
+
+	for _, domain := range params.Domains {
+		existingProvider, err := models.FindSSOProviderByDomain(db, domain)
+		if err != nil && !models.IsNotFoundError(err) {
+			return err
+		}
+		if existingProvider != nil {
+			if existingProvider.ID == provider.ID {
+				keepDomains[domain] = true
+			} else {
+				return badRequestError(ErrorCodeSSODomainAlreadyExists, "SSO domain '%s' already assigned to another provider (%s)", domain, existingProvider.ID.String())
+			}
+		} else {
+			modified = true
+			createDomains = append(createDomains, models.SSODomain{
+				Domain:        domain,
+				SSOProviderID: provider.ID,
+			})
+		}
+	}
+
+	if updateDomains {
+		for i, domain := range provider.SSODomains {
+			if !keepDomains[domain.Domain] {
+				modified = true
+				deleteDomains = append(deleteDomains, provider.SSODomains[i])
+			}
+		}
+	}
+
+	updateAttributeMapping := false
+	if params.AttributeMapping.Keys != nil {
+		updateAttributeMapping = !provider.SAMLProvider.AttributeMapping.Equal(&params.AttributeMapping)
+		if updateAttributeMapping {
+			modified = true
+			provider.SAMLProvider.AttributeMapping = params.AttributeMapping
+		}
+	}
+
+	nameIDFormat := ""
+	if provider.SAMLProvider.NameIDFormat != nil {
+		nameIDFormat = *provider.SAMLProvider.NameIDFormat
+	}
+
+	if params.NameIDFormat != nameIDFormat {
+		modified = true
+
+		if params.NameIDFormat == "" {
+			provider.SAMLProvider.NameIDFormat = nil
+		} else {
+			provider.SAMLProvider.NameIDFormat = &params.NameIDFormat
+		}
+	}
+
+	if modified {
+		if err := db.Transaction(func(tx *storage.Connection) error {
+			if terr := tx.Eager().Update(provider); terr != nil {
+				return terr
+			}
+
+			if updateDomains {
+				if terr := tx.Destroy(deleteDomains); terr != nil {
+					return terr
+				}
+
+				if terr := tx.Eager().Create(createDomains); terr != nil {
+					return terr
+				}
+			}
+
+			if updateAttributeMapping || updateSAMLProvider {
+				if terr := tx.Eager().Update(&provider.SAMLProvider); terr != nil {
+					return terr
+				}
+			}
+
+			return tx.Eager().Load(provider)
+		}); err != nil {
+			return unprocessableEntityError(ErrorCodeConflict, "Updating SSO provider failed, likely due to a conflict. Try again?").WithInternalError(err)
+		}
+	}
+
+	return sendJSON(w, http.StatusOK, provider)
+}
+
+// adminSSOProvidersDelete deletes a SAML identity provider.
+func (a *API) adminSSOProvidersDelete(w http.ResponseWriter, r *http.Request) error {
+	ctx := r.Context()
+	db := a.db.WithContext(ctx)
+
+	provider := getSSOProvider(ctx)
+
+	if err := db.Transaction(func(tx *storage.Connection) error {
+		return tx.Eager().Destroy(provider)
+	}); err != nil {
+		return err
+	}
+
+	return sendJSON(w, http.StatusOK, provider)
+}
diff --git a/auth_v2.169.0/internal/api/token.go b/auth_v2.169.0/internal/api/token.go
new file mode 100644
index 0000000..cc945f2
--- /dev/null
+++ b/auth_v2.169.0/internal/api/token.go
@@ -0,0 +1,506 @@
+package api
+
+import (
+	"context"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+
+	"fmt"
+
+	"github.com/gofrs/uuid"
+	"github.com/golang-jwt/jwt/v5"
+	"github.com/xeipuuv/gojsonschema"
+
+	"github.com/supabase/auth/internal/hooks"
+	"github.com/supabase/auth/internal/metering"
+	"github.com/supabase/auth/internal/models"
+	"github.com/supabase/auth/internal/observability"
+	"github.com/supabase/auth/internal/storage"
+)
+
+// AccessTokenClaims is a struct that's used for JWT claims
+type AccessTokenClaims struct {
+	jwt.RegisteredClaims
+	Email                         string                 `json:"email"`
+	Phone                         string                 `json:"phone"`
+	AppMetaData                   map[string]interface{} `json:"app_metadata"`
+	UserMetaData                  map[string]interface{} `json:"user_metadata"`
+	Role                          string                 `json:"role"`
+	AuthenticatorAssuranceLevel   string                 `json:"aal,omitempty"`
+	AuthenticationMethodReference []models.AMREntry      `json:"amr,omitempty"`
+	SessionId                     string                 `json:"session_id,omitempty"`
+	IsAnonymous                   bool                   `json:"is_anonymous"`
+}
+
+// AccessTokenResponse represents an OAuth2 success response
+type AccessTokenResponse struct {
+	Token                string             `json:"access_token"`
+	TokenType            string             `json:"token_type"` // Bearer
+	ExpiresIn            int                `json:"expires_in"`
+	ExpiresAt            int64              `json:"expires_at"`
+	RefreshToken         string             `json:"refresh_token"`
+	User                 *models.User       `json:"user"`
+	ProviderAccessToken  string             `json:"provider_token,omitempty"`
+	ProviderRefreshToken string             `json:"provider_refresh_token,omitempty"`
+	WeakPassword         *WeakPasswordError `json:"weak_password,omitempty"`
+}
+
+// AsRedirectURL encodes the AccessTokenResponse as a redirect URL that
+// includes the access token response data in a URL fragment.
+func (r *AccessTokenResponse) AsRedirectURL(redirectURL string, extraParams url.Values) string { + extraParams.Set("access_token", r.Token) + extraParams.Set("token_type", r.TokenType) + extraParams.Set("expires_in", strconv.Itoa(r.ExpiresIn)) + extraParams.Set("expires_at", strconv.FormatInt(r.ExpiresAt, 10)) + extraParams.Set("refresh_token", r.RefreshToken) + + return redirectURL + "#" + extraParams.Encode() +} + +// PasswordGrantParams are the parameters the ResourceOwnerPasswordGrant method accepts +type PasswordGrantParams struct { + Email string `json:"email"` + Phone string `json:"phone"` + Password string `json:"password"` +} + +// PKCEGrantParams are the parameters the PKCEGrant method accepts +type PKCEGrantParams struct { + AuthCode string `json:"auth_code"` + CodeVerifier string `json:"code_verifier"` +} + +const useCookieHeader = "x-use-cookie" +const InvalidLoginMessage = "Invalid login credentials" + +// Token is the endpoint for OAuth access token requests +func (a *API) Token(w http.ResponseWriter, r *http.Request) error { + ctx := r.Context() + grantType := r.FormValue("grant_type") + switch grantType { + case "password": + return a.ResourceOwnerPasswordGrant(ctx, w, r) + case "refresh_token": + return a.RefreshTokenGrant(ctx, w, r) + case "id_token": + return a.IdTokenGrant(ctx, w, r) + case "pkce": + return a.PKCE(ctx, w, r) + default: + return badRequestError(ErrorCodeInvalidCredentials, "unsupported_grant_type") + } +} + +// ResourceOwnerPasswordGrant implements the password grant type flow +func (a *API) ResourceOwnerPasswordGrant(ctx context.Context, w http.ResponseWriter, r *http.Request) error { + db := a.db.WithContext(ctx) + + params := &PasswordGrantParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + + aud := a.requestAud(ctx, r) + config := a.config + + if params.Email != "" && params.Phone != "" { + return badRequestError(ErrorCodeValidationFailed, "Only an email address or phone number should be provided on login.") + } + var user *models.User + var grantParams models.GrantParams + var provider string + var err error + + grantParams.FillGrantParams(r) + + if params.Email != "" { + provider = "email" + if !config.External.Email.Enabled { + return unprocessableEntityError(ErrorCodeEmailProviderDisabled, "Email logins are disabled") + } + user, err = models.FindUserByEmailAndAudience(db, params.Email, aud) + } else if params.Phone != "" { + provider = "phone" + if !config.External.Phone.Enabled { + return unprocessableEntityError(ErrorCodePhoneProviderDisabled, "Phone logins are disabled") + } + params.Phone = formatPhoneNumber(params.Phone) + user, err = models.FindUserByPhoneAndAudience(db, params.Phone, aud) + } else { + return badRequestError(ErrorCodeValidationFailed, "missing email or phone") + } + + if err != nil { + if models.IsNotFoundError(err) { + return badRequestError(ErrorCodeInvalidCredentials, InvalidLoginMessage) + } + return internalServerError("Database error querying schema").WithInternalError(err) + } + + if !user.HasPassword() { + return badRequestError(ErrorCodeInvalidCredentials, InvalidLoginMessage) + } + + if user.IsBanned() { + return badRequestError(ErrorCodeUserBanned, "User is banned") + } + + isValidPassword, shouldReEncrypt, err := user.Authenticate(ctx, db, params.Password, config.Security.DBEncryption.DecryptionKeys, config.Security.DBEncryption.Encrypt, config.Security.DBEncryption.EncryptionKeyID) + if err != nil { + return err + } + + var weakPasswordError *WeakPasswordError + if 
isValidPassword { + if err := a.checkPasswordStrength(ctx, params.Password); err != nil { + if wpe, ok := err.(*WeakPasswordError); ok { + weakPasswordError = wpe + } else { + observability.GetLogEntry(r).Entry.WithError(err).Warn("Password strength check on sign-in failed") + } + } + + if shouldReEncrypt { + if err := user.SetPassword(ctx, params.Password, true, config.Security.DBEncryption.EncryptionKeyID, config.Security.DBEncryption.EncryptionKey); err != nil { + return err + } + + // directly change this in the database without + // calling user.UpdatePassword() because this + // is not a password change, just encryption + // change in the database + if err := db.UpdateOnly(user, "encrypted_password"); err != nil { + return err + } + } + } + + if config.Hook.PasswordVerificationAttempt.Enabled { + input := hooks.PasswordVerificationAttemptInput{ + UserID: user.ID, + Valid: isValidPassword, + } + output := hooks.PasswordVerificationAttemptOutput{} + if err := a.invokeHook(nil, r, &input, &output); err != nil { + return err + } + + if output.Decision == hooks.HookRejection { + if output.Message == "" { + output.Message = hooks.DefaultPasswordHookRejectionMessage + } + if output.ShouldLogoutUser { + if err := models.Logout(a.db, user.ID); err != nil { + return err + } + } + return badRequestError(ErrorCodeInvalidCredentials, output.Message) + } + } + if !isValidPassword { + return badRequestError(ErrorCodeInvalidCredentials, InvalidLoginMessage) + } + + if params.Email != "" && !user.IsConfirmed() { + return badRequestError(ErrorCodeEmailNotConfirmed, "Email not confirmed") + } else if params.Phone != "" && !user.IsPhoneConfirmed() { + return badRequestError(ErrorCodePhoneNotConfirmed, "Phone not confirmed") + } + + var token *AccessTokenResponse + err = db.Transaction(func(tx *storage.Connection) error { + var terr error + if terr = models.NewAuditLogEntry(r, tx, user, models.LoginAction, "", map[string]interface{}{ + "provider": provider, + }); terr != nil { + return terr + } + token, terr = a.issueRefreshToken(r, tx, user, models.PasswordGrant, grantParams) + if terr != nil { + return terr + } + + return nil + }) + if err != nil { + return err + } + + token.WeakPassword = weakPasswordError + + metering.RecordLogin("password", user.ID) + return sendJSON(w, http.StatusOK, token) +} + +func (a *API) PKCE(ctx context.Context, w http.ResponseWriter, r *http.Request) error { + db := a.db.WithContext(ctx) + var grantParams models.GrantParams + + // There is a slight problem with this as it will pick-up the + // User-Agent and IP addresses from the server if used on the server + // side. Currently there's no mechanism to distinguish, but the server + // can be told to at least propagate the User-Agent header. 
+	grantParams.FillGrantParams(r)
+
+	params := &PKCEGrantParams{}
+	if err := retrieveRequestParams(r, params); err != nil {
+		return err
+	}
+
+	if params.AuthCode == "" || params.CodeVerifier == "" {
+		return badRequestError(ErrorCodeValidationFailed, "invalid request: both auth code and code verifier should be non-empty")
+	}
+
+	flowState, err := models.FindFlowStateByAuthCode(db, params.AuthCode)
+	// Sanity check in case user ID was not set properly
+	if models.IsNotFoundError(err) || flowState.UserID == nil {
+		return notFoundError(ErrorCodeFlowStateNotFound, "invalid flow state, no valid flow state found")
+	} else if err != nil {
+		return err
+	}
+	if flowState.IsExpired(a.config.External.FlowStateExpiryDuration) {
+		return unprocessableEntityError(ErrorCodeFlowStateExpired, "invalid flow state, flow state has expired")
+	}
+
+	user, err := models.FindUserByID(db, *flowState.UserID)
+	if err != nil {
+		return err
+	}
+	if err := flowState.VerifyPKCE(params.CodeVerifier); err != nil {
+		return badRequestError(ErrorCodeBadCodeVerifier, err.Error())
+	}
+
+	var token *AccessTokenResponse
+	err = db.Transaction(func(tx *storage.Connection) error {
+		var terr error
+		authMethod, err := models.ParseAuthenticationMethod(flowState.AuthenticationMethod)
+		if err != nil {
+			return err
+		}
+		if terr := models.NewAuditLogEntry(r, tx, user, models.LoginAction, "", map[string]interface{}{
+			"provider_type": flowState.ProviderType,
+		}); terr != nil {
+			return terr
+		}
+		token, terr = a.issueRefreshToken(r, tx, user, authMethod, grantParams)
+		if terr != nil {
+			// error type is already handled in issueRefreshToken
+			return terr
+		}
+		token.ProviderAccessToken = flowState.ProviderAccessToken
+		// Because not all providers give out a refresh token
+		// See corresponding OAuth2 spec:
+		if flowState.ProviderRefreshToken != "" {
+			token.ProviderRefreshToken = flowState.ProviderRefreshToken
+		}
+		if terr = tx.Destroy(flowState); terr != nil {
+			return terr
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	return sendJSON(w, http.StatusOK, token)
+}
+
+func (a *API) generateAccessToken(r *http.Request, tx *storage.Connection, user *models.User, sessionId *uuid.UUID, authenticationMethod models.AuthenticationMethod) (string, int64, error) {
+	config := a.config
+	if sessionId == nil {
+		return "", 0, internalServerError("Session is required to issue access token")
+	}
+	sid := sessionId.String()
+	session, terr := models.FindSessionByID(tx, *sessionId, false)
+	if terr != nil {
+		return "", 0, terr
+	}
+	aal, amr, terr := session.CalculateAALAndAMR(user)
+	if terr != nil {
+		return "", 0, terr
+	}
+
+	issuedAt := time.Now().UTC()
+	expiresAt := issuedAt.Add(time.Second * time.Duration(config.JWT.Exp))
+
+	claims := &hooks.AccessTokenClaims{
+		RegisteredClaims: jwt.RegisteredClaims{
+			Subject:   user.ID.String(),
+			Audience:  jwt.ClaimStrings{user.Aud},
+			IssuedAt:  jwt.NewNumericDate(issuedAt),
+			ExpiresAt: jwt.NewNumericDate(expiresAt),
+			Issuer:    config.JWT.Issuer,
+		},
+		Email:                         user.GetEmail(),
+		Phone:                         user.GetPhone(),
+		AppMetaData:                   user.AppMetaData,
+		UserMetaData:                  user.UserMetaData,
+		Role:                          user.Role,
+		SessionId:                     sid,
+		AuthenticatorAssuranceLevel:   aal.String(),
+		AuthenticationMethodReference: amr,
+		IsAnonymous:                   user.IsAnonymous,
+	}
+
+	var gotrueClaims jwt.Claims = claims
+	if config.Hook.CustomAccessToken.Enabled {
+		input := hooks.CustomAccessTokenInput{
+			UserID:               user.ID,
+			Claims:               claims,
+			AuthenticationMethod: authenticationMethod.String(),
+		}
+
+		output :=
hooks.CustomAccessTokenOutput{} + + err := a.invokeHook(tx, r, &input, &output) + if err != nil { + return "", 0, err + } + gotrueClaims = jwt.MapClaims(output.Claims) + } + + signed, err := signJwt(&config.JWT, gotrueClaims) + if err != nil { + return "", 0, err + } + + return signed, expiresAt.Unix(), nil +} + +func (a *API) issueRefreshToken(r *http.Request, conn *storage.Connection, user *models.User, authenticationMethod models.AuthenticationMethod, grantParams models.GrantParams) (*AccessTokenResponse, error) { + config := a.config + + now := time.Now() + user.LastSignInAt = &now + + var tokenString string + var expiresAt int64 + var refreshToken *models.RefreshToken + + err := conn.Transaction(func(tx *storage.Connection) error { + var terr error + + refreshToken, terr = models.GrantAuthenticatedUser(tx, user, grantParams) + if terr != nil { + return internalServerError("Database error granting user").WithInternalError(terr) + } + + terr = models.AddClaimToSession(tx, *refreshToken.SessionId, authenticationMethod) + if terr != nil { + return terr + } + + tokenString, expiresAt, terr = a.generateAccessToken(r, tx, user, refreshToken.SessionId, authenticationMethod) + if terr != nil { + // Account for Hook Error + httpErr, ok := terr.(*HTTPError) + if ok { + return httpErr + } + return internalServerError("error generating jwt token").WithInternalError(terr) + } + return nil + }) + if err != nil { + return nil, err + } + + return &AccessTokenResponse{ + Token: tokenString, + TokenType: "bearer", + ExpiresIn: config.JWT.Exp, + ExpiresAt: expiresAt, + RefreshToken: refreshToken.Token, + User: user, + }, nil +} + +func (a *API) updateMFASessionAndClaims(r *http.Request, tx *storage.Connection, user *models.User, authenticationMethod models.AuthenticationMethod, grantParams models.GrantParams) (*AccessTokenResponse, error) { + ctx := r.Context() + config := a.config + var tokenString string + var expiresAt int64 + var refreshToken *models.RefreshToken + currentClaims := getClaims(ctx) + sessionId, err := uuid.FromString(currentClaims.SessionId) + if err != nil { + return nil, internalServerError("Cannot read SessionId claim as UUID").WithInternalError(err) + } + + err = tx.Transaction(func(tx *storage.Connection) error { + if terr := models.AddClaimToSession(tx, sessionId, authenticationMethod); terr != nil { + return terr + } + session, terr := models.FindSessionByID(tx, sessionId, false) + if terr != nil { + return terr + } + currentToken, terr := models.FindTokenBySessionID(tx, &session.ID) + if terr != nil { + return terr + } + if err := tx.Load(user, "Identities"); err != nil { + return err + } + // Swap to ensure current token is the latest one + refreshToken, terr = models.GrantRefreshTokenSwap(r, tx, user, currentToken) + if terr != nil { + return terr + } + aal, _, terr := session.CalculateAALAndAMR(user) + if terr != nil { + return terr + } + + if err := session.UpdateAALAndAssociatedFactor(tx, aal, grantParams.FactorID); err != nil { + return err + } + + tokenString, expiresAt, terr = a.generateAccessToken(r, tx, user, &session.ID, authenticationMethod) + if terr != nil { + httpErr, ok := terr.(*HTTPError) + if ok { + return httpErr + } + return internalServerError("error generating jwt token").WithInternalError(terr) + } + return nil + }) + if err != nil { + return nil, err + } + return &AccessTokenResponse{ + Token: tokenString, + TokenType: "bearer", + ExpiresIn: config.JWT.Exp, + ExpiresAt: expiresAt, + RefreshToken: refreshToken.Token, + User: user, + }, nil +} + +func 
validateTokenClaims(outputClaims map[string]interface{}) error { + schemaLoader := gojsonschema.NewStringLoader(hooks.MinimumViableTokenSchema) + + documentLoader := gojsonschema.NewGoLoader(outputClaims) + + result, err := gojsonschema.Validate(schemaLoader, documentLoader) + if err != nil { + return err + } + + if !result.Valid() { + var errorMessages string + + for _, desc := range result.Errors() { + errorMessages += fmt.Sprintf("- %s\n", desc) + fmt.Printf("- %s\n", desc) + } + return fmt.Errorf("output claims do not conform to the expected schema: \n%s", errorMessages) + + } + + return nil +} diff --git a/auth_v2.169.0/internal/api/token_oidc.go b/auth_v2.169.0/internal/api/token_oidc.go new file mode 100644 index 0000000..b576742 --- /dev/null +++ b/auth_v2.169.0/internal/api/token_oidc.go @@ -0,0 +1,253 @@ +package api + +import ( + "context" + "crypto/sha256" + "fmt" + "net/http" + + "github.com/coreos/go-oidc/v3/oidc" + "github.com/supabase/auth/internal/api/provider" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/observability" + "github.com/supabase/auth/internal/storage" +) + +// IdTokenGrantParams are the parameters the IdTokenGrant method accepts +type IdTokenGrantParams struct { + IdToken string `json:"id_token"` + AccessToken string `json:"access_token"` + Nonce string `json:"nonce"` + Provider string `json:"provider"` + ClientID string `json:"client_id"` + Issuer string `json:"issuer"` +} + +func (p *IdTokenGrantParams) getProvider(ctx context.Context, config *conf.GlobalConfiguration, r *http.Request) (*oidc.Provider, bool, string, []string, error) { + log := observability.GetLogEntry(r).Entry + + var cfg *conf.OAuthProviderConfiguration + var issuer string + var providerType string + var acceptableClientIDs []string + + switch true { + case p.Provider == "apple" || p.Issuer == provider.IssuerApple: + cfg = &config.External.Apple + providerType = "apple" + issuer = provider.IssuerApple + acceptableClientIDs = append(acceptableClientIDs, config.External.Apple.ClientID...) + + if config.External.IosBundleId != "" { + acceptableClientIDs = append(acceptableClientIDs, config.External.IosBundleId) + } + + case p.Provider == "google" || p.Issuer == provider.IssuerGoogle: + cfg = &config.External.Google + providerType = "google" + issuer = provider.IssuerGoogle + acceptableClientIDs = append(acceptableClientIDs, config.External.Google.ClientID...) + + case p.Provider == "azure" || provider.IsAzureIssuer(p.Issuer): + issuer = p.Issuer + if issuer == "" || !provider.IsAzureIssuer(issuer) { + detectedIssuer, err := provider.DetectAzureIDTokenIssuer(ctx, p.IdToken) + if err != nil { + return nil, false, "", nil, badRequestError(ErrorCodeValidationFailed, "Unable to detect issuer in ID token for Azure provider").WithInternalError(err) + } + issuer = detectedIssuer + } + cfg = &config.External.Azure + providerType = "azure" + acceptableClientIDs = append(acceptableClientIDs, config.External.Azure.ClientID...) + + case p.Provider == "facebook" || p.Issuer == provider.IssuerFacebook: + cfg = &config.External.Facebook + providerType = "facebook" + issuer = provider.IssuerFacebook + acceptableClientIDs = append(acceptableClientIDs, config.External.Facebook.ClientID...) 
+ + case p.Provider == "keycloak" || (config.External.Keycloak.Enabled && config.External.Keycloak.URL != "" && p.Issuer == config.External.Keycloak.URL): + cfg = &config.External.Keycloak + providerType = "keycloak" + issuer = config.External.Keycloak.URL + acceptableClientIDs = append(acceptableClientIDs, config.External.Keycloak.ClientID...) + + case p.Provider == "kakao" || p.Issuer == provider.IssuerKakao: + cfg = &config.External.Kakao + providerType = "kakao" + issuer = provider.IssuerKakao + acceptableClientIDs = append(acceptableClientIDs, config.External.Kakao.ClientID...) + + case p.Provider == "vercel_marketplace" || p.Issuer == provider.IssuerVercelMarketplace: + cfg = &config.External.VercelMarketplace + providerType = "vercel_marketplace" + issuer = provider.IssuerVercelMarketplace + acceptableClientIDs = append(acceptableClientIDs, config.External.VercelMarketplace.ClientID...) + + default: + log.WithField("issuer", p.Issuer).WithField("client_id", p.ClientID).Warn("Use of POST /token with arbitrary issuer and client_id is deprecated for security reasons. Please switch to using the API with provider only!") + + allowed := false + for _, allowedIssuer := range config.External.AllowedIdTokenIssuers { + if p.Issuer == allowedIssuer { + allowed = true + providerType = allowedIssuer + acceptableClientIDs = []string{p.ClientID} + issuer = allowedIssuer + break + } + } + + if !allowed { + return nil, false, "", nil, badRequestError(ErrorCodeValidationFailed, fmt.Sprintf("Custom OIDC provider %q not allowed", p.Provider)) + } + + cfg = &conf.OAuthProviderConfiguration{ + Enabled: true, + SkipNonceCheck: false, + } + } + + if !cfg.Enabled { + return nil, false, "", nil, badRequestError(ErrorCodeProviderDisabled, fmt.Sprintf("Provider (issuer %q) is not enabled", issuer)) + } + + oidcProvider, err := oidc.NewProvider(ctx, issuer) + if err != nil { + return nil, false, "", nil, err + } + + return oidcProvider, cfg.SkipNonceCheck, providerType, acceptableClientIDs, nil +} + +// IdTokenGrant implements the id_token grant type flow +func (a *API) IdTokenGrant(ctx context.Context, w http.ResponseWriter, r *http.Request) error { + log := observability.GetLogEntry(r).Entry + + db := a.db.WithContext(ctx) + config := a.config + + params := &IdTokenGrantParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + + if params.IdToken == "" { + return oauthError("invalid request", "id_token required") + } + + if params.Provider == "" && (params.ClientID == "" || params.Issuer == "") { + return oauthError("invalid request", "provider or client_id and issuer required") + } + + oidcProvider, skipNonceCheck, providerType, acceptableClientIDs, err := params.getProvider(ctx, config, r) + if err != nil { + return err + } + + idToken, userData, err := provider.ParseIDToken(ctx, oidcProvider, nil, params.IdToken, provider.ParseIDTokenOptions{ + SkipAccessTokenCheck: params.AccessToken == "", + AccessToken: params.AccessToken, + }) + if err != nil { + return oauthError("invalid request", "Bad ID token").WithInternalError(err) + } + + userData.Metadata.EmailVerified = false + for _, email := range userData.Emails { + if email.Primary { + userData.Metadata.Email = email.Email + userData.Metadata.EmailVerified = email.Verified + break + } else { + userData.Metadata.Email = email.Email + userData.Metadata.EmailVerified = email.Verified + } + } + + if idToken.Subject == "" { + return oauthError("invalid request", "Missing sub claim in id_token") + } + + correctAudience := false + for 
_, clientID := range acceptableClientIDs {
+		if clientID == "" {
+			continue
+		}
+
+		for _, aud := range idToken.Audience {
+			if aud == clientID {
+				correctAudience = true
+				break
+			}
+		}
+
+		if correctAudience {
+			break
+		}
+	}
+
+	if !correctAudience {
+		return oauthError("invalid request", fmt.Sprintf("Unacceptable audience in id_token: %v", idToken.Audience))
+	}
+
+	if !skipNonceCheck {
+		tokenHasNonce := idToken.Nonce != ""
+		paramsHasNonce := params.Nonce != ""
+
+		if tokenHasNonce != paramsHasNonce {
+			return oauthError("invalid request", "Passed nonce and nonce in id_token should either both exist or not.")
+		} else if tokenHasNonce && paramsHasNonce {
+			// verify nonce to mitigate replay attacks
+			hash := fmt.Sprintf("%x", sha256.Sum256([]byte(params.Nonce)))
+			if hash != idToken.Nonce {
+				return oauthError("invalid nonce", "Nonces mismatch")
+			}
+		}
+	}
+
+	if params.AccessToken == "" {
+		if idToken.AccessTokenHash != "" {
+			log.Warn("ID token has an at_hash claim, but no access_token parameter was provided. In future versions, access_token will be mandatory, as it's a security best practice.")
+		}
+	} else {
+		if idToken.AccessTokenHash == "" {
+			log.Info("ID token does not have an at_hash claim; the access_token parameter is unused.")
+		}
+	}
+
+	var token *AccessTokenResponse
+	var grantParams models.GrantParams
+
+	grantParams.FillGrantParams(r)
+
+	if err := db.Transaction(func(tx *storage.Connection) error {
+		var user *models.User
+		var terr error
+
+		user, terr = a.createAccountFromExternalIdentity(tx, r, userData, providerType)
+		if terr != nil {
+			return terr
+		}
+
+		token, terr = a.issueRefreshToken(r, tx, user, models.OAuth, grantParams)
+		if terr != nil {
+			return terr
+		}
+
+		return nil
+	}); err != nil {
+		switch err.(type) {
+		case *storage.CommitWithError:
+			return err
+		case *HTTPError:
+			return err
+		default:
+			return oauthError("server_error", "Internal Server Error").WithInternalError(err)
+		}
+	}
+
+	return sendJSON(w, http.StatusOK, token)
+}
diff --git a/auth_v2.169.0/internal/api/token_oidc_test.go b/auth_v2.169.0/internal/api/token_oidc_test.go
new file mode 100644
index 0000000..1eab99e
--- /dev/null
+++ b/auth_v2.169.0/internal/api/token_oidc_test.go
@@ -0,0 +1,69 @@
+package api
+
+import (
+	"context"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+	"github.com/supabase/auth/internal/conf"
+)
+
+type TokenOIDCTestSuite struct {
+	suite.Suite
+	API    *API
+	Config *conf.GlobalConfiguration
+}
+
+func TestTokenOIDC(t *testing.T) {
+	api, config, err := setupAPIForTest()
+	require.NoError(t, err)
+
+	ts := &TokenOIDCTestSuite{
+		API:    api,
+		Config: config,
+	}
+	defer api.db.Close()
+
+	suite.Run(t, ts)
+}
+
+func SetupTestOIDCProvider(ts *TokenOIDCTestSuite) *httptest.Server {
+	var server *httptest.Server
+	server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		switch r.URL.Path {
+		case "/.well-known/openid-configuration":
+			w.WriteHeader(http.StatusOK)
+			w.Write([]byte(`{"issuer":"` + server.URL + `","authorization_endpoint":"` + server.URL + `/authorize","token_endpoint":"` + server.URL + `/token","jwks_uri":"` + server.URL + `/jwks"}`))
+		default:
+			w.WriteHeader(http.StatusNotFound)
+		}
+	}))
+	return server
+}
+
+func (ts *TokenOIDCTestSuite) TestGetProvider() {
+	server := SetupTestOIDCProvider(ts)
+	defer server.Close()
+
+	params := &IdTokenGrantParams{
+		IdToken:     "test-id-token",
+		AccessToken: "test-access-token",
+		Nonce:
"test-nonce", + Provider: server.URL, + ClientID: "test-client-id", + Issuer: server.URL, + } + + ts.Config.External.AllowedIdTokenIssuers = []string{server.URL} + + req := httptest.NewRequest(http.MethodPost, "http://localhost", nil) + oidcProvider, skipNonceCheck, providerType, acceptableClientIds, err := params.getProvider(context.Background(), ts.Config, req) + require.NoError(ts.T(), err) + require.NotNil(ts.T(), oidcProvider) + require.False(ts.T(), skipNonceCheck) + require.Equal(ts.T(), params.Provider, providerType) + require.NotEmpty(ts.T(), acceptableClientIds) +} diff --git a/auth_v2.169.0/internal/api/token_refresh.go b/auth_v2.169.0/internal/api/token_refresh.go new file mode 100644 index 0000000..7eae233 --- /dev/null +++ b/auth_v2.169.0/internal/api/token_refresh.go @@ -0,0 +1,274 @@ +package api + +import ( + "context" + mathRand "math/rand" + "net/http" + "time" + + "github.com/supabase/auth/internal/metering" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" + "github.com/supabase/auth/internal/utilities" +) + +const retryLoopDuration = 5.0 + +// RefreshTokenGrantParams are the parameters the RefreshTokenGrant method accepts +type RefreshTokenGrantParams struct { + RefreshToken string `json:"refresh_token"` +} + +// RefreshTokenGrant implements the refresh_token grant type flow +func (a *API) RefreshTokenGrant(ctx context.Context, w http.ResponseWriter, r *http.Request) error { + db := a.db.WithContext(ctx) + config := a.config + + params := &RefreshTokenGrantParams{} + if err := retrieveRequestParams(r, params); err != nil { + return err + } + + if params.RefreshToken == "" { + return oauthError("invalid_request", "refresh_token required") + } + + // A 5 second retry loop is used to make sure that refresh token + // requests do not waste database connections waiting for each other. + // Instead of waiting at the database level, they're waiting at the API + // level instead and retry to refresh the locked row every 10-30 + // milliseconds. + retryStart := a.Now() + retry := true + + for retry && time.Since(retryStart).Seconds() < retryLoopDuration { + retry = false + + user, token, session, err := models.FindUserWithRefreshToken(db, params.RefreshToken, false) + if err != nil { + if models.IsNotFoundError(err) { + return badRequestError(ErrorCodeRefreshTokenNotFound, "Invalid Refresh Token: Refresh Token Not Found") + } + return internalServerError(err.Error()) + } + + if user.IsBanned() { + return badRequestError(ErrorCodeUserBanned, "Invalid Refresh Token: User Banned") + } + + if session == nil { + // a refresh token won't have a session if it's created prior to the sessions table introduced + if err := db.Destroy(token); err != nil { + return internalServerError("Error deleting refresh token with missing session").WithInternalError(err) + } + return badRequestError(ErrorCodeSessionNotFound, "Invalid Refresh Token: No Valid Session Found") + } + + result := session.CheckValidity(retryStart, &token.UpdatedAt, config.Sessions.Timebox, config.Sessions.InactivityTimeout) + + switch result { + case models.SessionValid: + // do nothing + + case models.SessionTimedOut: + return badRequestError(ErrorCodeSessionExpired, "Invalid Refresh Token: Session Expired (Inactivity)") + + default: + return badRequestError(ErrorCodeSessionExpired, "Invalid Refresh Token: Session Expired") + } + + // Basic checks above passed, now we need to serialize access + // to the session in a transaction so that there's no + // concurrent modification. 
In the event that the refresh
+		// token's row or session is locked, the transaction is closed
+		// and the whole process will be retried a bit later so that
+		// the connection pool does not get exhausted.
+
+		var tokenString string
+		var expiresAt int64
+		var newTokenResponse *AccessTokenResponse
+
+		err = db.Transaction(func(tx *storage.Connection) error {
+			user, token, session, terr := models.FindUserWithRefreshToken(tx, params.RefreshToken, true /* forUpdate */)
+			if terr != nil {
+				if models.IsNotFoundError(terr) {
+					// because forUpdate was set, and the
+					// previous check outside the
+					// transaction found a refresh token
+					// and session, but now we're getting
+					// an IsNotFoundError, this means that
+					// the refresh token row and session
+					// are probably locked so we need to
+					// retry in a few milliseconds.
+					retry = true
+					return terr
+				}
+				return internalServerError(terr.Error())
+			}
+
+			if a.config.Sessions.SinglePerUser {
+				sessions, terr := models.FindAllSessionsForUser(tx, user.ID, true /* forUpdate */)
+				if models.IsNotFoundError(terr) {
+					// because forUpdate was set, and the
+					// previous check outside the
+					// transaction found a user and
+					// session, but now we're getting
+					// an IsNotFoundError, this means that
+					// the user is locked and we need to
+					// retry in a few milliseconds
+					retry = true
+					return terr
+				} else if terr != nil {
+					return internalServerError(terr.Error())
+				}
+
+				sessionTag := session.DetermineTag(config.Sessions.Tags)
+
+				// go through all sessions of the user and
+				// check if the current session is the user's
+				// most recently refreshed valid session
+				for _, s := range sessions {
+					if s.ID == session.ID {
+						// current session, skip it
+						continue
+					}
+
+					if s.CheckValidity(retryStart, nil, config.Sessions.Timebox, config.Sessions.InactivityTimeout) != models.SessionValid {
+						// session is not valid so it
+						// can't be regarded as active
+						// on the user
+						continue
+					}
+
+					if s.DetermineTag(config.Sessions.Tags) != sessionTag {
+						// if tags are specified,
+						// ignore sessions with a
+						// mismatching tag
+						continue
+					}
+
+					// since token is not the refresh token
+					// of s, we can't use its UpdatedAt
+					// time to compare!
+					if s.LastRefreshedAt(nil).After(session.LastRefreshedAt(&token.UpdatedAt)) {
+						// session is not the most
+						// recently active one
+						return badRequestError(ErrorCodeSessionExpired, "Invalid Refresh Token: Session Expired (Revoked by Newer Login)")
+					}
+				}
+
+				// this session is the user's active session
+			}
+
+			// refresh token row and session are locked at this
+			// point, cannot be concurrently refreshed
+
+			var issuedToken *models.RefreshToken
+
+			if token.Revoked {
+				activeRefreshToken, terr := session.FindCurrentlyActiveRefreshToken(tx)
+				if terr != nil && !models.IsNotFoundError(terr) {
+					return internalServerError(terr.Error())
+				}
+
+				if activeRefreshToken != nil && activeRefreshToken.Parent.String() == token.Token {
+					// Token was revoked, but it's the
+					// parent of the currently active one.
+					// This indicates that the client was
+					// not able to store the result when it
+					// refreshed the token. This case is
+					// allowed, provided we return the
+					// active refresh token instead of
+					// creating a new one.
+					issuedToken = activeRefreshToken
+				} else {
+					// For a revoked refresh token to be reused, it
+					// has to fall within the reuse interval.
+ reuseUntil := token.UpdatedAt.Add( + time.Second * time.Duration(config.Security.RefreshTokenReuseInterval)) + + if a.Now().After(reuseUntil) { + // not OK to reuse this token + if config.Security.RefreshTokenRotationEnabled { + // Revoke all tokens in token family + if err := models.RevokeTokenFamily(tx, token); err != nil { + return internalServerError(err.Error()) + } + } + + return storage.NewCommitWithError(badRequestError(ErrorCodeRefreshTokenAlreadyUsed, "Invalid Refresh Token: Already Used").WithInternalMessage("Possible abuse attempt: %v", token.ID)) + } + } + } + + if terr = models.NewAuditLogEntry(r, tx, user, models.TokenRefreshedAction, "", nil); terr != nil { + return terr + } + + if issuedToken == nil { + newToken, terr := models.GrantRefreshTokenSwap(r, tx, user, token) + if terr != nil { + return terr + } + + issuedToken = newToken + } + + tokenString, expiresAt, terr = a.generateAccessToken(r, tx, user, issuedToken.SessionId, models.TokenRefresh) + if terr != nil { + httpErr, ok := terr.(*HTTPError) + if ok { + return httpErr + } + return internalServerError("error generating jwt token").WithInternalError(terr) + } + + refreshedAt := a.Now() + session.RefreshedAt = &refreshedAt + + userAgent := r.Header.Get("User-Agent") + if userAgent != "" { + session.UserAgent = &userAgent + } else { + session.UserAgent = nil + } + + ipAddress := utilities.GetIPAddress(r) + if ipAddress != "" { + session.IP = &ipAddress + } else { + session.IP = nil + } + + if terr := session.UpdateOnlyRefreshInfo(tx); terr != nil { + return internalServerError("failed to update session information").WithInternalError(terr) + } + + newTokenResponse = &AccessTokenResponse{ + Token: tokenString, + TokenType: "bearer", + ExpiresIn: config.JWT.Exp, + ExpiresAt: expiresAt, + RefreshToken: issuedToken.Token, + User: user, + } + + return nil + }) + if err != nil { + if retry && models.IsNotFoundError(err) { + // refresh token and session row were likely locked, so + // we need to wait a moment before retrying the whole + // process anew + time.Sleep(time.Duration(10+mathRand.Intn(20)) * time.Millisecond) // #nosec + continue + } else { + return err + } + } + metering.RecordLogin("token", user.ID) + return sendJSON(w, http.StatusOK, newTokenResponse) + } + + return conflictError("Too many concurrent token refresh requests on the same session or refresh token") +} diff --git a/auth_v2.169.0/internal/api/token_test.go b/auth_v2.169.0/internal/api/token_test.go new file mode 100644 index 0000000..fc89d4f --- /dev/null +++ b/auth_v2.169.0/internal/api/token_test.go @@ -0,0 +1,857 @@ +package api + +import ( + "bytes" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" +) + +type TokenTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration + + RefreshToken *models.RefreshToken + User *models.User +} + +func TestToken(t *testing.T) { + os.Setenv("GOTRUE_RATE_LIMIT_HEADER", "My-Custom-Header") + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := &TokenTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *TokenTestSuite) SetupTest() { + ts.RefreshToken = nil + models.TruncateAll(ts.API.db) + + // Create user & refresh 
token + u, err := models.NewUser("", "test@example.com", "password", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating test user model") + t := time.Now() + u.EmailConfirmedAt = &t + u.BannedUntil = nil + require.NoError(ts.T(), ts.API.db.Create(u), "Error saving new test user") + + ts.User = u + ts.RefreshToken, err = models.GrantAuthenticatedUser(ts.API.db, u, models.GrantParams{}) + require.NoError(ts.T(), err, "Error creating refresh token") + ts.Config.Hook.CustomAccessToken.Enabled = false + +} + +func (ts *TokenTestSuite) TestSessionTimebox() { + timebox := 10 * time.Second + + ts.API.config.Sessions.Timebox = &timebox + ts.API.overrideTime = func() time.Time { + return time.Now().Add(timebox).Add(time.Second) + } + + defer func() { + ts.API.overrideTime = nil + ts.API.config.Sessions.Timebox = nil + }() + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": ts.RefreshToken.Token, + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=refresh_token", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusBadRequest, w.Code) + + var firstResult struct { + ErrorCode string `json:"error_code"` + Message string `json:"msg"` + } + + assert.NoError(ts.T(), json.NewDecoder(w.Result().Body).Decode(&firstResult)) + assert.Equal(ts.T(), ErrorCodeSessionExpired, firstResult.ErrorCode) + assert.Equal(ts.T(), "Invalid Refresh Token: Session Expired", firstResult.Message) +} + +func (ts *TokenTestSuite) TestSessionInactivityTimeout() { + inactivityTimeout := 10 * time.Second + + ts.API.config.Sessions.InactivityTimeout = &inactivityTimeout + ts.API.overrideTime = func() time.Time { + return time.Now().Add(inactivityTimeout).Add(time.Second) + } + + defer func() { + ts.API.config.Sessions.InactivityTimeout = nil + ts.API.overrideTime = nil + }() + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": ts.RefreshToken.Token, + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=refresh_token", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusBadRequest, w.Code) + + var firstResult struct { + ErrorCode string `json:"error_code"` + Message string `json:"msg"` + } + + assert.NoError(ts.T(), json.NewDecoder(w.Result().Body).Decode(&firstResult)) + assert.Equal(ts.T(), ErrorCodeSessionExpired, firstResult.ErrorCode) + assert.Equal(ts.T(), "Invalid Refresh Token: Session Expired (Inactivity)", firstResult.Message) +} + +func (ts *TokenTestSuite) TestFailedToSaveRefreshTokenResultCase() { + var buffer bytes.Buffer + + // first refresh + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": ts.RefreshToken.Token, + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=refresh_token", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusOK, w.Code) + + var firstResult struct { + RefreshToken string `json:"refresh_token"` + } + + assert.NoError(ts.T(), json.NewDecoder(w.Result().Body).Decode(&firstResult)) + assert.NotEmpty(ts.T(), firstResult.RefreshToken) + + // pretend 
that the browser wasn't able to save the firstResult, + // run again with the first refresh token + buffer = bytes.Buffer{} + + // second refresh with the reused refresh token + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": ts.RefreshToken.Token, + })) + + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusOK, w.Code) + + var secondResult struct { + RefreshToken string `json:"refresh_token"` + } + + assert.NoError(ts.T(), json.NewDecoder(w.Result().Body).Decode(&secondResult)) + assert.NotEmpty(ts.T(), secondResult.RefreshToken) + + // new refresh token is not being issued but the active one from + // the first refresh that failed to save is stored + assert.Equal(ts.T(), firstResult.RefreshToken, secondResult.RefreshToken) +} + +func (ts *TokenTestSuite) TestSingleSessionPerUserNoTags() { + ts.API.config.Sessions.SinglePerUser = true + defer func() { + ts.API.config.Sessions.SinglePerUser = false + }() + + firstRefreshToken := ts.RefreshToken + + // just in case to give some delay between first and second session creation + time.Sleep(10 * time.Millisecond) + + secondRefreshToken, err := models.GrantAuthenticatedUser(ts.API.db, ts.User, models.GrantParams{}) + + require.NoError(ts.T(), err) + + require.NotEqual(ts.T(), *firstRefreshToken.SessionId, *secondRefreshToken.SessionId) + require.Equal(ts.T(), firstRefreshToken.UserID, secondRefreshToken.UserID) + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": firstRefreshToken.Token, + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=refresh_token", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusBadRequest, w.Code) + assert.True(ts.T(), ts.API.config.Sessions.SinglePerUser) + + var firstResult struct { + ErrorCode string `json:"error_code"` + Message string `json:"msg"` + } + + assert.NoError(ts.T(), json.NewDecoder(w.Result().Body).Decode(&firstResult)) + assert.Equal(ts.T(), ErrorCodeSessionExpired, firstResult.ErrorCode) + assert.Equal(ts.T(), "Invalid Refresh Token: Session Expired (Revoked by Newer Login)", firstResult.Message) +} + +func (ts *TokenTestSuite) TestRateLimitTokenRefresh() { + var buffer bytes.Buffer + req := httptest.NewRequest(http.MethodPost, "http://localhost/token", &buffer) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("My-Custom-Header", "1.2.3.4") + + // It rate limits after 30 requests + for i := 0; i < 30; i++ { + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusBadRequest, w.Code) + } + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusTooManyRequests, w.Code) + + // It ignores X-Forwarded-For by default + req.Header.Set("X-Forwarded-For", "1.1.1.1") + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusTooManyRequests, w.Code) + + // It doesn't rate limit a new value for the limited header + req = httptest.NewRequest(http.MethodPost, "http://localhost/token", &buffer) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("My-Custom-Header", "5.6.7.8") + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusBadRequest, w.Code) +} + +func (ts *TokenTestSuite) 
TestTokenPasswordGrantSuccess() { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test@example.com", + "password": "password", + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=password", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusOK, w.Code) +} + +func (ts *TokenTestSuite) TestTokenRefreshTokenGrantSuccess() { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": ts.RefreshToken.Token, + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=refresh_token", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusOK, w.Code) +} + +func (ts *TokenTestSuite) TestTokenPasswordGrantFailure() { + u := ts.createBannedUser() + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": u.GetEmail(), + "password": "password", + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=password", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusBadRequest, w.Code) +} + +func (ts *TokenTestSuite) TestTokenPKCEGrantFailure() { + authCode := "1234563" + codeVerifier := "4a9505b9-0857-42bb-ab3c-098b4d28ddc2" + invalidAuthCode := authCode + "123" + invalidVerifier := codeVerifier + "123" + codeChallenge := sha256.Sum256([]byte(codeVerifier)) + challenge := base64.RawURLEncoding.EncodeToString(codeChallenge[:]) + flowState := models.NewFlowState("github", challenge, models.SHA256, models.OAuth, nil) + flowState.AuthCode = authCode + require.NoError(ts.T(), ts.API.db.Create(flowState)) + cases := []struct { + desc string + authCode string + codeVerifier string + grantType string + expectedHTTPCode int + }{ + { + desc: "Invalid Authcode", + authCode: invalidAuthCode, + codeVerifier: codeVerifier, + }, + { + desc: "Invalid code verifier", + authCode: authCode, + codeVerifier: invalidVerifier, + }, + { + desc: "Invalid auth code and verifier", + authCode: invalidAuthCode, + codeVerifier: invalidVerifier, + }, + } + for _, v := range cases { + ts.Run(v.desc, func() { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "code_verifier": v.codeVerifier, + "auth_code": v.authCode, + })) + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=pkce", &buffer) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusNotFound, w.Code) + }) + } +} + +func (ts *TokenTestSuite) TestTokenRefreshTokenGrantFailure() { + _ = ts.createBannedUser() + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": ts.RefreshToken.Token, + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=refresh_token", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusBadRequest, w.Code) +} + +func (ts *TokenTestSuite) 
TestRefreshTokenReuseRevocation() { + originalSecurity := ts.API.config.Security + + ts.API.config.Security.RefreshTokenRotationEnabled = true + ts.API.config.Security.RefreshTokenReuseInterval = 0 + + defer func() { + ts.API.config.Security = originalSecurity + }() + + refreshTokens := []string{ + ts.RefreshToken.Token, + } + + for i := 0; i < 3; i += 1 { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": refreshTokens[len(refreshTokens)-1], + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=refresh_token", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + + assert.Equal(ts.T(), http.StatusOK, w.Code) + + var response struct { + RefreshToken string `json:"refresh_token"` + } + + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&response)) + + refreshTokens = append(refreshTokens, response.RefreshToken) + } + + // ensure that the 4 refresh tokens are setup correctly + for i, refreshToken := range refreshTokens { + _, token, _, err := models.FindUserWithRefreshToken(ts.API.db, refreshToken, false) + require.NoError(ts.T(), err) + + if i == len(refreshTokens)-1 { + require.False(ts.T(), token.Revoked) + } else { + require.True(ts.T(), token.Revoked) + } + } + + // try to reuse the first (earliest) refresh token which should trigger the family revocation logic + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": refreshTokens[0], + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=refresh_token", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + + assert.Equal(ts.T(), http.StatusBadRequest, w.Code) + + var response struct { + ErrorCode string `json:"error_code"` + Message string `json:"msg"` + } + + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&response)) + require.Equal(ts.T(), ErrorCodeRefreshTokenAlreadyUsed, response.ErrorCode) + require.Equal(ts.T(), "Invalid Refresh Token: Already Used", response.Message) + + // ensure that the refresh tokens are marked as revoked in the database + for _, refreshToken := range refreshTokens { + _, token, _, err := models.FindUserWithRefreshToken(ts.API.db, refreshToken, false) + require.NoError(ts.T(), err) + + require.True(ts.T(), token.Revoked) + } + + // finally ensure that none of the refresh tokens can be reused any + // more, starting with the previously valid one + for i := len(refreshTokens) - 1; i >= 0; i -= 1 { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": refreshTokens[i], + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=refresh_token", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + + assert.Equal(ts.T(), http.StatusBadRequest, w.Code, "For refresh token %d", i) + + var response struct { + ErrorCode string `json:"error_code"` + Message string `json:"msg"` + } + + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&response)) + require.Equal(ts.T(), ErrorCodeRefreshTokenAlreadyUsed, response.ErrorCode, "For refresh token %d", i) + require.Equal(ts.T(), "Invalid Refresh Token: Already Used", response.Message, "For refresh token %d", i) + } +} + 
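Note: the test above exercises the reuse-interval rule implemented in RefreshTokenGrant (token_refresh.go): a revoked refresh token is tolerated only while the current time is within RefreshTokenReuseInterval seconds of the token's last update; outside that window the entire token family is revoked. A self-contained sketch of just that decision, with illustrative values:

    package main

    import (
    	"fmt"
    	"time"
    )

    // reuseAllowed mirrors the check in RefreshTokenGrant: reuse of a revoked
    // token is tolerated only while now <= updatedAt + reuseInterval.
    func reuseAllowed(updatedAt time.Time, reuseIntervalSec int, now time.Time) bool {
    	reuseUntil := updatedAt.Add(time.Second * time.Duration(reuseIntervalSec))
    	return !now.After(reuseUntil)
    }

    func main() {
    	updated := time.Now().Add(-5 * time.Second)

    	fmt.Println(reuseAllowed(updated, 10, time.Now())) // true: still inside the interval
    	fmt.Println(reuseAllowed(updated, 0, time.Now()))  // false: interval of 0, as configured in the test
    }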
+func (ts *TokenTestSuite) createBannedUser() *models.User { + u, err := models.NewUser("", "banned@example.com", "password", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating test user model") + t := time.Now() + u.EmailConfirmedAt = &t + t = t.Add(24 * time.Hour) + u.BannedUntil = &t + require.NoError(ts.T(), ts.API.db.Create(u), "Error saving new test banned user") + + ts.RefreshToken, err = models.GrantAuthenticatedUser(ts.API.db, u, models.GrantParams{}) + require.NoError(ts.T(), err, "Error creating refresh token") + + return u +} + +func (ts *TokenTestSuite) TestTokenRefreshWithExpiredSession() { + var err error + + now := time.Now().UTC().Add(-1 * time.Second) + + ts.RefreshToken, err = models.GrantAuthenticatedUser(ts.API.db, ts.User, models.GrantParams{ + SessionNotAfter: &now, + }) + require.NoError(ts.T(), err, "Error creating refresh token") + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": ts.RefreshToken.Token, + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=refresh_token", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusBadRequest, w.Code) +} + +func (ts *TokenTestSuite) TestTokenRefreshWithUnexpiredSession() { + var err error + + now := time.Now().UTC().Add(1 * time.Second) + + ts.RefreshToken, err = models.GrantAuthenticatedUser(ts.API.db, ts.User, models.GrantParams{ + SessionNotAfter: &now, + }) + require.NoError(ts.T(), err, "Error creating refresh token") + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": ts.RefreshToken.Token, + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=refresh_token", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusOK, w.Code) +} + +func (ts *TokenTestSuite) TestMagicLinkPKCESignIn() { + var buffer bytes.Buffer + // Send OTP + codeVerifier := "4a9505b9-0857-42bb-ab3c-098b4d28ddc2" + codeChallenge := sha256.Sum256([]byte(codeVerifier)) + challenge := base64.RawURLEncoding.EncodeToString(codeChallenge[:]) + + req := httptest.NewRequest(http.MethodPost, "/otp", &buffer) + req.Header.Set("Content-Type", "application/json") + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(OtpParams{ + Email: "test@example.com", + CreateUser: true, + CodeChallengeMethod: "s256", + CodeChallenge: challenge, + })) + req = httptest.NewRequest(http.MethodPost, "/otp", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + // Verify OTP + requestUrl := fmt.Sprintf("http://localhost/verify?type=%v&token=%v", "magiclink", u.RecoveryToken) + req = httptest.NewRequest(http.MethodGet, requestUrl, &buffer) + req.Header.Set("Content-Type", "application/json") + + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusSeeOther, w.Code) + rURL, _ := w.Result().Location() + + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + 
assert.True(ts.T(), u.IsConfirmed()) + + f, err := url.ParseQuery(rURL.RawQuery) + require.NoError(ts.T(), err) + authCode := f.Get("code") + assert.NotEmpty(ts.T(), authCode) + // Extract token and sign in + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "code_verifier": codeVerifier, + "auth_code": authCode, + })) + req = httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=pkce", &buffer) + req.Header.Set("Content-Type", "application/json") + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + verifyResp := &AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&verifyResp)) + require.NotEmpty(ts.T(), verifyResp.Token) + +} + +func (ts *TokenTestSuite) TestPasswordVerificationHook() { + type verificationHookTestcase struct { + desc string + uri string + hookFunctionSQL string + expectedCode int + } + cases := []verificationHookTestcase{ + { + desc: "Default success", + uri: "pg-functions://postgres/auth/password_verification_hook", + hookFunctionSQL: ` + create or replace function password_verification_hook(input jsonb) + returns jsonb as $$ + begin + return jsonb_build_object('decision', 'continue'); + end; $$ language plpgsql;`, + expectedCode: http.StatusOK, + }, { + desc: "Reject- Enabled", + uri: "pg-functions://postgres/auth/password_verification_hook_reject", + hookFunctionSQL: ` + create or replace function password_verification_hook_reject(input jsonb) + returns jsonb as $$ + begin + return jsonb_build_object('decision', 'reject', 'message', 'You shall not pass!'); + end; $$ language plpgsql;`, + expectedCode: http.StatusBadRequest, + }, + } + for _, c := range cases { + ts.T().Run(c.desc, func(t *testing.T) { + ts.Config.Hook.PasswordVerificationAttempt.Enabled = true + ts.Config.Hook.PasswordVerificationAttempt.URI = c.uri + require.NoError(ts.T(), ts.Config.Hook.PasswordVerificationAttempt.PopulateExtensibilityPoint()) + + err := ts.API.db.RawQuery(c.hookFunctionSQL).Exec() + require.NoError(t, err) + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test@example.com", + "password": "password", + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=password", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + + assert.Equal(ts.T(), c.expectedCode, w.Code) + cleanupHookSQL := fmt.Sprintf("drop function if exists %s", ts.Config.Hook.PasswordVerificationAttempt.HookName) + require.NoError(ts.T(), ts.API.db.RawQuery(cleanupHookSQL).Exec()) + // Reset so it doesn't affect other tests + ts.Config.Hook.PasswordVerificationAttempt.Enabled = false + + }) + } + +} + +func (ts *TokenTestSuite) TestCustomAccessToken() { + type customAccessTokenTestcase struct { + desc string + uri string + hookFunctionSQL string + expectedClaims map[string]interface{} + shouldError bool + } + cases := []customAccessTokenTestcase{ + { + desc: "Add a new claim", + uri: "pg-functions://postgres/auth/custom_access_token_add_claim", + hookFunctionSQL: ` create or replace function custom_access_token_add_claim(input jsonb) returns jsonb as $$ declare result jsonb; begin if jsonb_typeof(jsonb_object_field(input, 'claims')) is null then result := jsonb_build_object('error', jsonb_build_object('http_code', 400, 'message', 'Input does not contain claims field')); return result; end if; + input := 
jsonb_set(input, '{claims,newclaim}', '"newvalue"', true); + result := jsonb_build_object('claims', input->'claims'); + return result; +end; $$ language plpgsql;`, + expectedClaims: map[string]interface{}{ + "newclaim": "newvalue", + }, + }, { + desc: "Delete the Role claim", + uri: "pg-functions://postgres/auth/custom_access_token_delete_claim", + hookFunctionSQL: ` +create or replace function custom_access_token_delete_claim(input jsonb) +returns jsonb as $$ +declare + result jsonb; +begin + input := jsonb_set(input, '{claims}', (input->'claims') - 'role'); + result := jsonb_build_object('claims', input->'claims'); + return result; +end; $$ language plpgsql;`, + expectedClaims: map[string]interface{}{}, + shouldError: true, + }, { + desc: "Delete a non-required claim (UserMetadata)", + uri: "pg-functions://postgres/auth/custom_access_token_delete_usermetadata", + hookFunctionSQL: ` +create or replace function custom_access_token_delete_usermetadata(input jsonb) +returns jsonb as $$ +declare + result jsonb; +begin + input := jsonb_set(input, '{claims}', (input->'claims') - 'user_metadata'); + result := jsonb_build_object('claims', input->'claims'); + return result; +end; $$ language plpgsql;`, + // Not used + expectedClaims: map[string]interface{}{ + "user_metadata": nil, + }, + shouldError: false, + }, + } + for _, c := range cases { + ts.T().Run(c.desc, func(t *testing.T) { + ts.Config.Hook.CustomAccessToken.Enabled = true + ts.Config.Hook.CustomAccessToken.URI = c.uri + require.NoError(t, ts.Config.Hook.CustomAccessToken.PopulateExtensibilityPoint()) + + err := ts.API.db.RawQuery(c.hookFunctionSQL).Exec() + require.NoError(t, err) + + var buffer bytes.Buffer + require.NoError(t, json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": ts.RefreshToken.Token, + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=refresh_token", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + + var tokenResponse struct { + AccessToken string `json:"access_token"` + } + require.NoError(t, json.NewDecoder(w.Result().Body).Decode(&tokenResponse)) + if c.shouldError { + require.Equal(t, http.StatusInternalServerError, w.Code) + } else { + parts := strings.Split(tokenResponse.AccessToken, ".") + require.Equal(t, 3, len(parts), "Token should have 3 parts") + + payload, err := base64.RawURLEncoding.DecodeString(parts[1]) + require.NoError(t, err) + + var responseClaims map[string]interface{} + require.NoError(t, json.Unmarshal(payload, &responseClaims)) + + for key, expectedValue := range c.expectedClaims { + if expectedValue == nil { + // Since c.shouldError is false here, we only need to check if the claim should be removed + _, exists := responseClaims[key] + assert.False(t, exists, "Claim should be removed") + } else { + assert.Equal(t, expectedValue, responseClaims[key]) + } + } + } + + cleanupHookSQL := fmt.Sprintf("drop function if exists %s", ts.Config.Hook.CustomAccessToken.HookName) + require.NoError(t, ts.API.db.RawQuery(cleanupHookSQL).Exec()) + ts.Config.Hook.CustomAccessToken.Enabled = false + }) + } +} + +func (ts *TokenTestSuite) TestAllowSelectAuthenticationMethods() { + + companyUser, err := models.NewUser("12345678", "test@company.com", "password", ts.Config.JWT.Aud, nil) + t := time.Now() + companyUser.EmailConfirmedAt = &t + require.NoError(ts.T(), err, "Error creating test user model") + require.NoError(ts.T(), ts.API.db.Create(companyUser), "Error saving 
new test user") + + type allowSelectAuthMethodsTestcase struct { + desc string + uri string + email string + expectedError string + expectedStatus int + } + + // Common hook function SQL definition + hookFunctionSQL := ` +create or replace function auth.custom_access_token(event jsonb) returns jsonb language plpgsql as $$ +declare + email_claim text; + authentication_method text; +begin + email_claim := event->'claims'->>'email'; + authentication_method := event->>'authentication_method'; + + if authentication_method = 'password' and email_claim not like '%@company.com' then + return jsonb_build_object( + 'error', jsonb_build_object( + 'http_code', 403, + 'message', 'only members on company.com can access with password authentication' + ) + ); + end if; + + return event; +end; +$$;` + + cases := []allowSelectAuthMethodsTestcase{ + { + desc: "Error for non-protected domain with password authentication", + uri: "pg-functions://postgres/auth/custom_access_token", + email: "test@example.com", + expectedError: "only members on company.com can access with password authentication", + expectedStatus: http.StatusForbidden, + }, + { + desc: "Allow access for protected domain with password authentication", + uri: "pg-functions://postgres/auth/custom_access_token", + email: companyUser.Email.String(), + expectedError: "", + expectedStatus: http.StatusOK, + }, + } + + for _, c := range cases { + ts.T().Run(c.desc, func(t *testing.T) { + // Enable and set up the custom access token hook + ts.Config.Hook.CustomAccessToken.Enabled = true + ts.Config.Hook.CustomAccessToken.URI = c.uri + require.NoError(t, ts.Config.Hook.CustomAccessToken.PopulateExtensibilityPoint()) + + // Execute the common hook function SQL + err := ts.API.db.RawQuery(hookFunctionSQL).Exec() + require.NoError(t, err) + + var buffer bytes.Buffer + + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": c.email, + "password": "password", + })) + + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=password", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + + require.Equal(t, c.expectedStatus, w.Code, "Unexpected HTTP status code") + if c.expectedError != "" { + require.Contains(t, w.Body.String(), c.expectedError, "Expected error message not found") + } else { + require.NotContains(t, w.Body.String(), "error", "Unexpected error occurred") + } + + // Delete the function and cleanup + cleanupHookSQL := fmt.Sprintf("drop function if exists %s", ts.Config.Hook.CustomAccessToken.HookName) + require.NoError(t, ts.API.db.RawQuery(cleanupHookSQL).Exec()) + ts.Config.Hook.CustomAccessToken.Enabled = false + }) + } +} diff --git a/auth_v2.169.0/internal/api/user.go b/auth_v2.169.0/internal/api/user.go new file mode 100644 index 0000000..8588ce3 --- /dev/null +++ b/auth_v2.169.0/internal/api/user.go @@ -0,0 +1,266 @@ +package api + +import ( + "context" + "net/http" + "time" + + "github.com/gofrs/uuid" + "github.com/supabase/auth/internal/api/sms_provider" + "github.com/supabase/auth/internal/mailer" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/storage" +) + +// UserUpdateParams parameters for updating a user +type UserUpdateParams struct { + Email string `json:"email"` + Password *string `json:"password"` + Nonce string `json:"nonce"` + Data map[string]interface{} `json:"data"` + AppData map[string]interface{} `json:"app_metadata,omitempty"` + Phone string 
`json:"phone"`
+	Channel             string                 `json:"channel"`
+	CodeChallenge       string                 `json:"code_challenge"`
+	CodeChallengeMethod string                 `json:"code_challenge_method"`
+}
+
+func (a *API) validateUserUpdateParams(ctx context.Context, p *UserUpdateParams) error {
+	config := a.config
+
+	var err error
+	if p.Email != "" {
+		p.Email, err = a.validateEmail(p.Email)
+		if err != nil {
+			return err
+		}
+	}
+
+	if p.Phone != "" {
+		if p.Phone, err = validatePhone(p.Phone); err != nil {
+			return err
+		}
+		if p.Channel == "" {
+			p.Channel = sms_provider.SMSProvider
+		}
+		if !sms_provider.IsValidMessageChannel(p.Channel, config) {
+			return badRequestError(ErrorCodeValidationFailed, InvalidChannelError)
+		}
+	}
+
+	if p.Password != nil {
+		if err := a.checkPasswordStrength(ctx, *p.Password); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// UserGet returns a user
+func (a *API) UserGet(w http.ResponseWriter, r *http.Request) error {
+	ctx := r.Context()
+	claims := getClaims(ctx)
+	if claims == nil {
+		return internalServerError("Could not read claims")
+	}
+
+	aud := a.requestAud(ctx, r)
+	audienceFromClaims, _ := claims.GetAudience()
+	if len(audienceFromClaims) == 0 || aud != audienceFromClaims[0] {
+		return badRequestError(ErrorCodeValidationFailed, "Token audience doesn't match request audience")
+	}
+
+	user := getUser(ctx)
+	return sendJSON(w, http.StatusOK, user)
+}
+
+// UserUpdate updates fields on a user
+func (a *API) UserUpdate(w http.ResponseWriter, r *http.Request) error {
+	ctx := r.Context()
+	db := a.db.WithContext(ctx)
+	config := a.config
+	aud := a.requestAud(ctx, r)
+
+	params := &UserUpdateParams{}
+	if err := retrieveRequestParams(r, params); err != nil {
+		return err
+	}
+
+	user := getUser(ctx)
+	session := getSession(ctx)
+
+	if err := a.validateUserUpdateParams(ctx, params); err != nil {
+		return err
+	}
+
+	if params.AppData != nil && !isAdmin(user, config) {
+		return forbiddenError(ErrorCodeNotAdmin, "Updating app_metadata requires admin privileges")
+	}
+
+	if user.HasMFAEnabled() && !session.IsAAL2() {
+		if (params.Password != nil && *params.Password != "") || (params.Email != "" && user.GetEmail() != params.Email) || (params.Phone != "" && user.GetPhone() != params.Phone) {
+			return httpError(http.StatusUnauthorized, ErrorCodeInsufficientAAL, "AAL2 session is required to update email or password when MFA is enabled.")
+		}
+	}
+
+	if user.IsAnonymous {
+		if params.Password != nil && *params.Password != "" {
+			if params.Email == "" && params.Phone == "" {
+				return unprocessableEntityError(ErrorCodeValidationFailed, "Updating password of an anonymous user without an email or phone is not allowed")
+			}
+		}
+	}
+
+	if user.IsSSOUser {
+		updatingForbiddenFields := false
+
+		updatingForbiddenFields = updatingForbiddenFields || (params.Password != nil && *params.Password != "")
+		updatingForbiddenFields = updatingForbiddenFields || (params.Email != "" && params.Email != user.GetEmail())
+		updatingForbiddenFields = updatingForbiddenFields || (params.Phone != "" && params.Phone != user.GetPhone())
+		updatingForbiddenFields = updatingForbiddenFields || (params.Nonce != "")
+
+		if updatingForbiddenFields {
+			return unprocessableEntityError(ErrorCodeUserSSOManaged, "Updating email, phone, password of a SSO account only possible via SSO")
+		}
+	}
+
+	if params.Email != "" && user.GetEmail() != params.Email {
+		if duplicateUser, err := models.IsDuplicatedEmail(db, params.Email, aud, user); err != nil {
+			return internalServerError("Database error 
checking email").WithInternalError(err) + } else if duplicateUser != nil { + return unprocessableEntityError(ErrorCodeEmailExists, DuplicateEmailMsg) + } + } + + if params.Phone != "" && user.GetPhone() != params.Phone { + if exists, err := models.IsDuplicatedPhone(db, params.Phone, aud); err != nil { + return internalServerError("Database error checking phone").WithInternalError(err) + } else if exists { + return unprocessableEntityError(ErrorCodePhoneExists, DuplicatePhoneMsg) + } + } + + if params.Password != nil { + if config.Security.UpdatePasswordRequireReauthentication { + now := time.Now() + // we require reauthentication if the user hasn't signed in recently in the current session + if session == nil || now.After(session.CreatedAt.Add(24*time.Hour)) { + if len(params.Nonce) == 0 { + return badRequestError(ErrorCodeReauthenticationNeeded, "Password update requires reauthentication") + } + if err := a.verifyReauthentication(params.Nonce, db, config, user); err != nil { + return err + } + } + } + + password := *params.Password + if password != "" { + isSamePassword := false + + if user.HasPassword() { + auth, _, err := user.Authenticate(ctx, db, password, config.Security.DBEncryption.DecryptionKeys, false, "") + if err != nil { + return err + } + + isSamePassword = auth + } + + if isSamePassword { + return unprocessableEntityError(ErrorCodeSamePassword, "New password should be different from the old password.") + } + } + + if err := user.SetPassword(ctx, password, config.Security.DBEncryption.Encrypt, config.Security.DBEncryption.EncryptionKeyID, config.Security.DBEncryption.EncryptionKey); err != nil { + return err + } + } + + err := db.Transaction(func(tx *storage.Connection) error { + var terr error + if params.Password != nil { + var sessionID *uuid.UUID + if session != nil { + sessionID = &session.ID + } + + if terr = user.UpdatePassword(tx, sessionID); terr != nil { + return internalServerError("Error during password storage").WithInternalError(terr) + } + + if terr := models.NewAuditLogEntry(r, tx, user, models.UserUpdatePasswordAction, "", nil); terr != nil { + return terr + } + } + + if params.Data != nil { + if terr = user.UpdateUserMetaData(tx, params.Data); terr != nil { + return internalServerError("Error updating user").WithInternalError(terr) + } + } + + if params.AppData != nil { + if terr = user.UpdateAppMetaData(tx, params.AppData); terr != nil { + return internalServerError("Error updating user").WithInternalError(terr) + } + } + + if params.Email != "" && params.Email != user.GetEmail() { + if user.IsAnonymous && config.Mailer.Autoconfirm { + // anonymous users can add an email with automatic confirmation, which is similar to signing up + // permanent users always need to verify their email address when changing it + user.EmailChange = params.Email + if _, terr := a.emailChangeVerify(r, tx, &VerifyParams{ + Type: mailer.EmailChangeVerification, + Email: params.Email, + }, user); terr != nil { + return terr + } + + } else { + flowType := getFlowFromChallenge(params.CodeChallenge) + if isPKCEFlow(flowType) { + _, terr := generateFlowState(tx, models.EmailChange.String(), models.EmailChange, params.CodeChallengeMethod, params.CodeChallenge, &user.ID) + if terr != nil { + return terr + } + + } + if terr = a.sendEmailChange(r, tx, user, params.Email, flowType); terr != nil { + return terr + } + } + } + + if params.Phone != "" && params.Phone != user.GetPhone() { + if config.Sms.Autoconfirm { + user.PhoneChange = params.Phone + if _, terr := a.smsVerify(r, tx, user, 
&VerifyParams{ + Type: phoneChangeVerification, + Phone: params.Phone, + }); terr != nil { + return terr + } + } else { + if _, terr := a.sendPhoneConfirmation(r, tx, user, params.Phone, phoneChangeVerification, params.Channel); terr != nil { + return terr + } + } + } + + if terr = models.NewAuditLogEntry(r, tx, user, models.UserModifiedAction, "", nil); terr != nil { + return internalServerError("Error recording audit log entry").WithInternalError(terr) + } + + return nil + }) + if err != nil { + return err + } + + return sendJSON(w, http.StatusOK, user) +} diff --git a/auth_v2.169.0/internal/api/user_test.go b/auth_v2.169.0/internal/api/user_test.go new file mode 100644 index 0000000..ed6c585 --- /dev/null +++ b/auth_v2.169.0/internal/api/user_test.go @@ -0,0 +1,558 @@ +package api + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/gofrs/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/crypto" + "github.com/supabase/auth/internal/models" +) + +type UserTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration +} + +func TestUser(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := &UserTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *UserTestSuite) SetupTest() { + models.TruncateAll(ts.API.db) + + // Create user + u, err := models.NewUser("123456789", "test@example.com", "password", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating test user model") + require.NoError(ts.T(), ts.API.db.Create(u), "Error saving new test user") +} + +func (ts *UserTestSuite) generateToken(user *models.User, sessionId *uuid.UUID) string { + req := httptest.NewRequest(http.MethodPost, "/token?grant_type=password", nil) + token, _, err := ts.API.generateAccessToken(req, ts.API.db, user, sessionId, models.PasswordGrant) + require.NoError(ts.T(), err, "Error generating access token") + return token +} + +func (ts *UserTestSuite) generateAccessTokenAndSession(user *models.User) string { + session, err := models.NewSession(user.ID, nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.API.db.Create(session)) + + req := httptest.NewRequest(http.MethodPost, "/token?grant_type=password", nil) + token, _, err := ts.API.generateAccessToken(req, ts.API.db, user, &session.ID, models.PasswordGrant) + require.NoError(ts.T(), err, "Error generating access token") + return token +} + +func (ts *UserTestSuite) TestUserGet() { + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err, "Error finding user") + token := ts.generateAccessTokenAndSession(u) + + require.NoError(ts.T(), err, "Error generating access token") + + req := httptest.NewRequest(http.MethodGet, "http://localhost/user", nil) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) +} + +func (ts *UserTestSuite) TestUserUpdateEmail() { + cases := []struct { + desc string + userData map[string]interface{} + isSecureEmailChangeEnabled bool + isMailerAutoconfirmEnabled bool + expectedCode int + }{ + { + desc: "User doesn't have an existing email", + userData: map[string]interface{}{ + "email": "", + "phone": "", + }, + 
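+			// The two booleans below are copied into
+			// ts.Config.Mailer.SecureEmailChangeEnabled and
+			// ts.Config.Mailer.Autoconfirm just before ServeHTTP further
+			// down, so each case exercises one combination of secure email
+			// change and mailer autoconfirmation.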
isSecureEmailChangeEnabled: false, + isMailerAutoconfirmEnabled: false, + expectedCode: http.StatusOK, + }, + { + desc: "User doesn't have an existing email and double email confirmation required", + userData: map[string]interface{}{ + "email": "", + "phone": "234567890", + }, + isSecureEmailChangeEnabled: true, + isMailerAutoconfirmEnabled: false, + expectedCode: http.StatusOK, + }, + { + desc: "User has an existing email", + userData: map[string]interface{}{ + "email": "foo@example.com", + "phone": "", + }, + isSecureEmailChangeEnabled: false, + isMailerAutoconfirmEnabled: false, + expectedCode: http.StatusOK, + }, + { + desc: "User has an existing email and double email confirmation required", + userData: map[string]interface{}{ + "email": "bar@example.com", + "phone": "", + }, + isSecureEmailChangeEnabled: true, + isMailerAutoconfirmEnabled: false, + expectedCode: http.StatusOK, + }, + { + desc: "Update email with mailer autoconfirm enabled", + userData: map[string]interface{}{ + "email": "bar@example.com", + "phone": "", + }, + isSecureEmailChangeEnabled: true, + isMailerAutoconfirmEnabled: true, + expectedCode: http.StatusOK, + }, + { + desc: "Update email with mailer autoconfirm enabled and anonymous user", + userData: map[string]interface{}{ + "email": "bar@example.com", + "phone": "", + "is_anonymous": true, + }, + isSecureEmailChangeEnabled: true, + isMailerAutoconfirmEnabled: true, + expectedCode: http.StatusOK, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + u, err := models.NewUser("", "", "", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating test user model") + require.NoError(ts.T(), u.SetEmail(ts.API.db, c.userData["email"].(string)), "Error setting user email") + require.NoError(ts.T(), u.SetPhone(ts.API.db, c.userData["phone"].(string)), "Error setting user phone") + if isAnonymous, ok := c.userData["is_anonymous"]; ok { + u.IsAnonymous = isAnonymous.(bool) + } + require.NoError(ts.T(), ts.API.db.Create(u), "Error saving test user") + + token := ts.generateAccessTokenAndSession(u) + + require.NoError(ts.T(), err, "Error generating access token") + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "new@example.com", + })) + req := httptest.NewRequest(http.MethodPut, "http://localhost/user", &buffer) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + w := httptest.NewRecorder() + ts.Config.Mailer.SecureEmailChangeEnabled = c.isSecureEmailChangeEnabled + ts.Config.Mailer.Autoconfirm = c.isMailerAutoconfirmEnabled + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), c.expectedCode, w.Code) + + var data models.User + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data)) + + if c.isMailerAutoconfirmEnabled && u.IsAnonymous { + require.Empty(ts.T(), data.EmailChange) + require.Equal(ts.T(), "new@example.com", data.GetEmail()) + require.Len(ts.T(), data.Identities, 1) + } else { + require.Equal(ts.T(), "new@example.com", data.EmailChange) + require.Len(ts.T(), data.Identities, 0) + } + + // remove user after each case + require.NoError(ts.T(), ts.API.db.Destroy(u)) + }) + } + +} +func (ts *UserTestSuite) TestUserUpdatePhoneAutoconfirmEnabled() { + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + existingUser, err := models.NewUser("22222222", "", "", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err) + 
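+	// existingUser exists only to trigger the duplicate-phone branch below:
+	// with Sms.Autoconfirm on, updating to the already-registered number
+	// "22222222" must return 422, while re-submitting the current number or
+	// switching to a brand-new one returns 200.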
require.NoError(ts.T(), ts.API.db.Create(existingUser))
+
+	cases := []struct {
+		desc         string
+		userData     map[string]string
+		expectedCode int
+	}{
+		{
+			desc: "New phone number is the same as current phone number",
+			userData: map[string]string{
+				"phone": "123456789",
+			},
+			expectedCode: http.StatusOK,
+		},
+		{
+			desc: "New phone number exists already",
+			userData: map[string]string{
+				"phone": "22222222",
+			},
+			expectedCode: http.StatusUnprocessableEntity,
+		},
+		{
+			desc: "New phone number is different from current phone number",
+			userData: map[string]string{
+				"phone": "234567890",
+			},
+			expectedCode: http.StatusOK,
+		},
+	}
+
+	ts.Config.Sms.Autoconfirm = true
+
+	for _, c := range cases {
+		ts.Run(c.desc, func() {
+			token := ts.generateAccessTokenAndSession(u)
+			require.NoError(ts.T(), err, "Error generating access token")
+
+			var buffer bytes.Buffer
+			require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{
+				"phone": c.userData["phone"],
+			}))
+			req := httptest.NewRequest(http.MethodPut, "http://localhost/user", &buffer)
+			req.Header.Set("Content-Type", "application/json")
+			req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
+
+			w := httptest.NewRecorder()
+			ts.API.handler.ServeHTTP(w, req)
+			require.Equal(ts.T(), c.expectedCode, w.Code)
+
+			if c.expectedCode == http.StatusOK {
+				// check that the user response returned contains the updated phone field
+				data := &models.User{}
+				require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&data))
+				require.Equal(ts.T(), data.GetPhone(), c.userData["phone"])
+			}
+		})
+	}
+}
+
+func (ts *UserTestSuite) TestUserUpdatePassword() {
+	u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud)
+	require.NoError(ts.T(), err)
+
+	r, err := models.GrantAuthenticatedUser(ts.API.db, u, models.GrantParams{})
+	require.NoError(ts.T(), err)
+
+	r2, err := models.GrantAuthenticatedUser(ts.API.db, u, models.GrantParams{})
+	require.NoError(ts.T(), err)
+
+	// create a session and modify its created_at time to simulate a session that is not recently logged in
+	notRecentlyLoggedIn, err := models.FindSessionByID(ts.API.db, *r2.SessionId, true)
+	require.NoError(ts.T(), err)
+
+	// cannot use Update here because Update doesn't modify the created_at field
+	require.NoError(ts.T(), ts.API.db.RawQuery(
+		"update "+notRecentlyLoggedIn.TableName()+" set created_at = ? 
where id = ?", + time.Now().Add(-24*time.Hour), + notRecentlyLoggedIn.ID).Exec(), + ) + + type expected struct { + code int + isAuthenticated bool + } + + var cases = []struct { + desc string + newPassword string + nonce string + requireReauthentication bool + sessionId *uuid.UUID + expected expected + }{ + { + desc: "Need reauthentication because outside of recently logged in window", + newPassword: "newpassword123", + nonce: "", + requireReauthentication: true, + sessionId: ¬RecentlyLoggedIn.ID, + expected: expected{code: http.StatusBadRequest, isAuthenticated: false}, + }, + { + desc: "No nonce provided", + newPassword: "newpassword123", + nonce: "", + sessionId: ¬RecentlyLoggedIn.ID, + requireReauthentication: true, + expected: expected{code: http.StatusBadRequest, isAuthenticated: false}, + }, + { + desc: "Invalid nonce", + newPassword: "newpassword1234", + nonce: "123456", + sessionId: ¬RecentlyLoggedIn.ID, + requireReauthentication: true, + expected: expected{code: http.StatusUnprocessableEntity, isAuthenticated: false}, + }, + { + desc: "No need reauthentication because recently logged in", + newPassword: "newpassword123", + nonce: "", + requireReauthentication: true, + sessionId: r.SessionId, + expected: expected{code: http.StatusOK, isAuthenticated: true}, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + ts.Config.Security.UpdatePasswordRequireReauthentication = c.requireReauthentication + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]string{"password": c.newPassword, "nonce": c.nonce})) + + req := httptest.NewRequest(http.MethodPut, "http://localhost/user", &buffer) + req.Header.Set("Content-Type", "application/json") + token := ts.generateToken(u, c.sessionId) + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + // Setup response recorder + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), c.expected.code, w.Code) + + // Request body + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + isAuthenticated, _, err := u.Authenticate(context.Background(), ts.API.db, c.newPassword, ts.API.config.Security.DBEncryption.DecryptionKeys, ts.API.config.Security.DBEncryption.Encrypt, ts.API.config.Security.DBEncryption.EncryptionKeyID) + require.NoError(ts.T(), err) + + require.Equal(ts.T(), c.expected.isAuthenticated, isAuthenticated) + }) + } +} + +func (ts *UserTestSuite) TestUserUpdatePasswordNoReauthenticationRequired() { + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + type expected struct { + code int + isAuthenticated bool + } + + var cases = []struct { + desc string + newPassword string + nonce string + requireReauthentication bool + expected expected + }{ + { + desc: "Invalid password length", + newPassword: "", + nonce: "", + requireReauthentication: false, + expected: expected{code: http.StatusUnprocessableEntity, isAuthenticated: false}, + }, + + { + desc: "Valid password length", + newPassword: "newpassword", + nonce: "", + requireReauthentication: false, + expected: expected{code: http.StatusOK, isAuthenticated: true}, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + ts.Config.Security.UpdatePasswordRequireReauthentication = c.requireReauthentication + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]string{"password": c.newPassword, 
"nonce": c.nonce})) + + req := httptest.NewRequest(http.MethodPut, "http://localhost/user", &buffer) + req.Header.Set("Content-Type", "application/json") + token := ts.generateAccessTokenAndSession(u) + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + // Setup response recorder + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), c.expected.code, w.Code) + + // Request body + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + isAuthenticated, _, err := u.Authenticate(context.Background(), ts.API.db, c.newPassword, ts.API.config.Security.DBEncryption.DecryptionKeys, ts.API.config.Security.DBEncryption.Encrypt, ts.API.config.Security.DBEncryption.EncryptionKeyID) + require.NoError(ts.T(), err) + + require.Equal(ts.T(), c.expected.isAuthenticated, isAuthenticated) + }) + } +} + +func (ts *UserTestSuite) TestUserUpdatePasswordReauthentication() { + ts.Config.Security.UpdatePasswordRequireReauthentication = true + + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + // Confirm the test user + now := time.Now() + u.EmailConfirmedAt = &now + require.NoError(ts.T(), ts.API.db.Update(u), "Error updating new test user") + + token := ts.generateAccessTokenAndSession(u) + + // request for reauthentication nonce + req := httptest.NewRequest(http.MethodGet, "http://localhost/reauthenticate", nil) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), w.Code, http.StatusOK) + + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + require.NotEmpty(ts.T(), u.ReauthenticationToken) + require.NotEmpty(ts.T(), u.ReauthenticationSentAt) + + // update reauthentication token to a known token + u.ReauthenticationToken = crypto.GenerateTokenHash(u.GetEmail(), "123456") + require.NoError(ts.T(), ts.API.db.Update(u)) + + // update password with reauthentication token + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "password": "newpass", + "nonce": "123456", + })) + + req = httptest.NewRequest(http.MethodPut, "http://localhost/user", &buffer) + req.Header.Set("Content-Type", "application/json") + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), w.Code, http.StatusOK) + + // Request body + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + isAuthenticated, _, err := u.Authenticate(context.Background(), ts.API.db, "newpass", ts.Config.Security.DBEncryption.DecryptionKeys, ts.Config.Security.DBEncryption.Encrypt, ts.Config.Security.DBEncryption.EncryptionKeyID) + require.NoError(ts.T(), err) + + require.True(ts.T(), isAuthenticated) + require.Empty(ts.T(), u.ReauthenticationToken) + require.Nil(ts.T(), u.ReauthenticationSentAt) +} + +func (ts *UserTestSuite) TestUserUpdatePasswordLogoutOtherSessions() { + ts.Config.Security.UpdatePasswordRequireReauthentication = false + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + // Confirm the test user + now := time.Now() + u.EmailConfirmedAt = &now + require.NoError(ts.T(), 
ts.API.db.Update(u), "Error updating new test user") + + // Login the test user to get first session + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": u.GetEmail(), + "password": "password", + })) + req := httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=password", &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + session1 := AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&session1)) + + // Login test user to get second session + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": u.GetEmail(), + "password": "password", + })) + req = httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=password", &buffer) + req.Header.Set("Content-Type", "application/json") + + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + session2 := AccessTokenResponse{} + require.NoError(ts.T(), json.NewDecoder(w.Body).Decode(&session2)) + + // Update user's password using first session + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "password": "newpass", + })) + + req = httptest.NewRequest(http.MethodPut, "http://localhost/user", &buffer) + req.Header.Set("Content-Type", "application/json") + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", session1.Token)) + + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + // Attempt to refresh session1 should pass + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": session1.RefreshToken, + })) + + req = httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=refresh_token", &buffer) + req.Header.Set("Content-Type", "application/json") + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusOK, w.Code) + + // Attempt to refresh session2 should fail + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "refresh_token": session2.RefreshToken, + })) + + req = httptest.NewRequest(http.MethodPost, "http://localhost/token?grant_type=refresh_token", &buffer) + req.Header.Set("Content-Type", "application/json") + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.NotEqual(ts.T(), http.StatusOK, w.Code) +} diff --git a/auth_v2.169.0/internal/api/verify.go b/auth_v2.169.0/internal/api/verify.go new file mode 100644 index 0000000..b42f5a5 --- /dev/null +++ b/auth_v2.169.0/internal/api/verify.go @@ -0,0 +1,749 @@ +package api + +import ( + "context" + "errors" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/fatih/structs" + "github.com/sethvargo/go-password/password" + "github.com/supabase/auth/internal/api/provider" + "github.com/supabase/auth/internal/api/sms_provider" + "github.com/supabase/auth/internal/crypto" + mail "github.com/supabase/auth/internal/mailer" + "github.com/supabase/auth/internal/models" + "github.com/supabase/auth/internal/observability" + "github.com/supabase/auth/internal/storage" + "github.com/supabase/auth/internal/utilities" +) + +const ( + smsVerification = "sms" + phoneChangeVerification = "phone_change" + // includes signupVerification and magicLinkVerification +) + +const ( + zeroConfirmation int = iota + singleConfirmation +) + 
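+// With secure email change enabled, EmailChangeConfirmStatus advances from
+// zeroConfirmation to singleConfirmation as each of the two links (sent to
+// the current and the new address) is verified; the change is applied only
+// once both have been confirmed. A rough client-side sketch, assuming a
+// hypothetical helper verify(type, tokenHash) that POSTs to /verify
+// (illustrative only, not part of this package):
+//
+//	verify("email_change", tokenFromNewAddress)     // 200, "confirm the other link"
+//	verify("email_change", tokenFromCurrentAddress) // 200, change applied, session issued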
+// Only applicable when SECURE_EMAIL_CHANGE_ENABLED +const singleConfirmationAccepted = "Confirmation link accepted. Please proceed to confirm link sent to the other email" + +// VerifyParams are the parameters the Verify endpoint accepts +type VerifyParams struct { + Type string `json:"type"` + Token string `json:"token"` + TokenHash string `json:"token_hash"` + Email string `json:"email"` + Phone string `json:"phone"` + RedirectTo string `json:"redirect_to"` +} + +func (p *VerifyParams) Validate(r *http.Request, a *API) error { + var err error + if p.Type == "" { + return badRequestError(ErrorCodeValidationFailed, "Verify requires a verification type") + } + switch r.Method { + case http.MethodGet: + if p.Token == "" { + return badRequestError(ErrorCodeValidationFailed, "Verify requires a token or a token hash") + } + // TODO: deprecate the token query param from GET /verify and use token_hash instead (breaking change) + p.TokenHash = p.Token + case http.MethodPost: + if (p.Token == "" && p.TokenHash == "") || (p.Token != "" && p.TokenHash != "") { + return badRequestError(ErrorCodeValidationFailed, "Verify requires either a token or a token hash") + } + if p.Token != "" { + if isPhoneOtpVerification(p) { + p.Phone, err = validatePhone(p.Phone) + if err != nil { + return err + } + p.TokenHash = crypto.GenerateTokenHash(p.Phone, p.Token) + } else if isEmailOtpVerification(p) { + p.Email, err = a.validateEmail(p.Email) + if err != nil { + return unprocessableEntityError(ErrorCodeValidationFailed, "Invalid email format").WithInternalError(err) + } + p.TokenHash = crypto.GenerateTokenHash(p.Email, p.Token) + } else { + return badRequestError(ErrorCodeValidationFailed, "Only an email address or phone number should be provided on verify") + } + } else if p.TokenHash != "" { + if p.Email != "" || p.Phone != "" || p.RedirectTo != "" { + return badRequestError(ErrorCodeValidationFailed, "Only the token_hash and type should be provided") + } + } + default: + return nil + } + return nil +} + +// Verify exchanges a confirmation or recovery token to a refresh token +func (a *API) Verify(w http.ResponseWriter, r *http.Request) error { + params := &VerifyParams{} + switch r.Method { + case http.MethodGet: + params.Token = r.FormValue("token") + params.Type = r.FormValue("type") + params.RedirectTo = utilities.GetReferrer(r, a.config) + if err := params.Validate(r, a); err != nil { + return err + } + return a.verifyGet(w, r, params) + case http.MethodPost: + if err := retrieveRequestParams(r, params); err != nil { + return err + } + if err := params.Validate(r, a); err != nil { + return err + } + return a.verifyPost(w, r, params) + default: + // this should have been handled by Chi + panic("Only GET and POST methods allowed") + } +} + +func (a *API) verifyGet(w http.ResponseWriter, r *http.Request, params *VerifyParams) error { + ctx := r.Context() + db := a.db.WithContext(ctx) + + var ( + user *models.User + grantParams models.GrantParams + err error + token *AccessTokenResponse + authCode string + rurl string + ) + + grantParams.FillGrantParams(r) + + flowType := models.ImplicitFlow + var authenticationMethod models.AuthenticationMethod + if strings.HasPrefix(params.Token, PKCEPrefix) { + flowType = models.PKCEFlow + authenticationMethod, err = models.ParseAuthenticationMethod(params.Type) + if err != nil { + return err + } + } + + err = db.Transaction(func(tx *storage.Connection) error { + var terr error + user, terr = a.verifyTokenHash(tx, params) + if terr != nil { + return terr + } + switch 
params.Type {
+		case mail.SignupVerification, mail.InviteVerification:
+			user, terr = a.signupVerify(r, ctx, tx, user)
+		case mail.RecoveryVerification, mail.MagicLinkVerification:
+			user, terr = a.recoverVerify(r, tx, user)
+		case mail.EmailChangeVerification:
+			user, terr = a.emailChangeVerify(r, tx, params, user)
+			if user == nil && terr == nil {
+				// only one OTP is confirmed at this point, so we return early and ask the user to confirm the second OTP
+				rurl, terr = a.prepRedirectURL(singleConfirmationAccepted, params.RedirectTo, flowType)
+				if terr != nil {
+					return terr
+				}
+				return nil
+			}
+		default:
+			return badRequestError(ErrorCodeValidationFailed, "Unsupported verification type")
+		}
+
+		if terr != nil {
+			return terr
+		}
+
+		if terr := user.UpdateAppMetaDataProviders(tx); terr != nil {
+			return terr
+		}
+
+		// Reload user model from db.
+		// This is important for refreshing the data in any generated columns like IsAnonymous.
+		if terr := tx.Reload(user); terr != nil {
+			return terr
+		}
+
+		if isImplicitFlow(flowType) {
+			token, terr = a.issueRefreshToken(r, tx, user, models.OTP, grantParams)
+			if terr != nil {
+				return terr
+			}
+		} else if isPKCEFlow(flowType) {
+			if authCode, terr = issueAuthCode(tx, user, authenticationMethod); terr != nil {
+				return badRequestError(ErrorCodeFlowStateNotFound, "No associated flow state found. %s", terr)
+			}
+		}
+		return nil
+	})
+
+	if err != nil {
+		var herr *HTTPError
+		if errors.As(err, &herr) {
+			rurl, err = a.prepErrorRedirectURL(herr, r, params.RedirectTo, flowType)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	if rurl != "" {
+		http.Redirect(w, r, rurl, http.StatusSeeOther)
+		return nil
+	}
+	rurl = params.RedirectTo
+	if isImplicitFlow(flowType) && token != nil {
+		q := url.Values{}
+		q.Set("type", params.Type)
+		rurl = token.AsRedirectURL(rurl, q)
+	} else if isPKCEFlow(flowType) {
+		rurl, err = a.prepPKCERedirectURL(rurl, authCode)
+		if err != nil {
+			return err
+		}
+	}
+	http.Redirect(w, r, rurl, http.StatusSeeOther)
+	return nil
+}
+
+func (a *API) verifyPost(w http.ResponseWriter, r *http.Request, params *VerifyParams) error {
+	ctx := r.Context()
+	db := a.db.WithContext(ctx)
+
+	var (
+		user        *models.User
+		grantParams models.GrantParams
+		token       *AccessTokenResponse
+	)
+	var isSingleConfirmationResponse = false
+
+	grantParams.FillGrantParams(r)
+
+	err := db.Transaction(func(tx *storage.Connection) error {
+		var terr error
+		aud := a.requestAud(ctx, r)
+
+		if isUsingTokenHash(params) {
+			user, terr = a.verifyTokenHash(tx, params)
+		} else {
+			user, terr = a.verifyUserAndToken(tx, params, aud)
+		}
+		if terr != nil {
+			return terr
+		}
+
+		switch params.Type {
+		case mail.SignupVerification, mail.InviteVerification:
+			user, terr = a.signupVerify(r, ctx, tx, user)
+		case mail.RecoveryVerification, mail.MagicLinkVerification:
+			user, terr = a.recoverVerify(r, tx, user)
+		case mail.EmailChangeVerification:
+			user, terr = a.emailChangeVerify(r, tx, params, user)
+			if user == nil && terr == nil {
+				isSingleConfirmationResponse = true
+				return nil
+			}
+		case smsVerification, phoneChangeVerification:
+			user, terr = a.smsVerify(r, tx, user, params)
+		default:
+			return badRequestError(ErrorCodeValidationFailed, "Unsupported verification type")
+		}
+
+		if terr != nil {
+			return terr
+		}
+
+		if terr := user.UpdateAppMetaDataProviders(tx); terr != nil {
+			return terr
+		}
+
+		// Reload user model from db.
+		// This is important for refreshing the data in any generated columns like IsAnonymous.
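+		// Past this point the two verification paths converge: verifyPost
+		// always issues a refresh token with models.OTP as the grant method,
+		// while verifyGet above does the same only for the implicit flow and
+		// hands back an auth code instead when the flow is PKCE.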
+ if terr := tx.Reload(user); terr != nil { + return terr + } + token, terr = a.issueRefreshToken(r, tx, user, models.OTP, grantParams) + if terr != nil { + return terr + } + return nil + }) + if err != nil { + return err + } + if isSingleConfirmationResponse { + return sendJSON(w, http.StatusOK, map[string]string{ + "msg": singleConfirmationAccepted, + "code": strconv.Itoa(http.StatusOK), + }) + } + return sendJSON(w, http.StatusOK, token) +} + +func (a *API) signupVerify(r *http.Request, ctx context.Context, conn *storage.Connection, user *models.User) (*models.User, error) { + config := a.config + + shouldUpdatePassword := false + if !user.HasPassword() && user.InvitedAt != nil { + // sign them up with temporary password, and require application + // to present the user with a password set form + password, err := password.Generate(64, 10, 0, false, true) + if err != nil { + // password generation must succeed + panic(err) + } + + if err := user.SetPassword(ctx, password, config.Security.DBEncryption.Encrypt, config.Security.DBEncryption.EncryptionKeyID, config.Security.DBEncryption.EncryptionKey); err != nil { + return nil, err + } + shouldUpdatePassword = true + } + + err := conn.Transaction(func(tx *storage.Connection) error { + var terr error + if shouldUpdatePassword { + if terr = user.UpdatePassword(tx, nil); terr != nil { + return internalServerError("Error storing password").WithInternalError(terr) + } + } + + if terr = models.NewAuditLogEntry(r, tx, user, models.UserSignedUpAction, "", nil); terr != nil { + return terr + } + + if terr = user.Confirm(tx); terr != nil { + return internalServerError("Error confirming user").WithInternalError(terr) + } + + for _, identity := range user.Identities { + if identity.Email == "" || user.Email == "" || identity.Email != user.Email { + continue + } + + if terr = identity.UpdateIdentityData(tx, map[string]interface{}{ + "email_verified": true, + }); terr != nil { + return internalServerError("Error setting email_verified to true on identity").WithInternalError(terr) + } + } + + return nil + }) + if err != nil { + return nil, err + } + return user, nil +} + +func (a *API) recoverVerify(r *http.Request, conn *storage.Connection, user *models.User) (*models.User, error) { + err := conn.Transaction(func(tx *storage.Connection) error { + var terr error + if terr = user.Recover(tx); terr != nil { + return terr + } + if !user.IsConfirmed() { + if terr = models.NewAuditLogEntry(r, tx, user, models.UserSignedUpAction, "", nil); terr != nil { + return terr + } + + if terr = user.Confirm(tx); terr != nil { + return terr + } + } else { + if terr = models.NewAuditLogEntry(r, tx, user, models.LoginAction, "", nil); terr != nil { + return terr + } + } + return nil + }) + + if err != nil { + return nil, internalServerError("Database error updating user").WithInternalError(err) + } + return user, nil +} + +func (a *API) smsVerify(r *http.Request, conn *storage.Connection, user *models.User, params *VerifyParams) (*models.User, error) { + + err := conn.Transaction(func(tx *storage.Connection) error { + + if params.Type == smsVerification { + if terr := models.NewAuditLogEntry(r, tx, user, models.UserSignedUpAction, "", nil); terr != nil { + return terr + } + if terr := user.ConfirmPhone(tx); terr != nil { + return internalServerError("Error confirming user").WithInternalError(terr) + } + } else if params.Type == phoneChangeVerification { + if terr := models.NewAuditLogEntry(r, tx, user, models.UserModifiedAction, "", nil); terr != nil { + return terr + } + 
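+			// Confirming a phone change either updates the existing "phone"
+			// identity in place or, when the user has none yet (for example
+			// an email-only account), creates one with phone_verified set.
+			// emailChangeVerify below does the same for the "email" identity.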
if identity, terr := models.FindIdentityByIdAndProvider(tx, user.ID.String(), "phone"); terr != nil { + if !models.IsNotFoundError(terr) { + return terr + } + // confirming the phone change should create a new phone identity if the user doesn't have one + if _, terr = a.createNewIdentity(tx, user, "phone", structs.Map(provider.Claims{ + Subject: user.ID.String(), + Phone: params.Phone, + PhoneVerified: true, + })); terr != nil { + return terr + } + } else { + if terr := identity.UpdateIdentityData(tx, map[string]interface{}{ + "phone": params.Phone, + "phone_verified": true, + }); terr != nil { + return terr + } + } + if terr := user.ConfirmPhoneChange(tx); terr != nil { + return internalServerError("Error confirming user").WithInternalError(terr) + } + } + + if user.IsAnonymous { + user.IsAnonymous = false + if terr := tx.UpdateOnly(user, "is_anonymous"); terr != nil { + return terr + } + } + + if terr := tx.Load(user, "Identities"); terr != nil { + return internalServerError("Error refetching identities").WithInternalError(terr) + } + return nil + }) + if err != nil { + return nil, err + } + return user, nil +} + +func (a *API) prepErrorRedirectURL(err *HTTPError, r *http.Request, rurl string, flowType models.FlowType) (string, error) { + u, perr := url.Parse(rurl) + if perr != nil { + return "", err + } + q := u.Query() + + // Maintain separate query params for hash and query + hq := url.Values{} + log := observability.GetLogEntry(r).Entry + errorID := utilities.GetRequestID(r.Context()) + err.ErrorID = errorID + log.WithError(err.Cause()).Info(err.Error()) + if str, ok := oauthErrorMap[err.HTTPStatus]; ok { + hq.Set("error", str) + q.Set("error", str) + } + hq.Set("error_code", err.ErrorCode) + hq.Set("error_description", err.Message) + + q.Set("error_code", err.ErrorCode) + q.Set("error_description", err.Message) + if flowType == models.PKCEFlow { + // Additionally, may override existing error query param if set to PKCE. + u.RawQuery = q.Encode() + } + // Left as hash fragment to comply with spec. 
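+	// Illustrative result for the PKCE flow (hypothetical URL), with the
+	// error carried in both the query and the fragment:
+	//
+	//	https://app.example.com/cb?error=access_denied&error_code=otp_expired&...#error=access_denied&...
+	//
+	// For the implicit flow only the fragment is rewritten, which also keeps
+	// the error details out of server logs, since fragments are never sent
+	// with the request.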
+ u.Fragment = hq.Encode() + return u.String(), nil +} + +func (a *API) prepRedirectURL(message string, rurl string, flowType models.FlowType) (string, error) { + u, perr := url.Parse(rurl) + if perr != nil { + return "", perr + } + hq := url.Values{} + q := u.Query() + hq.Set("message", message) + if flowType == models.PKCEFlow { + q.Set("message", message) + } + u.RawQuery = q.Encode() + u.Fragment = hq.Encode() + return u.String(), nil +} + +func (a *API) prepPKCERedirectURL(rurl, code string) (string, error) { + u, err := url.Parse(rurl) + if err != nil { + return "", err + } + q := u.Query() + q.Set("code", code) + u.RawQuery = q.Encode() + return u.String(), nil +} + +func (a *API) emailChangeVerify(r *http.Request, conn *storage.Connection, params *VerifyParams, user *models.User) (*models.User, error) { + config := a.config + if !config.Mailer.Autoconfirm && + config.Mailer.SecureEmailChangeEnabled && + user.EmailChangeConfirmStatus == zeroConfirmation && + user.GetEmail() != "" { + err := conn.Transaction(func(tx *storage.Connection) error { + currentOTT, terr := models.FindOneTimeToken(tx, params.TokenHash, models.EmailChangeTokenCurrent) + if terr != nil && !models.IsNotFoundError(terr) { + return terr + } + + newOTT, terr := models.FindOneTimeToken(tx, params.TokenHash, models.EmailChangeTokenNew) + if terr != nil && !models.IsNotFoundError(terr) { + return terr + } + + user.EmailChangeConfirmStatus = singleConfirmation + + if params.Token == user.EmailChangeTokenCurrent || params.TokenHash == user.EmailChangeTokenCurrent || (currentOTT != nil && params.TokenHash == currentOTT.TokenHash) { + user.EmailChangeTokenCurrent = "" + if terr := models.ClearOneTimeTokenForUser(tx, user.ID, models.EmailChangeTokenCurrent); terr != nil { + return terr + } + } else if params.Token == user.EmailChangeTokenNew || params.TokenHash == user.EmailChangeTokenNew || (newOTT != nil && params.TokenHash == newOTT.TokenHash) { + user.EmailChangeTokenNew = "" + if terr := models.ClearOneTimeTokenForUser(tx, user.ID, models.EmailChangeTokenNew); terr != nil { + return terr + } + } + if terr := tx.UpdateOnly(user, "email_change_confirm_status", "email_change_token_current", "email_change_token_new"); terr != nil { + return terr + } + return nil + }) + if err != nil { + return nil, err + } + return nil, nil + } + + // one email is confirmed at this point if GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED is enabled + err := conn.Transaction(func(tx *storage.Connection) error { + if terr := models.NewAuditLogEntry(r, tx, user, models.UserModifiedAction, "", nil); terr != nil { + return terr + } + + if identity, terr := models.FindIdentityByIdAndProvider(tx, user.ID.String(), "email"); terr != nil { + if !models.IsNotFoundError(terr) { + return terr + } + // confirming the email change should create a new email identity if the user doesn't have one + if _, terr = a.createNewIdentity(tx, user, "email", structs.Map(provider.Claims{ + Subject: user.ID.String(), + Email: user.EmailChange, + EmailVerified: true, + })); terr != nil { + return terr + } + } else { + if terr := identity.UpdateIdentityData(tx, map[string]interface{}{ + "email": user.EmailChange, + "email_verified": true, + }); terr != nil { + return terr + } + } + if user.IsAnonymous { + user.IsAnonymous = false + if terr := tx.UpdateOnly(user, "is_anonymous"); terr != nil { + return terr + } + } + if terr := tx.Load(user, "Identities"); terr != nil { + return internalServerError("Error refetching identities").WithInternalError(terr) + } + if terr := 
user.ConfirmEmailChange(tx, zeroConfirmation); terr != nil { + return internalServerError("Error confirm email").WithInternalError(terr) + } + + return nil + }) + if err != nil { + return nil, err + } + + return user, nil +} + +func (a *API) verifyTokenHash(conn *storage.Connection, params *VerifyParams) (*models.User, error) { + config := a.config + + var user *models.User + var err error + switch params.Type { + case mail.EmailOTPVerification: + // need to find user by confirmation token or recovery token with the token hash + user, err = models.FindUserByConfirmationOrRecoveryToken(conn, params.TokenHash) + case mail.SignupVerification, mail.InviteVerification: + user, err = models.FindUserByConfirmationToken(conn, params.TokenHash) + case mail.RecoveryVerification, mail.MagicLinkVerification: + user, err = models.FindUserByRecoveryToken(conn, params.TokenHash) + case mail.EmailChangeVerification: + user, err = models.FindUserByEmailChangeToken(conn, params.TokenHash) + default: + return nil, badRequestError(ErrorCodeValidationFailed, "Invalid email verification type") + } + + if err != nil { + if models.IsNotFoundError(err) { + return nil, forbiddenError(ErrorCodeOTPExpired, "Email link is invalid or has expired").WithInternalError(err) + } + return nil, internalServerError("Database error finding user from email link").WithInternalError(err) + } + + if user.IsBanned() { + return nil, forbiddenError(ErrorCodeUserBanned, "User is banned") + } + + var isExpired bool + switch params.Type { + case mail.EmailOTPVerification: + sentAt := user.ConfirmationSentAt + params.Type = "signup" + if user.RecoveryToken == params.TokenHash { + sentAt = user.RecoverySentAt + params.Type = "magiclink" + } + isExpired = isOtpExpired(sentAt, config.Mailer.OtpExp) + case mail.SignupVerification, mail.InviteVerification: + isExpired = isOtpExpired(user.ConfirmationSentAt, config.Mailer.OtpExp) + case mail.RecoveryVerification, mail.MagicLinkVerification: + isExpired = isOtpExpired(user.RecoverySentAt, config.Mailer.OtpExp) + case mail.EmailChangeVerification: + isExpired = isOtpExpired(user.EmailChangeSentAt, config.Mailer.OtpExp) + } + + if isExpired { + return nil, forbiddenError(ErrorCodeOTPExpired, "Email link is invalid or has expired").WithInternalMessage("email link has expired") + } + + return user, nil +} + +// verifyUserAndToken verifies the token associated to the user based on the verify type +func (a *API) verifyUserAndToken(conn *storage.Connection, params *VerifyParams, aud string) (*models.User, error) { + config := a.config + + var user *models.User + var err error + tokenHash := params.TokenHash + + switch params.Type { + case phoneChangeVerification: + user, err = models.FindUserByPhoneChangeAndAudience(conn, params.Phone, aud) + case smsVerification: + user, err = models.FindUserByPhoneAndAudience(conn, params.Phone, aud) + case mail.EmailChangeVerification: + // Since the email change could be trigger via the implicit or PKCE flow, + // the query used has to also check if the token saved in the db contains the pkce_ prefix + user, err = models.FindUserForEmailChange(conn, params.Email, tokenHash, aud, config.Mailer.SecureEmailChangeEnabled) + default: + user, err = models.FindUserByEmailAndAudience(conn, params.Email, aud) + } + + if err != nil { + if models.IsNotFoundError(err) { + return nil, forbiddenError(ErrorCodeOTPExpired, "Token has expired or is invalid").WithInternalError(err) + } + return nil, internalServerError("Database error finding user").WithInternalError(err) + } + + if 
user.IsBanned() { + return nil, forbiddenError(ErrorCodeUserBanned, "User is banned") + } + + var isValid bool + + smsProvider, _ := sms_provider.GetSmsProvider(*config) + switch params.Type { + case mail.EmailOTPVerification: + // if the type is emailOTPVerification, we'll check both the confirmation_token and recovery_token columns + if isOtpValid(tokenHash, user.ConfirmationToken, user.ConfirmationSentAt, config.Mailer.OtpExp) { + isValid = true + params.Type = mail.SignupVerification + } else if isOtpValid(tokenHash, user.RecoveryToken, user.RecoverySentAt, config.Mailer.OtpExp) { + isValid = true + params.Type = mail.MagicLinkVerification + } else { + isValid = false + } + case mail.SignupVerification, mail.InviteVerification: + isValid = isOtpValid(tokenHash, user.ConfirmationToken, user.ConfirmationSentAt, config.Mailer.OtpExp) + case mail.RecoveryVerification, mail.MagicLinkVerification: + isValid = isOtpValid(tokenHash, user.RecoveryToken, user.RecoverySentAt, config.Mailer.OtpExp) + case mail.EmailChangeVerification: + isValid = isOtpValid(tokenHash, user.EmailChangeTokenCurrent, user.EmailChangeSentAt, config.Mailer.OtpExp) || + isOtpValid(tokenHash, user.EmailChangeTokenNew, user.EmailChangeSentAt, config.Mailer.OtpExp) + case phoneChangeVerification, smsVerification: + if testOTP, ok := config.Sms.GetTestOTP(params.Phone, time.Now()); ok { + if params.Token == testOTP { + return user, nil + } + } + + phone := params.Phone + sentAt := user.ConfirmationSentAt + expectedToken := user.ConfirmationToken + if params.Type == phoneChangeVerification { + phone = user.PhoneChange + sentAt = user.PhoneChangeSentAt + expectedToken = user.PhoneChangeToken + } + + if !config.Hook.SendSMS.Enabled && config.Sms.IsTwilioVerifyProvider() { + if err := smsProvider.(*sms_provider.TwilioVerifyProvider).VerifyOTP(phone, params.Token); err != nil { + return nil, forbiddenError(ErrorCodeOTPExpired, "Token has expired or is invalid").WithInternalError(err) + } + return user, nil + } + isValid = isOtpValid(tokenHash, expectedToken, sentAt, config.Sms.OtpExp) + } + + if !isValid { + return nil, forbiddenError(ErrorCodeOTPExpired, "Token has expired or is invalid").WithInternalMessage("token has expired or is invalid") + } + return user, nil +} + +// isOtpValid checks the actual otp sent against the expected otp and ensures that it's within the valid window +func isOtpValid(actual, expected string, sentAt *time.Time, otpExp uint) bool { + if expected == "" || sentAt == nil { + return false + } + return !isOtpExpired(sentAt, otpExp) && ((actual == expected) || ("pkce_"+actual == expected)) +} + +func isOtpExpired(sentAt *time.Time, otpExp uint) bool { + return time.Now().After(sentAt.Add(time.Second * time.Duration(otpExp))) // #nosec G115 +} + +// isPhoneOtpVerification checks if the verification came from a phone otp +func isPhoneOtpVerification(params *VerifyParams) bool { + return params.Phone != "" && params.Email == "" +} + +// isEmailOtpVerification checks if the verification came from an email otp +func isEmailOtpVerification(params *VerifyParams) bool { + return params.Phone == "" && params.Email != "" +} + +func isUsingTokenHash(params *VerifyParams) bool { + return params.TokenHash != "" && params.Token == "" && params.Phone == "" && params.Email == "" +} diff --git a/auth_v2.169.0/internal/api/verify_test.go b/auth_v2.169.0/internal/api/verify_test.go new file mode 100644 index 0000000..7c97d69 --- /dev/null +++ b/auth_v2.169.0/internal/api/verify_test.go @@ -0,0 +1,1280 @@ +package api + 
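+// The suite below exercises /verify end to end through the router: password
+// recovery in both the implicit and PKCE flows, and secure email change with
+// its two one-time tokens, asserting the redirect contracts via the Location
+// header. SetupTest recreates the fixture user test@example.com together
+// with an unverified "email" identity before each test.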
+import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + "time" + + mail "github.com/supabase/auth/internal/mailer" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/crypto" + "github.com/supabase/auth/internal/models" +) + +type VerifyTestSuite struct { + suite.Suite + API *API + Config *conf.GlobalConfiguration +} + +func TestVerify(t *testing.T) { + api, config, err := setupAPIForTest() + require.NoError(t, err) + + ts := &VerifyTestSuite{ + API: api, + Config: config, + } + defer api.db.Close() + + suite.Run(t, ts) +} + +func (ts *VerifyTestSuite) SetupTest() { + models.TruncateAll(ts.API.db) + + // Create user + u, err := models.NewUser("12345678", "test@example.com", "password", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err, "Error creating test user model") + require.NoError(ts.T(), ts.API.db.Create(u), "Error saving new test user") + + // Create identity + i, err := models.NewIdentity(u, "email", map[string]interface{}{ + "sub": u.ID.String(), + "email": "test@example.com", + "email_verified": false, + }) + require.NoError(ts.T(), err, "Error creating test identity model") + require.NoError(ts.T(), ts.API.db.Create(i), "Error saving new test identity") +} + +func (ts *VerifyTestSuite) TestVerifyPasswordRecovery() { + // modify config so we don't hit rate limit from requesting recovery twice in 60s + ts.Config.SMTP.MaxFrequency = 60 + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + u.RecoverySentAt = &time.Time{} + require.NoError(ts.T(), ts.API.db.Update(u)) + testEmail := "test@example.com" + + cases := []struct { + desc string + body map[string]interface{} + isPKCE bool + }{ + { + desc: "Implicit Flow Recovery", + body: map[string]interface{}{ + "email": testEmail, + }, + isPKCE: false, + }, + { + desc: "PKCE Flow", + body: map[string]interface{}{ + "email": testEmail, + // Code Challenge needs to be at least 43 characters long + "code_challenge": "6b151854-cc15-4e29-8db7-3d3a9f15b3066b151854-cc15-4e29-8db7-3d3a9f15b306", + "code_challenge_method": models.SHA256.String(), + }, + isPKCE: true, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + // Reset user + u.EmailConfirmedAt = nil + require.NoError(ts.T(), ts.API.db.Update(u)) + require.NoError(ts.T(), models.ClearAllOneTimeTokensForUser(ts.API.db, u.ID)) + + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.body)) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/recover", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup response recorder + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusOK, w.Code) + + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + assert.WithinDuration(ts.T(), time.Now(), *u.RecoverySentAt, 1*time.Second) + assert.False(ts.T(), u.IsConfirmed()) + + recoveryToken := u.RecoveryToken + + reqURL := fmt.Sprintf("http://localhost/verify?type=%s&token=%s", mail.RecoveryVerification, recoveryToken) + req = httptest.NewRequest(http.MethodGet, reqURL, nil) + + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusSeeOther, 
w.Code) + + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + assert.True(ts.T(), u.IsConfirmed()) + + if c.isPKCE { + rURL, _ := w.Result().Location() + + f, err := url.ParseQuery(rURL.RawQuery) + require.NoError(ts.T(), err) + assert.NotEmpty(ts.T(), f.Get("code")) + } + }) + } +} + +func (ts *VerifyTestSuite) TestVerifySecureEmailChange() { + currentEmail := "test@example.com" + newEmail := "new@example.com" + + // Change from new email to current email and back to new email + cases := []struct { + desc string + body map[string]interface{} + isPKCE bool + currentEmail string + newEmail string + }{ + { + desc: "Implicit Flow Email Change", + body: map[string]interface{}{ + "email": newEmail, + }, + isPKCE: false, + currentEmail: currentEmail, + newEmail: newEmail, + }, + { + desc: "PKCE Email Change", + body: map[string]interface{}{ + "email": currentEmail, + // Code Challenge needs to be at least 43 characters long + "code_challenge": "6b151854-cc15-4e29-8db7-3d3a9f15b3066b151854-cc15-4e29-8db7-3d3a9f15b306", + "code_challenge_method": models.SHA256.String(), + }, + isPKCE: true, + currentEmail: newEmail, + newEmail: currentEmail, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + u, err := models.FindUserByEmailAndAudience(ts.API.db, c.currentEmail, ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + // reset user + u.EmailChangeSentAt = nil + u.EmailChangeTokenCurrent = "" + u.EmailChangeTokenNew = "" + require.NoError(ts.T(), ts.API.db.Update(u)) + require.NoError(ts.T(), models.ClearAllOneTimeTokensForUser(ts.API.db, u.ID)) + + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.body)) + + // Setup request + req := httptest.NewRequest(http.MethodPut, "http://localhost/user", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Generate access token for request and a mock session + var token string + session, err := models.NewSession(u.ID, nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.API.db.Create(session)) + + token, _, err = ts.API.generateAccessToken(req, ts.API.db, u, &session.ID, models.MagicLink) + require.NoError(ts.T(), err) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + // Setup response recorder + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusOK, w.Code) + + u, err = models.FindUserByEmailAndAudience(ts.API.db, c.currentEmail, ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + currentTokenHash := u.EmailChangeTokenCurrent + newTokenHash := u.EmailChangeTokenNew + + u, err = models.FindUserByEmailAndAudience(ts.API.db, c.currentEmail, ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + assert.WithinDuration(ts.T(), time.Now(), *u.EmailChangeSentAt, 1*time.Second) + assert.False(ts.T(), u.IsConfirmed()) + + // Verify new email + reqURL := fmt.Sprintf("http://localhost/verify?type=%s&token=%s", mail.EmailChangeVerification, newTokenHash) + req = httptest.NewRequest(http.MethodGet, reqURL, nil) + + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + + require.Equal(ts.T(), http.StatusSeeOther, w.Code) + urlVal, err := url.Parse(w.Result().Header.Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + var v url.Values + if !c.isPKCE { + v, err = url.ParseQuery(urlVal.Fragment) + ts.Require().NoError(err) + ts.Require().NotEmpty(v.Get("message")) + } else if c.isPKCE { + v, err = 
url.ParseQuery(urlVal.RawQuery) + ts.Require().NoError(err) + ts.Require().NotEmpty(v.Get("message")) + + v, err = url.ParseQuery(urlVal.Fragment) + ts.Require().NoError(err) + ts.Require().NotEmpty(v.Get("message")) + } + + u, err = models.FindUserByEmailAndAudience(ts.API.db, c.currentEmail, ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + assert.Equal(ts.T(), singleConfirmation, u.EmailChangeConfirmStatus) + + // Verify old email + reqURL = fmt.Sprintf("http://localhost/verify?type=%s&token=%s", mail.EmailChangeVerification, currentTokenHash) + req = httptest.NewRequest(http.MethodGet, reqURL, nil) + + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.Equal(ts.T(), http.StatusSeeOther, w.Code) + + urlVal, err = url.Parse(w.Header().Get("Location")) + ts.Require().NoError(err, "redirect url parse failed") + if !c.isPKCE { + v, err = url.ParseQuery(urlVal.Fragment) + ts.Require().NoError(err) + ts.Require().NotEmpty(v.Get("access_token")) + ts.Require().NotEmpty(v.Get("expires_in")) + ts.Require().NotEmpty(v.Get("refresh_token")) + } else if c.isPKCE { + v, err = url.ParseQuery(urlVal.RawQuery) + ts.Require().NoError(err) + ts.Require().NotEmpty(v.Get("code")) + } + + // user's email should've been updated to newEmail + u, err = models.FindUserByEmailAndAudience(ts.API.db, c.newEmail, ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + require.Equal(ts.T(), zeroConfirmation, u.EmailChangeConfirmStatus) + + // Reset confirmation status after each test + u.EmailConfirmedAt = nil + require.NoError(ts.T(), ts.API.db.Update(u)) + }) + } +} + +func (ts *VerifyTestSuite) TestExpiredConfirmationToken() { + // verify variant testing not necessary in this test as it's testing + // the ConfirmationSentAt behavior, not the ConfirmationToken behavior + + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + u.ConfirmationToken = "asdf3" + sentTime := time.Now().Add(-48 * time.Hour) + u.ConfirmationSentAt = &sentTime + require.NoError(ts.T(), ts.API.db.Update(u)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.GetEmail(), u.ConfirmationToken, models.ConfirmationToken)) + + // Setup request + reqURL := fmt.Sprintf("http://localhost/verify?type=%s&token=%s", mail.SignupVerification, u.ConfirmationToken) + req := httptest.NewRequest(http.MethodGet, reqURL, nil) + + // Setup response recorder + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusSeeOther, w.Code) + + rurl, err := url.Parse(w.Header().Get("Location")) + require.NoError(ts.T(), err, "redirect url parse failed") + + f, err := url.ParseQuery(rurl.Fragment) + require.NoError(ts.T(), err) + assert.Equal(ts.T(), ErrorCodeOTPExpired, f.Get("error_code")) + assert.Equal(ts.T(), "Email link is invalid or has expired", f.Get("error_description")) + assert.Equal(ts.T(), "access_denied", f.Get("error")) +} + +func (ts *VerifyTestSuite) TestInvalidOtp() { + u, err := models.FindUserByPhoneAndAudience(ts.API.db, "12345678", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + sentTime := time.Now().Add(-48 * time.Hour) + u.ConfirmationToken = "123456" + u.ConfirmationSentAt = &sentTime + u.PhoneChange = "22222222" + u.PhoneChangeToken = "123456" + u.PhoneChangeSentAt = &sentTime + u.EmailChange = "test@gmail.com" + u.EmailChangeTokenNew = "123456" + u.EmailChangeTokenCurrent = "123456" + require.NoError(ts.T(), ts.API.db.Update(u)) + require.NoError(ts.T(), 
models.CreateOneTimeToken(ts.API.db, u.ID, u.GetEmail(), u.ConfirmationToken, models.ConfirmationToken)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.PhoneChange, u.PhoneChangeToken, models.PhoneChangeToken)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.GetEmail(), u.EmailChangeTokenCurrent, models.EmailChangeTokenCurrent)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.EmailChange, u.EmailChangeTokenNew, models.EmailChangeTokenNew)) + + type ResponseBody struct { + Code int `json:"code"` + Msg string `json:"msg"` + } + + expectedResponse := ResponseBody{ + Code: http.StatusForbidden, + Msg: "Token has expired or is invalid", + } + + cases := []struct { + desc string + sentTime time.Time + body map[string]interface{} + expected ResponseBody + }{ + { + desc: "Expired SMS OTP", + sentTime: time.Now().Add(-48 * time.Hour), + body: map[string]interface{}{ + "type": smsVerification, + "token": u.ConfirmationToken, + "phone": u.GetPhone(), + }, + expected: expectedResponse, + }, + { + desc: "Invalid SMS OTP", + sentTime: time.Now(), + body: map[string]interface{}{ + "type": smsVerification, + "token": "invalid_otp", + "phone": u.GetPhone(), + }, + expected: expectedResponse, + }, + { + desc: "Invalid Phone Change OTP", + sentTime: time.Now(), + body: map[string]interface{}{ + "type": phoneChangeVerification, + "token": "invalid_otp", + "phone": u.PhoneChange, + }, + expected: expectedResponse, + }, + { + desc: "Invalid Email OTP", + sentTime: time.Now(), + body: map[string]interface{}{ + "type": mail.SignupVerification, + "token": "invalid_otp", + "email": u.GetEmail(), + }, + expected: expectedResponse, + }, + { + desc: "Invalid Email Change", + sentTime: time.Now(), + body: map[string]interface{}{ + "type": mail.EmailChangeVerification, + "token": "invalid_otp", + "email": u.GetEmail(), + }, + expected: expectedResponse, + }, + } + + for _, caseItem := range cases { + c := caseItem + + ts.Run(c.desc, func() { + // update token sent time + sentTime = time.Now() + u.ConfirmationSentAt = &c.sentTime + require.NoError(ts.T(), ts.API.db.Update(u)) + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.body)) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/verify", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup response recorder + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + + b, err := io.ReadAll(w.Body) + require.NoError(ts.T(), err) + var resp ResponseBody + err = json.Unmarshal(b, &resp) + require.NoError(ts.T(), err) + assert.Equal(ts.T(), c.expected.Code, resp.Code) + assert.Equal(ts.T(), c.expected.Msg, resp.Msg) + + }) + } +} + +func (ts *VerifyTestSuite) TestExpiredRecoveryToken() { + // verify variant testing not necessary in this test as it's testing + // the RecoverySentAt behavior, not the RecoveryToken behavior + + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + u.RecoveryToken = "asdf3" + sentTime := time.Now().Add(-48 * time.Hour) + u.RecoverySentAt = &sentTime + require.NoError(ts.T(), ts.API.db.Update(u)) + + // Setup request + reqURL := fmt.Sprintf("http://localhost/verify?type=%s&token=%s", "signup", u.RecoveryToken) + req := httptest.NewRequest(http.MethodGet, reqURL, nil) + + // Setup response recorder + w := httptest.NewRecorder() + + ts.API.handler.ServeHTTP(w, req) + + assert.Equal(ts.T(), 
http.StatusSeeOther, w.Code, w.Body.String()) +} + +func (ts *VerifyTestSuite) TestVerifyPermitedCustomUri() { + // verify variant testing not necessary in this test as it's testing + // the redirect URL behavior, not the RecoveryToken behavior + + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + u.RecoverySentAt = &time.Time{} + require.NoError(ts.T(), ts.API.db.Update(u)) + + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test@example.com", + })) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/recover", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup response recorder + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusOK, w.Code) + + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + assert.WithinDuration(ts.T(), time.Now(), *u.RecoverySentAt, 1*time.Second) + assert.False(ts.T(), u.IsConfirmed()) + + redirectURL, _ := url.Parse(ts.Config.URIAllowList[0]) + + reqURL := fmt.Sprintf("http://localhost/verify?type=%s&token=%s&redirect_to=%s", "recovery", u.RecoveryToken, redirectURL.String()) + req = httptest.NewRequest(http.MethodGet, reqURL, nil) + + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusSeeOther, w.Code) + rURL, _ := w.Result().Location() + assert.Equal(ts.T(), redirectURL.Hostname(), rURL.Hostname()) + + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + assert.True(ts.T(), u.IsConfirmed()) +} + +func (ts *VerifyTestSuite) TestVerifyNotPermitedCustomUri() { + // verify variant testing not necessary in this test as it's testing + // the redirect URL behavior, not the RecoveryToken behavior + + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + u.RecoverySentAt = &time.Time{} + require.NoError(ts.T(), ts.API.db.Update(u)) + + // Request body + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(map[string]interface{}{ + "email": "test@example.com", + })) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/recover", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup response recorder + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusOK, w.Code) + + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + + assert.WithinDuration(ts.T(), time.Now(), *u.RecoverySentAt, 1*time.Second) + assert.False(ts.T(), u.IsConfirmed()) + + fakeredirectURL, _ := url.Parse("http://custom-url.com") + siteURL, _ := url.Parse(ts.Config.SiteURL) + + reqURL := fmt.Sprintf("http://localhost/verify?type=%s&token=%s&redirect_to=%s", "recovery", u.RecoveryToken, fakeredirectURL.String()) + req = httptest.NewRequest(http.MethodGet, reqURL, nil) + + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusSeeOther, w.Code) + rURL, _ := w.Result().Location() + assert.Equal(ts.T(), siteURL.Hostname(), rURL.Hostname()) + + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + 
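The two redirect tests above, and the table-driven allow-list cases that follow, all come down to separator-aware glob matching: a single '*' must not cross '.' or '/', while '**' may. A minimal sketch of that behavior, assuming the allow-list entries are compiled with '.' and '/' as separator runes via the gobwas/glob package that configuration.go imports further below; the patterns are taken from the test table, the rest is illustrative.

package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// '*' matches within a single host label or path segment only.
	starOnly := glob.MustCompile("http://*.dev:3000", '.', '/')
	fmt.Println(starOnly.Match("http://foo.bar.dev:3000")) // false: '*' cannot span '.'

	twoStars := glob.MustCompile("http://*.*.dev:3000", '.', '/')
	fmt.Println(twoStars.Match("http://foo.bar.dev:3000")) // true: one label per '*'

	// '**' crosses separators, which is what lets mobile deep links
	// keep arbitrary paths after the scheme.
	deepLink := glob.MustCompile("com.example.app://**", '.', '/')
	fmt.Println(deepLink.Match("com.example.app://sign-in/v2")) // true
}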
require.NoError(ts.T(), err) + assert.True(ts.T(), u.IsConfirmed()) +} + +func (ts *VerifyTestSuite) TestVerifySignupWithRedirectURLContainedPath() { + // verify variant testing not necessary in this test as it's testing + // the redirect URL behavior, not the RecoveryToken behavior + + testCases := []struct { + desc string + siteURL string + uriAllowList []string + requestredirectURL string + expectedredirectURL string + }{ + { + desc: "same site url and redirect url with path", + siteURL: "http://localhost:3000/#/", + uriAllowList: []string{"http://localhost:3000"}, + requestredirectURL: "http://localhost:3000/#/", + expectedredirectURL: "http://localhost:3000/#/", + }, + { + desc: "different site url and redirect url in allow list", + siteURL: "https://someapp-something.codemagic.app/#/", + uriAllowList: []string{"http://localhost:3000"}, + requestredirectURL: "http://localhost:3000", + expectedredirectURL: "http://localhost:3000", + }, + { + desc: "different site url and redirect url not in allow list", + siteURL: "https://someapp-something.codemagic.app/#/", + uriAllowList: []string{"http://localhost:3000"}, + requestredirectURL: "http://localhost:3000/docs", + expectedredirectURL: "https://someapp-something.codemagic.app/#/", + }, + { + desc: "same wildcard site url and redirect url in allow list", + siteURL: "http://sub.test.dev:3000/#/", + uriAllowList: []string{"http://*.test.dev:3000"}, + requestredirectURL: "http://sub.test.dev:3000/#/", + expectedredirectURL: "http://sub.test.dev:3000/#/", + }, + { + desc: "different wildcard site url and redirect url in allow list", + siteURL: "http://sub.test.dev/#/", + uriAllowList: []string{"http://*.other.dev:3000"}, + requestredirectURL: "http://sub.other.dev:3000", + expectedredirectURL: "http://sub.other.dev:3000", + }, + { + desc: "different wildcard site url and redirect url not in allow list", + siteURL: "http://test.dev:3000/#/", + uriAllowList: []string{"http://*.allowed.dev:3000"}, + requestredirectURL: "http://sub.test.dev:3000/#/", + expectedredirectURL: "http://test.dev:3000/#/", + }, + { + desc: "exact mobile deep link redirect url in allow list", + siteURL: "http://test.dev:3000/#/", + uriAllowList: []string{"twitter://timeline"}, + requestredirectURL: "twitter://timeline", + expectedredirectURL: "twitter://timeline", + }, + // previously the below example was not allowed and with good + // reason, however users do want flexibility in the redirect + // URL after the scheme, which is why the example is now corrected + { + desc: "wildcard mobile deep link redirect url in allow list", + siteURL: "http://test.dev:3000/#/", + uriAllowList: []string{"com.example.app://**"}, + requestredirectURL: "com.example.app://sign-in/v2", + expectedredirectURL: "com.example.app://sign-in/v2", + }, + { + desc: "redirect respects . separator", + siteURL: "http://localhost:3000", + uriAllowList: []string{"http://*.*.dev:3000"}, + requestredirectURL: "http://foo.bar.dev:3000", + expectedredirectURL: "http://foo.bar.dev:3000", + }, + { + desc: "redirect does not respect . 
separator", + siteURL: "http://localhost:3000", + uriAllowList: []string{"http://*.dev:3000"}, + requestredirectURL: "http://foo.bar.dev:3000", + expectedredirectURL: "http://localhost:3000", + }, + { + desc: "redirect respects / separator in url subdirectory", + siteURL: "http://localhost:3000", + uriAllowList: []string{"http://test.dev:3000/*/*"}, + requestredirectURL: "http://test.dev:3000/bar/foo", + expectedredirectURL: "http://test.dev:3000/bar/foo", + }, + { + desc: "redirect does not respect / separator in url subdirectory", + siteURL: "http://localhost:3000", + uriAllowList: []string{"http://test.dev:3000/*"}, + requestredirectURL: "http://test.dev:3000/bar/foo", + expectedredirectURL: "http://localhost:3000", + }, + } + + for _, tC := range testCases { + ts.Run(tC.desc, func() { + // prepare test data + ts.Config.SiteURL = tC.siteURL + redirectURL := tC.requestredirectURL + ts.Config.URIAllowList = tC.uriAllowList + ts.Config.ApplyDefaults() + + // set verify token to user as it actual do in magic link method + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + u.ConfirmationToken = "someToken" + sendTime := time.Now().Add(time.Hour) + u.ConfirmationSentAt = &sendTime + require.NoError(ts.T(), ts.API.db.Update(u)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.GetEmail(), u.ConfirmationToken, models.ConfirmationToken)) + + reqURL := fmt.Sprintf("http://localhost/verify?type=%s&token=%s&redirect_to=%s", "signup", u.ConfirmationToken, redirectURL) + req := httptest.NewRequest(http.MethodGet, reqURL, nil) + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusSeeOther, w.Code) + rURL, _ := w.Result().Location() + assert.Contains(ts.T(), rURL.String(), tC.expectedredirectURL) // redirected url starts with per test value + + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + assert.True(ts.T(), u.IsConfirmed()) + assert.True(ts.T(), u.UserMetaData["email_verified"].(bool)) + assert.True(ts.T(), u.Identities[0].IdentityData["email_verified"].(bool)) + }) + } +} + +func (ts *VerifyTestSuite) TestVerifyPKCEOTP() { + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + t := time.Now() + u.ConfirmationSentAt = &t + u.RecoverySentAt = &t + u.EmailChangeSentAt = &t + require.NoError(ts.T(), ts.API.db.Update(u)) + + cases := []struct { + desc string + payload *VerifyParams + authenticationMethod models.AuthenticationMethod + }{ + { + desc: "Verify user on signup", + payload: &VerifyParams{ + Type: "signup", + Token: "pkce_confirmation_token", + }, + authenticationMethod: models.EmailSignup, + }, + { + desc: "Verify magiclink", + payload: &VerifyParams{ + Type: "magiclink", + Token: "pkce_recovery_token", + }, + authenticationMethod: models.MagicLink, + }, + } + for _, c := range cases { + ts.Run(c.desc, func() { + var buffer bytes.Buffer + // since the test user is the same, the tokens are being cleared after each successful verification attempt + // so we create them on each run + if c.payload.Type == "signup" { + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.GetEmail(), c.payload.Token, models.ConfirmationToken)) + } else if c.payload.Type == "magiclink" { + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.GetEmail(), c.payload.Token, models.RecoveryToken)) + 
} + + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.payload)) + codeChallenge := "codechallengecodechallengcodechallengcodechallengcodechallenge" + flowState := models.NewFlowState(c.authenticationMethod.String(), codeChallenge, models.SHA256, c.authenticationMethod, &u.ID) + require.NoError(ts.T(), ts.API.db.Create(flowState)) + + requestUrl := fmt.Sprintf("http://localhost/verify?type=%v&token=%v", c.payload.Type, c.payload.Token) + req := httptest.NewRequest(http.MethodGet, requestUrl, &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusSeeOther, w.Code) + rURL, _ := w.Result().Location() + + u, err = models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + assert.True(ts.T(), u.IsConfirmed()) + + f, err := url.ParseQuery(rURL.RawQuery) + require.NoError(ts.T(), err) + assert.NotEmpty(ts.T(), f.Get("code")) + }) + } + +} + +func (ts *VerifyTestSuite) TestVerifyBannedUser() { + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + u.ConfirmationToken = "confirmation_token" + u.RecoveryToken = "recovery_token" + u.EmailChangeTokenCurrent = "current_email_change_token" + u.EmailChangeTokenNew = "new_email_change_token" + t := time.Now() + u.ConfirmationSentAt = &t + u.RecoverySentAt = &t + u.EmailChangeSentAt = &t + + t = time.Now().Add(24 * time.Hour) + u.BannedUntil = &t + require.NoError(ts.T(), ts.API.db.Update(u)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.GetEmail(), u.ConfirmationToken, models.ConfirmationToken)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.GetEmail(), u.RecoveryToken, models.RecoveryToken)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.GetEmail(), u.EmailChangeTokenCurrent, models.EmailChangeTokenCurrent)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, u.GetEmail(), u.EmailChangeTokenNew, models.EmailChangeTokenNew)) + + cases := []struct { + desc string + payload *VerifyParams + }{ + { + desc: "Verify banned user on signup", + payload: &VerifyParams{ + Type: "signup", + Token: u.ConfirmationToken, + }, + }, + { + desc: "Verify banned user on invite", + payload: &VerifyParams{ + Type: "invite", + Token: u.ConfirmationToken, + }, + }, + { + desc: "Verify banned user on recover", + payload: &VerifyParams{ + Type: "recovery", + Token: u.RecoveryToken, + }, + }, + { + desc: "Verify banned user on magiclink", + payload: &VerifyParams{ + Type: "magiclink", + Token: u.RecoveryToken, + }, + }, + { + desc: "Verify banned user on email change", + payload: &VerifyParams{ + Type: "email_change", + Token: u.EmailChangeTokenCurrent, + }, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.payload)) + + requestUrl := fmt.Sprintf("http://localhost/verify?type=%v&token=%v", c.payload.Type, c.payload.Token) + req := httptest.NewRequest(http.MethodGet, requestUrl, &buffer) + req.Header.Set("Content-Type", "application/json") + + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), http.StatusSeeOther, w.Code) + + rurl, err := url.Parse(w.Header().Get("Location")) + require.NoError(ts.T(), err, "redirect url parse failed") + + f, err := url.ParseQuery(rurl.Fragment) + require.NoError(ts.T(), err) + 
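The assertion just below, like several tests above, pulls structured error fields out of the redirect's hash fragment. Implicit-flow responses encode errors in query-string syntax after the '#', so url.ParseQuery applies to the fragment directly; a self-contained illustration (the URL itself is made up):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Shape of an implicit-flow error redirect; host and values are illustrative.
	loc, err := url.Parse("http://localhost:3000/#error=access_denied&error_code=user_banned&error_description=User+is+banned")
	if err != nil {
		panic(err)
	}

	// The fragment uses form-encoded syntax, so url.ParseQuery
	// decodes it exactly like a query string.
	f, err := url.ParseQuery(loc.Fragment)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Get("error_code"))        // user_banned
	fmt.Println(f.Get("error_description")) // User is banned
}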
assert.Equal(ts.T(), ErrorCodeUserBanned, f.Get("error_code")) + }) + } +} + +func (ts *VerifyTestSuite) TestVerifyValidOtp() { + ts.Config.Mailer.SecureEmailChangeEnabled = true + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + u.EmailChange = "new@example.com" + u.Phone = "12345678" + u.PhoneChange = "1234567890" + require.NoError(ts.T(), ts.API.db.Update(u)) + + type expected struct { + code int + tokenHash string + } + + cases := []struct { + desc string + sentTime time.Time + body map[string]interface{} + expected + }{ + { + desc: "Valid SMS OTP", + sentTime: time.Now(), + body: map[string]interface{}{ + "type": smsVerification, + "token": "123456", + "phone": u.GetPhone(), + }, + expected: expected{ + code: http.StatusOK, + tokenHash: crypto.GenerateTokenHash(u.GetPhone(), "123456"), + }, + }, + { + desc: "Valid Confirmation OTP", + sentTime: time.Now(), + body: map[string]interface{}{ + "type": mail.SignupVerification, + "token": "123456", + "email": u.GetEmail(), + }, + expected: expected{ + code: http.StatusOK, + tokenHash: crypto.GenerateTokenHash(u.GetEmail(), "123456"), + }, + }, + { + desc: "Valid Signup Token Hash", + sentTime: time.Now(), + body: map[string]interface{}{ + "type": mail.SignupVerification, + "token_hash": crypto.GenerateTokenHash(u.GetEmail(), "123456"), + }, + expected: expected{ + code: http.StatusOK, + tokenHash: crypto.GenerateTokenHash(u.GetEmail(), "123456"), + }, + }, + { + desc: "Valid Recovery OTP", + sentTime: time.Now(), + body: map[string]interface{}{ + "type": mail.RecoveryVerification, + "token": "123456", + "email": u.GetEmail(), + }, + expected: expected{ + code: http.StatusOK, + tokenHash: crypto.GenerateTokenHash(u.GetEmail(), "123456"), + }, + }, + { + desc: "Valid Email OTP", + sentTime: time.Now(), + body: map[string]interface{}{ + "type": mail.EmailOTPVerification, + "token": "123456", + "email": u.GetEmail(), + }, + expected: expected{ + code: http.StatusOK, + tokenHash: crypto.GenerateTokenHash(u.GetEmail(), "123456"), + }, + }, + { + desc: "Valid Email OTP (email casing shouldn't matter)", + sentTime: time.Now(), + body: map[string]interface{}{ + "type": mail.EmailOTPVerification, + "token": "123456", + "email": strings.ToUpper(u.GetEmail()), + }, + expected: expected{ + code: http.StatusOK, + tokenHash: crypto.GenerateTokenHash(u.GetEmail(), "123456"), + }, + }, + { + desc: "Valid Email Change OTP", + sentTime: time.Now(), + body: map[string]interface{}{ + "type": mail.EmailChangeVerification, + "token": "123456", + "email": u.EmailChange, + }, + expected: expected{ + code: http.StatusOK, + tokenHash: crypto.GenerateTokenHash(u.EmailChange, "123456"), + }, + }, + { + desc: "Valid Phone Change OTP", + sentTime: time.Now(), + body: map[string]interface{}{ + "type": phoneChangeVerification, + "token": "123456", + "phone": u.PhoneChange, + }, + expected: expected{ + code: http.StatusOK, + tokenHash: crypto.GenerateTokenHash(u.PhoneChange, "123456"), + }, + }, + { + desc: "Valid Email Change Token Hash", + sentTime: time.Now(), + body: map[string]interface{}{ + "type": mail.EmailChangeVerification, + "token_hash": crypto.GenerateTokenHash(u.EmailChange, "123456"), + }, + expected: expected{ + code: http.StatusOK, + tokenHash: crypto.GenerateTokenHash(u.EmailChange, "123456"), + }, + }, + { + desc: "Valid Email Verification Type", + sentTime: time.Now(), + body: map[string]interface{}{ + "type": mail.EmailOTPVerification, + "token_hash": 
crypto.GenerateTokenHash(u.GetEmail(), "123456"), + }, + expected: expected{ + code: http.StatusOK, + tokenHash: crypto.GenerateTokenHash(u.GetEmail(), "123456"), + }, + }, + } + + for _, caseItem := range cases { + c := caseItem + ts.Run(c.desc, func() { + // create user + require.NoError(ts.T(), models.ClearAllOneTimeTokensForUser(ts.API.db, u.ID)) + + u.ConfirmationSentAt = &c.sentTime + u.RecoverySentAt = &c.sentTime + u.EmailChangeSentAt = &c.sentTime + u.PhoneChangeSentAt = &c.sentTime + + u.ConfirmationToken = c.expected.tokenHash + u.RecoveryToken = c.expected.tokenHash + u.EmailChangeTokenNew = c.expected.tokenHash + u.PhoneChangeToken = c.expected.tokenHash + + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, "relates_to not used", u.ConfirmationToken, models.ConfirmationToken)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, "relates_to not used", u.RecoveryToken, models.RecoveryToken)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, "relates_to not used", u.EmailChangeTokenNew, models.EmailChangeTokenNew)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, "relates_to not used", u.PhoneChangeToken, models.PhoneChangeToken)) + + require.NoError(ts.T(), ts.API.db.Update(u)) + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.body)) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/verify", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup response recorder + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), c.expected.code, w.Code) + }) + } +} + +func (ts *VerifyTestSuite) TestSecureEmailChangeWithTokenHash() { + ts.Config.Mailer.SecureEmailChangeEnabled = true + u, err := models.FindUserByEmailAndAudience(ts.API.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + u.EmailChange = "new@example.com" + require.NoError(ts.T(), ts.API.db.Update(u)) + + currentEmailChangeToken := crypto.GenerateTokenHash(string(u.Email), "123456") + newEmailChangeToken := crypto.GenerateTokenHash(u.EmailChange, "123456") + + cases := []struct { + desc string + firstVerificationBody map[string]interface{} + secondVerificationBody map[string]interface{} + expectedStatus int + }{ + { + desc: "Secure Email Change with Token Hash (Success)", + firstVerificationBody: map[string]interface{}{ + "type": mail.EmailChangeVerification, + "token_hash": currentEmailChangeToken, + }, + secondVerificationBody: map[string]interface{}{ + "type": mail.EmailChangeVerification, + "token_hash": newEmailChangeToken, + }, + expectedStatus: http.StatusOK, + }, + { + desc: "Secure Email Change with Token Hash. 
Reusing a token hash twice should fail", + firstVerificationBody: map[string]interface{}{ + "type": mail.EmailChangeVerification, + "token_hash": currentEmailChangeToken, + }, + secondVerificationBody: map[string]interface{}{ + "type": mail.EmailChangeVerification, + "token_hash": currentEmailChangeToken, + }, + expectedStatus: http.StatusForbidden, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + // Set the corresponding email change tokens + u.EmailChangeTokenCurrent = currentEmailChangeToken + u.EmailChangeTokenNew = newEmailChangeToken + require.NoError(ts.T(), models.ClearAllOneTimeTokensForUser(ts.API.db, u.ID)) + + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, "relates_to not used", currentEmailChangeToken, models.EmailChangeTokenCurrent)) + require.NoError(ts.T(), models.CreateOneTimeToken(ts.API.db, u.ID, "relates_to not used", newEmailChangeToken, models.EmailChangeTokenNew)) + + currentTime := time.Now() + u.EmailChangeSentAt = &currentTime + require.NoError(ts.T(), ts.API.db.Update(u)) + + var buffer bytes.Buffer + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.firstVerificationBody)) + + // Setup request + req := httptest.NewRequest(http.MethodPost, "http://localhost/verify", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup response recorder + w := httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + require.NoError(ts.T(), json.NewEncoder(&buffer).Encode(c.secondVerificationBody)) + + // Setup second request + req = httptest.NewRequest(http.MethodPost, "http://localhost/verify", &buffer) + req.Header.Set("Content-Type", "application/json") + + // Setup second response recorder + w = httptest.NewRecorder() + ts.API.handler.ServeHTTP(w, req) + assert.Equal(ts.T(), c.expectedStatus, w.Code) + }) + } +} + +func (ts *VerifyTestSuite) TestPrepRedirectURL() { + escapedMessage := url.QueryEscape(singleConfirmationAccepted) + cases := []struct { + desc string + message string + rurl string + flowType models.FlowType + expected string + }{ + { + desc: "(PKCE): Redirect URL with additional query params", + message: singleConfirmationAccepted, + rurl: "https://example.com/?first=another&second=other", + flowType: models.PKCEFlow, + expected: fmt.Sprintf("https://example.com/?first=another&message=%s&second=other#message=%s", escapedMessage, escapedMessage), + }, + { + desc: "(PKCE): Query params in redirect url are overridden", + message: singleConfirmationAccepted, + rurl: "https://example.com/?message=Valid+redirect+URL", + flowType: models.PKCEFlow, + expected: fmt.Sprintf("https://example.com/?message=%s#message=%s", escapedMessage, escapedMessage), + }, + { + desc: "(Implicit): plain redirect url", + message: singleConfirmationAccepted, + rurl: "https://example.com/", + flowType: models.ImplicitFlow, + expected: fmt.Sprintf("https://example.com/#message=%s", escapedMessage), + }, + { + desc: "(Implicit): query params retained", + message: singleConfirmationAccepted, + rurl: "https://example.com/?first=another", + flowType: models.ImplicitFlow, + expected: fmt.Sprintf("https://example.com/?first=another#message=%s", escapedMessage), + }, + } + for _, c := range cases { + ts.Run(c.desc, func() { + rurl, err := ts.API.prepRedirectURL(c.message, c.rurl, c.flowType) + require.NoError(ts.T(), err) + require.Equal(ts.T(), c.expected, rurl) + }) + } +} + +func (ts *VerifyTestSuite) TestPrepErrorRedirectURL() { + const DefaultError = "Invalid redirect URL" + redirectError := 
fmt.Sprintf("error=invalid_request&error_code=validation_failed&error_description=%s", url.QueryEscape(DefaultError)) + + cases := []struct { + desc string + message string + rurl string + flowType models.FlowType + expected string + }{ + { + desc: "(PKCE): Error in both query params and hash fragment", + message: "Valid redirect URL", + rurl: "https://example.com/", + flowType: models.PKCEFlow, + expected: fmt.Sprintf("https://example.com/?%s#%s", redirectError, redirectError), + }, + { + desc: "(PKCE): Error with conflicting query params in redirect url", + message: DefaultError, + rurl: "https://example.com/?error=Error+to+be+overriden", + flowType: models.PKCEFlow, + expected: fmt.Sprintf("https://example.com/?%s#%s", redirectError, redirectError), + }, + { + desc: "(Implicit): plain redirect url", + message: DefaultError, + rurl: "https://example.com/", + flowType: models.ImplicitFlow, + expected: fmt.Sprintf("https://example.com/#%s", redirectError), + }, + { + desc: "(Implicit): query params preserved", + message: DefaultError, + rurl: "https://example.com/?test=param", + flowType: models.ImplicitFlow, + expected: fmt.Sprintf("https://example.com/?test=param#%s", redirectError), + }, + } + for _, c := range cases { + ts.Run(c.desc, func() { + req := httptest.NewRequest(http.MethodGet, "http://localhost", nil) + rurl, err := ts.API.prepErrorRedirectURL(badRequestError(ErrorCodeValidationFailed, DefaultError), req, c.rurl, c.flowType) + require.NoError(ts.T(), err) + require.Equal(ts.T(), c.expected, rurl) + }) + } +} + +func (ts *VerifyTestSuite) TestVerifyValidateParams() { + cases := []struct { + desc string + params *VerifyParams + method string + expected error + }{ + { + desc: "Successful GET Verify", + params: &VerifyParams{ + Type: "signup", + Token: "some-token-hash", + }, + method: http.MethodGet, + expected: nil, + }, + { + desc: "Successful POST Verify (TokenHash)", + params: &VerifyParams{ + Type: "signup", + TokenHash: "some-token-hash", + }, + method: http.MethodPost, + expected: nil, + }, + { + desc: "Successful POST Verify (Token)", + params: &VerifyParams{ + Type: "signup", + Token: "some-token", + Email: "email@example.com", + }, + method: http.MethodPost, + expected: nil, + }, + // unsuccessful validations + { + desc: "Need to send email or phone number with token", + params: &VerifyParams{ + Type: "signup", + Token: "some-token", + }, + method: http.MethodPost, + expected: badRequestError(ErrorCodeValidationFailed, "Only an email address or phone number should be provided on verify"), + }, + { + desc: "Cannot send both TokenHash and Token", + params: &VerifyParams{ + Type: "signup", + Token: "some-token", + TokenHash: "some-token-hash", + }, + method: http.MethodPost, + expected: badRequestError(ErrorCodeValidationFailed, "Verify requires either a token or a token hash"), + }, + { + desc: "No verification type specified", + params: &VerifyParams{ + Token: "some-token", + Email: "email@example.com", + }, + method: http.MethodPost, + expected: badRequestError(ErrorCodeValidationFailed, "Verify requires a verification type"), + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + req := httptest.NewRequest(c.method, "http://localhost", nil) + err := c.params.Validate(req, ts.API) + require.Equal(ts.T(), c.expected, err) + }) + } +} diff --git a/auth_v2.169.0/internal/conf/configuration.go b/auth_v2.169.0/internal/conf/configuration.go new file mode 100644 index 0000000..c4d910d --- /dev/null +++ b/auth_v2.169.0/internal/conf/configuration.go @@ -0,0 
+1,1144 @@ +package conf + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" + "os" + "path/filepath" + "regexp" + "strings" + "text/template" + "time" + + "github.com/gobwas/glob" + "github.com/golang-jwt/jwt/v5" + "github.com/joho/godotenv" + "github.com/kelseyhightower/envconfig" + "github.com/lestrrat-go/jwx/v2/jwk" + "gopkg.in/gomail.v2" +) + +const defaultMinPasswordLength int = 6 +const defaultChallengeExpiryDuration float64 = 300 +const defaultFactorExpiryDuration time.Duration = 300 * time.Second +const defaultFlowStateExpiryDuration time.Duration = 300 * time.Second + +// See: https://www.postgresql.org/docs/7.0/syntax525.htm +var postgresNamesRegexp = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]{0,62}$`) + +// See: https://github.com/standard-webhooks/standard-webhooks/blob/main/spec/standard-webhooks.md +// We use 4 * Math.ceil(n/3) to obtain unpadded length in base 64 +// So this 4 * Math.ceil(24/3) = 32 and 4 * Math.ceil(64/3) = 88 for symmetric secrets +// Since Ed25519 key is 32 bytes so we have 4 * Math.ceil(32/3) = 44 +var symmetricSecretFormat = regexp.MustCompile(`^v1,whsec_[A-Za-z0-9+/=]{32,88}`) +var asymmetricSecretFormat = regexp.MustCompile(`^v1a,whpk_[A-Za-z0-9+/=]{44,}:whsk_[A-Za-z0-9+/=]{44,}$`) + +// Time is used to represent timestamps in the configuration, as envconfig has +// trouble parsing empty strings, due to time.Time.UnmarshalText(). +type Time struct { + time.Time +} + +func (t *Time) UnmarshalText(text []byte) error { + trimed := bytes.TrimSpace(text) + + if len(trimed) < 1 { + t.Time = time.Time{} + } else { + if err := t.Time.UnmarshalText(trimed); err != nil { + return err + } + } + + return nil +} + +// OAuthProviderConfiguration holds all config related to external account providers. +type OAuthProviderConfiguration struct { + ClientID []string `json:"client_id" split_words:"true"` + Secret string `json:"secret"` + RedirectURI string `json:"redirect_uri" split_words:"true"` + URL string `json:"url"` + ApiURL string `json:"api_url" split_words:"true"` + Enabled bool `json:"enabled"` + SkipNonceCheck bool `json:"skip_nonce_check" split_words:"true"` +} + +type AnonymousProviderConfiguration struct { + Enabled bool `json:"enabled" default:"false"` +} + +type EmailProviderConfiguration struct { + Enabled bool `json:"enabled" default:"true"` + + AuthorizedAddresses []string `json:"authorized_addresses" split_words:"true"` + + MagicLinkEnabled bool `json:"magic_link_enabled" default:"true" split_words:"true"` +} + +// DBConfiguration holds all the database related configuration. +type DBConfiguration struct { + Driver string `json:"driver" required:"true"` + URL string `json:"url" envconfig:"DATABASE_URL" required:"true"` + Namespace string `json:"namespace" envconfig:"DB_NAMESPACE" default:"auth"` + // MaxPoolSize defaults to 0 (unlimited). 
+ MaxPoolSize int `json:"max_pool_size" split_words:"true"` + MaxIdlePoolSize int `json:"max_idle_pool_size" split_words:"true"` + ConnMaxLifetime time.Duration `json:"conn_max_lifetime,omitempty" split_words:"true"` + ConnMaxIdleTime time.Duration `json:"conn_max_idle_time,omitempty" split_words:"true"` + HealthCheckPeriod time.Duration `json:"health_check_period" split_words:"true"` + MigrationsPath string `json:"migrations_path" split_words:"true" default:"./migrations"` + CleanupEnabled bool `json:"cleanup_enabled" split_words:"true" default:"false"` +} + +func (c *DBConfiguration) Validate() error { + return nil +} + +// JWTConfiguration holds all the JWT related configuration. +type JWTConfiguration struct { + Secret string `json:"secret" required:"true"` + Exp int `json:"exp"` + Aud string `json:"aud"` + AdminGroupName string `json:"admin_group_name" split_words:"true"` + AdminRoles []string `json:"admin_roles" split_words:"true"` + DefaultGroupName string `json:"default_group_name" split_words:"true"` + Issuer string `json:"issuer"` + KeyID string `json:"key_id" split_words:"true"` + Keys JwtKeysDecoder `json:"keys"` + ValidMethods []string `json:"-"` +} + +type MFAFactorTypeConfiguration struct { + EnrollEnabled bool `json:"enroll_enabled" split_words:"true" default:"false"` + VerifyEnabled bool `json:"verify_enabled" split_words:"true" default:"false"` +} + +type TOTPFactorTypeConfiguration struct { + EnrollEnabled bool `json:"enroll_enabled" split_words:"true" default:"true"` + VerifyEnabled bool `json:"verify_enabled" split_words:"true" default:"true"` +} + +type PhoneFactorTypeConfiguration struct { + // Default to false in order to ensure Phone MFA is opt-in + MFAFactorTypeConfiguration + OtpLength int `json:"otp_length" split_words:"true"` + SMSTemplate *template.Template `json:"-"` + MaxFrequency time.Duration `json:"max_frequency" split_words:"true"` + Template string `json:"template"` +} + +// MFAConfiguration holds all the MFA related Configuration +type MFAConfiguration struct { + ChallengeExpiryDuration float64 `json:"challenge_expiry_duration" default:"300" split_words:"true"` + FactorExpiryDuration time.Duration `json:"factor_expiry_duration" default:"300s" split_words:"true"` + RateLimitChallengeAndVerify float64 `split_words:"true" default:"15"` + MaxEnrolledFactors float64 `split_words:"true" default:"10"` + MaxVerifiedFactors int `split_words:"true" default:"10"` + Phone PhoneFactorTypeConfiguration `split_words:"true"` + TOTP TOTPFactorTypeConfiguration `split_words:"true"` + WebAuthn MFAFactorTypeConfiguration `split_words:"true"` +} + +type APIConfiguration struct { + Host string + Port string `envconfig:"PORT" default:"8081"` + Endpoint string + RequestIDHeader string `envconfig:"REQUEST_ID_HEADER"` + ExternalURL string `json:"external_url" envconfig:"API_EXTERNAL_URL" required:"true"` + MaxRequestDuration time.Duration `json:"max_request_duration" split_words:"true" default:"10s"` +} + +func (a *APIConfiguration) Validate() error { + _, err := url.ParseRequestURI(a.ExternalURL) + if err != nil { + return err + } + + return nil +} + +type SessionsConfiguration struct { + Timebox *time.Duration `json:"timebox"` + InactivityTimeout *time.Duration `json:"inactivity_timeout,omitempty" split_words:"true"` + + SinglePerUser bool `json:"single_per_user" split_words:"true"` + Tags []string `json:"tags,omitempty"` +} + +func (c *SessionsConfiguration) Validate() error { + if c.Timebox == nil { + return nil + } + + if *c.Timebox <= time.Duration(0) { + return 
fmt.Errorf("conf: session timebox duration must be positive when set, was %v", (*c.Timebox).String()) + } + + return nil +} + +type PasswordRequiredCharacters []string + +func (v *PasswordRequiredCharacters) Decode(value string) error { + parts := strings.Split(value, ":") + + for i := 0; i < len(parts)-1; i += 1 { + part := parts[i] + + if part == "" { + continue + } + + // part ended in escape character, so it should be joined with the next one + if part[len(part)-1] == '\\' { + parts[i] = part[0:len(part)-1] + ":" + parts[i+1] + parts[i+1] = "" + continue + } + } + + for _, part := range parts { + if part != "" { + *v = append(*v, part) + } + } + + return nil +} + +// HIBPBloomConfiguration configures a bloom cache for pwned passwords. Use +// this tool to gauge the Items and FalsePositives values: +// https://hur.st/bloomfilter +type HIBPBloomConfiguration struct { + Enabled bool `json:"enabled"` + Items uint `json:"items" default:"100000"` + FalsePositives float64 `json:"false_positives" split_words:"true" default:"0.0000099"` +} + +type HIBPConfiguration struct { + Enabled bool `json:"enabled"` + FailClosed bool `json:"fail_closed" split_words:"true"` + + UserAgent string `json:"user_agent" split_words:"true" default:"https://github.com/supabase/gotrue"` + + Bloom HIBPBloomConfiguration `json:"bloom"` +} + +type PasswordConfiguration struct { + MinLength int `json:"min_length" split_words:"true"` + + RequiredCharacters PasswordRequiredCharacters `json:"required_characters" split_words:"true"` + + HIBP HIBPConfiguration `json:"hibp"` +} + +// GlobalConfiguration holds all the configuration that applies to all instances. +type GlobalConfiguration struct { + API APIConfiguration + DB DBConfiguration + External ProviderConfiguration + Logging LoggingConfig `envconfig:"LOG"` + Profiler ProfilerConfig `envconfig:"PROFILER"` + OperatorToken string `split_words:"true" required:"false"` + Tracing TracingConfig + Metrics MetricsConfig + SMTP SMTPConfiguration + + RateLimitHeader string `split_words:"true"` + RateLimitEmailSent Rate `split_words:"true" default:"30"` + RateLimitSmsSent Rate `split_words:"true" default:"30"` + RateLimitVerify float64 `split_words:"true" default:"30"` + RateLimitTokenRefresh float64 `split_words:"true" default:"150"` + RateLimitSso float64 `split_words:"true" default:"30"` + RateLimitAnonymousUsers float64 `split_words:"true" default:"30"` + RateLimitOtp float64 `split_words:"true" default:"30"` + + SiteURL string `json:"site_url" split_words:"true" required:"true"` + URIAllowList []string `json:"uri_allow_list" split_words:"true"` + URIAllowListMap map[string]glob.Glob + Password PasswordConfiguration `json:"password"` + JWT JWTConfiguration `json:"jwt"` + Mailer MailerConfiguration `json:"mailer"` + Sms SmsProviderConfiguration `json:"sms"` + DisableSignup bool `json:"disable_signup" split_words:"true"` + Hook HookConfiguration `json:"hook" split_words:"true"` + Security SecurityConfiguration `json:"security"` + Sessions SessionsConfiguration `json:"sessions"` + MFA MFAConfiguration `json:"MFA"` + SAML SAMLConfiguration `json:"saml"` + CORS CORSConfiguration `json:"cors"` +} + +type CORSConfiguration struct { + AllowedHeaders []string `json:"allowed_headers" split_words:"true"` +} + +func (c *CORSConfiguration) AllAllowedHeaders(defaults []string) []string { + set := make(map[string]bool) + for _, header := range defaults { + set[header] = true + } + + var result []string + result = append(result, defaults...) 
+ + for _, header := range c.AllowedHeaders { + if !set[header] { + result = append(result, header) + } + + set[header] = true + } + + return result +} + +// EmailContentConfiguration holds the configuration for emails, both subjects and template URLs. +type EmailContentConfiguration struct { + Invite string `json:"invite"` + Confirmation string `json:"confirmation"` + Recovery string `json:"recovery"` + EmailChange string `json:"email_change" split_words:"true"` + MagicLink string `json:"magic_link" split_words:"true"` + Reauthentication string `json:"reauthentication"` +} + +type ProviderConfiguration struct { + AnonymousUsers AnonymousProviderConfiguration `json:"anonymous_users" split_words:"true"` + Apple OAuthProviderConfiguration `json:"apple"` + Azure OAuthProviderConfiguration `json:"azure"` + Bitbucket OAuthProviderConfiguration `json:"bitbucket"` + Discord OAuthProviderConfiguration `json:"discord"` + Facebook OAuthProviderConfiguration `json:"facebook"` + Figma OAuthProviderConfiguration `json:"figma"` + Fly OAuthProviderConfiguration `json:"fly"` + Github OAuthProviderConfiguration `json:"github"` + Gitlab OAuthProviderConfiguration `json:"gitlab"` + Google OAuthProviderConfiguration `json:"google"` + Kakao OAuthProviderConfiguration `json:"kakao"` + Notion OAuthProviderConfiguration `json:"notion"` + Keycloak OAuthProviderConfiguration `json:"keycloak"` + Linkedin OAuthProviderConfiguration `json:"linkedin"` + LinkedinOIDC OAuthProviderConfiguration `json:"linkedin_oidc" envconfig:"LINKEDIN_OIDC"` + Spotify OAuthProviderConfiguration `json:"spotify"` + Slack OAuthProviderConfiguration `json:"slack"` + SlackOIDC OAuthProviderConfiguration `json:"slack_oidc" envconfig:"SLACK_OIDC"` + Twitter OAuthProviderConfiguration `json:"twitter"` + Twitch OAuthProviderConfiguration `json:"twitch"` + VercelMarketplace OAuthProviderConfiguration `json:"vercel_marketplace" split_words:"true"` + WorkOS OAuthProviderConfiguration `json:"workos"` + Email EmailProviderConfiguration `json:"email"` + Phone PhoneProviderConfiguration `json:"phone"` + Zoom OAuthProviderConfiguration `json:"zoom"` + IosBundleId string `json:"ios_bundle_id" split_words:"true"` + RedirectURL string `json:"redirect_url"` + AllowedIdTokenIssuers []string `json:"allowed_id_token_issuers" split_words:"true"` + FlowStateExpiryDuration time.Duration `json:"flow_state_expiry_duration" split_words:"true"` +} + +type SMTPConfiguration struct { + MaxFrequency time.Duration `json:"max_frequency" split_words:"true"` + Host string `json:"host"` + Port int `json:"port,omitempty" default:"587"` + User string `json:"user"` + Pass string `json:"pass,omitempty"` + AdminEmail string `json:"admin_email" split_words:"true"` + SenderName string `json:"sender_name" split_words:"true"` + Headers string `json:"headers"` + LoggingEnabled bool `json:"logging_enabled" split_words:"true" default:"false"` + + fromAddress string `json:"-"` + normalizedHeaders map[string][]string `json:"-"` +} + +func (c *SMTPConfiguration) Validate() error { + headers := make(map[string][]string) + + if c.Headers != "" { + err := json.Unmarshal([]byte(c.Headers), &headers) + if err != nil { + return fmt.Errorf("conf: SMTP headers not a map[string][]string format: %w", err) + } + } + + if len(headers) > 0 { + c.normalizedHeaders = headers + } + + mail := gomail.NewMessage() + + c.fromAddress = mail.FormatAddress(c.AdminEmail, c.SenderName) + + return nil +} + +func (c *SMTPConfiguration) FromAddress() string { + return c.fromAddress +} + +func (c *SMTPConfiguration) 
NormalizedHeaders() map[string][]string { + return c.normalizedHeaders +} + +type MailerConfiguration struct { + Autoconfirm bool `json:"autoconfirm"` + AllowUnverifiedEmailSignIns bool `json:"allow_unverified_email_sign_ins" split_words:"true" default:"false"` + + Subjects EmailContentConfiguration `json:"subjects"` + Templates EmailContentConfiguration `json:"templates"` + URLPaths EmailContentConfiguration `json:"url_paths"` + + SecureEmailChangeEnabled bool `json:"secure_email_change_enabled" split_words:"true" default:"true"` + + OtpExp uint `json:"otp_exp" split_words:"true"` + OtpLength int `json:"otp_length" split_words:"true"` + + ExternalHosts []string `json:"external_hosts" split_words:"true"` + + // EXPERIMENTAL: May be removed in a future release. + EmailValidationExtended bool `json:"email_validation_extended" split_words:"true" default:"false"` + EmailValidationServiceURL string `json:"email_validation_service_url" split_words:"true"` + EmailValidationServiceHeaders string `json:"email_validation_service_headers" split_words:"true"` + + serviceHeaders map[string][]string `json:"-"` +} + +func (c *MailerConfiguration) Validate() error { + headers := make(map[string][]string) + + if c.EmailValidationServiceHeaders != "" { + err := json.Unmarshal([]byte(c.EmailValidationServiceHeaders), &headers) + if err != nil { + return fmt.Errorf("conf: mailer validation headers not a map[string][]string format: %w", err) + } + } + + if len(headers) > 0 { + c.serviceHeaders = headers + } + return nil +} + +func (c *MailerConfiguration) GetEmailValidationServiceHeaders() map[string][]string { + return c.serviceHeaders +} + +type PhoneProviderConfiguration struct { + Enabled bool `json:"enabled" default:"false"` +} + +type SmsProviderConfiguration struct { + Autoconfirm bool `json:"autoconfirm"` + MaxFrequency time.Duration `json:"max_frequency" split_words:"true"` + OtpExp uint `json:"otp_exp" split_words:"true"` + OtpLength int `json:"otp_length" split_words:"true"` + Provider string `json:"provider"` + Template string `json:"template"` + TestOTP map[string]string `json:"test_otp" split_words:"true"` + TestOTPValidUntil Time `json:"test_otp_valid_until" split_words:"true"` + SMSTemplate *template.Template `json:"-"` + + Twilio TwilioProviderConfiguration `json:"twilio"` + TwilioVerify TwilioVerifyProviderConfiguration `json:"twilio_verify" split_words:"true"` + Messagebird MessagebirdProviderConfiguration `json:"messagebird"` + Textlocal TextlocalProviderConfiguration `json:"textlocal"` + Vonage VonageProviderConfiguration `json:"vonage"` +} + +func (c *SmsProviderConfiguration) GetTestOTP(phone string, now time.Time) (string, bool) { + if c.TestOTP != nil && (c.TestOTPValidUntil.Time.IsZero() || now.Before(c.TestOTPValidUntil.Time)) { + testOTP, ok := c.TestOTP[phone] + return testOTP, ok + } + + return "", false +} + +type TwilioProviderConfiguration struct { + AccountSid string `json:"account_sid" split_words:"true"` + AuthToken string `json:"auth_token" split_words:"true"` + MessageServiceSid string `json:"message_service_sid" split_words:"true"` + ContentSid string `json:"content_sid" split_words:"true"` +} + +type TwilioVerifyProviderConfiguration struct { + AccountSid string `json:"account_sid" split_words:"true"` + AuthToken string `json:"auth_token" split_words:"true"` + MessageServiceSid string `json:"message_service_sid" split_words:"true"` +} + +type MessagebirdProviderConfiguration struct { + AccessKey string `json:"access_key" split_words:"true"` + Originator string 
`json:"originator" split_words:"true"`
+}
+
+type TextlocalProviderConfiguration struct {
+	ApiKey string `json:"api_key" split_words:"true"`
+	Sender string `json:"sender" split_words:"true"`
+}
+
+type VonageProviderConfiguration struct {
+	ApiKey    string `json:"api_key" split_words:"true"`
+	ApiSecret string `json:"api_secret" split_words:"true"`
+	From      string `json:"from" split_words:"true"`
+}
+
+type CaptchaConfiguration struct {
+	Enabled  bool   `json:"enabled" default:"false"`
+	Provider string `json:"provider" default:"hcaptcha"`
+	Secret   string `json:"provider_secret"`
+}
+
+func (c *CaptchaConfiguration) Validate() error {
+	if !c.Enabled {
+		return nil
+	}
+
+	if c.Provider != "hcaptcha" && c.Provider != "turnstile" {
+		return fmt.Errorf("unsupported captcha provider: %s", c.Provider)
+	}
+
+	c.Secret = strings.TrimSpace(c.Secret)
+
+	if c.Secret == "" {
+		return errors.New("captcha provider secret is empty")
+	}
+
+	return nil
+}
+
+// DatabaseEncryptionConfiguration configures Auth to encrypt certain columns.
+// Once Encrypt is set to true, data will start getting encrypted with the
+// provided encryption key. Setting it back to false only stops further
+// encryption; DecryptionKeys must still contain the same key so that already
+// encrypted data remains accessible.
+type DatabaseEncryptionConfiguration struct {
+	Encrypt bool `json:"encrypt"`
+
+	EncryptionKeyID string `json:"encryption_key_id" split_words:"true"`
+	EncryptionKey   string `json:"-" split_words:"true"`
+
+	DecryptionKeys map[string]string `json:"-" split_words:"true"`
+}
+
+func (c *DatabaseEncryptionConfiguration) Validate() error {
+	if c.Encrypt {
+		if c.EncryptionKeyID == "" {
+			return errors.New("conf: encryption key ID must be specified")
+		}
+
+		decodedKey, err := base64.RawURLEncoding.DecodeString(c.EncryptionKey)
+		if err != nil {
+			return err
+		}
+
+		if len(decodedKey) != 256/8 {
+			return errors.New("conf: encryption key is not 256 bits")
+		}
+
+		if c.DecryptionKeys == nil || c.DecryptionKeys[c.EncryptionKeyID] == "" {
+			return errors.New("conf: encryption key must also be present in decryption keys")
+		}
+	}
+
+	for id, key := range c.DecryptionKeys {
+		decodedKey, err := base64.RawURLEncoding.DecodeString(key)
+		if err != nil {
+			return err
+		}
+
+		if len(decodedKey) != 256/8 {
+			return fmt.Errorf("conf: decryption key with ID %q must be 256 bits", id)
+		}
+	}
+
+	return nil
+}
+
+type SecurityConfiguration struct {
+	Captcha                               CaptchaConfiguration `json:"captcha"`
+	RefreshTokenRotationEnabled           bool                 `json:"refresh_token_rotation_enabled" split_words:"true" default:"true"`
+	RefreshTokenReuseInterval             int                  `json:"refresh_token_reuse_interval" split_words:"true"`
+	UpdatePasswordRequireReauthentication bool                 `json:"update_password_require_reauthentication" split_words:"true"`
+	ManualLinkingEnabled                  bool                 `json:"manual_linking_enabled" split_words:"true" default:"false"`
+
+	DBEncryption DatabaseEncryptionConfiguration `json:"database_encryption" split_words:"true"`
+}
+
+func (c *SecurityConfiguration) Validate() error {
+	if err := c.Captcha.Validate(); err != nil {
+		return err
+	}
+
+	if err := c.DBEncryption.Validate(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func loadEnvironment(filename string) error {
+	var err error
+	if filename != "" {
+		err = godotenv.Overload(filename)
+	} else {
+		err = godotenv.Load()
+		// handle if .env file does not exist, this is OK
+		if os.IsNotExist(err) {
+			return nil
+		}
+	}
+	return err
+}
+
+// Moving away from the existing HookConfig so we can get a fresh start.
+type HookConfiguration struct {
+	MFAVerificationAttempt      ExtensibilityPointConfiguration `json:"mfa_verification_attempt" split_words:"true"`
+	PasswordVerificationAttempt ExtensibilityPointConfiguration `json:"password_verification_attempt" split_words:"true"`
+	CustomAccessToken           ExtensibilityPointConfiguration `json:"custom_access_token" split_words:"true"`
+	SendEmail                   ExtensibilityPointConfiguration `json:"send_email" split_words:"true"`
+	SendSMS                     ExtensibilityPointConfiguration `json:"send_sms" split_words:"true"`
+}
+
+type HTTPHookSecrets []string
+
+func (h *HTTPHookSecrets) Decode(value string) error {
+	parts := strings.Split(value, "|")
+	for _, part := range parts {
+		if part != "" {
+			*h = append(*h, part)
+		}
+	}
+
+	return nil
+}
+
+type ExtensibilityPointConfiguration struct {
+	URI     string `json:"uri"`
+	Enabled bool   `json:"enabled"`
+	// For internal use together with Postgres Hook. Not publicly exposed.
+	HookName string `json:"-"`
+	// We use | as a separator between secrets and : as a separator within an
+	// asymmetric keypair. For instance: v1,whsec_test|v1a,whpk_myother:whsk_testkey|v1,whsec_secret3
+	HTTPHookSecrets HTTPHookSecrets `json:"secrets" envconfig:"secrets"`
+}
+
+func (h *HookConfiguration) Validate() error {
+	points := []ExtensibilityPointConfiguration{
+		h.MFAVerificationAttempt,
+		h.PasswordVerificationAttempt,
+		h.CustomAccessToken,
+		h.SendSMS,
+		h.SendEmail,
+	}
+	for _, point := range points {
+		if err := point.ValidateExtensibilityPoint(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e *ExtensibilityPointConfiguration) ValidateExtensibilityPoint() error {
+	if e.URI == "" {
+		return nil
+	}
+	u, err := url.Parse(e.URI)
+	if err != nil {
+		return err
+	}
+	switch strings.ToLower(u.Scheme) {
+	case "pg-functions":
+		return validatePostgresPath(u)
+	case "http":
+		hostname := u.Hostname()
+		if hostname == "localhost" || hostname == "127.0.0.1" || hostname == "::1" || hostname == "host.docker.internal" {
+			return validateHTTPHookSecrets(e.HTTPHookSecrets)
+		}
+		return fmt.Errorf("only localhost, 127.0.0.1, ::1, and host.docker.internal are supported with http")
+	case "https":
+		return validateHTTPHookSecrets(e.HTTPHookSecrets)
+	default:
+		return fmt.Errorf("only postgres hooks and HTTPS functions are supported at the moment")
+	}
+}
+
+func validatePostgresPath(u *url.URL) error {
+	pathParts := strings.Split(u.Path, "/")
+	if len(pathParts) < 3 {
+		return fmt.Errorf("URI path does not contain enough parts")
+	}
+
+	schema := pathParts[1]
+	table := pathParts[2]
+	// Validate schema and table names
+	if !postgresNamesRegexp.MatchString(schema) {
+		return fmt.Errorf("invalid schema name: %s", schema)
+	}
+	if !postgresNamesRegexp.MatchString(table) {
+		return fmt.Errorf("invalid table name: %s", table)
+	}
+	return nil
+}
+
+func isValidSecretFormat(secret string) bool {
+	return symmetricSecretFormat.MatchString(secret) || asymmetricSecretFormat.MatchString(secret)
+}
+
+func validateHTTPHookSecrets(secrets []string) error {
+	for _, secret := range secrets {
+		if !isValidSecretFormat(secret) {
+			return fmt.Errorf("invalid secret format")
+		}
+	}
+	return nil
+}
+
+func (e *ExtensibilityPointConfiguration) PopulateExtensibilityPoint() error {
+	u, err := url.Parse(e.URI)
+	if err != nil {
+		return err
+	}
+	if u.Scheme == "pg-functions" {
+		pathParts := strings.Split(u.Path, "/")
+		e.HookName = fmt.Sprintf("%q.%q", pathParts[1], pathParts[2])
+	}
+	return nil
+}
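+
+// Illustrative sketch of configuring hooks through the environment (the
+// values mirror fixtures from configuration_test.go and are not real
+// credentials):
+//
+//	GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI=pg-functions://postgres/auth/count_failed_attempts
+//	GOTRUE_HOOK_SEND_SMS_SECRETS=v1,whsec_aWxpa2VzdXBhYmFzZXZlcnltdWNoYW5kaWhvcGV5b3Vkb3Rvbw==
+//
+// For the pg-functions URI above, PopulateExtensibilityPoint derives the
+// HookName `"auth"."count_failed_attempts"`; http(s) URIs must instead carry
+// secrets accepted by validateHTTPHookSecrets.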
+
+// LoadFile calls godotenv.Load() when the given filename is empty, ignoring a
+// missing .env file; otherwise it calls godotenv.Overload(filename).
+//
+// godotenv.Load: preserves env, ".env" path is optional
+// godotenv.Overload: overrides env, "filename" path must exist
+func LoadFile(filename string) error {
+	var err error
+	if filename != "" {
+		err = godotenv.Overload(filename)
+	} else {
+		err = godotenv.Load()
+		// handle if .env file does not exist, this is OK
+		if os.IsNotExist(err) {
+			return nil
+		}
+	}
+	return err
+}
+
+// LoadDirectory does nothing when configDir is empty, otherwise it will attempt
+// to load a list of configuration files located in configDir by using ReadDir
+// to obtain a sorted list of files containing a .env suffix.
+//
+// When the list is empty it will do nothing, otherwise it passes the file list
+// to godotenv.Overload to pull them into the current environment.
+func LoadDirectory(configDir string) error {
+	if configDir == "" {
+		return nil
+	}
+
+	// Returns entries sorted by filename
+	ents, err := os.ReadDir(configDir)
+	if err != nil {
+		// We mimic the behavior of LoadGlobal here: if an explicit path is
+		// provided we return an error.
+		return err
+	}
+
+	var paths []string
+	for _, ent := range ents {
+		if ent.IsDir() {
+			continue // ignore directories
+		}
+
+		// We only read files ending in .env
+		name := ent.Name()
+		if !strings.HasSuffix(name, ".env") {
+			continue
+		}
+
+		// ent.Name() does not include the watch dir.
+		paths = append(paths, filepath.Join(configDir, name))
+	}
+
+	// If at least one path was found we load the configuration files in the
+	// directory. We don't call override without config files because it will
+	// override the env vars previously set with a ".env", if one exists.
+	if len(paths) > 0 {
+		if err := godotenv.Overload(paths...); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// LoadGlobalFromEnv will return a new *GlobalConfiguration value from the
+// currently configured environment.
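+//
+// Minimal usage sketch (assumes required GOTRUE_* variables such as
+// GOTRUE_SITE_URL, GOTRUE_DB_DRIVER and GOTRUE_JWT_SECRET are set, as in
+// TestGlobal; error handling shortened):
+//
+//	cfg, err := LoadGlobalFromEnv()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = cfg.JWT.Exp // 3600 unless overridden, per ApplyDefaults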
+func LoadGlobalFromEnv() (*GlobalConfiguration, error) {
+	config := new(GlobalConfiguration)
+	if err := loadGlobal(config); err != nil {
+		return nil, err
+	}
+	return config, nil
+}
+
+func LoadGlobal(filename string) (*GlobalConfiguration, error) {
+	if err := loadEnvironment(filename); err != nil {
+		return nil, err
+	}
+
+	config := new(GlobalConfiguration)
+	if err := loadGlobal(config); err != nil {
+		return nil, err
+	}
+	return config, nil
+}
+
+func loadGlobal(config *GlobalConfiguration) error {
+	// although the package is called "auth" it used to be called "gotrue",
+	// so environment variables remain prefixed with "GOTRUE"
+	if err := envconfig.Process("gotrue", config); err != nil {
+		return err
+	}
+
+	if err := config.ApplyDefaults(); err != nil {
+		return err
+	}
+
+	if err := config.Validate(); err != nil {
+		return err
+	}
+
+	if config.Hook.PasswordVerificationAttempt.Enabled {
+		if err := config.Hook.PasswordVerificationAttempt.PopulateExtensibilityPoint(); err != nil {
+			return err
+		}
+	}
+
+	if config.Hook.SendSMS.Enabled {
+		if err := config.Hook.SendSMS.PopulateExtensibilityPoint(); err != nil {
+			return err
+		}
+	}
+	if config.Hook.SendEmail.Enabled {
+		if err := config.Hook.SendEmail.PopulateExtensibilityPoint(); err != nil {
+			return err
+		}
+	}
+
+	if config.Hook.MFAVerificationAttempt.Enabled {
+		if err := config.Hook.MFAVerificationAttempt.PopulateExtensibilityPoint(); err != nil {
+			return err
+		}
+	}
+
+	if config.Hook.CustomAccessToken.Enabled {
+		if err := config.Hook.CustomAccessToken.PopulateExtensibilityPoint(); err != nil {
+			return err
+		}
+	}
+
+	if config.SAML.Enabled {
+		if err := config.SAML.PopulateFields(config.API.ExternalURL); err != nil {
+			return err
+		}
+	} else {
+		config.SAML.PrivateKey = ""
+	}
+
+	if config.Sms.Provider != "" {
+		SMSTemplate := config.Sms.Template
+		if SMSTemplate == "" {
+			SMSTemplate = "Your code is {{ .Code }}"
+		}
+		template, err := template.New("").Parse(SMSTemplate)
+		if err != nil {
+			return err
+		}
+		config.Sms.SMSTemplate = template
+	}
+
+	if config.MFA.Phone.EnrollEnabled || config.MFA.Phone.VerifyEnabled {
+		smsTemplate := config.MFA.Phone.Template
+		if smsTemplate == "" {
+			smsTemplate = "Your code is {{ .Code }}"
+		}
+		template, err := template.New("").Parse(smsTemplate)
+		if err != nil {
+			return err
+		}
+		config.MFA.Phone.SMSTemplate = template
+	}
+
+	return nil
+}
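+
+// Sketch of the SMS template handling above: a custom GOTRUE_SMS_TEMPLATE is
+// parsed with text/template, so the fallback "Your code is {{ .Code }}"
+// renders as "Your code is 123456" when executed with a value whose Code
+// field is "123456".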
+
+// ApplyDefaults sets defaults for a GlobalConfiguration
+func (config *GlobalConfiguration) ApplyDefaults() error {
+	if config.JWT.AdminGroupName == "" {
+		config.JWT.AdminGroupName = "admin"
+	}
+
+	if len(config.JWT.AdminRoles) == 0 {
+		config.JWT.AdminRoles = []string{"service_role", "supabase_admin"}
+	}
+
+	if config.JWT.Exp == 0 {
+		config.JWT.Exp = 3600
+	}
+
+	if len(config.JWT.Keys) == 0 {
+		// transform the secret into a JWK for consistency
+		privKey, err := jwk.FromRaw([]byte(config.JWT.Secret))
+		if err != nil {
+			return err
+		}
+		if config.JWT.KeyID != "" {
+			if err := privKey.Set(jwk.KeyIDKey, config.JWT.KeyID); err != nil {
+				return err
+			}
+		}
+		if privKey.Algorithm().String() == "" {
+			if err := privKey.Set(jwk.AlgorithmKey, jwt.SigningMethodHS256.Name); err != nil {
+				return err
+			}
+		}
+		if err := privKey.Set(jwk.KeyUsageKey, "sig"); err != nil {
+			return err
+		}
+		if len(privKey.KeyOps()) == 0 {
+			if err := privKey.Set(jwk.KeyOpsKey, jwk.KeyOperationList{jwk.KeyOpSign, jwk.KeyOpVerify}); err != nil {
+				return err
+			}
+		}
+		pubKey, err := privKey.PublicKey()
+		if err != nil {
+			return err
+		}
+		config.JWT.Keys = make(JwtKeysDecoder)
+		config.JWT.Keys[config.JWT.KeyID] = JwkInfo{
+			PublicKey:  pubKey,
+			PrivateKey: privKey,
+		}
+	}
+
+	if config.JWT.ValidMethods == nil {
+		config.JWT.ValidMethods = []string{}
+		for _, key := range config.JWT.Keys {
+			alg := GetSigningAlg(key.PublicKey)
+			config.JWT.ValidMethods = append(config.JWT.ValidMethods, alg.Alg())
+		}
+	}
+
+	if config.Mailer.Autoconfirm && config.Mailer.AllowUnverifiedEmailSignIns {
+		return errors.New("cannot enable both GOTRUE_MAILER_AUTOCONFIRM and GOTRUE_MAILER_ALLOW_UNVERIFIED_EMAIL_SIGN_INS")
+	}
+
+	if config.Mailer.URLPaths.Invite == "" {
+		config.Mailer.URLPaths.Invite = "/verify"
+	}
+
+	if config.Mailer.URLPaths.Confirmation == "" {
+		config.Mailer.URLPaths.Confirmation = "/verify"
+	}
+
+	if config.Mailer.URLPaths.Recovery == "" {
+		config.Mailer.URLPaths.Recovery = "/verify"
+	}
+
+	if config.Mailer.URLPaths.EmailChange == "" {
+		config.Mailer.URLPaths.EmailChange = "/verify"
+	}
+
+	if config.Mailer.OtpExp == 0 {
+		config.Mailer.OtpExp = 86400 // 1 day
+	}
+
+	if config.Mailer.OtpLength == 0 || config.Mailer.OtpLength < 6 || config.Mailer.OtpLength > 10 {
+		// 6-digit otp by default
+		config.Mailer.OtpLength = 6
+	}
+
+	if config.SMTP.MaxFrequency == 0 {
+		config.SMTP.MaxFrequency = 1 * time.Minute
+	}
+
+	if config.Sms.MaxFrequency == 0 {
+		config.Sms.MaxFrequency = 1 * time.Minute
+	}
+
+	if config.Sms.OtpExp == 0 {
+		config.Sms.OtpExp = 60
+	}
+
+	if config.Sms.OtpLength == 0 || config.Sms.OtpLength < 6 || config.Sms.OtpLength > 10 {
+		// 6-digit otp by default
+		config.Sms.OtpLength = 6
+	}
+
+	if config.Sms.TestOTP != nil {
+		formatTestOtps := make(map[string]string)
+		for phone, otp := range config.Sms.TestOTP {
+			phone = strings.ReplaceAll(strings.TrimPrefix(phone, "+"), " ", "")
+			formatTestOtps[phone] = otp
+		}
+		config.Sms.TestOTP = formatTestOtps
+	}
+
+	if len(config.Sms.Template) == 0 {
+		config.Sms.Template = ""
+	}
+
+	if config.URIAllowList == nil {
+		config.URIAllowList = []string{}
+	}
+
+	if config.URIAllowList != nil {
+		config.URIAllowListMap = make(map[string]glob.Glob)
+		for _, uri := range config.URIAllowList {
+			g := glob.MustCompile(uri, '.', '/')
+			config.URIAllowListMap[uri] = g
+		}
+	}
+
+	if config.Password.MinLength < defaultMinPasswordLength {
+		config.Password.MinLength = defaultMinPasswordLength
+	}
+
+	if config.MFA.ChallengeExpiryDuration < defaultChallengeExpiryDuration {
+		config.MFA.ChallengeExpiryDuration = defaultChallengeExpiryDuration
+	}
+
+	if config.MFA.FactorExpiryDuration < defaultFactorExpiryDuration {
+		config.MFA.FactorExpiryDuration = defaultFactorExpiryDuration
+	}
+
+	if config.MFA.Phone.MaxFrequency == 0 {
+		config.MFA.Phone.MaxFrequency = 1 * time.Minute
+	}
+
+	if config.MFA.Phone.OtpLength < 6 || config.MFA.Phone.OtpLength > 10 {
+		// 6-digit otp by default
+		config.MFA.Phone.OtpLength = 6
+	}
+
+	if config.External.FlowStateExpiryDuration < defaultFlowStateExpiryDuration {
+		config.External.FlowStateExpiryDuration = defaultFlowStateExpiryDuration
+	}
+
+	if len(config.External.AllowedIdTokenIssuers) == 0 {
+		config.External.AllowedIdTokenIssuers = append(config.External.AllowedIdTokenIssuers, "https://appleid.apple.com", "https://accounts.google.com")
+	}
+
+	return nil
+}
+
+// Validate validates the entire configuration.
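+//
+// It fans out to the Validate method of each sub-configuration listed in the
+// function body and returns the first failure. Sketch:
+//
+//	cfg := new(GlobalConfiguration)
+//	// ...populate cfg...
+//	if err := cfg.Validate(); err != nil {
+//		// e.g. "captcha provider secret is empty"
+//	}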
+func (c *GlobalConfiguration) Validate() error {
+	validatables := []interface {
+		Validate() error
+	}{
+		&c.API,
+		&c.DB,
+		&c.Tracing,
+		&c.Metrics,
+		&c.SMTP,
+		&c.Mailer,
+		&c.SAML,
+		&c.Security,
+		&c.Sessions,
+		&c.Hook,
+		&c.JWT.Keys,
+	}
+
+	for _, validatable := range validatables {
+		if err := validatable.Validate(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (o *OAuthProviderConfiguration) ValidateOAuth() error {
+	if !o.Enabled {
+		return errors.New("provider is not enabled")
+	}
+	if len(o.ClientID) == 0 {
+		return errors.New("missing OAuth client ID")
+	}
+	if o.Secret == "" {
+		return errors.New("missing OAuth secret")
+	}
+	if o.RedirectURI == "" {
+		return errors.New("missing redirect URI")
+	}
+	return nil
+}
+
+func (t *TwilioProviderConfiguration) Validate() error {
+	if t.AccountSid == "" {
+		return errors.New("missing Twilio account SID")
+	}
+	if t.AuthToken == "" {
+		return errors.New("missing Twilio auth token")
+	}
+	if t.MessageServiceSid == "" {
+		return errors.New("missing Twilio message service SID or Twilio phone number")
+	}
+	return nil
+}
+
+func (t *TwilioVerifyProviderConfiguration) Validate() error {
+	if t.AccountSid == "" {
+		return errors.New("missing Twilio account SID")
+	}
+	if t.AuthToken == "" {
+		return errors.New("missing Twilio auth token")
+	}
+	if t.MessageServiceSid == "" {
+		return errors.New("missing Twilio message service SID or Twilio phone number")
+	}
+	return nil
+}
+
+func (t *MessagebirdProviderConfiguration) Validate() error {
+	if t.AccessKey == "" {
+		return errors.New("missing Messagebird access key")
+	}
+	if t.Originator == "" {
+		return errors.New("missing Messagebird originator")
+	}
+	return nil
+}
+
+func (t *TextlocalProviderConfiguration) Validate() error {
+	if t.ApiKey == "" {
+		return errors.New("missing Textlocal API key")
+	}
+	if t.Sender == "" {
+		return errors.New("missing Textlocal sender")
+	}
+	return nil
+}
+
+func (t *VonageProviderConfiguration) Validate() error {
+	if t.ApiKey == "" {
+		return errors.New("missing Vonage API key")
+	}
+	if t.ApiSecret == "" {
+		return errors.New("missing Vonage API secret")
+	}
+	if t.From == "" {
+		return errors.New("missing Vonage 'from' parameter")
+	}
+	return nil
+}
+
+func (t *SmsProviderConfiguration) IsTwilioVerifyProvider() bool {
+	return t.Provider == "twilio_verify"
+}
diff --git a/auth_v2.169.0/internal/conf/configuration_test.go b/auth_v2.169.0/internal/conf/configuration_test.go
new file mode 100644
index 0000000..c03f954
--- /dev/null
+++ b/auth_v2.169.0/internal/conf/configuration_test.go
@@ -0,0 +1,246 @@
+package conf
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestMain(m *testing.M) {
+	// os.Exit skips deferred calls, so run the suite first and clear the
+	// environment explicitly before exiting.
+	code := m.Run()
+	os.Clearenv()
+	os.Exit(code)
+}
+
+func TestGlobal(t *testing.T) {
+	os.Setenv("GOTRUE_SITE_URL", "http://localhost:8080")
+	os.Setenv("GOTRUE_DB_DRIVER", "postgres")
+	os.Setenv("GOTRUE_DB_DATABASE_URL", "fake")
+	os.Setenv("GOTRUE_OPERATOR_TOKEN", "token")
+	os.Setenv("GOTRUE_API_REQUEST_ID_HEADER", "X-Request-ID")
+	os.Setenv("GOTRUE_JWT_SECRET", "secret")
+	os.Setenv("API_EXTERNAL_URL", "http://localhost:9999")
+	os.Setenv("GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI", "pg-functions://postgres/auth/count_failed_attempts")
+	os.Setenv("GOTRUE_HOOK_SEND_SMS_SECRETS", "v1,whsec_aWxpa2VzdXBhYmFzZXZlcnltdWNoYW5kaWhvcGV5b3Vkb3Rvbw==")
+	os.Setenv("GOTRUE_SMTP_HEADERS",
`{"X-PM-Metadata-project-ref":["project_ref"],"X-SES-Message-Tags":["ses:feedback-id-a=project_ref,ses:feedback-id-b=$messageType"]}`) + os.Setenv("GOTRUE_MAILER_EMAIL_VALIDATION_SERVICE_HEADERS", `{"apikey":["test"]}`) + os.Setenv("GOTRUE_SMTP_LOGGING_ENABLED", "true") + gc, err := LoadGlobal("") + require.NoError(t, err) + assert.Equal(t, true, gc.SMTP.LoggingEnabled) + assert.Equal(t, "project_ref", gc.SMTP.NormalizedHeaders()["X-PM-Metadata-project-ref"][0]) + require.NotNil(t, gc) + assert.Equal(t, "X-Request-ID", gc.API.RequestIDHeader) + assert.Equal(t, "pg-functions://postgres/auth/count_failed_attempts", gc.Hook.MFAVerificationAttempt.URI) + + { + hdrs := gc.Mailer.GetEmailValidationServiceHeaders() + assert.Equal(t, 1, len(hdrs["apikey"])) + assert.Equal(t, "test", hdrs["apikey"][0]) + } + +} + +func TestRateLimits(t *testing.T) { + { + os.Setenv("GOTRUE_RATE_LIMIT_EMAIL_SENT", "0/1h") + + gc, err := LoadGlobal("") + require.NoError(t, err) + assert.Equal(t, float64(0), gc.RateLimitEmailSent.Events) + assert.Equal(t, time.Hour, gc.RateLimitEmailSent.OverTime) + } + + { + os.Setenv("GOTRUE_RATE_LIMIT_EMAIL_SENT", "10/1h") + + gc, err := LoadGlobal("") + require.NoError(t, err) + assert.Equal(t, float64(10), gc.RateLimitEmailSent.Events) + assert.Equal(t, time.Hour, gc.RateLimitEmailSent.OverTime) + } +} + +func TestPasswordRequiredCharactersDecode(t *testing.T) { + examples := []struct { + Value string + Result []string + }{ + { + Value: "a:b:c", + Result: []string{ + "a", + "b", + "c", + }, + }, + { + Value: "a\\:b:c", + Result: []string{ + "a:b", + "c", + }, + }, + { + Value: "a:b\\:c", + Result: []string{ + "a", + "b:c", + }, + }, + { + Value: "\\:a:b:c", + Result: []string{ + ":a", + "b", + "c", + }, + }, + { + Value: "a:b:c\\:", + Result: []string{ + "a", + "b", + "c:", + }, + }, + { + Value: "::\\::", + Result: []string{ + ":", + }, + }, + { + Value: "", + Result: nil, + }, + { + Value: " ", + Result: []string{ + " ", + }, + }, + } + + for i, example := range examples { + var into PasswordRequiredCharacters + require.NoError(t, into.Decode(example.Value), "Example %d failed with error", i) + + require.Equal(t, []string(into), example.Result, "Example %d got unexpected result", i) + } +} + +func TestHTTPHookSecretsDecode(t *testing.T) { + examples := []struct { + Value string + Result []string + }{ + { + Value: "v1,whsec_secret1|v1a,whpk_secrets:whsk_secret2|v1,whsec_secret3", + Result: []string{"v1,whsec_secret1", "v1a,whpk_secrets:whsk_secret2", "v1,whsec_secret3"}, + }, + { + Value: "v1,whsec_singlesecret", + Result: []string{"v1,whsec_singlesecret"}, + }, + { + Value: " ", + Result: []string{" "}, + }, + { + Value: "", + Result: nil, + }, + { + Value: "|a|b|c", + Result: []string{ + "a", + "b", + "c", + }, + }, + { + Value: "||||", + Result: nil, + }, + { + Value: "::", + Result: []string{"::"}, + }, + { + Value: "secret1::secret3", + Result: []string{"secret1::secret3"}, + }, + } + + for i, example := range examples { + var into HTTPHookSecrets + + require.NoError(t, into.Decode(example.Value), "Example %d failed with error", i) + require.Equal(t, []string(into), example.Result, "Example %d got unexpected result", i) + } +} + +func TestValidateExtensibilityPointURI(t *testing.T) { + cases := []struct { + desc string + uri string + expectError bool + }{ + // Positive test cases + {desc: "Valid HTTPS URI", uri: "https://asdfgggqqwwerty.website.co/functions/v1/custom-sms-sender", expectError: false}, + {desc: "Valid HTTPS URI", uri: 
"HTTPS://www.asdfgggqqwwerty.website.co/functions/v1/custom-sms-sender", expectError: false}, + {desc: "Valid Postgres URI", uri: "pg-functions://postgres/auth/verification_hook_reject", expectError: false}, + {desc: "Another Valid URI", uri: "pg-functions://postgres/user_management/add_user", expectError: false}, + {desc: "Another Valid URI", uri: "pg-functions://postgres/MySpeCial/FUNCTION_THAT_YELLS_AT_YOU", expectError: false}, + {desc: "Valid HTTP URI", uri: "http://localhost/functions/v1/custom-sms-sender", expectError: false}, + + // Negative test cases + {desc: "Invalid HTTP URI", uri: "http://asdfgggg.website.co/functions/v1/custom-sms-sender", expectError: true}, + {desc: "Invalid HTTPS URI (HTTP)", uri: "http://asdfgggqqwwerty.supabase.co/functions/v1/custom-sms-sender", expectError: true}, + {desc: "Invalid Schema Name", uri: "pg-functions://postgres/123auth/verification_hook_reject", expectError: true}, + {desc: "Invalid Function Name", uri: "pg-functions://postgres/auth/123verification_hook_reject", expectError: true}, + {desc: "Insufficient Path Parts", uri: "pg-functions://postgres/auth", expectError: true}, + } + + for _, tc := range cases { + ep := ExtensibilityPointConfiguration{URI: tc.uri} + err := ep.ValidateExtensibilityPoint() + if tc.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + } +} + +func TestValidateExtensibilityPointSecrets(t *testing.T) { + validHTTPSURI := "https://asdfgggqqwwerty.website.co/functions/v1/custom-sms-sender" + cases := []struct { + desc string + secret []string + expectError bool + }{ + // Positive test cases + {desc: "Valid Symmetric Secret", secret: []string{"v1,whsec_NDYzODhlNTY0ZGI1OWZjYTU2NjMwN2FhYzM3YzBkMWQ0NzVjNWRkNTJmZDU0MGNhYTAzMjVjNjQzMzE3Mjk2Zg====="}, expectError: false}, + {desc: "Valid Asymmetric Secret", secret: []string{"v1a,whpk_NDYzODhlNTY0ZGI1OWZjYTU2NjMwN2FhYzM3YzBkMWQ0NzVjNWRkNTJmZDU0MGNhYTAzMjVjNjQzMzE3Mjk2Zg==:whsk_abc889a6b1160015025064f108a48d6aba1c7c95fa8e304b4d225e8ae0121511"}, expectError: false}, + {desc: "Valid Mix of Symmetric and asymmetric Secret", secret: []string{"v1,whsec_2b49264c90fd15db3bb0e05f4e1547b9c183eb06d585be8a", "v1a,whpk_46388e564db59fca566307aac37c0d1d475c5dd52fd540caa0325c643317296f:whsk_YWJjODg5YTZiMTE2MDAxNTAyNTA2NGYxMDhhNDhkNmFiYTFjN2M5NWZhOGUzMDRiNGQyMjVlOGFlMDEyMTUxMSI="}, expectError: false}, + + // Negative test cases + {desc: "Invalid Asymmetric Secret", secret: []string{"v1a,john:jill", "jill"}, expectError: true}, + {desc: "Invalid Symmetric Secret", secret: []string{"tommy"}, expectError: true}, + } + for _, tc := range cases { + ep := ExtensibilityPointConfiguration{URI: validHTTPSURI, HTTPHookSecrets: tc.secret} + err := ep.ValidateExtensibilityPoint() + if tc.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + + } + +} diff --git a/auth_v2.169.0/internal/conf/jwk.go b/auth_v2.169.0/internal/conf/jwk.go new file mode 100644 index 0000000..fffb0c2 --- /dev/null +++ b/auth_v2.169.0/internal/conf/jwk.go @@ -0,0 +1,150 @@ +package conf + +import ( + "encoding/json" + "fmt" + + "github.com/golang-jwt/jwt/v5" + "github.com/lestrrat-go/jwx/v2/jwk" +) + +type JwtKeysDecoder map[string]JwkInfo + +type JwkInfo struct { + PublicKey jwk.Key `json:"public_key"` + PrivateKey jwk.Key `json:"private_key"` +} + +// Decode implements the Decoder interface +func (j *JwtKeysDecoder) Decode(value string) error { + data := make([]json.RawMessage, 0) + if err := json.Unmarshal([]byte(value), &data); err != nil { + return err + } + + config := 
JwtKeysDecoder{}
+	for _, key := range data {
+		privJwk, err := jwk.ParseKey(key)
+		if err != nil {
+			return err
+		}
+		pubJwk, err := jwk.PublicKeyOf(privJwk)
+		if err != nil {
+			return err
+		}
+
+		// all public keys should have the use claim set to 'sig'
+		if err := pubJwk.Set(jwk.KeyUsageKey, "sig"); err != nil {
+			return err
+		}
+
+		// all public keys should only have 'verify' set as the key_ops
+		if err := pubJwk.Set(jwk.KeyOpsKey, jwk.KeyOperationList{jwk.KeyOpVerify}); err != nil {
+			return err
+		}
+
+		config[pubJwk.KeyID()] = JwkInfo{
+			PublicKey:  pubJwk,
+			PrivateKey: privJwk,
+		}
+	}
+	*j = config
+	return nil
+}
+
+// Validate performs _minimal_ checks on whether the data stored in the key
+// are valid. By minimal, we mean that it does not check if the key is valid
+// for use in cryptographic operations. For example, it does not check if an
+// RSA key's `e` field is a valid exponent, or if the `n` field is a valid
+// modulus. Instead, it checks for things such as the _presence_ of some
+// required fields, or if certain keys' values are of particular length.
+//
+// Note that depending on the underlying key type, use of this method requires
+// that multiple fields in the key are properly populated. For example, an EC
+// key's "x", "y" fields cannot be validated unless the "crv" field is
+// populated first.
+func (j *JwtKeysDecoder) Validate() error {
+	signingKeys := []jwk.Key{}
+	for _, key := range *j {
+		if err := key.PrivateKey.Validate(); err != nil {
+			return err
+		}
+		// symmetric keys don't have public keys
+		if key.PublicKey != nil {
+			if err := key.PublicKey.Validate(); err != nil {
+				return err
+			}
+		}
+
+		for _, op := range key.PrivateKey.KeyOps() {
+			if op == jwk.KeyOpSign {
+				signingKeys = append(signingKeys, key.PrivateKey)
+				break
+			}
+		}
+	}
+
+	switch {
+	case len(signingKeys) == 0:
+		return fmt.Errorf("no signing key detected")
+	case len(signingKeys) > 1:
+		return fmt.Errorf("multiple signing keys detected, only 1 signing key is supported")
+	}
+
+	return nil
+}
+
+func GetSigningJwk(config *JWTConfiguration) (jwk.Key, error) {
+	for _, key := range config.Keys {
+		for _, op := range key.PrivateKey.KeyOps() {
+			// the private JWK with key_ops "sign" should be used as the signing key
+			if op == jwk.KeyOpSign {
+				return key.PrivateKey, nil
+			}
+		}
+	}
+	return nil, fmt.Errorf("no signing key found")
+}
+
+func GetSigningKey(k jwk.Key) (any, error) {
+	var key any
+	if err := k.Raw(&key); err != nil {
+		return nil, err
+	}
+	return key, nil
+}
+
+func GetSigningAlg(k jwk.Key) jwt.SigningMethod {
+	if k == nil {
+		return jwt.SigningMethodHS256
+	}
+
+	switch (k).Algorithm().String() {
+	case "RS256":
+		return jwt.SigningMethodRS256
+	case "RS512":
+		return jwt.SigningMethodRS512
+	case "ES256":
+		return jwt.SigningMethodES256
+	case "ES512":
+		return jwt.SigningMethodES512
+	case "EdDSA":
+		return jwt.SigningMethodEdDSA
+	}
+
+	// return HS256 to preserve existing behaviour
+	return jwt.SigningMethodHS256
+}
+
+func FindPublicKeyByKid(kid string, config *JWTConfiguration) (any, error) {
+	if k, ok := config.Keys[kid]; ok {
+		key, err := GetSigningKey(k.PublicKey)
+		if err != nil {
+			return nil, err
+		}
+		return key, nil
+	}
+	if kid == config.KeyID {
+		return []byte(config.Secret), nil
+	}
+	return nil, fmt.Errorf("invalid kid: %s", kid)
+}
diff --git a/auth_v2.169.0/internal/conf/jwk_test.go b/auth_v2.169.0/internal/conf/jwk_test.go
new file mode 100644
index 0000000..275c0eb
--- /dev/null
+++ b/auth_v2.169.0/internal/conf/jwk_test.go
@@ -0,0 +1,81 @@
+package conf + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDecode(t *testing.T) { + // array of JWKs containing 4 keys + gotrueJwtKeys := `[{"kty":"oct","k":"9Sj51i2YvfY85NJZFD6rAl9fKDxSKjFgW6W6ZXOJLnU","kid":"f90202bc-413a-4db3-8e04-b70a02a65669","key_ops":["verify"],"alg":"HS256"},{"kty":"RSA","n":"4slQjr-XoU6I1KXFWOeeJi387RIUxjhyzXX3GUVNb75a0SPKoGShlJEbpvuXqkDLGDweLcIZy-01nqgjSzMY_tUO3L78MxVfIVn7MByJ4_zbrVf5rjKeAk9EEMl6pb8nKJGArph9sOwL68LLioNySt_WNo_hMfuxUuVkRagh5gLjYoQ4odkULQrgwlMcXxXNnvg0aYURUr2SDmncHNuZQ3adebRlI164mUZPPWui2fg72R7c9qhVaAEzbdG-JAuC3zn5iL4zZk-8pOwZkM7Qb_2lrcXwdTl_Qz6fMdAHz_3rggac5oeKkdvO2x7_XiUwGxIBYSghxg5BBxcyqd6WrQ","e":"AQAB","d":"FjJo7uH4aUoktO8kHhbHbY_KSdQpHDjKyc7yTS_0DWYgUfdozzubJfRDF42vI-KsXssF-NoB0wJf0uP0L8ip6G326XPuoMQRTMgcaF8j6swTwsapSOEagr7BzcECx1zpc2-ojhwbLHSvRutWDzPJkbrUccF8vRC6BsiAUG4Hapiumbot7JtJGwU8ZUhxico7_OEJ_MtkRrHByXgrOMnzNLrmViI9rzvtWOhVc8sNDzLogDDi01AP0j6WeBhbOpaZ_1BMLQ9IeeN5Iiy-7Qj-q4-8kBXIPXpYaKMFnDTmhB0GAVUFimF6ojhZNAJvV81VMHPjrEmmps0_qBfIlKAB","p":"9G7wBpiSJHAl-w47AWvW60v_hye50lte4Ep2P3KeRyinzgxtEMivzldoqirwdoyPCJWwU7nNsv7AjdXVoHFy3fJvJeV5mhArxb2zA36OS_Tr3CQXtB3OO-RFwVcG7AGO7XvA54PK28siXY2VvkG2Xn_ZrbVebJnHQprn7ddUIIE","q":"7YSaG2E_M9XpgUJ0izwKdfGew6Hz5utPUdwMWjqr81BjtLkUtQ3tGYWs2tdaRYUTK4mNFyR2MjLYnMK-F37rue4LSKitmEu2N6RD9TwzcqwiEL_vuQTC985iJ0hzUC58LcbhYtTLU3KqZXXUqaeBXEwQAWxK1NRf6rQRhOGk4C0","dp":"fOV-sfAdpI7FaW3RCp3euGYh0B6lXW4goXyKxUq8w2FrtOY2iH_zDP0u1tyP-BNENr-91Fo5V__BxfeAa7XsWqo4zuVdaDJhG24d3Wg6L2ebaOXsUrV0Hrg6SFs-hzMYpBI69FEsQ3idO65P2GJdXBX51T-6WsWMwmTCo44GR4E","dq":"O2DrJe0p38ualLYIbMaV1uaQyleyoggxzEU20VfZpPpz8rpScvEIVVkV3Z_48WhTYo8AtshmxCXyAT6uRzFzvQfFymRhAbHr2_01ABoMwp5F5eoWBCsskscFwsxaB7GXWdpefla0figscTED-WXm8SwS1Eg-bParBAIAXzgKAAE","qi":"Cezqw8ECfMmwnRXJuiG2A93lzhixHxXISvGC-qbWaRmCfetheSviZlM0_KxF6dsvrw_aNfIPa8rv1TbN-5F04v_RU1CD79QuluzXWLkZVhPXorkK5e8sUi_odzAJXOwHKQzal5ndInl4XYctDHQr8jXcFW5Un65FhPwdAC6-aek","kid":"74b1a36b-4b39-467f-976b-acc7ec600a6d","key_ops":["verify"],"alg":"RS256"},{"kty":"EC","x":"GwbnH57MUhgL14dJfayyzuI6o2_mB_Pm8xIuauHXtQs","y":"cYqN0VAcv0BC9wrg3vNgHlKhGP8ZEedUC2A8jXpaGwA","crv":"P-256","d":"4STEXq7W4UY0piCGPueMaQqAAZ5jVRjjA_b1Hq7YgmM","kid":"fa3ffc99-4635-4b19-b5c0-6d6a8d30c4eb","key_ops":["sign","verify"],"alg":"ES256"},{"crv":"Ed25519","d":"T179kXSOJHE8CNbqaI2HNdG8r3YbSoKYxNRSzTkpEcY","x":"iDYagELzmD4z6uaW7eAZLuQ9fiUlnLqtrh7AfNbiNiI","kty":"OKP","kid":"b1176272-46e4-4226-b0bd-12eef4fd7367","key_ops":["verify"],"alg":"EdDSA"}]` + var decoder JwtKeysDecoder + require.NoError(t, decoder.Decode(gotrueJwtKeys)) + require.Len(t, decoder, 4) + + for kid, key := range decoder { + require.NotEmpty(t, kid) + require.NotNil(t, key.PrivateKey) + require.NotNil(t, key.PublicKey) + require.NotEmpty(t, key.PublicKey.KeyOps(), "missing key_ops claim") + } +} + +func TestJWTConfiguration(t *testing.T) { + // array of JWKs containing 4 keys + gotrueJwtKeys := 
`[{"kty":"oct","k":"9Sj51i2YvfY85NJZFD6rAl9fKDxSKjFgW6W6ZXOJLnU","kid":"f90202bc-413a-4db3-8e04-b70a02a65669","key_ops":["verify"],"alg":"HS256"},{"kty":"RSA","n":"4slQjr-XoU6I1KXFWOeeJi387RIUxjhyzXX3GUVNb75a0SPKoGShlJEbpvuXqkDLGDweLcIZy-01nqgjSzMY_tUO3L78MxVfIVn7MByJ4_zbrVf5rjKeAk9EEMl6pb8nKJGArph9sOwL68LLioNySt_WNo_hMfuxUuVkRagh5gLjYoQ4odkULQrgwlMcXxXNnvg0aYURUr2SDmncHNuZQ3adebRlI164mUZPPWui2fg72R7c9qhVaAEzbdG-JAuC3zn5iL4zZk-8pOwZkM7Qb_2lrcXwdTl_Qz6fMdAHz_3rggac5oeKkdvO2x7_XiUwGxIBYSghxg5BBxcyqd6WrQ","e":"AQAB","d":"FjJo7uH4aUoktO8kHhbHbY_KSdQpHDjKyc7yTS_0DWYgUfdozzubJfRDF42vI-KsXssF-NoB0wJf0uP0L8ip6G326XPuoMQRTMgcaF8j6swTwsapSOEagr7BzcECx1zpc2-ojhwbLHSvRutWDzPJkbrUccF8vRC6BsiAUG4Hapiumbot7JtJGwU8ZUhxico7_OEJ_MtkRrHByXgrOMnzNLrmViI9rzvtWOhVc8sNDzLogDDi01AP0j6WeBhbOpaZ_1BMLQ9IeeN5Iiy-7Qj-q4-8kBXIPXpYaKMFnDTmhB0GAVUFimF6ojhZNAJvV81VMHPjrEmmps0_qBfIlKAB","p":"9G7wBpiSJHAl-w47AWvW60v_hye50lte4Ep2P3KeRyinzgxtEMivzldoqirwdoyPCJWwU7nNsv7AjdXVoHFy3fJvJeV5mhArxb2zA36OS_Tr3CQXtB3OO-RFwVcG7AGO7XvA54PK28siXY2VvkG2Xn_ZrbVebJnHQprn7ddUIIE","q":"7YSaG2E_M9XpgUJ0izwKdfGew6Hz5utPUdwMWjqr81BjtLkUtQ3tGYWs2tdaRYUTK4mNFyR2MjLYnMK-F37rue4LSKitmEu2N6RD9TwzcqwiEL_vuQTC985iJ0hzUC58LcbhYtTLU3KqZXXUqaeBXEwQAWxK1NRf6rQRhOGk4C0","dp":"fOV-sfAdpI7FaW3RCp3euGYh0B6lXW4goXyKxUq8w2FrtOY2iH_zDP0u1tyP-BNENr-91Fo5V__BxfeAa7XsWqo4zuVdaDJhG24d3Wg6L2ebaOXsUrV0Hrg6SFs-hzMYpBI69FEsQ3idO65P2GJdXBX51T-6WsWMwmTCo44GR4E","dq":"O2DrJe0p38ualLYIbMaV1uaQyleyoggxzEU20VfZpPpz8rpScvEIVVkV3Z_48WhTYo8AtshmxCXyAT6uRzFzvQfFymRhAbHr2_01ABoMwp5F5eoWBCsskscFwsxaB7GXWdpefla0figscTED-WXm8SwS1Eg-bParBAIAXzgKAAE","qi":"Cezqw8ECfMmwnRXJuiG2A93lzhixHxXISvGC-qbWaRmCfetheSviZlM0_KxF6dsvrw_aNfIPa8rv1TbN-5F04v_RU1CD79QuluzXWLkZVhPXorkK5e8sUi_odzAJXOwHKQzal5ndInl4XYctDHQr8jXcFW5Un65FhPwdAC6-aek","kid":"74b1a36b-4b39-467f-976b-acc7ec600a6d","key_ops":["verify"],"alg":"RS256"},{"kty":"EC","x":"GwbnH57MUhgL14dJfayyzuI6o2_mB_Pm8xIuauHXtQs","y":"cYqN0VAcv0BC9wrg3vNgHlKhGP8ZEedUC2A8jXpaGwA","crv":"P-256","d":"4STEXq7W4UY0piCGPueMaQqAAZ5jVRjjA_b1Hq7YgmM","kid":"fa3ffc99-4635-4b19-b5c0-6d6a8d30c4eb","key_ops":["sign","verify"],"alg":"ES256"},{"crv":"Ed25519","d":"T179kXSOJHE8CNbqaI2HNdG8r3YbSoKYxNRSzTkpEcY","x":"iDYagELzmD4z6uaW7eAZLuQ9fiUlnLqtrh7AfNbiNiI","kty":"OKP","kid":"b1176272-46e4-4226-b0bd-12eef4fd7367","key_ops":["verify"],"alg":"EdDSA"}]`
+	var decoder JwtKeysDecoder
+	require.NoError(t, decoder.Decode(gotrueJwtKeys))
+	require.Len(t, decoder, 4)
+
+	cases := []struct {
+		desc           string
+		config         JWTConfiguration
+		expectedLength int
+	}{
+		{
+			desc: "GOTRUE_JWT_KEYS is nil",
+			config: JWTConfiguration{
+				Secret: "testsecret",
+				KeyID:  "testkeyid",
+			},
+			expectedLength: 1,
+		},
+		{
+			desc: "GOTRUE_JWT_KEYS is an empty map",
+			config: JWTConfiguration{
+				Secret: "testsecret",
+				KeyID:  "testkeyid",
+				Keys:   JwtKeysDecoder{},
+			},
+			expectedLength: 1,
+		},
+		{
+			desc: "Prefer GOTRUE_JWT_KEYS over GOTRUE_JWT_SECRET",
+			config: JWTConfiguration{
+				Secret: "testsecret",
+				KeyID:  "testkeyid",
+				Keys:   decoder,
+			},
+			expectedLength: 4,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.desc, func(t *testing.T) {
+			globalConfig := GlobalConfiguration{
+				JWT: c.config,
+			}
+			require.NoError(t, globalConfig.ApplyDefaults())
+			require.NotEmpty(t, globalConfig.JWT.Keys)
+			require.Len(t, globalConfig.JWT.Keys, c.expectedLength)
+			for _, key := range globalConfig.JWT.Keys {
+				// public keys should contain these required claims
+				require.NotNil(t, key.PublicKey.Algorithm())
+				require.NotNil(t, key.PublicKey.KeyID())
+				require.NotNil(t, key.PublicKey.KeyOps())
require.Equal(t, "sig", key.PublicKey.KeyUsage()) + } + }) + } +} diff --git a/auth_v2.169.0/internal/conf/logging.go b/auth_v2.169.0/internal/conf/logging.go new file mode 100644 index 0000000..d079006 --- /dev/null +++ b/auth_v2.169.0/internal/conf/logging.go @@ -0,0 +1,11 @@ +package conf + +type LoggingConfig struct { + Level string `mapstructure:"log_level" json:"log_level"` + File string `mapstructure:"log_file" json:"log_file"` + DisableColors bool `mapstructure:"disable_colors" split_words:"true" json:"disable_colors"` + QuoteEmptyFields bool `mapstructure:"quote_empty_fields" split_words:"true" json:"quote_empty_fields"` + TSFormat string `mapstructure:"ts_format" json:"ts_format"` + Fields map[string]interface{} `mapstructure:"fields" json:"fields"` + SQL string `mapstructure:"sql" json:"sql"` +} diff --git a/auth_v2.169.0/internal/conf/metrics.go b/auth_v2.169.0/internal/conf/metrics.go new file mode 100644 index 0000000..ac6f7ec --- /dev/null +++ b/auth_v2.169.0/internal/conf/metrics.go @@ -0,0 +1,26 @@ +package conf + +type MetricsExporter = string + +const ( + Prometheus MetricsExporter = "prometheus" + OpenTelemetryMetrics MetricsExporter = "opentelemetry" +) + +type MetricsConfig struct { + Enabled bool + + Exporter MetricsExporter `default:"opentelemetry"` + + // ExporterProtocol is the OTEL_EXPORTER_OTLP_PROTOCOL env variable, + // only available when exporter is opentelemetry. See: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md + ExporterProtocol string `default:"http/protobuf" envconfig:"OTEL_EXPORTER_OTLP_PROTOCOL"` + + PrometheusListenHost string `default:"0.0.0.0" envconfig:"OTEL_EXPORTER_PROMETHEUS_HOST"` + PrometheusListenPort string `default:"9100" envconfig:"OTEL_EXPORTER_PROMETHEUS_PORT"` +} + +func (mc MetricsConfig) Validate() error { + return nil +} diff --git a/auth_v2.169.0/internal/conf/profiler.go b/auth_v2.169.0/internal/conf/profiler.go new file mode 100644 index 0000000..41752bf --- /dev/null +++ b/auth_v2.169.0/internal/conf/profiler.go @@ -0,0 +1,7 @@ +package conf + +type ProfilerConfig struct { + Enabled bool `default:"false"` + Host string `default:"localhost"` + Port string `default:"9998"` +} diff --git a/auth_v2.169.0/internal/conf/rate.go b/auth_v2.169.0/internal/conf/rate.go new file mode 100644 index 0000000..059ed65 --- /dev/null +++ b/auth_v2.169.0/internal/conf/rate.go @@ -0,0 +1,65 @@ +package conf + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +const defaultOverTime = time.Hour + +const ( + BurstRateType = "burst" + IntervalRateType = "interval" +) + +type Rate struct { + Events float64 `json:"events,omitempty"` + OverTime time.Duration `json:"over_time,omitempty"` + typ string +} + +func (r *Rate) GetRateType() string { + if r.typ == "" { + return IntervalRateType + } + return r.typ +} + +// Decode is used by envconfig to parse the env-config string to a Rate value. 
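+//
+// Two syntaxes are accepted (sketch, mirroring rate_test.go):
+//
+//	"1800"    -> interval rate: 1800 events, over-time defaulting to one hour
+//	"3600/1h" -> burst rate: 3600 events per hour
+//	"100/24h" -> burst rate: 100 events per 24 hours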
+func (r *Rate) Decode(value string) error {
+	if f, err := strconv.ParseFloat(value, 64); err == nil {
+		r.typ = IntervalRateType
+		r.Events = f
+		r.OverTime = defaultOverTime
+		return nil
+	}
+	parts := strings.Split(value, "/")
+	if len(parts) != 2 {
+		return fmt.Errorf("rate: value does not match rate syntax %q", value)
+	}
+
+	// 52 because the uint needs to fit in a float64
+	e, err := strconv.ParseUint(parts[0], 10, 52)
+	if err != nil {
+		return fmt.Errorf("rate: events part of rate value %q failed to parse as uint64: %w", value, err)
+	}
+
+	d, err := time.ParseDuration(parts[1])
+	if err != nil {
+		return fmt.Errorf("rate: over-time part of rate value %q failed to parse as duration: %w", value, err)
+	}
+
+	r.typ = BurstRateType
+	r.Events = float64(e)
+	r.OverTime = d
+	return nil
+}
+
+func (r *Rate) String() string {
+	if r.OverTime == 0 {
+		return fmt.Sprintf("%f", r.Events)
+	}
+	return fmt.Sprintf("%d/%s", uint64(r.Events), r.OverTime.String())
+}
diff --git a/auth_v2.169.0/internal/conf/rate_test.go b/auth_v2.169.0/internal/conf/rate_test.go
new file mode 100644
index 0000000..378deda
--- /dev/null
+++ b/auth_v2.169.0/internal/conf/rate_test.go
@@ -0,0 +1,68 @@
+package conf
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestRateDecode(t *testing.T) {
+	cases := []struct {
+		str string
+		exp Rate
+		err string
+	}{
+		{str: "1800",
+			exp: Rate{Events: 1800, OverTime: time.Hour, typ: IntervalRateType}},
+		{str: "1800.0",
+			exp: Rate{Events: 1800, OverTime: time.Hour, typ: IntervalRateType}},
+		{str: "3600/1h",
+			exp: Rate{Events: 3600, OverTime: time.Hour, typ: BurstRateType}},
+		{str: "3600/1h0m0s",
+			exp: Rate{Events: 3600, OverTime: time.Hour, typ: BurstRateType}},
+		{str: "100/24h",
+			exp: Rate{Events: 100, OverTime: time.Hour * 24, typ: BurstRateType}},
+		{str: "", exp: Rate{},
+			err: `rate: value does not match`},
+		{str: "1h", exp: Rate{},
+			err: `rate: value does not match`},
+		{str: "/", exp: Rate{},
+			err: `rate: events part of rate value`},
+		{str: "/1h", exp: Rate{},
+			err: `rate: events part of rate value`},
+		{str: "3600.0/1h", exp: Rate{},
+			err: `rate: events part of rate value "3600.0/1h" failed to parse`},
+		{str: "100/", exp: Rate{},
+			err: `rate: over-time part of rate value`},
+		{str: "100/1", exp: Rate{},
+			err: `rate: over-time part of rate value`},
+
+		// zero events
+		{str: "0/1h",
+			exp: Rate{Events: 0, OverTime: time.Hour, typ: BurstRateType}},
+		{str: "0/24h",
+			exp: Rate{Events: 0, OverTime: time.Hour * 24, typ: BurstRateType}},
+	}
+	for idx, tc := range cases {
+		var r Rate
+		err := r.Decode(tc.str)
+		require.Equal(t, tc.exp, r) // verify r is not mutated on error
+		t.Logf("tc #%v - duration str %v", idx, tc.str)
+		if tc.err != "" {
+			require.Error(t, err)
+			require.Contains(t, err.Error(), tc.err)
+			continue
+		}
+		require.NoError(t, err)
+		require.Equal(t, tc.exp, r)
+		require.Equal(t, tc.exp.typ, r.GetRateType())
+	}
+
+	// GetRateType() zero value
+	require.Equal(t, IntervalRateType, (&Rate{}).GetRateType())
+
+	// String()
+	require.Equal(t, "0.000000", (&Rate{}).String())
+	require.Equal(t, "100/1h0m0s", (&Rate{Events: 100, OverTime: time.Hour}).String())
+}
diff --git a/auth_v2.169.0/internal/conf/saml.go b/auth_v2.169.0/internal/conf/saml.go
new file mode 100644
index 0000000..66a820c
--- /dev/null
+++ b/auth_v2.169.0/internal/conf/saml.go
@@ -0,0 +1,136 @@
+package conf
+
+import (
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"math/big"
+	"net"
"net/url" + "time" +) + +// SAMLConfiguration holds configuration for native SAML support. +type SAMLConfiguration struct { + Enabled bool `json:"enabled"` + PrivateKey string `json:"-" split_words:"true"` + AllowEncryptedAssertions bool `json:"allow_encrypted_assertions" split_words:"true"` + RelayStateValidityPeriod time.Duration `json:"relay_state_validity_period" split_words:"true"` + + RSAPrivateKey *rsa.PrivateKey `json:"-"` + RSAPublicKey *rsa.PublicKey `json:"-"` + Certificate *x509.Certificate `json:"-"` + + ExternalURL string `json:"external_url,omitempty" split_words:"true"` + + RateLimitAssertion float64 `default:"15" split_words:"true"` +} + +func (c *SAMLConfiguration) Validate() error { + if c.Enabled { + bytes, err := base64.StdEncoding.DecodeString(c.PrivateKey) + if err != nil { + return errors.New("SAML private key not in standard Base64 format") + } + + privateKey, err := x509.ParsePKCS1PrivateKey(bytes) + if err != nil { + return errors.New("SAML private key not in PKCS#1 format") + } + + err = privateKey.Validate() + if err != nil { + return errors.New("SAML private key is not valid") + } + + if privateKey.E != 0x10001 { + return errors.New("SAML private key should use the 65537 (0x10001) RSA public exponent") + } + + if privateKey.N.BitLen() < 2048 { + return errors.New("SAML private key must be at least RSA 2048") + } + + if c.RelayStateValidityPeriod < 0 { + return errors.New("SAML RelayState validity period should be a positive duration") + } + + if c.ExternalURL != "" { + _, err := url.ParseRequestURI(c.ExternalURL) + if err != nil { + return err + } + } + } + + return nil +} + +// PopulateFields fills the configuration details based off the provided +// parameters. +func (c *SAMLConfiguration) PopulateFields(externalURL string) error { + // errors are intentionally ignored since they should have been handled + // within #Validate() + bytes, _ := base64.StdEncoding.DecodeString(c.PrivateKey) + privateKey, _ := x509.ParsePKCS1PrivateKey(bytes) + + c.RSAPrivateKey = privateKey + c.RSAPublicKey = privateKey.Public().(*rsa.PublicKey) + + parsedURL, err := url.ParseRequestURI(externalURL) + if err != nil { + return fmt.Errorf("saml: unable to parse external URL for SAML, check API_EXTERNAL_URL: %w", err) + } + + host := "" + host, _, err = net.SplitHostPort(parsedURL.Host) + if err != nil { + host = parsedURL.Host + } + + // SAML does not care much about the contents of the certificate, it + // only uses it as a vessel for the public key; therefore we set these + // fixed values. + // Please avoid modifying or adding new values to this template as they + // will change the exposed SAML certificate, requiring users of + // GoTrue to re-establish a connection between their Identity Provider + // and their running GoTrue instances. + certTemplate := &x509.Certificate{ + SerialNumber: big.NewInt(0), + IsCA: false, + DNSNames: []string{ + "_samlsp." 
+ host, + }, + KeyUsage: x509.KeyUsageDigitalSignature, + NotBefore: time.UnixMilli(0).UTC(), + NotAfter: time.UnixMilli(0).UTC().AddDate(200, 0, 0), + Subject: pkix.Name{ + CommonName: "SAML 2.0 Certificate for " + host, + }, + } + + if c.AllowEncryptedAssertions { + certTemplate.KeyUsage = certTemplate.KeyUsage | x509.KeyUsageDataEncipherment + } + + certDer, err := x509.CreateCertificate(nil, certTemplate, certTemplate, c.RSAPublicKey, c.RSAPrivateKey) + if err != nil { + return err + } + + cert, err := x509.ParseCertificate(certDer) + if err != nil { + return err + } + + c.Certificate = cert + + if c.RelayStateValidityPeriod == 0 { + c.RelayStateValidityPeriod = 2 * time.Minute + } + + return nil +} diff --git a/auth_v2.169.0/internal/conf/saml_test.go b/auth_v2.169.0/internal/conf/saml_test.go new file mode 100644 index 0000000..e8de37e --- /dev/null +++ b/auth_v2.169.0/internal/conf/saml_test.go @@ -0,0 +1,101 @@ +package conf + +import ( + tst "testing" + + "encoding/base64" + + "github.com/stretchr/testify/require" +) + +func TestSAMLConfigurationValidate(t *tst.T) { + invalidExamples := []*SAMLConfiguration{ + { + Enabled: true, + PrivateKey: "", + }, + { + Enabled: true, + PrivateKey: "InvalidBase64!", + }, + { + Enabled: true, + PrivateKey: base64.StdEncoding.EncodeToString([]byte("not PKCS#1")), + }, + { + // RSA 1024 key + Enabled: true, + PrivateKey: "MIICXQIBAAKBgQDFa3SgzWZpcoONv3Iq3FxNieks2u2TmykxxxeggI9aNpHpuCzwGQO8wqXGVvFNlkE3GSPcz7rklzfyj577Z47lfWdBP1OAefralA3tS2mafqpZ32JwDynX4as+xauLVdP4iOR96b3L2eOb6rDpr4wBJuNqO533xsjcbNPINEDkSwIDAQABAoGASggBtEtSHDjVHFKufWQlOO5+glOWw8Nrrz75nTaYizvre7mVIHRA8ogLolT4KCAwVHkY+bTsYMxULqGs/JnY+40suHECYQ2u76PTQlvJnhJANGtCxuV4lSK6B8QBJhjGExsnAOwMMKz0p5kVftx2GA+/Rz2De7DR9keNECjcAAECQQDtr5cdkEdnIffvi782843EvX/g8615AKZeUYVUl0gVXujjpIVZXDtytPHINvIW1Z2mOm2rlJukwiKYYJ8IjsxlAkEA1KGbJ9EI6AOUcnpy7FYdGkbINTDngCqVOoaddlHS+1SaofpYXZPueXXIqIG3viksxmq/Q0IY6+JRkGo/RpGq7wJARD+BAqok9oYYbR4RX7P7ZxyKlYsiqnX3T2nVAP8XYZuI/6SD7a7AGyW9ryGnzcq0o8BvMS9QqbRcvqgvwgNOyQJBAL2ZVMaOSIjKGGZz9WHz74Nstj1n3CWW0vYa7vGASMc/S5s/pefbbvvzIPfQo0z3XiuXJ/ELUTmU1vIVK1L7tRUCQQCsuE7xckZ8H/523jdWWy9zZVP1z4c5dVLDR5RY+YQNForgb6kkSv4Gzn/FRUOxqn2MEWJLla31D4EuS+XKuwZR", + }, + { + // RSA 2048 with 0x11 as public exponent + Enabled: true, + PrivateKey: 
"MIIEowIBAAKCAQEAyMvTanPoiorCpIQCl70qXF34FIPOkKaInr1vw+3/0nik5CDUo761E02uTrK4/8JXr5NLGmy/fQmagNsBOdKewciRB3xxs+sPNncptG4rpCBjxSJdVl+mYZaw2kdvFY7TvNTlr7qG1Q0kV/3lBgpMlyM9OqBrjuG0UUzB5hlg08KLNflkQAkoJGWNVWULi2VceP3I3QsH9uNUQkgaM9Z6rl0BaRAkobHTTvquAqqj1AlNmSh24rrIbV4hYcNnesIpG4+LDd8XfpOwTp+jUl8akF6xcRBJjiPDJGN9ety29DcCxjo2i0b+TWYU+Pex08uOeOdulsgecbIVxLUEgRHcFQIBEQKCAQBefgkjCV5fUFuYtpfO75t2ws8Ytn9TITE7pHDUrDwmz1ynlvqnaM2uuyTZvYQ8HzhSn6rfQjv+mxuH7pcqRP9qQEQ/whdjuekKkm36jjKnlsWJ8g3OSyEe3YBmuDRGYVSVGOSO7l2Rb5ih4OQ/E+fOpyvfWoz38b5EYFs/GwBjpgJG+9cdCLYKOax8WDifWkjHdrogAlE8do/QF6RZoSvhAbRkpuxYActmKU8rIORrq8dLidSjBG2aoRH+RCN4ONZ3R4iHbYF2zWfqDFdSIX64kChaOZVhtTyTnF7/1v4VF3UwByEs8hTSckFH2jW6T7RZoatpgsv5zx/roRPDBWNRAoGBAPGphQwX9GF56XVmqD9rQMD9a0FuGCNGgiFkt2OUTFKmr4rTNVE8uJqEXjQwbybTnF2iJ1ApL1zNHg0cpnpMt7ZpcWG4Bu2UsXlwBL/ZwY4Spk6tHNdTsg/wuoWRSIGNanNS6CI5EUA4cxGNUt0G+dF4LaMHZuIAU7avs+kwDMzHAoGBANS1nS8KYkPUwYlmgVPNhMDTtjvq7fgP5UFDXnlhE6rJidc/+B0p9WiRhLGWlZebn+h2fELfIgK3qc4IzCHOkar0pic2D3bNbboNQKnqFl81hg0EORTK0JJ5/K4J61l5+rZtQu3Ss1HVwDiy9SKg6F3CQj9PK0r+hjtAStFSmZxDAoGBAMcEEzciyUE3OLsJP0NJRGKylJA8jFlJH99D4lIBqEQQzMyt76xQH45O5Cr6teO9U5hna6ttNhAwcxnbW+w/LeGEAwUuI9K2sEXjx60NrnUATLlDRO2QOElc1ddolhBWV6pERrLFlbxquR2DcWq6c2E1yzr3CW7TF8OfwVagCoqFAoGBAK8sJxeuMs5y+bxyiJ9d9NsItDFYD0TBy9tkqCe5W32W6fyPCJB86Df/XjflbCKAKVYHOSgDDPMt1yIlXNCL/326arbhOeld4eSDYm3P1jBKMijWTSAujaXN3yXqDRyCkjvhgmmAV3CR6Zga5/5mZQHrRZ2MfgGGUG0HxSTanJ7NAoGBAOhZBGtFsBdtEawvCh4Z8NaMC2nU+Ru9hEsZSy6rZQrPvja9aBUk5QUdh04TYtu8PzQ1EghZy71rtwDAvxXWJ1mWcZn0kD06tZKudmZpMVXCp3SFah6DDUCFSmQ2U60yh6XOzpS2+Z97Ngi02UFph8sSQA6Dl/lmaf4bfQHCYc5Z", + }, + } + + for i, example := range invalidExamples { + err := example.Validate() + require.Error(t, err, "Invalid example %d was regarded as valid", i) + } + + validExamples := []*SAMLConfiguration{ + { + Enabled: false, + }, + { + // RSA 2048 + Enabled: true, + PrivateKey: "MIIEowIBAAKCAQEAsBuxTUWFrfy0qYXaqNSeVWcJOd6TQ4+4b/3N4p/58r1d/kMU+K+BGR+tF0GKHGYngTF6puvNDff2wgW3dp3LUSMjxOhC3sK0uL90vd+IR6v1EDDGLyQNo6EjP/x5Gp/PcL2s6hZb8iLBEq4FksPnEhWqf9Nsmgf1YPJV4AvaaWe3oBFo9zJobSs3etTVitc3qEH2DpgYFtrCKhMWv5qoZtZTyZRE3LU3rvInDgYw6HDGF1G4y4Fvah6VpRmTdyMR81r1tCLmGvk61QJp7i4HteazQ6Raqh2EZ1sH/UfEp8mrwYRaRdgLDQ/Q6/YlO8NTQwzp6YwwAybhMBnOrABLCQIDAQABAoIBADqobq0DPByQsIhKmmNjtn1RvYP1++0kANXknuAeUv2kT5tyMpkGtCRvJZM6dEszR3NDzMuufPVrI1jK2Kn8sw0KfE6I4kUaa2Gh+7uGqfjdcNn8tPZctuJKuNgGOzxAALNXqjGqUuPa6Z5UMm0JLX0blFfRTzoa7oNlFG9040H6CRjJQQGfYyPS8xeo+RUR009sK/222E5jz6ThIiCrOU/ZGm5Ws9y3AAIASqJd9QPy7qxKoFZ1qKZ/cDaf1txCKq9VBXH6ypZoU1dQibhyLCIJ3tYapBtV4p8V12oHhITXb6Vbo1P9bQSVz+2rQ0nJkjdXX/N4aHE01ecbu8MpMxUCgYEA5P4ZCAdpkTaOSJi7GyL4AcZ5MN26eifFnRO/tbmw07f6vi//vdqzC9T7kxmZ8e1OvhX5OMGNb3nsXm78WgS2EVLTkaTInG6XhlOeYj9BHAQZDBr7rcAxrVQxVgaGDiZpYun++kXw+39iq3gxuYuC9mM0AQze3SjTRIM9WWXJSqMCgYEAxODfXcWMk2P/WfjE3u+8fhjc3cvqyWSyThEZC9YzpN59dL73SE7BRkMDyZO19fFvVO9mKsRfsTio0ceC5XQOO6hUxAm4gAEvMpeapQgXTxIxF5FAQ0vGmBMxT+xg7lX8HTTJX/UCttKo3BdIJQeTf8bKVzJCoLFh8Rcv5qI6umMCgYAEuj44DTcfuVmcpBKQz9sA5mEQIjO8W9/Xi1XU4Z2F8XFqxcDo4X/6yY3cDpZACV8ry3ZWtqA94e2AUZhCH4DGwMf/ZMCDgkD8k/NcIeQtOORvfIsfni0oX+mY1g+kcSSR1zTdY95CwvF9isC0DO5KOegT8XkUZchezLrSgqhyMwKBgQCvS0mWRH6V/UMu6MDhfrNl0t1U3mt+RZo8yBx03ZO+CBvMBvxF9VlBJgoJQOuSwBVQmpdtHMvXD4vAvNNfWaYSmB5hLgaIcoWDlliq+DlIvfnX8gw13xJD9VLCxsTHcOe5WXazaYOxJIAU9uXVkplR+73NRYLtcQKzluGfiHKh4QKBgFpPtOqcAbkMsV+1qPYvvvX7E4+l52Odb4tbxGBYV8tzCqMRETqMPVxFWwsj+EQ8lyAu15rCRH7DKHVK5zL6JvIZEjt0tptKqSL2o3ovS6y3DmD6t+YpvjKME7a+vunOoJWe9pWl3wZmodfyZMpAdDLvDGhPR7Jlhun41tbMMaQF", + }, + { + // RSA 3072 + Enabled: true, + PrivateKey: 
"MIIG4wIBAAKCAYEApYkvDaXJEDsELSVosc0sKFnoPeJai8sOu8di5ffGVJRr7mJi+VQjM0d2KeOIllVk2IV58M33Jz2Rx61NYPLu0N9fZqPwbgYn+FNz1L1xgslUL6gyaQnCEKtH5mRqPEBOPvAygq/fZ46eBMs3GSS6NWp/XF/iPaFc1mBDAZFvXev4XV7O6iuqz5mx3rQbkIhMjQxP+IOYWMS4TqueLJWgFUbij0FepJfOE+AlmfBa7xIOyE+g5t3vRB8XwzxRPsljlfgZXstxO1r1NS3DPiUj3kGYy7em5Yb+icIA6xzy0MiwU5RcBSwtVc+M/Yk2tMY6a9z1UX2M5Zr/ih3w0CbW6KDYplqgwwDZv2f+ynIqldn7SjVo3V6fWFu+KtRkofWWkTGjaU2DTpxrxUJEnEo6zXfBSejAjGGAJyKjX74uATlOu/LQEjd5umQpWYvtvP1UkbjHYgITtoTytb3uU7Q7W/YdtNUcaE377QHZF+E+XTCCCw00bCvpDciW+w0JSkRfAgMBAAECggGAR0jCKIBiC0k+zSo04YxXHbFJ34xgLZ7t41NDdYCzuayIpglcUb43wlddvUAsi4COgudH0bkAW7eZ1YD9t2gmC3CFpq+mU9r2z2swkEZcYVPNmxA1VSJMnd0Eg2Ruky+mAlhxh/GwpOm3hpz0RzGXtnT8D42C4cNhNTgS4tP8P1fkhmDTfef8EJZBEIRC8oSfYoYQ0hXpPyDHtakV3mE4pLD303T1CrAMoGaACsCEiDsgfoY75e9gn9c75mlNG1qhhJYxD3Sv1o9lQd3Q1A71sga/E+yIlUcPP4fDaA8DdeH+FHwL9xgQPd18gsrbPdbsg8JMLmjblaz8BB1MvJMwj+b3Ey2idD8CVIq5Ql97TebyMxZp3ZYjLq/R2ay+MpE9Vjgih096Hg+kCPMPi3Q9AmVJX8kN8+2zm2EeDoI/YnJFzmBcmaOuSBEGYdrRk5RCYfZMa1jvpoNUGbWzoX4gRfC7Gr+alaCWa9ot2c+ChWZQlpbKaMYMLU/VEd7gsf/BAoHBANJsSdIxrTUWpdm27SJlq5ylKVokS5ftrxk2gM8M16jLYE7SwPrhdxphWGH8/TMz6/Jyd+CuSfwAzyy325szlFlZVpxv8qu1vWROBaaaq1Tg8cqYC2s+hUTJLevcmiBHFu+7tiYNmMqkNIfj9/FN1zvfPVwqurtB5WXGjI4qhf5SyJgtj1GiM/s9Ae86LiRZhovcEEwf0LddGpMrUEDrWOV9D95sOMA00rsJXOfOg78Ms7Nq/h9w6cnD5x4jUJTMzwKBwQDJY/TMNVa1V8ci+pOMB6iTyi3azVC6ZiCXinCQS0oLebY1GmyWLv9A+/+9Wg/h4p4OdlZSA2/9f6+6njAcxI1wfzHVC3vgF7EDs9YUeAmXWBA171uPHbfimTd21utLkcyJ/WdO4OmKP7ZIK8UWyXE98N5NQV9NRX0sm6CJemwChcoJ8/7lsuYa4nJVUXtAkAMoj7e0nOoWn1IzyolmIXSTrBPiLWh68172tr3ciR6uGN3Yba6szkFTeaBDfNQvk3ECgcEAy07XkKBwwv+L5SxKOFbVlfc6Wh8Bbty2tnyjvemhoTRHbEFTNdOMeU+ezqZamhNLoKgazVp4n2TEx2cpZu5SInYgKew8Is3pHLYJ3axJaCwjUmTPe6Ifr5NVrDMsM42cSqsqVeADRZ+cJcQMtvhHwlByf8/FNdJ4a3qIKYBKkKy5pdc3R1+aK+AJM3QaSwK47f8FPBftWI07dQB/fQonjSvlnjkgKA2hohdszYgKYRhLtEnnGMfHCywd7U+ftvWfAoHAcxfq+SGqkiy+I+FsnWRrFTtAhYE9F6nyCmkV94Dvqis+1I5rbFEjk7Hw7/geh4uJpN5AatKIGCn29gIdoPM7mgU3J3hOrT0c7u7B9CS95n5vlUNb4iirxJanugUNp7yFVn85oTyse1P6CrjpBCLP0wRrJ1+q5XBHH005rBgIzlBDrPiCvidFlivAB75vX/BtvaqU5GWg6pjW0752U6XfB94Z5vLoeQvJQ9ogG39Jx1lyv5O/dgbSErC5xJf8c8whAoHAYdxLfZcDN2IEUgg/czE9jDSJ2EwOCE9HpCntthHAvceK3+PFfpCKwOLynqF8urhdeM510QJK2ETLvzpgMBgSh/klxeBYv8BCL8BuPwyPciAFmPE1Stx7C1+JBF2fayYkCSK9w85INLAJYKTDk9gE8O6l0bXA8tuq3F0tRTwMBcyEpMOehKFamoPcU6cnNa2HC+MyTOfXSBeNZ2VciFYf5rh3YrwoUYbQJtDXxFvoX0Ba+zyneNG0j3epXZuR2lyK", + }, + { + // RSA 4096 + Enabled: true, + PrivateKey: 
"MIIJKgIBAAKCAgEA2cNnNX4Be3jOKTr7lxIWxWfFKtwFqbWs9CZS7gDNXUtBlGuV1+FswPvSRKWEmwsBQikBfScowk4hL/JFgN8V25PijOk7eTPmw3tHuUhoil7GkJCMKhtrYwGbvINk1pK5mfI+V8GR3l52S779fg8nwktOtr99sLgfxUdxwxFY5hE5lo5P19QPClAA89SjQ3c/FlXy8R56/qf4u+Fuvd7Ecq7nQGeovsiSpBxY2gn4KL2LdkkyZmEQVgXzXjDGOOhF7M6eKim5MCsUqgHjCCkK7Gw9HNbd4oHNE5ucWRYjG1IpEYbYmep/9+wXgwQorYFKUT0NXrUv5H3VLQpsDyWDRZJ+wXGbwV2bRh2Z5bbAJVTxF8NaO8XujVZLIe+UJ8kUWj+n3hxwil9UU9yExR6M9TZBfHTKOVWcn1CquT85ppI0dtvlu3ToBwjjcd1wWLK8rLhmEwafC142bSL2kXLc6p7YrhTBN7PBPodQ2lLMg8xbw4cNspsMAPAPfrisqEYUGAs/EUScgcsSfmyzKNcdZlUx6UkMhz2F8sKPi4I4oIugxQiCa7LuSjmfrM6msIkrV+sj06zUYmAZzN+cf7rRlGFLNt1cKqqukjhbo9RL54XZQssT5GkHuVT6neyQBJX9EwtmZtXBTI78WTUabQhBcEBbxWbn5VodxDPXmfAiumsCAwEAAQKCAgEAnU1ux5BPJ877NYNa7CTv+AdewPgQyyfmWLM6YpyHvKW5KKqSolA/jCQcHuRlps3LSexvG+Xmpn1jscvTcyUzF9t64ok0Ifhg8MKj6+6nPZT64MDZzyzhZLJrukA73lg85Dy91gyI/1XDJDJB0QbHlK1rnc0z0S0gHhTe06c7TW4R6HTCrkiL2Moz9e6bRQfltY++n3iCJmRV4/oTUeqSg7leaQK4PaCLdSrY8CAVd/B7xqVXV+czssA3rcmT1tXKdSZH0HM1R9tG4Qvd4S4sqt4BQ0zfGVjkOA7HYP8BuyGdcwCyhHSFniSYU1b0v2jOs2Jjvw8pGmffTtrhdguGB60rMocKyfXvRxjJmIXZae6W8ZCwz76rKr8igXZUXvK3LqhGfm5fDpvWQlX8ugnwWOmowJqToS/fVKwhjFjsPONRbRZh7MTebRjx9ErpQycTm0SiUrUA/WE8Na1JeelTjxThCuy1VjIOtYVk4eYGP6REQV+nYGGuD7ruR+dpD4UR3/2DsPLik8X+YUFMjGCr+LjzybDj8Ux+a/u/eKD3rIe45PooJzGR/s+RCcwtAIue29+C+2uj3lAypEIqRGd2k0RgEw8Cj43Omc3Pyf+M3IbKfpE82OGSPp/rgHIfJSwGuOWH09yxCjyqY9H/wtxea6qOpeuk/g4ipaTp/QvZikkCggEBAPeowAf5hz16Oreb4D1c9MoHdofO/Kv8D8Kig9YhwDGck8uG4TbdBMSre5mOGSykZBA566/YHf49Pdi5Eq3L5oEdNKJrxn35WlMHY2pdOCrhWfqe+Z3Fg6qlhQTFU0blFAwy6NUixHP7xsLyAdpjkSxdsQzOaHUMII8w0gD+/AqSq3c/sC9AF+CeiZQV0P53eseNVfxfv8f1aDH7JcywG4P6Xe9pdHoNW93u2j2HQcrLidOtsT5s8iXj2YO3d4YZg/I20dViC7+DrG1ep+rfiuYY5VS1jKVqTknzKHlP7OHOaYJhDPAffnNFBWj4Th11NKxigpx3ogXO9jVyCGXWwD0CggEBAOEY5hvGEufmWx821Oln3OoaUv4MBSa0RMuuBZyx7mx18gfidjB9exC6sFduTwdFjnM8PUDBnOo2ZM7ThcCbTJ4Qi7LB5gDAXUbJqJk7o+lKrfXcpYdksoXWHmWAT7RE1v9nbXle1KHKIaaga/I8hVtSfeTizb8y+dDP3T3H8tVByvneAE0LnDVmr1VhFppKnzWl5vTY2Y+6XGIWmrCuWS1+zf+dx32zJ2ZOfT1Wwk20igC79RzH0sDHSv7DNyUn9u/9LtjIIrDtWch9+5Xkq0uZQAqM0Jw/QUYqarJSNNVhREmwWk+B6sJaQUN26YyTHiOpfFu1RUwHyyg58L8yJ8cCggEBALqSqnhXh4bM+kcwavJPgSpiDO2rBbcbIVRj0iYTLxMw/jap2ijWwKzY8zhvUI/NGIUQ3XmPuqi5wknuwx+jKHfEZM6nmtV0cJN0UXTj3ViQhJTGBw7Qqax5HYjGj0Itebjm8Xj/xDgMSWS7pKG9uLRPsP4Q0ai8BhtZkBun/ICKlho0JKq0Akj5pnOlK9lIcXq8AzcpevVM774XkhZt5Yy7pOCj9VetkLPVKRyJNQtt4ttRUuHQeWwKBuev459mwXxLyDCUuH0C2Xdbg+zxk1ZdEweJ7fb/6xLS2H7rs205b0sFihWr5Ds6mCTISzDuB0yGuhbeGXV+wQTqb2EpM5ECggEBAMBFsGiQ7J1BWxxyjbNBkKY3DiUKx2ukGA+S+iA6rFng9XherG4HARPtI6vLAZ5If8FW90tVFl/JTpqMe3dmMC/kGi/7CCgkKIjKwEUDeKNRsv6MFqhsD0Ha/+Pbkjl9g9ht1EkUA7SfH9dguFQV9iNndzoHsY9cT59ZrrWTEY2vwV1lkAQ/opLKv4HCiLgKfawppfoHMO9gVIFEpaW9h1chNXzenQR1/3WYHcpDTX1qdWbjJiALX65jjV/ICFaoqHmeXmG1skxGsaZcVoZW6SqOIPHiDl8oeO0iVjkzlwWdK+N1y+6WHp0c0xp5fE0jbV8w6pS7ZhHnplUaCNaIVQkCggEAUcQ0VhA1FlWT/mcbTp3v3ojCkZH8fkbNQoL5nsxb+72VDGB0YYZBXyBCApY2E/3jfAH8nG0ALAVS1s983USx17Z2+Z+Yg13Gt1dglV0JC2vMS6j83G0UxsKdcFyafbgJl+hrvBAuOoqPLd5r5F6VnDZeDDsJ3Y6ZTmcsbf8EZkUSxT80oKBLwXi06dfnEz7nYUxvqk54QG3xN1VJAQoKaJ9sH9pbAPdA0GxRx8DIWBd3UhMFJbdIplfGlkk9kf+E1k6Z2SaRB8QQHpvdgsdQ6YXPV+0ejhiGytX9DMSmjZe3dC4C7ZdaCL+kSxdFRgIo2KAcJVdpsqbw/hclfNY7cQ==", + }, + } + + for i, example := range validExamples { + err := example.Validate() + require.NoError(t, err, "Valid example %d was regarded as invalid", i) + } +} + +func TestSAMLConfigurationPopulateFields(t *tst.T) { + c := &SAMLConfiguration{ + Enabled: true, + PrivateKey: 
"MIIEowIBAAKCAQEAt7dS8iM5MsQ+1mVkNpoaUnL8BCdxSrSx8jsSnvqN/GIJ4ipqbdrTgLpFVklVTqfaa5CykGVEV577l6AWkpkm2p7SvSkCQglmyAMMjY9glmztytAnfBpm+cQ6ZVTHC4XKlUG1aJigEuXPcZUU3FiBHWEuV2huYy2bLOtIY1v9N0i2v61QCdG+SM/Yb5t86KzApRl7VyHqquge6vvRuchfF0msv/2LW32hwxg3Gt4zkAF0SJqCCcfAPZ9pQwmbdUhoX16dRFU98nyIvuR8LH/wONZe/YyywFFHDEwkFa4XEzjCEm+AD+xvK7eEu55w21xB8JKMLEBy8uRuI3bIEG4pawIDAQABAoIBADw4IT4xgYw8e4R3U7P6K2qfOjB6ZU5hkHqgFmh6JJR35ll2IdDEi9OEOzofa5EOwC/GDGH8b7xw5nM7DGsdPHko2lca3BydTE1/glvchYKJTiDOvkKVvO9d/O4+Lch/IHpwQXB5pu7K2YaXoXDgqeHhevk3yAdGabj9norDGmtGIeU/x1hialKbw6L080CdbxpjeAsM/w+G/VtwvyOKYFBYxBflRW+sS8UeclVqKRAvaXKd1JGleWzH3hFZyFI54x5LyyjPI1JyVXRjNbf8xcS6eRaN849grL1+wBxEs/lQFn4JLhAcNi912iJ3lhxvkNleXZw7B7JAM8x4wUbK7zECgYEA6SYmu3YH8XuLUfT8MMCp+ETjPkNMOJGQmTXOkW6zuXP3J8iCPIxtuz09cGIro+yJU23yPUzOVCDZMmnMWBmkoTKAFoFL9TX0Eyqn/t1MD77i3NdkMp16yI5fwOO6yX1bZgLiG00W2E5/IGgNfTtEafU/mre95JBnTgxS3sAvz8UCgYEAybjfBVt+1X0vSVAGKYHI9wtzoSx3dIGE8G5LIchPTdNDZ0ke0QCRffhyCGKy6bPos0P2z5nLgWSePBPZQowpwZiQVXdWE05ID641E2zGULdYL1yVHDt6tVTpSzTAy89BiS1G8HvgpQyaBTmvmF11Fyd/YbrDxEIHN+qQdDkM928CgYEA4lJ4ksz21QF6sqpADQtZc3lbplspqFgVp8RFq4Nsz3+00lefpSskcff2phuGBXBdtjEqTzs5pwzkCj4NcRAjcZ9WG4KTu4sOTXTA83TamwZPrtUfnMqmH/2lEdd+wI0BpjryRlJE9ODuIwUe4wwfU0QQ5B2tJizPO0JXR4gEYYkCgYBzqidm4QGm1DLq7JG79wkObmiMv/x2t1VMr1ExO7QNQdfiP1EGMjc6bdyk5kMEMf5527yHaP4BYXpBpHfs6oV+1kXcW6LlSvuS0iboznQgECDmd0WgfJJtqxRh5QuvUVWYnHeSqNU0jjc6S8tdqCjdb+5gUUCzJdERxNOzcIr4zQKBgAqcBQwlWy0PdlZ06JhJUYlwX1pOU8mWPz9LIF0wrSm9LEtAl37zZJaD3uscvk/fCixAGHOktkDGVO7aUYIAlX9iD49huGkeRTn9tz7Wanw6am04Xj0y7H1oPPV7k5nJ4s9AOWq/gkZEhrRIis2anAczsx1YHSjq/M05+AbuRzvs", + } + + err := c.PopulateFields("https://projectref.supabase.co") + require.NoError(t, err) + + require.NotNil(t, c.RSAPrivateKey) + require.NotNil(t, c.RSAPublicKey) + require.NotNil(t, c.Certificate) +} + +func TestSAMLConfigurationDeterministicCertificate(t *tst.T) { + a := &SAMLConfiguration{ + Enabled: true, + PrivateKey: "MIIEowIBAAKCAQEAt7dS8iM5MsQ+1mVkNpoaUnL8BCdxSrSx8jsSnvqN/GIJ4ipqbdrTgLpFVklVTqfaa5CykGVEV577l6AWkpkm2p7SvSkCQglmyAMMjY9glmztytAnfBpm+cQ6ZVTHC4XKlUG1aJigEuXPcZUU3FiBHWEuV2huYy2bLOtIY1v9N0i2v61QCdG+SM/Yb5t86KzApRl7VyHqquge6vvRuchfF0msv/2LW32hwxg3Gt4zkAF0SJqCCcfAPZ9pQwmbdUhoX16dRFU98nyIvuR8LH/wONZe/YyywFFHDEwkFa4XEzjCEm+AD+xvK7eEu55w21xB8JKMLEBy8uRuI3bIEG4pawIDAQABAoIBADw4IT4xgYw8e4R3U7P6K2qfOjB6ZU5hkHqgFmh6JJR35ll2IdDEi9OEOzofa5EOwC/GDGH8b7xw5nM7DGsdPHko2lca3BydTE1/glvchYKJTiDOvkKVvO9d/O4+Lch/IHpwQXB5pu7K2YaXoXDgqeHhevk3yAdGabj9norDGmtGIeU/x1hialKbw6L080CdbxpjeAsM/w+G/VtwvyOKYFBYxBflRW+sS8UeclVqKRAvaXKd1JGleWzH3hFZyFI54x5LyyjPI1JyVXRjNbf8xcS6eRaN849grL1+wBxEs/lQFn4JLhAcNi912iJ3lhxvkNleXZw7B7JAM8x4wUbK7zECgYEA6SYmu3YH8XuLUfT8MMCp+ETjPkNMOJGQmTXOkW6zuXP3J8iCPIxtuz09cGIro+yJU23yPUzOVCDZMmnMWBmkoTKAFoFL9TX0Eyqn/t1MD77i3NdkMp16yI5fwOO6yX1bZgLiG00W2E5/IGgNfTtEafU/mre95JBnTgxS3sAvz8UCgYEAybjfBVt+1X0vSVAGKYHI9wtzoSx3dIGE8G5LIchPTdNDZ0ke0QCRffhyCGKy6bPos0P2z5nLgWSePBPZQowpwZiQVXdWE05ID641E2zGULdYL1yVHDt6tVTpSzTAy89BiS1G8HvgpQyaBTmvmF11Fyd/YbrDxEIHN+qQdDkM928CgYEA4lJ4ksz21QF6sqpADQtZc3lbplspqFgVp8RFq4Nsz3+00lefpSskcff2phuGBXBdtjEqTzs5pwzkCj4NcRAjcZ9WG4KTu4sOTXTA83TamwZPrtUfnMqmH/2lEdd+wI0BpjryRlJE9ODuIwUe4wwfU0QQ5B2tJizPO0JXR4gEYYkCgYBzqidm4QGm1DLq7JG79wkObmiMv/x2t1VMr1ExO7QNQdfiP1EGMjc6bdyk5kMEMf5527yHaP4BYXpBpHfs6oV+1kXcW6LlSvuS0iboznQgECDmd0WgfJJtqxRh5QuvUVWYnHeSqNU0jjc6S8tdqCjdb+5gUUCzJdERxNOzcIr4zQKBgAqcBQwlWy0PdlZ06JhJUYlwX1pOU8mWPz9LIF0wrSm9LEtAl37zZJaD3uscvk/fCixAGHOktkDGVO7aUYIAlX9iD49huGkeRTn9tz7Wanw6am04Xj0y7H1oPPV7k5nJ4s9AOWq/gkZEhrRIis2anAczsx1YHSjq/M05+AbuRzvs", + } + + b := &SAMLConfiguration{ + 
Enabled: a.Enabled, + PrivateKey: a.PrivateKey, + } + + err := a.PopulateFields("https://projectref.supabase.co") + require.NoError(t, err) + + err = b.PopulateFields("https://projectref.supabase.co") + require.NoError(t, err) + + require.Equal(t, a.Certificate.Raw, b.Certificate.Raw, "Certificate generation should be deterministic") +} diff --git a/auth_v2.169.0/internal/conf/tracing.go b/auth_v2.169.0/internal/conf/tracing.go new file mode 100644 index 0000000..9a1d9be --- /dev/null +++ b/auth_v2.169.0/internal/conf/tracing.go @@ -0,0 +1,33 @@ +package conf + +type TracingExporter = string + +const ( + OpenTelemetryTracing TracingExporter = "opentelemetry" +) + +type TracingConfig struct { + Enabled bool + Exporter TracingExporter `default:"opentelemetry"` + + // ExporterProtocol is the OTEL_EXPORTER_OTLP_PROTOCOL env variable, + // only available when exporter is opentelemetry. See: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md + ExporterProtocol string `default:"http/protobuf" envconfig:"OTEL_EXPORTER_OTLP_PROTOCOL"` + + // Host is the host of the OpenTracing collector. + Host string + + // Port is the port of the OpenTracing collector. + Port string + + // ServiceName is the service name to use with OpenTracing. + ServiceName string `default:"gotrue" split_words:"true"` + + // Tags are the tags to associate with OpenTracing. + Tags map[string]string +} + +func (tc *TracingConfig) Validate() error { + return nil +} diff --git a/auth_v2.169.0/internal/crypto/crypto.go b/auth_v2.169.0/internal/crypto/crypto.go new file mode 100644 index 0000000..6fc2b71 --- /dev/null +++ b/auth_v2.169.0/internal/crypto/crypto.go @@ -0,0 +1,159 @@ +package crypto + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "math" + "math/big" + "strconv" + "strings" + + "golang.org/x/crypto/hkdf" +) + +// SecureToken creates a new random token +func SecureToken() string { + b := make([]byte, 16) + must(io.ReadFull(rand.Reader, b)) + + return base64.RawURLEncoding.EncodeToString(b) +} + +// GenerateOtp generates a random n digit otp +func GenerateOtp(digits int) string { + upper := math.Pow10(digits) + val := must(rand.Int(rand.Reader, big.NewInt(int64(upper)))) + + // adds a variable zero-padding to the left to ensure otp is uniformly random + expr := "%0" + strconv.Itoa(digits) + "v" + otp := fmt.Sprintf(expr, val.String()) + + return otp +} +func GenerateTokenHash(emailOrPhone, otp string) string { + return fmt.Sprintf("%x", sha256.Sum224([]byte(emailOrPhone+otp))) +} + +// Generated a random secure integer from [0, max[ +func secureRandomInt(max int) int { + randomInt := must(rand.Int(rand.Reader, big.NewInt(int64(max)))) + return int(randomInt.Int64()) +} + +type EncryptedString struct { + KeyID string `json:"key_id"` + Algorithm string `json:"alg"` + Data []byte `json:"data"` + Nonce []byte `json:"nonce,omitempty"` +} + +func (es *EncryptedString) IsValid() bool { + return es.KeyID != "" && len(es.Data) > 0 && len(es.Nonce) > 0 && es.Algorithm == "aes-gcm-hkdf" +} + +// ShouldReEncrypt tells you if the value encrypted needs to be encrypted again with a newer key. 
+func (es *EncryptedString) ShouldReEncrypt(encryptionKeyID string) bool { + return es.KeyID != encryptionKeyID +} + +func (es *EncryptedString) Decrypt(id string, decryptionKeys map[string]string) ([]byte, error) { + decryptionKey := decryptionKeys[es.KeyID] + + if decryptionKey == "" { + return nil, fmt.Errorf("crypto: decryption key with name %q does not exist", es.KeyID) + } + + key, err := deriveSymmetricKey(id, es.KeyID, decryptionKey) + if err != nil { + return nil, err + } + + block := must(aes.NewCipher(key)) + cipher := must(cipher.NewGCM(block)) + + decrypted, err := cipher.Open(nil, es.Nonce, es.Data, nil) // #nosec G407 + if err != nil { + return nil, err + } + + return decrypted, nil +} + +func ParseEncryptedString(str string) *EncryptedString { + if !strings.HasPrefix(str, "{") { + return nil + } + + var es EncryptedString + + if err := json.Unmarshal([]byte(str), &es); err != nil { + return nil + } + + if !es.IsValid() { + return nil + } + + return &es +} + +func (es *EncryptedString) String() string { + out := must(json.Marshal(es)) + + return string(out) +} + +func deriveSymmetricKey(id, keyID, keyBase64URL string) ([]byte, error) { + hkdfKey, err := base64.RawURLEncoding.DecodeString(keyBase64URL) + if err != nil { + return nil, err + } + + if len(hkdfKey) != 256/8 { + return nil, fmt.Errorf("crypto: key with ID %q is not 256 bits", keyID) + } + + // Since we use AES-GCM here, the same symmetric key *must not be used + // more than* 2^32 times. But, that's not that much. Suppose a system + // with 100 million users, then a user can only change their password + // 42 times. To prevent this, the actual symmetric key is derived by + // using HKDF using the encryption key and the "ID" of the object + // containing the encryption string. Ideally this ID is a UUID. This + // has the added benefit that the encrypted string is bound to that + // specific object, and can't accidentally be "moved" to other objects + // without changing their ID to the original one. 
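+	// For instance (an illustrative aside, not part of this commit; the
+	// row IDs below are hypothetical), the same master key yields
+	// unrelated key streams for two different objects:
+	//
+	//	readerA := hkdf.New(sha256.New, hkdfKey, nil, []byte("row-a"))
+	//	readerB := hkdf.New(sha256.New, hkdfKey, nil, []byte("row-b"))
+	//
+	// so a ciphertext copied onto another row simply fails to decrypt.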
+ + keyReader := hkdf.New(sha256.New, hkdfKey, nil, []byte(id)) + key := make([]byte, 256/8) + + must(io.ReadFull(keyReader, key)) + + return key, nil +} + +func NewEncryptedString(id string, data []byte, keyID string, keyBase64URL string) (*EncryptedString, error) { + key, err := deriveSymmetricKey(id, keyID, keyBase64URL) + if err != nil { + return nil, err + } + + block := must(aes.NewCipher(key)) + cipher := must(cipher.NewGCM(block)) + + es := EncryptedString{ + KeyID: keyID, + Algorithm: "aes-gcm-hkdf", + Nonce: make([]byte, 12), + } + + must(io.ReadFull(rand.Reader, es.Nonce)) + es.Data = cipher.Seal(nil, es.Nonce, data, nil) // #nosec G407 + + return &es, nil +} diff --git a/auth_v2.169.0/internal/crypto/crypto_test.go b/auth_v2.169.0/internal/crypto/crypto_test.go new file mode 100644 index 0000000..f1c8e67 --- /dev/null +++ b/auth_v2.169.0/internal/crypto/crypto_test.go @@ -0,0 +1,108 @@ +package crypto + +import ( + "testing" + + "github.com/gofrs/uuid" + "github.com/stretchr/testify/assert" +) + +func TestEncryptedStringPositive(t *testing.T) { + id := uuid.Must(uuid.NewV4()).String() + + es, err := NewEncryptedString(id, []byte("data"), "key-id", "pwFoiPyybQMqNmYVN0gUnpbfpGQV2sDv9vp0ZAxi_Y4") + assert.NoError(t, err) + + assert.Equal(t, es.KeyID, "key-id") + assert.Equal(t, es.Algorithm, "aes-gcm-hkdf") + assert.Len(t, es.Data, 20) + assert.Len(t, es.Nonce, 12) + + dec := ParseEncryptedString(es.String()) + + assert.NotNil(t, dec) + assert.Equal(t, dec.Algorithm, "aes-gcm-hkdf") + assert.Len(t, dec.Data, 20) + assert.Len(t, dec.Nonce, 12) + + decrypted, err := dec.Decrypt(id, map[string]string{ + "key-id": "pwFoiPyybQMqNmYVN0gUnpbfpGQV2sDv9vp0ZAxi_Y4", + }) + + assert.NoError(t, err) + assert.Equal(t, []byte("data"), decrypted) +} + +func TestParseEncryptedStringNegative(t *testing.T) { + negativeExamples := []string{ + "not-an-encrypted-string", + // not json + "{{", + // not parsable json + `{"key_id":1}`, + `{"alg":1}`, + `{"data":"!!!"}`, + `{"nonce":"!!!"}`, + // not valid + `{}`, + `{"key_id":"key_id"}`, + `{"key_id":"key_id","alg":"different","data":"AQAB=","nonce":"AQAB="}`, + } + + for _, example := range negativeExamples { + assert.Nil(t, ParseEncryptedString(example)) + } +} + +func TestEncryptedStringDecryptNegative(t *testing.T) { + id := uuid.Must(uuid.NewV4()).String() + + // short key + _, err := NewEncryptedString(id, []byte("data"), "key-id", "short_key") + assert.Error(t, err) + + // not base64 + _, err = NewEncryptedString(id, []byte("data"), "key-id", "!!!") + assert.Error(t, err) + + es, err := NewEncryptedString(id, []byte("data"), "key-id", "pwFoiPyybQMqNmYVN0gUnpbfpGQV2sDv9vp0ZAxi_Y4") + assert.NoError(t, err) + + dec := ParseEncryptedString(es.String()) + assert.NotNil(t, dec) + + _, err = dec.Decrypt(id, map[string]string{ + // empty map + }) + assert.Error(t, err) + + // short key + _, err = dec.Decrypt(id, map[string]string{ + "key-id": "AQAB", + }) + assert.Error(t, err) + + // key not base64 + _, err = dec.Decrypt(id, map[string]string{ + "key-id": "!!!", + }) + assert.Error(t, err) + + // bad key + _, err = dec.Decrypt(id, map[string]string{ + "key-id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", + }) + assert.Error(t, err) + + // bad tag for AEAD failure + dec.Data[len(dec.Data)-1] += 1 + + _, err = dec.Decrypt(id, map[string]string{ + "key-id": "pwFoiPyybQMqNmYVN0gUnpbfpGQV2sDv9vp0ZAxi_Y4", + }) + assert.Error(t, err) +} + +func TestSecureToken(t *testing.T) { + assert.Equal(t, len(SecureToken()), 22) +} diff --git 
a/auth_v2.169.0/internal/crypto/password.go b/auth_v2.169.0/internal/crypto/password.go new file mode 100644 index 0000000..7cf4607 --- /dev/null +++ b/auth_v2.169.0/internal/crypto/password.go @@ -0,0 +1,418 @@ +package crypto + +import ( + "context" + "crypto/aes" + "crypto/cipher" + "crypto/subtle" + "encoding/base64" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/supabase/auth/internal/observability" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + + "golang.org/x/crypto/argon2" + "golang.org/x/crypto/bcrypt" + "golang.org/x/crypto/scrypt" +) + +type HashCost = int + +const ( + // DefaultHashCost represents the default + // hashing cost for any hashing algorithm. + DefaultHashCost HashCost = iota + + // QuickHashCosts represents the quickest + // hashing cost for any hashing algorithm, + // useful for tests only. + QuickHashCost HashCost = iota + + Argon2Prefix = "$argon2" + FirebaseScryptPrefix = "$fbscrypt" + FirebaseScryptKeyLen = 32 // Firebase uses AES-256 which requires 32 byte keys: https://pkg.go.dev/golang.org/x/crypto/scrypt#Key +) + +// PasswordHashCost is the current pasword hashing cost +// for all new hashes generated with +// GenerateHashFromPassword. +var PasswordHashCost = DefaultHashCost + +var ( + generateFromPasswordSubmittedCounter = observability.ObtainMetricCounter("gotrue_generate_from_password_submitted", "Number of submitted GenerateFromPassword hashing attempts") + generateFromPasswordCompletedCounter = observability.ObtainMetricCounter("gotrue_generate_from_password_completed", "Number of completed GenerateFromPassword hashing attempts") +) + +var ( + compareHashAndPasswordSubmittedCounter = observability.ObtainMetricCounter("gotrue_compare_hash_and_password_submitted", "Number of submitted CompareHashAndPassword hashing attempts") + compareHashAndPasswordCompletedCounter = observability.ObtainMetricCounter("gotrue_compare_hash_and_password_completed", "Number of completed CompareHashAndPassword hashing attempts") +) + +var ErrArgon2MismatchedHashAndPassword = errors.New("crypto: argon2 hash and password mismatch") +var ErrScryptMismatchedHashAndPassword = errors.New("crypto: fbscrypt hash and password mismatch") + +// argon2HashRegexp https://github.com/P-H-C/phc-string-format/blob/master/phc-sf-spec.md#argon2-encoding +var argon2HashRegexp = regexp.MustCompile("^[$](?Pargon2(d|i|id))[$]v=(?P(16|19))[$]m=(?P[0-9]+),t=(?P[0-9]+),p=(?P

<p>[0-9]+)(,keyid=(?P<keyid>[^,$]+))?(,data=(?P<data>[^$]+))?[$](?P<salt>[^$]*)[$](?P<hash>.*)$") +var fbscryptHashRegexp = regexp.MustCompile(`^\$fbscrypt\$v=(?P<v>[0-9]+),n=(?P<n>[0-9]+),r=(?P<r>[0-9]+),p=(?P<p>
[0-9]+)(?:,ss=(?P[^,]+))?(?:,sk=(?P[^$]+))?\$(?P[^$]+)\$(?P.+)$`) + +type Argon2HashInput struct { + alg string + v string + memory uint64 + time uint64 + threads uint64 + keyid string + data string + salt []byte + rawHash []byte +} + +type FirebaseScryptHashInput struct { + v string + memory uint64 + rounds uint64 + threads uint64 + saltSeparator []byte + signerKey []byte + salt []byte + rawHash []byte +} + +// See: https://github.com/firebase/scrypt for implementation +func ParseFirebaseScryptHash(hash string) (*FirebaseScryptHashInput, error) { + submatch := fbscryptHashRegexp.FindStringSubmatchIndex(hash) + if submatch == nil { + return nil, errors.New("crypto: incorrect scrypt hash format") + } + + v := string(fbscryptHashRegexp.ExpandString(nil, "$v", hash, submatch)) + n := string(fbscryptHashRegexp.ExpandString(nil, "$n", hash, submatch)) + r := string(fbscryptHashRegexp.ExpandString(nil, "$r", hash, submatch)) + p := string(fbscryptHashRegexp.ExpandString(nil, "$p", hash, submatch)) + ss := string(fbscryptHashRegexp.ExpandString(nil, "$ss", hash, submatch)) + sk := string(fbscryptHashRegexp.ExpandString(nil, "$sk", hash, submatch)) + saltB64 := string(fbscryptHashRegexp.ExpandString(nil, "$salt", hash, submatch)) + hashB64 := string(fbscryptHashRegexp.ExpandString(nil, "$hash", hash, submatch)) + + if v != "1" { + return nil, fmt.Errorf("crypto: Firebase scrypt hash uses unsupported version %q only version 1 is supported", v) + } + memoryPower, err := strconv.ParseUint(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("crypto: Firebase scrypt hash has invalid n parameter %q %w", n, err) + } + if memoryPower == 0 { + return nil, fmt.Errorf("crypto: Firebase scrypt hash has invalid n=0") + } + rounds, err := strconv.ParseUint(r, 10, 64) + if err != nil { + return nil, fmt.Errorf("crypto: Firebase scrypt hash has invalid r parameter %q: %w", r, err) + } + if rounds == 0 { + return nil, fmt.Errorf("crypto: Firebase scrypt hash has invalid r=0") + } + + threads, err := strconv.ParseUint(p, 10, 8) + if err != nil { + return nil, fmt.Errorf("crypto: Firebase scrypt hash has invalid p parameter %q %w", p, err) + } + if threads == 0 { + return nil, fmt.Errorf("crypto: Firebase scrypt hash has invalid p=0") + } + + rawHash, err := base64.StdEncoding.DecodeString(hashB64) + if err != nil { + return nil, fmt.Errorf("crypto: Firebase scrypt hash has invalid base64 in the hash section %w", err) + } + + salt, err := base64.StdEncoding.DecodeString(saltB64) + if err != nil { + return nil, fmt.Errorf("crypto: Firebase scrypt salt has invalid base64 in the hash section %w", err) + } + + var saltSeparator, signerKey []byte + if signerKey, err = base64.StdEncoding.DecodeString(sk); err != nil { + return nil, err + } + if saltSeparator, err = base64.StdEncoding.DecodeString(ss); err != nil { + return nil, err + } + + input := &FirebaseScryptHashInput{ + v: v, + memory: uint64(1) << memoryPower, + rounds: rounds, + threads: threads, + salt: salt, + rawHash: rawHash, + saltSeparator: saltSeparator, + signerKey: signerKey, + } + + return input, nil +} + +func ParseArgon2Hash(hash string) (*Argon2HashInput, error) { + submatch := argon2HashRegexp.FindStringSubmatchIndex(hash) + if submatch == nil { + return nil, errors.New("crypto: incorrect argon2 hash format") + } + + alg := string(argon2HashRegexp.ExpandString(nil, "$alg", hash, submatch)) + v := string(argon2HashRegexp.ExpandString(nil, "$v", hash, submatch)) + m := string(argon2HashRegexp.ExpandString(nil, "$m", hash, submatch)) + t := 
string(argon2HashRegexp.ExpandString(nil, "$t", hash, submatch)) + p := string(argon2HashRegexp.ExpandString(nil, "$p", hash, submatch)) + keyid := string(argon2HashRegexp.ExpandString(nil, "$keyid", hash, submatch)) + data := string(argon2HashRegexp.ExpandString(nil, "$data", hash, submatch)) + saltB64 := string(argon2HashRegexp.ExpandString(nil, "$salt", hash, submatch)) + hashB64 := string(argon2HashRegexp.ExpandString(nil, "$hash", hash, submatch)) + + if alg != "argon2i" && alg != "argon2id" { + return nil, fmt.Errorf("crypto: argon2 hash uses unsupported algorithm %q only argon2i and argon2id supported", alg) + } + + if v != "19" { + return nil, fmt.Errorf("crypto: argon2 hash uses unsupported version %q only %d is supported", v, argon2.Version) + } + + if data != "" { + return nil, fmt.Errorf("crypto: argon2 hashes with the data parameter not supported") + } + + if keyid != "" { + return nil, fmt.Errorf("crypto: argon2 hashes with the keyid parameter not supported") + } + + memory, err := strconv.ParseUint(m, 10, 32) + if err != nil { + return nil, fmt.Errorf("crypto: argon2 hash has invalid m parameter %q %w", m, err) + } + + time, err := strconv.ParseUint(t, 10, 32) + if err != nil { + return nil, fmt.Errorf("crypto: argon2 hash has invalid t parameter %q %w", t, err) + } + + threads, err := strconv.ParseUint(p, 10, 8) + if err != nil { + return nil, fmt.Errorf("crypto: argon2 hash has invalid p parameter %q %w", p, err) + } + + rawHash, err := base64.RawStdEncoding.DecodeString(hashB64) + if err != nil { + return nil, fmt.Errorf("crypto: argon2 hash has invalid base64 in the hash section %w", err) + } + if len(rawHash) == 0 { + return nil, errors.New("crypto: argon2 hash is empty") + } + + salt, err := base64.RawStdEncoding.DecodeString(saltB64) + if err != nil { + return nil, fmt.Errorf("crypto: argon2 hash has invalid base64 in the salt section %w", err) + } + if len(salt) == 0 { + return nil, errors.New("crypto: argon2 salt is empty") + } + + input := Argon2HashInput{ + alg: alg, + v: v, + memory: memory, + time: time, + threads: threads, + keyid: keyid, + data: data, + salt: salt, + rawHash: rawHash, + } + + return &input, nil +} + +func compareHashAndPasswordArgon2(ctx context.Context, hash, password string) error { + input, err := ParseArgon2Hash(hash) + if err != nil { + return err + } + + attributes := []attribute.KeyValue{ + attribute.String("alg", input.alg), + attribute.String("v", input.v), + attribute.Int64("m", int64(input.memory)), + attribute.Int64("t", int64(input.time)), + attribute.Int("p", int(input.threads)), + attribute.Int("len", len(input.rawHash)), + } // #nosec G115 + + var match bool + var derivedKey []byte + compareHashAndPasswordSubmittedCounter.Add(ctx, 1, metric.WithAttributes(attributes...)) + defer func() { + attributes = append(attributes, attribute.Bool( + "match", + match, + )) + + compareHashAndPasswordCompletedCounter.Add(ctx, 1, metric.WithAttributes(attributes...)) + }() + + switch input.alg { + case "argon2i": + derivedKey = argon2.Key([]byte(password), input.salt, uint32(input.time), uint32(input.memory), uint8(input.threads), uint32(len(input.rawHash))) // #nosec G115 + + case "argon2id": + derivedKey = argon2.IDKey([]byte(password), input.salt, uint32(input.time), uint32(input.memory), uint8(input.threads), uint32(len(input.rawHash))) // #nosec G115 + } + + match = subtle.ConstantTimeCompare(derivedKey, input.rawHash) == 1 + + if !match { + return ErrArgon2MismatchedHashAndPassword + } + + return nil +} + +func 
compareHashAndPasswordFirebaseScrypt(ctx context.Context, hash, password string) error { + input, err := ParseFirebaseScryptHash(hash) + if err != nil { + return err + } + + attributes := []attribute.KeyValue{ + attribute.String("v", input.v), + attribute.Int64("n", int64(input.memory)), + attribute.Int64("r", int64(input.rounds)), + attribute.Int("p", int(input.threads)), + attribute.Int("len", len(input.rawHash)), + } // #nosec G115 + + var match bool + compareHashAndPasswordSubmittedCounter.Add(ctx, 1, metric.WithAttributes(attributes...)) + defer func() { + attributes = append(attributes, attribute.Bool("match", match)) + compareHashAndPasswordCompletedCounter.Add(ctx, 1, metric.WithAttributes(attributes...)) + }() + + derivedKey := firebaseScrypt([]byte(password), input.salt, input.signerKey, input.saltSeparator, input.memory, input.rounds, input.threads) + + match = subtle.ConstantTimeCompare(derivedKey, input.rawHash) == 1 + if !match { + return ErrScryptMismatchedHashAndPassword + } + + return nil +} + +func firebaseScrypt(password, salt, signerKey, saltSeparator []byte, memCost, rounds, p uint64) []byte { + ck := must(scrypt.Key(password, append(salt, saltSeparator...), int(memCost), int(rounds), int(p), FirebaseScryptKeyLen)) // #nosec G115 + block := must(aes.NewCipher(ck)) + + cipherText := make([]byte, aes.BlockSize+len(signerKey)) + + // #nosec G407 -- Firebase scrypt requires deterministic IV for consistent results. See: JaakkoL/firebase-scrypt-python@master/firebasescrypt/firebasescrypt.py#L58 + stream := cipher.NewCTR(block, cipherText[:aes.BlockSize]) + stream.XORKeyStream(cipherText[aes.BlockSize:], signerKey) + + return cipherText[aes.BlockSize:] +} + +// CompareHashAndPassword compares the hash and +// password, returns nil if equal otherwise an error. Context can be used to +// cancel the hashing if the algorithm supports it. +func CompareHashAndPassword(ctx context.Context, hash, password string) error { + if strings.HasPrefix(hash, Argon2Prefix) { + return compareHashAndPasswordArgon2(ctx, hash, password) + } else if strings.HasPrefix(hash, FirebaseScryptPrefix) { + return compareHashAndPasswordFirebaseScrypt(ctx, hash, password) + } + + // assume bcrypt + hashCost, err := bcrypt.Cost([]byte(hash)) + if err != nil { + return err + } + + attributes := []attribute.KeyValue{ + attribute.String("alg", "bcrypt"), + attribute.Int("bcrypt_cost", hashCost), + } + + compareHashAndPasswordSubmittedCounter.Add(ctx, 1, metric.WithAttributes(attributes...)) + defer func() { + attributes = append(attributes, attribute.Bool( + "match", + !errors.Is(err, bcrypt.ErrMismatchedHashAndPassword), + )) + + compareHashAndPasswordCompletedCounter.Add(ctx, 1, metric.WithAttributes(attributes...)) + }() + + err = bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)) + return err +} + +// GenerateFromPassword generates a password hash from a +// password, using PasswordHashCost. Context can be used to cancel the hashing +// if the algorithm supports it. 
+func GenerateFromPassword(ctx context.Context, password string) (string, error) { + hashCost := bcrypt.DefaultCost + + switch PasswordHashCost { + case QuickHashCost: + hashCost = bcrypt.MinCost + } + + attributes := []attribute.KeyValue{ + attribute.String("alg", "bcrypt"), + attribute.Int("bcrypt_cost", hashCost), + } + + generateFromPasswordSubmittedCounter.Add(ctx, 1, metric.WithAttributes(attributes...)) + defer generateFromPasswordCompletedCounter.Add(ctx, 1, metric.WithAttributes(attributes...)) + + hash := must(bcrypt.GenerateFromPassword([]byte(password), hashCost)) + + return string(hash), nil +} + +func GeneratePassword(requiredChars []string, length int) string { + passwordBuilder := strings.Builder{} + passwordBuilder.Grow(length) + + // Add required characters + for _, group := range requiredChars { + if len(group) > 0 { + randomIndex := secureRandomInt(len(group)) + + passwordBuilder.WriteByte(group[randomIndex]) + } + } + + // Define a default character set for random generation (if needed) + const allChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + + // Fill the rest of the password + for passwordBuilder.Len() < length { + randomIndex := secureRandomInt(len(allChars)) + passwordBuilder.WriteByte(allChars[randomIndex]) + } + + // Convert to byte slice for shuffling + passwordBytes := []byte(passwordBuilder.String()) + + // Secure shuffling + for i := len(passwordBytes) - 1; i > 0; i-- { + j := secureRandomInt(i + 1) + + passwordBytes[i], passwordBytes[j] = passwordBytes[j], passwordBytes[i] + } + + return string(passwordBytes) +} diff --git a/auth_v2.169.0/internal/crypto/password_test.go b/auth_v2.169.0/internal/crypto/password_test.go new file mode 100644 index 0000000..289c9fe --- /dev/null +++ b/auth_v2.169.0/internal/crypto/password_test.go @@ -0,0 +1,178 @@ +package crypto + +import ( + "context" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestArgon2(t *testing.T) { + // all of these hash the `test` string with various parameters + + examples := []string{ + "$argon2i$v=19$m=16,t=2,p=1$bGJRWThNOHJJTVBSdHl2dQ$NfEnUOuUpb7F2fQkgFUG4g", + "$argon2id$v=19$m=32,t=3,p=2$SFVpOWJ0eXhjRzVkdGN1RQ$RXnb8rh7LaDcn07xsssqqulZYXOM/EUCEFMVcAcyYVk", + } + + for _, example := range examples { + assert.NoError(t, CompareHashAndPassword(context.Background(), example, "test")) + } + + for _, example := range examples { + assert.Error(t, CompareHashAndPassword(context.Background(), example, "test1")) + } + + negativeExamples := []string{ + // 2d + "$argon2d$v=19$m=16,t=2,p=1$bGJRWThNOHJJTVBSdHl2dQ$NfEnUOuUpb7F2fQkgFUG4g", + // v=16 + "$argon2id$v=16$m=16,t=2,p=1$bGJRWThNOHJJTVBSdHl2dQ$NfEnUOuUpb7F2fQkgFUG4g", + // data + "$argon2id$v=19$m=16,t=2,p=1,data=abc$bGJRWThNOHJJTVBSdHl2dQ$NfEnUOuUpb7F2fQkgFUG4g", + // keyid + "$argon2id$v=19$m=16,t=2,p=1,keyid=abc$bGJRWThNOHJJTVBSdHl2dQ$NfEnUOuUpb7F2fQkgFUG4g", + // m larger than 32 bits + "$argon2id$v=19$m=4294967297,t=2,p=1$bGJRWThNOHJJTVBSdHl2dQ$NfEnUOuUpb7F2fQkgFUG4g", + // t larger than 32 bits + "$argon2id$v=19$m=16,t=4294967297,p=1$bGJRWThNOHJJTVBSdHl2dQ$NfEnUOuUpb7F2fQkgFUG4g", + // p larger than 8 bits + "$argon2id$v=19$m=16,t=2,p=256$bGJRWThNOHJJTVBSdHl2dQ$NfEnUOuUpb7F2fQkgFUG4g", + // salt not Base64 + "$argon2id$v=19$m=16,t=2,p=1$!!!$NfEnUOuUpb7F2fQkgFUG4g", + // hash not Base64 + "$argon2id$v=19$m=16,t=2,p=1$bGJRWThNOHJJTVBSdHl2dQ$!!!", + // salt empty + "$argon2id$v=19$m=16,t=2,p=1$$NfEnUOuUpb7F2fQkgFUG4g", + // hash empty + "$argon2id$v=19$m=16,t=2,p=1$bGJRWThNOHJJTVBSdHl2dQ$", 
+ } + + for _, example := range negativeExamples { + assert.Error(t, CompareHashAndPassword(context.Background(), example, "test")) + } +} + +func TestGeneratePassword(t *testing.T) { + tests := []struct { + name string + requiredChars []string + length int + }{ + { + name: "Valid password generation", + requiredChars: []string{"ABC", "123", "@#$"}, + length: 12, + }, + { + name: "Empty required chars", + requiredChars: []string{}, + length: 8, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := GeneratePassword(tt.requiredChars, tt.length) + + if len(got) != tt.length { + t.Errorf("GeneratePassword() returned password of length %d, want %d", len(got), tt.length) + } + + // Check if all required characters are present + for _, chars := range tt.requiredChars { + found := false + for _, c := range got { + if strings.ContainsRune(chars, c) { + found = true + break + } + } + if !found && len(chars) > 0 { + t.Errorf("GeneratePassword() missing required character from set %s", chars) + } + } + }) + } + + // Check for duplicates passwords + passwords := make(map[string]bool) + for i := 0; i < 30; i++ { + p := GeneratePassword([]string{"ABC", "123", "@#$"}, 30) + + if passwords[p] { + t.Errorf("GeneratePassword() generated duplicate password: %s", p) + } + passwords[p] = true + } +} + +func TestFirebaseScrypt(t *testing.T) { + // all of these use the `mytestpassword` string as the valid one + + examples := []string{ + "$fbscrypt$v=1,n=14,r=8,p=1,ss=Bw==,sk=ou9tdYTGyYm8kuR6Dt0Bp0kDuAYoXrK16mbZO4yGwAn3oLspjnN0/c41v8xZnO1n14J3MjKj1b2g6AUCAlFwMw==$C0sHCg9ek77hsg==$zKVTMvnWVw5BBOZNUdnsalx4c4c7y/w7IS5p6Ut2+CfEFFlz37J9huyQfov4iizN8dbjvEJlM5tQaJP84+hfTw==", + } + + for _, example := range examples { + assert.NoError(t, CompareHashAndPassword(context.Background(), example, "mytestpassword")) + } + + for _, example := range examples { + assert.Error(t, CompareHashAndPassword(context.Background(), example, "mytestpassword1")) + } + + negativeExamples := []string{ + // v not 1 + "$fbscrypt$v=2,n=14,r=8,p=1,ss=Bw==,sk=ou9tdYTGyYm8kuR6Dt0Bp0kDuAYoXrK16mbZO4yGwAn3oLspjnN0/c41v8xZnO1n14J3MjKj1b2g6AUCAlFwMw==$C0sHCg9ek77hsg==$zKVTMvnWVw5BBOZNUdnsalx4c4c7y/w7IS5p6Ut2+CfEFFlz37J9huyQfov4iizN8dbjvEJlM5tQaJP84+hfTw==", + // n not 32 bits + "$fbscrypt$v=1,n=4294967297,r=8,p=1,ss=Bw==,sk=ou9tdYTGyYm8kuR6Dt0Bp0kDuAYoXrK16mbZO4yGwAn3oLspjnN0/c41v8xZnO1n14J3MjKj1b2g6AUCAlFwMw==$C0sHCg9ek77hsg==$zKVTMvnWVw5BBOZNUdnsalx4c4c7y/w7IS5p6Ut2+CfEFFlz37J9huyQfov4iizN8dbjvEJlM5tQaJP84+hfTw==", + // n is 0 + "$fbscrypt$v=1,n=0,r=8,p=1,ss=Bw==,sk=ou9tdYTGyYm8kuR6Dt0Bp0kDuAYoXrK16mbZO4yGwAn3oLspjnN0/c41v8xZnO1n14J3MjKj1b2g6AUCAlFwMw==$C0sHCg9ek77hsg==$zKVTMvnWVw5BBOZNUdnsalx4c4c7y/w7IS5p6Ut2+CfEFFlz37J9huyQfov4iizN8dbjvEJlM5tQaJP84+hfTw==", + // rounds is not 64 bits + "$fbscrypt$v=1,n=14,r=18446744073709551617,p=1,ss=Bw==,sk=ou9tdYTGyYm8kuR6Dt0Bp0kDuAYoXrK16mbZO4yGwAn3oLspjnN0/c41v8xZnO1n14J3MjKj1b2g6AUCAlFwMw==$C0sHCg9ek77hsg==$zKVTMvnWVw5BBOZNUdnsalx4c4c7y/w7IS5p6Ut2+CfEFFlz37J9huyQfov4iizN8dbjvEJlM5tQaJP84+hfTw==", + // rounds is 0 + "$fbscrypt$v=1,n=14,r=0,p=1,ss=Bw==,sk=ou9tdYTGyYm8kuR6Dt0Bp0kDuAYoXrK16mbZO4yGwAn3oLspjnN0/c41v8xZnO1n14J3MjKj1b2g6AUCAlFwMw==$C0sHCg9ek77hsg==$zKVTMvnWVw5BBOZNUdnsalx4c4c7y/w7IS5p6Ut2+CfEFFlz37J9huyQfov4iizN8dbjvEJlM5tQaJP84+hfTw==", + // threads is not 8 bits + 
"$fbscrypt$v=1,n=14,r=8,p=256,ss=Bw==,sk=ou9tdYTGyYm8kuR6Dt0Bp0kDuAYoXrK16mbZO4yGwAn3oLspjnN0/c41v8xZnO1n14J3MjKj1b2g6AUCAlFwMw==$C0sHCg9ek77hsg==$zKVTMvnWVw5BBOZNUdnsalx4c4c7y/w7IS5p6Ut2+CfEFFlz37J9huyQfov4iizN8dbjvEJlM5tQaJP84+hfTw==", + // threads is 0 + "$fbscrypt$v=1,n=14,r=8,p=0,ss=Bw==,sk=ou9tdYTGyYm8kuR6Dt0Bp0kDuAYoXrK16mbZO4yGwAn3oLspjnN0/c41v8xZnO1n14J3MjKj1b2g6AUCAlFwMw==$C0sHCg9ek77hsg==$zKVTMvnWVw5BBOZNUdnsalx4c4c7y/w7IS5p6Ut2+CfEFFlz37J9huyQfov4iizN8dbjvEJlM5tQaJP84+hfTw==", + // hash is not base64 + "$fbscrypt$v=1,n=14,r=8,p=1,ss=Bw==,sk=ou9tdYTGyYm8kuR6Dt0Bp0kDuAYoXrK16mbZO4yGwAn3oLspjnN0/c41v8xZnO1n14J3MjKj1b2g6AUCAlFwMw==$C0sHCg9ek77hsg==$!!!", + // salt is not base64 + "$fbscrypt$v=1,n=14,r=8,p=1,ss=Bw==,sk=ou9tdYTGyYm8kuR6Dt0Bp0kDuAYoXrK16mbZO4yGwAn3oLspjnN0/c41v8xZnO1n14J3MjKj1b2g6AUCAlFwMw==$!!!$zKVTMvnWVw5BBOZNUdnsalx4c4c7y/w7IS5p6Ut2+CfEFFlz37J9huyQfov4iizN8dbjvEJlM5tQaJP84+hfTw==", + // signer key is not base64 + "$fbscrypt$v=1,n=14,r=8,p=1,ss=Bw==,sk=!!!$C0sHCg9ek77hsg==$zKVTMvnWVw5BBOZNUdnsalx4c4c7y/w7IS5p6Ut2+CfEFFlz37J9huyQfov4iizN8dbjvEJlM5tQaJP84+hfTw==", + // salt separator is not base64 + "$fbscrypt$v=1,n=14,r=8,p=1,ss=!!!,sk=ou9tdYTGyYm8kuR6Dt0Bp0kDuAYoXrK16mbZO4yGwAn3oLspjnN0/c41v8xZnO1n14J3MjKj1b2g6AUCAlFwMw==$C0sHCg9ek77hsg==$zKVTMvnWVw5BBOZNUdnsalx4c4c7y/w7IS5p6Ut2+CfEFFlz37J9huyQfov4iizN8dbjvEJlM5tQaJP84+hfTw==", + } + + for _, example := range negativeExamples { + assert.Error(t, CompareHashAndPassword(context.Background(), example, "mytestpassword")) + } +} + +func TestBcrypt(t *testing.T) { + // all use the `test` password + + examples := []string{ + "$2y$04$mIJxfrCaEI3GukZe11CiXublhEFanu5.ododkll1WphfSp6pn4zIu", + "$2y$10$srNl09aPtc2qr.0Vl.NtjekJRt/NxRxYQm3qd3OvfcKsJgVnr6.Ve", + } + + for _, example := range examples { + assert.NoError(t, CompareHashAndPassword(context.Background(), example, "test")) + } + + for _, example := range examples { + assert.Error(t, CompareHashAndPassword(context.Background(), example, "test1")) + } + + negativeExamples := []string{ + "not-a-hash", + } + for _, example := range negativeExamples { + assert.Error(t, CompareHashAndPassword(context.Background(), example, "test")) + } +} diff --git a/auth_v2.169.0/internal/crypto/utils.go b/auth_v2.169.0/internal/crypto/utils.go new file mode 100644 index 0000000..a6b38b8 --- /dev/null +++ b/auth_v2.169.0/internal/crypto/utils.go @@ -0,0 +1,9 @@ +package crypto + +func must[T any](a T, err error) T { + if err != nil { + panic(err) + } + + return a +} diff --git a/auth_v2.169.0/internal/crypto/utils_test.go b/auth_v2.169.0/internal/crypto/utils_test.go new file mode 100644 index 0000000..1aeeab8 --- /dev/null +++ b/auth_v2.169.0/internal/crypto/utils_test.go @@ -0,0 +1,14 @@ +package crypto + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +func TestMust(t *testing.T) { + require.Panics(t, func() { + must(123, errors.New("panic")) + }) +} diff --git a/auth_v2.169.0/internal/hooks/auth_hooks.go b/auth_v2.169.0/internal/hooks/auth_hooks.go new file mode 100644 index 0000000..1b881d3 --- /dev/null +++ b/auth_v2.169.0/internal/hooks/auth_hooks.go @@ -0,0 +1,220 @@ +package hooks + +import ( + "github.com/gofrs/uuid" + "github.com/golang-jwt/jwt/v5" + "github.com/supabase/auth/internal/mailer" + "github.com/supabase/auth/internal/models" +) + +type HookType string + +const ( + PostgresHook HookType = "pg-functions" +) + +const ( + // In Miliseconds + DefaultTimeout = 2000 +) + +// Hook Names +const ( + HookRejection = "reject" +) + 
+type HTTPHookInput interface { + IsHTTPHook() +} + +type HookOutput interface { + IsError() bool + Error() string +} + +// TODO(joel): Move this to phone package +type SMS struct { + OTP string `json:"otp,omitempty"` + SMSType string `json:"sms_type,omitempty"` +} + +// #nosec +const MinimumViableTokenSchema = `{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "aud": { + "type": ["string", "array"] + }, + "exp": { + "type": "integer" + }, + "jti": { + "type": "string" + }, + "iat": { + "type": "integer" + }, + "iss": { + "type": "string" + }, + "nbf": { + "type": "integer" + }, + "sub": { + "type": "string" + }, + "email": { + "type": "string" + }, + "phone": { + "type": "string" + }, + "app_metadata": { + "type": "object", + "additionalProperties": true + }, + "user_metadata": { + "type": "object", + "additionalProperties": true + }, + "role": { + "type": "string" + }, + "aal": { + "type": "string" + }, + "amr": { + "type": "array", + "items": { + "type": "object" + } + }, + "session_id": { + "type": "string" + } + }, + "required": ["aud", "exp", "iat", "sub", "email", "phone", "role", "aal", "session_id", "is_anonymous"] +}` + +// AccessTokenClaims is a struct thats used for JWT claims +type AccessTokenClaims struct { + jwt.RegisteredClaims + Email string `json:"email"` + Phone string `json:"phone"` + AppMetaData map[string]interface{} `json:"app_metadata"` + UserMetaData map[string]interface{} `json:"user_metadata"` + Role string `json:"role"` + AuthenticatorAssuranceLevel string `json:"aal,omitempty"` + AuthenticationMethodReference []models.AMREntry `json:"amr,omitempty"` + SessionId string `json:"session_id,omitempty"` + IsAnonymous bool `json:"is_anonymous"` +} + +type MFAVerificationAttemptInput struct { + UserID uuid.UUID `json:"user_id"` + FactorID uuid.UUID `json:"factor_id"` + FactorType string `json:"factor_type"` + Valid bool `json:"valid"` +} + +type MFAVerificationAttemptOutput struct { + Decision string `json:"decision"` + Message string `json:"message"` + HookError AuthHookError `json:"error"` +} + +type PasswordVerificationAttemptInput struct { + UserID uuid.UUID `json:"user_id"` + Valid bool `json:"valid"` +} + +type PasswordVerificationAttemptOutput struct { + Decision string `json:"decision"` + Message string `json:"message"` + ShouldLogoutUser bool `json:"should_logout_user"` + HookError AuthHookError `json:"error"` +} + +type CustomAccessTokenInput struct { + UserID uuid.UUID `json:"user_id"` + Claims *AccessTokenClaims `json:"claims"` + AuthenticationMethod string `json:"authentication_method"` +} + +type CustomAccessTokenOutput struct { + Claims map[string]interface{} `json:"claims"` + HookError AuthHookError `json:"error,omitempty"` +} + +type SendSMSInput struct { + User *models.User `json:"user,omitempty"` + SMS SMS `json:"sms,omitempty"` +} + +type SendSMSOutput struct { + HookError AuthHookError `json:"error,omitempty"` +} + +type SendEmailInput struct { + User *models.User `json:"user"` + EmailData mailer.EmailData `json:"email_data"` +} + +type SendEmailOutput struct { + HookError AuthHookError `json:"error,omitempty"` +} + +func (mf *MFAVerificationAttemptOutput) IsError() bool { + return mf.HookError.Message != "" +} + +func (mf *MFAVerificationAttemptOutput) Error() string { + return mf.HookError.Message +} + +func (p *PasswordVerificationAttemptOutput) IsError() bool { + return p.HookError.Message != "" +} + +func (p *PasswordVerificationAttemptOutput) Error() string { + return p.HookError.Message +} + 
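To make the control flow concrete, here is a sketch of how a caller might consume one of these outputs (handleMFAHookResponse is an invented name; encoding/json and errors are assumed imported; AuthHookError and the default rejection messages are defined further down this file, and the service's real dispatch logic differs):

// Sketch: decode a hook's JSON response and act on its decision.
func handleMFAHookResponse(raw []byte) error {
	var out MFAVerificationAttemptOutput
	if err := json.Unmarshal(raw, &out); err != nil {
		return err
	}
	if out.IsError() {
		// The hook reported a structured error payload.
		return &out.HookError
	}
	if out.Decision == HookRejection {
		msg := out.Message
		if msg == "" {
			msg = DefaultMFAHookRejectionMessage
		}
		return errors.New(msg)
	}
	return nil
}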
+func (ca *CustomAccessTokenOutput) IsError() bool { + return ca.HookError.Message != "" +} + +func (ca *CustomAccessTokenOutput) Error() string { + return ca.HookError.Message +} + +func (cs *SendSMSOutput) IsError() bool { + return cs.HookError.Message != "" +} + +func (cs *SendSMSOutput) Error() string { + return cs.HookError.Message +} + +func (cs *SendEmailOutput) IsError() bool { + return cs.HookError.Message != "" +} + +func (cs *SendEmailOutput) Error() string { + return cs.HookError.Message +} + +type AuthHookError struct { + HTTPCode int `json:"http_code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (a *AuthHookError) Error() string { + return a.Message +} + +const ( + DefaultMFAHookRejectionMessage = "Further MFA verification attempts will be rejected." + DefaultPasswordHookRejectionMessage = "Further password verification attempts will be rejected." +) diff --git a/auth_v2.169.0/internal/mailer/mailer.go b/auth_v2.169.0/internal/mailer/mailer.go new file mode 100644 index 0000000..1499960 --- /dev/null +++ b/auth_v2.169.0/internal/mailer/mailer.go @@ -0,0 +1,93 @@ +package mailer + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/sirupsen/logrus" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" +) + +// Mailer defines the interface a mailer must implement. +type Mailer interface { + InviteMail(r *http.Request, user *models.User, otp, referrerURL string, externalURL *url.URL) error + ConfirmationMail(r *http.Request, user *models.User, otp, referrerURL string, externalURL *url.URL) error + RecoveryMail(r *http.Request, user *models.User, otp, referrerURL string, externalURL *url.URL) error + MagicLinkMail(r *http.Request, user *models.User, otp, referrerURL string, externalURL *url.URL) error + EmailChangeMail(r *http.Request, user *models.User, otpNew, otpCurrent, referrerURL string, externalURL *url.URL) error + ReauthenticateMail(r *http.Request, user *models.User, otp string) error + GetEmailActionLink(user *models.User, actionType, referrerURL string, externalURL *url.URL) (string, error) +} + +type EmailParams struct { + Token string + Type string + RedirectTo string +} + +type EmailData struct { + Token string `json:"token"` + TokenHash string `json:"token_hash"` + RedirectTo string `json:"redirect_to"` + EmailActionType string `json:"email_action_type"` + SiteURL string `json:"site_url"` + TokenNew string `json:"token_new"` + TokenHashNew string `json:"token_hash_new"` +} + +// NewMailer returns a new gotrue mailer +func NewMailer(globalConfig *conf.GlobalConfiguration) Mailer { + from := globalConfig.SMTP.FromAddress() + u, _ := url.ParseRequestURI(globalConfig.API.ExternalURL) + + var mailClient MailClient + if globalConfig.SMTP.Host == "" { + logrus.Infof("Noop mail client being used for %v", globalConfig.SiteURL) + mailClient = &noopMailClient{ + EmailValidator: newEmailValidator(globalConfig.Mailer), + } + } else { + mailClient = &MailmeMailer{ + Host: globalConfig.SMTP.Host, + Port: globalConfig.SMTP.Port, + User: globalConfig.SMTP.User, + Pass: globalConfig.SMTP.Pass, + LocalName: u.Hostname(), + From: from, + BaseURL: globalConfig.SiteURL, + Logger: logrus.StandardLogger(), + MailLogging: globalConfig.SMTP.LoggingEnabled, + EmailValidator: newEmailValidator(globalConfig.Mailer), + } + } + + return &TemplateMailer{ + SiteURL: globalConfig.SiteURL, + Config: globalConfig, + Mailer: mailClient, + } +} + +func withDefault(value, defaultValue string) string { + if value == "" { + return defaultValue 
+ } + return value +} + +func getPath(filepath string, params *EmailParams) (*url.URL, error) { + path := &url.URL{} + if filepath != "" { + if p, err := url.Parse(filepath); err != nil { + return nil, err + } else { + path = p + } + } + if params != nil { + path.RawQuery = fmt.Sprintf("token=%s&type=%s&redirect_to=%s", url.QueryEscape(params.Token), url.QueryEscape(params.Type), encodeRedirectURL(params.RedirectTo)) + } + return path, nil +} diff --git a/auth_v2.169.0/internal/mailer/mailer_test.go b/auth_v2.169.0/internal/mailer/mailer_test.go new file mode 100644 index 0000000..290d65d --- /dev/null +++ b/auth_v2.169.0/internal/mailer/mailer_test.go @@ -0,0 +1,87 @@ +package mailer + +import ( + "net/url" + "regexp" + "testing" + + "github.com/stretchr/testify/assert" +) + +var urlRegexp = regexp.MustCompile(`^https?://[^/]+`) + +func enforceRelativeURL(url string) string { + return urlRegexp.ReplaceAllString(url, "") +} + +func TestGetPath(t *testing.T) { + params := EmailParams{ + Token: "token", + Type: "signup", + RedirectTo: "https://example.com", + } + cases := []struct { + SiteURL string + Path string + Params *EmailParams + Expected string + }{ + { + SiteURL: "https://test.example.com", + Path: "/templates/confirm.html", + Params: nil, + Expected: "https://test.example.com/templates/confirm.html", + }, + { + SiteURL: "https://test.example.com/removedpath", + Path: "/templates/confirm.html", + Params: nil, + Expected: "https://test.example.com/templates/confirm.html", + }, + { + SiteURL: "https://test.example.com/", + Path: "/trailingslash/", + Params: nil, + Expected: "https://test.example.com/trailingslash/", + }, + { + SiteURL: "https://test.example.com", + Path: "f", + Params: ¶ms, + Expected: "https://test.example.com/f?token=token&type=signup&redirect_to=https://example.com", + }, + { + SiteURL: "https://test.example.com", + Path: "", + Params: ¶ms, + Expected: "https://test.example.com?token=token&type=signup&redirect_to=https://example.com", + }, + } + + for _, c := range cases { + u, err := url.ParseRequestURI(c.SiteURL) + assert.NoError(t, err, "error parsing URI request") + + path, err := getPath(c.Path, c.Params) + + assert.NoError(t, err) + assert.Equal(t, c.Expected, u.ResolveReference(path).String()) + } +} + +func TestRelativeURL(t *testing.T) { + cases := []struct { + URL string + Expected string + }{ + {"https://test.example.com", ""}, + {"http://test.example.com", ""}, + {"test.example.com", "test.example.com"}, + {"/some/path#fragment", "/some/path#fragment"}, + } + + for _, c := range cases { + res := enforceRelativeURL(c.URL) + assert.Equal(t, c.Expected, res, c.URL) + } +} diff --git a/auth_v2.169.0/internal/mailer/mailme.go b/auth_v2.169.0/internal/mailer/mailme.go new file mode 100644 index 0000000..20ff177 --- /dev/null +++ b/auth_v2.169.0/internal/mailer/mailme.go @@ -0,0 +1,230 @@ +package mailer + +import ( + "bytes" + "context" + "errors" + "html/template" + "io" + "log" + "net/http" + "strings" + "sync" + "time" + + "gopkg.in/gomail.v2" + + "github.com/sirupsen/logrus" +) + +// TemplateRetries is the amount of time MailMe will try to fetch a URL before giving up +const TemplateRetries = 3 + +// TemplateExpiration is the time period that the template will be cached for +const TemplateExpiration = 10 * time.Second + +// MailmeMailer lets MailMe send templated mails +type MailmeMailer struct { + From string + Host string + Port int + User string + Pass string + BaseURL string + LocalName string + FuncMap template.FuncMap + cache *TemplateCache + Logger 
logrus.FieldLogger + MailLogging bool + EmailValidator *EmailValidator +} + +// Mail sends a templated mail. It will try to load the template from a URL, and +// otherwise fall back to the default +func (m *MailmeMailer) Mail( + ctx context.Context, + to, subjectTemplate, templateURL, defaultTemplate string, + templateData map[string]interface{}, + headers map[string][]string, + typ string, +) error { + if m.FuncMap == nil { + m.FuncMap = map[string]interface{}{} + } + if m.cache == nil { + m.cache = &TemplateCache{ + templates: map[string]*MailTemplate{}, + funcMap: m.FuncMap, + logger: m.Logger, + } + } + + if m.EmailValidator != nil { + if err := m.EmailValidator.Validate(ctx, to); err != nil { + return err + } + } + + tmp, err := template.New("Subject").Funcs(template.FuncMap(m.FuncMap)).Parse(subjectTemplate) + if err != nil { + return err + } + + subject := &bytes.Buffer{} + err = tmp.Execute(subject, templateData) + if err != nil { + return err + } + + body, err := m.MailBody(templateURL, defaultTemplate, templateData) + if err != nil { + return err + } + + mail := gomail.NewMessage() + mail.SetHeader("From", m.From) + mail.SetHeader("To", to) + mail.SetHeader("Subject", subject.String()) + + for k, v := range headers { + if v != nil { + mail.SetHeader(k, v...) + } + } + + mail.SetBody("text/html", body) + + dial := gomail.NewDialer(m.Host, m.Port, m.User, m.Pass) + if m.LocalName != "" { + dial.LocalName = m.LocalName + } + + if m.MailLogging { + defer func() { + fields := logrus.Fields{ + "event": "mail.send", + "mail_type": typ, + "mail_from": m.From, + "mail_to": to, + } + m.Logger.WithFields(fields).Info("mail.send") + }() + } + if err := dial.DialAndSend(mail); err != nil { + return err + } + return nil +} + +type MailTemplate struct { + tmp *template.Template + expiresAt time.Time +} + +type TemplateCache struct { + templates map[string]*MailTemplate + mutex sync.Mutex + funcMap template.FuncMap + logger logrus.FieldLogger +} + +func (t *TemplateCache) Get(url string) (*template.Template, error) { + cached, ok := t.templates[url] + if ok && (cached.expiresAt.Before(time.Now())) { + return cached.tmp, nil + } + data, err := t.fetchTemplate(url, TemplateRetries) + if err != nil { + return nil, err + } + return t.Set(url, data, TemplateExpiration) +} + +func (t *TemplateCache) Set(key, value string, expirationTime time.Duration) (*template.Template, error) { + parsed, err := template.New(key).Funcs(t.funcMap).Parse(value) + if err != nil { + return nil, err + } + + cached := &MailTemplate{ + tmp: parsed, + expiresAt: time.Now().Add(expirationTime), + } + t.mutex.Lock() + t.templates[key] = cached + t.mutex.Unlock() + return parsed, nil +} + +func (t *TemplateCache) fetchTemplate(url string, triesLeft int) (string, error) { + client := &http.Client{ + Timeout: 10 * time.Second, + } + + resp, err := client.Get(url) + if err != nil && triesLeft > 0 { + return t.fetchTemplate(url, triesLeft-1) + } + if err != nil { + return "", err + } + defer resp.Body.Close() + if resp.StatusCode == 200 { // OK + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil && triesLeft > 0 { + return t.fetchTemplate(url, triesLeft-1) + } + if err != nil { + return "", err + } + return string(bodyBytes), err + } + if triesLeft > 0 { + return t.fetchTemplate(url, triesLeft-1) + } + return "", errors.New("mailer: unable to fetch mail template") +} + +func (m *MailmeMailer) MailBody(url string, defaultTemplate string, data map[string]interface{}) (string, error) { + if m.FuncMap == nil { + m.FuncMap = 
map[string]interface{}{} + } + if m.cache == nil { + m.cache = &TemplateCache{templates: map[string]*MailTemplate{}, funcMap: m.FuncMap} + } + + var temp *template.Template + var err error + + if url != "" { + var absoluteURL string + if strings.HasPrefix(url, "http") { + absoluteURL = url + } else { + absoluteURL = m.BaseURL + url + } + temp, err = m.cache.Get(absoluteURL) + if err != nil { + log.Printf("Error loading template from %v: %v\n", url, err) + } + } + + if temp == nil { + cached, ok := m.cache.templates[url] + if ok { + temp = cached.tmp + } else { + temp, err = m.cache.Set(url, defaultTemplate, 0) + if err != nil { + return "", err + } + } + } + + buf := &bytes.Buffer{} + err = temp.Execute(buf, data) + if err != nil { + return "", err + } + return buf.String(), nil +} diff --git a/auth_v2.169.0/internal/mailer/noop.go b/auth_v2.169.0/internal/mailer/noop.go new file mode 100644 index 0000000..0e0e3bf --- /dev/null +++ b/auth_v2.169.0/internal/mailer/noop.go @@ -0,0 +1,28 @@ +package mailer + +import ( + "context" + "errors" +) + +type noopMailClient struct { + EmailValidator *EmailValidator +} + +func (m *noopMailClient) Mail( + ctx context.Context, + to, subjectTemplate, templateURL, defaultTemplate string, + templateData map[string]interface{}, + headers map[string][]string, + typ string, +) error { + if to == "" { + return errors.New("to field cannot be empty") + } + if m.EmailValidator != nil { + if err := m.EmailValidator.Validate(ctx, to); err != nil { + return err + } + } + return nil +} diff --git a/auth_v2.169.0/internal/mailer/template.go b/auth_v2.169.0/internal/mailer/template.go new file mode 100644 index 0000000..59a4854 --- /dev/null +++ b/auth_v2.169.0/internal/mailer/template.go @@ -0,0 +1,420 @@ +package mailer + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/models" +) + +type MailRequest struct { + To string + SubjectTemplate string + TemplateURL string + DefaultTemplate string + TemplateData map[string]interface{} + Headers map[string][]string + Type string +} + +type MailClient interface { + Mail( + ctx context.Context, + to string, + subjectTemplate string, + templateURL string, + defaultTemplate string, + templateData map[string]interface{}, + headers map[string][]string, + typ string, + ) error +} + +// TemplateMailer will send mail and use templates from the site for easy mail styling +type TemplateMailer struct { + SiteURL string + Config *conf.GlobalConfiguration + Mailer MailClient +} + +func encodeRedirectURL(referrerURL string) string { + if len(referrerURL) > 0 { + if strings.ContainsAny(referrerURL, "&=#") { + // if the string contains &, = or # it has not been URL + // encoded by the caller, which means it should be URL + // encoded by us otherwise, it should be taken as-is + referrerURL = url.QueryEscape(referrerURL) + } + } + return referrerURL +} + +const ( + SignupVerification = "signup" + RecoveryVerification = "recovery" + InviteVerification = "invite" + MagicLinkVerification = "magiclink" + EmailChangeVerification = "email_change" + EmailOTPVerification = "email" + EmailChangeCurrentVerification = "email_change_current" + EmailChangeNewVerification = "email_change_new" + ReauthenticationVerification = "reauthentication" +) + +const defaultInviteMail = `

+<h2>You have been invited</h2>
+
+<p>You have been invited to create a user on {{ .SiteURL }}. Follow this link to accept the invite:</p>
+<p><a href="{{ .ConfirmationURL }}">Accept the invite</a></p>
+<p>Alternatively, enter the code: {{ .Token }}</p>`
+
+const defaultConfirmationMail = `<h2>Confirm your email</h2>
+
+<p>Follow this link to confirm your email:</p>
+<p><a href="{{ .ConfirmationURL }}">Confirm your email address</a></p>
+<p>Alternatively, enter the code: {{ .Token }}</p>`
+
+const defaultRecoveryMail = `<h2>Reset password</h2>
+
+<p>Follow this link to reset the password for your user:</p>
+<p><a href="{{ .ConfirmationURL }}">Reset password</a></p>
+<p>Alternatively, enter the code: {{ .Token }}</p>`
+
+const defaultMagicLinkMail = `<h2>Magic Link</h2>
+
+<p>Follow this link to login:</p>
+<p><a href="{{ .ConfirmationURL }}">Log In</a></p>
+<p>Alternatively, enter the code: {{ .Token }}</p>`
+
+const defaultEmailChangeMail = `<h2>Confirm email address change</h2>
+
+<p>Follow this link to confirm the update of your email address from {{ .Email }} to {{ .NewEmail }}:</p>
+<p><a href="{{ .ConfirmationURL }}">Change email address</a></p>
+<p>Alternatively, enter the code: {{ .Token }}</p>`
+
+const defaultReauthenticateMail = `<h2>Confirm reauthentication</h2>
+
+<p>Enter the code: {{ .Token }}</p>
` + +func (m *TemplateMailer) Headers(messageType string) map[string][]string { + originalHeaders := m.Config.SMTP.NormalizedHeaders() + + if originalHeaders == nil { + return nil + } + + headers := make(map[string][]string, len(originalHeaders)) + + for header, values := range originalHeaders { + replacedValues := make([]string, 0, len(values)) + + if header == "" { + continue + } + + for _, value := range values { + if value == "" { + continue + } + + // TODO: in the future, use a templating engine to add more contextual data available to headers + if strings.Contains(value, "$messageType") { + replacedValues = append(replacedValues, strings.ReplaceAll(value, "$messageType", messageType)) + } else { + replacedValues = append(replacedValues, value) + } + } + + headers[header] = replacedValues + } + + return headers +} + +// InviteMail sends a invite mail to a new user +func (m *TemplateMailer) InviteMail(r *http.Request, user *models.User, otp, referrerURL string, externalURL *url.URL) error { + path, err := getPath(m.Config.Mailer.URLPaths.Invite, &EmailParams{ + Token: user.ConfirmationToken, + Type: "invite", + RedirectTo: referrerURL, + }) + + if err != nil { + return err + } + + data := map[string]interface{}{ + "SiteURL": m.Config.SiteURL, + "ConfirmationURL": externalURL.ResolveReference(path).String(), + "Email": user.Email, + "Token": otp, + "TokenHash": user.ConfirmationToken, + "Data": user.UserMetaData, + "RedirectTo": referrerURL, + } + + return m.Mailer.Mail( + r.Context(), + user.GetEmail(), + withDefault(m.Config.Mailer.Subjects.Invite, "You have been invited"), + m.Config.Mailer.Templates.Invite, + defaultInviteMail, + data, + m.Headers("invite"), + "invite", + ) +} + +// ConfirmationMail sends a signup confirmation mail to a new user +func (m *TemplateMailer) ConfirmationMail(r *http.Request, user *models.User, otp, referrerURL string, externalURL *url.URL) error { + path, err := getPath(m.Config.Mailer.URLPaths.Confirmation, &EmailParams{ + Token: user.ConfirmationToken, + Type: "signup", + RedirectTo: referrerURL, + }) + if err != nil { + return err + } + + data := map[string]interface{}{ + "SiteURL": m.Config.SiteURL, + "ConfirmationURL": externalURL.ResolveReference(path).String(), + "Email": user.Email, + "Token": otp, + "TokenHash": user.ConfirmationToken, + "Data": user.UserMetaData, + "RedirectTo": referrerURL, + } + + return m.Mailer.Mail( + r.Context(), + user.GetEmail(), + withDefault(m.Config.Mailer.Subjects.Confirmation, "Confirm Your Email"), + m.Config.Mailer.Templates.Confirmation, + defaultConfirmationMail, + data, + m.Headers("confirm"), + "confirm", + ) +} + +// ReauthenticateMail sends a reauthentication mail to an authenticated user +func (m *TemplateMailer) ReauthenticateMail(r *http.Request, user *models.User, otp string) error { + data := map[string]interface{}{ + "SiteURL": m.Config.SiteURL, + "Email": user.Email, + "Token": otp, + "Data": user.UserMetaData, + } + + return m.Mailer.Mail( + r.Context(), + user.GetEmail(), + withDefault(m.Config.Mailer.Subjects.Reauthentication, "Confirm reauthentication"), + m.Config.Mailer.Templates.Reauthentication, + defaultReauthenticateMail, + data, + m.Headers("reauthenticate"), + "reauthenticate", + ) +} + +// EmailChangeMail sends an email change confirmation mail to a user +func (m *TemplateMailer) EmailChangeMail(r *http.Request, user *models.User, otpNew, otpCurrent, referrerURL string, externalURL *url.URL) error { + type Email struct { + Address string + Otp string + TokenHash string + Subject string 
+		Template  string
+	}
+	emails := []Email{
+		{
+			Address:   user.EmailChange,
+			Otp:       otpNew,
+			TokenHash: user.EmailChangeTokenNew,
+			Subject:   withDefault(m.Config.Mailer.Subjects.EmailChange, "Confirm Email Change"),
+			Template:  m.Config.Mailer.Templates.EmailChange,
+		},
+	}
+
+	currentEmail := user.GetEmail()
+	if m.Config.Mailer.SecureEmailChangeEnabled && currentEmail != "" {
+		emails = append(emails, Email{
+			Address:   currentEmail,
+			Otp:       otpCurrent,
+			TokenHash: user.EmailChangeTokenCurrent,
+			Subject:   withDefault(m.Config.Mailer.Subjects.Confirmation, "Confirm Email Address"),
+			Template:  m.Config.Mailer.Templates.EmailChange,
+		})
+	}
+
+	ctx, cancel := context.WithCancel(r.Context())
+	defer cancel()
+
+	errors := make(chan error, len(emails))
+	for _, email := range emails {
+		path, err := getPath(
+			m.Config.Mailer.URLPaths.EmailChange,
+			&EmailParams{
+				Token:      email.TokenHash,
+				Type:       "email_change",
+				RedirectTo: referrerURL,
+			},
+		)
+		if err != nil {
+			return err
+		}
+		// Pass the per-email subject through so the current address gets the
+		// Confirmation subject configured above instead of always receiving
+		// the EmailChange one.
+		go func(address, token, tokenHash, subject, template string) {
+			data := map[string]interface{}{
+				"SiteURL":         m.Config.SiteURL,
+				"ConfirmationURL": externalURL.ResolveReference(path).String(),
+				"Email":           user.GetEmail(),
+				"NewEmail":        user.EmailChange,
+				"Token":           token,
+				"TokenHash":       tokenHash,
+				"SendingTo":       address,
+				"Data":            user.UserMetaData,
+				"RedirectTo":      referrerURL,
+			}
+			errors <- m.Mailer.Mail(
+				ctx,
+				address,
+				subject,
+				template,
+				defaultEmailChangeMail,
+				data,
+				m.Headers("email_change"),
+				"email_change",
+			)
+		}(email.Address, email.Otp, email.TokenHash, email.Subject, email.Template)
+	}
+
+	for i := 0; i < len(emails); i++ {
+		e := <-errors
+		if e != nil {
+			return e
+		}
+	}
+	return nil
+}
+
+// RecoveryMail sends a password recovery mail
+func (m *TemplateMailer) RecoveryMail(r *http.Request, user *models.User, otp, referrerURL string, externalURL *url.URL) error {
+	path, err := getPath(m.Config.Mailer.URLPaths.Recovery, &EmailParams{
+		Token:      user.RecoveryToken,
+		Type:       "recovery",
+		RedirectTo: referrerURL,
+	})
+	if err != nil {
+		return err
+	}
+	data := map[string]interface{}{
+		"SiteURL":         m.Config.SiteURL,
+		"ConfirmationURL": externalURL.ResolveReference(path).String(),
+		"Email":           user.Email,
+		"Token":           otp,
+		"TokenHash":       user.RecoveryToken,
+		"Data":            user.UserMetaData,
+		"RedirectTo":      referrerURL,
+	}
+
+	return m.Mailer.Mail(
+		r.Context(),
+		user.GetEmail(),
+		withDefault(m.Config.Mailer.Subjects.Recovery, "Reset Your Password"),
+		m.Config.Mailer.Templates.Recovery,
+		defaultRecoveryMail,
+		data,
+		m.Headers("recovery"),
+		"recovery",
+	)
+}
+
+// MagicLinkMail sends a login link mail
+func (m *TemplateMailer) MagicLinkMail(r *http.Request, user *models.User, otp, referrerURL string, externalURL *url.URL) error {
+	path, err := getPath(m.Config.Mailer.URLPaths.Recovery, &EmailParams{
+		Token:      user.RecoveryToken,
+		Type:       "magiclink",
+		RedirectTo: referrerURL,
+	})
+	if err != nil {
+		return err
+	}
+
+	data := map[string]interface{}{
+		"SiteURL":         m.Config.SiteURL,
+		"ConfirmationURL": externalURL.ResolveReference(path).String(),
+		"Email":           user.Email,
+		"Token":           otp,
+		"TokenHash":       user.RecoveryToken,
+		"Data":            user.UserMetaData,
+		"RedirectTo":      referrerURL,
+	}
+
+	return m.Mailer.Mail(
+		r.Context(),
+		user.GetEmail(),
+		withDefault(m.Config.Mailer.Subjects.MagicLink, "Your Magic Link"),
+		m.Config.Mailer.Templates.MagicLink,
+		defaultMagicLinkMail,
+		data,
+		m.Headers("magiclink"),
+		"magiclink",
+	)
+}
+
+// GetEmailActionLink 
returns a magiclink, recovery or invite link based on the actionType passed. +func (m TemplateMailer) GetEmailActionLink(user *models.User, actionType, referrerURL string, externalURL *url.URL) (string, error) { + var err error + var path *url.URL + + switch actionType { + case "magiclink": + path, err = getPath(m.Config.Mailer.URLPaths.Recovery, &EmailParams{ + Token: user.RecoveryToken, + Type: "magiclink", + RedirectTo: referrerURL, + }) + case "recovery": + path, err = getPath(m.Config.Mailer.URLPaths.Recovery, &EmailParams{ + Token: user.RecoveryToken, + Type: "recovery", + RedirectTo: referrerURL, + }) + case "invite": + path, err = getPath(m.Config.Mailer.URLPaths.Invite, &EmailParams{ + Token: user.ConfirmationToken, + Type: "invite", + RedirectTo: referrerURL, + }) + case "signup": + path, err = getPath(m.Config.Mailer.URLPaths.Confirmation, &EmailParams{ + Token: user.ConfirmationToken, + Type: "signup", + RedirectTo: referrerURL, + }) + case "email_change_current": + path, err = getPath(m.Config.Mailer.URLPaths.EmailChange, &EmailParams{ + Token: user.EmailChangeTokenCurrent, + Type: "email_change", + RedirectTo: referrerURL, + }) + case "email_change_new": + path, err = getPath(m.Config.Mailer.URLPaths.EmailChange, &EmailParams{ + Token: user.EmailChangeTokenNew, + Type: "email_change", + RedirectTo: referrerURL, + }) + default: + return "", fmt.Errorf("invalid email action link type: %s", actionType) + } + if err != nil { + return "", err + } + return externalURL.ResolveReference(path).String(), nil +} diff --git a/auth_v2.169.0/internal/mailer/template_test.go b/auth_v2.169.0/internal/mailer/template_test.go new file mode 100644 index 0000000..f8fcd74 --- /dev/null +++ b/auth_v2.169.0/internal/mailer/template_test.go @@ -0,0 +1,65 @@ +package mailer + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/conf" +) + +func TestTemplateHeaders(t *testing.T) { + cases := []struct { + from string + typ string + exp map[string][]string + }{ + { + from: `{"x-supabase-project-ref": ["abcjrhohrqmvcpjpsyzc"]}`, + typ: "OTHER-TYPE", + exp: map[string][]string{ + "x-supabase-project-ref": {"abcjrhohrqmvcpjpsyzc"}, + }, + }, + + { + from: `{"X-Test-A": ["test-a", "test-b"], "X-Test-B": ["test-c", "abc $messageType"]}`, + typ: "TEST-MESSAGE-TYPE", + exp: map[string][]string{ + "X-Test-A": {"test-a", "test-b"}, + "X-Test-B": {"test-c", "abc TEST-MESSAGE-TYPE"}, + }, + }, + + { + from: `{"X-Test-A": ["test-a", "test-b"], "X-Test-B": ["test-c", "abc $messageType"]}`, + typ: "OTHER-TYPE", + exp: map[string][]string{ + "X-Test-A": {"test-a", "test-b"}, + "X-Test-B": {"test-c", "abc OTHER-TYPE"}, + }, + }, + + { + from: `{"X-Test-A": ["test-a", "test-b"], "X-Test-B": ["test-c", "abc $messageType"], "x-supabase-project-ref": ["abcjrhohrqmvcpjpsyzc"]}`, + typ: "OTHER-TYPE", + exp: map[string][]string{ + "X-Test-A": {"test-a", "test-b"}, + "X-Test-B": {"test-c", "abc OTHER-TYPE"}, + "x-supabase-project-ref": {"abcjrhohrqmvcpjpsyzc"}, + }, + }, + } + for _, tc := range cases { + mailer := TemplateMailer{ + Config: &conf.GlobalConfiguration{ + SMTP: conf.SMTPConfiguration{ + Headers: tc.from, + }, + }, + } + require.NoError(t, mailer.Config.SMTP.Validate()) + + hdrs := mailer.Headers(tc.typ) + require.Equal(t, hdrs, tc.exp) + } +} diff --git a/auth_v2.169.0/internal/mailer/validate.go b/auth_v2.169.0/internal/mailer/validate.go new file mode 100644 index 0000000..1827466 --- /dev/null +++ b/auth_v2.169.0/internal/mailer/validate.go @@ -0,0 +1,298 @@ 
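+// Illustrative usage (a hedged sketch; see validate_test.go further below
+// for real examples, and conf.MailerConfiguration for the config fields):
+//
+//	ev := newEmailValidator(conf.MailerConfiguration{EmailValidationExtended: true})
+//	if err := ev.Validate(ctx, "someone@example.org"); err != nil {
+//		// err is one of invalid_email_format, invalid_email_address,
+//		// invalid_email_dns
+//	}
+//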
+package mailer
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"io"
+	"net"
+	"net/http"
+	"net/mail"
+	"strings"
+	"time"
+
+	"github.com/supabase/auth/internal/conf"
+	"golang.org/x/sync/errgroup"
+)
+
+var invalidEmailMap = map[string]bool{
+
+	// People type these often enough to be special cased.
+	"test@gmail.com":    true,
+	"example@gmail.com": true,
+	"someone@gmail.com": true,
+	"test@email.com":    true,
+}
+
+var invalidHostSuffixes = []string{
+
+	// These are taken directly from Section 2 of RFC 2606[1].
+	//
+	// [1] https://www.rfc-editor.org/rfc/rfc2606.html#section-2
+	".test",
+	".example",
+	".invalid",
+	".local",
+	".localhost",
+}
+
+var invalidHostMap = map[string]bool{
+
+	// These exist here too for when they are typed as "test@test"
+	"test":      true,
+	"example":   true,
+	"invalid":   true,
+	"local":     true,
+	"localhost": true,
+
+	// These are commonly typed and have DNS records which cause a
+	// large enough volume of bounce backs to special case.
+	"test.com":    true,
+	"example.com": true,
+	"example.net": true,
+	"example.org": true,
+
+	// Hundreds of typos per day for this.
+	"gamil.com": true,
+
+	// These are not email providers, but people often use them.
+	"anonymous.com": true,
+	"email.com":     true,
+}
+
+const (
+	validateEmailTimeout = 3 * time.Second
+)
+
+var (
+	// We use the default resolver for this.
+	validateEmailResolver net.Resolver
+)
+
+var (
+	ErrInvalidEmailAddress = errors.New("invalid_email_address")
+	ErrInvalidEmailFormat  = errors.New("invalid_email_format")
+	ErrInvalidEmailDNS     = errors.New("invalid_email_dns")
+)
+
+type EmailValidator struct {
+	extended       bool
+	serviceURL     string
+	serviceHeaders map[string][]string
+}
+
+func newEmailValidator(mc conf.MailerConfiguration) *EmailValidator {
+	return &EmailValidator{
+		extended:       mc.EmailValidationExtended,
+		serviceURL:     mc.EmailValidationServiceURL,
+		serviceHeaders: mc.GetEmailValidationServiceHeaders(),
+	}
+}
+
+func (ev *EmailValidator) isExtendedEnabled() bool { return ev.extended }
+func (ev *EmailValidator) isServiceEnabled() bool  { return ev.serviceURL != "" }
+
+// Validate performs validation on the given email.
+//
+// When extended is true, returns a nil error in all cases but the following:
+// - `email` cannot be parsed by mail.ParseAddress
+// - `email` has a domain with no DNS configured
+//
+// When serviceURL is a non-empty string, the remote service is also asked
+// to determine if the email is valid.
+func (ev *EmailValidator) Validate(ctx context.Context, email string) error {
+	if !ev.isExtendedEnabled() && !ev.isServiceEnabled() {
+		return nil
+	}
+
+	// At least one of the two validation methods is enabled, set a timeout.
+	ctx, cancel := context.WithTimeout(ctx, validateEmailTimeout)
+	defer cancel()
+
+	// Easier control flow here to always use errgroup, it has very little
+	// overhead in comparison to the network calls it makes. The reason
+	// we run both checks concurrently is to tighten the timeout without
+	// potentially missing a call to the validation service due to a
+	// dns timeout or something more nefarious like a honeypot dns entry.
+	g := new(errgroup.Group)
+
+	// Validate the static rules first to prevent round trips on bad emails
+	// and to parse the host ahead of time.
+	if ev.isExtendedEnabled() {
+
+		// First validate static checks such as format, known invalid hosts
+		// and any other network free checks. Running this check before we
+		// call the service will help reduce the number of calls with known
+		// invalid emails.
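+		// validateStatic returns the host part after '@' (e.g. "gmail.com"
+		// for "someone@gmail.com") so validateHost can reuse it below.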
+		host, err := ev.validateStatic(email)
+		if err != nil {
+			return err
+		}
+
+		// Start the goroutine to validate the host.
+		g.Go(func() error { return ev.validateHost(ctx, host) })
+	}
+
+	// If the service check is enabled we start a goroutine to run
+	// that check as well.
+	if ev.isServiceEnabled() {
+		g.Go(func() error { return ev.validateService(ctx, email) })
+	}
+	return g.Wait()
+}
+
+// validateStatic will validate the format and do the static checks before
+// returning the host portion of the email.
+func (ev *EmailValidator) validateStatic(email string) (string, error) {
+	if !ev.isExtendedEnabled() {
+		return "", nil
+	}
+
+	ea, err := mail.ParseAddress(email)
+	if err != nil {
+		return "", ErrInvalidEmailFormat
+	}
+
+	i := strings.LastIndex(ea.Address, "@")
+	if i == -1 {
+		return "", ErrInvalidEmailFormat
+	}
+
+	// A few static lookups that are typed constantly and known to be invalid.
+	if invalidEmailMap[email] {
+		return "", ErrInvalidEmailAddress
+	}
+
+	host := email[i+1:]
+	if invalidHostMap[host] {
+		return "", ErrInvalidEmailDNS
+	}
+
+	for i := range invalidHostSuffixes {
+		if strings.HasSuffix(host, invalidHostSuffixes[i]) {
+			return "", ErrInvalidEmailDNS
+		}
+	}
+
+	name := email[:i]
+	if err := ev.validateProviders(name, host); err != nil {
+		return "", err
+	}
+	return host, nil
+}
+
+func (ev *EmailValidator) validateService(ctx context.Context, email string) error {
+	if !ev.isServiceEnabled() {
+		return nil
+	}
+
+	reqObject := struct {
+		EmailAddress string `json:"email"`
+	}{email}
+
+	reqData, err := json.Marshal(&reqObject)
+	if err != nil {
+		return nil
+	}
+
+	rdr := bytes.NewReader(reqData)
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, ev.serviceURL, rdr)
+	if err != nil {
+		return nil
+	}
+	req.Header.Set("Content-Type", "application/json")
+	for name, vals := range ev.serviceHeaders {
+		for _, val := range vals {
+			req.Header.Set(name, val)
+		}
+	}
+
+	res, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil
+	}
+	defer res.Body.Close()
+
+	resObject := struct {
+		Valid *bool `json:"valid"`
+	}{}
+
+	if res.StatusCode/100 != 2 {
+		// Ignore non-2xx responses so an outage of the validation service
+		// does not block sign-ups.
+		return nil
+	}
+
+	// 1<<5 = 32 bytes, enough for the expected {"valid": <bool>} body.
+	dec := json.NewDecoder(io.LimitReader(res.Body, 1<<5))
+	if err := dec.Decode(&resObject); err != nil {
+		return nil
+	}
+
+	// Only an explicit {"valid": false} rejects the email; a response
+	// without a "valid" field is treated as inconclusive and the email is
+	// allowed through.
+	if resObject.Valid == nil || *resObject.Valid {
+		return nil
+	}
+
+	return ErrInvalidEmailAddress
+}
+
+func (ev *EmailValidator) validateProviders(name, host string) error {
+	switch host {
+	case "gmail.com":
+		// Based on a sample of internal data, this reduces the number of
+		// bounced emails by 23%. Gmail documentation specifies that the
+		// min user name length is 6 characters. There may be some accounts
+		// from early gmail beta with shorter email addresses, but I think
+		// this reduces bounce rates enough to be worth adding for now.
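+		// For example, "ab@gmail.com" is rejected here, while a six
+		// character local part like "abcdef@gmail.com" passes.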
+		if len(name) < 6 {
+			return ErrInvalidEmailAddress
+		}
+	}
+	return nil
+}
+
+func (ev *EmailValidator) validateHost(ctx context.Context, host string) error {
+	_, err := validateEmailResolver.LookupMX(ctx, host)
+	if !isHostNotFound(err) {
+		return nil
+	}
+
+	_, err = validateEmailResolver.LookupHost(ctx, host)
+	if !isHostNotFound(err) {
+		return nil
+	}
+
+	// No A/AAAA or MX records were found.
+	return ErrInvalidEmailDNS
+}
+
+func isHostNotFound(err error) bool {
+	if err == nil {
+		// We had no err, so we treat the host as found. We don't inspect
+		// the returned MX records because RFC 5321 specifies that if an
+		// empty list of MXs is returned, the host itself should be treated
+		// as the MX[1].
+		//
+		// [1] https://www.rfc-editor.org/rfc/rfc5321.html#section-5.1
+		return false
+	}
+
+	// The lookup failed; try to get a positive assertion that the domain
+	// is not configured to receive email.
+	var dnsError *net.DNSError
+	if !errors.As(err, &dnsError) {
+		// We are unable to determine with absolute certainty that the email
+		// was invalid, so we err on the side of caution and report the host
+		// as found.
+		return false
+	}
+
+	// The error is a *net.DNSError; inspect it to see if we can be certain
+	// the domain has no mx records currently. For this we require that
+	// the error was not temporary or a timeout. If those are both false
+	// we trust the value in IsNotFound.
+	if !dnsError.IsTemporary && !dnsError.IsTimeout && dnsError.IsNotFound {
+		return true
+	}
+	return false
+}
diff --git a/auth_v2.169.0/internal/mailer/validate_test.go b/auth_v2.169.0/internal/mailer/validate_test.go
new file mode 100644
index 0000000..e1a86c2
--- /dev/null
+++ b/auth_v2.169.0/internal/mailer/validate_test.go
@@ -0,0 +1,287 @@
+package mailer
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"github.com/supabase/auth/internal/conf"
+)
+
+func TestEmailValidatorService(t *testing.T) {
+	ctx := context.Background()
+	ctx, cancel := context.WithTimeout(ctx, time.Second*60)
+	defer cancel()
+
+	testResVal := new(atomic.Value)
+	testResVal.Store(`{"valid": true}`)
+
+	testHdrsVal := new(atomic.Value)
+	testHdrsVal.Store(map[string]string{"apikey": "test"})
+
+	// testHeaders := map[string][]string{"apikey": []string{"test"}}
+	testHeaders := `{"apikey": ["test"]}`
+
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		key := r.Header.Get("apikey")
+		if key == "" {
+			fmt.Fprintln(w, `{"error": true}`)
+			return
+		}
+
+		fmt.Fprintln(w, testResVal.Load().(string))
+	}))
+	defer ts.Close()
+
+	// Return nil err when
+	// svc and extended checks both report email as valid
+	{
+		testResVal.Store(`{"valid": true}`)
+		cfg := conf.MailerConfiguration{
+			EmailValidationExtended:       true,
+			EmailValidationServiceURL:     ts.URL,
+			EmailValidationServiceHeaders: testHeaders,
+		}
+		if err := cfg.Validate(); err != nil {
+			t.Fatal(err)
+		}
+
+		ev := newEmailValidator(cfg)
+		err := ev.Validate(ctx, "chris.stockton@supabase.io")
+		if err != nil {
+			t.Fatalf("exp nil err; got %v", err)
+		}
+	}
+
+	// Return nil err when
+	// extended is disabled for a known invalid address
+	// service reports valid
+	{
+		testResVal.Store(`{"valid": true}`)
+
+		cfg := conf.MailerConfiguration{
+			EmailValidationExtended:       false,
+			EmailValidationServiceURL:     ts.URL,
+			EmailValidationServiceHeaders: testHeaders,
+		}
+		if err := cfg.Validate(); err != nil {
+			t.Fatal(err)
+		}
+
+		ev := newEmailValidator(cfg)
+		err := ev.Validate(ctx, "test@gmail.com")
+		if err != nil {
+			t.Fatalf("exp nil err; got %v", err)
+		}
+	}
+
+	// Return nil err when
+	// extended is disabled for a known invalid address
+	// service is disabled
+	{
+		testResVal.Store(`{"valid": false}`)
+
+		cfg := conf.MailerConfiguration{
+			EmailValidationExtended:       false,
+			EmailValidationServiceURL:     "",
+			EmailValidationServiceHeaders: "",
+		}
+		if err := cfg.Validate(); err != nil {
+			t.Fatal(err)
+		}
+
+		ev := newEmailValidator(cfg)
+		err := ev.Validate(ctx, "test@gmail.com")
+		if err != nil {
+			t.Fatalf("exp nil err; got %v", err)
+		}
+	}
+
+	// Return err when
+	// extended reports invalid
+	// service is disabled
+	{
+		testResVal.Store(`{"valid": true}`)
+		cfg := conf.MailerConfiguration{
+			EmailValidationExtended:       true,
+			EmailValidationServiceURL:     "",
+			EmailValidationServiceHeaders: "",
+		}
+		if err := cfg.Validate(); err != nil {
+			t.Fatal(err)
+		}
+
+		ev := newEmailValidator(cfg)
+		err := ev.Validate(ctx, "test@gmail.com")
+		if err == nil {
+			t.Fatal("exp non-nil err")
+		}
+	}
+
+	// Return err when
+	// extended reports invalid
+	// service reports valid
+	{
+		testResVal.Store(`{"valid": true}`)
+		cfg := conf.MailerConfiguration{
+			EmailValidationExtended:       true,
+			EmailValidationServiceURL:     ts.URL,
+			EmailValidationServiceHeaders: testHeaders,
+		}
+		if err := cfg.Validate(); err != nil {
+			t.Fatal(err)
+		}
+
+		ev := newEmailValidator(cfg)
+		err := ev.Validate(ctx, "test@gmail.com")
+		if err == nil {
+			t.Fatal("exp non-nil err")
+		}
+	}
+
+	// Return err when
+	// extended reports valid
+	// service reports invalid
+	{
+		testResVal.Store(`{"valid": false}`)
+		cfg := conf.MailerConfiguration{
+			EmailValidationExtended:       true,
+			EmailValidationServiceURL:     ts.URL,
+			EmailValidationServiceHeaders: testHeaders,
+		}
+		if err := cfg.Validate(); err != nil {
+			t.Fatal(err)
+		}
+
+		ev := newEmailValidator(cfg)
+		err := ev.Validate(ctx, "chris.stockton@supabase.io")
+		if err == nil {
+			t.Fatal("exp non-nil err")
+		}
+	}
+
+	// Return err when
+	// extended is disabled
+	// service reports invalid
+	{
+		testResVal.Store(`{"valid": false}`)
+
+		cfg := conf.MailerConfiguration{
+			EmailValidationExtended:       false,
+			EmailValidationServiceURL:     ts.URL,
+			EmailValidationServiceHeaders: testHeaders,
+		}
+		if err := cfg.Validate(); err != nil {
+			t.Fatal(err)
+		}
+
+		ev := newEmailValidator(cfg)
+		err := ev.Validate(ctx, "test@gmail.com")
+		if err == nil {
+			t.Fatal("exp non-nil err")
+		}
+	}
+}
+
+func TestValidateEmailExtended(t *testing.T) {
+	ctx := context.Background()
+	ctx, cancel := context.WithTimeout(ctx, time.Second*60)
+	defer cancel()
+
+	cases := []struct {
+		email   string
+		timeout time.Duration
+		err     string
+	}{
+		// valid (has mx record)
+		{email: "a@supabase.io"},
+		{email: "support@supabase.io"},
+		{email: "chris.stockton@supabase.io"},
+
+		// bad format
+		{email: "", err: "invalid_email_format"},
+		{email: "io", err: "invalid_email_format"},
+		{email: "supabase.io", err: "invalid_email_format"},
+		{email: "@supabase.io", err: "invalid_email_format"},
+		{email: "test@.supabase.io", err: "invalid_email_format"},
+
+		// invalid: valid mx records, but known invalid and often typed
+		// (invalidEmailMap)
+		{email: "test@email.com", err: "invalid_email_address"},
+		{email: "test@gmail.com", err: "invalid_email_address"},
+
+		// invalid: known invalid host (invalidHostMap)
+		{email: "test@test.com", err: "invalid_email_dns"},
+
+		// very common typo
+		{email: "test@gamil.com", err: "invalid_email_dns"},
+
+		// invalid: valid mx records, but invalid and often typed
+		// (invalidHostMap)
+		{email: "a@example.com", err: "invalid_email_dns"},
+		{email: "a@example.net", err: "invalid_email_dns"},
+		{email: "a@example.org", err: "invalid_email_dns"},
+
+		// invalid: no mx records
+		{email: "a@test", err: "invalid_email_dns"},
+		{email: "test@local", err: "invalid_email_dns"},
+		{email: "test@test.local", err: "invalid_email_dns"},
+		{email: "test@example", err: "invalid_email_dns"},
+		{email: "test@invalid", err: "invalid_email_dns"},
+
+		// syntactically valid, but reserved hosts that get typed a lot
+		{email: "a@invalid", err: "invalid_email_dns"},
+		{email: "a@a.invalid", err: "invalid_email_dns"},
+		{email: "test@invalid", err: "invalid_email_dns"},
+
+		// various invalid emails
+		{email: "test@test.localhost", err: "invalid_email_dns"},
+		{email: "test@invalid.example.com", err: "invalid_email_dns"},
+		{email: "test@no.such.email.host.supabase.io", err: "invalid_email_dns"},
+
+		// this low timeout should simulate a dns timeout, which should
+		// not be treated as an invalid email.
+		{email: "validemail@probablyaaaaaaaanotarealdomain.com",
+			timeout: time.Millisecond},
+
+		// likewise for a valid email
+		{email: "support@supabase.io", timeout: time.Millisecond},
+	}
+
+	cfg := conf.MailerConfiguration{
+		EmailValidationExtended:       true,
+		EmailValidationServiceURL:     "",
+		EmailValidationServiceHeaders: "",
+	}
+	ev := newEmailValidator(cfg)
+
+	for idx, tc := range cases {
+		func(timeout time.Duration) {
+			if timeout == 0 {
+				timeout = validateEmailTimeout
+			}
+
+			ctx, cancel := context.WithTimeout(ctx, timeout)
+			defer cancel()
+
+			now := time.Now()
+			err := ev.Validate(ctx, tc.email)
+			dur := time.Since(now)
+			if max := timeout + (time.Millisecond * 50); max < dur {
+				t.Fatal("timeout was not respected")
+			}
+
+			t.Logf("tc #%v - email %q", idx, tc.email)
+			if tc.err != "" {
+				require.Error(t, err)
+				require.Contains(t, err.Error(), tc.err)
+				return
+			}
+			require.NoError(t, err)
+		}(tc.timeout)
+	}
+}
diff --git a/auth_v2.169.0/internal/metering/record.go b/auth_v2.169.0/internal/metering/record.go
new file mode 100644
index 0000000..d9f9c5c
--- /dev/null
+++ b/auth_v2.169.0/internal/metering/record.go
@@ -0,0 +1,17 @@
+package metering
+
+import (
+	"github.com/gofrs/uuid"
+	"github.com/sirupsen/logrus"
+)
+
+var logger = logrus.StandardLogger().WithField("metering", true)
+
+func RecordLogin(loginType string, userID uuid.UUID) {
+	logger.WithFields(logrus.Fields{
+		"action":       "login",
+		"login_method": loginType,
+		"instance_id":  uuid.Nil.String(),
+		"user_id":      userID.String(),
+	}).Info("Login")
+}
diff --git a/auth_v2.169.0/internal/models/amr.go b/auth_v2.169.0/internal/models/amr.go
new file mode 100644
index 0000000..fdfd883
--- /dev/null
+++ b/auth_v2.169.0/internal/models/amr.go
@@ -0,0 +1,43 @@
+package models
+
+import (
+	"time"
+
+	"github.com/gobuffalo/pop/v6"
+	"github.com/gofrs/uuid"
+	"github.com/supabase/auth/internal/storage"
+)
+
+type AMRClaim struct {
+	ID                   uuid.UUID `json:"id" db:"id"`
+	SessionID            uuid.UUID `json:"session_id" db:"session_id"`
+	CreatedAt            time.Time `json:"created_at" db:"created_at"`
+	UpdatedAt            time.Time `json:"updated_at" db:"updated_at"`
+	AuthenticationMethod *string   `json:"authentication_method" db:"authentication_method"`
+}
+
+func (AMRClaim) TableName() string {
+	tableName := "mfa_amr_claims"
+	return tableName
+}
+
+// IsAAL2Claim reports whether this claim records a verified second factor
+// (TOTP, phone or WebAuthn sign-in), i.e. one that lifts a session to AAL2.
+// The dereference assumes AuthenticationMethod is non-nil, which holds for
+// claims written by AddClaimToSession below.
+func (cl *AMRClaim) IsAAL2Claim() bool { 
return *cl.AuthenticationMethod == TOTPSignIn.String() || *cl.AuthenticationMethod == MFAPhone.String() || *cl.AuthenticationMethod == MFAWebAuthn.String() +} + +func AddClaimToSession(tx *storage.Connection, sessionId uuid.UUID, authenticationMethod AuthenticationMethod) error { + id := uuid.Must(uuid.NewV4()) + + currentTime := time.Now() + return tx.RawQuery("INSERT INTO "+(&pop.Model{Value: AMRClaim{}}).TableName()+ + `(id, session_id, created_at, updated_at, authentication_method) values (?, ?, ?, ?, ?) + ON CONFLICT ON CONSTRAINT mfa_amr_claims_session_id_authentication_method_pkey + DO UPDATE SET updated_at = ?;`, id, sessionId, currentTime, currentTime, authenticationMethod.String(), currentTime).Exec() +} + +func (a *AMRClaim) GetAuthenticationMethod() string { + if a.AuthenticationMethod == nil { + return "" + } + return *(a.AuthenticationMethod) +} diff --git a/auth_v2.169.0/internal/models/audit_log_entry.go b/auth_v2.169.0/internal/models/audit_log_entry.go new file mode 100644 index 0000000..5bbc9b0 --- /dev/null +++ b/auth_v2.169.0/internal/models/audit_log_entry.go @@ -0,0 +1,166 @@ +package models + +import ( + "bytes" + "fmt" + "net/http" + "time" + + "github.com/gofrs/uuid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/supabase/auth/internal/observability" + "github.com/supabase/auth/internal/storage" +) + +type AuditAction string +type auditLogType string + +const ( + LoginAction AuditAction = "login" + LogoutAction AuditAction = "logout" + InviteAcceptedAction AuditAction = "invite_accepted" + UserSignedUpAction AuditAction = "user_signedup" + UserInvitedAction AuditAction = "user_invited" + UserDeletedAction AuditAction = "user_deleted" + UserModifiedAction AuditAction = "user_modified" + UserRecoveryRequestedAction AuditAction = "user_recovery_requested" + UserReauthenticateAction AuditAction = "user_reauthenticate_requested" + UserConfirmationRequestedAction AuditAction = "user_confirmation_requested" + UserRepeatedSignUpAction AuditAction = "user_repeated_signup" + UserUpdatePasswordAction AuditAction = "user_updated_password" + TokenRevokedAction AuditAction = "token_revoked" + TokenRefreshedAction AuditAction = "token_refreshed" + GenerateRecoveryCodesAction AuditAction = "generate_recovery_codes" + EnrollFactorAction AuditAction = "factor_in_progress" + UnenrollFactorAction AuditAction = "factor_unenrolled" + CreateChallengeAction AuditAction = "challenge_created" + VerifyFactorAction AuditAction = "verification_attempted" + DeleteFactorAction AuditAction = "factor_deleted" + DeleteRecoveryCodesAction AuditAction = "recovery_codes_deleted" + UpdateFactorAction AuditAction = "factor_updated" + MFACodeLoginAction AuditAction = "mfa_code_login" + IdentityUnlinkAction AuditAction = "identity_unlinked" + + account auditLogType = "account" + team auditLogType = "team" + token auditLogType = "token" + user auditLogType = "user" + factor auditLogType = "factor" + recoveryCodes auditLogType = "recovery_codes" +) + +var ActionLogTypeMap = map[AuditAction]auditLogType{ + LoginAction: account, + LogoutAction: account, + InviteAcceptedAction: account, + UserSignedUpAction: team, + UserInvitedAction: team, + UserDeletedAction: team, + TokenRevokedAction: token, + TokenRefreshedAction: token, + UserModifiedAction: user, + UserRecoveryRequestedAction: user, + UserConfirmationRequestedAction: user, + UserRepeatedSignUpAction: user, + UserUpdatePasswordAction: user, + GenerateRecoveryCodesAction: user, + EnrollFactorAction: factor, + UnenrollFactorAction: 
factor, + CreateChallengeAction: factor, + VerifyFactorAction: factor, + DeleteFactorAction: factor, + UpdateFactorAction: factor, + MFACodeLoginAction: factor, + DeleteRecoveryCodesAction: recoveryCodes, +} + +// AuditLogEntry is the database model for audit log entries. +type AuditLogEntry struct { + ID uuid.UUID `json:"id" db:"id"` + Payload JSONMap `json:"payload" db:"payload"` + CreatedAt time.Time `json:"created_at" db:"created_at"` + IPAddress string `json:"ip_address" db:"ip_address"` + + DONTUSEINSTANCEID uuid.UUID `json:"-" db:"instance_id"` +} + +func (AuditLogEntry) TableName() string { + tableName := "audit_log_entries" + return tableName +} + +func NewAuditLogEntry(r *http.Request, tx *storage.Connection, actor *User, action AuditAction, ipAddress string, traits map[string]interface{}) error { + id := uuid.Must(uuid.NewV4()) + + username := actor.GetEmail() + + if actor.GetPhone() != "" { + username = actor.GetPhone() + } + + payload := map[string]interface{}{ + "actor_id": actor.ID, + "actor_via_sso": actor.IsSSOUser, + "actor_username": username, + "action": action, + "log_type": ActionLogTypeMap[action], + } + l := AuditLogEntry{ + ID: id, + Payload: JSONMap(payload), + IPAddress: ipAddress, + } + + observability.LogEntrySetFields(r, logrus.Fields{ + "auth_event": logrus.Fields(payload), + }) + + if name, ok := actor.UserMetaData["full_name"]; ok { + l.Payload["actor_name"] = name + } + + if traits != nil { + l.Payload["traits"] = traits + } + + if err := tx.Create(&l); err != nil { + return errors.Wrap(err, "Database error creating audit log entry") + } + + return nil +} + +func FindAuditLogEntries(tx *storage.Connection, filterColumns []string, filterValue string, pageParams *Pagination) ([]*AuditLogEntry, error) { + q := tx.Q().Order("created_at desc").Where("instance_id = ?", uuid.Nil) + + if len(filterColumns) > 0 && filterValue != "" { + lf := "%" + filterValue + "%" + + builder := bytes.NewBufferString("(") + values := make([]interface{}, len(filterColumns)) + + for idx, col := range filterColumns { + builder.WriteString(fmt.Sprintf("payload->>'%s' ILIKE ?", col)) + values[idx] = lf + + if idx+1 < len(filterColumns) { + builder.WriteString(" OR ") + } + } + builder.WriteString(")") + + q = q.Where(builder.String(), values...) 
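+		// For example, filterColumns = ["actor_username", "action"] builds:
+		//   (payload->>'actor_username' ILIKE ? OR payload->>'action' ILIKE ?)
+		// with every placeholder bound to %filterValue%.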
+ } + + logs := []*AuditLogEntry{} + var err error + if pageParams != nil { + err = q.Paginate(int(pageParams.Page), int(pageParams.PerPage)).All(&logs) // #nosec G115 + pageParams.Count = uint64(q.Paginator.TotalEntriesSize) // #nosec G115 + } else { + err = q.All(&logs) + } + + return logs, err +} diff --git a/auth_v2.169.0/internal/models/challenge.go b/auth_v2.169.0/internal/models/challenge.go new file mode 100644 index 0000000..3de5b4d --- /dev/null +++ b/auth_v2.169.0/internal/models/challenge.go @@ -0,0 +1,124 @@ +package models + +import ( + "database/sql/driver" + "fmt" + + "encoding/json" + "github.com/go-webauthn/webauthn/webauthn" + "github.com/gofrs/uuid" + "github.com/supabase/auth/internal/crypto" + "github.com/supabase/auth/internal/storage" + "time" +) + +type Challenge struct { + ID uuid.UUID `json:"challenge_id" db:"id"` + FactorID uuid.UUID `json:"factor_id" db:"factor_id"` + CreatedAt time.Time `json:"created_at" db:"created_at"` + VerifiedAt *time.Time `json:"verified_at,omitempty" db:"verified_at"` + IPAddress string `json:"ip_address" db:"ip_address"` + Factor *Factor `json:"factor,omitempty" belongs_to:"factor"` + OtpCode string `json:"otp_code,omitempty" db:"otp_code"` + WebAuthnSessionData *WebAuthnSessionData `json:"web_authn_session_data,omitempty" db:"web_authn_session_data"` +} + +type WebAuthnSessionData struct { + *webauthn.SessionData +} + +func (s *WebAuthnSessionData) Scan(value interface{}) error { + if value == nil { + s.SessionData = nil + return nil + } + + // Handle byte and string as a precaution, in postgres driver, json/jsonb should be returned as []byte + var data []byte + switch v := value.(type) { + case []byte: + data = v + case string: + data = []byte(v) + default: + panic(fmt.Sprintf("unsupported type for web_authn_session_data: %T", value)) + } + + if len(data) == 0 { + s.SessionData = nil + return nil + } + if s.SessionData == nil { + s.SessionData = &webauthn.SessionData{} + } + return json.Unmarshal(data, s.SessionData) + +} + +func (s *WebAuthnSessionData) Value() (driver.Value, error) { + if s == nil || s.SessionData == nil { + return nil, nil + } + return json.Marshal(s.SessionData) +} + +func (ws *WebAuthnSessionData) ToChallenge(factorID uuid.UUID, ipAddress string) *Challenge { + id := uuid.Must(uuid.NewV4()) + return &Challenge{ + ID: id, + FactorID: factorID, + IPAddress: ipAddress, + WebAuthnSessionData: &WebAuthnSessionData{ + ws.SessionData, + }, + } + +} + +func (Challenge) TableName() string { + tableName := "mfa_challenges" + return tableName +} + +// Update the verification timestamp +func (c *Challenge) Verify(tx *storage.Connection) error { + now := time.Now() + c.VerifiedAt = &now + return tx.UpdateOnly(c, "verified_at") +} + +func (c *Challenge) HasExpired(expiryDuration float64) bool { + return time.Now().After(c.GetExpiryTime(expiryDuration)) +} + +func (c *Challenge) GetExpiryTime(expiryDuration float64) time.Time { + return c.CreatedAt.Add(time.Second * time.Duration(expiryDuration)) +} + +func (c *Challenge) SetOtpCode(otpCode string, encrypt bool, encryptionKeyID, encryptionKey string) error { + c.OtpCode = otpCode + if encrypt { + es, err := crypto.NewEncryptedString(c.ID.String(), []byte(otpCode), encryptionKeyID, encryptionKey) + if err != nil { + return err + } + + c.OtpCode = es.String() + } + return nil + +} + +func (c *Challenge) GetOtpCode(decryptionKeys map[string]string, encrypt bool, encryptionKeyID string) (string, bool, error) { + if es := crypto.ParseEncryptedString(c.OtpCode); es != nil { + 
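+		// The stored code is an encrypted-string envelope: decrypt it with
+		// the provided key map and tell the caller to re-encrypt when the
+		// key has rotated away from encryptionKeyID.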
bytes, err := es.Decrypt(c.ID.String(), decryptionKeys) + if err != nil { + return "", false, err + } + + return string(bytes), encrypt && es.ShouldReEncrypt(encryptionKeyID), nil + } + + return c.OtpCode, encrypt, nil + +} diff --git a/auth_v2.169.0/internal/models/cleanup.go b/auth_v2.169.0/internal/models/cleanup.go new file mode 100644 index 0000000..9669c8d --- /dev/null +++ b/auth_v2.169.0/internal/models/cleanup.go @@ -0,0 +1,136 @@ +package models + +import ( + "context" + "fmt" + "sync/atomic" + + "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + + "go.opentelemetry.io/otel/attribute" + + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/observability" + "github.com/supabase/auth/internal/storage" +) + +type Cleaner interface { + Clean(*storage.Connection) (int, error) +} + +type Cleanup struct { + cleanupStatements []string + + // cleanupNext holds an atomically incrementing value that determines which of + // the cleanupStatements will be run next. + cleanupNext uint32 + + // cleanupAffectedRows tracks an OpenTelemetry metric on the total number of + // cleaned up rows. + cleanupAffectedRows atomic.Int64 +} + +func NewCleanup(config *conf.GlobalConfiguration) *Cleanup { + tableUsers := User{}.TableName() + tableRefreshTokens := RefreshToken{}.TableName() + tableSessions := Session{}.TableName() + tableRelayStates := SAMLRelayState{}.TableName() + tableFlowStates := FlowState{}.TableName() + tableMFAChallenges := Challenge{}.TableName() + tableMFAFactors := Factor{}.TableName() + + c := &Cleanup{} + + // These statements intentionally use SELECT ... FOR UPDATE SKIP LOCKED + // as this makes sure that only rows that are not being used in another + // transaction are deleted. These deletes are thus very quick and + // efficient, as they don't wait on other transactions. 
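+	// With the default table names, the first statement below expands to:
+	//
+	//   delete from "refresh_tokens" where id in (select id from
+	//   "refresh_tokens" where revoked is true and updated_at < now() -
+	//   interval '24 hours' limit 100 for update skip locked);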
+ c.cleanupStatements = append(c.cleanupStatements, + fmt.Sprintf("delete from %q where id in (select id from %q where revoked is true and updated_at < now() - interval '24 hours' limit 100 for update skip locked);", tableRefreshTokens, tableRefreshTokens), + fmt.Sprintf("update %q set revoked = true, updated_at = now() where id in (select %q.id from %q join %q on %q.session_id = %q.id where %q.not_after < now() - interval '24 hours' and %q.revoked is false limit 100 for update skip locked);", tableRefreshTokens, tableRefreshTokens, tableRefreshTokens, tableSessions, tableRefreshTokens, tableSessions, tableSessions, tableRefreshTokens), + // sessions are deleted after 72 hours to allow refresh tokens + // to be deleted piecemeal; 10 at once so that cascades don't + // overwork the database + fmt.Sprintf("delete from %q where id in (select id from %q where not_after < now() - interval '72 hours' limit 10 for update skip locked);", tableSessions, tableSessions), + fmt.Sprintf("delete from %q where id in (select id from %q where created_at < now() - interval '24 hours' limit 100 for update skip locked);", tableRelayStates, tableRelayStates), + fmt.Sprintf("delete from %q where id in (select id from %q where created_at < now() - interval '24 hours' limit 100 for update skip locked);", tableFlowStates, tableFlowStates), + fmt.Sprintf("delete from %q where id in (select id from %q where created_at < now() - interval '24 hours' limit 100 for update skip locked);", tableMFAChallenges, tableMFAChallenges), + fmt.Sprintf("delete from %q where id in (select id from %q where created_at < now() - interval '24 hours' and status = 'unverified' limit 100 for update skip locked);", tableMFAFactors, tableMFAFactors), + ) + + if config.External.AnonymousUsers.Enabled { + // delete anonymous users older than 30 days + c.cleanupStatements = append(c.cleanupStatements, + fmt.Sprintf("delete from %q where id in (select id from %q where created_at < now() - interval '30 days' and is_anonymous is true limit 100 for update skip locked);", tableUsers, tableUsers), + ) + } + + if config.Sessions.Timebox != nil { + timeboxSeconds := int((*config.Sessions.Timebox).Seconds()) + + c.cleanupStatements = append(c.cleanupStatements, fmt.Sprintf("delete from %q where id in (select id from %q where created_at + interval '%d seconds' < now() - interval '24 hours' limit 100 for update skip locked);", tableSessions, tableSessions, timeboxSeconds)) + } + + if config.Sessions.InactivityTimeout != nil { + inactivitySeconds := int((*config.Sessions.InactivityTimeout).Seconds()) + + // delete sessions with a refreshed_at column + c.cleanupStatements = append(c.cleanupStatements, fmt.Sprintf("delete from %q where id in (select id from %q where refreshed_at is not null and refreshed_at + interval '%d seconds' < now() - interval '24 hours' limit 100 for update skip locked);", tableSessions, tableSessions, inactivitySeconds)) + + // delete sessions without a refreshed_at column by looking for + // unrevoked refresh_tokens + c.cleanupStatements = append(c.cleanupStatements, fmt.Sprintf("delete from %q where id in (select %q.id as id from %q, %q where %q.session_id = %q.id and %q.refreshed_at is null and %q.revoked is false and %q.updated_at + interval '%d seconds' < now() - interval '24 hours' limit 100 for update skip locked)", tableSessions, tableSessions, tableSessions, tableRefreshTokens, tableRefreshTokens, tableSessions, tableSessions, tableRefreshTokens, tableRefreshTokens, inactivitySeconds)) + } + + meter := otel.Meter("gotrue") 
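+	// gotrue_cleanup_affected_rows is an asynchronous (observable)
+	// instrument: the callback below reports the running total kept in
+	// cleanupAffectedRows at collection time, rather than recording a
+	// delta on every Clean call.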
+
+	_, err := meter.Int64ObservableCounter(
+		"gotrue_cleanup_affected_rows",
+		metric.WithDescription("Number of affected rows from cleaning up stale entities"),
+		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
+			o.Observe(c.cleanupAffectedRows.Load())
+			return nil
+		}),
+	)
+
+	if err != nil {
+		logrus.WithError(err).Error("unable to register the gotrue_cleanup_affected_rows counter metric")
+	}
+
+	return c
+}
+
+// Clean removes stale entities in the database. You can call it on each
+// request or as a periodic background job. It does quick lockless updates or
+// deletes, has an execution timeout and acquire timeout so that cleanups do
+// not affect performance of other database jobs. Note that calling this does
+// not clean up the whole database, but does a small piecemeal clean up each
+// time when called.
+func (c *Cleanup) Clean(db *storage.Connection) (int, error) {
+	ctx, span := observability.Tracer("gotrue").Start(db.Context(), "database-cleanup")
+	defer span.End()
+
+	affectedRows := 0
+	defer func() {
+		// Evaluate affectedRows when the function returns; passing it
+		// directly to a deferred call would capture the value (0) at the
+		// time the defer is queued.
+		span.SetAttributes(attribute.Int64("gotrue.cleanup.affected_rows", int64(affectedRows)))
+	}()
+
+	if err := db.WithContext(ctx).Transaction(func(tx *storage.Connection) error {
+		nextIndex := atomic.AddUint32(&c.cleanupNext, 1) % uint32(len(c.cleanupStatements)) // #nosec G115
+		statement := c.cleanupStatements[nextIndex]
+
+		count, terr := tx.RawQuery(statement).ExecWithCount()
+		if terr != nil {
+			return terr
+		}
+
+		affectedRows += count
+
+		return nil
+	}); err != nil {
+		return affectedRows, err
+	}
+	c.cleanupAffectedRows.Add(int64(affectedRows))
+
+	return affectedRows, nil
+}
diff --git a/auth_v2.169.0/internal/models/cleanup_test.go b/auth_v2.169.0/internal/models/cleanup_test.go
new file mode 100644
index 0000000..618fbba
--- /dev/null
+++ b/auth_v2.169.0/internal/models/cleanup_test.go
@@ -0,0 +1,31 @@
+package models
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/supabase/auth/internal/conf"
+	"github.com/supabase/auth/internal/storage/test"
+)
+
+func TestCleanup(t *testing.T) {
+	globalConfig, err := conf.LoadGlobal(modelsTestConfig)
+	require.NoError(t, err)
+	conn, err := test.SetupDBConnection(globalConfig)
+	require.NoError(t, err)
+
+	timebox := 10 * time.Second
+	inactivityTimeout := 5 * time.Second
+	globalConfig.Sessions.Timebox = &timebox
+	globalConfig.Sessions.InactivityTimeout = &inactivityTimeout
+	globalConfig.External.AnonymousUsers.Enabled = true
+
+	cleanup := NewCleanup(globalConfig)
+
+	for i := 0; i < 100; i += 1 {
+		_, err := cleanup.Clean(conn)
+		require.NoError(t, err)
+	}
+}
diff --git a/auth_v2.169.0/internal/models/connection.go b/auth_v2.169.0/internal/models/connection.go
new file mode 100644
index 0000000..80acccc
--- /dev/null
+++ b/auth_v2.169.0/internal/models/connection.go
@@ -0,0 +1,62 @@
+package models
+
+import (
+	"github.com/gobuffalo/pop/v6"
+	"github.com/supabase/auth/internal/storage"
+)
+
+type Pagination struct {
+	Page    uint64
+	PerPage uint64
+	Count   uint64
+}
+
+func (p *Pagination) Offset() uint64 {
+	return (p.Page - 1) * p.PerPage
+}
+
+type SortDirection string
+
+const Ascending SortDirection = "ASC"
+const Descending SortDirection = "DESC"
+const CreatedAt = "created_at"
+
+type SortParams struct {
+	Fields []SortField
+}
+
+type SortField struct {
+	Name string
+	Dir  SortDirection
+}
+
+// TruncateAll deletes all data from the database, as managed by GoTrue. Not
+// intended for use outside of tests.
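+// Each table is cleared with TRUNCATE ... CASCADE, so rows in referencing
+// tables are removed too and the order of the list is not significant.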
+func TruncateAll(conn *storage.Connection) error {
+	return conn.Transaction(func(tx *storage.Connection) error {
+		tables := []string{
+			(&pop.Model{Value: User{}}).TableName(),
+			(&pop.Model{Value: Identity{}}).TableName(),
+			(&pop.Model{Value: RefreshToken{}}).TableName(),
+			(&pop.Model{Value: AuditLogEntry{}}).TableName(),
+			(&pop.Model{Value: Session{}}).TableName(),
+			(&pop.Model{Value: Factor{}}).TableName(),
+			(&pop.Model{Value: Challenge{}}).TableName(),
+			(&pop.Model{Value: AMRClaim{}}).TableName(),
+			(&pop.Model{Value: SSOProvider{}}).TableName(),
+			(&pop.Model{Value: SSODomain{}}).TableName(),
+			(&pop.Model{Value: SAMLProvider{}}).TableName(),
+			(&pop.Model{Value: SAMLRelayState{}}).TableName(),
+			(&pop.Model{Value: FlowState{}}).TableName(),
+			(&pop.Model{Value: OneTimeToken{}}).TableName(),
+		}
+
+		for _, tableName := range tables {
+			if err := tx.RawQuery("TRUNCATE " + tableName + " CASCADE").Exec(); err != nil {
+				return err
+			}
+		}
+
+		return nil
+	})
+}
diff --git a/auth_v2.169.0/internal/models/db_test.go b/auth_v2.169.0/internal/models/db_test.go
new file mode 100644
index 0000000..c3d6ab2
--- /dev/null
+++ b/auth_v2.169.0/internal/models/db_test.go
@@ -0,0 +1,24 @@
+package models
+
+import (
+	"testing"
+
+	"github.com/gobuffalo/pop/v6"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestTableNameNamespacing(t *testing.T) {
+	cases := []struct {
+		expected string
+		value    interface{}
+	}{
+		{expected: "audit_log_entries", value: []*AuditLogEntry{}},
+		{expected: "refresh_tokens", value: []*RefreshToken{}},
+		{expected: "users", value: []*User{}},
+	}
+
+	for _, tc := range cases {
+		m := &pop.Model{Value: tc.value}
+		assert.Equal(t, tc.expected, m.TableName())
+	}
+}
diff --git a/auth_v2.169.0/internal/models/errors.go b/auth_v2.169.0/internal/models/errors.go
new file mode 100644
index 0000000..96f8319
--- /dev/null
+++ b/auth_v2.169.0/internal/models/errors.go
@@ -0,0 +1,125 @@
+package models
+
+// IsNotFoundError returns whether an error represents a "not found" error.
+func IsNotFoundError(err error) bool {
+	switch err.(type) {
+	case UserNotFoundError, *UserNotFoundError:
+		return true
+	case SessionNotFoundError, *SessionNotFoundError:
+		return true
+	case ConfirmationTokenNotFoundError, *ConfirmationTokenNotFoundError:
+		return true
+	case ConfirmationOrRecoveryTokenNotFoundError, *ConfirmationOrRecoveryTokenNotFoundError:
+		return true
+	case RefreshTokenNotFoundError, *RefreshTokenNotFoundError:
+		return true
+	case IdentityNotFoundError, *IdentityNotFoundError:
+		return true
+	case ChallengeNotFoundError, *ChallengeNotFoundError:
+		return true
+	case FactorNotFoundError, *FactorNotFoundError:
+		return true
+	case SSOProviderNotFoundError, *SSOProviderNotFoundError:
+		return true
+	case SAMLRelayStateNotFoundError, *SAMLRelayStateNotFoundError:
+		return true
+	case FlowStateNotFoundError, *FlowStateNotFoundError:
+		return true
+	case OneTimeTokenNotFoundError, *OneTimeTokenNotFoundError:
+		return true
+	}
+	return false
+}
+
+type SessionNotFoundError struct{}
+
+func (e SessionNotFoundError) Error() string {
+	return "Session not found"
+}
+
+// UserNotFoundError represents when a user is not found.
+type UserNotFoundError struct{}
+
+func (e UserNotFoundError) Error() string {
+	return "User not found"
+}
+
+// IdentityNotFoundError represents when an identity is not found.
+type IdentityNotFoundError struct{}
+
+func (e IdentityNotFoundError) Error() string {
+	return "Identity not found"
+}
+
+// ConfirmationOrRecoveryTokenNotFoundError represents when a confirmation or recovery token is not found.
+type ConfirmationOrRecoveryTokenNotFoundError struct{}
+
+func (e ConfirmationOrRecoveryTokenNotFoundError) Error() string {
+	return "Confirmation or Recovery Token not found"
+}
+
+// ConfirmationTokenNotFoundError represents when a confirmation token is not found.
+type ConfirmationTokenNotFoundError struct{}
+
+func (e ConfirmationTokenNotFoundError) Error() string {
+	return "Confirmation Token not found"
+}
+
+// RefreshTokenNotFoundError represents when a refresh token is not found.
+type RefreshTokenNotFoundError struct{}
+
+func (e RefreshTokenNotFoundError) Error() string {
+	return "Refresh Token not found"
+}
+
+// FactorNotFoundError represents when a factor is not found.
+type FactorNotFoundError struct{}
+
+func (e FactorNotFoundError) Error() string {
+	return "Factor not found"
+}
+
+// ChallengeNotFoundError represents when a challenge is not found.
+type ChallengeNotFoundError struct{}
+
+func (e ChallengeNotFoundError) Error() string {
+	return "Challenge not found"
+}
+
+// SSOProviderNotFoundError represents an error when an SSO Provider can't be
+// found.
+type SSOProviderNotFoundError struct{}
+
+func (e SSOProviderNotFoundError) Error() string {
+	return "SSO Identity Provider not found"
+}
+
+// SAMLRelayStateNotFoundError represents an error when a SAML relay state
+// can't be found.
+type SAMLRelayStateNotFoundError struct{}
+
+func (e SAMLRelayStateNotFoundError) Error() string {
+	return "SAML RelayState not found"
+}
+
+// FlowStateNotFoundError represents an error when a FlowState can't be
+// found.
+type FlowStateNotFoundError struct{} + +func (e FlowStateNotFoundError) Error() string { + return "Flow State not found" +} + +func IsUniqueConstraintViolatedError(err error) bool { + switch err.(type) { + case UserEmailUniqueConflictError, *UserEmailUniqueConflictError: + return true + } + return false +} + +type UserEmailUniqueConflictError struct{} + +func (e UserEmailUniqueConflictError) Error() string { + return "User email unique constraint violated" +} diff --git a/auth_v2.169.0/internal/models/factor.go b/auth_v2.169.0/internal/models/factor.go new file mode 100644 index 0000000..a88874d --- /dev/null +++ b/auth_v2.169.0/internal/models/factor.go @@ -0,0 +1,398 @@ +package models + +import ( + "database/sql" + "database/sql/driver" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/go-webauthn/webauthn/webauthn" + "github.com/gobuffalo/pop/v6" + "github.com/gofrs/uuid" + "github.com/pkg/errors" + "github.com/supabase/auth/internal/crypto" + "github.com/supabase/auth/internal/storage" +) + +type FactorState int + +const ( + FactorStateUnverified FactorState = iota + FactorStateVerified +) + +func (factorState FactorState) String() string { + switch factorState { + case FactorStateUnverified: + return "unverified" + case FactorStateVerified: + return "verified" + } + return "" +} + +const TOTP = "totp" +const Phone = "phone" +const WebAuthn = "webauthn" + +type AuthenticationMethod int + +const ( + OAuth AuthenticationMethod = iota + PasswordGrant + OTP + TOTPSignIn + MFAPhone + MFAWebAuthn + SSOSAML + Recovery + Invite + MagicLink + EmailSignup + EmailChange + TokenRefresh + Anonymous +) + +func (authMethod AuthenticationMethod) String() string { + switch authMethod { + case OAuth: + return "oauth" + case PasswordGrant: + return "password" + case OTP: + return "otp" + case TOTPSignIn: + return "totp" + case Recovery: + return "recovery" + case Invite: + return "invite" + case SSOSAML: + return "sso/saml" + case MagicLink: + return "magiclink" + case EmailSignup: + return "email/signup" + case EmailChange: + return "email_change" + case TokenRefresh: + return "token_refresh" + case Anonymous: + return "anonymous" + case MFAPhone: + return "mfa/phone" + case MFAWebAuthn: + return "mfa/webauthn" + } + return "" +} + +func ParseAuthenticationMethod(authMethod string) (AuthenticationMethod, error) { + if strings.HasSuffix(authMethod, "signup") { + authMethod = "email/signup" + } + switch authMethod { + case "oauth": + return OAuth, nil + case "password": + return PasswordGrant, nil + case "otp": + return OTP, nil + case "totp": + return TOTPSignIn, nil + case "recovery": + return Recovery, nil + case "invite": + return Invite, nil + case "sso/saml": + return SSOSAML, nil + case "magiclink": + return MagicLink, nil + case "email/signup": + return EmailSignup, nil + case "email_change": + return EmailChange, nil + case "token_refresh": + return TokenRefresh, nil + case "mfa/sms": + return MFAPhone, nil + case "mfa/webauthn": + return MFAWebAuthn, nil + } + return 0, fmt.Errorf("unsupported authentication method %q", authMethod) +} + +type Factor struct { + ID uuid.UUID `json:"id" db:"id"` + // TODO: Consider removing this nested user field. We don't use it. 
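+	// The belongs_to tag lets the pop ORM eager-load the owning user on
+	// request; nothing in this package relies on that today.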
+ User User `json:"-" belongs_to:"user"` + UserID uuid.UUID `json:"-" db:"user_id"` + CreatedAt time.Time `json:"created_at" db:"created_at"` + UpdatedAt time.Time `json:"updated_at" db:"updated_at"` + Status string `json:"status" db:"status"` + FriendlyName string `json:"friendly_name,omitempty" db:"friendly_name"` + Secret string `json:"-" db:"secret"` + FactorType string `json:"factor_type" db:"factor_type"` + Challenge []Challenge `json:"-" has_many:"challenges"` + Phone storage.NullString `json:"phone" db:"phone"` + LastChallengedAt *time.Time `json:"last_challenged_at" db:"last_challenged_at"` + WebAuthnCredential *WebAuthnCredential `json:"-" db:"web_authn_credential"` + WebAuthnAAGUID *uuid.UUID `json:"web_authn_aaguid,omitempty" db:"web_authn_aaguid"` +} + +type WebAuthnCredential struct { + webauthn.Credential +} + +func (wc *WebAuthnCredential) Value() (driver.Value, error) { + if wc == nil { + return nil, nil + } + return json.Marshal(wc) +} + +func (wc *WebAuthnCredential) Scan(value interface{}) error { + if value == nil { + wc.Credential = webauthn.Credential{} + return nil + } + // Handle byte and string as a precaution, in postgres driver, json/jsonb should be returned as []byte + var data []byte + switch v := value.(type) { + case []byte: + data = v + case string: + data = []byte(v) + default: + return fmt.Errorf("unsupported type for web_authn_credential: %T", value) + } + if len(data) == 0 { + wc.Credential = webauthn.Credential{} + return nil + } + return json.Unmarshal(data, &wc.Credential) +} + +func (Factor) TableName() string { + tableName := "mfa_factors" + return tableName +} + +func NewFactor(user *User, friendlyName string, factorType string, state FactorState) *Factor { + id := uuid.Must(uuid.NewV4()) + + factor := &Factor{ + ID: id, + UserID: user.ID, + Status: state.String(), + FriendlyName: friendlyName, + FactorType: factorType, + } + return factor +} + +func NewTOTPFactor(user *User, friendlyName string) *Factor { + return NewFactor(user, friendlyName, TOTP, FactorStateUnverified) +} + +func NewPhoneFactor(user *User, phone, friendlyName string) *Factor { + factor := NewFactor(user, friendlyName, Phone, FactorStateUnverified) + factor.Phone = storage.NullString(phone) + return factor +} + +func NewWebAuthnFactor(user *User, friendlyName string) *Factor { + factor := NewFactor(user, friendlyName, WebAuthn, FactorStateUnverified) + return factor +} + +func (f *Factor) SetSecret(secret string, encrypt bool, encryptionKeyID, encryptionKey string) error { + f.Secret = secret + if encrypt { + es, err := crypto.NewEncryptedString(f.ID.String(), []byte(secret), encryptionKeyID, encryptionKey) + if err != nil { + return err + } + + f.Secret = es.String() + } + + return nil +} + +func (f *Factor) GetSecret(decryptionKeys map[string]string, encrypt bool, encryptionKeyID string) (string, bool, error) { + if es := crypto.ParseEncryptedString(f.Secret); es != nil { + bytes, err := es.Decrypt(f.ID.String(), decryptionKeys) + if err != nil { + return "", false, err + } + + return string(bytes), encrypt && es.ShouldReEncrypt(encryptionKeyID), nil + } + + return f.Secret, encrypt, nil +} + +func (f *Factor) SaveWebAuthnCredential(tx *storage.Connection, credential *webauthn.Credential) error { + f.WebAuthnCredential = &WebAuthnCredential{ + Credential: *credential, + } + + if len(credential.Authenticator.AAGUID) > 0 { + aaguidUUID, err := uuid.FromBytes(credential.Authenticator.AAGUID) + if err != nil { + return fmt.Errorf("WebAuthn authenticator AAGUID is not UUID: %w", 
err) + } + f.WebAuthnAAGUID = &aaguidUUID + } else { + f.WebAuthnAAGUID = nil + } + + return tx.UpdateOnly(f, "web_authn_credential", "web_authn_aaguid", "updated_at") +} + +func FindFactorByFactorID(conn *storage.Connection, factorID uuid.UUID) (*Factor, error) { + var factor Factor + err := conn.Find(&factor, factorID) + if err != nil && errors.Cause(err) == sql.ErrNoRows { + return nil, FactorNotFoundError{} + } else if err != nil { + return nil, err + } + return &factor, nil +} + +func DeleteUnverifiedFactors(tx *storage.Connection, user *User, factorType string) error { + if err := tx.RawQuery("DELETE FROM "+(&pop.Model{Value: Factor{}}).TableName()+" WHERE user_id = ? and status = ? and factor_type = ?", user.ID, FactorStateUnverified.String(), factorType).Exec(); err != nil { + return err + } + + return nil +} + +func (f *Factor) CreateChallenge(ipAddress string) *Challenge { + id := uuid.Must(uuid.NewV4()) + challenge := &Challenge{ + ID: id, + FactorID: f.ID, + IPAddress: ipAddress, + } + + return challenge +} +func (f *Factor) WriteChallengeToDatabase(tx *storage.Connection, challenge *Challenge) error { + if challenge.FactorID != f.ID { + return errors.New("Can only write challenges that you own") + } + now := time.Now() + f.LastChallengedAt = &now + if terr := tx.Create(challenge); terr != nil { + return terr + } + if err := tx.UpdateOnly(f, "last_challenged_at"); err != nil { + return err + } + return nil +} + +func (f *Factor) CreatePhoneChallenge(ipAddress string, otpCode string, encrypt bool, encryptionKeyID, encryptionKey string) (*Challenge, error) { + phoneChallenge := f.CreateChallenge(ipAddress) + if err := phoneChallenge.SetOtpCode(otpCode, encrypt, encryptionKeyID, encryptionKey); err != nil { + return nil, err + } + return phoneChallenge, nil +} + +// UpdateFriendlyName changes the friendly name +func (f *Factor) UpdateFriendlyName(tx *storage.Connection, friendlyName string) error { + f.FriendlyName = friendlyName + return tx.UpdateOnly(f, "friendly_name", "updated_at") +} + +func (f *Factor) UpdatePhone(tx *storage.Connection, phone string) error { + f.Phone = storage.NullString(phone) + return tx.UpdateOnly(f, "phone", "updated_at") +} + +// UpdateStatus modifies the factor status +func (f *Factor) UpdateStatus(tx *storage.Connection, state FactorState) error { + f.Status = state.String() + return tx.UpdateOnly(f, "status", "updated_at") +} + +func (f *Factor) DowngradeSessionsToAAL1(tx *storage.Connection) error { + sessions, err := FindSessionsByFactorID(tx, f.ID) + if err != nil { + return err + } + for _, session := range sessions { + if err := tx.RawQuery("DELETE FROM "+(&pop.Model{Value: AMRClaim{}}).TableName()+" WHERE session_id = ? AND authentication_method = ?", session.ID, f.FactorType).Exec(); err != nil { + return err + } + } + return updateFactorAssociatedSessions(tx, f.UserID, f.ID, AAL1.String()) +} + +func (f *Factor) IsVerified() bool { + return f.Status == FactorStateVerified.String() +} + +func (f *Factor) IsUnverified() bool { + return f.Status == FactorStateUnverified.String() +} + +func (f *Factor) IsPhoneFactor() bool { + return f.FactorType == Phone +} + +func (f *Factor) FindChallengeByID(conn *storage.Connection, challengeID uuid.UUID) (*Challenge, error) { + var challenge Challenge + err := conn.Q().Where("id = ? 
and factor_id = ?", challengeID, f.ID).First(&challenge)
+ if err != nil && errors.Cause(err) == sql.ErrNoRows {
+ return nil, ChallengeNotFoundError{}
+ } else if err != nil {
+ return nil, err
+ }
+ return &challenge, nil
+}
+
+func DeleteFactorsByUserId(tx *storage.Connection, userId uuid.UUID) error {
+ if err := tx.RawQuery("DELETE FROM "+(&pop.Model{Value: Factor{}}).TableName()+" WHERE user_id = ?", userId).Exec(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeleteExpiredFactors(tx *storage.Connection, validityDuration time.Duration) error {
+ totalSeconds := int64(validityDuration / time.Second)
+ validityInterval := fmt.Sprintf("interval '%d seconds'", totalSeconds)
+
+ factorTable := (&pop.Model{Value: Factor{}}).TableName()
+ challengeTable := (&pop.Model{Value: Challenge{}}).TableName()
+
+ query := fmt.Sprintf(`delete from %q where status != 'verified' and not exists (select * from %q where %q.id = %q.factor_id ) and created_at + %s < current_timestamp;`, factorTable, challengeTable, factorTable, challengeTable, validityInterval)
+ if err := tx.RawQuery(query).Exec(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (f *Factor) FindLatestUnexpiredChallenge(tx *storage.Connection, expiryDuration float64) (*Challenge, error) {
+ now := time.Now()
+ var challenge Challenge
+ expirationTime := now.Add(-1 * time.Duration(expiryDuration) * time.Second)
+
+ err := tx.Where("sent_at > ? and factor_id = ?", expirationTime, f.ID).
+ Order("sent_at desc").
+ First(&challenge)
+
+ if err != nil && errors.Cause(err) == sql.ErrNoRows {
+ return nil, ChallengeNotFoundError{}
+ } else if err != nil {
+ return nil, err
+ }
+ return &challenge, nil
+}
diff --git a/auth_v2.169.0/internal/models/factor_test.go b/auth_v2.169.0/internal/models/factor_test.go
new file mode 100644
index 0000000..614cff2
--- /dev/null
+++ b/auth_v2.169.0/internal/models/factor_test.go
@@ -0,0 +1,74 @@
+package models
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/gofrs/uuid"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+
+ "github.com/supabase/auth/internal/conf"
+ "github.com/supabase/auth/internal/storage"
+ "github.com/supabase/auth/internal/storage/test"
+)
+
+type FactorTestSuite struct {
+ suite.Suite
+ db *storage.Connection
+ TestFactor *Factor
+}
+
+func TestFactor(t *testing.T) {
+ globalConfig, err := conf.LoadGlobal(modelsTestConfig)
+ require.NoError(t, err)
+ conn, err := test.SetupDBConnection(globalConfig)
+ require.NoError(t, err)
+ ts := &FactorTestSuite{
+ db: conn,
+ }
+ defer ts.db.Close()
+ suite.Run(t, ts)
+}
+
+func (ts *FactorTestSuite) SetupTest() {
+ TruncateAll(ts.db)
+ user, err := NewUser("", "agenericemail@gmail.com", "secret", "test", nil)
+ require.NoError(ts.T(), err)
+ require.NoError(ts.T(), ts.db.Create(user))
+
+ factor := NewTOTPFactor(user, "asimplename")
+ require.NoError(ts.T(), factor.SetSecret("topsecret", false, "", ""))
+ require.NoError(ts.T(), ts.db.Create(factor))
+ ts.TestFactor = factor
+}
+
+func (ts *FactorTestSuite) TestFindFactorByFactorID() {
+ n, err := FindFactorByFactorID(ts.db, ts.TestFactor.ID)
+ require.NoError(ts.T(), err)
+ require.Equal(ts.T(), ts.TestFactor.ID, n.ID)
+
+ _, err = FindFactorByFactorID(ts.db, uuid.Nil)
+ require.EqualError(ts.T(), err, FactorNotFoundError{}.Error())
+}
+
+func (ts *FactorTestSuite) TestUpdateStatus() {
+ newFactorStatus := FactorStateVerified
+ require.NoError(ts.T(), ts.TestFactor.UpdateStatus(ts.db, newFactorStatus))
+ require.Equal(ts.T(), newFactorStatus.String(),
ts.TestFactor.Status) +} + +func (ts *FactorTestSuite) TestUpdateFriendlyName() { + newName := "newfactorname" + require.NoError(ts.T(), ts.TestFactor.UpdateFriendlyName(ts.db, newName)) + require.Equal(ts.T(), newName, ts.TestFactor.FriendlyName) +} + +func (ts *FactorTestSuite) TestEncodedFactorDoesNotLeakSecret() { + encodedFactor, err := json.Marshal(ts.TestFactor) + require.NoError(ts.T(), err) + + decodedFactor := Factor{} + json.Unmarshal(encodedFactor, &decodedFactor) + require.Equal(ts.T(), decodedFactor.Secret, "") +} diff --git a/auth_v2.169.0/internal/models/flow_state.go b/auth_v2.169.0/internal/models/flow_state.go new file mode 100644 index 0000000..9a770d8 --- /dev/null +++ b/auth_v2.169.0/internal/models/flow_state.go @@ -0,0 +1,169 @@ +package models + +import ( + "crypto/sha256" + "crypto/subtle" + "database/sql" + "encoding/base64" + "fmt" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/supabase/auth/internal/storage" + + "github.com/gofrs/uuid" +) + +const InvalidCodeChallengeError = "code challenge does not match previously saved code verifier" +const InvalidCodeMethodError = "code challenge method not supported" + +type FlowState struct { + ID uuid.UUID `json:"id" db:"id"` + UserID *uuid.UUID `json:"user_id,omitempty" db:"user_id"` + AuthCode string `json:"auth_code" db:"auth_code"` + AuthenticationMethod string `json:"authentication_method" db:"authentication_method"` + CodeChallenge string `json:"code_challenge" db:"code_challenge"` + CodeChallengeMethod string `json:"code_challenge_method" db:"code_challenge_method"` + ProviderType string `json:"provider_type" db:"provider_type"` + ProviderAccessToken string `json:"provider_access_token" db:"provider_access_token"` + ProviderRefreshToken string `json:"provider_refresh_token" db:"provider_refresh_token"` + AuthCodeIssuedAt *time.Time `json:"auth_code_issued_at" db:"auth_code_issued_at"` + CreatedAt time.Time `json:"created_at" db:"created_at"` + UpdatedAt time.Time `json:"updated_at" db:"updated_at"` +} + +type CodeChallengeMethod int + +const ( + SHA256 CodeChallengeMethod = iota + Plain +) + +func (codeChallengeMethod CodeChallengeMethod) String() string { + switch codeChallengeMethod { + case SHA256: + return "s256" + case Plain: + return "plain" + } + return "" +} + +func ParseCodeChallengeMethod(codeChallengeMethod string) (CodeChallengeMethod, error) { + switch strings.ToLower(codeChallengeMethod) { + case "s256": + return SHA256, nil + case "plain": + return Plain, nil + } + return 0, fmt.Errorf("unsupported code_challenge method %q", codeChallengeMethod) +} + +type FlowType int + +const ( + PKCEFlow FlowType = iota + ImplicitFlow +) + +func (flowType FlowType) String() string { + switch flowType { + case PKCEFlow: + return "pkce" + case ImplicitFlow: + return "implicit" + } + return "" +} + +func (FlowState) TableName() string { + tableName := "flow_state" + return tableName +} + +func NewFlowState(providerType, codeChallenge string, codeChallengeMethod CodeChallengeMethod, authenticationMethod AuthenticationMethod, userID *uuid.UUID) *FlowState { + id := uuid.Must(uuid.NewV4()) + authCode := uuid.Must(uuid.NewV4()) + flowState := &FlowState{ + ID: id, + ProviderType: providerType, + CodeChallenge: codeChallenge, + CodeChallengeMethod: codeChallengeMethod.String(), + AuthCode: authCode.String(), + AuthenticationMethod: authenticationMethod.String(), + UserID: userID, + } + return flowState +} + +func FindFlowStateByAuthCode(tx *storage.Connection, authCode string) (*FlowState, error) { + obj 
:= &FlowState{} + if err := tx.Eager().Q().Where("auth_code = ?", authCode).First(obj); err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, FlowStateNotFoundError{} + } + return nil, errors.Wrap(err, "error finding flow state") + } + + return obj, nil +} + +func FindFlowStateByID(tx *storage.Connection, id string) (*FlowState, error) { + obj := &FlowState{} + if err := tx.Eager().Q().Where("id = ?", id).First(obj); err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, FlowStateNotFoundError{} + } + return nil, errors.Wrap(err, "error finding flow state") + } + + return obj, nil +} + +func FindFlowStateByUserID(tx *storage.Connection, id string, authenticationMethod AuthenticationMethod) (*FlowState, error) { + obj := &FlowState{} + if err := tx.Eager().Q().Where("user_id = ? and authentication_method = ?", id, authenticationMethod).Last(obj); err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, FlowStateNotFoundError{} + } + return nil, errors.Wrap(err, "error finding flow state") + } + + return obj, nil +} + +func (f *FlowState) VerifyPKCE(codeVerifier string) error { + switch f.CodeChallengeMethod { + case SHA256.String(): + hashedCodeVerifier := sha256.Sum256([]byte(codeVerifier)) + encodedCodeVerifier := base64.RawURLEncoding.EncodeToString(hashedCodeVerifier[:]) + if subtle.ConstantTimeCompare([]byte(f.CodeChallenge), []byte(encodedCodeVerifier)) != 1 { + return errors.New(InvalidCodeChallengeError) + } + case Plain.String(): + if subtle.ConstantTimeCompare([]byte(f.CodeChallenge), []byte(codeVerifier)) != 1 { + return errors.New(InvalidCodeChallengeError) + } + default: + return errors.New(InvalidCodeMethodError) + + } + return nil +} + +func (f *FlowState) IsExpired(expiryDuration time.Duration) bool { + if f.AuthCodeIssuedAt != nil && f.AuthenticationMethod == MagicLink.String() { + return time.Now().After(f.AuthCodeIssuedAt.Add(expiryDuration)) + } + return time.Now().After(f.CreatedAt.Add(expiryDuration)) +} + +func (f *FlowState) RecordAuthCodeIssuedAtTime(tx *storage.Connection) error { + issueTime := time.Now() + f.AuthCodeIssuedAt = &issueTime + if err := tx.Update(f); err != nil { + return err + } + return nil +} diff --git a/auth_v2.169.0/internal/models/identity.go b/auth_v2.169.0/internal/models/identity.go new file mode 100644 index 0000000..c647cbc --- /dev/null +++ b/auth_v2.169.0/internal/models/identity.go @@ -0,0 +1,142 @@ +package models + +import ( + "database/sql" + "strings" + "time" + + "github.com/gobuffalo/pop/v6" + "github.com/gofrs/uuid" + "github.com/pkg/errors" + "github.com/supabase/auth/internal/storage" +) + +type Identity struct { + // returned as identity_id in JSON for backward compatibility with the interface exposed by the client library + // see https://github.com/supabase/gotrue-js/blob/c9296bbc27a2f036af55c1f33fca5930704bd021/src/lib/types.ts#L230-L240 + ID uuid.UUID `json:"identity_id" db:"id"` + // returned as id in JSON for backward compatibility with the interface exposed by the client library + // see https://github.com/supabase/gotrue-js/blob/c9296bbc27a2f036af55c1f33fca5930704bd021/src/lib/types.ts#L230-L240 + ProviderID string `json:"id" db:"provider_id"` + UserID uuid.UUID `json:"user_id" db:"user_id"` + IdentityData JSONMap `json:"identity_data,omitempty" db:"identity_data"` + Provider string `json:"provider" db:"provider"` + LastSignInAt *time.Time `json:"last_sign_in_at,omitempty" db:"last_sign_in_at"` + CreatedAt time.Time `json:"created_at" db:"created_at"` + UpdatedAt time.Time 
`json:"updated_at" db:"updated_at"`
+ Email storage.NullString `json:"email,omitempty" db:"email" rw:"r"`
+}
+
+func (Identity) TableName() string {
+ tableName := "identities"
+ return tableName
+}
+
+// GetEmail returns the user's email as a string
+func (i *Identity) GetEmail() string {
+ return string(i.Email)
+}
+
+// NewIdentity returns an identity associated with the user's id.
+func NewIdentity(user *User, provider string, identityData map[string]interface{}) (*Identity, error) {
+ providerId, ok := identityData["sub"]
+ if !ok {
+ return nil, errors.New("error missing provider id")
+ }
+ now := time.Now()
+
+ identity := &Identity{
+ ProviderID: providerId.(string),
+ UserID: user.ID,
+ IdentityData: identityData,
+ Provider: provider,
+ LastSignInAt: &now,
+ }
+ if email, ok := identityData["email"]; ok {
+ identity.Email = storage.NullString(email.(string))
+ }
+
+ return identity, nil
+}
+
+func (i *Identity) BeforeCreate(tx *pop.Connection) error {
+ return i.BeforeUpdate(tx)
+}
+
+func (i *Identity) BeforeUpdate(tx *pop.Connection) error {
+ if _, ok := i.IdentityData["email"]; ok {
+ i.IdentityData["email"] = strings.ToLower(i.IdentityData["email"].(string))
+ }
+ return nil
+}
+
+func (i *Identity) IsForSSOProvider() bool {
+ return strings.HasPrefix(i.Provider, "sso:")
+}
+
+// FindIdentityByIdAndProvider searches for an identity with the matching provider id and provider given.
+func FindIdentityByIdAndProvider(tx *storage.Connection, providerId, provider string) (*Identity, error) {
+ identity := &Identity{}
+ if err := tx.Q().Where("provider_id = ? AND provider = ?", providerId, provider).First(identity); err != nil {
+ if errors.Cause(err) == sql.ErrNoRows {
+ return nil, IdentityNotFoundError{}
+ }
+ return nil, errors.Wrap(err, "error finding identity")
+ }
+ return identity, nil
+}
+
+// FindIdentitiesByUserID returns all identities associated with a user ID.
+func FindIdentitiesByUserID(tx *storage.Connection, userID uuid.UUID) ([]*Identity, error) {
+ identities := []*Identity{}
+ if err := tx.Q().Where("user_id = ?", userID).All(&identities); err != nil {
+ if errors.Cause(err) == sql.ErrNoRows {
+ return identities, nil
+ }
+ return nil, errors.Wrap(err, "error finding identities")
+ }
+ return identities, nil
+}
+
+// FindProvidersByUser returns all providers associated with a user
+func FindProvidersByUser(tx *storage.Connection, user *User) ([]string, error) {
+ identities := []Identity{}
+ providerExists := map[string]bool{}
+ providers := make([]string, 0)
+ if err := tx.Q().Select("provider").Where("user_id = ?", user.ID).All(&identities); err != nil {
+ if errors.Cause(err) == sql.ErrNoRows {
+ return providers, nil
+ }
+ return nil, errors.Wrap(err, "error finding providers")
+ }
+ for _, identity := range identities {
+ if _, ok := providerExists[identity.Provider]; !ok {
+ providers = append(providers, identity.Provider)
+ providerExists[identity.Provider] = true
+ }
+ }
+ return providers, nil
+}
+
+// UpdateIdentityData sets all identity_data from a map of updates,
+// ensuring that it doesn't override attributes that are not
+// in the provided map.
+func (i *Identity) UpdateIdentityData(tx *storage.Connection, updates map[string]interface{}) error {
+ if i.IdentityData == nil {
+ i.IdentityData = updates
+ } else {
+ for key, value := range updates {
+ if value != nil {
+ i.IdentityData[key] = value
+ } else {
+ delete(i.IdentityData, key)
+ }
+ }
+ }
+ // pop doesn't support updates on tables with composite primary keys so we use a raw query here.
+ return tx.RawQuery( + "update "+(&pop.Model{Value: Identity{}}).TableName()+" set identity_data = ? where id = ?", + i.IdentityData, + i.ID, + ).Exec() +} diff --git a/auth_v2.169.0/internal/models/identity_test.go b/auth_v2.169.0/internal/models/identity_test.go new file mode 100644 index 0000000..d27d17b --- /dev/null +++ b/auth_v2.169.0/internal/models/identity_test.go @@ -0,0 +1,117 @@ +package models + +import ( + "testing" + + "github.com/gofrs/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/storage" + "github.com/supabase/auth/internal/storage/test" +) + +type IdentityTestSuite struct { + suite.Suite + db *storage.Connection +} + +func (ts *IdentityTestSuite) SetupTest() { + TruncateAll(ts.db) +} + +func TestIdentity(t *testing.T) { + globalConfig, err := conf.LoadGlobal(modelsTestConfig) + require.NoError(t, err) + + conn, err := test.SetupDBConnection(globalConfig) + require.NoError(t, err) + + ts := &IdentityTestSuite{ + db: conn, + } + defer ts.db.Close() + + suite.Run(t, ts) +} + +func (ts *IdentityTestSuite) TestNewIdentity() { + u := ts.createUserWithEmail("test@supabase.io") + ts.Run("Test create identity with no provider id", func() { + identityData := map[string]interface{}{} + _, err := NewIdentity(u, "email", identityData) + require.Error(ts.T(), err, "Error missing provider id") + }) + + ts.Run("Test create identity successfully", func() { + identityData := map[string]interface{}{"sub": uuid.Nil.String()} + identity, err := NewIdentity(u, "email", identityData) + require.NoError(ts.T(), err) + require.Equal(ts.T(), u.ID, identity.UserID) + }) +} + +func (ts *IdentityTestSuite) TestFindUserIdentities() { + u := ts.createUserWithIdentity("test@supabase.io") + identities, err := FindIdentitiesByUserID(ts.db, u.ID) + require.NoError(ts.T(), err) + + require.Len(ts.T(), identities, 1) + +} + +func (ts *IdentityTestSuite) TestUpdateIdentityData() { + u := ts.createUserWithIdentity("test@supabase.io") + + identities, err := FindIdentitiesByUserID(ts.db, u.ID) + require.NoError(ts.T(), err) + + updates := map[string]interface{}{ + "sub": nil, + "name": nil, + "email": nil, + } + for _, identity := range identities { + err := identity.UpdateIdentityData(ts.db, updates) + require.NoError(ts.T(), err) + } + + updatedIdentities, err := FindIdentitiesByUserID(ts.db, u.ID) + require.NoError(ts.T(), err) + for _, identity := range updatedIdentities { + require.Empty(ts.T(), identity.IdentityData) + } +} + +func (ts *IdentityTestSuite) createUserWithEmail(email string) *User { + user, err := NewUser("", email, "secret", "test", nil) + require.NoError(ts.T(), err) + + err = ts.db.Create(user) + require.NoError(ts.T(), err) + + return user +} + +func (ts *IdentityTestSuite) createUserWithIdentity(email string) *User { + user, err := NewUser("", email, "secret", "test", nil) + require.NoError(ts.T(), err) + + err = ts.db.Create(user) + require.NoError(ts.T(), err) + + identityData := map[string]interface{}{ + "sub": uuid.Nil.String(), + "name": "test", + "email": email, + } + require.NoError(ts.T(), err) + + identity, err := NewIdentity(user, "email", identityData) + require.NoError(ts.T(), err) + + err = ts.db.Create(identity) + require.NoError(ts.T(), err) + + return user +} diff --git a/auth_v2.169.0/internal/models/json_map.go b/auth_v2.169.0/internal/models/json_map.go new file mode 100644 index 0000000..77cee64 --- /dev/null +++ 
b/auth_v2.169.0/internal/models/json_map.go @@ -0,0 +1,36 @@ +package models + +import ( + "database/sql/driver" + "encoding/json" + "errors" +) + +type JSONMap map[string]interface{} + +func (j JSONMap) Value() (driver.Value, error) { + data, err := json.Marshal(j) + if err != nil { + return driver.Value(""), err + } + return driver.Value(string(data)), nil +} + +func (j JSONMap) Scan(src interface{}) error { + var source []byte + switch v := src.(type) { + case string: + source = []byte(v) + case []byte: + source = v + case nil: + source = []byte("") + default: + return errors.New("invalid data type for JSONMap") + } + + if len(source) == 0 { + source = []byte("{}") + } + return json.Unmarshal(source, &j) +} diff --git a/auth_v2.169.0/internal/models/linking.go b/auth_v2.169.0/internal/models/linking.go new file mode 100644 index 0000000..ca794bc --- /dev/null +++ b/auth_v2.169.0/internal/models/linking.go @@ -0,0 +1,203 @@ +package models + +import ( + "strings" + + "github.com/supabase/auth/internal/api/provider" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/storage" +) + +// GetAccountLinkingDomain returns a string that describes the account linking +// domain. An account linking domain describes a set of Identity entities that +// _should_ generally fall under the same User entity. It's just a runtime +// string, and is not typically persisted in the database. This value can vary +// across time. +func GetAccountLinkingDomain(provider string) string { + if strings.HasPrefix(provider, "sso:") { + // when the provider ID is a SSO provider, then the linking + // domain is the provider itself i.e. there can only be one + // user + identity per identity provider + return provider + } + + // otherwise, the linking domain is the default linking domain that + // links all accounts + return "default" +} + +type AccountLinkingDecision = int + +const ( + AccountExists AccountLinkingDecision = iota + CreateAccount + LinkAccount + MultipleAccounts +) + +type AccountLinkingResult struct { + Decision AccountLinkingDecision + User *User + Identities []*Identity + LinkingDomain string + CandidateEmail provider.Email +} + +// DetermineAccountLinking uses the provided data and database state to compute a decision on whether: +// - A new User should be created (CreateAccount) +// - A new Identity should be created (LinkAccount) with a UserID pointing to an existing user account +// - Nothing should be done (AccountExists) +// - It's not possible to decide due to data inconsistency (MultipleAccounts) and the caller should decide +// +// Errors signal failure in processing only, like database access errors. 
+func DetermineAccountLinking(tx *storage.Connection, config *conf.GlobalConfiguration, emails []provider.Email, aud, providerName, sub string) (AccountLinkingResult, error) { + var verifiedEmails []string + var candidateEmail provider.Email + for _, email := range emails { + if email.Verified || config.Mailer.Autoconfirm { + verifiedEmails = append(verifiedEmails, strings.ToLower(email.Email)) + } + if email.Primary { + candidateEmail = email + candidateEmail.Email = strings.ToLower(email.Email) + } + } + + if identity, terr := FindIdentityByIdAndProvider(tx, sub, providerName); terr == nil { + // account exists + + var user *User + if user, terr = FindUserByID(tx, identity.UserID); terr != nil { + return AccountLinkingResult{}, terr + } + + // we overwrite the email with the existing user's email since the user + // could have an empty email + candidateEmail.Email = user.GetEmail() + return AccountLinkingResult{ + Decision: AccountExists, + User: user, + Identities: []*Identity{identity}, + LinkingDomain: GetAccountLinkingDomain(providerName), + CandidateEmail: candidateEmail, + }, nil + } else if !IsNotFoundError(terr) { + return AccountLinkingResult{}, terr + } + + // the identity does not exist, so we need to check if we should create a new account + // or link to an existing one + + // this is the linking domain for the new identity + candidateLinkingDomain := GetAccountLinkingDomain(providerName) + if len(verifiedEmails) == 0 { + // if there are no verified emails, we always decide to create a new account + user, terr := IsDuplicatedEmail(tx, candidateEmail.Email, aud, nil) + if terr != nil { + return AccountLinkingResult{}, terr + } + if user != nil { + candidateEmail.Email = "" + } + return AccountLinkingResult{ + Decision: CreateAccount, + LinkingDomain: candidateLinkingDomain, + CandidateEmail: candidateEmail, + }, nil + } + + var similarIdentities []*Identity + var similarUsers []*User + // look for similar identities and users based on email + if terr := tx.Q().Eager().Where("email = any (?)", verifiedEmails).All(&similarIdentities); terr != nil { + return AccountLinkingResult{}, terr + } + + if !strings.HasPrefix(providerName, "sso:") { + // there can be multiple user accounts with the same email when is_sso_user is true + // so we just do not consider those similar user accounts + if terr := tx.Q().Eager().Where("email = any (?) 
and is_sso_user = false", verifiedEmails).All(&similarUsers); terr != nil { + return AccountLinkingResult{}, terr + } + } + + // Need to check if the new identity should be assigned to an + // existing user or to create a new user, according to the automatic + // linking rules + var linkingIdentities []*Identity + + // now let's see if there are any existing and similar identities in + // the same linking domain + for _, identity := range similarIdentities { + if GetAccountLinkingDomain(identity.Provider) == candidateLinkingDomain { + linkingIdentities = append(linkingIdentities, identity) + } + } + + if len(linkingIdentities) == 0 { + if len(similarUsers) == 1 { + // no similarIdentities but a user with the same email exists + // so we link this new identity to the user + // TODO: Backfill the missing identity for the user + return AccountLinkingResult{ + Decision: LinkAccount, + User: similarUsers[0], + Identities: linkingIdentities, + LinkingDomain: candidateLinkingDomain, + CandidateEmail: candidateEmail, + }, nil + } else if len(similarUsers) > 1 { + // this shouldn't happen since there is a partial unique index on (email and is_sso_user = false) + return AccountLinkingResult{ + Decision: MultipleAccounts, + Identities: linkingIdentities, + LinkingDomain: candidateLinkingDomain, + CandidateEmail: candidateEmail, + }, nil + } else { + // there are no identities in the linking domain, we have to + // create a new identity and new user + return AccountLinkingResult{ + Decision: CreateAccount, + LinkingDomain: candidateLinkingDomain, + CandidateEmail: candidateEmail, + }, nil + } + } + + // there is at least one identity in the linking domain let's do a + // sanity check to see if all of the identities in the domain share the + // same user ID + linkingUserId := linkingIdentities[0].UserID + for _, identity := range linkingIdentities { + if identity.UserID != linkingUserId { + // ok this linking domain has more than one user account + // caller should decide what to do + + return AccountLinkingResult{ + Decision: MultipleAccounts, + Identities: linkingIdentities, + LinkingDomain: candidateLinkingDomain, + CandidateEmail: candidateEmail, + }, nil + } + } + + // there's only one user ID in this linking domain, we can go on and + // create a new identity and link it to the existing account + + var user *User + var terr error + + if user, terr = FindUserByID(tx, linkingUserId); terr != nil { + return AccountLinkingResult{}, terr + } + + return AccountLinkingResult{ + Decision: LinkAccount, + User: user, + Identities: linkingIdentities, + LinkingDomain: candidateLinkingDomain, + CandidateEmail: candidateEmail, + }, nil +} diff --git a/auth_v2.169.0/internal/models/linking_test.go b/auth_v2.169.0/internal/models/linking_test.go new file mode 100644 index 0000000..05d4a8c --- /dev/null +++ b/auth_v2.169.0/internal/models/linking_test.go @@ -0,0 +1,314 @@ +package models + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/api/provider" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/storage" + "github.com/supabase/auth/internal/storage/test" +) + +type AccountLinkingTestSuite struct { + suite.Suite + + config *conf.GlobalConfiguration + db *storage.Connection +} + +func (ts *AccountLinkingTestSuite) SetupTest() { + TruncateAll(ts.db) +} + +func TestAccountLinking(t *testing.T) { + globalConfig, err := conf.LoadGlobal(modelsTestConfig) + require.NoError(t, err) + + conn, err := 
test.SetupDBConnection(globalConfig) + require.NoError(t, err) + + ts := &AccountLinkingTestSuite{ + config: globalConfig, + db: conn, + } + defer ts.db.Close() + + suite.Run(t, ts) +} + +func (ts *AccountLinkingTestSuite) TestCreateAccountDecisionNoAccounts() { + // when there are no accounts in the system -- conventional provider + testEmail := provider.Email{ + Email: "test@example.com", + Verified: true, + Primary: true, + } + decision, err := DetermineAccountLinking(ts.db, ts.config, []provider.Email{testEmail}, ts.config.JWT.Aud, "provider", "abcdefgh") + require.NoError(ts.T(), err) + + require.Equal(ts.T(), decision.Decision, CreateAccount) + + // when there are no accounts in the system -- SSO provider + decision, err = DetermineAccountLinking(ts.db, ts.config, []provider.Email{testEmail}, ts.config.JWT.Aud, "sso:f06f9e3d-ff92-4c47-a179-7acf1fda6387", "abcdefgh") + require.NoError(ts.T(), err) + + require.Equal(ts.T(), decision.Decision, CreateAccount) +} + +func (ts *AccountLinkingTestSuite) TestCreateAccountDecisionWithAccounts() { + userA, err := NewUser("", "test@example.com", "", "authenticated", nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(userA)) + identityA, err := NewIdentity(userA, "provider", map[string]interface{}{ + "sub": userA.ID.String(), + "email": "test@example.com", + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(identityA)) + + userB, err := NewUser("", "test@samltest.id", "", "authenticated", nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(userB)) + + ssoProvider := "sso:f06f9e3d-ff92-4c47-a179-7acf1fda6387" + identityB, err := NewIdentity(userB, ssoProvider, map[string]interface{}{ + "sub": userB.ID.String(), + "email": "test@samltest.id", + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(identityB)) + + // when the email doesn't exist in the system -- conventional provider + decision, err := DetermineAccountLinking(ts.db, ts.config, []provider.Email{ + { + Email: "other@example.com", + Verified: true, + Primary: true, + }, + }, ts.config.JWT.Aud, "provider", "abcdefgh") + require.NoError(ts.T(), err) + + require.Equal(ts.T(), decision.Decision, CreateAccount) + require.Equal(ts.T(), decision.LinkingDomain, "default") + + // when looking for an email that doesn't exist in the SSO linking domain + decision, err = DetermineAccountLinking(ts.db, ts.config, []provider.Email{ + { + Email: "other@samltest.id", + Verified: true, + Primary: true, + }, + }, ts.config.JWT.Aud, ssoProvider, "abcdefgh") + require.NoError(ts.T(), err) + + require.Equal(ts.T(), decision.Decision, CreateAccount) + require.Equal(ts.T(), decision.LinkingDomain, ssoProvider) +} + +func (ts *AccountLinkingTestSuite) TestAccountExists() { + userA, err := NewUser("", "test@example.com", "", "authenticated", nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(userA)) + identityA, err := NewIdentity(userA, "provider", map[string]interface{}{ + "sub": userA.ID.String(), + "email": "test@example.com", + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(identityA)) + + decision, err := DetermineAccountLinking(ts.db, ts.config, []provider.Email{ + { + Email: "test@example.com", + Verified: true, + Primary: true, + }, + }, ts.config.JWT.Aud, "provider", userA.ID.String()) + require.NoError(ts.T(), err) + + require.Equal(ts.T(), decision.Decision, AccountExists) + require.Equal(ts.T(), decision.User.ID, userA.ID) +} + +func (ts 
*AccountLinkingTestSuite) TestLinkingScenarios() {
+ userA, err := NewUser("", "test@example.com", "", "authenticated", nil)
+ require.NoError(ts.T(), err)
+ require.NoError(ts.T(), ts.db.Create(userA))
+ identityA, err := NewIdentity(userA, "provider", map[string]interface{}{
+ "sub": userA.ID.String(),
+ "email": "test@example.com",
+ })
+ require.NoError(ts.T(), err)
+ require.NoError(ts.T(), ts.db.Create(identityA))
+
+ userB, err := NewUser("", "test@samltest.id", "", "authenticated", nil)
+ require.NoError(ts.T(), err)
+ require.NoError(ts.T(), ts.db.Create(userB))
+
+ identityB, err := NewIdentity(userB, "sso:f06f9e3d-ff92-4c47-a179-7acf1fda6387", map[string]interface{}{
+ "sub": userB.ID.String(),
+ "email": "test@samltest.id",
+ })
+ require.NoError(ts.T(), err)
+ require.NoError(ts.T(), ts.db.Create(identityB))
+
+ cases := []struct {
+ desc string
+ email provider.Email
+ sub string
+ provider string
+ decision AccountLinkingResult
+ }{
+ {
+ // link decision because the below described identity is in the default linking domain but uses "other-provider" instead of "provider"
+ desc: "same email address",
+ email: provider.Email{
+ Email: "test@example.com",
+ Verified: true,
+ Primary: true,
+ },
+ sub: userA.ID.String(),
+ provider: "other-provider",
+ decision: AccountLinkingResult{
+ Decision: LinkAccount,
+ User: userA,
+ LinkingDomain: "default",
+ CandidateEmail: provider.Email{
+ Email: "test@example.com",
+ Verified: true,
+ Primary: true,
+ },
+ },
+ },
+ {
+ desc: "same email address in uppercase",
+ email: provider.Email{
+ Email: "TEST@example.com",
+ Verified: true,
+ Primary: true,
+ },
+ sub: userA.ID.String(),
+ provider: "other-provider",
+ decision: AccountLinkingResult{
+ Decision: LinkAccount,
+ User: userA,
+ LinkingDomain: "default",
+ CandidateEmail: provider.Email{
+ // expected email should be case insensitive
+ Email: "test@example.com",
+ Verified: true,
+ Primary: true,
+ },
+ },
+ },
+ {
+ desc: "no link decision because the SSO linking domain is scoped to the provider unique ID",
+ email: provider.Email{
+ Email: "test@samltest.id",
+ Verified: true,
+ Primary: true,
+ },
+ sub: userB.ID.String(),
+ provider: "sso:f06f9e3d-ff92-4c47-a179-7acf1fda6387",
+ decision: AccountLinkingResult{
+ Decision: AccountExists,
+ User: userB,
+ LinkingDomain: "sso:f06f9e3d-ff92-4c47-a179-7acf1fda6387",
+ CandidateEmail: provider.Email{
+ Email: "test@samltest.id",
+ Verified: true,
+ Primary: true,
+ },
+ },
+ },
+ {
+ desc: "create account with empty email because email is unverified and user exists",
+ email: provider.Email{
+ Email: "test@example.com",
+ Verified: false,
+ Primary: true,
+ },
+ sub: userA.ID.String(),
+ provider: "other-provider",
+ decision: AccountLinkingResult{
+ Decision: CreateAccount,
+ LinkingDomain: "default",
+ CandidateEmail: provider.Email{
+ Email: "",
+ Verified: false,
+ Primary: true,
+ },
+ },
+ },
+ {
+ desc: "create account because email is unverified and user doesn't exist",
+ email: provider.Email{
+ Email: "other@example.com",
+ Verified: false,
+ Primary: true,
+ },
+ sub: "000000000",
+ provider: "other-provider",
+ decision: AccountLinkingResult{
+ Decision: CreateAccount,
+ LinkingDomain: "default",
+ CandidateEmail: provider.Email{
+ Email: "other@example.com",
+ Verified: false,
+ Primary: true,
+ },
+ },
+ },
+ }
+
+ for _, c := range cases {
+ ts.Run(c.desc, func() {
+ decision, err := DetermineAccountLinking(ts.db, ts.config, []provider.Email{c.email}, ts.config.JWT.Aud, c.provider,
c.sub) + require.NoError(ts.T(), err) + require.Equal(ts.T(), c.decision.Decision, decision.Decision) + require.Equal(ts.T(), c.decision.LinkingDomain, decision.LinkingDomain) + require.Equal(ts.T(), c.decision.CandidateEmail.Email, decision.CandidateEmail.Email) + require.Equal(ts.T(), c.decision.CandidateEmail.Verified, decision.CandidateEmail.Verified) + require.Equal(ts.T(), c.decision.CandidateEmail.Primary, decision.CandidateEmail.Primary) + }) + } + +} + +func (ts *AccountLinkingTestSuite) TestMultipleAccounts() { + userA, err := NewUser("", "test@example.com", "", "authenticated", nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(userA)) + identityA, err := NewIdentity(userA, "provider", map[string]interface{}{ + "sub": userA.ID.String(), + "email": "test@example.com", + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(identityA)) + + userB, err := NewUser("", "test-b@example.com", "", "authenticated", nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(userB)) + identityB, err := NewIdentity(userB, "provider", map[string]interface{}{ + "sub": userB.ID.String(), + "email": "test@example.com", // intentionally same as userA + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(identityB)) + + // decision is multiple accounts because there are two distinct + // identities in the same "default" linking domain with the same email + // address pointing to two different user accounts + decision, err := DetermineAccountLinking(ts.db, ts.config, []provider.Email{ + { + Email: "test@example.com", + Verified: true, + Primary: true, + }, + }, ts.config.JWT.Aud, "provider", "abcdefgh") + require.NoError(ts.T(), err) + + require.Equal(ts.T(), decision.Decision, MultipleAccounts) +} diff --git a/auth_v2.169.0/internal/models/one_time_token.go b/auth_v2.169.0/internal/models/one_time_token.go new file mode 100644 index 0000000..3077647 --- /dev/null +++ b/auth_v2.169.0/internal/models/one_time_token.go @@ -0,0 +1,286 @@ +package models + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "strings" + "time" + + "github.com/gofrs/uuid" + "github.com/pkg/errors" + "github.com/supabase/auth/internal/storage" +) + +type OneTimeTokenType int + +const ( + ConfirmationToken OneTimeTokenType = iota + ReauthenticationToken + RecoveryToken + EmailChangeTokenNew + EmailChangeTokenCurrent + PhoneChangeToken +) + +func (t OneTimeTokenType) String() string { + switch t { + case ConfirmationToken: + return "confirmation_token" + + case ReauthenticationToken: + return "reauthentication_token" + + case RecoveryToken: + return "recovery_token" + + case EmailChangeTokenNew: + return "email_change_token_new" + + case EmailChangeTokenCurrent: + return "email_change_token_current" + + case PhoneChangeToken: + return "phone_change_token" + + default: + panic("OneTimeToken: unreachable case") + } +} + +func ParseOneTimeTokenType(s string) (OneTimeTokenType, error) { + switch s { + case "confirmation_token": + return ConfirmationToken, nil + + case "reauthentication_token": + return ReauthenticationToken, nil + + case "recovery_token": + return RecoveryToken, nil + + case "email_change_token_new": + return EmailChangeTokenNew, nil + + case "email_change_token_current": + return EmailChangeTokenCurrent, nil + + case "phone_change_token": + return PhoneChangeToken, nil + + default: + return 0, fmt.Errorf("OneTimeTokenType: unrecognized string %q", s) + } +} + +func (t OneTimeTokenType) Value() (driver.Value, error) { + 
return t.String(), nil
+}
+
+func (t *OneTimeTokenType) Scan(src interface{}) error {
+ s, ok := src.(string)
+ if !ok {
+ return fmt.Errorf("OneTimeTokenType: scan type is not string but is %T", src)
+ }
+
+ parsed, err := ParseOneTimeTokenType(s)
+ if err != nil {
+ return err
+ }
+
+ *t = parsed
+ return nil
+}
+
+type OneTimeTokenNotFoundError struct {
+}
+
+func (e OneTimeTokenNotFoundError) Error() string {
+ return "One-time token not found"
+}
+
+type OneTimeToken struct {
+ ID uuid.UUID `json:"id" db:"id"`
+
+ UserID uuid.UUID `json:"user_id" db:"user_id"`
+ TokenType OneTimeTokenType `json:"token_type" db:"token_type"`
+
+ TokenHash string `json:"token_hash" db:"token_hash"`
+ RelatesTo string `json:"relates_to" db:"relates_to"`
+
+ CreatedAt time.Time `json:"created_at" db:"created_at"`
+ UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
+}
+
+func (OneTimeToken) TableName() string {
+ return "one_time_tokens"
+}
+
+func ClearAllOneTimeTokensForUser(tx *storage.Connection, userID uuid.UUID) error {
+ return tx.Q().Where("user_id = ?", userID).Delete(OneTimeToken{})
+}
+
+func ClearOneTimeTokenForUser(tx *storage.Connection, userID uuid.UUID, tokenType OneTimeTokenType) error {
+ if err := tx.Q().Where("token_type = ? and user_id = ?", tokenType, userID).Delete(OneTimeToken{}); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func CreateOneTimeToken(tx *storage.Connection, userID uuid.UUID, relatesTo, tokenHash string, tokenType OneTimeTokenType) error {
+ if err := ClearOneTimeTokenForUser(tx, userID, tokenType); err != nil {
+ return err
+ }
+
+ oneTimeToken := &OneTimeToken{
+ ID: uuid.Must(uuid.NewV4()),
+ UserID: userID,
+ TokenType: tokenType,
+ TokenHash: tokenHash,
+ RelatesTo: strings.ToLower(relatesTo),
+ }
+
+ if err := tx.Eager().Create(oneTimeToken); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func FindOneTimeToken(tx *storage.Connection, tokenHash string, tokenTypes ...OneTimeTokenType) (*OneTimeToken, error) {
+ oneTimeToken := &OneTimeToken{}
+
+ query := tx.Eager().Q()
+
+ switch len(tokenTypes) {
+ case 2:
+ query = query.Where("(token_type = ? or token_type = ?) and token_hash = ?", tokenTypes[0], tokenTypes[1], tokenHash)
+
+ case 1:
+ query = query.Where("token_type = ? and token_hash = ?", tokenTypes[0], tokenHash)
+
+ default:
+ panic("at most 2 token types are accepted")
+ }
+
+ if err := query.First(oneTimeToken); err != nil {
+ if errors.Cause(err) == sql.ErrNoRows {
+ return nil, OneTimeTokenNotFoundError{}
+ }
+
+ return nil, errors.Wrap(err, "error finding one time token")
+ }
+
+ return oneTimeToken, nil
+}
+
+// FindUserByConfirmationOrRecoveryToken finds the user with the matching confirmation or recovery token.
+func FindUserByConfirmationOrRecoveryToken(tx *storage.Connection, token string) (*User, error) {
+ ott, err := FindOneTimeToken(tx, token, ConfirmationToken, RecoveryToken)
+ if err != nil {
+ return nil, err
+ }
+
+ return FindUserByID(tx, ott.UserID)
+}
+
+// FindUserByConfirmationToken finds users with the matching confirmation token.
+func FindUserByConfirmationToken(tx *storage.Connection, token string) (*User, error) {
+ ott, err := FindOneTimeToken(tx, token, ConfirmationToken)
+ if err != nil {
+ return nil, err
+ }
+
+ return FindUserByID(tx, ott.UserID)
+}
+
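The Value/Scan pair on OneTimeTokenType above is the stock database/sql pattern for persisting an enum as text. A self-contained sketch of the same round-trip, with a hypothetical Color enum standing in for the real type (illustration only, not part of the vendored file):

package main

import (
	"database/sql/driver"
	"fmt"
)

// Color is a hypothetical enum mirroring the OneTimeTokenType pattern.
type Color int

const (
	Red Color = iota
	Blue
)

func (c Color) String() string {
	switch c {
	case Red:
		return "red"
	case Blue:
		return "blue"
	default:
		panic("Color: unreachable case")
	}
}

func ParseColor(s string) (Color, error) {
	switch s {
	case "red":
		return Red, nil
	case "blue":
		return Blue, nil
	default:
		return 0, fmt.Errorf("Color: unrecognized string %q", s)
	}
}

// Value stores the enum as its string form, like OneTimeTokenType.Value.
func (c Color) Value() (driver.Value, error) { return c.String(), nil }

// Scan restores the enum from the string the driver returns.
func (c *Color) Scan(src interface{}) error {
	s, ok := src.(string)
	if !ok {
		return fmt.Errorf("Color: scan type is not string but is %T", src)
	}
	parsed, err := ParseColor(s)
	if err != nil {
		return err
	}
	*c = parsed
	return nil
}

func main() {
	v, _ := Blue.Value()
	var c Color
	if err := c.Scan(v); err != nil {
		panic(err)
	}
	fmt.Println(c) // blue
}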
+// FindUserByRecoveryToken finds a user with the matching recovery token.
+func FindUserByRecoveryToken(tx *storage.Connection, token string) (*User, error) {
+ ott, err := FindOneTimeToken(tx, token, RecoveryToken)
+ if err != nil {
+ return nil, err
+ }
+
+ return FindUserByID(tx, ott.UserID)
+}
+
+// FindUserByEmailChangeToken finds a user with the matching email change token.
+func FindUserByEmailChangeToken(tx *storage.Connection, token string) (*User, error) {
+ ott, err := FindOneTimeToken(tx, token, EmailChangeTokenCurrent, EmailChangeTokenNew)
+ if err != nil {
+ return nil, err
+ }
+
+ return FindUserByID(tx, ott.UserID)
+}
+
+// FindUserByEmailChangeCurrentAndAudience finds a user with the matching email change and audience.
+func FindUserByEmailChangeCurrentAndAudience(tx *storage.Connection, email, token, aud string) (*User, error) {
+ ott, err := FindOneTimeToken(tx, token, EmailChangeTokenCurrent)
+ if err != nil && !IsNotFoundError(err) {
+ return nil, err
+ }
+
+ if ott == nil {
+ ott, err = FindOneTimeToken(tx, "pkce_"+token, EmailChangeTokenCurrent)
+ if err != nil && !IsNotFoundError(err) {
+ return nil, err
+ }
+ }
+ if ott == nil {
+ return nil, err
+ }
+
+ user, err := FindUserByID(tx, ott.UserID)
+ if err != nil {
+ return nil, err
+ }
+
+ if user.Aud != aud && strings.EqualFold(user.GetEmail(), email) {
+ return nil, UserNotFoundError{}
+ }
+
+ return user, nil
+}
+
+// FindUserByEmailChangeNewAndAudience finds a user with the matching email change and audience.
+func FindUserByEmailChangeNewAndAudience(tx *storage.Connection, email, token, aud string) (*User, error) {
+ ott, err := FindOneTimeToken(tx, token, EmailChangeTokenNew)
+ if err != nil && !IsNotFoundError(err) {
+ return nil, err
+ }
+
+ if ott == nil {
+ ott, err = FindOneTimeToken(tx, "pkce_"+token, EmailChangeTokenNew)
+ if err != nil && !IsNotFoundError(err) {
+ return nil, err
+ }
+ }
+ if ott == nil {
+ return nil, err
+ }
+
+ user, err := FindUserByID(tx, ott.UserID)
+ if err != nil {
+ return nil, err
+ }
+
+ if user.Aud != aud && strings.EqualFold(user.EmailChange, email) {
+ return nil, UserNotFoundError{}
+ }
+
+ return user, nil
+}
+
+// FindUserForEmailChange finds a user requesting an email change
+func FindUserForEmailChange(tx *storage.Connection, email, token, aud string, secureEmailChangeEnabled bool) (*User, error) {
+ if secureEmailChangeEnabled {
+ if user, err := FindUserByEmailChangeCurrentAndAudience(tx, email, token, aud); err == nil {
+ return user, err
+ } else if !IsNotFoundError(err) {
+ return nil, err
+ }
+ }
+ return FindUserByEmailChangeNewAndAudience(tx, email, token, aud)
+}
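The FindUserByEmailChange* helpers above retry the lookup with a "pkce_"-prefixed hash when the plain hash misses, matching how tokens issued during a PKCE flow are stored. A toy sketch of that fallback shape, with a plain map standing in for the one_time_tokens table (hypothetical names, illustration only):

package main

import "fmt"

// findToken mimics the lookup-with-fallback shape used above: try the plain
// token hash first, then the "pkce_"-prefixed variant. The store map is a
// stand-in for the one_time_tokens table.
func findToken(store map[string]string, tokenHash string) (string, bool) {
	if userID, ok := store[tokenHash]; ok {
		return userID, true
	}
	userID, ok := store["pkce_"+tokenHash]
	return userID, ok
}

func main() {
	store := map[string]string{
		"pkce_abc123": "user-42", // token stored during a PKCE flow
	}
	if userID, ok := findToken(store, "abc123"); ok {
		fmt.Println("found", userID) // found user-42
	}
}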
diff --git a/auth_v2.169.0/internal/models/refresh_token.go b/auth_v2.169.0/internal/models/refresh_token.go
new file mode 100644
index 0000000..c5fea83
--- /dev/null
+++ b/auth_v2.169.0/internal/models/refresh_token.go
@@ -0,0 +1,166 @@
+package models
+
+import (
+ "database/sql"
+ "net/http"
+ "time"
+
+ "github.com/gobuffalo/pop/v6"
+ "github.com/gofrs/uuid"
+ "github.com/pkg/errors"
+ "github.com/supabase/auth/internal/crypto"
+ "github.com/supabase/auth/internal/storage"
+ "github.com/supabase/auth/internal/utilities"
+)
+
+// RefreshToken is the database model for refresh tokens.
+type RefreshToken struct {
+ ID int64 `db:"id"`
+
+ Token string `db:"token"`
+
+ UserID uuid.UUID `db:"user_id"`
+
+ Parent storage.NullString `db:"parent"`
+ SessionId *uuid.UUID `db:"session_id"`
+
+ Revoked bool `db:"revoked"`
+ CreatedAt time.Time `db:"created_at"`
+ UpdatedAt time.Time `db:"updated_at"`
+
+ DONTUSEINSTANCEID uuid.UUID `json:"-" db:"instance_id"`
+}
+
+func (RefreshToken) TableName() string {
+ tableName := "refresh_tokens"
+ return tableName
+}
+
+// GrantParams is used to pass session-specific parameters when issuing a new
+// refresh token to authenticated users.
+type GrantParams struct {
+ FactorID *uuid.UUID
+
+ SessionNotAfter *time.Time
+ SessionTag *string
+
+ UserAgent string
+ IP string
+}
+
+func (g *GrantParams) FillGrantParams(r *http.Request) {
+ g.UserAgent = r.Header.Get("User-Agent")
+ g.IP = utilities.GetIPAddress(r)
+}
+
+// GrantAuthenticatedUser creates a refresh token for the provided user.
+func GrantAuthenticatedUser(tx *storage.Connection, user *User, params GrantParams) (*RefreshToken, error) {
+ return createRefreshToken(tx, user, nil, &params)
+}
+
+// GrantRefreshTokenSwap swaps a refresh token for a new one, revoking the provided token.
+func GrantRefreshTokenSwap(r *http.Request, tx *storage.Connection, user *User, token *RefreshToken) (*RefreshToken, error) {
+ var newToken *RefreshToken
+ err := tx.Transaction(func(rtx *storage.Connection) error {
+ var terr error
+ if terr = NewAuditLogEntry(r, tx, user, TokenRevokedAction, "", nil); terr != nil {
+ return errors.Wrap(terr, "error creating audit log entry")
+ }
+
+ token.Revoked = true
+ if terr = tx.UpdateOnly(token, "revoked"); terr != nil {
+ return terr
+ }
+
+ newToken, terr = createRefreshToken(rtx, user, token, &GrantParams{})
+ return terr
+ })
+ return newToken, err
+}
+
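GrantRefreshTokenSwap above implements rotation: the presented token is revoked and a child token recording it as parent is minted in the same session. A minimal in-memory sketch of that invariant (hypothetical types, illustration only):

package main

import "fmt"

// token is an in-memory stand-in for the RefreshToken model above.
type token struct {
	value   string
	parent  string
	revoked bool
}

// swap mirrors GrantRefreshTokenSwap: revoke the presented token and mint a
// child that records its parent, so the chain can later be revoked as a family.
func swap(old *token, next string) *token {
	old.revoked = true
	return &token{value: next, parent: old.value}
}

func main() {
	t1 := &token{value: "r1"}
	t2 := swap(t1, "r2")
	fmt.Println(t1.revoked, t2.parent) // true r1
}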
+// RevokeTokenFamily revokes all refresh tokens that descended from the provided token.
+func RevokeTokenFamily(tx *storage.Connection, token *RefreshToken) error {
+ var err error
+ tablename := (&pop.Model{Value: RefreshToken{}}).TableName()
+ if token.SessionId != nil {
+ err = tx.RawQuery(`update `+tablename+` set revoked = true, updated_at = now() where session_id = ? and revoked = false;`, token.SessionId).Exec()
+ } else {
+ err = tx.RawQuery(`
+ with recursive token_family as (
+ select id, user_id, token, revoked, parent from `+tablename+` where parent = ?
+ union
+ select r.id, r.user_id, r.token, r.revoked, r.parent from `+tablename+` r inner join token_family t on t.token = r.parent
+ )
+ update `+tablename+` r set revoked = true from token_family where token_family.id = r.id;`, token.Token).Exec()
+ }
+ if err != nil {
+ if errors.Cause(err) == sql.ErrNoRows || errors.Is(err, sql.ErrNoRows) {
+ return nil
+ }
+
+ return err
+ }
+ return nil
+}
+
+func FindTokenBySessionID(tx *storage.Connection, sessionId *uuid.UUID) (*RefreshToken, error) {
+ refreshToken := &RefreshToken{}
+ err := tx.Q().Where("instance_id = ? and session_id = ?", uuid.Nil, sessionId).Order("created_at asc").First(refreshToken)
+ if err != nil {
+ if errors.Cause(err) == sql.ErrNoRows {
+ return nil, RefreshTokenNotFoundError{}
+ }
+ return nil, err
+ }
+ return refreshToken, nil
+}
+
+func createRefreshToken(tx *storage.Connection, user *User, oldToken *RefreshToken, params *GrantParams) (*RefreshToken, error) {
+ token := &RefreshToken{
+ UserID: user.ID,
+ Token: crypto.SecureToken(),
+ Parent: "",
+ }
+ if oldToken != nil {
+ token.Parent = storage.NullString(oldToken.Token)
+ token.SessionId = oldToken.SessionId
+ }
+
+ if token.SessionId == nil {
+ session, err := NewSession(user.ID, params.FactorID)
+ if err != nil {
+ return nil, errors.Wrap(err, "error instantiating new session object")
+ }
+
+ if params.SessionNotAfter != nil {
+ session.NotAfter = params.SessionNotAfter
+ }
+
+ if params.UserAgent != "" {
+ session.UserAgent = &params.UserAgent
+ }
+
+ if params.IP != "" {
+ session.IP = &params.IP
+ }
+
+ if params.SessionTag != nil && *params.SessionTag != "" {
+ session.Tag = params.SessionTag
+ }
+
+ if err := tx.Create(session); err != nil {
+ return nil, errors.Wrap(err, "error creating new session")
+ }
+
+ token.SessionId = &session.ID
+ }
+
+ if err := tx.Create(token); err != nil {
+ return nil, errors.Wrap(err, "error creating refresh token")
+ }
+
+ if err := user.UpdateLastSignInAt(tx); err != nil {
+ return nil, errors.Wrap(err, "error updating user's last_sign_in field")
+ }
+ return token, nil
+}
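The recursive CTE in RevokeTokenFamily above walks parent links so every descendant of a compromised token is revoked in one statement. The same traversal, sketched as a breadth-first walk over an in-memory parent/child map (illustration only):

package main

import "fmt"

// revokeFamily walks parent links the way the recursive CTE in
// RevokeTokenFamily does, revoking every descendant of the given token.
// children maps a token value to the tokens that were minted from it.
func revokeFamily(children map[string][]string, revoked map[string]bool, root string) {
	queue := []string{root}
	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		for _, child := range children[cur] {
			if !revoked[child] {
				revoked[child] = true
				queue = append(queue, child)
			}
		}
	}
}

func main() {
	children := map[string][]string{"r1": {"r2"}, "r2": {"r3"}}
	revoked := map[string]bool{}
	revokeFamily(children, revoked, "r1")
	fmt.Println(revoked["r2"], revoked["r3"]) // true true
}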
diff --git a/auth_v2.169.0/internal/models/refresh_token_test.go b/auth_v2.169.0/internal/models/refresh_token_test.go
new file mode 100644
index 0000000..675826d
--- /dev/null
+++ b/auth_v2.169.0/internal/models/refresh_token_test.go
@@ -0,0 +1,89 @@
+package models
+
+import (
+ "net/http"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+ "github.com/supabase/auth/internal/conf"
+ "github.com/supabase/auth/internal/storage"
+ "github.com/supabase/auth/internal/storage/test"
+)
+
+type RefreshTokenTestSuite struct {
+ suite.Suite
+ db *storage.Connection
+}
+
+func (ts *RefreshTokenTestSuite) SetupTest() {
+ TruncateAll(ts.db)
+}
+
+func TestRefreshToken(t *testing.T) {
+ globalConfig, err := conf.LoadGlobal(modelsTestConfig)
+ require.NoError(t, err)
+
+ conn, err := test.SetupDBConnection(globalConfig)
+ require.NoError(t, err)
+
+ ts := &RefreshTokenTestSuite{
+ db: conn,
+ }
+ defer ts.db.Close()
+
+ suite.Run(t, ts)
+}
+
+func (ts *RefreshTokenTestSuite) TestGrantAuthenticatedUser() {
+ u := ts.createUser()
+ r, err := GrantAuthenticatedUser(ts.db, u, GrantParams{})
+ require.NoError(ts.T(), err)
+
+ require.NotEmpty(ts.T(), r.Token)
+ require.Equal(ts.T(), u.ID, r.UserID)
+}
+
+func (ts *RefreshTokenTestSuite) TestGrantRefreshTokenSwap() {
+ u := ts.createUser()
+ r, err := GrantAuthenticatedUser(ts.db, u, GrantParams{})
+ require.NoError(ts.T(), err)
+
+ s, err := GrantRefreshTokenSwap(&http.Request{}, ts.db, u, r)
+ require.NoError(ts.T(), err)
+
+ _, nr, _, err := FindUserWithRefreshToken(ts.db, r.Token, false)
+ require.NoError(ts.T(), err)
+
+ require.Equal(ts.T(), r.ID, nr.ID)
+ require.True(ts.T(), nr.Revoked, "expected old token to be revoked")
+
+ require.NotEqual(ts.T(), r.ID, s.ID)
+ require.Equal(ts.T(), u.ID, s.UserID)
+}
+
+func (ts *RefreshTokenTestSuite) TestLogout() {
+ u := ts.createUser()
+ r, err := GrantAuthenticatedUser(ts.db, u, GrantParams{})
+ require.NoError(ts.T(), err)
+
+ require.NoError(ts.T(), Logout(ts.db, u.ID))
+ u, r, _, err = FindUserWithRefreshToken(ts.db, r.Token, false)
+ require.Errorf(ts.T(), err, "expected error when there are no refresh tokens to authenticate. user: %v token: %v", u, r)
+
+ require.True(ts.T(), IsNotFoundError(err), "expected NotFoundError")
+}
+
+func (ts *RefreshTokenTestSuite) createUser() *User {
+ return ts.createUserWithEmail("david@netlify.com")
+}
+
+func (ts *RefreshTokenTestSuite) createUserWithEmail(email string) *User {
+ user, err := NewUser("", email, "secret", "test", nil)
+ require.NoError(ts.T(), err)
+
+ err = ts.db.Create(user)
+ require.NoError(ts.T(), err)
+
+ return user
+}
diff --git a/auth_v2.169.0/internal/models/sessions.go b/auth_v2.169.0/internal/models/sessions.go
new file mode 100644
index 0000000..a93be44
--- /dev/null
+++ b/auth_v2.169.0/internal/models/sessions.go
@@ -0,0 +1,356 @@
+package models
+
+import (
+ "database/sql"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/gobuffalo/pop/v6"
+ "github.com/gofrs/uuid"
+ "github.com/pkg/errors"
+ "github.com/supabase/auth/internal/storage"
+)
+
+type AuthenticatorAssuranceLevel int
+
+const (
+ AAL1 AuthenticatorAssuranceLevel = iota
+ AAL2
+ AAL3
+)
+
+func (aal AuthenticatorAssuranceLevel) String() string {
+ switch aal {
+ case AAL1:
+ return "aal1"
+ case AAL2:
+ return "aal2"
+ case AAL3:
+ return "aal3"
+ default:
+ return ""
+ }
+}
+
+// AMREntry represents a method that a user has logged in together with the corresponding time
+type AMREntry struct {
+ Method string `json:"method"`
+ Timestamp int64 `json:"timestamp"`
+ Provider string `json:"provider,omitempty"`
+}
+
+type sortAMREntries struct {
+ Array []AMREntry
+}
+
+func (s sortAMREntries) Len() int {
+ return len(s.Array)
+}
+
+func (s sortAMREntries) Less(i, j int) bool {
+ return s.Array[i].Timestamp < s.Array[j].Timestamp
+}
+
+func (s sortAMREntries) Swap(i, j int) {
+ s.Array[j], s.Array[i] = s.Array[i], s.Array[j]
+}
+
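sortAMREntries above is a plain sort.Interface; descending order therefore requires wrapping it with sort.Reverse and handing the result to sort.Sort, as CalculateAALAndAMR does later in this file. A runnable sketch of that idiom (hypothetical entry type, illustration only):

package main

import (
	"fmt"
	"sort"
)

// entry and byTimestamp mirror the AMREntry/sortAMREntries pair above.
type entry struct {
	Method    string
	Timestamp int64
}

type byTimestamp []entry

func (s byTimestamp) Len() int           { return len(s) }
func (s byTimestamp) Less(i, j int) bool { return s[i].Timestamp < s[j].Timestamp }
func (s byTimestamp) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func main() {
	amr := byTimestamp{{"password", 100}, {"totp", 300}, {"sso/saml", 200}}
	// sort.Reverse only wraps an Interface; it must be passed to sort.Sort
	// to actually reorder, which is why the no-op call in CalculateAALAndAMR
	// was corrected below.
	sort.Sort(sort.Reverse(amr))
	fmt.Println(amr[0].Method) // totp (most recent first)
}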
+type Session struct {
+ ID uuid.UUID `json:"-" db:"id"`
+ UserID uuid.UUID `json:"user_id" db:"user_id"`
+
+ // NotAfter is overridden by timeboxed sessions.
+ NotAfter *time.Time `json:"not_after,omitempty" db:"not_after"`
+
+ CreatedAt time.Time `json:"created_at" db:"created_at"`
+ UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
+ FactorID *uuid.UUID `json:"factor_id" db:"factor_id"`
+ AMRClaims []AMRClaim `json:"amr,omitempty" has_many:"amr_claims"`
+ AAL *string `json:"aal" db:"aal"`
+
+ RefreshedAt *time.Time `json:"refreshed_at,omitempty" db:"refreshed_at"`
+ UserAgent *string `json:"user_agent,omitempty" db:"user_agent"`
+ IP *string `json:"ip,omitempty" db:"ip"`
+
+ Tag *string `json:"tag" db:"tag"`
+}
+
+func (Session) TableName() string {
+ tableName := "sessions"
+ return tableName
+}
+
+func (s *Session) LastRefreshedAt(refreshTokenTime *time.Time) time.Time {
+ refreshedAt := s.RefreshedAt
+
+ if refreshedAt == nil || refreshedAt.IsZero() {
+ if refreshTokenTime != nil {
+ rtt := *refreshTokenTime
+
+ if rtt.IsZero() {
+ return s.CreatedAt
+ } else if rtt.After(s.CreatedAt) {
+ return rtt
+ }
+ }
+
+ return s.CreatedAt
+ }
+
+ return *refreshedAt
+}
+
+func (s *Session) UpdateOnlyRefreshInfo(tx *storage.Connection) error {
+ // TODO(kangmingtay): The underlying database type uses timestamp without timezone,
+ // so we need to convert the value to UTC before updating it.
+ // In the future, we should add a migration to update the type to contain the timezone.
+ *s.RefreshedAt = s.RefreshedAt.UTC()
+ return tx.UpdateOnly(s, "refreshed_at", "user_agent", "ip")
+}
+
+type SessionValidityReason = int
+
+const (
+ SessionValid SessionValidityReason = iota
+ SessionPastNotAfter = iota
+ SessionPastTimebox = iota
+ SessionTimedOut = iota
+)
+
+func (s *Session) CheckValidity(now time.Time, refreshTokenTime *time.Time, timebox, inactivityTimeout *time.Duration) SessionValidityReason {
+ if s.NotAfter != nil && now.After(*s.NotAfter) {
+ return SessionPastNotAfter
+ }
+
+ if timebox != nil && *timebox != 0 && now.After(s.CreatedAt.Add(*timebox)) {
+ return SessionPastTimebox
+ }
+
+ if inactivityTimeout != nil && *inactivityTimeout != 0 && now.After(s.LastRefreshedAt(refreshTokenTime).Add(*inactivityTimeout)) {
+ return SessionTimedOut
+ }
+
+ return SessionValid
+}
+
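CheckValidity above evaluates three independent cutoffs in order: a fixed not-after timestamp, a timebox from creation, and an inactivity timeout from the last refresh. The same decision ladder as a standalone function (hypothetical signature, illustration only):

package main

import (
	"fmt"
	"time"
)

// sessionValid reproduces the decision order of Session.CheckValidity: a hard
// not-after cutoff, a timebox measured from creation, and an inactivity
// timeout measured from the last refresh. Zero durations disable a check.
func sessionValid(now, createdAt, lastRefreshedAt time.Time, notAfter *time.Time, timebox, inactivityTimeout time.Duration) bool {
	if notAfter != nil && now.After(*notAfter) {
		return false
	}
	if timebox != 0 && now.After(createdAt.Add(timebox)) {
		return false
	}
	if inactivityTimeout != 0 && now.After(lastRefreshedAt.Add(inactivityTimeout)) {
		return false
	}
	return true
}

func main() {
	now := time.Now()
	created := now.Add(-2 * time.Hour)
	refreshed := now.Add(-30 * time.Minute)
	fmt.Println(sessionValid(now, created, refreshed, nil, 24*time.Hour, time.Hour)) // true
	fmt.Println(sessionValid(now, created, refreshed, nil, time.Hour, time.Hour))    // false: past timebox
}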
+func (s *Session) DetermineTag(tags []string) string {
+ if len(tags) == 0 {
+ return ""
+ }
+
+ if s.Tag == nil {
+ return tags[0]
+ }
+
+ tag := *s.Tag
+ if tag == "" {
+ return tags[0]
+ }
+
+ for _, t := range tags {
+ if t == tag {
+ return tag
+ }
+ }
+
+ return tags[0]
+}
+
+func NewSession(userID uuid.UUID, factorID *uuid.UUID) (*Session, error) {
+ id := uuid.Must(uuid.NewV4())
+
+ defaultAAL := AAL1.String()
+
+ session := &Session{
+ ID: id,
+ AAL: &defaultAAL,
+ UserID: userID,
+ FactorID: factorID,
+ }
+
+ return session, nil
+}
+
+// FindSessionByID looks up a Session by the provided id. If forUpdate is set
+// to true, then the SELECT statement used by the query has the form SELECT ...
+// FOR UPDATE SKIP LOCKED. This means that a FOR UPDATE lock will only be
+// acquired if there's no other lock. In case there is a lock, an
+// IsNotFound(err) error will be returned.
+func FindSessionByID(tx *storage.Connection, id uuid.UUID, forUpdate bool) (*Session, error) {
+ session := &Session{}
+
+ if forUpdate {
+ // pop does not provide us with a way to execute FOR UPDATE
+ // queries which lock the rows affected by the query from
+ // being accessed by any other transaction that also uses FOR
+ // UPDATE
+ if err := tx.RawQuery(fmt.Sprintf("SELECT * FROM %q WHERE id = ? LIMIT 1 FOR UPDATE SKIP LOCKED;", session.TableName()), id).First(session); err != nil {
+ if errors.Cause(err) == sql.ErrNoRows {
+ return nil, SessionNotFoundError{}
+ }
+
+ return nil, err
+ }
+ }
+
+ // once the rows are locked (if forUpdate was true), we can query again using pop
+ if err := tx.Eager().Q().Where("id = ?", id).First(session); err != nil {
+ if errors.Cause(err) == sql.ErrNoRows {
+ return nil, SessionNotFoundError{}
+ }
+ return nil, errors.Wrap(err, "error finding session")
+ }
+ return session, nil
+}
+
+func FindSessionByUserID(tx *storage.Connection, userId uuid.UUID) (*Session, error) {
+ session := &Session{}
+ if err := tx.Eager().Q().Where("user_id = ?", userId).Order("created_at asc").First(session); err != nil {
+ if errors.Cause(err) == sql.ErrNoRows {
+ return nil, SessionNotFoundError{}
+ }
+ return nil, errors.Wrap(err, "error finding session")
+ }
+ return session, nil
+}
+
+func FindSessionsByFactorID(tx *storage.Connection, factorID uuid.UUID) ([]*Session, error) {
+ sessions := []*Session{}
+ if err := tx.Q().Where("factor_id = ?", factorID).All(&sessions); err != nil {
+ return nil, err
+ }
+ return sessions, nil
+}
+
+// FindAllSessionsForUser finds all of the sessions for a user. If forUpdate is
+// set, it will first lock on the user row which can be used to prevent issues
+// with concurrency. If the lock cannot be acquired, it will return a
+// UserNotFoundError and the operation should be retried. If there are no
+// sessions for the user, a nil result is returned without an error.
+func FindAllSessionsForUser(tx *storage.Connection, userId uuid.UUID, forUpdate bool) ([]*Session, error) {
+ if forUpdate {
+ user := &User{}
+ if err := tx.RawQuery(fmt.Sprintf("SELECT id FROM %q WHERE id = ? LIMIT 1 FOR UPDATE SKIP LOCKED;", user.TableName()), userId).First(user); err != nil {
+ if errors.Cause(err) == sql.ErrNoRows {
+ return nil, UserNotFoundError{}
+ }
+
+ return nil, err
+ }
+ }
+
+ var sessions []*Session
+ if err := tx.Where("user_id = ?", userId).All(&sessions); err != nil {
+ if errors.Cause(err) == sql.ErrNoRows {
+ return nil, nil
+ }
+
+ return nil, err
+ }
+
+ return sessions, nil
+}
+
+func updateFactorAssociatedSessions(tx *storage.Connection, userID, factorID uuid.UUID, aal string) error {
+ return tx.RawQuery("UPDATE "+(&pop.Model{Value: Session{}}).TableName()+" set aal = ?, factor_id = ? WHERE user_id = ? AND factor_id = ?", aal, nil, userID, factorID).Exec()
+}
+
+func InvalidateSessionsWithAALLessThan(tx *storage.Connection, userID uuid.UUID, level string) error {
+ return tx.RawQuery("DELETE FROM "+(&pop.Model{Value: Session{}}).TableName()+" WHERE user_id = ? AND aal < ?", userID, level).Exec()
+}
+
+// Logout deletes all sessions for a user.
+func Logout(tx *storage.Connection, userId uuid.UUID) error {
+ return tx.RawQuery("DELETE FROM "+(&pop.Model{Value: Session{}}).TableName()+" WHERE user_id = ?", userId).Exec()
+}
+
+// LogoutSession deletes the current session for a user
+func LogoutSession(tx *storage.Connection, sessionId uuid.UUID) error {
+ return tx.RawQuery("DELETE FROM "+(&pop.Model{Value: Session{}}).TableName()+" WHERE id = ?", sessionId).Exec()
+}
+
+// LogoutAllExceptMe deletes all sessions for a user except the current one
+func LogoutAllExceptMe(tx *storage.Connection, sessionId uuid.UUID, userID uuid.UUID) error {
+ return tx.RawQuery("DELETE FROM "+(&pop.Model{Value: Session{}}).TableName()+" WHERE id != ? AND user_id = ?", sessionId, userID).Exec()
+}
+
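FindSessionByID and FindAllSessionsForUser above lean on SELECT ... FOR UPDATE SKIP LOCKED: a row already locked by another transaction is skipped rather than waited on, surfacing as sql.ErrNoRows, which the callers map to a not-found error so the operation can be retried. A compilable sketch of the pattern with database/sql (hypothetical table name and helper; actually running it needs a live Postgres connection):

package main

import (
	"database/sql"
	"fmt"
)

// lockSessionRow shows the SELECT ... FOR UPDATE SKIP LOCKED shape used by
// the helpers above. With SKIP LOCKED, a row held by another transaction is
// skipped instead of waited on, so the query returns sql.ErrNoRows and the
// caller treats the row as unavailable for now.
func lockSessionRow(tx *sql.Tx, id string) error {
	var got string
	err := tx.QueryRow(
		`SELECT id FROM sessions WHERE id = $1 LIMIT 1 FOR UPDATE SKIP LOCKED`, id,
	).Scan(&got)
	if err == sql.ErrNoRows {
		return fmt.Errorf("session %s is locked or missing; retry later", id)
	}
	return err
}

func main() {
	// Illustration only: wiring up a real *sql.Tx needs a driver and DSN.
	fmt.Println("see lockSessionRow for the SKIP LOCKED pattern")
}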
AND user_id = ?", sessionId, userID).Exec() +} + +func (s *Session) UpdateAALAndAssociatedFactor(tx *storage.Connection, aal AuthenticatorAssuranceLevel, factorID *uuid.UUID) error { + s.FactorID = factorID + aalAsString := aal.String() + s.AAL = &aalAsString + return tx.UpdateOnly(s, "aal", "factor_id") +} + +func (s *Session) CalculateAALAndAMR(user *User) (aal AuthenticatorAssuranceLevel, amr []AMREntry, err error) { + amr, aal = []AMREntry{}, AAL1 + for _, claim := range s.AMRClaims { + if claim.IsAAL2Claim() { + aal = AAL2 + } + amr = append(amr, AMREntry{Method: claim.GetAuthenticationMethod(), Timestamp: claim.UpdatedAt.Unix()}) + } + + // makes sure that the AMR claims are always ordered most-recent first + + // sort in ascending order + sort.Sort(sortAMREntries{ + Array: amr, + }) + + // now reverse for descending order + _ = sort.Reverse(sortAMREntries{ + Array: amr, + }) + + lastIndex := len(amr) - 1 + + if lastIndex > -1 && amr[lastIndex].Method == SSOSAML.String() { + // initial AMR claim is from sso/saml, we need to add information + // about the provider that was used for the authentication + identities := user.Identities + + if len(identities) == 1 { + identity := identities[0] + + if identity.IsForSSOProvider() { + amr[lastIndex].Provider = strings.TrimPrefix(identity.Provider, "sso:") + } + } + + // otherwise we can't identify that this user account has only + // one SSO identity, so we are not encoding the provider at + // this time + } + + return aal, amr, nil +} + +func (s *Session) GetAAL() string { + if s.AAL == nil { + return "" + } + return *(s.AAL) +} + +func (s *Session) IsAAL2() bool { + return s.GetAAL() == AAL2.String() +} + +// FindCurrentlyActiveRefreshToken returns the currently active refresh +// token in the session. This is the last created (ordered by the serial +// primary key) non-revoked refresh token for the session. +func (s *Session) FindCurrentlyActiveRefreshToken(tx *storage.Connection) (*RefreshToken, error) { + var activeRefreshToken RefreshToken + + if err := tx.Q().Where("session_id = ? 
and revoked is false", s.ID).Order("id desc").First(&activeRefreshToken); err != nil { + if errors.Cause(err) == sql.ErrNoRows || errors.Is(err, sql.ErrNoRows) { + return nil, RefreshTokenNotFoundError{} + } + + return nil, err + } + + return &activeRefreshToken, nil +} diff --git a/auth_v2.169.0/internal/models/sessions_test.go b/auth_v2.169.0/internal/models/sessions_test.go new file mode 100644 index 0000000..9dce78e --- /dev/null +++ b/auth_v2.169.0/internal/models/sessions_test.go @@ -0,0 +1,104 @@ +package models + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/storage" + "github.com/supabase/auth/internal/storage/test" +) + +type SessionsTestSuite struct { + suite.Suite + db *storage.Connection + Config *conf.GlobalConfiguration +} + +func (ts *SessionsTestSuite) SetupTest() { + TruncateAll(ts.db) + email := "test@example.com" + user, err := NewUser("", email, "secret", ts.Config.JWT.Aud, nil) + require.NoError(ts.T(), err) + + err = ts.db.Create(user) + require.NoError(ts.T(), err) +} + +func TestSession(t *testing.T) { + globalConfig, err := conf.LoadGlobal(modelsTestConfig) + require.NoError(t, err) + conn, err := test.SetupDBConnection(globalConfig) + require.NoError(t, err) + ts := &SessionsTestSuite{ + db: conn, + Config: globalConfig, + } + defer ts.db.Close() + suite.Run(t, ts) +} + +func (ts *SessionsTestSuite) TestFindBySessionIDWithForUpdate() { + u, err := FindUserByEmailAndAudience(ts.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + session, err := NewSession(u.ID, nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(session)) + + found, err := FindSessionByID(ts.db, session.ID, true) + require.NoError(ts.T(), err) + + require.Equal(ts.T(), session.ID, found.ID) +} + +func (ts *SessionsTestSuite) AddClaimAndReloadSession(session *Session, claim AuthenticationMethod) *Session { + err := AddClaimToSession(ts.db, session.ID, claim) + require.NoError(ts.T(), err) + session, err = FindSessionByID(ts.db, session.ID, false) + require.NoError(ts.T(), err) + return session +} + +func (ts *SessionsTestSuite) TestCalculateAALAndAMR() { + totalDistinctClaims := 3 + u, err := FindUserByEmailAndAudience(ts.db, "test@example.com", ts.Config.JWT.Aud) + require.NoError(ts.T(), err) + session, err := NewSession(u.ID, nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(session)) + + session = ts.AddClaimAndReloadSession(session, PasswordGrant) + + firstClaimAddedTime := time.Now() + session = ts.AddClaimAndReloadSession(session, TOTPSignIn) + + _, _, err = session.CalculateAALAndAMR(u) + require.NoError(ts.T(), err) + + session = ts.AddClaimAndReloadSession(session, TOTPSignIn) + + session = ts.AddClaimAndReloadSession(session, SSOSAML) + + aal, amr, err := session.CalculateAALAndAMR(u) + require.NoError(ts.T(), err) + + require.Equal(ts.T(), AAL2, aal) + require.Equal(ts.T(), totalDistinctClaims, len(amr)) + + found := false + for _, claim := range session.AMRClaims { + if claim.GetAuthenticationMethod() == TOTPSignIn.String() { + require.True(ts.T(), firstClaimAddedTime.Before(claim.UpdatedAt)) + found = true + } + } + + for _, claim := range amr { + if claim.Method == SSOSAML.String() { + require.NotNil(ts.T(), claim.Provider) + } + } + require.True(ts.T(), found) +} diff --git a/auth_v2.169.0/internal/models/sso.go b/auth_v2.169.0/internal/models/sso.go new file mode 
100644 index 0000000..28c2429 --- /dev/null +++ b/auth_v2.169.0/internal/models/sso.go @@ -0,0 +1,262 @@ +package models + +import ( + "database/sql" + "database/sql/driver" + "encoding/json" + "reflect" + "strings" + "time" + + "github.com/crewjam/saml" + "github.com/crewjam/saml/samlsp" + "github.com/gofrs/uuid" + "github.com/pkg/errors" + "github.com/supabase/auth/internal/storage" +) + +type SSOProvider struct { + ID uuid.UUID `db:"id" json:"id"` + + SAMLProvider SAMLProvider `has_one:"saml_providers" fk_id:"sso_provider_id" json:"saml,omitempty"` + SSODomains []SSODomain `has_many:"sso_domains" fk_id:"sso_provider_id" json:"domains"` + + CreatedAt time.Time `db:"created_at" json:"created_at"` + UpdatedAt time.Time `db:"updated_at" json:"updated_at"` +} + +func (p SSOProvider) TableName() string { + return "sso_providers" +} + +func (p SSOProvider) Type() string { + return "saml" +} + +type SAMLAttribute struct { + Name string `json:"name,omitempty"` + Names []string `json:"names,omitempty"` + Default interface{} `json:"default,omitempty"` + Array bool `json:"array,omitempty"` +} + +type SAMLAttributeMapping struct { + Keys map[string]SAMLAttribute `json:"keys,omitempty"` +} + +func (m *SAMLAttributeMapping) Equal(o *SAMLAttributeMapping) bool { + if m == o { + return true + } + + if m == nil || o == nil { + return false + } + + if m.Keys == nil && o.Keys == nil { + return true + } + + if len(m.Keys) != len(o.Keys) { + return false + } + + for mkey, mvalue := range m.Keys { + value, ok := o.Keys[mkey] + if !ok { + return false + } + + if mvalue.Name != value.Name || len(mvalue.Names) != len(value.Names) { + return false + } + + for i := 0; i < len(mvalue.Names); i += 1 { + if mvalue.Names[i] != value.Names[i] { + return false + } + } + + if !reflect.DeepEqual(mvalue.Default, value.Default) { + return false + } + + if mvalue.Array != value.Array { + return false + } + } + + return true +} + +func (m *SAMLAttributeMapping) Scan(src interface{}) error { + b, ok := src.([]byte) + if !ok { + return errors.New("scan source was not []byte") + } + err := json.Unmarshal(b, m) + if err != nil { + return err + } + return nil +} + +func (m SAMLAttributeMapping) Value() (driver.Value, error) { + b, err := json.Marshal(m) + if err != nil { + return nil, err + } + return string(b), nil +} + +type SAMLProvider struct { + ID uuid.UUID `db:"id" json:"-"` + + SSOProvider *SSOProvider `belongs_to:"sso_providers" json:"-"` + SSOProviderID uuid.UUID `db:"sso_provider_id" json:"-"` + + EntityID string `db:"entity_id" json:"entity_id"` + MetadataXML string `db:"metadata_xml" json:"metadata_xml,omitempty"` + MetadataURL *string `db:"metadata_url" json:"metadata_url,omitempty"` + + AttributeMapping SAMLAttributeMapping `db:"attribute_mapping" json:"attribute_mapping,omitempty"` + + NameIDFormat *string `db:"name_id_format" json:"name_id_format,omitempty"` + + CreatedAt time.Time `db:"created_at" json:"-"` + UpdatedAt time.Time `db:"updated_at" json:"-"` +} + +func (p SAMLProvider) TableName() string { + return "saml_providers" +} + +func (p SAMLProvider) EntityDescriptor() (*saml.EntityDescriptor, error) { + return samlsp.ParseMetadata([]byte(p.MetadataXML)) +} + +type SSODomain struct { + ID uuid.UUID `db:"id" json:"-"` + + SSOProvider *SSOProvider `belongs_to:"sso_providers" json:"-"` + SSOProviderID uuid.UUID `db:"sso_provider_id" json:"-"` + + Domain string `db:"domain" json:"domain"` + + CreatedAt time.Time `db:"created_at" json:"-"` + UpdatedAt time.Time `db:"updated_at" json:"-"` +} + +func (d SSODomain) 
TableName() string { + return "sso_domains" +} + +type SAMLRelayState struct { + ID uuid.UUID `db:"id"` + + SSOProviderID uuid.UUID `db:"sso_provider_id"` + + RequestID string `db:"request_id"` + ForEmail *string `db:"for_email"` + + RedirectTo string `db:"redirect_to"` + + CreatedAt time.Time `db:"created_at" json:"-"` + UpdatedAt time.Time `db:"updated_at" json:"-"` + FlowStateID *uuid.UUID `db:"flow_state_id" json:"flow_state_id,omitempty"` + FlowState *FlowState `db:"-" json:"flow_state,omitempty" belongs_to:"flow_state"` +} + +func (s SAMLRelayState) TableName() string { + return "saml_relay_states" +} + +func FindSAMLProviderByEntityID(tx *storage.Connection, entityId string) (*SSOProvider, error) { + var samlProvider SAMLProvider + if err := tx.Q().Where("entity_id = ?", entityId).First(&samlProvider); err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, SSOProviderNotFoundError{} + } + + return nil, errors.Wrap(err, "error finding SAML SSO provider by EntityID") + } + + var ssoProvider SSOProvider + if err := tx.Eager().Q().Where("id = ?", samlProvider.SSOProviderID).First(&ssoProvider); err != nil { + return nil, errors.Wrap(err, "error finding SAML SSO provider by ID (via EntityID)") + } + + return &ssoProvider, nil +} + +func FindSSOProviderByID(tx *storage.Connection, id uuid.UUID) (*SSOProvider, error) { + var ssoProvider SSOProvider + + if err := tx.Eager().Q().Where("id = ?", id).First(&ssoProvider); err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, SSOProviderNotFoundError{} + } + + return nil, errors.Wrap(err, "error finding SAML SSO provider by ID") + } + + return &ssoProvider, nil +} + +func FindSSOProviderForEmailAddress(tx *storage.Connection, emailAddress string) (*SSOProvider, error) { + parts := strings.Split(emailAddress, "@") + emailDomain := strings.ToLower(parts[1]) + + return FindSSOProviderByDomain(tx, emailDomain) +} + +func FindSSOProviderByDomain(tx *storage.Connection, domain string) (*SSOProvider, error) { + var ssoDomain SSODomain + + if err := tx.Q().Where("domain = ?", domain).First(&ssoDomain); err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, SSOProviderNotFoundError{} + } + + return nil, errors.Wrap(err, "error finding SAML SSO domain") + } + + var ssoProvider SSOProvider + if err := tx.Eager().Q().Where("id = ?", ssoDomain.SSOProviderID).First(&ssoProvider); err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, SSOProviderNotFoundError{} + } + + return nil, errors.Wrap(err, "error finding SAML SSO provider by ID (via domain)") + } + + return &ssoProvider, nil +} + +func FindAllSAMLProviders(tx *storage.Connection) ([]SSOProvider, error) { + var providers []SSOProvider + + if err := tx.Eager().All(&providers); err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, nil + } + + return nil, errors.Wrap(err, "error loading all SAML SSO providers") + } + + return providers, nil +} + +func FindSAMLRelayStateByID(tx *storage.Connection, id uuid.UUID) (*SAMLRelayState, error) { + var state SAMLRelayState + + if err := tx.Eager().Q().Where("id = ?", id).First(&state); err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, SAMLRelayStateNotFoundError{} + } + + return nil, errors.Wrap(err, "error loading SAML Relay State") + } + + return &state, nil +} diff --git a/auth_v2.169.0/internal/models/sso_test.go b/auth_v2.169.0/internal/models/sso_test.go new file mode 100644 index 0000000..b6c9656 --- /dev/null +++ b/auth_v2.169.0/internal/models/sso_test.go @@ -0,0 
+1,232 @@ +package models + +import ( + tst "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/storage" + "github.com/supabase/auth/internal/storage/test" +) + +type SSOTestSuite struct { + suite.Suite + + db *storage.Connection +} + +func (ts *SSOTestSuite) SetupTest() { + TruncateAll(ts.db) +} + +func TestSSO(t *tst.T) { + globalConfig, err := conf.LoadGlobal(modelsTestConfig) + require.NoError(t, err) + + conn, err := test.SetupDBConnection(globalConfig) + require.NoError(t, err) + + ts := &SSOTestSuite{ + db: conn, + } + defer ts.db.Close() + + suite.Run(t, ts) +} + +func (ts *SSOTestSuite) TestConstraints() { + type exampleSpec struct { + Provider *SSOProvider + } + + examples := []exampleSpec{ + { + Provider: &SSOProvider{ + SAMLProvider: SAMLProvider{ + EntityID: "", + MetadataXML: "", + }, + }, + }, + { + Provider: &SSOProvider{ + SAMLProvider: SAMLProvider{ + EntityID: "https://example.com/saml/metadata", + MetadataXML: "", + }, + }, + }, + { + Provider: &SSOProvider{ + SAMLProvider: SAMLProvider{ + EntityID: "https://example.com/saml/metadata", + MetadataXML: "", + }, + SSODomains: []SSODomain{ + { + Domain: "", + }, + }, + }, + }, + } + + for i, example := range examples { + require.Error(ts.T(), ts.db.Eager().Create(example.Provider), "Example %d should have failed with error", i) + } +} + +func (ts *SSOTestSuite) TestDomainUniqueness() { + require.NoError(ts.T(), ts.db.Eager().Create(&SSOProvider{ + SAMLProvider: SAMLProvider{ + EntityID: "https://example.com/saml/metadata1", + MetadataXML: "", + }, + SSODomains: []SSODomain{ + { + Domain: "example.com", + }, + }, + })) + + require.Error(ts.T(), ts.db.Eager().Create(&SSOProvider{ + SAMLProvider: SAMLProvider{ + EntityID: "https://example.com/saml/metadata2", + MetadataXML: "", + }, + SSODomains: []SSODomain{ + { + Domain: "example.com", + }, + }, + })) +} + +func (ts *SSOTestSuite) TestEntityIDUniqueness() { + require.NoError(ts.T(), ts.db.Eager().Create(&SSOProvider{ + SAMLProvider: SAMLProvider{ + EntityID: "https://example.com/saml/metadata", + MetadataXML: "", + }, + SSODomains: []SSODomain{ + { + Domain: "example.com", + }, + }, + })) + + require.Error(ts.T(), ts.db.Eager().Create(&SSOProvider{ + SAMLProvider: SAMLProvider{ + EntityID: "https://example.com/saml/metadata", + MetadataXML: "", + }, + SSODomains: []SSODomain{ + { + Domain: "example.net", + }, + }, + })) +} + +func (ts *SSOTestSuite) TestFindSSOProviderForEmailAddress() { + provider := &SSOProvider{ + SAMLProvider: SAMLProvider{ + EntityID: "https://example.com/saml/metadata", + MetadataXML: "", + }, + SSODomains: []SSODomain{ + { + Domain: "example.com", + }, + { + Domain: "example.org", + }, + }, + } + + require.NoError(ts.T(), ts.db.Eager().Create(provider), "provider creation failed") + + type exampleSpec struct { + Address string + Provider *SSOProvider + } + + examples := []exampleSpec{ + { + Address: "someone@example.com", + Provider: provider, + }, + { + Address: "someone@example.org", + Provider: provider, + }, + { + Address: "someone@example.net", + Provider: nil, + }, + } + + for i, example := range examples { + rp, err := FindSSOProviderForEmailAddress(ts.db, example.Address) + + if nil == example.Provider { + require.Nil(ts.T(), rp) + require.True(ts.T(), IsNotFoundError(err), "Example %d failed with error %w", i, err) + } else { + require.Nil(ts.T(), err, "Example %d failed with error %w", i, err) + require.Equal(ts.T(), rp.ID, 
example.Provider.ID)
+		}
+	}
+}
+
+func (ts *SSOTestSuite) TestFindSAMLProviderByEntityID() {
+	provider := &SSOProvider{
+		SAMLProvider: SAMLProvider{
+			EntityID:    "https://example.com/saml/metadata",
+			MetadataXML: "",
+		},
+		SSODomains: []SSODomain{
+			{
+				Domain: "example.com",
+			},
+			{
+				Domain: "example.org",
+			},
+		},
+	}
+
+	require.NoError(ts.T(), ts.db.Eager().Create(provider))
+
+	type exampleSpec struct {
+		EntityID string
+		Provider *SSOProvider
+	}
+
+	examples := []exampleSpec{
+		{
+			EntityID: "https://example.com/saml/metadata",
+			Provider: provider,
+		},
+		{
+			EntityID: "https://example.com/saml/metadata/",
+			Provider: nil,
+		},
+		{
+			EntityID: "",
+			Provider: nil,
+		},
+	}
+
+	for i, example := range examples {
+		rp, err := FindSAMLProviderByEntityID(ts.db, example.EntityID)
+
+		if nil == example.Provider {
+			require.True(ts.T(), IsNotFoundError(err), "Example %d failed with error", i)
+			require.Nil(ts.T(), rp)
+		} else {
+			require.Nil(ts.T(), err, "Example %d failed with error %v", i, err)
+			require.Equal(ts.T(), rp.ID, example.Provider.ID)
+		}
+	}
+}
diff --git a/auth_v2.169.0/internal/models/user.go b/auth_v2.169.0/internal/models/user.go
new file mode 100644
index 0000000..3b16a54
--- /dev/null
+++ b/auth_v2.169.0/internal/models/user.go
@@ -0,0 +1,989 @@
+package models
+
+import (
+	"context"
+	"crypto/sha256"
+	"database/sql"
+	"encoding/base64"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/go-webauthn/webauthn/webauthn"
+	"github.com/gobuffalo/pop/v6"
+	"github.com/gofrs/uuid"
+	"github.com/pkg/errors"
+	"github.com/supabase/auth/internal/crypto"
+	"github.com/supabase/auth/internal/storage"
+	"golang.org/x/crypto/bcrypt"
+)
+
+// User represents a registered user with email/password authentication
+type User struct {
+	ID uuid.UUID `json:"id" db:"id"`
+
+	Aud       string             `json:"aud" db:"aud"`
+	Role      string             `json:"role" db:"role"`
+	Email     storage.NullString `json:"email" db:"email"`
+	IsSSOUser bool               `json:"-" db:"is_sso_user"`
+
+	EncryptedPassword *string    `json:"-" db:"encrypted_password"`
+	EmailConfirmedAt  *time.Time `json:"email_confirmed_at,omitempty" db:"email_confirmed_at"`
+	InvitedAt         *time.Time `json:"invited_at,omitempty" db:"invited_at"`
+
+	Phone            storage.NullString `json:"phone" db:"phone"`
+	PhoneConfirmedAt *time.Time         `json:"phone_confirmed_at,omitempty" db:"phone_confirmed_at"`
+
+	ConfirmationToken  string     `json:"-" db:"confirmation_token"`
+	ConfirmationSentAt *time.Time `json:"confirmation_sent_at,omitempty" db:"confirmation_sent_at"`
+
+	// For backward compatibility only. Use EmailConfirmedAt or PhoneConfirmedAt instead.
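+	// Read-only from the application's side (note the rw:"r" struct tag on
+	// the field below); the value is maintained by the database.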
+ ConfirmedAt *time.Time `json:"confirmed_at,omitempty" db:"confirmed_at" rw:"r"` + + RecoveryToken string `json:"-" db:"recovery_token"` + RecoverySentAt *time.Time `json:"recovery_sent_at,omitempty" db:"recovery_sent_at"` + + EmailChangeTokenCurrent string `json:"-" db:"email_change_token_current"` + EmailChangeTokenNew string `json:"-" db:"email_change_token_new"` + EmailChange string `json:"new_email,omitempty" db:"email_change"` + EmailChangeSentAt *time.Time `json:"email_change_sent_at,omitempty" db:"email_change_sent_at"` + EmailChangeConfirmStatus int `json:"-" db:"email_change_confirm_status"` + + PhoneChangeToken string `json:"-" db:"phone_change_token"` + PhoneChange string `json:"new_phone,omitempty" db:"phone_change"` + PhoneChangeSentAt *time.Time `json:"phone_change_sent_at,omitempty" db:"phone_change_sent_at"` + + ReauthenticationToken string `json:"-" db:"reauthentication_token"` + ReauthenticationSentAt *time.Time `json:"reauthentication_sent_at,omitempty" db:"reauthentication_sent_at"` + + LastSignInAt *time.Time `json:"last_sign_in_at,omitempty" db:"last_sign_in_at"` + + AppMetaData JSONMap `json:"app_metadata" db:"raw_app_meta_data"` + UserMetaData JSONMap `json:"user_metadata" db:"raw_user_meta_data"` + + Factors []Factor `json:"factors,omitempty" has_many:"factors"` + Identities []Identity `json:"identities" has_many:"identities"` + + CreatedAt time.Time `json:"created_at" db:"created_at"` + UpdatedAt time.Time `json:"updated_at" db:"updated_at"` + BannedUntil *time.Time `json:"banned_until,omitempty" db:"banned_until"` + DeletedAt *time.Time `json:"deleted_at,omitempty" db:"deleted_at"` + IsAnonymous bool `json:"is_anonymous" db:"is_anonymous"` + + DONTUSEINSTANCEID uuid.UUID `json:"-" db:"instance_id"` +} + +func NewUserWithPasswordHash(phone, email, passwordHash, aud string, userData map[string]interface{}) (*User, error) { + if strings.HasPrefix(passwordHash, crypto.Argon2Prefix) { + _, err := crypto.ParseArgon2Hash(passwordHash) + if err != nil { + return nil, err + } + } else if strings.HasPrefix(passwordHash, crypto.FirebaseScryptPrefix) { + _, err := crypto.ParseFirebaseScryptHash(passwordHash) + if err != nil { + return nil, err + } + } else { + // verify that the hash is a bcrypt hash + _, err := bcrypt.Cost([]byte(passwordHash)) + if err != nil { + return nil, err + } + } + id := uuid.Must(uuid.NewV4()) + user := &User{ + ID: id, + Aud: aud, + Email: storage.NullString(strings.ToLower(email)), + Phone: storage.NullString(phone), + UserMetaData: userData, + EncryptedPassword: &passwordHash, + } + return user, nil +} + +// NewUser initializes a new user from an email, password and user data. 
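+//
+// For example (an illustrative sketch; the audience value and the tx
+// variable are assumptions, not part of this file):
+//
+//	user, err := NewUser("", "jane@example.com", "s3cret", "authenticated", nil)
+//	if err != nil {
+//		return err
+//	}
+//	if err := tx.Create(user); err != nil {
+//		return err
+//	}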
+func NewUser(phone, email, password, aud string, userData map[string]interface{}) (*User, error) { + passwordHash := "" + + if password != "" { + pw, err := crypto.GenerateFromPassword(context.Background(), password) + if err != nil { + return nil, err + } + + passwordHash = pw + } + + if userData == nil { + userData = make(map[string]interface{}) + } + + id := uuid.Must(uuid.NewV4()) + user := &User{ + ID: id, + Aud: aud, + Email: storage.NullString(strings.ToLower(email)), + Phone: storage.NullString(phone), + UserMetaData: userData, + EncryptedPassword: &passwordHash, + } + return user, nil +} + +// TableName overrides the table name used by pop +func (User) TableName() string { + tableName := "users" + return tableName +} + +func (u *User) HasPassword() bool { + var pwd string + + if u.EncryptedPassword != nil { + pwd = *u.EncryptedPassword + } + + return pwd != "" +} + +// BeforeSave is invoked before the user is saved to the database +func (u *User) BeforeSave(tx *pop.Connection) error { + if u.EmailConfirmedAt != nil && u.EmailConfirmedAt.IsZero() { + u.EmailConfirmedAt = nil + } + if u.PhoneConfirmedAt != nil && u.PhoneConfirmedAt.IsZero() { + u.PhoneConfirmedAt = nil + } + if u.InvitedAt != nil && u.InvitedAt.IsZero() { + u.InvitedAt = nil + } + if u.ConfirmationSentAt != nil && u.ConfirmationSentAt.IsZero() { + u.ConfirmationSentAt = nil + } + if u.RecoverySentAt != nil && u.RecoverySentAt.IsZero() { + u.RecoverySentAt = nil + } + if u.EmailChangeSentAt != nil && u.EmailChangeSentAt.IsZero() { + u.EmailChangeSentAt = nil + } + if u.PhoneChangeSentAt != nil && u.PhoneChangeSentAt.IsZero() { + u.PhoneChangeSentAt = nil + } + if u.ReauthenticationSentAt != nil && u.ReauthenticationSentAt.IsZero() { + u.ReauthenticationSentAt = nil + } + if u.LastSignInAt != nil && u.LastSignInAt.IsZero() { + u.LastSignInAt = nil + } + if u.BannedUntil != nil && u.BannedUntil.IsZero() { + u.BannedUntil = nil + } + return nil +} + +// IsConfirmed checks if a user has already been +// registered and confirmed. +func (u *User) IsConfirmed() bool { + return u.EmailConfirmedAt != nil +} + +// HasBeenInvited checks if user has been invited +func (u *User) HasBeenInvited() bool { + return u.InvitedAt != nil +} + +// IsPhoneConfirmed checks if a user's phone has already been +// registered and confirmed. +func (u *User) IsPhoneConfirmed() bool { + return u.PhoneConfirmedAt != nil +} + +// SetRole sets the users Role to roleName +func (u *User) SetRole(tx *storage.Connection, roleName string) error { + u.Role = strings.TrimSpace(roleName) + return tx.UpdateOnly(u, "role") +} + +// HasRole returns true when the users role is set to roleName +func (u *User) HasRole(roleName string) bool { + return u.Role == roleName +} + +// GetEmail returns the user's email as a string +func (u *User) GetEmail() string { + return string(u.Email) +} + +// GetPhone returns the user's phone number as a string +func (u *User) GetPhone() string { + return string(u.Phone) +} + +// UpdateUserMetaData sets all user data from a map of updates, +// ensuring that it doesn't override attributes that are not +// in the provided map. 
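+//
+// For example (illustrative values): starting from metadata {"a": 1, "b": 2},
+//
+//	_ = u.UpdateUserMetaData(tx, map[string]interface{}{"b": nil, "c": 3})
+//
+// leaves {"a": 1, "c": 3}: a nil value deletes its key, any other value
+// upserts it, and keys absent from the update are kept unchanged.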
+func (u *User) UpdateUserMetaData(tx *storage.Connection, updates map[string]interface{}) error { + if u.UserMetaData == nil { + u.UserMetaData = updates + } else { + for key, value := range updates { + if value != nil { + u.UserMetaData[key] = value + } else { + delete(u.UserMetaData, key) + } + } + } + return tx.UpdateOnly(u, "raw_user_meta_data") +} + +// UpdateAppMetaData updates all app data from a map of updates +func (u *User) UpdateAppMetaData(tx *storage.Connection, updates map[string]interface{}) error { + if u.AppMetaData == nil { + u.AppMetaData = updates + } else { + for key, value := range updates { + if value != nil { + u.AppMetaData[key] = value + } else { + delete(u.AppMetaData, key) + } + } + } + return tx.UpdateOnly(u, "raw_app_meta_data") +} + +// UpdateAppMetaDataProviders updates the provider field in AppMetaData column +func (u *User) UpdateAppMetaDataProviders(tx *storage.Connection) error { + providers, terr := FindProvidersByUser(tx, u) + if terr != nil { + return terr + } + payload := map[string]interface{}{ + "providers": providers, + } + if len(providers) > 0 { + payload["provider"] = providers[0] + } + return u.UpdateAppMetaData(tx, payload) +} + +// UpdateUserEmail updates the user's email to one of the identity's email +// if the current email used doesn't match any of the identities email +func (u *User) UpdateUserEmailFromIdentities(tx *storage.Connection) error { + identities, terr := FindIdentitiesByUserID(tx, u.ID) + if terr != nil { + return terr + } + for _, i := range identities { + if u.GetEmail() == i.GetEmail() { + // there's an existing identity that uses the same email + // so the user's email can be kept + return nil + } + } + + var primaryIdentity *Identity + for _, i := range identities { + if _, terr := FindUserByEmailAndAudience(tx, i.GetEmail(), u.Aud); terr != nil { + if IsNotFoundError(terr) { + // the identity's email is not used by another user + // so we can set it as the primary identity + primaryIdentity = i + break + } + return terr + } + } + if primaryIdentity == nil { + return UserEmailUniqueConflictError{} + } + // default to the first identity's email + if terr := u.SetEmail(tx, primaryIdentity.GetEmail()); terr != nil { + return terr + } + if primaryIdentity.GetEmail() == "" { + u.EmailConfirmedAt = nil + if terr := tx.UpdateOnly(u, "email_confirmed_at"); terr != nil { + return terr + } + } + return nil +} + +// SetEmail sets the user's email +func (u *User) SetEmail(tx *storage.Connection, email string) error { + u.Email = storage.NullString(email) + return tx.UpdateOnly(u, "email") +} + +// SetPhone sets the user's phone +func (u *User) SetPhone(tx *storage.Connection, phone string) error { + u.Phone = storage.NullString(phone) + return tx.UpdateOnly(u, "phone") +} + +func (u *User) SetPassword(ctx context.Context, password string, encrypt bool, encryptionKeyID, encryptionKey string) error { + if password == "" { + u.EncryptedPassword = nil + return nil + } + + pw, err := crypto.GenerateFromPassword(ctx, password) + if err != nil { + return err + } + + u.EncryptedPassword = &pw + if encrypt { + es, err := crypto.NewEncryptedString(u.ID.String(), []byte(pw), encryptionKeyID, encryptionKey) + if err != nil { + return err + } + + encryptedPassword := es.String() + u.EncryptedPassword = &encryptedPassword + } + + return nil +} + +// UpdatePassword updates the user's password. Use SetPassword outside of a transaction first! 
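+//
+// A typical sequence looks like this (a sketch; ctx, db, newPassword and
+// sessionID are assumed to come from the caller, and db.Transaction is the
+// transaction helper used elsewhere in this codebase):
+//
+//	if err := user.SetPassword(ctx, newPassword, false, "", ""); err != nil {
+//		return err
+//	}
+//	err := db.Transaction(func(tx *storage.Connection) error {
+//		return user.UpdatePassword(tx, &sessionID)
+//	})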
+func (u *User) UpdatePassword(tx *storage.Connection, sessionID *uuid.UUID) error {
+	// These need to be reset because password change may mean the user no longer trusts the actions performed by the previous password.
+	u.ConfirmationToken = ""
+	u.ConfirmationSentAt = nil
+	u.RecoveryToken = ""
+	u.RecoverySentAt = nil
+	u.EmailChangeTokenCurrent = ""
+	u.EmailChangeTokenNew = ""
+	u.EmailChangeSentAt = nil
+	u.PhoneChangeToken = ""
+	u.PhoneChangeSentAt = nil
+	u.ReauthenticationToken = ""
+	u.ReauthenticationSentAt = nil
+
+	if err := tx.UpdateOnly(u, "encrypted_password", "confirmation_token", "confirmation_sent_at", "recovery_token", "recovery_sent_at", "email_change_token_current", "email_change_token_new", "email_change_sent_at", "phone_change_token", "phone_change_sent_at", "reauthentication_token", "reauthentication_sent_at"); err != nil {
+		return err
+	}
+
+	if err := ClearAllOneTimeTokensForUser(tx, u.ID); err != nil {
+		return err
+	}
+
+	if sessionID == nil {
+		// log out user from all sessions to ensure reauthentication after password change
+		return Logout(tx, u.ID)
+	} else {
+		// log out user from all other sessions to ensure reauthentication after password change
+		return LogoutAllExceptMe(tx, *sessionID, u.ID)
+	}
+}
+
+// Authenticate a user from a password. It returns whether the password
+// matched, whether the stored hash should be re-encrypted, and any error.
+func (u *User) Authenticate(ctx context.Context, tx *storage.Connection, password string, decryptionKeys map[string]string, encrypt bool, encryptionKeyID string) (bool, bool, error) {
+	if u.EncryptedPassword == nil {
+		return false, false, nil
+	}
+
+	hash := *u.EncryptedPassword
+
+	if hash == "" {
+		return false, false, nil
+	}
+
+	es := crypto.ParseEncryptedString(hash)
+	if es != nil {
+		h, err := es.Decrypt(u.ID.String(), decryptionKeys)
+		if err != nil {
+			return false, false, err
+		}
+
+		hash = string(h)
+	}
+
+	compareErr := crypto.CompareHashAndPassword(ctx, hash, password)
+
+	if !strings.HasPrefix(hash, crypto.Argon2Prefix) && !strings.HasPrefix(hash, crypto.FirebaseScryptPrefix) {
+		// check if cost exceeds default cost or is too low
+		cost, err := bcrypt.Cost([]byte(hash))
+		if err != nil {
+			return compareErr == nil, false, err
+		}
+
+		if cost > bcrypt.DefaultCost || cost == bcrypt.MinCost {
+			// don't bother with encrypting the password in Authenticate
+			// since it's handled separately
+			if err := u.SetPassword(ctx, password, false, "", ""); err != nil {
+				return compareErr == nil, false, err
+			}
+		}
+	}
+
+	return compareErr == nil, encrypt && (es == nil || es.ShouldReEncrypt(encryptionKeyID)), nil
+}
+
+// ConfirmReauthentication resets the reauthentication token
+func (u *User) ConfirmReauthentication(tx *storage.Connection) error {
+	u.ReauthenticationToken = ""
+	if err := tx.UpdateOnly(u, "reauthentication_token"); err != nil {
+		return err
+	}
+
+	if err := ClearAllOneTimeTokensForUser(tx, u.ID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Confirm resets the confirmation token and sets the confirm timestamp
+func (u *User) Confirm(tx *storage.Connection) error {
+	u.ConfirmationToken = ""
+	now := time.Now()
+	u.EmailConfirmedAt = &now
+
+	if err := tx.UpdateOnly(u, "confirmation_token", "email_confirmed_at"); err != nil {
+		return err
+	}
+
+	if err := u.UpdateUserMetaData(tx, map[string]interface{}{
+		"email_verified": true,
+	}); err != nil {
+		return err
+	}
+
+	if err := ClearAllOneTimeTokensForUser(tx, u.ID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// ConfirmPhone resets the confirmation token and sets the confirm timestamp
+func (u *User) ConfirmPhone(tx *storage.Connection) error {
+	u.ConfirmationToken = ""
+	now := time.Now()
+	u.PhoneConfirmedAt = &now
+	if err := tx.UpdateOnly(u, "confirmation_token", "phone_confirmed_at"); err != nil {
+		return err
+	}
+
+	return ClearAllOneTimeTokensForUser(tx, u.ID)
+}
+
+// UpdateLastSignInAt updates the last_sign_in_at field for the user
+func (u *User) UpdateLastSignInAt(tx *storage.Connection) error {
+	return tx.UpdateOnly(u, "last_sign_in_at")
+}
+
+// ConfirmEmailChange confirms the change of email for a user
+func (u *User) ConfirmEmailChange(tx *storage.Connection, status int) error {
+	email := u.EmailChange
+
+	u.Email = storage.NullString(email)
+	u.EmailChange = ""
+	u.EmailChangeTokenCurrent = ""
+	u.EmailChangeTokenNew = ""
+	u.EmailChangeConfirmStatus = status
+
+	if err := tx.UpdateOnly(
+		u,
+		"email",
+		"email_change",
+		"email_change_token_current",
+		"email_change_token_new",
+		"email_change_confirm_status",
+	); err != nil {
+		return err
+	}
+
+	if err := ClearAllOneTimeTokensForUser(tx, u.ID); err != nil {
+		return err
+	}
+
+	if !u.IsConfirmed() {
+		if err := u.Confirm(tx); err != nil {
+			return err
+		}
+	}
+
+	identity, err := FindIdentityByIdAndProvider(tx, u.ID.String(), "email")
+	if err != nil {
+		if IsNotFoundError(err) {
+			// no email identity, not an error
+			return nil
+		}
+		return err
+	}
+
+	if _, ok := identity.IdentityData["email"]; ok {
+		identity.IdentityData["email"] = email
+		if err := tx.UpdateOnly(identity, "identity_data"); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ConfirmPhoneChange confirms the change of phone for a user
+func (u *User) ConfirmPhoneChange(tx *storage.Connection) error {
+	now := time.Now()
+	phone := u.PhoneChange
+
+	u.Phone = storage.NullString(phone)
+	u.PhoneChange = ""
+	u.PhoneChangeToken = ""
+	u.PhoneConfirmedAt = &now
+
+	if err := tx.UpdateOnly(
+		u,
+		"phone",
+		"phone_change",
+		"phone_change_token",
+		"phone_confirmed_at",
+	); err != nil {
+		return err
+	}
+
+	if err := ClearAllOneTimeTokensForUser(tx, u.ID); err != nil {
+		return err
+	}
+
+	identity, err := FindIdentityByIdAndProvider(tx, u.ID.String(), "phone")
+	if err != nil {
+		if IsNotFoundError(err) {
+			// no phone identity, not an error
+			return nil
+		}
+
+		return err
+	}
+
+	if _, ok := identity.IdentityData["phone"]; ok {
+		identity.IdentityData["phone"] = phone
+	}
+
+	if err := tx.UpdateOnly(identity, "identity_data"); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Recover resets the recovery token
+func (u *User) Recover(tx *storage.Connection) error {
+	u.RecoveryToken = ""
+	if err := tx.UpdateOnly(u, "recovery_token"); err != nil {
+		return err
+	}
+
+	return ClearAllOneTimeTokensForUser(tx, u.ID)
+}
+
+// CountOtherUsers counts how many other users exist besides the one provided
+func CountOtherUsers(tx *storage.Connection, id uuid.UUID) (int, error) {
+	userCount, err := tx.Q().Where("instance_id = ? and id != ?", uuid.Nil, id).Count(&User{})
+	return userCount, errors.Wrap(err, "error finding registered users")
+}
+
+func findUser(tx *storage.Connection, query string, args ...interface{}) (*User, error) {
+	obj := &User{}
+	if err := tx.Eager().Q().Where(query, args...).First(obj); err != nil {
+		if errors.Cause(err) == sql.ErrNoRows {
+			return nil, UserNotFoundError{}
+		}
+		return nil, errors.Wrap(err, "error finding user")
+	}
+
+	return obj, nil
+}
+
+// FindUserByEmailAndAudience finds a user with the matching email and audience.
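+//
+// For example (illustrative):
+//
+//	user, err := FindUserByEmailAndAudience(tx, "jane@example.com", aud)
+//	if IsNotFoundError(err) {
+//		// no user with that email in this audience
+//	}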
+func FindUserByEmailAndAudience(tx *storage.Connection, email, aud string) (*User, error) {
+	return findUser(tx, "instance_id = ? and LOWER(email) = ? and aud = ? and is_sso_user = false", uuid.Nil, strings.ToLower(email), aud)
+}
+
+// FindUserByPhoneAndAudience finds a user with the matching phone and audience.
+func FindUserByPhoneAndAudience(tx *storage.Connection, phone, aud string) (*User, error) {
+	return findUser(tx, "instance_id = ? and phone = ? and aud = ? and is_sso_user = false", uuid.Nil, phone, aud)
+}
+
+// FindUserByID finds a user matching the provided ID.
+func FindUserByID(tx *storage.Connection, id uuid.UUID) (*User, error) {
+	return findUser(tx, "instance_id = ? and id = ?", uuid.Nil, id)
+}
+
+// FindUserWithRefreshToken finds a user from the provided refresh token. If
+// forUpdate is set to true, then the SELECT statement used by the query has
+// the form SELECT ... FOR UPDATE SKIP LOCKED. This means that a FOR UPDATE
+// lock will only be acquired if there's no other lock. In case there is a
+// lock, an IsNotFound(err) error will be returned.
+func FindUserWithRefreshToken(tx *storage.Connection, token string, forUpdate bool) (*User, *RefreshToken, *Session, error) {
+	refreshToken := &RefreshToken{}
+
+	if forUpdate {
+		// pop does not provide us with a way to execute FOR UPDATE
+		// queries which lock the rows affected by the query from
+		// being accessed by any other transaction that also uses FOR
+		// UPDATE
+		if err := tx.RawQuery(fmt.Sprintf("SELECT * FROM %q WHERE token = ? LIMIT 1 FOR UPDATE SKIP LOCKED;", refreshToken.TableName()), token).First(refreshToken); err != nil {
+			if errors.Cause(err) == sql.ErrNoRows {
+				return nil, nil, nil, RefreshTokenNotFoundError{}
+			}
+
+			return nil, nil, nil, errors.Wrap(err, "error finding refresh token for update")
+		}
+	}
+
+	// once the rows are locked (if forUpdate was true), we can query again using pop
+	if err := tx.Where("token = ?", token).First(refreshToken); err != nil {
+		if errors.Cause(err) == sql.ErrNoRows {
+			return nil, nil, nil, RefreshTokenNotFoundError{}
+		}
+		return nil, nil, nil, errors.Wrap(err, "error finding refresh token")
+	}
+
+	user, err := FindUserByID(tx, refreshToken.UserID)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	var session *Session
+
+	if refreshToken.SessionId != nil {
+		sessionId := *refreshToken.SessionId
+
+		if sessionId != uuid.Nil {
+			session, err = FindSessionByID(tx, sessionId, forUpdate)
+			if err != nil {
+				if forUpdate {
+					return nil, nil, nil, err
+				}
+
+				if !IsNotFoundError(err) {
+					return nil, nil, nil, errors.Wrap(err, "error finding session from refresh token")
+				}
+
+				// otherwise, there's no session for this refresh token
+			}
+		}
+	}
+
+	return user, refreshToken, session, nil
+}
+
+// FindUsersInAudience finds users with the matching audience.
+func FindUsersInAudience(tx *storage.Connection, aud string, pageParams *Pagination, sortParams *SortParams, filter string) ([]*User, error) {
+	users := []*User{}
+	q := tx.Q().Where("instance_id = ? and aud = ?", uuid.Nil, aud)
+
+	if filter != "" {
+		lf := "%" + filter + "%"
+		// we must specify the collation in order to get case insensitive search for the JSON column
+		q = q.Where("(email LIKE ? 
OR raw_user_meta_data->>'full_name' ILIKE ?)", lf, lf) + } + + if sortParams != nil && len(sortParams.Fields) > 0 { + for _, field := range sortParams.Fields { + q = q.Order(field.Name + " " + string(field.Dir)) + } + } + + var err error + if pageParams != nil { + err = q.Paginate(int(pageParams.Page), int(pageParams.PerPage)).All(&users) // #nosec G115 + pageParams.Count = uint64(q.Paginator.TotalEntriesSize) // #nosec G115 + } else { + err = q.All(&users) + } + + return users, err +} + +// IsDuplicatedEmail returns whether a user exists with a matching email and audience. +// If a currentUser is provided, we will need to filter out any identities that belong to the current user. +func IsDuplicatedEmail(tx *storage.Connection, email, aud string, currentUser *User) (*User, error) { + var identities []Identity + + if err := tx.Eager().Q().Where("email = ?", strings.ToLower(email)).All(&identities); err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, nil + } + + return nil, errors.Wrap(err, "unable to find identity by email for duplicates") + } + + userIDs := make(map[string]uuid.UUID) + for _, identity := range identities { + if _, ok := userIDs[identity.UserID.String()]; !ok { + if !identity.IsForSSOProvider() { + userIDs[identity.UserID.String()] = identity.UserID + } + } + } + + var currentUserId uuid.UUID + if currentUser != nil { + currentUserId = currentUser.ID + } + + for _, userID := range userIDs { + if userID != currentUserId { + user, err := FindUserByID(tx, userID) + if err != nil { + return nil, errors.Wrap(err, "unable to find user from email identity for duplicates") + } + if user.Aud == aud { + return user, nil + } + } + } + + // out of an abundance of caution, if nothing was found via the + // identities table we also do a final check on the users table + user, err := FindUserByEmailAndAudience(tx, email, aud) + if err != nil && !IsNotFoundError(err) { + return nil, errors.Wrap(err, "unable to find user email address for duplicates") + } + + return user, nil +} + +// IsDuplicatedPhone checks if the phone number already exists in the users table +func IsDuplicatedPhone(tx *storage.Connection, phone, aud string) (bool, error) { + _, err := FindUserByPhoneAndAudience(tx, phone, aud) + if err != nil { + if IsNotFoundError(err) { + return false, nil + } + return false, err + } + return true, nil +} + +// Ban a user for a given duration. 
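+// For example (illustrative), u.Ban(tx, 24*time.Hour) bans the user for one
+// day, while u.Ban(tx, 0) lifts an existing ban.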
+func (u *User) Ban(tx *storage.Connection, duration time.Duration) error { + if duration == time.Duration(0) { + u.BannedUntil = nil + } else { + t := time.Now().Add(duration) + u.BannedUntil = &t + } + return tx.UpdateOnly(u, "banned_until") +} + +// IsBanned checks if a user is banned or not +func (u *User) IsBanned() bool { + if u.BannedUntil == nil { + return false + } + return time.Now().Before(*u.BannedUntil) +} + +func (u *User) HasMFAEnabled() bool { + for _, factor := range u.Factors { + if factor.IsVerified() { + return true + } + } + + return false +} + +func (u *User) UpdateBannedUntil(tx *storage.Connection) error { + return tx.UpdateOnly(u, "banned_until") +} + +// RemoveUnconfirmedIdentities removes potentially malicious unconfirmed identities from a user (if any) +func (u *User) RemoveUnconfirmedIdentities(tx *storage.Connection, identity *Identity) error { + if identity.Provider != "email" && identity.Provider != "phone" { + // user is unconfirmed so the password should be reset + u.EncryptedPassword = nil + if terr := tx.UpdateOnly(u, "encrypted_password"); terr != nil { + return terr + } + } + + // user is unconfirmed so existing user_metadata should be overwritten + // to use the current identity metadata + u.UserMetaData = identity.IdentityData + if terr := u.UpdateUserMetaData(tx, u.UserMetaData); terr != nil { + return terr + } + + // finally, remove all identities except the current identity being authenticated + for i := range u.Identities { + if u.Identities[i].ID != identity.ID { + if terr := tx.Destroy(&u.Identities[i]); terr != nil { + return terr + } + } + } + + // user is unconfirmed so none of the providers associated to it are verified yet + // only the current provider should be kept + if terr := u.UpdateAppMetaDataProviders(tx); terr != nil { + return terr + } + return nil +} + +// SoftDeleteUser performs a soft deletion on the user by obfuscating and clearing certain fields +func (u *User) SoftDeleteUser(tx *storage.Connection) error { + u.Email = storage.NullString(obfuscateEmail(u, u.GetEmail())) + u.Phone = storage.NullString(obfuscatePhone(u, u.GetPhone())) + u.EmailChange = obfuscateEmail(u, u.EmailChange) + u.PhoneChange = obfuscatePhone(u, u.PhoneChange) + u.EncryptedPassword = nil + u.ConfirmationToken = "" + u.RecoveryToken = "" + u.EmailChangeTokenCurrent = "" + u.EmailChangeTokenNew = "" + u.PhoneChangeToken = "" + + // set deleted_at time + now := time.Now() + u.DeletedAt = &now + + if err := tx.UpdateOnly( + u, + "email", + "phone", + "encrypted_password", + "email_change", + "phone_change", + "confirmation_token", + "recovery_token", + "email_change_token_current", + "email_change_token_new", + "phone_change_token", + "deleted_at", + ); err != nil { + return err + } + + if err := ClearAllOneTimeTokensForUser(tx, u.ID); err != nil { + return err + } + + // set raw_user_meta_data to {} + userMetaDataUpdates := map[string]interface{}{} + for k := range u.UserMetaData { + userMetaDataUpdates[k] = nil + } + + if err := u.UpdateUserMetaData(tx, userMetaDataUpdates); err != nil { + return err + } + + // set raw_app_meta_data to {} + appMetaDataUpdates := map[string]interface{}{} + for k := range u.AppMetaData { + appMetaDataUpdates[k] = nil + } + + if err := u.UpdateAppMetaData(tx, appMetaDataUpdates); err != nil { + return err + } + + if err := Logout(tx, u.ID); err != nil { + return err + } + + return nil +} + +// SoftDeleteUserIdentities performs a soft deletion on all identities associated to a user +func (u *User) SoftDeleteUserIdentities(tx 
*storage.Connection) error { + identities, err := FindIdentitiesByUserID(tx, u.ID) + if err != nil { + return err + } + + // set identity_data to {} + for _, identity := range identities { + identityDataUpdates := map[string]interface{}{} + for k := range identity.IdentityData { + identityDataUpdates[k] = nil + } + if err := identity.UpdateIdentityData(tx, identityDataUpdates); err != nil { + return err + } + // updating the identity.ID has to happen last since the primary key is on (provider, id) + // we use RawQuery here instead of UpdateOnly because UpdateOnly relies on the primary key of Identity + if err := tx.RawQuery( + "update "+ + (&pop.Model{Value: Identity{}}).TableName()+ + " set provider_id = ? where id = ?", + obfuscateIdentityProviderId(identity), + identity.ID, + ).Exec(); err != nil { + return err + } + } + return nil +} + +func (u *User) FindOwnedFactorByID(tx *storage.Connection, factorID uuid.UUID) (*Factor, error) { + var factor Factor + err := tx.Where("user_id = ? AND id = ?", u.ID, factorID).First(&factor) + if err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, &FactorNotFoundError{} + } + return nil, err + } + return &factor, nil +} + +func (user *User) WebAuthnID() []byte { + return []byte(user.ID.String()) +} + +func (user *User) WebAuthnName() string { + return user.Email.String() +} + +func (user *User) WebAuthnDisplayName() string { + return user.Email.String() +} + +func (user *User) WebAuthnCredentials() []webauthn.Credential { + var credentials []webauthn.Credential + + for _, factor := range user.Factors { + if factor.IsVerified() && factor.FactorType == WebAuthn { + credential := factor.WebAuthnCredential.Credential + credentials = append(credentials, credential) + } + } + + return credentials +} + +func obfuscateValue(id uuid.UUID, value string) string { + hash := sha256.Sum256([]byte(id.String() + value)) + return base64.RawURLEncoding.EncodeToString(hash[:]) +} + +func obfuscateEmail(u *User, email string) string { + return obfuscateValue(u.ID, email) +} + +func obfuscatePhone(u *User, phone string) string { + // Field converted from VARCHAR(15) to text + return obfuscateValue(u.ID, phone)[:15] +} + +func obfuscateIdentityProviderId(identity *Identity) string { + return obfuscateValue(identity.UserID, identity.Provider+":"+identity.ProviderID) +} + +// FindUserByPhoneChangeAndAudience finds a user with the matching phone change and audience. +func FindUserByPhoneChangeAndAudience(tx *storage.Connection, phone, aud string) (*User, error) { + return findUser(tx, "instance_id = ? and phone_change = ? and aud = ? 
and is_sso_user = false", uuid.Nil, phone, aud) +} diff --git a/auth_v2.169.0/internal/models/user_test.go b/auth_v2.169.0/internal/models/user_test.go new file mode 100644 index 0000000..0349543 --- /dev/null +++ b/auth_v2.169.0/internal/models/user_test.go @@ -0,0 +1,467 @@ +package models + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/crypto" + "github.com/supabase/auth/internal/storage" + "github.com/supabase/auth/internal/storage/test" + "golang.org/x/crypto/bcrypt" +) + +const modelsTestConfig = "../../hack/test.env" + +func init() { + crypto.PasswordHashCost = crypto.QuickHashCost +} + +type UserTestSuite struct { + suite.Suite + db *storage.Connection +} + +func (ts *UserTestSuite) SetupTest() { + TruncateAll(ts.db) +} + +func TestUser(t *testing.T) { + globalConfig, err := conf.LoadGlobal(modelsTestConfig) + require.NoError(t, err) + + conn, err := test.SetupDBConnection(globalConfig) + require.NoError(t, err) + + ts := &UserTestSuite{ + db: conn, + } + defer ts.db.Close() + + suite.Run(t, ts) +} + +func (ts *UserTestSuite) TestUpdateAppMetadata() { + u, err := NewUser("", "", "", "", nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), u.UpdateAppMetaData(ts.db, make(map[string]interface{}))) + + require.NotNil(ts.T(), u.AppMetaData) + + require.NoError(ts.T(), u.UpdateAppMetaData(ts.db, map[string]interface{}{ + "foo": "bar", + })) + + require.Equal(ts.T(), "bar", u.AppMetaData["foo"]) + require.NoError(ts.T(), u.UpdateAppMetaData(ts.db, map[string]interface{}{ + "foo": nil, + })) + require.Len(ts.T(), u.AppMetaData, 0) + require.Equal(ts.T(), nil, u.AppMetaData["foo"]) +} + +func (ts *UserTestSuite) TestUpdateUserMetadata() { + u, err := NewUser("", "", "", "", nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), u.UpdateUserMetaData(ts.db, make(map[string]interface{}))) + + require.NotNil(ts.T(), u.UserMetaData) + + require.NoError(ts.T(), u.UpdateUserMetaData(ts.db, map[string]interface{}{ + "foo": "bar", + })) + + require.Equal(ts.T(), "bar", u.UserMetaData["foo"]) + require.NoError(ts.T(), u.UpdateUserMetaData(ts.db, map[string]interface{}{ + "foo": nil, + })) + require.Len(ts.T(), u.UserMetaData, 0) + require.Equal(ts.T(), nil, u.UserMetaData["foo"]) +} + +func (ts *UserTestSuite) TestFindUserByConfirmationToken() { + u := ts.createUser() + tokenHash := "test_confirmation_token" + require.NoError(ts.T(), CreateOneTimeToken(ts.db, u.ID, "relates_to not used", tokenHash, ConfirmationToken)) + + n, err := FindUserByConfirmationToken(ts.db, tokenHash) + require.NoError(ts.T(), err) + require.Equal(ts.T(), u.ID, n.ID) +} + +func (ts *UserTestSuite) TestFindUserByEmailAndAudience() { + u := ts.createUser() + + n, err := FindUserByEmailAndAudience(ts.db, u.GetEmail(), "test") + require.NoError(ts.T(), err) + require.Equal(ts.T(), u.ID, n.ID) + + _, err = FindUserByEmailAndAudience(ts.db, u.GetEmail(), "invalid") + require.EqualError(ts.T(), err, UserNotFoundError{}.Error()) +} + +func (ts *UserTestSuite) TestFindUsersInAudience() { + u := ts.createUser() + + n, err := FindUsersInAudience(ts.db, u.Aud, nil, nil, "") + require.NoError(ts.T(), err) + require.Len(ts.T(), n, 1) + + p := Pagination{ + Page: 1, + PerPage: 50, + } + n, err = FindUsersInAudience(ts.db, u.Aud, &p, nil, "") + require.NoError(ts.T(), err) + require.Len(ts.T(), n, 1) + assert.Equal(ts.T(), uint64(1), 
p.Count) + + sp := &SortParams{ + Fields: []SortField{ + {Name: "created_at", Dir: Descending}, + }, + } + n, err = FindUsersInAudience(ts.db, u.Aud, nil, sp, "") + require.NoError(ts.T(), err) + require.Len(ts.T(), n, 1) +} + +func (ts *UserTestSuite) TestFindUserByID() { + u := ts.createUser() + + n, err := FindUserByID(ts.db, u.ID) + require.NoError(ts.T(), err) + require.Equal(ts.T(), u.ID, n.ID) +} + +func (ts *UserTestSuite) TestFindUserByRecoveryToken() { + u := ts.createUser() + tokenHash := "test_recovery_token" + require.NoError(ts.T(), CreateOneTimeToken(ts.db, u.ID, "relates_to not used", tokenHash, RecoveryToken)) + + n, err := FindUserByRecoveryToken(ts.db, tokenHash) + require.NoError(ts.T(), err) + require.Equal(ts.T(), u.ID, n.ID) +} + +func (ts *UserTestSuite) TestFindUserWithRefreshToken() { + u := ts.createUser() + r, err := GrantAuthenticatedUser(ts.db, u, GrantParams{}) + require.NoError(ts.T(), err) + + n, nr, s, err := FindUserWithRefreshToken(ts.db, r.Token, true /* forUpdate */) + require.NoError(ts.T(), err) + require.Equal(ts.T(), r.ID, nr.ID) + require.Equal(ts.T(), u.ID, n.ID) + require.NotNil(ts.T(), s) + require.Equal(ts.T(), *r.SessionId, s.ID) +} + +func (ts *UserTestSuite) TestIsDuplicatedEmail() { + _ = ts.createUserWithEmail("david.calavera@netlify.com") + + e, err := IsDuplicatedEmail(ts.db, "david.calavera@netlify.com", "test", nil) + require.NoError(ts.T(), err) + require.NotNil(ts.T(), e, "expected email to be duplicated") + + e, err = IsDuplicatedEmail(ts.db, "davidcalavera@netlify.com", "test", nil) + require.NoError(ts.T(), err) + require.Nil(ts.T(), e, "expected email to not be duplicated", nil) + + e, err = IsDuplicatedEmail(ts.db, "david@netlify.com", "test", nil) + require.NoError(ts.T(), err) + require.Nil(ts.T(), e, "expected same email to not be duplicated", nil) + + e, err = IsDuplicatedEmail(ts.db, "david.calavera@netlify.com", "other-aud", nil) + require.NoError(ts.T(), err) + require.Nil(ts.T(), e, "expected same email to not be duplicated") +} + +func (ts *UserTestSuite) createUser() *User { + return ts.createUserWithEmail("david@netlify.com") +} + +func (ts *UserTestSuite) createUserWithEmail(email string) *User { + user, err := NewUser("", email, "secret", "test", nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(user)) + + identity, err := NewIdentity(user, "email", map[string]interface{}{ + "sub": user.ID.String(), + "email": email, + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(identity)) + + return user +} + +func (ts *UserTestSuite) TestRemoveUnconfirmedIdentities() { + user, err := NewUser("+29382983298", "someone@example.com", "abcdefgh", "authenticated", nil) + require.NoError(ts.T(), err) + + user.AppMetaData = map[string]interface{}{ + "provider": "email", + "providers": []string{"email", "phone", "twitter"}, + } + + require.NoError(ts.T(), ts.db.Create(user)) + + idEmail, err := NewIdentity(user, "email", map[string]interface{}{ + "sub": "someone@example.com", + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(idEmail)) + + idPhone, err := NewIdentity(user, "phone", map[string]interface{}{ + "sub": "+29382983298", + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(idPhone)) + + idTwitter, err := NewIdentity(user, "twitter", map[string]interface{}{ + "sub": "test_twitter_user_id", + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(idTwitter)) + + user.Identities = append(user.Identities, *idEmail, 
*idPhone, *idTwitter) + + // reload the user + require.NoError(ts.T(), ts.db.Load(user)) + + require.False(ts.T(), user.IsConfirmed(), "user's email must not be confirmed") + + require.NoError(ts.T(), user.RemoveUnconfirmedIdentities(ts.db, idTwitter)) + + // reload the user to check that identities are deleted from the db too + require.NoError(ts.T(), ts.db.Load(user)) + require.Empty(ts.T(), user.EncryptedPassword, "password still remains in user") + + require.Len(ts.T(), user.Identities, 1, "only one identity must be remaining") + require.Equal(ts.T(), idTwitter.ID, user.Identities[0].ID, "remaining identity is not the expected one") + + require.NotNil(ts.T(), user.AppMetaData) + require.Equal(ts.T(), user.AppMetaData["provider"], "twitter") + require.Equal(ts.T(), user.AppMetaData["providers"], []string{"twitter"}) +} + +func (ts *UserTestSuite) TestConfirmEmailChange() { + user, err := NewUser("", "test@example.com", "", "authenticated", nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(user)) + + identity, err := NewIdentity(user, "email", map[string]interface{}{ + "sub": user.ID.String(), + "email": "test@example.com", + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(identity)) + + user.EmailChange = "new@example.com" + require.NoError(ts.T(), ts.db.UpdateOnly(user, "email_change")) + + require.NoError(ts.T(), user.ConfirmEmailChange(ts.db, 0)) + + require.NoError(ts.T(), ts.db.Eager().Load(user)) + identity, err = FindIdentityByIdAndProvider(ts.db, user.ID.String(), "email") + require.NoError(ts.T(), err) + + require.Equal(ts.T(), user.Email, storage.NullString("new@example.com")) + require.Equal(ts.T(), user.EmailChange, "") + + require.NotNil(ts.T(), identity.IdentityData) + require.Equal(ts.T(), identity.IdentityData["email"], "new@example.com") +} + +func (ts *UserTestSuite) TestConfirmPhoneChange() { + user, err := NewUser("123456789", "", "", "authenticated", nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(user)) + + identity, err := NewIdentity(user, "phone", map[string]interface{}{ + "sub": user.ID.String(), + "phone": "123456789", + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(identity)) + + user.PhoneChange = "987654321" + require.NoError(ts.T(), ts.db.UpdateOnly(user, "phone_change")) + + require.NoError(ts.T(), user.ConfirmPhoneChange(ts.db)) + + require.NoError(ts.T(), ts.db.Eager().Load(user)) + identity, err = FindIdentityByIdAndProvider(ts.db, user.ID.String(), "phone") + require.NoError(ts.T(), err) + + require.Equal(ts.T(), user.Phone, storage.NullString("987654321")) + require.Equal(ts.T(), user.PhoneChange, "") + + require.NotNil(ts.T(), identity.IdentityData) + require.Equal(ts.T(), identity.IdentityData["phone"], "987654321") +} + +func (ts *UserTestSuite) TestUpdateUserEmailSuccess() { + userA, err := NewUser("", "foo@example.com", "", "authenticated", nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(userA)) + + primaryIdentity, err := NewIdentity(userA, "email", map[string]interface{}{ + "sub": userA.ID.String(), + "email": "foo@example.com", + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(primaryIdentity)) + + secondaryIdentity, err := NewIdentity(userA, "google", map[string]interface{}{ + "sub": userA.ID.String(), + "email": "bar@example.com", + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(secondaryIdentity)) + + // UpdateUserEmail should not do anything and the user's 
email should still use the primaryIdentity + require.NoError(ts.T(), userA.UpdateUserEmailFromIdentities(ts.db)) + require.Equal(ts.T(), primaryIdentity.GetEmail(), userA.GetEmail()) + + // remove primary identity + require.NoError(ts.T(), ts.db.Destroy(primaryIdentity)) + + // UpdateUserEmail should update the user to use the secondary identity's email + require.NoError(ts.T(), userA.UpdateUserEmailFromIdentities(ts.db)) + require.Equal(ts.T(), secondaryIdentity.GetEmail(), userA.GetEmail()) +} + +func (ts *UserTestSuite) TestUpdateUserEmailFailure() { + userA, err := NewUser("", "foo@example.com", "", "authenticated", nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(userA)) + + primaryIdentity, err := NewIdentity(userA, "email", map[string]interface{}{ + "sub": userA.ID.String(), + "email": "foo@example.com", + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(primaryIdentity)) + + secondaryIdentity, err := NewIdentity(userA, "google", map[string]interface{}{ + "sub": userA.ID.String(), + "email": "bar@example.com", + }) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(secondaryIdentity)) + + userB, err := NewUser("", "bar@example.com", "", "authenticated", nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(userB)) + + // remove primary identity + require.NoError(ts.T(), ts.db.Destroy(primaryIdentity)) + + // UpdateUserEmail should fail with the email unique constraint violation error + // since userB is using the secondary identity's email + require.ErrorIs(ts.T(), userA.UpdateUserEmailFromIdentities(ts.db), UserEmailUniqueConflictError{}) + require.Equal(ts.T(), primaryIdentity.GetEmail(), userA.GetEmail()) +} + +func (ts *UserTestSuite) TestNewUserWithPasswordHashSuccess() { + cases := []struct { + desc string + hash string + }{ + { + desc: "Valid bcrypt hash", + hash: "$2y$10$SXEz2HeT8PUIGQXo9yeUIem8KzNxgG0d7o/.eGj2rj8KbRgAuRVlq", + }, + { + desc: "Valid argon2i hash", + hash: "$argon2i$v=19$m=16,t=2,p=1$bGJRWThNOHJJTVBSdHl2dQ$NfEnUOuUpb7F2fQkgFUG4g", + }, + { + desc: "Valid argon2id hash", + hash: "$argon2id$v=19$m=32,t=3,p=2$SFVpOWJ0eXhjRzVkdGN1RQ$RXnb8rh7LaDcn07xsssqqulZYXOM/EUCEFMVcAcyYVk", + }, + { + desc: "Valid Firebase scrypt hash", + hash: "$fbscrypt$v=1,n=14,r=8,p=1,ss=Bw==,sk=ou9tdYTGyYm8kuR6Dt0Bp0kDuAYoXrK16mbZO4yGwAn3oLspjnN0/c41v8xZnO1n14J3MjKj1b2g6AUCAlFwMw==$C0sHCg9ek77hsg==$ZGlmZmVyZW50aGFzaA==", + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + u, err := NewUserWithPasswordHash("", "", c.hash, "", nil) + require.NoError(ts.T(), err) + require.NotNil(ts.T(), u) + }) + } +} + +func (ts *UserTestSuite) TestNewUserWithPasswordHashFailure() { + cases := []struct { + desc string + hash string + }{ + { + desc: "Invalid argon2i hash", + hash: "$argon2id$test", + }, + { + desc: "Invalid bcrypt hash", + hash: "plaintest_password", + }, + { + desc: "Invalid scrypt hash", + hash: "$fbscrypt$invalid", + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + u, err := NewUserWithPasswordHash("", "", c.hash, "", nil) + require.Error(ts.T(), err) + require.Nil(ts.T(), u) + }) + } +} + +func (ts *UserTestSuite) TestAuthenticate() { + // every case uses "test" as the password + cases := []struct { + desc string + hash string + expectedHashCost int + }{ + { + desc: "Invalid bcrypt hash cost of 11", + hash: "$2y$11$4lH57PU7bGATpRcx93vIoObH3qDmft/pytbOzDG9/1WsyNmN5u4di", + expectedHashCost: bcrypt.MinCost, + }, + { + desc: "Valid bcrypt hash cost of 10", + hash: 
"$2y$10$va66S4MxFrH6G6L7BzYl0.QgcYgvSr/F92gc.3botlz7bG4p/g/1i", + expectedHashCost: bcrypt.DefaultCost, + }, + } + + for _, c := range cases { + ts.Run(c.desc, func() { + u, err := NewUserWithPasswordHash("", "", c.hash, "", nil) + require.NoError(ts.T(), err) + require.NoError(ts.T(), ts.db.Create(u)) + require.NotNil(ts.T(), u) + + isAuthenticated, _, err := u.Authenticate(context.Background(), ts.db, "test", nil, false, "") + require.NoError(ts.T(), err) + require.True(ts.T(), isAuthenticated) + + // check hash cost + hashCost, err := bcrypt.Cost([]byte(*u.EncryptedPassword)) + require.NoError(ts.T(), err) + require.Equal(ts.T(), c.expectedHashCost, hashCost) + }) + } +} diff --git a/auth_v2.169.0/internal/observability/cleanup.go b/auth_v2.169.0/internal/observability/cleanup.go new file mode 100644 index 0000000..2e88c35 --- /dev/null +++ b/auth_v2.169.0/internal/observability/cleanup.go @@ -0,0 +1,18 @@ +package observability + +import ( + "context" + "sync" + + "github.com/supabase/auth/internal/utilities" +) + +var ( + cleanupWaitGroup sync.WaitGroup +) + +// WaitForCleanup waits until all observability long-running goroutines shut +// down cleanly or until the provided context signals done. +func WaitForCleanup(ctx context.Context) { + utilities.WaitForCleanup(ctx, &cleanupWaitGroup) +} diff --git a/auth_v2.169.0/internal/observability/logging.go b/auth_v2.169.0/internal/observability/logging.go new file mode 100644 index 0000000..ff8ac96 --- /dev/null +++ b/auth_v2.169.0/internal/observability/logging.go @@ -0,0 +1,125 @@ +package observability + +import ( + "os" + "sync" + "time" + + "github.com/bombsimon/logrusr/v3" + "github.com/gobuffalo/pop/v6" + "github.com/gobuffalo/pop/v6/logging" + "github.com/sirupsen/logrus" + "github.com/supabase/auth/internal/conf" + "go.opentelemetry.io/otel" +) + +const ( + LOG_SQL_ALL = "all" + LOG_SQL_NONE = "none" + LOG_SQL_STATEMENT = "statement" +) + +var ( + loggingOnce sync.Once +) + +type CustomFormatter struct { + logrus.JSONFormatter +} + +func NewCustomFormatter() *CustomFormatter { + return &CustomFormatter{ + JSONFormatter: logrus.JSONFormatter{ + DisableTimestamp: false, + TimestampFormat: time.RFC3339, + }, + } +} + +func (f *CustomFormatter) Format(entry *logrus.Entry) ([]byte, error) { + // logrus doesn't support formatting the time in UTC so we need to use a custom formatter + entry.Time = entry.Time.UTC() + return f.JSONFormatter.Format(entry) +} + +func ConfigureLogging(config *conf.LoggingConfig) error { + var err error + + loggingOnce.Do(func() { + formatter := NewCustomFormatter() + logrus.SetFormatter(formatter) + + // use a file if you want + if config.File != "" { + f, errOpen := os.OpenFile(config.File, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0660) //#nosec G302 -- Log files should be rw-rw-r-- + if errOpen != nil { + err = errOpen + return + } + logrus.SetOutput(f) + logrus.Infof("Set output file to %s", config.File) + } + + if config.Level != "" { + level, errParse := logrus.ParseLevel(config.Level) + if err != nil { + err = errParse + return + } + logrus.SetLevel(level) + logrus.Debug("Set log level to: " + logrus.GetLevel().String()) + } + + f := logrus.Fields{} + for k, v := range config.Fields { + f[k] = v + } + logrus.WithFields(f) + + setPopLogger(config.SQL) + + otel.SetLogger(logrusr.New(logrus.StandardLogger().WithField("component", "otel"))) + }) + + return err +} + +func setPopLogger(sql string) { + popLog := logrus.WithField("component", "pop") + sqlLog := logrus.WithField("component", "sql") + + shouldLogSQL := 
+
+func setPopLogger(sql string) {
+	popLog := logrus.WithField("component", "pop")
+	sqlLog := logrus.WithField("component", "sql")
+
+	shouldLogSQL := sql == LOG_SQL_STATEMENT || sql == LOG_SQL_ALL
+	shouldLogSQLArgs := sql == LOG_SQL_ALL
+
+	pop.SetLogger(func(lvl logging.Level, s string, args ...interface{}) {
+		// Special case SQL logging since we have 2 extra flags to check
+		if lvl == logging.SQL {
+			if !shouldLogSQL {
+				return
+			}
+
+			if shouldLogSQLArgs && len(args) > 0 {
+				sqlLog.WithField("args", args).Info(s)
+			} else {
+				sqlLog.Info(s)
+			}
+			return
+		}
+
+		l := popLog
+		if len(args) > 0 {
+			l = l.WithField("args", args)
+		}
+
+		switch lvl {
+		case logging.SQL, logging.Debug:
+			l.Debug(s)
+		case logging.Info:
+			l.Info(s)
+		case logging.Warn:
+			l.Warn(s)
+		case logging.Error:
+			l.Error(s)
+		}
+	})
+}
diff --git a/auth_v2.169.0/internal/observability/metrics.go b/auth_v2.169.0/internal/observability/metrics.go
new file mode 100644
index 0000000..b3632aa
--- /dev/null
+++ b/auth_v2.169.0/internal/observability/metrics.go
@@ -0,0 +1,202 @@
+package observability
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/sirupsen/logrus"
+	"github.com/supabase/auth/internal/conf"
+
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+	"go.opentelemetry.io/otel/exporters/prometheus"
+	"go.opentelemetry.io/otel/metric"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+
+	otelruntimemetrics "go.opentelemetry.io/contrib/instrumentation/runtime"
+)
+
+func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
+	return otel.Meter(instrumentationName, opts...)
+}
+
+func ObtainMetricCounter(name, desc string) metric.Int64Counter {
+	counter, err := Meter("gotrue").Int64Counter(name, metric.WithDescription(desc))
+	if err != nil {
+		panic(err)
+	}
+	return counter
+}
+
+func enablePrometheusMetrics(ctx context.Context, mc *conf.MetricsConfig) error {
+	exporter, err := prometheus.New()
+	if err != nil {
+		return err
+	}
+
+	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter))
+
+	otel.SetMeterProvider(provider)
+
+	cleanupWaitGroup.Add(1)
+	go func() {
+		addr := net.JoinHostPort(mc.PrometheusListenHost, mc.PrometheusListenPort)
+		baseContext, cancel := context.WithCancel(context.Background())
+
+		server := &http.Server{
+			Addr:    addr,
+			Handler: promhttp.Handler(),
+			BaseContext: func(net.Listener) context.Context {
+				return baseContext
+			},
+			ReadHeaderTimeout: 2 * time.Second, // to mitigate a Slowloris attack
+		}
+
+		go func() {
+			defer cleanupWaitGroup.Done()
+			<-ctx.Done()
+
+			cancel() // close baseContext
+
+			shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer shutdownCancel()
+
+			if err := server.Shutdown(shutdownCtx); err != nil {
+				logrus.WithError(err).Errorf("prometheus server (%s) failed to gracefully shut down", addr)
+			}
+		}()
+
+		logrus.Infof("prometheus server listening on %s", addr)
+
+		if err := server.ListenAndServe(); err != nil {
+			logrus.WithError(err).Errorf("prometheus server (%s) shut down", addr)
+		} else {
+			logrus.Info("prometheus metric exporter shut down")
+		}
+	}()
+
+	return nil
+}
+
+func enableOpenTelemetryMetrics(ctx context.Context, mc *conf.MetricsConfig) error {
+	switch mc.ExporterProtocol {
+	case "grpc":
+		metricExporter, err := otlpmetricgrpc.New(ctx)
+		if err != nil {
+			return err
+		}
+		meterProvider := sdkmetric.NewMeterProvider(
+			sdkmetric.WithReader(sdkmetric.NewPeriodicReader(metricExporter)),
+		)
+
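+		// The periodic reader collects and pushes metrics to the OTLP
+		// endpoint on a fixed interval (60s by default in the OpenTelemetry
+		// Go SDK, tunable via the standard OTEL_METRIC_EXPORT_INTERVAL
+		// environment variable).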
+		otel.SetMeterProvider(meterProvider)
+
+		cleanupWaitGroup.Add(1)
+		go func() {
+			defer cleanupWaitGroup.Done()
+
+			<-ctx.Done()
+
+			shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer shutdownCancel()
+
+			if err := metricExporter.Shutdown(shutdownCtx); err != nil {
+				logrus.WithError(err).Error("unable to gracefully shut down OpenTelemetry metric exporter")
+			} else {
+				logrus.Info("OpenTelemetry metric exporter shut down")
+			}
+		}()
+
+	case "http/protobuf":
+		metricExporter, err := otlpmetrichttp.New(ctx)
+		if err != nil {
+			return err
+		}
+		meterProvider := sdkmetric.NewMeterProvider(
+			sdkmetric.WithReader(sdkmetric.NewPeriodicReader(metricExporter)),
+		)
+
+		otel.SetMeterProvider(meterProvider)
+
+		cleanupWaitGroup.Add(1)
+		go func() {
+			defer cleanupWaitGroup.Done()
+
+			<-ctx.Done()
+
+			shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer shutdownCancel()
+
+			if err := metricExporter.Shutdown(shutdownCtx); err != nil {
+				logrus.WithError(err).Error("unable to gracefully shut down OpenTelemetry metric exporter")
+			} else {
+				logrus.Info("OpenTelemetry metric exporter shut down")
+			}
+		}()
+
+	default: // http/json for example
+		return fmt.Errorf("unsupported OpenTelemetry exporter protocol %q", mc.ExporterProtocol)
+	}
+	logrus.Info("OpenTelemetry metrics exporter started")
+	return nil
+}
+
+var (
+	metricsOnce *sync.Once = &sync.Once{}
+)
+
+func ConfigureMetrics(ctx context.Context, mc *conf.MetricsConfig) error {
+	if ctx == nil {
+		panic("context must not be nil")
+	}
+
+	var err error
+
+	metricsOnce.Do(func() {
+		if mc.Enabled {
+			switch mc.Exporter {
+			case conf.Prometheus:
+				if err = enablePrometheusMetrics(ctx, mc); err != nil {
+					logrus.WithError(err).Error("unable to start prometheus metrics exporter")
+					return
+				}
+
+			case conf.OpenTelemetryMetrics:
+				if err = enableOpenTelemetryMetrics(ctx, mc); err != nil {
+					logrus.WithError(err).Error("unable to start OTLP metrics exporter")
+					return
+				}
+			}
+		}
+
+		if err := otelruntimemetrics.Start(otelruntimemetrics.WithMinimumReadMemStatsInterval(time.Second)); err != nil {
+			logrus.WithError(err).Error("unable to start OpenTelemetry Go runtime metrics collection")
+		} else {
+			logrus.Info("Go runtime metrics collection started")
+		}
+
+		meter := otel.Meter("gotrue")
+		_, err := meter.Int64ObservableGauge(
+			"gotrue_running",
+			metric.WithDescription("Whether GoTrue is running (always 1)"),
+			metric.WithInt64Callback(func(_ context.Context, obsrv metric.Int64Observer) error {
+				obsrv.Observe(int64(1))
+				return nil
+			}),
+		)
+		if err != nil {
+			logrus.WithError(err).Error("unable to get gotrue.gotrue_running gauge metric")
+			return
+		}
+	})
+
+	return err
+}
diff --git a/auth_v2.169.0/internal/observability/profiler.go b/auth_v2.169.0/internal/observability/profiler.go
new file mode 100644
index 0000000..71acc11
--- /dev/null
+++ b/auth_v2.169.0/internal/observability/profiler.go
@@ -0,0 +1,87 @@
+package observability
+
+import (
+	"context"
+	"net"
+	"time"
+
+	"net/http"
+	"net/http/pprof"
+
+	"github.com/sirupsen/logrus"
+	"github.com/supabase/auth/internal/conf"
+)
+
+func ConfigureProfiler(ctx context.Context, pc *conf.ProfilerConfig) error {
+	if !pc.Enabled {
+		return nil
+	}
+	addr := net.JoinHostPort(pc.Host, pc.Port)
+	baseContext, cancel := context.WithCancel(context.Background())
+	cleanupWaitGroup.Add(1)
+	go func() {
+		server := &http.Server{
+			Addr:    addr,
+			Handler: &ProfilerHandler{},
+			BaseContext: func(net.Listener)
context.Context { + return baseContext + }, + ReadHeaderTimeout: 2 * time.Second, + } + + go func() { + defer cleanupWaitGroup.Done() + <-ctx.Done() + + cancel() // close baseContext + + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second) + defer shutdownCancel() + + if err := server.Shutdown(shutdownCtx); err != nil { + logrus.WithError(err).Errorf("profiler server (%s) failed to gracefully shut down", addr) + } + }() + + logrus.Infof("Profiler is listening on %s", addr) + + if err := server.ListenAndServe(); err != nil { + logrus.WithError(err).Errorf("profiler server (%s) shut down", addr) + } else { + logrus.Info("profiler shut down") + } + }() + + return nil +} + +type ProfilerHandler struct{} + +func (p *ProfilerHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/debug/pprof/": + pprof.Index(w, r) + case "/debug/pprof/cmdline": + pprof.Cmdline(w, r) + case "/debug/pprof/profile": + pprof.Profile(w, r) + case "/debug/pprof/symbol": + pprof.Symbol(w, r) + case "/debug/pprof/trace": + pprof.Trace(w, r) + case "/debug/pprof/goroutine": + pprof.Handler("goroutine").ServeHTTP(w, r) + case "/debug/pprof/heap": + pprof.Handler("heap").ServeHTTP(w, r) + case "/debug/pprof/allocs": + pprof.Handler("allocs").ServeHTTP(w, r) + case "/debug/pprof/threadcreate": + pprof.Handler("threadcreate").ServeHTTP(w, r) + case "/debug/pprof/block": + pprof.Handler("block").ServeHTTP(w, r) + case "/debug/pprof/mutex": + pprof.Handler("mutex").ServeHTTP(w, r) + default: + http.NotFound(w, r) + } +} diff --git a/auth_v2.169.0/internal/observability/request-logger.go b/auth_v2.169.0/internal/observability/request-logger.go new file mode 100644 index 0000000..6eeffd6 --- /dev/null +++ b/auth_v2.169.0/internal/observability/request-logger.go @@ -0,0 +1,114 @@ +package observability + +import ( + "fmt" + "net/http" + "time" + + chimiddleware "github.com/go-chi/chi/v5/middleware" + "github.com/gofrs/uuid" + "github.com/sirupsen/logrus" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/utilities" +) + +func AddRequestID(globalConfig *conf.GlobalConfiguration) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + id := uuid.Must(uuid.NewV4()).String() + if globalConfig.API.RequestIDHeader != "" { + id = r.Header.Get(globalConfig.API.RequestIDHeader) + } + ctx := r.Context() + ctx = utilities.WithRequestID(ctx, id) + next.ServeHTTP(w, r.WithContext(ctx)) + } + return http.HandlerFunc(fn) + } +} + +func NewStructuredLogger(logger *logrus.Logger, config *conf.GlobalConfiguration) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/health" { + next.ServeHTTP(w, r) + } else { + chimiddleware.RequestLogger(&structuredLogger{logger, config})(next).ServeHTTP(w, r) + } + }) + } +} + +type structuredLogger struct { + Logger *logrus.Logger + Config *conf.GlobalConfiguration +} + +func (l *structuredLogger) NewLogEntry(r *http.Request) chimiddleware.LogEntry { + referrer := utilities.GetReferrer(r, l.Config) + e := &logEntry{Entry: logrus.NewEntry(l.Logger)} + logFields := logrus.Fields{ + "component": "api", + "method": r.Method, + "path": r.URL.Path, + "remote_addr": utilities.GetIPAddress(r), + "referer": referrer, + } + + if reqID := utilities.GetRequestID(r.Context()); reqID != "" { + 
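+		// The request ID was attached to the context by the AddRequestID
+		// middleware above; logging it lets every line emitted for a single
+		// request be correlated.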
logFields["request_id"] = reqID + } + + e.Entry = e.Entry.WithFields(logFields) + return e +} + +// logEntry implements the chiMiddleware.LogEntry interface +type logEntry struct { + Entry *logrus.Entry +} + +func (e *logEntry) Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{}) { + fields := logrus.Fields{ + "status": status, + "duration": elapsed.Nanoseconds(), + } + + errorCode := header.Get("x-sb-error-code") + if errorCode != "" { + fields["error_code"] = errorCode + } + + entry := e.Entry.WithFields(fields) + entry.Info("request completed") + e.Entry = entry +} + +func (e *logEntry) Panic(v interface{}, stack []byte) { + entry := e.Entry.WithFields(logrus.Fields{ + "stack": string(stack), + "panic": fmt.Sprintf("%+v", v), + }) + entry.Error("request panicked") + e.Entry = entry +} + +func GetLogEntry(r *http.Request) *logEntry { + l, _ := chimiddleware.GetLogEntry(r).(*logEntry) + if l == nil { + return &logEntry{Entry: logrus.NewEntry(logrus.StandardLogger())} + } + return l +} + +func LogEntrySetField(r *http.Request, key string, value interface{}) { + if l, ok := r.Context().Value(chimiddleware.LogEntryCtxKey).(*logEntry); ok { + l.Entry = l.Entry.WithField(key, value) + } +} + +func LogEntrySetFields(r *http.Request, fields logrus.Fields) { + if l, ok := r.Context().Value(chimiddleware.LogEntryCtxKey).(*logEntry); ok { + l.Entry = l.Entry.WithFields(fields) + } +} diff --git a/auth_v2.169.0/internal/observability/request-logger_test.go b/auth_v2.169.0/internal/observability/request-logger_test.go new file mode 100644 index 0000000..7ab244c --- /dev/null +++ b/auth_v2.169.0/internal/observability/request-logger_test.go @@ -0,0 +1,72 @@ +package observability + +import ( + "bytes" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/conf" +) + +const apiTestConfig = "../../hack/test.env" + +func TestLogger(t *testing.T) { + var logBuffer bytes.Buffer + config, err := conf.LoadGlobal(apiTestConfig) + require.NoError(t, err) + + config.Logging.Level = "info" + require.NoError(t, ConfigureLogging(&config.Logging)) + + // logrus should write to the buffer so we can check if the logs are output correctly + logrus.SetOutput(&logBuffer) + + // add request id header + config.API.RequestIDHeader = "X-Request-ID" + addRequestIdHandler := AddRequestID(config) + + logHandler := NewStructuredLogger(logrus.StandardLogger(), config)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + w := httptest.NewRecorder() + req, err := http.NewRequest(http.MethodPost, "http://example.com/path", nil) + req.Header.Add("X-Request-ID", "test-request-id") + require.NoError(t, err) + addRequestIdHandler(logHandler).ServeHTTP(w, req) + require.Equal(t, http.StatusOK, w.Code) + + var logs map[string]interface{} + require.NoError(t, json.NewDecoder(&logBuffer).Decode(&logs)) + require.Equal(t, "api", logs["component"]) + require.Equal(t, http.MethodPost, logs["method"]) + require.Equal(t, "/path", logs["path"]) + require.Equal(t, "test-request-id", logs["request_id"]) + require.NotNil(t, logs["time"]) +} + +func TestExcludeHealthFromLogs(t *testing.T) { + var logBuffer bytes.Buffer + config, err := conf.LoadGlobal(apiTestConfig) + require.NoError(t, err) + + config.Logging.Level = "info" + require.NoError(t, ConfigureLogging(&config.Logging)) + + // logrus should write to the buffer so we can check if the logs 
are output correctly + logrus.SetOutput(&logBuffer) + + logHandler := NewStructuredLogger(logrus.StandardLogger(), config)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("ok")) + })) + w := httptest.NewRecorder() + req, err := http.NewRequest(http.MethodGet, "http://example.com/health", nil) + require.NoError(t, err) + logHandler.ServeHTTP(w, req) + require.Equal(t, http.StatusOK, w.Code) + + require.Empty(t, logBuffer) +} diff --git a/auth_v2.169.0/internal/observability/request-tracing.go b/auth_v2.169.0/internal/observability/request-tracing.go new file mode 100644 index 0000000..e8ee61b --- /dev/null +++ b/auth_v2.169.0/internal/observability/request-tracing.go @@ -0,0 +1,170 @@ +package observability + +import ( + "net/http" + + "github.com/go-chi/chi/v5" + "github.com/sirupsen/logrus" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.25.0" + "go.opentelemetry.io/otel/trace" +) + +// traceChiRoutesSafely attempts to extract the Chi RouteContext. If the +// request does not have a RouteContext it will recover from the panic and +// attempt to figure out the route from the URL's path. +func traceChiRoutesSafely(r *http.Request) { + defer func() { + if rec := recover(); rec != nil { + logrus.WithField("error", rec).Error("unable to trace chi routes, traces may be off") + + span := trace.SpanFromContext(r.Context()) + span.SetAttributes(semconv.HTTPRouteKey.String(r.URL.Path)) + } + }() + + routeContext := chi.RouteContext(r.Context()) + span := trace.SpanFromContext(r.Context()) + span.SetAttributes(semconv.HTTPRouteKey.String(routeContext.RoutePattern())) +} + +// traceChiRouteURLParamsSafely attempts to extract the Chi RouteContext +// URLParams values for the route and assign them to the tracing span. If the +// request does not have a RouteContext it will recover from the panic and not +// set any params. +func traceChiRouteURLParamsSafely(r *http.Request) { + defer func() { + if rec := recover(); rec != nil { + logrus.WithField("error", rec).Error("unable to trace route with route params, traces may be off") + } + }() + + routeContext := chi.RouteContext(r.Context()) + span := trace.SpanFromContext(r.Context()) + + var attributes []attribute.KeyValue + + for i := 0; i < len(routeContext.URLParams.Keys); i += 1 { + key := routeContext.URLParams.Keys[i] + value := routeContext.URLParams.Values[i] + + attributes = append(attributes, attribute.String("http.route.param."+key, value)) + } + + if len(attributes) > 0 { + span.SetAttributes(attributes...) + } +} + +type interceptingResponseWriter struct { + writer http.ResponseWriter + + statusCode int +} + +func (w *interceptingResponseWriter) WriteHeader(statusCode int) { + w.statusCode = statusCode + + w.writer.WriteHeader(statusCode) +} + +func (w *interceptingResponseWriter) Write(data []byte) (int, error) { + return w.writer.Write(data) +} + +func (w *interceptingResponseWriter) Header() http.Header { + return w.writer.Header() +} + +// countStatusCodesSafely counts the number of HTTP status codes per route that +// occurred while GoTrue was running. If it is not able to identify the route +// via chi.RouteContext(ctx).RoutePattern() it counts with a noroute attribute. 
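+//
+// For illustration, a matched route increments a counter point shaped
+// roughly like:
+//
+//	http_status_codes{code="200", http.route="/token"} += 1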
+func countStatusCodesSafely(w *interceptingResponseWriter, r *http.Request, counter metric.Int64Counter) { + if counter == nil { + return + } + + defer func() { + if rec := recover(); rec != nil { + logrus.WithField("error", rec).Error("unable to count status codes safely, metrics may be off") + counter.Add( + r.Context(), + 1, + metric.WithAttributes( + attribute.Bool("noroute", true), + attribute.Int("code", w.statusCode)), + ) + } + }() + + ctx := r.Context() + + routeContext := chi.RouteContext(ctx) + routePattern := semconv.HTTPRouteKey.String(routeContext.RoutePattern()) + + counter.Add( + ctx, + 1, + metric.WithAttributes(attribute.Int("code", w.statusCode), routePattern), + ) +} + +// RequestTracing returns an HTTP handler that traces all HTTP requests coming +// in. Supports Chi routers, so this should be one of the first middlewares on +// the router. +func RequestTracing() func(http.Handler) http.Handler { + meter := otel.Meter("gotrue") + statusCodes, err := meter.Int64Counter( + "http_status_codes", + metric.WithDescription("Number of returned HTTP status codes"), + ) + if err != nil { + logrus.WithError(err).Error("unable to get gotrue.http_status_codes counter metric") + } + + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + writer := interceptingResponseWriter{ + writer: w, + } + + defer traceChiRoutesSafely(r) + defer traceChiRouteURLParamsSafely(r) + defer countStatusCodesSafely(&writer, r, statusCodes) + + originalUserAgent := r.Header.Get("X-Gotrue-Original-User-Agent") + if originalUserAgent != "" { + r.Header.Set("User-Agent", originalUserAgent) + } + + next.ServeHTTP(&writer, r) + + if originalUserAgent != "" { + r.Header.Set("X-Gotrue-Original-User-Agent", originalUserAgent) + r.Header.Set("User-Agent", "stripped") + } + } + + otelHandler := otelhttp.NewHandler(http.HandlerFunc(fn), "api") + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // there is a vulnerability with otelhttp where + // User-Agent strings are kept in RAM indefinitely and + // can be used as an easy way to resource exhaustion; + // so this code strips the User-Agent header before + // it's passed to be traced by otelhttp, and then is + // returned back to the middleware + // https://github.com/supabase/gotrue/security/dependabot/11 + userAgent := r.UserAgent() + if userAgent != "" { + r.Header.Set("X-Gotrue-Original-User-Agent", userAgent) + r.Header.Set("User-Agent", "stripped") + } + + otelHandler.ServeHTTP(w, r) + }) + } +} diff --git a/auth_v2.169.0/internal/observability/tracing.go b/auth_v2.169.0/internal/observability/tracing.go new file mode 100644 index 0000000..cc18471 --- /dev/null +++ b/auth_v2.169.0/internal/observability/tracing.go @@ -0,0 +1,130 @@ +package observability + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/sirupsen/logrus" + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/utilities" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + "go.opentelemetry.io/otel/propagation" + sdkresource "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" +) + +func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + return otel.Tracer(name, opts...) 
+}
+
+func openTelemetryResource() *sdkresource.Resource {
+	environmentResource := sdkresource.Environment()
+	gotrueResource := sdkresource.NewSchemaless(attribute.String("gotrue.version", utilities.Version))
+
+	mergedResource, err := sdkresource.Merge(environmentResource, gotrueResource)
+	if err != nil {
+		logrus.WithError(err).Error("unable to merge OpenTelemetry environment and gotrue resources")
+
+		return environmentResource
+	}
+
+	return mergedResource
+}
+
+func enableOpenTelemetryTracing(ctx context.Context, tc *conf.TracingConfig) error {
+	var (
+		err           error
+		traceExporter *otlptrace.Exporter
+	)
+
+	switch tc.ExporterProtocol {
+	case "grpc":
+		traceExporter, err = otlptracegrpc.New(ctx)
+		if err != nil {
+			return err
+		}
+
+	case "http/protobuf":
+		traceExporter, err = otlptracehttp.New(ctx)
+		if err != nil {
+			return err
+		}
+
+	default: // http/json for example
+		return fmt.Errorf("unsupported OpenTelemetry exporter protocol %q", tc.ExporterProtocol)
+	}
+
+	traceProvider := sdktrace.NewTracerProvider(
+		sdktrace.WithBatcher(traceExporter),
+		sdktrace.WithResource(openTelemetryResource()),
+	)
+
+	otel.SetTracerProvider(traceProvider)
+
+	// Register the W3C trace context and baggage propagators so data is
+	// propagated across services/processes
+	otel.SetTextMapPropagator(
+		propagation.NewCompositeTextMapPropagator(
+			propagation.TraceContext{},
+			propagation.Baggage{},
+		),
+	)
+
+	cleanupWaitGroup.Add(1)
+	go func() {
+		defer cleanupWaitGroup.Done()
+
+		<-ctx.Done()
+
+		shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
+		defer shutdownCancel()
+
+		if err := traceExporter.Shutdown(shutdownCtx); err != nil {
+			logrus.WithError(err).Error("unable to shut down OpenTelemetry trace exporter")
+		}
+
+		if err := traceProvider.Shutdown(shutdownCtx); err != nil {
+			logrus.WithError(err).Error("unable to shut down OpenTelemetry trace provider")
+		}
+	}()
+
+	logrus.Info("OpenTelemetry trace exporter started")
+
+	return nil
+}
+
+var (
+	tracingOnce sync.Once
+)
+
+// ConfigureTracing sets up global tracing configuration for OpenTracing /
+// OpenTelemetry. The context should be the global context. Cancelling this
+// context will cancel tracing collection.
+func ConfigureTracing(ctx context.Context, tc *conf.TracingConfig) error {
+	if ctx == nil {
+		panic("context must not be nil")
+	}
+
+	var err error
+
+	tracingOnce.Do(func() {
+		if tc.Enabled {
+			if tc.Exporter == conf.OpenTelemetryTracing {
+				if err = enableOpenTelemetryTracing(ctx, tc); err != nil {
+					logrus.WithError(err).Error("unable to start OTLP trace exporter")
+				}
+			}
+		}
+	})
+
+	return err
+}
diff --git a/auth_v2.169.0/internal/ratelimit/burst.go b/auth_v2.169.0/internal/ratelimit/burst.go
new file mode 100644
index 0000000..6ae0ef5
--- /dev/null
+++ b/auth_v2.169.0/internal/ratelimit/burst.go
@@ -0,0 +1,60 @@
+package ratelimit
+
+import (
+	"time"
+
+	"github.com/supabase/auth/internal/conf"
+	"golang.org/x/time/rate"
+)
+
+const defaultOverTime = time.Hour
+
+// BurstLimiter wraps the golang.org/x/time/rate package.
+type BurstLimiter struct {
+	rl *rate.Limiter
+}
+
+// NewBurstLimiter returns a rate limiter configured using the given conf.Rate.
+//
+// The returned Limiter is configured with a token bucket of size r.Events
+// that starts full and refills at a rate of 1 token per r.OverTime.
+//
+// For example:
+// - 1/10s is 1 event per 10 seconds with a burst of 1.
+// - 1/2s is 1 event per 2 seconds with a burst of 1.
+// - 10/10s is 1 event per 10 seconds with a burst of 10.
+//
+// If Rate.Events is <= 0, the burst amount is set to 0 and every event is
+// denied.
+//
+// See Example_newBurstLimiter for a visualization.
+func NewBurstLimiter(r conf.Rate) *BurstLimiter {
+	// The rate limiter deals in events per second.
+	d := r.OverTime
+	if d <= 0 {
+		d = defaultOverTime
+	}
+
+	e := r.Events
+	if e <= 0 {
+		e = 0
+	}
+
+	// BurstLimiter will have an initial token bucket of size `e`. It will
+	// be refilled at a rate of 1 per duration `d` indefinitely.
+	rl := &BurstLimiter{
+		rl: rate.NewLimiter(rate.Every(d), int(e)),
+	}
+	return rl
+}
+
+// Allow implements Limiter by calling AllowAt with the current time.
+func (l *BurstLimiter) Allow() bool {
+	return l.AllowAt(time.Now())
+}
+
+// AllowAt implements Limiter by calling the underlying x/time/rate.Limiter
+// with the given time.
+func (l *BurstLimiter) AllowAt(at time.Time) bool {
+	return l.rl.AllowN(at, 1)
+}
diff --git a/auth_v2.169.0/internal/ratelimit/burst_test.go b/auth_v2.169.0/internal/ratelimit/burst_test.go
new file mode 100644
index 0000000..b854e3b
--- /dev/null
+++ b/auth_v2.169.0/internal/ratelimit/burst_test.go
@@ -0,0 +1,214 @@
+package ratelimit
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/supabase/auth/internal/conf"
+)
+
+func Example_newBurstLimiter() {
+	now, _ := time.Parse(time.RFC3339, "2024-09-24T10:00:00.00Z")
+	{
+		cfg := conf.Rate{Events: 10, OverTime: time.Second * 20}
+		rl := NewBurstLimiter(cfg)
+		cur := now
+		for i := 0; i < 20; i++ {
+			allowed := rl.AllowAt(cur)
+			fmt.Printf("%-5v @ %v\n", allowed, cur)
+			cur = cur.Add(time.Second * 5)
+		}
+	}
+
+	// Output:
+	// true  @ 2024-09-24 10:00:00 +0000 UTC
+	// true  @ 2024-09-24 10:00:05 +0000 UTC
+	// true  @ 2024-09-24 10:00:10 +0000 UTC
+	// true  @ 2024-09-24 10:00:15 +0000 UTC
+	// true  @ 2024-09-24 10:00:20 +0000 UTC
+	// true  @ 2024-09-24 10:00:25 +0000 UTC
+	// true  @ 2024-09-24 10:00:30 +0000 UTC
+	// true  @ 2024-09-24 10:00:35 +0000 UTC
+	// true  @ 2024-09-24 10:00:40 +0000 UTC
+	// true  @ 2024-09-24 10:00:45 +0000 UTC
+	// true  @ 2024-09-24 10:00:50 +0000 UTC
+	// true  @ 2024-09-24 10:00:55 +0000 UTC
+	// true  @ 2024-09-24 10:01:00 +0000 UTC
+	// false @ 2024-09-24 10:01:05 +0000 UTC
+	// false @ 2024-09-24 10:01:10 +0000 UTC
+	// false @ 2024-09-24 10:01:15 +0000 UTC
+	// true  @ 2024-09-24 10:01:20 +0000 UTC
+	// false @ 2024-09-24 10:01:25 +0000 UTC
+	// false @ 2024-09-24 10:01:30 +0000 UTC
+	// false @ 2024-09-24 10:01:35 +0000 UTC
+}
+
+func TestBurstLimiter(t *testing.T) {
+	t.Run("Allow", func(t *testing.T) {
+		for i := 1; i < 10; i++ {
+			cfg := conf.Rate{Events: float64(i), OverTime: time.Hour}
+			rl := NewBurstLimiter(cfg)
+			for y := i; y > 0; y-- {
+				if exp, got := true, rl.Allow(); exp != got {
+					t.Fatalf("exp Allow() to be %v; got %v", exp, got)
+				}
+			}
+			if exp, got := false, rl.Allow(); exp != got {
+				t.Fatalf("exp Allow() to be %v; got %v", exp, got)
+			}
+		}
+	})
+
+	t.Run("AllowAt", func(t *testing.T) {
+		now, _ := time.Parse(time.RFC3339, "2024-09-24T10:00:00.00Z")
+
+		type event struct {
+			ok bool
+			at time.Time
+
+			// Event should be `ok` at `at` for `i` times
+			i int
+		}
+
+		type testCase struct {
+			cfg  conf.Rate
+			now  time.Time
+			evts []event
+		}
+		cases := []testCase{
+			{
+				cfg: conf.Rate{Events: 20, OverTime: time.Second * 20},
+				now: now,
+				evts: []event{
+					// initial burst of 20 is permitted
+					{true, now, 19},
+
+					// then denied, even at same time
+					{false, now, 100},
+
+					// and continue to deny
until the next generated token + {false, now.Add(time.Second), 100}, + {false, now.Add(time.Second * 19), 100}, + + // allows a single call to allow at 20 seconds + {true, now.Add(time.Second * 20), 0}, + + // then denied + {false, now.Add(time.Second * 20), 100}, + + // and the pattern repeats + {true, now.Add(time.Second * 40), 0}, + {false, now.Add(time.Second * 40), 100}, + {false, now.Add(time.Second * 59), 100}, + + {true, now.Add(time.Second * 60), 0}, + {false, now.Add(time.Second * 60), 100}, + {false, now.Add(time.Second * 79), 100}, + + {true, now.Add(time.Second * 80), 0}, + {false, now.Add(time.Second * 80), 100}, + {false, now.Add(time.Second * 99), 100}, + + // allow tokens to be built up still + {true, now.Add(time.Hour), 19}, + }, + }, + + { + cfg: conf.Rate{Events: 1, OverTime: time.Second * 20}, + now: now, + evts: []event{ + // initial burst of 1 is permitted + {true, now, 0}, + + // then denied, even at same time + {false, now, 100}, + + // and continue to deny until the next generated token + {false, now.Add(time.Second), 100}, + {false, now.Add(time.Second * 19), 100}, + + // allows a single call to allow at 20 seconds + {true, now.Add(time.Second * 20), 0}, + + // then denied + {false, now.Add(time.Second * 20), 100}, + + // and the pattern repeats + {true, now.Add(time.Second * 40), 0}, + {false, now.Add(time.Second * 40), 100}, + {false, now.Add(time.Second * 59), 100}, + + {true, now.Add(time.Second * 60), 0}, + {false, now.Add(time.Second * 60), 100}, + {false, now.Add(time.Second * 79), 100}, + + {true, now.Add(time.Second * 80), 0}, + {false, now.Add(time.Second * 80), 100}, + {false, now.Add(time.Second * 99), 100}, + }, + }, + + // 1 event per second + { + cfg: conf.Rate{Events: 1, OverTime: time.Second}, + now: now, + evts: []event{ + {true, now, 0}, + {true, now.Add(time.Second), 0}, + {false, now.Add(time.Second), 0}, + {true, now.Add(time.Second * 2), 0}, + }, + }, + + // 1 events per second and OverTime = 1 event per hour. + { + cfg: conf.Rate{Events: 1, OverTime: 0}, + now: now, + evts: []event{ + {true, now, 0}, + {false, now.Add(time.Hour - time.Second), 0}, + {true, now.Add(time.Hour), 0}, + {true, now.Add(time.Hour * 2), 0}, + }, + }, + + // zero value for Events = 0 event per second + { + cfg: conf.Rate{Events: 0, OverTime: time.Second}, + now: now, + evts: []event{ + {false, now, 0}, + {false, now.Add(-time.Second), 0}, + {false, now.Add(time.Second), 0}, + {false, now.Add(time.Second * 2), 0}, + }, + }, + + // zero value for both Events and OverTime = 1 event per hour. + { + cfg: conf.Rate{Events: 0, OverTime: 0}, + now: now, + evts: []event{ + {false, now, 0}, + {false, now.Add(time.Hour - time.Second), 0}, + {false, now.Add(-time.Hour), 0}, + {false, now.Add(time.Hour), 0}, + {false, now.Add(time.Hour * 2), 0}, + }, + }, + } + + for _, tc := range cases { + rl := NewBurstLimiter(tc.cfg) + for _, evt := range tc.evts { + for i := 0; i <= evt.i; i++ { + if exp, got := evt.ok, rl.AllowAt(evt.at); exp != got { + t.Fatalf("exp AllowAt(%v) to be %v; got %v", evt.at, exp, got) + } + } + } + } + }) +} diff --git a/auth_v2.169.0/internal/ratelimit/interval.go b/auth_v2.169.0/internal/ratelimit/interval.go new file mode 100644 index 0000000..a72302f --- /dev/null +++ b/auth_v2.169.0/internal/ratelimit/interval.go @@ -0,0 +1,63 @@ +package ratelimit + +import ( + "sync" + "time" + + "github.com/supabase/auth/internal/conf" +) + +// IntervalLimiter will limit the number of calls to Allow per interval. 
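+// It implements a fixed window: with conf.Rate{Events: 100, OverTime: 24 * time.Hour}
+// (an illustrative value), the first 100 calls within a 24h window return
+// true and later calls return false, until the window rolls over and the
+// counter resets.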
+type IntervalLimiter struct {
+	mu    sync.Mutex
+	ival  time.Duration // Count is reset and time updated every ival.
+	limit int           // Limit calls to Allow() per ival.
+
+	// Guarded by mu.
+	last  time.Time // When the limiter was last reset.
+	count int       // Total calls to Allow() since last reset.
+}
+
+// NewIntervalLimiter returns a rate limiter using the given conf.Rate.
+func NewIntervalLimiter(r conf.Rate) *IntervalLimiter {
+	return &IntervalLimiter{
+		ival:  r.OverTime,
+		limit: int(r.Events),
+		last:  time.Now(),
+	}
+}
+
+// Allow implements Limiter by calling AllowAt with the current time.
+func (rl *IntervalLimiter) Allow() bool {
+	rl.mu.Lock()
+	defer rl.mu.Unlock()
+
+	return rl.allowAt(time.Now())
+}
+
+// AllowAt implements Limiter by checking if the current number of permitted
+// events within this interval would permit 1 additional event at the current
+// time.
+//
+// When called with a time outside the current active interval the counter is
+// reset, meaning it can be vulnerable at the edge of its intervals, so avoid
+// small intervals.
+func (rl *IntervalLimiter) AllowAt(at time.Time) bool {
+	rl.mu.Lock()
+	defer rl.mu.Unlock()
+
+	return rl.allowAt(at)
+}
+
+func (rl *IntervalLimiter) allowAt(at time.Time) bool {
+	since := at.Sub(rl.last)
+	if ivals := int64(since / rl.ival); ivals > 0 {
+		rl.last = rl.last.Add(time.Duration(ivals) * rl.ival)
+		rl.count = 0
+	}
+	if rl.count < rl.limit {
+		rl.count++
+		return true
+	}
+	return false
+}
diff --git a/auth_v2.169.0/internal/ratelimit/interval_test.go b/auth_v2.169.0/internal/ratelimit/interval_test.go
new file mode 100644
index 0000000..835ee82
--- /dev/null
+++ b/auth_v2.169.0/internal/ratelimit/interval_test.go
@@ -0,0 +1,81 @@
+package ratelimit
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/supabase/auth/internal/conf"
+)
+
+func Example_newIntervalLimiter() {
+	now, _ := time.Parse(time.RFC3339, "2024-09-24T10:00:00.00Z")
+	cfg := conf.Rate{Events: 100, OverTime: time.Hour * 24}
+	rl := NewIntervalLimiter(cfg)
+	rl.last = now
+
+	cur := now
+	allowed := 0
+
+	for days := 0; days < 2; days++ {
+		// First 100 events succeed.
+		for i := 0; i < 100; i++ {
+			allow := rl.allowAt(cur)
+			cur = cur.Add(time.Second)
+
+			if !allow {
+				fmt.Printf("false @ %v after %v events... [FAILED]\n", cur, allowed)
+				return
+			}
+			allowed++
+		}
+		fmt.Printf("true @ %v for last %v events...\n", cur, allowed)
+
+		// We try hourly until it allows us to make requests again.
+		denied := 0
+		for i := 0; i < 23; i++ {
+			cur = cur.Add(time.Hour)
+			allow := rl.AllowAt(cur)
+			if allow {
+				fmt.Printf("true @ %v before quota reset... [FAILED]\n", cur)
+				return
+			}
+			denied++
+		}
+		fmt.Printf("false @ %v for last %v events...\n", cur, denied)
+
+		cur = cur.Add(time.Hour)
+	}
+
+	// Output:
+	// true @ 2024-09-24 10:01:40 +0000 UTC for last 100 events...
+	// false @ 2024-09-25 09:01:40 +0000 UTC for last 23 events...
+	// true @ 2024-09-25 10:03:20 +0000 UTC for last 200 events...
+	// false @ 2024-09-26 09:03:20 +0000 UTC for last 23 events...
+}
+
+func TestNewIntervalLimiter(t *testing.T) {
+	t.Run("Allow", func(t *testing.T) {
+		for i := 1; i < 10; i++ {
+			cfg := conf.Rate{Events: float64(i), OverTime: time.Hour}
+			rl := NewIntervalLimiter(cfg)
+			for y := i; y > 0; y-- {
+				if exp, got := true, rl.Allow(); exp != got {
+					t.Fatalf("exp Allow() to be %v; got %v", exp, got)
+				}
+			}
+			if exp, got := false, rl.Allow(); exp != got {
+				t.Fatalf("exp Allow() to be %v; got %v", exp, got)
+			}
+		}
+
+		// NewBurstLimiter with the same shape of config should likewise
+		// allow the full initial burst.
+ cfg := conf.Rate{Events: 10, OverTime: time.Hour} + rl := NewBurstLimiter(cfg) + for y := 0; y < 10; y++ { + if exp, got := true, rl.Allow(); exp != got { + t.Fatalf("exp Allow() to be %v; got %v", exp, got) + } + } + }) +} diff --git a/auth_v2.169.0/internal/ratelimit/ratelimit.go b/auth_v2.169.0/internal/ratelimit/ratelimit.go new file mode 100644 index 0000000..35fbf9b --- /dev/null +++ b/auth_v2.169.0/internal/ratelimit/ratelimit.go @@ -0,0 +1,34 @@ +package ratelimit + +import ( + "time" + + "github.com/supabase/auth/internal/conf" +) + +// Limiter is the interface implemented by rate limiters. +// +// Implementations of Limiter must be safe for concurrent use. +type Limiter interface { + + // Allow should return true if an event should be allowed at the time + // which it was called, or false otherwise. + Allow() bool + + // AllowAt should return true if an event should be allowed at the given + // time, or false otherwise. + AllowAt(at time.Time) bool +} + +// New returns a new Limiter based on the given config. +// +// When the type is conf.BurstRateType it returns a BurstLimiter, otherwise +// New returns an IntervalLimiter. +func New(r conf.Rate) Limiter { + switch r.GetRateType() { + case conf.BurstRateType: + return NewBurstLimiter(r) + default: + return NewIntervalLimiter(r) + } +} diff --git a/auth_v2.169.0/internal/ratelimit/ratelimit_test.go b/auth_v2.169.0/internal/ratelimit/ratelimit_test.go new file mode 100644 index 0000000..3bac1dc --- /dev/null +++ b/auth_v2.169.0/internal/ratelimit/ratelimit_test.go @@ -0,0 +1,50 @@ +package ratelimit + +import ( + "testing" + + "github.com/supabase/auth/internal/conf" +) + +func TestNew(t *testing.T) { + + // IntervalLimiter + { + var r conf.Rate + err := r.Decode("100") + if err != nil { + t.Fatalf("exp nil err; got %v", err) + } + + rl := New(r) + if _, ok := rl.(*IntervalLimiter); !ok { + t.Fatalf("exp type *IntervalLimiter; got %T", rl) + } + } + { + var r conf.Rate + err := r.Decode("100.123") + if err != nil { + t.Fatalf("exp nil err; got %v", err) + } + + rl := New(r) + if _, ok := rl.(*IntervalLimiter); !ok { + t.Fatalf("exp type *IntervalLimiter; got %T", rl) + } + } + + // BurstLimiter + { + var r conf.Rate + err := r.Decode("20/200s") + if err != nil { + t.Fatalf("exp nil err; got %v", err) + } + + rl := New(r) + if _, ok := rl.(*BurstLimiter); !ok { + t.Fatalf("exp type *BurstLimiter; got %T", rl) + } + } +} diff --git a/auth_v2.169.0/internal/reloader/handler.go b/auth_v2.169.0/internal/reloader/handler.go new file mode 100644 index 0000000..bdd15ca --- /dev/null +++ b/auth_v2.169.0/internal/reloader/handler.go @@ -0,0 +1,42 @@ +package reloader + +import ( + "net/http" + "sync/atomic" +) + +// AtomicHandler provides an atomic http.Handler implementation, allowing safe +// handler replacement at runtime. AtomicHandler must be initialized with a call +// to NewAtomicHandler. It will never panic and is safe for concurrent use. +type AtomicHandler struct { + val atomic.Value +} + +// atomicHandlerValue is the value stored within an atomicHandler. +type atomicHandlerValue struct{ http.Handler } + +// NewAtomicHandler creates a new AtomicHandler ready for use. +func NewAtomicHandler(h http.Handler) *AtomicHandler { + ah := new(AtomicHandler) + ah.Store(h) + return ah +} + +// String implements fmt.Stringer by returning a string literal. +func (ah *AtomicHandler) String() string { return "reloader.AtomicHandler" } + +// Store will update this http.Handler to serve future requests using h. 
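+// In-flight requests keep whichever handler they already loaded; only later
+// calls to ServeHTTP observe h. An illustrative sketch (the handler names
+// here are examples, not part of this package):
+//
+//	ah := NewAtomicHandler(oldHandler)
+//	go func() { _ = http.ListenAndServe(":8080", ah) }() // serves oldHandler
+//	ah.Store(newHandler)                                 // later requests see newHandler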
+func (ah *AtomicHandler) Store(h http.Handler) { + ah.val.Store(&atomicHandlerValue{h}) +} + +// load will return the underlying http.Handler used to serve requests. +func (ah *AtomicHandler) load() http.Handler { + return ah.val.Load().(*atomicHandlerValue).Handler +} + +// ServeHTTP implements the standard libraries http.Handler interface by +// atomically passing the request along to the most recently stored handler. +func (ah *AtomicHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ah.load().ServeHTTP(w, r) +} diff --git a/auth_v2.169.0/internal/reloader/handler_race_test.go b/auth_v2.169.0/internal/reloader/handler_race_test.go new file mode 100644 index 0000000..4d7b5e0 --- /dev/null +++ b/auth_v2.169.0/internal/reloader/handler_race_test.go @@ -0,0 +1,64 @@ +//go:build race +// +build race + +package reloader + +import ( + "context" + "net/http" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestAtomicHandlerRaces(t *testing.T) { + type testHandler struct{ http.Handler } + + hrFn := func() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) + } + + const count = 8 + hrFuncMap := make(map[http.Handler]struct{}, count) + for i := 0; i < count; i++ { + hrFuncMap[&testHandler{hrFn()}] = struct{}{} + } + + hr := NewAtomicHandler(nil) + assert.NotNil(t, hr) + + var wg sync.WaitGroup + defer wg.Wait() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second/4) + defer cancel() + + // We create 8 goroutines reading & writing to the handler concurrently. If + // a race condition occurs the test will fail and halt. + for i := 0; i < count; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + for hrFunc := range hrFuncMap { + select { + case <-ctx.Done(): + default: + } + + hr.Store(hrFunc) + + got := hr.load() + _, ok := hrFuncMap[got] + if !ok { + // This will trigger a race failure / exit test + t.Fatal("unknown handler returned from load()") + return + } + } + }() + } + wg.Wait() +} diff --git a/auth_v2.169.0/internal/reloader/handler_test.go b/auth_v2.169.0/internal/reloader/handler_test.go new file mode 100644 index 0000000..182c526 --- /dev/null +++ b/auth_v2.169.0/internal/reloader/handler_test.go @@ -0,0 +1,46 @@ +package reloader + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAtomicHandler(t *testing.T) { + // for ptr identity + type testHandler struct{ http.Handler } + + hrFn := func() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) + } + + hrFunc1 := &testHandler{hrFn()} + hrFunc2 := &testHandler{hrFn()} + assert.NotEqual(t, hrFunc1, hrFunc2) + + // a new AtomicHandler should be non-nil + hr := NewAtomicHandler(nil) + assert.NotNil(t, hr) + + // should have no stored handler + { + hrCur := hr.load() + assert.Nil(t, hrCur) + assert.Equal(t, true, hrCur == nil) + } + + // should be non-nil after store + for i := 0; i < 3; i++ { + hr.Store(hrFunc1) + assert.NotNil(t, hr.load()) + assert.Equal(t, hr.load(), hrFunc1) + assert.Equal(t, hr.load() == hrFunc1, true) + + // should update to hrFunc2 + hr.Store(hrFunc2) + assert.NotNil(t, hr.load()) + assert.Equal(t, hr.load(), hrFunc2) + assert.Equal(t, hr.load() == hrFunc2, true) + } +} diff --git a/auth_v2.169.0/internal/reloader/reloader.go b/auth_v2.169.0/internal/reloader/reloader.go new file mode 100644 index 0000000..2b2b55e --- /dev/null +++ b/auth_v2.169.0/internal/reloader/reloader.go @@ -0,0 +1,141 @@ +// Package reloader provides support for 
live configuration reloading.
+package reloader
+
+import (
+	"context"
+	"log"
+	"strings"
+	"time"
+
+	"github.com/fsnotify/fsnotify"
+	"github.com/sirupsen/logrus"
+	"github.com/supabase/auth/internal/conf"
+)
+
+const (
+	// reloadInterval is the interval between configuration reloads. At most
+	// one reload can occur per interval.
+	reloadInterval = time.Second * 10
+
+	// tickerInterval is the maximum latency between configuration reloads.
+	tickerInterval = reloadInterval / 10
+)
+
+type ConfigFunc func(*conf.GlobalConfiguration)
+
+type Reloader struct {
+	watchDir   string
+	reloadIval time.Duration
+	tickerIval time.Duration
+}
+
+func NewReloader(watchDir string) *Reloader {
+	return &Reloader{
+		watchDir:   watchDir,
+		reloadIval: reloadInterval,
+		tickerIval: tickerInterval,
+	}
+}
+
+// reload attempts to create a new *conf.GlobalConfiguration after loading the
+// currently configured watchDir.
+func (rl *Reloader) reload() (*conf.GlobalConfiguration, error) {
+	if err := conf.LoadDirectory(rl.watchDir); err != nil {
+		return nil, err
+	}
+
+	cfg, err := conf.LoadGlobalFromEnv()
+	if err != nil {
+		return nil, err
+	}
+	return cfg, nil
+}
+
+// reloadCheckAt checks if reloadConfig should be called, returning true if
+// the config should be reloaded or false otherwise.
+func (rl *Reloader) reloadCheckAt(at, lastUpdate time.Time) bool {
+	if lastUpdate.IsZero() {
+		return false // no pending updates
+	}
+	if at.Sub(lastUpdate) < rl.reloadIval {
+		return false // waiting for reload interval
+	}
+
+	// Update is pending.
+	return true
+}
+
+func (rl *Reloader) Watch(ctx context.Context, fn ConfigFunc) error {
+	wr, err := fsnotify.NewWatcher()
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer wr.Close()
+
+	tr := time.NewTicker(rl.tickerIval)
+	defer tr.Stop()
+
+	// Ignore errors; if the watch dir doesn't exist we can add it later.
+	if err := wr.Add(rl.watchDir); err != nil {
+		logrus.WithError(err).Error("watch dir failed")
+	}
+
+	var lastUpdate time.Time
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+
+		case <-tr.C:
+			// This is a simple way to solve watch dir being added later or
+			// being moved and then recreated. I've tested all of these basic
+			// scenarios and wr.WatchList() does not grow, which aligns with
+			// the documented behavior.
+			if err := wr.Add(rl.watchDir); err != nil {
+				logrus.WithError(err).Error("watch dir failed")
+			}
+
+			// Check to see if the config is ready to be reloaded.
+			if !rl.reloadCheckAt(time.Now(), lastUpdate) {
+				continue
+			}
+
+			// Reset the last update time before we try to reload the config.
+			lastUpdate = time.Time{}
+
+			cfg, err := rl.reload()
+			if err != nil {
+				logrus.WithError(err).Error("config reload failed")
+				continue
+			}
+
+			// Call the callback function with the latest cfg.
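+			// fn is expected to apply cfg, e.g. by rebuilding the API
+			// handler and swapping it in atomically (see AtomicHandler in
+			// this package).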
+			fn(cfg)
+
+		case evt, ok := <-wr.Events:
+			if !ok {
+				logrus.Error("fsnotify has exited")
+				return nil
+			}
+
+			// We only read files ending in .env
+			if !strings.HasSuffix(evt.Name, ".env") {
+				continue
+			}
+
+			switch {
+			case evt.Op.Has(fsnotify.Create),
+				evt.Op.Has(fsnotify.Remove),
+				evt.Op.Has(fsnotify.Rename),
+				evt.Op.Has(fsnotify.Write):
+				lastUpdate = time.Now()
+			}
+		case err, ok := <-wr.Errors:
+			if !ok {
+				logrus.Error("fsnotify has exited")
+				return nil
+			}
+			logrus.WithError(err).Error("fsnotify has reported an error")
+		}
+	}
+}
diff --git a/auth_v2.169.0/internal/reloader/reloader_test.go b/auth_v2.169.0/internal/reloader/reloader_test.go
new file mode 100644
index 0000000..ec8e04b
--- /dev/null
+++ b/auth_v2.169.0/internal/reloader/reloader_test.go
@@ -0,0 +1,173 @@
+package reloader
+
+import (
+	"bytes"
+	"log"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestReloadConfig(t *testing.T) {
+	dir, cleanup := helpTestDir(t)
+	defer cleanup()
+
+	rl := NewReloader(dir)
+
+	// Copy the full and valid example configuration.
+	helpCopyEnvFile(t, dir, "01_example.env", "testdata/50_example.env")
+	{
+		cfg, err := rl.reload()
+		if err != nil {
+			t.Fatal(err)
+		}
+		assert.NotNil(t, cfg)
+		assert.Equal(t, cfg.External.Apple.Enabled, false)
+	}
+
+	helpWriteEnvFile(t, dir, "02_example.env", map[string]string{
+		"GOTRUE_EXTERNAL_APPLE_ENABLED": "true",
+	})
+	{
+		cfg, err := rl.reload()
+		if err != nil {
+			t.Fatal(err)
+		}
+		assert.NotNil(t, cfg)
+		assert.Equal(t, cfg.External.Apple.Enabled, true)
+	}
+
+	helpWriteEnvFile(t, dir, "03_example.env.bak", map[string]string{
+		"GOTRUE_EXTERNAL_APPLE_ENABLED": "false",
+	})
+	{
+		cfg, err := rl.reload()
+		if err != nil {
+			t.Fatal(err)
+		}
+		assert.NotNil(t, cfg)
+		assert.Equal(t, cfg.External.Apple.Enabled, true)
+	}
+}
+
+func TestReloadCheckAt(t *testing.T) {
+	const s10 = time.Second * 10
+
+	now := time.Now()
+	tests := []struct {
+		rl             *Reloader
+		at, lastUpdate time.Time
+		exp            bool
+	}{
+		// no lastUpdate is set (time.IsZero())
+		{
+			rl:  &Reloader{reloadIval: s10, tickerIval: s10 / 10},
+			exp: false,
+		},
+		{
+			rl:  &Reloader{reloadIval: s10, tickerIval: s10 / 10},
+			at:  now,
+			exp: false,
+		},
+
+		// last update within reload interval
+		{
+			rl:         &Reloader{reloadIval: s10, tickerIval: s10 / 10},
+			at:         now,
+			lastUpdate: now.Add(-s10 + 1),
+			exp:        false,
+		},
+		{
+			rl:         &Reloader{reloadIval: s10, tickerIval: s10 / 10},
+			at:         now,
+			lastUpdate: now,
+			exp:        false,
+		},
+		{
+			rl:         &Reloader{reloadIval: s10, tickerIval: s10 / 10},
+			at:         now,
+			lastUpdate: now.Add(s10 - 1),
+			exp:        false,
+		},
+		{
+			rl:         &Reloader{reloadIval: s10, tickerIval: s10 / 10},
+			at:         now,
+			lastUpdate: now.Add(s10),
+			exp:        false,
+		},
+		{
+			rl:         &Reloader{reloadIval: s10, tickerIval: s10 / 10},
+			at:         now,
+			lastUpdate: now.Add(s10 + 1),
+			exp:        false,
+		},
+		{
+			rl:         &Reloader{reloadIval: s10, tickerIval: s10 / 10},
+			at:         now,
+			lastUpdate: now.Add(s10 * 2),
+			exp:        false,
+		},
+
+		// last update was outside our reload interval
+		{
+			rl:         &Reloader{reloadIval: s10, tickerIval: s10 / 10},
+			at:         now,
+			lastUpdate: now.Add(-s10),
+			exp:        true,
+		},
+		{
+			rl:         &Reloader{reloadIval: s10, tickerIval: s10 / 10},
+			at:         now,
+			lastUpdate: now.Add(-s10 - 1),
+			exp:        true,
+		},
+	}
+	for _, tc := range tests {
+		rl := tc.rl
+		assert.NotNil(t, rl)
+		assert.Equal(t, rl.reloadCheckAt(tc.at, tc.lastUpdate), tc.exp)
+	}
+}
+
+func helpTestDir(t testing.TB) (dir string, cleanup func()) {
+	dir = filepath.Join("testdata",
t.Name()) + err := os.MkdirAll(dir, 0750) + if err != nil && !os.IsExist(err) { + t.Fatal(err) + } + return dir, func() { os.RemoveAll(dir) } +} + +func helpCopyEnvFile(t testing.TB, dir, name, src string) string { + data, err := os.ReadFile(src) // #nosec G304 + if err != nil { + log.Fatal(err) + } + + dst := filepath.Join(dir, name) + err = os.WriteFile(dst, data, 0600) + if err != nil { + t.Fatal(err) + } + return dst +} + +func helpWriteEnvFile(t testing.TB, dir, name string, values map[string]string) string { + var buf bytes.Buffer + for k, v := range values { + buf.WriteString(k) + buf.WriteString("=") + buf.WriteString(v) + buf.WriteString("\n") + } + + dst := filepath.Join(dir, name) + err := os.WriteFile(dst, buf.Bytes(), 0600) + if err != nil { + t.Fatal(err) + } + return dst +} diff --git a/auth_v2.169.0/internal/reloader/testdata/50_example.env b/auth_v2.169.0/internal/reloader/testdata/50_example.env new file mode 100644 index 0000000..1002d8b --- /dev/null +++ b/auth_v2.169.0/internal/reloader/testdata/50_example.env @@ -0,0 +1,235 @@ +# General Config +# NOTE: The service_role key is required as an authorization header for /admin endpoints + +GOTRUE_JWT_SECRET="CHANGE-THIS! VERY IMPORTANT!" +GOTRUE_JWT_EXP="3600" +GOTRUE_JWT_AUD="authenticated" +GOTRUE_JWT_DEFAULT_GROUP_NAME="authenticated" +GOTRUE_JWT_ADMIN_ROLES="supabase_admin,service_role" + +# Database & API connection details +GOTRUE_DB_DRIVER="postgres" +DB_NAMESPACE="auth" +DATABASE_URL="postgres://supabase_auth_admin:root@localhost:5432/postgres" +API_EXTERNAL_URL="http://localhost:9999" +GOTRUE_API_HOST="localhost" +PORT="9999" + +# SMTP config (generate credentials for signup to work) +GOTRUE_SMTP_HOST="" +GOTRUE_SMTP_PORT="587" +GOTRUE_SMTP_USER="" +GOTRUE_SMTP_MAX_FREQUENCY="5s" +GOTRUE_SMTP_PASS="" +GOTRUE_SMTP_ADMIN_EMAIL="" +GOTRUE_SMTP_SENDER_NAME="" + +# Mailer config +GOTRUE_MAILER_AUTOCONFIRM="true" +GOTRUE_MAILER_URLPATHS_CONFIRMATION="/verify" +GOTRUE_MAILER_URLPATHS_INVITE="/verify" +GOTRUE_MAILER_URLPATHS_RECOVERY="/verify" +GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE="/verify" +GOTRUE_MAILER_SUBJECTS_CONFIRMATION="Confirm Your Email" +GOTRUE_MAILER_SUBJECTS_RECOVERY="Reset Your Password" +GOTRUE_MAILER_SUBJECTS_MAGIC_LINK="Your Magic Link" +GOTRUE_MAILER_SUBJECTS_EMAIL_CHANGE="Confirm Email Change" +GOTRUE_MAILER_SUBJECTS_INVITE="You have been invited" +GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED="true" + +# Custom mailer template config +GOTRUE_MAILER_TEMPLATES_INVITE="" +GOTRUE_MAILER_TEMPLATES_CONFIRMATION="" +GOTRUE_MAILER_TEMPLATES_RECOVERY="" +GOTRUE_MAILER_TEMPLATES_MAGIC_LINK="" +GOTRUE_MAILER_TEMPLATES_EMAIL_CHANGE="" + +# Signup config +GOTRUE_DISABLE_SIGNUP="false" +GOTRUE_SITE_URL="http://localhost:3000" +GOTRUE_EXTERNAL_EMAIL_ENABLED="true" +GOTRUE_EXTERNAL_PHONE_ENABLED="true" +GOTRUE_EXTERNAL_IOS_BUNDLE_ID="com.supabase.auth" + +# Whitelist redirect to URLs here, a comma separated list of URIs (e.g. 
"https://foo.example.com,https://*.foo.example.com,https://bar.example.com") +GOTRUE_URI_ALLOW_LIST="http://localhost:3000" + +# Apple OAuth config +GOTRUE_EXTERNAL_APPLE_ENABLED="false" +GOTRUE_EXTERNAL_APPLE_CLIENT_ID="" +GOTRUE_EXTERNAL_APPLE_SECRET="" +GOTRUE_EXTERNAL_APPLE_REDIRECT_URI="http://localhost:9999/callback" + +# Azure OAuth config +GOTRUE_EXTERNAL_AZURE_ENABLED="false" +GOTRUE_EXTERNAL_AZURE_CLIENT_ID="" +GOTRUE_EXTERNAL_AZURE_SECRET="" +GOTRUE_EXTERNAL_AZURE_REDIRECT_URI="https://localhost:9999/callback" + +# Bitbucket OAuth config +GOTRUE_EXTERNAL_BITBUCKET_ENABLED="false" +GOTRUE_EXTERNAL_BITBUCKET_CLIENT_ID="" +GOTRUE_EXTERNAL_BITBUCKET_SECRET="" +GOTRUE_EXTERNAL_BITBUCKET_REDIRECT_URI="http://localhost:9999/callback" + +# Discord OAuth config +GOTRUE_EXTERNAL_DISCORD_ENABLED="false" +GOTRUE_EXTERNAL_DISCORD_CLIENT_ID="" +GOTRUE_EXTERNAL_DISCORD_SECRET="" +GOTRUE_EXTERNAL_DISCORD_REDIRECT_URI="https://localhost:9999/callback" + +# Facebook OAuth config +GOTRUE_EXTERNAL_FACEBOOK_ENABLED="false" +GOTRUE_EXTERNAL_FACEBOOK_CLIENT_ID="" +GOTRUE_EXTERNAL_FACEBOOK_SECRET="" +GOTRUE_EXTERNAL_FACEBOOK_REDIRECT_URI="https://localhost:9999/callback" + +# Figma OAuth config +GOTRUE_EXTERNAL_FIGMA_ENABLED="false" +GOTRUE_EXTERNAL_FIGMA_CLIENT_ID="" +GOTRUE_EXTERNAL_FIGMA_SECRET="" +GOTRUE_EXTERNAL_FIGMA_REDIRECT_URI="https://localhost:9999/callback" + +# Gitlab OAuth config +GOTRUE_EXTERNAL_GITLAB_ENABLED="false" +GOTRUE_EXTERNAL_GITLAB_CLIENT_ID="" +GOTRUE_EXTERNAL_GITLAB_SECRET="" +GOTRUE_EXTERNAL_GITLAB_REDIRECT_URI="http://localhost:9999/callback" + +# Google OAuth config +GOTRUE_EXTERNAL_GOOGLE_ENABLED="false" +GOTRUE_EXTERNAL_GOOGLE_CLIENT_ID="" +GOTRUE_EXTERNAL_GOOGLE_SECRET="" +GOTRUE_EXTERNAL_GOOGLE_REDIRECT_URI="http://localhost:9999/callback" + +# Github OAuth config +GOTRUE_EXTERNAL_GITHUB_ENABLED="false" +GOTRUE_EXTERNAL_GITHUB_CLIENT_ID="" +GOTRUE_EXTERNAL_GITHUB_SECRET="" +GOTRUE_EXTERNAL_GITHUB_REDIRECT_URI="http://localhost:9999/callback" + +# Kakao OAuth config +GOTRUE_EXTERNAL_KAKAO_ENABLED="false" +GOTRUE_EXTERNAL_KAKAO_CLIENT_ID="" +GOTRUE_EXTERNAL_KAKAO_SECRET="" +GOTRUE_EXTERNAL_KAKAO_REDIRECT_URI="http://localhost:9999/callback" + +# Notion OAuth config +GOTRUE_EXTERNAL_NOTION_ENABLED="false" +GOTRUE_EXTERNAL_NOTION_CLIENT_ID="" +GOTRUE_EXTERNAL_NOTION_SECRET="" +GOTRUE_EXTERNAL_NOTION_REDIRECT_URI="https://localhost:9999/callback" + +# Twitter OAuth1 config +GOTRUE_EXTERNAL_TWITTER_ENABLED="false" +GOTRUE_EXTERNAL_TWITTER_CLIENT_ID="" +GOTRUE_EXTERNAL_TWITTER_SECRET="" +GOTRUE_EXTERNAL_TWITTER_REDIRECT_URI="http://localhost:9999/callback" + +# Twitch OAuth config +GOTRUE_EXTERNAL_TWITCH_ENABLED="false" +GOTRUE_EXTERNAL_TWITCH_CLIENT_ID="" +GOTRUE_EXTERNAL_TWITCH_SECRET="" +GOTRUE_EXTERNAL_TWITCH_REDIRECT_URI="http://localhost:9999/callback" + +# Spotify OAuth config +GOTRUE_EXTERNAL_SPOTIFY_ENABLED="false" +GOTRUE_EXTERNAL_SPOTIFY_CLIENT_ID="" +GOTRUE_EXTERNAL_SPOTIFY_SECRET="" +GOTRUE_EXTERNAL_SPOTIFY_REDIRECT_URI="http://localhost:9999/callback" + +# Keycloak OAuth config +GOTRUE_EXTERNAL_KEYCLOAK_ENABLED="false" +GOTRUE_EXTERNAL_KEYCLOAK_CLIENT_ID="" +GOTRUE_EXTERNAL_KEYCLOAK_SECRET="" +GOTRUE_EXTERNAL_KEYCLOAK_REDIRECT_URI="http://localhost:9999/callback" +GOTRUE_EXTERNAL_KEYCLOAK_URL="https://keycloak.example.com/auth/realms/myrealm" + +# Linkedin OAuth config +GOTRUE_EXTERNAL_LINKEDIN_ENABLED="true" +GOTRUE_EXTERNAL_LINKEDIN_CLIENT_ID="" +GOTRUE_EXTERNAL_LINKEDIN_SECRET="" + +# Slack OAuth config +GOTRUE_EXTERNAL_SLACK_ENABLED="false" 
+GOTRUE_EXTERNAL_SLACK_CLIENT_ID="" +GOTRUE_EXTERNAL_SLACK_SECRET="" +GOTRUE_EXTERNAL_SLACK_REDIRECT_URI="http://localhost:9999/callback" + +# WorkOS OAuth config +GOTRUE_EXTERNAL_WORKOS_ENABLED="true" +GOTRUE_EXTERNAL_WORKOS_CLIENT_ID="" +GOTRUE_EXTERNAL_WORKOS_SECRET="" +GOTRUE_EXTERNAL_WORKOS_REDIRECT_URI="http://localhost:9999/callback" + +# Zoom OAuth config +GOTRUE_EXTERNAL_ZOOM_ENABLED="false" +GOTRUE_EXTERNAL_ZOOM_CLIENT_ID="" +GOTRUE_EXTERNAL_ZOOM_SECRET="" +GOTRUE_EXTERNAL_ZOOM_REDIRECT_URI="http://localhost:9999/callback" + +# Anonymous auth config +GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED="false" + +# PKCE Config +GOTRUE_EXTERNAL_FLOW_STATE_EXPIRY_DURATION="300s" + +# Phone provider config +GOTRUE_SMS_AUTOCONFIRM="false" +GOTRUE_SMS_MAX_FREQUENCY="5s" +GOTRUE_SMS_OTP_EXP="6000" +GOTRUE_SMS_OTP_LENGTH="6" +GOTRUE_SMS_PROVIDER="twilio" +GOTRUE_SMS_TWILIO_ACCOUNT_SID="" +GOTRUE_SMS_TWILIO_AUTH_TOKEN="" +GOTRUE_SMS_TWILIO_MESSAGE_SERVICE_SID="" +GOTRUE_SMS_TEMPLATE="This is from supabase. Your code is {{ .Code }} ." +GOTRUE_SMS_MESSAGEBIRD_ACCESS_KEY="" +GOTRUE_SMS_MESSAGEBIRD_ORIGINATOR="" +GOTRUE_SMS_TEXTLOCAL_API_KEY="" +GOTRUE_SMS_TEXTLOCAL_SENDER="" +GOTRUE_SMS_VONAGE_API_KEY="" +GOTRUE_SMS_VONAGE_API_SECRET="" +GOTRUE_SMS_VONAGE_FROM="" + +# Captcha config +GOTRUE_SECURITY_CAPTCHA_ENABLED="false" +GOTRUE_SECURITY_CAPTCHA_PROVIDER="hcaptcha" +GOTRUE_SECURITY_CAPTCHA_SECRET="0x0000000000000000000000000000000000000000" +GOTRUE_SECURITY_CAPTCHA_TIMEOUT="10s" +GOTRUE_SESSION_KEY="" + +# SAML config +GOTRUE_EXTERNAL_SAML_ENABLED="true" +GOTRUE_EXTERNAL_SAML_METADATA_URL="" +GOTRUE_EXTERNAL_SAML_API_BASE="http://localhost:9999" +GOTRUE_EXTERNAL_SAML_NAME="auth0" +GOTRUE_EXTERNAL_SAML_SIGNING_CERT="" +GOTRUE_EXTERNAL_SAML_SIGNING_KEY="" + +# Additional Security config +GOTRUE_LOG_LEVEL="debug" +GOTRUE_SECURITY_REFRESH_TOKEN_ROTATION_ENABLED="false" +GOTRUE_SECURITY_REFRESH_TOKEN_REUSE_INTERVAL="0" +GOTRUE_SECURITY_UPDATE_PASSWORD_REQUIRE_REAUTHENTICATION="false" +GOTRUE_OPERATOR_TOKEN="unused-operator-token" +GOTRUE_RATE_LIMIT_HEADER="X-Forwarded-For" +GOTRUE_RATE_LIMIT_EMAIL_SENT="100" + +GOTRUE_MAX_VERIFIED_FACTORS=10 + +# Auth Hook Configuration +GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED=false +GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI="" +# Only for HTTPS Hooks +GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRET="" + +GOTRUE_HOOK_CUSTOM_SMS_PROVIDER_ENABLED=false +GOTRUE_HOOK_CUSTOM_SMS_PROVIDER_URI="" +# Only for HTTPS Hooks +GOTRUE_HOOK_CUSTOM_SMS_PROVIDER_SECRET="" + + +# Test OTP Config +GOTRUE_SMS_TEST_OTP=":, :..." +GOTRUE_SMS_TEST_OTP_VALID_UNTIL="2050-01-01T01:00:00Z" # (e.g. 
2023-09-29T08:14:06Z) diff --git a/auth_v2.169.0/internal/security/captcha.go b/auth_v2.169.0/internal/security/captcha.go new file mode 100644 index 0000000..aeacb63 --- /dev/null +++ b/auth_v2.169.0/internal/security/captcha.go @@ -0,0 +1,101 @@ +package security + +import ( + "encoding/json" + "log" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "fmt" + + "github.com/pkg/errors" + "github.com/supabase/auth/internal/utilities" +) + +type GotrueRequest struct { + Security GotrueSecurity `json:"gotrue_meta_security"` +} + +type GotrueSecurity struct { + Token string `json:"captcha_token"` +} + +type VerificationResponse struct { + Success bool `json:"success"` + ErrorCodes []string `json:"error-codes"` + Hostname string `json:"hostname"` +} + +var Client *http.Client + +func init() { + var defaultTimeout time.Duration = time.Second * 10 + timeoutStr := os.Getenv("GOTRUE_SECURITY_CAPTCHA_TIMEOUT") + if timeoutStr != "" { + if timeout, err := time.ParseDuration(timeoutStr); err != nil { + log.Fatalf("error loading GOTRUE_SECURITY_CAPTCHA_TIMEOUT: %v", err.Error()) + } else if timeout != 0 { + defaultTimeout = timeout + } + } + + Client = &http.Client{Timeout: defaultTimeout} +} + +func VerifyRequest(requestBody *GotrueRequest, clientIP, secretKey, captchaProvider string) (VerificationResponse, error) { + captchaResponse := strings.TrimSpace(requestBody.Security.Token) + + if captchaResponse == "" { + return VerificationResponse{}, errors.New("no captcha response (captcha_token) found in request") + } + + captchaURL, err := GetCaptchaURL(captchaProvider) + if err != nil { + return VerificationResponse{}, err + } + + return verifyCaptchaCode(captchaResponse, secretKey, clientIP, captchaURL) +} + +func verifyCaptchaCode(token, secretKey, clientIP, captchaURL string) (VerificationResponse, error) { + data := url.Values{} + data.Set("secret", secretKey) + data.Set("response", token) + data.Set("remoteip", clientIP) + // TODO (darora): pipe through sitekey + + r, err := http.NewRequest("POST", captchaURL, strings.NewReader(data.Encode())) + if err != nil { + return VerificationResponse{}, errors.Wrap(err, "couldn't initialize request object for captcha check") + } + r.Header.Add("Content-Type", "application/x-www-form-urlencoded") + r.Header.Add("Content-Length", strconv.Itoa(len(data.Encode()))) + res, err := Client.Do(r) + if err != nil { + return VerificationResponse{}, errors.Wrap(err, "failed to verify captcha response") + } + defer utilities.SafeClose(res.Body) + + var verificationResponse VerificationResponse + + if err := json.NewDecoder(res.Body).Decode(&verificationResponse); err != nil { + return VerificationResponse{}, errors.Wrap(err, "failed to decode captcha response: not JSON") + } + + return verificationResponse, nil +} + +func GetCaptchaURL(captchaProvider string) (string, error) { + switch captchaProvider { + case "hcaptcha": + return "https://hcaptcha.com/siteverify", nil + case "turnstile": + return "https://challenges.cloudflare.com/turnstile/v0/siteverify", nil + default: + return "", fmt.Errorf("captcha Provider %q could not be found", captchaProvider) + } +} diff --git a/auth_v2.169.0/internal/storage/dial.go b/auth_v2.169.0/internal/storage/dial.go new file mode 100644 index 0000000..3ee9939 --- /dev/null +++ b/auth_v2.169.0/internal/storage/dial.go @@ -0,0 +1,192 @@ +package storage + +import ( + "context" + "database/sql" + "net/url" + "reflect" + "time" + + "github.com/XSAM/otelsql" + "github.com/gobuffalo/pop/v6" + 
"github.com/gobuffalo/pop/v6/columns" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/supabase/auth/internal/conf" +) + +// Connection is the interface a storage provider must implement. +type Connection struct { + *pop.Connection +} + +// Dial will connect to that storage engine +func Dial(config *conf.GlobalConfiguration) (*Connection, error) { + if config.DB.Driver == "" && config.DB.URL != "" { + u, err := url.Parse(config.DB.URL) + if err != nil { + return nil, errors.Wrap(err, "parsing db connection url") + } + config.DB.Driver = u.Scheme + } + + driver := "" + if config.DB.Driver != "postgres" { + logrus.Warn("DEPRECATION NOTICE: only PostgreSQL is supported by Supabase's GoTrue, will be removed soon") + } else { + // pop v5 uses pgx as the default PostgreSQL driver + driver = "pgx" + } + + if driver != "" && (config.Tracing.Enabled || config.Metrics.Enabled) { + instrumentedDriver, err := otelsql.Register(driver) + if err != nil { + logrus.WithError(err).Errorf("unable to instrument sql driver %q for use with OpenTelemetry", driver) + } else { + logrus.Debugf("using %s as an instrumented driver for OpenTelemetry", instrumentedDriver) + + // sqlx needs to be informed that the new instrumented + // driver has the same semantics as the + // non-instrumented driver + sqlx.BindDriver(instrumentedDriver, sqlx.BindType(driver)) + + driver = instrumentedDriver + } + } + + options := make(map[string]string) + + if config.DB.HealthCheckPeriod != time.Duration(0) { + options["pool_health_check_period"] = config.DB.HealthCheckPeriod.String() + } + + if config.DB.ConnMaxIdleTime != time.Duration(0) { + options["pool_max_conn_idle_time"] = config.DB.ConnMaxIdleTime.String() + } + + db, err := pop.NewConnection(&pop.ConnectionDetails{ + Dialect: config.DB.Driver, + Driver: driver, + URL: config.DB.URL, + Pool: config.DB.MaxPoolSize, + IdlePool: config.DB.MaxIdlePoolSize, + ConnMaxLifetime: config.DB.ConnMaxLifetime, + ConnMaxIdleTime: config.DB.ConnMaxIdleTime, + Options: options, + }) + if err != nil { + return nil, errors.Wrap(err, "opening database connection") + } + if err := db.Open(); err != nil { + return nil, errors.Wrap(err, "checking database connection") + } + + if config.Metrics.Enabled { + registerOpenTelemetryDatabaseStats(db) + } + + return &Connection{db}, nil +} + +func registerOpenTelemetryDatabaseStats(db *pop.Connection) { + defer func() { + if rec := recover(); rec != nil { + logrus.WithField("error", rec).Error("registerOpenTelemetryDatabaseStats is not able to determine database object with reflection -- panicked") + } + }() + + dbval := reflect.Indirect(reflect.ValueOf(db.Store)) + dbfield := dbval.Field(0) + sqldbfield := reflect.Indirect(dbfield).Field(0) + + sqldb, ok := sqldbfield.Interface().(*sql.DB) + if !ok || sqldb == nil { + logrus.Error("registerOpenTelemetryDatabaseStats is not able to determine database object with reflection") + return + } + + if err := otelsql.RegisterDBStatsMetrics(sqldb); err != nil { + logrus.WithError(err).Error("unable to register OpenTelemetry stats metrics for databse") + } else { + logrus.Debug("registered OpenTelemetry stats metrics for database") + } +} + +type CommitWithError struct { + Err error +} + +func (e *CommitWithError) Error() string { + return e.Err.Error() +} + +func (e *CommitWithError) Cause() error { + return e.Err +} + +// NewCommitWithError creates an error that can be returned in a pop transaction +// without rolling back the transaction. 
This should only be used in cases where +// you want the transaction to commit but return an error message to the user. +func NewCommitWithError(err error) *CommitWithError { + return &CommitWithError{Err: err} +} + +func (c *Connection) Transaction(fn func(*Connection) error) error { + if c.TX == nil { + var returnErr error + if terr := c.Connection.Transaction(func(tx *pop.Connection) error { + err := fn(&Connection{tx}) + switch err.(type) { + case *CommitWithError: + returnErr = err + return nil + default: + return err + } + }); terr != nil { + // there exists a race condition when the context deadline is exceeded + // and whether the transaction has been committed or not + // e.g. if the context deadline has exceeded but the transaction has already been committed, + // it won't be possible to perform a rollback on the transaction since the transaction has been closed + if !errors.Is(terr, sql.ErrTxDone) { + return terr + } + } + return returnErr + } + return fn(c) +} + +// WithContext returns a new connection with an updated context. This is +// typically used for tracing as the context contains trace span information. +func (c *Connection) WithContext(ctx context.Context) *Connection { + return &Connection{c.Connection.WithContext(ctx)} +} + +func getExcludedColumns(model interface{}, includeColumns ...string) ([]string, error) { + sm := &pop.Model{Value: model} + st := reflect.TypeOf(model) + if st.Kind() == reflect.Ptr { + _ = st.Elem() + } + + // get all columns and remove included to get excluded set + cols := columns.ForStructWithAlias(model, sm.TableName(), sm.As, sm.IDField()) + for _, f := range includeColumns { + if _, ok := cols.Cols[f]; !ok { + return nil, errors.Errorf("Invalid column name %s", f) + } + cols.Remove(f) + } + + xcols := make([]string, 0, len(cols.Cols)) + for n := range cols.Cols { + // gobuffalo updates the updated_at column automatically + if n == "updated_at" { + continue + } + xcols = append(xcols, n) + } + return xcols, nil +} diff --git a/auth_v2.169.0/internal/storage/dial_test.go b/auth_v2.169.0/internal/storage/dial_test.go new file mode 100644 index 0000000..078b6d5 --- /dev/null +++ b/auth_v2.169.0/internal/storage/dial_test.go @@ -0,0 +1,60 @@ +package storage + +import ( + "errors" + "testing" + + "github.com/gofrs/uuid" + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/conf" +) + +type TestUser struct { + ID uuid.UUID + Role string `db:"role"` + Other string `db:"othercol"` +} + +func TestGetExcludedColumns(t *testing.T) { + u := TestUser{} + cols, err := getExcludedColumns(u, "role") + require.NoError(t, err) + require.NotContains(t, cols, "role") + require.Contains(t, cols, "othercol") +} + +func TestGetExcludedColumns_InvalidName(t *testing.T) { + u := TestUser{} + _, err := getExcludedColumns(u, "adsf") + require.Error(t, err) +} + +func TestTransaction(t *testing.T) { + apiTestConfig := "../../hack/test.env" + config, err := conf.LoadGlobal(apiTestConfig) + require.NoError(t, err) + conn, err := Dial(config) + require.NoError(t, err) + require.NotNil(t, conn) + + defer func() { + // clean up the test table created + require.NoError(t, conn.RawQuery("drop table if exists test").Exec(), "Error removing table") + }() + + commitWithError := NewCommitWithError(errors.New("commit with error")) + err = conn.Transaction(func(tx *Connection) error { + require.NoError(t, tx.RawQuery("create table if not exists test()").Exec(), "Error saving creating test table") + return commitWithError + }) + require.Error(t, err) + 
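+	// Transaction captures a *CommitWithError returned by the inner
+	// function and hands it back after committing, so the exact sentinel
+	// instance surfaces here and can be matched with ErrorIs.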
require.ErrorIs(t, err, commitWithError) + + type TestData struct{} + + // check that transaction is still being committed despite returning an error above + data := []TestData{} + err = conn.RawQuery("select * from test").All(&data) + require.NoError(t, err) + require.Empty(t, data) +} diff --git a/auth_v2.169.0/internal/storage/helper.go b/auth_v2.169.0/internal/storage/helper.go new file mode 100644 index 0000000..2359984 --- /dev/null +++ b/auth_v2.169.0/internal/storage/helper.go @@ -0,0 +1,31 @@ +package storage + +import ( + "database/sql/driver" + "errors" +) + +type NullString string + +func (s *NullString) Scan(value interface{}) error { + if value == nil { + *s = "" + return nil + } + strVal, ok := value.(string) + if !ok { + return errors.New("column is not a string") + } + *s = NullString(strVal) + return nil +} +func (s NullString) Value() (driver.Value, error) { + if len(s) == 0 { // if nil or empty string + return nil, nil + } + return string(s), nil +} + +func (s NullString) String() string { + return string(s) +} diff --git a/auth_v2.169.0/internal/storage/sql.go b/auth_v2.169.0/internal/storage/sql.go new file mode 100644 index 0000000..2173411 --- /dev/null +++ b/auth_v2.169.0/internal/storage/sql.go @@ -0,0 +1,9 @@ +package storage + +func (conn *Connection) UpdateOnly(model interface{}, includeColumns ...string) error { + xcols, err := getExcludedColumns(model, includeColumns...) + if err != nil { + return err + } + return conn.Update(model, xcols...) +} diff --git a/auth_v2.169.0/internal/storage/test/db_setup.go b/auth_v2.169.0/internal/storage/test/db_setup.go new file mode 100644 index 0000000..8eeb099 --- /dev/null +++ b/auth_v2.169.0/internal/storage/test/db_setup.go @@ -0,0 +1,10 @@ +package test + +import ( + "github.com/supabase/auth/internal/conf" + "github.com/supabase/auth/internal/storage" +) + +func SetupDBConnection(globalConfig *conf.GlobalConfiguration) (*storage.Connection, error) { + return storage.Dial(globalConfig) +} diff --git a/auth_v2.169.0/internal/utilities/context.go b/auth_v2.169.0/internal/utilities/context.go new file mode 100644 index 0000000..06aa74a --- /dev/null +++ b/auth_v2.169.0/internal/utilities/context.go @@ -0,0 +1,51 @@ +package utilities + +import ( + "context" + "sync" +) + +type contextKey string + +func (c contextKey) String() string { + return "gotrue api context key " + string(c) +} + +const ( + requestIDKey = contextKey("request_id") +) + +// WithRequestID adds the provided request ID to the context. +func WithRequestID(ctx context.Context, id string) context.Context { + return context.WithValue(ctx, requestIDKey, id) +} + +// GetRequestID reads the request ID from the context. +func GetRequestID(ctx context.Context) string { + obj := ctx.Value(requestIDKey) + if obj == nil { + return "" + } + + return obj.(string) +} + +// WaitForCleanup waits until all long-running goroutines shut +// down cleanly or until the provided context signals done. 
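+//
+// A minimal usage sketch (hypothetical caller, mirroring the shutdown path
+// in main.go below): pair it with a deadline so cleanup cannot block forever:
+//
+//	shutdownCtx, cancel := context.WithTimeout(context.Background(), time.Minute)
+//	defer cancel()
+//	utilities.WaitForCleanup(shutdownCtx, &wg)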
+func WaitForCleanup(ctx context.Context, wg *sync.WaitGroup) { + cleanupDone := make(chan struct{}) + + go func() { + defer close(cleanupDone) + + wg.Wait() + }() + + select { + case <-ctx.Done(): + return + + case <-cleanupDone: + return + } +} diff --git a/auth_v2.169.0/internal/utilities/hibpcache.go b/auth_v2.169.0/internal/utilities/hibpcache.go new file mode 100644 index 0000000..14c3fc3 --- /dev/null +++ b/auth_v2.169.0/internal/utilities/hibpcache.go @@ -0,0 +1,76 @@ +package utilities + +import ( + "context" + "sync" + + "github.com/bits-and-blooms/bloom/v3" +) + +const ( + // hibpHashLength is the length of a hex-encoded SHA1 hash. + hibpHashLength = 40 + // hibpHashPrefixLength is the length of the hashed password prefix. + hibpHashPrefixLength = 5 +) + +type HIBPBloomCache struct { + sync.RWMutex + + n uint + items uint + filter *bloom.BloomFilter +} + +func NewHIBPBloomCache(n uint, fp float64) *HIBPBloomCache { + cache := &HIBPBloomCache{ + n: n, + filter: bloom.NewWithEstimates(n, fp), + } + + return cache +} + +func (c *HIBPBloomCache) Cap() uint { + return c.filter.Cap() +} + +func (c *HIBPBloomCache) Add(ctx context.Context, prefix []byte, suffixes [][]byte) error { + c.Lock() + defer c.Unlock() + + c.items += uint(len(suffixes)) + + if c.items > (4*c.n)/5 { + // clear the filter if 80% full to keep the actual false + // positive rate low + c.filter.ClearAll() + + // reduce memory footprint when this happens + c.filter.BitSet().Compact() + + c.items = uint(len(suffixes)) + } + + var combined [hibpHashLength]byte + copy(combined[:], prefix) + + for _, suffix := range suffixes { + copy(combined[hibpHashPrefixLength:], suffix) + + c.filter.Add(combined[:]) + } + + return nil +} + +func (c *HIBPBloomCache) Contains(ctx context.Context, prefix, suffix []byte) (bool, error) { + var combined [hibpHashLength]byte + copy(combined[:], prefix) + copy(combined[hibpHashPrefixLength:], suffix) + + c.RLock() + defer c.RUnlock() + + return c.filter.Test(combined[:]), nil +} diff --git a/auth_v2.169.0/internal/utilities/io.go b/auth_v2.169.0/internal/utilities/io.go new file mode 100644 index 0000000..ab89b4c --- /dev/null +++ b/auth_v2.169.0/internal/utilities/io.go @@ -0,0 +1,13 @@ +package utilities + +import ( + "io" + + "github.com/sirupsen/logrus" +) + +func SafeClose(closer io.Closer) { + if err := closer.Close(); err != nil { + logrus.WithError(err).Warn("Close operation failed") + } +} diff --git a/auth_v2.169.0/internal/utilities/postgres.go b/auth_v2.169.0/internal/utilities/postgres.go new file mode 100644 index 0000000..4d7fde8 --- /dev/null +++ b/auth_v2.169.0/internal/utilities/postgres.go @@ -0,0 +1,76 @@ +package utilities + +import ( + "errors" + "strconv" + "strings" + + "github.com/jackc/pgconn" + "github.com/jackc/pgerrcode" +) + +// PostgresError is a custom error struct for marshalling Postgres errors to JSON. +type PostgresError struct { + Code string `json:"code"` + HttpStatusCode int `json:"-"` + Message string `json:"message"` + Hint string `json:"hint,omitempty"` + Detail string `json:"detail,omitempty"` +} + +// NewPostgresError returns a new PostgresError if the error was from a publicly +// accessible Postgres error. 
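+//
+// A minimal, hypothetical usage sketch (handler variables assumed): surface a
+// publicly accessible error, e.g. a unique violation (Code "23505"), as JSON:
+//
+//	if pgErr := utilities.NewPostgresError(err); pgErr != nil {
+//		w.WriteHeader(pgErr.HttpStatusCode)
+//		_ = json.NewEncoder(w).Encode(pgErr)
+//	}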
+func NewPostgresError(err error) *PostgresError { + var pgErr *pgconn.PgError + if errors.As(err, &pgErr) && isPubliclyAccessiblePostgresError(pgErr.Code) { + return &PostgresError{ + Code: pgErr.Code, + HttpStatusCode: getHttpStatusCodeFromPostgresErrorCode(pgErr.Code), + Message: pgErr.Message, + Detail: pgErr.Detail, + Hint: pgErr.Hint, + } + } + + return nil +} +func (pg *PostgresError) IsUniqueConstraintViolated() bool { + // See https://www.postgresql.org/docs/current/errcodes-appendix.html for list of error codes + return pg.Code == "23505" +} + +// isPubliclyAccessiblePostgresError checks if the Postgres error should be +// made accessible. +func isPubliclyAccessiblePostgresError(code string) bool { + if len(code) != 5 { + return false + } + + // default response + return getHttpStatusCodeFromPostgresErrorCode(code) != 0 +} + +// getHttpStatusCodeFromPostgresErrorCode maps a Postgres error code to a HTTP +// status code. Returns 0 if the code doesn't map to a given postgres error code. +func getHttpStatusCodeFromPostgresErrorCode(code string) int { + if code == pgerrcode.RaiseException || + code == pgerrcode.IntegrityConstraintViolation || + code == pgerrcode.RestrictViolation || + code == pgerrcode.NotNullViolation || + code == pgerrcode.ForeignKeyViolation || + code == pgerrcode.UniqueViolation || + code == pgerrcode.CheckViolation || + code == pgerrcode.ExclusionViolation { + return 500 + } + + // Use custom HTTP status code if Postgres error was triggered with `PTXXX` + // code. This is consistent with PostgREST's behaviour as well. + if strings.HasPrefix(code, "PT") { + if httpStatusCode, err := strconv.ParseInt(code[2:], 10, 0); err == nil { + return int(httpStatusCode) + } + } + + return 0 +} diff --git a/auth_v2.169.0/internal/utilities/request.go b/auth_v2.169.0/internal/utilities/request.go new file mode 100644 index 0000000..b6b8697 --- /dev/null +++ b/auth_v2.169.0/internal/utilities/request.go @@ -0,0 +1,117 @@ +package utilities + +import ( + "bytes" + "io" + "net" + "net/http" + "net/url" + "strings" + + "github.com/supabase/auth/internal/conf" +) + +// GetIPAddress returns the real IP address of the HTTP request. It parses the +// X-Forwarded-For header. +func GetIPAddress(r *http.Request) string { + if r.Header != nil { + xForwardedFor := r.Header.Get("X-Forwarded-For") + if xForwardedFor != "" { + ips := strings.Split(xForwardedFor, ",") + for i := range ips { + ips[i] = strings.TrimSpace(ips[i]) + } + + for _, ip := range ips { + if ip != "" { + parsed := net.ParseIP(ip) + if parsed == nil { + continue + } + + return parsed.String() + } + } + } + } + + ipPort := r.RemoteAddr + ip, _, err := net.SplitHostPort(ipPort) + if err != nil { + return ipPort + } + + return ip +} + +// GetBodyBytes reads the whole request body properly into a byte array. 
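+//
+// The drained bytes are swapped back into req.Body via io.NopCloser, so
+// downstream handlers can read the body again. A hedged middleware sketch
+// (names are illustrative):
+//
+//	buf, err := utilities.GetBodyBytes(r)
+//	if err == nil {
+//		logrus.WithField("body_bytes", len(buf)).Debug("captured request body")
+//	}
+//	next.ServeHTTP(w, r) // r.Body remains readable here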
+func GetBodyBytes(req *http.Request) ([]byte, error) { + if req.Body == nil || req.Body == http.NoBody { + return nil, nil + } + + originalBody := req.Body + defer SafeClose(originalBody) + + buf, err := io.ReadAll(originalBody) + if err != nil { + return nil, err + } + + req.Body = io.NopCloser(bytes.NewReader(buf)) + + return buf, nil +} + +func GetReferrer(r *http.Request, config *conf.GlobalConfiguration) string { + // try get redirect url from query or post data first + reqref := getRedirectTo(r) + if IsRedirectURLValid(config, reqref) { + return reqref + } + + // instead try referrer header value + reqref = r.Referer() + if IsRedirectURLValid(config, reqref) { + return reqref + } + + return config.SiteURL +} + +func IsRedirectURLValid(config *conf.GlobalConfiguration, redirectURL string) bool { + if redirectURL == "" { + return false + } + + base, berr := url.Parse(config.SiteURL) + refurl, rerr := url.Parse(redirectURL) + + // As long as the referrer came from the site, we will redirect back there + if berr == nil && rerr == nil && base.Hostname() == refurl.Hostname() { + return true + } + + // For case when user came from mobile app or other permitted resource - redirect back + for _, pattern := range config.URIAllowListMap { + if pattern.Match(redirectURL) { + return true + } + } + + return false +} + +// getRedirectTo tries extract redirect url from header or from query params +func getRedirectTo(r *http.Request) (reqref string) { + reqref = r.Header.Get("redirect_to") + if reqref != "" { + return + } + + if err := r.ParseForm(); err == nil { + reqref = r.Form.Get("redirect_to") + } + + return +} diff --git a/auth_v2.169.0/internal/utilities/request_test.go b/auth_v2.169.0/internal/utilities/request_test.go new file mode 100644 index 0000000..6704e39 --- /dev/null +++ b/auth_v2.169.0/internal/utilities/request_test.go @@ -0,0 +1,134 @@ +package utilities + +import ( + "net/http" + "net/http/httptest" + tst "testing" + + "github.com/stretchr/testify/require" + "github.com/supabase/auth/internal/conf" +) + +func TestGetIPAddress(t *tst.T) { + examples := []func(r *http.Request) string{ + func(r *http.Request) string { + r.Header = nil + r.RemoteAddr = "127.0.0.1:8080" + + return "127.0.0.1" + }, + + func(r *http.Request) string { + r.Header = nil + r.RemoteAddr = "incorrect" + + return "incorrect" + }, + + func(r *http.Request) string { + r.Header = make(http.Header) + r.RemoteAddr = "127.0.0.1:8080" + + return "127.0.0.1" + }, + + func(r *http.Request) string { + r.Header = make(http.Header) + r.RemoteAddr = "[::1]:8080" + + return "::1" + }, + + func(r *http.Request) string { + r.Header = make(http.Header) + r.RemoteAddr = "127.0.0.1:8080" + r.Header.Add("X-Forwarded-For", "127.0.0.2") + + return "127.0.0.2" + }, + + func(r *http.Request) string { + r.Header = make(http.Header) + r.RemoteAddr = "127.0.0.1:8080" + r.Header.Add("X-Forwarded-For", "127.0.0.2") + + return "127.0.0.2" + }, + + func(r *http.Request) string { + r.Header = make(http.Header) + r.RemoteAddr = "127.0.0.1:8080" + r.Header.Add("X-Forwarded-For", "127.0.0.2,") + + return "127.0.0.2" + }, + + func(r *http.Request) string { + r.Header = make(http.Header) + r.RemoteAddr = "127.0.0.1:8080" + r.Header.Add("X-Forwarded-For", "127.0.0.2,127.0.0.3") + + return "127.0.0.2" + }, + + func(r *http.Request) string { + r.Header = make(http.Header) + r.RemoteAddr = "127.0.0.1:8080" + r.Header.Add("X-Forwarded-For", "::1,127.0.0.2") + + return "::1" + }, + } + + for _, example := range examples { + req := &http.Request{} + 
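+		// each example mutates req in place and returns the address
+		// string that GetIPAddress is expected to extract from it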
expected := example(req) + + require.Equal(t, GetIPAddress(req), expected) + } +} + +func TestGetReferrer(t *tst.T) { + config := conf.GlobalConfiguration{ + SiteURL: "https://example.com", + URIAllowList: []string{"http://localhost:8000/*"}, + JWT: conf.JWTConfiguration{ + Secret: "testsecret", + }, + } + require.NoError(t, config.ApplyDefaults()) + cases := []struct { + desc string + redirectURL string + expected string + }{ + { + desc: "valid redirect url", + redirectURL: "http://localhost:8000/path", + expected: "http://localhost:8000/path", + }, + { + desc: "invalid redirect url", + redirectURL: "http://localhost:3000", + expected: config.SiteURL, + }, + { + desc: "no / separator", + redirectURL: "http://localhost:8000", + expected: config.SiteURL, + }, + { + desc: "* respects separator", + redirectURL: "http://localhost:8000/path/to/page", + expected: config.SiteURL, + }, + } + + for _, c := range cases { + t.Run(c.desc, func(t *tst.T) { + r := httptest.NewRequest("GET", "http://localhost?redirect_to="+c.redirectURL, nil) + referrer := GetReferrer(r, &config) + require.Equal(t, c.expected, referrer) + }) + } +} diff --git a/auth_v2.169.0/internal/utilities/version.go b/auth_v2.169.0/internal/utilities/version.go new file mode 100644 index 0000000..b3ba95a --- /dev/null +++ b/auth_v2.169.0/internal/utilities/version.go @@ -0,0 +1,4 @@ +package utilities + +// Version is git commit or release tag from which this binary was built. +var Version string diff --git a/auth_v2.169.0/main.go b/auth_v2.169.0/main.go new file mode 100644 index 0000000..7455193 --- /dev/null +++ b/auth_v2.169.0/main.go @@ -0,0 +1,68 @@ +package main + +import ( + "context" + "embed" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/sirupsen/logrus" + "github.com/supabase/auth/cmd" + "github.com/supabase/auth/internal/observability" +) + +//go:embed migrations/* +var embeddedMigrations embed.FS + +func init() { + logrus.SetFormatter(&logrus.JSONFormatter{}) +} + +func main() { + cmd.EmbeddedMigrations = embeddedMigrations + + execCtx, execCancel := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGHUP, syscall.SIGINT) + defer execCancel() + + go func() { + <-execCtx.Done() + logrus.Info("received graceful shutdown signal") + }() + + // command is expected to obey the cancellation signal on execCtx and + // block while it is running + if err := cmd.RootCommand().ExecuteContext(execCtx); err != nil { + logrus.WithError(err).Fatal(err) + } + + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), time.Minute) + defer shutdownCancel() + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + + // wait for profiler, metrics and trace exporters to shut down gracefully + observability.WaitForCleanup(shutdownCtx) + }() + + cleanupDone := make(chan struct{}) + go func() { + defer close(cleanupDone) + wg.Wait() + }() + + select { + case <-shutdownCtx.Done(): + // cleanup timed out + return + + case <-cleanupDone: + // cleanup finished before timing out + return + } +} diff --git a/auth_v2.169.0/migrations/00_init_auth_schema.up.sql b/auth_v2.169.0/migrations/00_init_auth_schema.up.sql new file mode 100644 index 0000000..a040095 --- /dev/null +++ b/auth_v2.169.0/migrations/00_init_auth_schema.up.sql @@ -0,0 +1,88 @@ +-- auth.users definition + +CREATE TABLE IF NOT EXISTS {{ index .Options "Namespace" }}.users ( + instance_id uuid NULL, + id uuid NOT NULL UNIQUE, + aud varchar(255) NULL, + "role" varchar(255) NULL, + email varchar(255) NULL UNIQUE, + 
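+    -- note: this UNIQUE constraint is superseded by the 20221215195500
+    -- migration later in this commit, which replaces it with a partial
+    -- unique index so that SSO users may share an email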
    encrypted_password varchar(255) NULL,
+    confirmed_at timestamptz NULL,
+    invited_at timestamptz NULL,
+    confirmation_token varchar(255) NULL,
+    confirmation_sent_at timestamptz NULL,
+    recovery_token varchar(255) NULL,
+    recovery_sent_at timestamptz NULL,
+    email_change_token varchar(255) NULL,
+    email_change varchar(255) NULL,
+    email_change_sent_at timestamptz NULL,
+    last_sign_in_at timestamptz NULL,
+    raw_app_meta_data jsonb NULL,
+    raw_user_meta_data jsonb NULL,
+    is_super_admin bool NULL,
+    created_at timestamptz NULL,
+    updated_at timestamptz NULL,
+    CONSTRAINT users_pkey PRIMARY KEY (id)
+);
+CREATE INDEX IF NOT EXISTS users_instance_id_email_idx ON {{ index .Options "Namespace" }}.users USING btree (instance_id, email);
+CREATE INDEX IF NOT EXISTS users_instance_id_idx ON {{ index .Options "Namespace" }}.users USING btree (instance_id);
+comment on table {{ index .Options "Namespace" }}.users is 'Auth: Stores user login data within a secure schema.';
+
+-- auth.refresh_tokens definition
+
+CREATE TABLE IF NOT EXISTS {{ index .Options "Namespace" }}.refresh_tokens (
+    instance_id uuid NULL,
+    id bigserial NOT NULL,
+    "token" varchar(255) NULL,
+    user_id varchar(255) NULL,
+    revoked bool NULL,
+    created_at timestamptz NULL,
+    updated_at timestamptz NULL,
+    CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id)
+);
+CREATE INDEX IF NOT EXISTS refresh_tokens_instance_id_idx ON {{ index .Options "Namespace" }}.refresh_tokens USING btree (instance_id);
+CREATE INDEX IF NOT EXISTS refresh_tokens_instance_id_user_id_idx ON {{ index .Options "Namespace" }}.refresh_tokens USING btree (instance_id, user_id);
+CREATE INDEX IF NOT EXISTS refresh_tokens_token_idx ON {{ index .Options "Namespace" }}.refresh_tokens USING btree (token);
+comment on table {{ index .Options "Namespace" }}.refresh_tokens is 'Auth: Store of tokens used to refresh JWT tokens once they expire.';
+
+-- auth.instances definition
+
+CREATE TABLE IF NOT EXISTS {{ index .Options "Namespace" }}.instances (
+    id uuid NOT NULL,
+    uuid uuid NULL,
+    raw_base_config text NULL,
+    created_at timestamptz NULL,
+    updated_at timestamptz NULL,
+    CONSTRAINT instances_pkey PRIMARY KEY (id)
+);
+comment on table {{ index .Options "Namespace" }}.instances is 'Auth: Manages users across multiple sites.';
+
+-- auth.audit_log_entries definition
+
+CREATE TABLE IF NOT EXISTS {{ index .Options "Namespace" }}.audit_log_entries (
+    instance_id uuid NULL,
+    id uuid NOT NULL,
+    payload json NULL,
+    created_at timestamptz NULL,
+    CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id)
+);
+CREATE INDEX IF NOT EXISTS audit_logs_instance_id_idx ON {{ index .Options "Namespace" }}.audit_log_entries USING btree (instance_id);
+comment on table {{ index .Options "Namespace" }}.audit_log_entries is 'Auth: Audit trail for user actions.';
+
+-- auth.schema_migrations definition
+
+CREATE TABLE IF NOT EXISTS {{ index .Options "Namespace" }}.schema_migrations (
+    "version" varchar(255) NOT NULL,
+    CONSTRAINT schema_migrations_pkey PRIMARY KEY ("version")
+);
+comment on table {{ index .Options "Namespace" }}.schema_migrations is 'Auth: Manages updates to the auth system.';
+
+-- Gets the User ID from the request cookie
+create or replace function {{ index .Options "Namespace" }}.uid() returns uuid as $$
+  select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid;
+$$ language sql stable;
+
+-- Gets the User Role from the request cookie
+create or replace function {{ index .Options "Namespace" }}.role() returns text as $$
+  select
nullif(current_setting('request.jwt.claim.role', true), '')::text; +$$ language sql stable; diff --git a/auth_v2.169.0/migrations/20210710035447_alter_users.up.sql b/auth_v2.169.0/migrations/20210710035447_alter_users.up.sql new file mode 100644 index 0000000..fc8de12 --- /dev/null +++ b/auth_v2.169.0/migrations/20210710035447_alter_users.up.sql @@ -0,0 +1,19 @@ +-- alter user schema + +ALTER TABLE {{ index .Options "Namespace" }}.users +ADD COLUMN IF NOT EXISTS phone VARCHAR(15) NULL UNIQUE DEFAULT NULL, +ADD COLUMN IF NOT EXISTS phone_confirmed_at timestamptz NULL DEFAULT NULL, +ADD COLUMN IF NOT EXISTS phone_change VARCHAR(15) NULL DEFAULT '', +ADD COLUMN IF NOT EXISTS phone_change_token VARCHAR(255) NULL DEFAULT '', +ADD COLUMN IF NOT EXISTS phone_change_sent_at timestamptz NULL DEFAULT NULL; + +DO $$ +BEGIN + IF NOT EXISTS(SELECT * + FROM information_schema.columns + WHERE table_schema = '{{ index .Options "Namespace" }}' and table_name='users' and column_name='email_confirmed_at') + THEN + ALTER TABLE "{{ index .Options "Namespace" }}"."users" RENAME COLUMN "confirmed_at" TO "email_confirmed_at"; + END IF; +END $$; + diff --git a/auth_v2.169.0/migrations/20210722035447_adds_confirmed_at.up.sql b/auth_v2.169.0/migrations/20210722035447_adds_confirmed_at.up.sql new file mode 100644 index 0000000..aabd42e --- /dev/null +++ b/auth_v2.169.0/migrations/20210722035447_adds_confirmed_at.up.sql @@ -0,0 +1,4 @@ +-- adds confirmed at + +ALTER TABLE {{ index .Options "Namespace" }}.users +ADD COLUMN IF NOT EXISTS confirmed_at timestamptz GENERATED ALWAYS AS (LEAST (users.email_confirmed_at, users.phone_confirmed_at)) STORED; diff --git a/auth_v2.169.0/migrations/20210730183235_add_email_change_confirmed.up.sql b/auth_v2.169.0/migrations/20210730183235_add_email_change_confirmed.up.sql new file mode 100644 index 0000000..dc92c9c --- /dev/null +++ b/auth_v2.169.0/migrations/20210730183235_add_email_change_confirmed.up.sql @@ -0,0 +1,15 @@ +-- adds email_change_confirmed + +ALTER TABLE {{ index .Options "Namespace" }}.users +ADD COLUMN IF NOT EXISTS email_change_token_current varchar(255) null DEFAULT '', +ADD COLUMN IF NOT EXISTS email_change_confirm_status smallint DEFAULT 0 CHECK (email_change_confirm_status >= 0 AND email_change_confirm_status <= 2); + +DO $$ +BEGIN + IF NOT EXISTS(SELECT * + FROM information_schema.columns + WHERE table_schema = '{{ index .Options "Namespace" }}' and table_name='users' and column_name='email_change_token_new') + THEN + ALTER TABLE "{{ index .Options "Namespace" }}"."users" RENAME COLUMN "email_change_token" TO "email_change_token_new"; + END IF; +END $$; diff --git a/auth_v2.169.0/migrations/20210909172000_create_identities_table.up.sql b/auth_v2.169.0/migrations/20210909172000_create_identities_table.up.sql new file mode 100644 index 0000000..2f3a535 --- /dev/null +++ b/auth_v2.169.0/migrations/20210909172000_create_identities_table.up.sql @@ -0,0 +1,14 @@ +-- adds identities table + +CREATE TABLE IF NOT EXISTS {{ index .Options "Namespace" }}.identities ( + id text NOT NULL, + user_id uuid NOT NULL, + identity_data JSONB NOT NULL, + provider text NOT NULL, + last_sign_in_at timestamptz NULL, + created_at timestamptz NULL, + updated_at timestamptz NULL, + CONSTRAINT identities_pkey PRIMARY KEY (provider, id), + CONSTRAINT identities_user_id_fkey FOREIGN KEY (user_id) REFERENCES {{ index .Options "Namespace" }}.users(id) ON DELETE CASCADE +); +COMMENT ON TABLE {{ index .Options "Namespace" }}.identities is 'Auth: Stores identities associated to a user.'; diff 
--git a/auth_v2.169.0/migrations/20210927181326_add_refresh_token_parent.up.sql b/auth_v2.169.0/migrations/20210927181326_add_refresh_token_parent.up.sql new file mode 100644 index 0000000..a2b1c73 --- /dev/null +++ b/auth_v2.169.0/migrations/20210927181326_add_refresh_token_parent.up.sql @@ -0,0 +1,24 @@ +-- adds parent column + +ALTER TABLE {{ index .Options "Namespace" }}.refresh_tokens +ADD COLUMN IF NOT EXISTS parent varchar(255) NULL; + +DO $$ +BEGIN + IF NOT EXISTS(SELECT * + FROM information_schema.constraint_column_usage + WHERE table_schema = '{{ index .Options "Namespace" }}' and table_name='refresh_tokens' and constraint_name='refresh_tokens_token_unique') + THEN + ALTER TABLE "{{ index .Options "Namespace" }}"."refresh_tokens" ADD CONSTRAINT refresh_tokens_token_unique UNIQUE ("token"); + END IF; + + IF NOT EXISTS(SELECT * + FROM information_schema.constraint_column_usage + WHERE table_schema = '{{ index .Options "Namespace" }}' and table_name='refresh_tokens' and constraint_name='refresh_tokens_parent_fkey') + THEN + ALTER TABLE "{{ index .Options "Namespace" }}"."refresh_tokens" ADD CONSTRAINT refresh_tokens_parent_fkey FOREIGN KEY (parent) REFERENCES {{ index .Options "Namespace" }}.refresh_tokens("token"); + END IF; + + CREATE INDEX IF NOT EXISTS refresh_tokens_parent_idx ON "{{ index .Options "Namespace" }}"."refresh_tokens" USING btree (parent); +END $$; + diff --git a/auth_v2.169.0/migrations/20211122151130_create_user_id_idx.up.sql b/auth_v2.169.0/migrations/20211122151130_create_user_id_idx.up.sql new file mode 100644 index 0000000..d259aae --- /dev/null +++ b/auth_v2.169.0/migrations/20211122151130_create_user_id_idx.up.sql @@ -0,0 +1,3 @@ +-- create index on identities.user_id + +CREATE INDEX IF NOT EXISTS identities_user_id_idx ON "{{ index .Options "Namespace" }}".identities using btree (user_id); diff --git a/auth_v2.169.0/migrations/20211124214934_update_auth_functions.up.sql b/auth_v2.169.0/migrations/20211124214934_update_auth_functions.up.sql new file mode 100644 index 0000000..2fb784b --- /dev/null +++ b/auth_v2.169.0/migrations/20211124214934_update_auth_functions.up.sql @@ -0,0 +1,34 @@ +-- update auth functions + +create or replace function {{ index .Options "Namespace" }}.uid() +returns uuid +language sql stable +as $$ + select + coalesce( + current_setting('request.jwt.claim.sub', true), + (current_setting('request.jwt.claims', true)::jsonb ->> 'sub') + )::uuid +$$; + +create or replace function {{ index .Options "Namespace" }}.role() +returns text +language sql stable +as $$ + select + coalesce( + current_setting('request.jwt.claim.role', true), + (current_setting('request.jwt.claims', true)::jsonb ->> 'role') + )::text +$$; + +create or replace function {{ index .Options "Namespace" }}.email() +returns text +language sql stable +as $$ + select + coalesce( + current_setting('request.jwt.claim.email', true), + (current_setting('request.jwt.claims', true)::jsonb ->> 'email') + )::text +$$; diff --git a/auth_v2.169.0/migrations/20211202183645_update_auth_uid.up.sql b/auth_v2.169.0/migrations/20211202183645_update_auth_uid.up.sql new file mode 100644 index 0000000..3ecadfd --- /dev/null +++ b/auth_v2.169.0/migrations/20211202183645_update_auth_uid.up.sql @@ -0,0 +1,15 @@ +-- update auth.uid() + +create or replace function {{ index .Options "Namespace" }}.uid() +returns uuid +language sql stable +as $$ + select + nullif( + coalesce( + current_setting('request.jwt.claim.sub', true), + (current_setting('request.jwt.claims', true)::jsonb ->> 'sub') + ), + '' + 
)::uuid +$$; diff --git a/auth_v2.169.0/migrations/20220114185221_update_user_idx.up.sql b/auth_v2.169.0/migrations/20220114185221_update_user_idx.up.sql new file mode 100644 index 0000000..02fe76a --- /dev/null +++ b/auth_v2.169.0/migrations/20220114185221_update_user_idx.up.sql @@ -0,0 +1,4 @@ +-- updates users_instance_id_email_idx definition + +DROP INDEX IF EXISTS users_instance_id_email_idx; +CREATE INDEX IF NOT EXISTS users_instance_id_email_idx on "{{ index .Options "Namespace" }}".users using btree (instance_id, lower(email)); diff --git a/auth_v2.169.0/migrations/20220114185340_add_banned_until.up.sql b/auth_v2.169.0/migrations/20220114185340_add_banned_until.up.sql new file mode 100644 index 0000000..7530a7c --- /dev/null +++ b/auth_v2.169.0/migrations/20220114185340_add_banned_until.up.sql @@ -0,0 +1,4 @@ +-- adds banned_until column + +ALTER TABLE {{ index .Options "Namespace" }}.users +ADD COLUMN IF NOT EXISTS banned_until timestamptz NULL; diff --git a/auth_v2.169.0/migrations/20220224000811_update_auth_functions.up.sql b/auth_v2.169.0/migrations/20220224000811_update_auth_functions.up.sql new file mode 100644 index 0000000..4be4237 --- /dev/null +++ b/auth_v2.169.0/migrations/20220224000811_update_auth_functions.up.sql @@ -0,0 +1,34 @@ +-- update auth functions + +create or replace function {{ index .Options "Namespace" }}.uid() +returns uuid +language sql stable +as $$ + select + coalesce( + nullif(current_setting('request.jwt.claim.sub', true), ''), + (nullif(current_setting('request.jwt.claims', true), '')::jsonb ->> 'sub') + )::uuid +$$; + +create or replace function {{ index .Options "Namespace" }}.role() +returns text +language sql stable +as $$ + select + coalesce( + nullif(current_setting('request.jwt.claim.role', true), ''), + (nullif(current_setting('request.jwt.claims', true), '')::jsonb ->> 'role') + )::text +$$; + +create or replace function {{ index .Options "Namespace" }}.email() +returns text +language sql stable +as $$ + select + coalesce( + nullif(current_setting('request.jwt.claim.email', true), ''), + (nullif(current_setting('request.jwt.claims', true), '')::jsonb ->> 'email') + )::text +$$; diff --git a/auth_v2.169.0/migrations/20220323170000_add_user_reauthentication.up.sql b/auth_v2.169.0/migrations/20220323170000_add_user_reauthentication.up.sql new file mode 100644 index 0000000..277dbdb --- /dev/null +++ b/auth_v2.169.0/migrations/20220323170000_add_user_reauthentication.up.sql @@ -0,0 +1,5 @@ +-- adds reauthentication_token and reauthentication_sent_at + +ALTER TABLE {{ index .Options "Namespace" }}.users +ADD COLUMN IF NOT EXISTS reauthentication_token varchar(255) null default '', +ADD COLUMN IF NOT EXISTS reauthentication_sent_at timestamptz null default null; diff --git a/auth_v2.169.0/migrations/20220429102000_add_unique_idx.up.sql b/auth_v2.169.0/migrations/20220429102000_add_unique_idx.up.sql new file mode 100644 index 0000000..9d7644d --- /dev/null +++ b/auth_v2.169.0/migrations/20220429102000_add_unique_idx.up.sql @@ -0,0 +1,14 @@ +-- add partial unique indices to confirmation_token, recovery_token, email_change_token_current, email_change_token_new, phone_change_token, reauthentication_token +-- ignores partial unique index creation on fields which contain empty strings, whitespaces or purely numeric otps + +DROP INDEX IF EXISTS confirmation_token_idx; +DROP INDEX IF EXISTS recovery_token_idx; +DROP INDEX IF EXISTS email_change_token_current_idx; +DROP INDEX IF EXISTS email_change_token_new_idx; +DROP INDEX IF EXISTS 
reauthentication_token_idx; + +CREATE UNIQUE INDEX IF NOT EXISTS confirmation_token_idx ON {{ index .Options "Namespace" }}.users USING btree (confirmation_token) WHERE confirmation_token !~ '^[0-9 ]*$'; +CREATE UNIQUE INDEX IF NOT EXISTS recovery_token_idx ON {{ index .Options "Namespace" }}.users USING btree (recovery_token) WHERE recovery_token !~ '^[0-9 ]*$'; +CREATE UNIQUE INDEX IF NOT EXISTS email_change_token_current_idx ON {{ index .Options "Namespace" }}.users USING btree (email_change_token_current) WHERE email_change_token_current !~ '^[0-9 ]*$'; +CREATE UNIQUE INDEX IF NOT EXISTS email_change_token_new_idx ON {{ index .Options "Namespace" }}.users USING btree (email_change_token_new) WHERE email_change_token_new !~ '^[0-9 ]*$'; +CREATE UNIQUE INDEX IF NOT EXISTS reauthentication_token_idx ON {{ index .Options "Namespace" }}.users USING btree (reauthentication_token) WHERE reauthentication_token !~ '^[0-9 ]*$'; diff --git a/auth_v2.169.0/migrations/20220531120530_add_auth_jwt_function.up.sql b/auth_v2.169.0/migrations/20220531120530_add_auth_jwt_function.up.sql new file mode 100644 index 0000000..11f84e8 --- /dev/null +++ b/auth_v2.169.0/migrations/20220531120530_add_auth_jwt_function.up.sql @@ -0,0 +1,16 @@ +-- add auth.jwt function + +comment on function {{ index .Options "Namespace" }}.uid() is 'Deprecated. Use auth.jwt() -> ''sub'' instead.'; +comment on function {{ index .Options "Namespace" }}.role() is 'Deprecated. Use auth.jwt() -> ''role'' instead.'; +comment on function {{ index .Options "Namespace" }}.email() is 'Deprecated. Use auth.jwt() -> ''email'' instead.'; + +create or replace function {{ index .Options "Namespace" }}.jwt() +returns jsonb +language sql stable +as $$ + select + coalesce( + nullif(current_setting('request.jwt.claim', true), ''), + nullif(current_setting('request.jwt.claims', true), '') + )::jsonb +$$; diff --git a/auth_v2.169.0/migrations/20220614074223_add_ip_address_to_audit_log.postgres.up.sql b/auth_v2.169.0/migrations/20220614074223_add_ip_address_to_audit_log.postgres.up.sql new file mode 100644 index 0000000..a1a66b4 --- /dev/null +++ b/auth_v2.169.0/migrations/20220614074223_add_ip_address_to_audit_log.postgres.up.sql @@ -0,0 +1,3 @@ +-- Add IP Address to audit log +ALTER TABLE {{ index .Options "Namespace" }}.audit_log_entries +ADD COLUMN IF NOT EXISTS ip_address VARCHAR(64) NOT NULL DEFAULT ''; diff --git a/auth_v2.169.0/migrations/20220811173540_add_sessions_table.up.sql b/auth_v2.169.0/migrations/20220811173540_add_sessions_table.up.sql new file mode 100644 index 0000000..c16ef3c --- /dev/null +++ b/auth_v2.169.0/migrations/20220811173540_add_sessions_table.up.sql @@ -0,0 +1,23 @@ +-- Add session_id column to refresh_tokens table +create table if not exists {{ index .Options "Namespace" }}.sessions ( + id uuid not null, + user_id uuid not null, + created_at timestamptz null, + updated_at timestamptz null, + constraint sessions_pkey primary key (id), + constraint sessions_user_id_fkey foreign key (user_id) references {{ index .Options "Namespace" }}.users(id) on delete cascade +); +comment on table {{ index .Options "Namespace" }}.sessions is 'Auth: Stores session data associated to a user.'; + +alter table {{ index .Options "Namespace" }}.refresh_tokens +add column if not exists session_id uuid null; + +do $$ +begin + if not exists(select * + from information_schema.constraint_column_usage + where table_schema = '{{ index .Options "Namespace" }}' and table_name='sessions' and constraint_name='refresh_tokens_session_id_fkey') + then + 
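+    -- the information_schema probe above keeps this step re-runnable:
+    -- the foreign key is only added if a previous run has not created it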
alter table "{{ index .Options "Namespace" }}"."refresh_tokens" add constraint refresh_tokens_session_id_fkey foreign key (session_id) references {{ index .Options "Namespace" }}.sessions(id) on delete cascade; + end if; +END $$; diff --git a/auth_v2.169.0/migrations/20221003041349_add_mfa_schema.up.sql b/auth_v2.169.0/migrations/20221003041349_add_mfa_schema.up.sql new file mode 100644 index 0000000..a44654a --- /dev/null +++ b/auth_v2.169.0/migrations/20221003041349_add_mfa_schema.up.sql @@ -0,0 +1,50 @@ +-- see: https://stackoverflow.com/questions/7624919/check-if-a-user-defined-type-already-exists-in-postgresql/48382296#48382296 +do $$ begin + create type factor_type as enum('totp', 'webauthn'); + create type factor_status as enum('unverified', 'verified'); + create type aal_level as enum('aal1', 'aal2', 'aal3'); +exception + when duplicate_object then null; +end $$; + +-- auth.mfa_factors definition +create table if not exists {{ index .Options "Namespace" }}.mfa_factors( + id uuid not null, + user_id uuid not null, + friendly_name text null, + factor_type factor_type not null, + status factor_status not null, + created_at timestamptz not null, + updated_at timestamptz not null, + secret text null, + constraint mfa_factors_pkey primary key(id), + constraint mfa_factors_user_id_fkey foreign key (user_id) references {{ index .Options "Namespace" }}.users(id) on delete cascade +); +comment on table {{ index .Options "Namespace" }}.mfa_factors is 'auth: stores metadata about factors'; + +create unique index if not exists mfa_factors_user_friendly_name_unique on {{ index .Options "Namespace" }}.mfa_factors (friendly_name, user_id) where trim(friendly_name) <> ''; + +-- auth.mfa_challenges definition +create table if not exists {{ index .Options "Namespace" }}.mfa_challenges( + id uuid not null, + factor_id uuid not null, + created_at timestamptz not null, + verified_at timestamptz null, + ip_address inet not null, + constraint mfa_challenges_pkey primary key (id), + constraint mfa_challenges_auth_factor_id_fkey foreign key (factor_id) references {{ index .Options "Namespace" }}.mfa_factors(id) on delete cascade +); +comment on table {{ index .Options "Namespace" }}.mfa_challenges is 'auth: stores metadata about challenge requests made'; + + + +-- add factor_id and amr claims to session +create table if not exists {{ index .Options "Namespace" }}.mfa_amr_claims( + session_id uuid not null, + created_at timestamptz not null, + updated_at timestamptz not null, + authentication_method text not null, + constraint mfa_amr_claims_session_id_authentication_method_pkey unique(session_id, authentication_method), + constraint mfa_amr_claims_session_id_fkey foreign key(session_id) references {{ index .Options "Namespace" }}.sessions(id) on delete cascade +); +comment on table {{ index .Options "Namespace" }}.mfa_amr_claims is 'auth: stores authenticator method reference claims for multi factor authentication'; diff --git a/auth_v2.169.0/migrations/20221003041400_add_aal_and_factor_id_to_sessions.up.sql b/auth_v2.169.0/migrations/20221003041400_add_aal_and_factor_id_to_sessions.up.sql new file mode 100644 index 0000000..cc8a209 --- /dev/null +++ b/auth_v2.169.0/migrations/20221003041400_add_aal_and_factor_id_to_sessions.up.sql @@ -0,0 +1,3 @@ +-- add factor_id to sessions + alter table {{ index .Options "Namespace" }}.sessions add column if not exists factor_id uuid null; + alter table {{ index .Options "Namespace" }}.sessions add column if not exists aal aal_level null; diff --git 
a/auth_v2.169.0/migrations/20221011041400_add_mfa_indexes.up.sql b/auth_v2.169.0/migrations/20221011041400_add_mfa_indexes.up.sql new file mode 100644 index 0000000..def57a2 --- /dev/null +++ b/auth_v2.169.0/migrations/20221011041400_add_mfa_indexes.up.sql @@ -0,0 +1,19 @@ +alter table {{ index .Options "Namespace" }}.mfa_amr_claims + add column if not exists id uuid not null; + +do $$ +begin + if not exists + (select constraint_name + from information_schema.table_constraints + where table_schema = '{{ index .Options "Namespace" }}' + and table_name = 'mfa_amr_claims' + and constraint_name = 'amr_id_pk') + then + alter table {{ index .Options "Namespace" }}.mfa_amr_claims add constraint amr_id_pk primary key(id); + end if; +end $$; + +create index if not exists user_id_created_at_idx on {{ index .Options "Namespace" }}.sessions (user_id, created_at); +create index if not exists factor_id_created_at_idx on {{ index .Options "Namespace" }}.mfa_factors (user_id, created_at); + diff --git a/auth_v2.169.0/migrations/20221020193600_add_sessions_user_id_index.up.sql b/auth_v2.169.0/migrations/20221020193600_add_sessions_user_id_index.up.sql new file mode 100644 index 0000000..f5ba042 --- /dev/null +++ b/auth_v2.169.0/migrations/20221020193600_add_sessions_user_id_index.up.sql @@ -0,0 +1,2 @@ +create index if not exists sessions_user_id_idx on {{ index .Options "Namespace" }}.sessions (user_id); + diff --git a/auth_v2.169.0/migrations/20221021073300_add_refresh_tokens_session_id_revoked_index.up.sql b/auth_v2.169.0/migrations/20221021073300_add_refresh_tokens_session_id_revoked_index.up.sql new file mode 100644 index 0000000..0c47d4a --- /dev/null +++ b/auth_v2.169.0/migrations/20221021073300_add_refresh_tokens_session_id_revoked_index.up.sql @@ -0,0 +1 @@ +create index if not exists refresh_tokens_session_id_revoked_idx on {{ index .Options "Namespace" }}.refresh_tokens (session_id, revoked); diff --git a/auth_v2.169.0/migrations/20221021082433_add_saml.up.sql b/auth_v2.169.0/migrations/20221021082433_add_saml.up.sql new file mode 100644 index 0000000..30ac3d0 --- /dev/null +++ b/auth_v2.169.0/migrations/20221021082433_add_saml.up.sql @@ -0,0 +1,90 @@ +-- Multi-instance mode (see auth.instances) table intentionally not supported and ignored. 
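+--
+-- As a rough illustration of how the tables below relate (a sketch, not part
+-- of the migration's DDL), resolving a provider from an email domain at login
+-- time would look roughly like:
+--
+--   select p.id
+--     from {{ index .Options "Namespace" }}.sso_providers p
+--     join {{ index .Options "Namespace" }}.sso_domains d
+--       on d.sso_provider_id = p.id
+--    where lower(d.domain) = lower('example.com');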
+ +create table if not exists {{ index .Options "Namespace" }}.sso_providers ( + id uuid not null, + resource_id text null, + created_at timestamptz null, + updated_at timestamptz null, + primary key (id), + constraint "resource_id not empty" check (resource_id = null or char_length(resource_id) > 0) +); + +comment on table {{ index .Options "Namespace" }}.sso_providers is 'Auth: Manages SSO identity provider information; see saml_providers for SAML.'; +comment on column {{ index .Options "Namespace" }}.sso_providers.resource_id is 'Auth: Uniquely identifies a SSO provider according to a user-chosen resource ID (case insensitive), useful in infrastructure as code.'; + +create unique index if not exists sso_providers_resource_id_idx on {{ index .Options "Namespace" }}.sso_providers (lower(resource_id)); + +create table if not exists {{ index .Options "Namespace" }}.sso_domains ( + id uuid not null, + sso_provider_id uuid not null, + domain text not null, + created_at timestamptz null, + updated_at timestamptz null, + primary key (id), + foreign key (sso_provider_id) references {{ index .Options "Namespace" }}.sso_providers (id) on delete cascade, + constraint "domain not empty" check (char_length(domain) > 0) +); + +create index if not exists sso_domains_sso_provider_id_idx on {{ index .Options "Namespace" }}.sso_domains (sso_provider_id); +create unique index if not exists sso_domains_domain_idx on {{ index .Options "Namespace" }}.sso_domains (lower(domain)); + +comment on table {{ index .Options "Namespace" }}.sso_domains is 'Auth: Manages SSO email address domain mapping to an SSO Identity Provider.'; + +create table if not exists {{ index .Options "Namespace" }}.saml_providers ( + id uuid not null, + sso_provider_id uuid not null, + entity_id text not null unique, + metadata_xml text not null, + metadata_url text null, + attribute_mapping jsonb null, + created_at timestamptz null, + updated_at timestamptz null, + primary key (id), + foreign key (sso_provider_id) references {{ index .Options "Namespace" }}.sso_providers (id) on delete cascade, + constraint "metadata_xml not empty" check (char_length(metadata_xml) > 0), + constraint "metadata_url not empty" check (metadata_url = null or char_length(metadata_url) > 0), + constraint "entity_id not empty" check (char_length(entity_id) > 0) +); + +create index if not exists saml_providers_sso_provider_id_idx on {{ index .Options "Namespace" }}.saml_providers (sso_provider_id); + +comment on table {{ index .Options "Namespace" }}.saml_providers is 'Auth: Manages SAML Identity Provider connections.'; + +create table if not exists {{ index .Options "Namespace" }}.saml_relay_states ( + id uuid not null, + sso_provider_id uuid not null, + request_id text not null, + for_email text null, + redirect_to text null, + from_ip_address inet null, + created_at timestamptz null, + updated_at timestamptz null, + primary key (id), + foreign key (sso_provider_id) references {{ index .Options "Namespace" }}.sso_providers (id) on delete cascade, + constraint "request_id not empty" check(char_length(request_id) > 0) +); + +create index if not exists saml_relay_states_sso_provider_id_idx on {{ index .Options "Namespace" }}.saml_relay_states (sso_provider_id); +create index if not exists saml_relay_states_for_email_idx on {{ index .Options "Namespace" }}.saml_relay_states (for_email); + +comment on table {{ index .Options "Namespace" }}.saml_relay_states is 'Auth: Contains SAML Relay State information for each Service Provider initiated login.'; + +create table if 
not exists {{ index .Options "Namespace" }}.sso_sessions ( + id uuid not null, + session_id uuid not null, + sso_provider_id uuid null, + not_before timestamptz null, + not_after timestamptz null, + idp_initiated boolean default false, + created_at timestamptz null, + updated_at timestamptz null, + primary key (id), + foreign key (session_id) references {{ index .Options "Namespace" }}.sessions (id) on delete cascade, + foreign key (sso_provider_id) references {{ index .Options "Namespace" }}.sso_providers (id) on delete cascade +); + +create index if not exists sso_sessions_session_id_idx on {{ index .Options "Namespace" }}.sso_sessions (session_id); +create index if not exists sso_sessions_sso_provider_id_idx on {{ index .Options "Namespace" }}.sso_sessions (sso_provider_id); + +comment on table {{ index .Options "Namespace" }}.sso_sessions is 'Auth: A session initiated by an SSO Identity Provider'; + diff --git a/auth_v2.169.0/migrations/20221027105023_add_identities_user_id_idx.up.sql b/auth_v2.169.0/migrations/20221027105023_add_identities_user_id_idx.up.sql new file mode 100644 index 0000000..12e7aa5 --- /dev/null +++ b/auth_v2.169.0/migrations/20221027105023_add_identities_user_id_idx.up.sql @@ -0,0 +1 @@ +create index if not exists identities_user_id_idx on {{ index .Options "Namespace" }}.identities using btree (user_id); diff --git a/auth_v2.169.0/migrations/20221114143122_add_session_not_after_column.up.sql b/auth_v2.169.0/migrations/20221114143122_add_session_not_after_column.up.sql new file mode 100644 index 0000000..c729911 --- /dev/null +++ b/auth_v2.169.0/migrations/20221114143122_add_session_not_after_column.up.sql @@ -0,0 +1,4 @@ +alter table only {{ index .Options "Namespace" }}.sessions + add column if not exists not_after timestamptz; + +comment on column {{ index .Options "Namespace" }}.sessions.not_after is 'Auth: Not after is a nullable column that contains a timestamp after which the session should be regarded as expired.'; diff --git a/auth_v2.169.0/migrations/20221114143410_remove_parent_foreign_key_refresh_tokens.up.sql b/auth_v2.169.0/migrations/20221114143410_remove_parent_foreign_key_refresh_tokens.up.sql new file mode 100644 index 0000000..62d2078 --- /dev/null +++ b/auth_v2.169.0/migrations/20221114143410_remove_parent_foreign_key_refresh_tokens.up.sql @@ -0,0 +1,2 @@ +alter table only {{ index .Options "Namespace" }}.refresh_tokens + drop constraint refresh_tokens_parent_fkey; diff --git a/auth_v2.169.0/migrations/20221125140132_backfill_email_identity.up.sql b/auth_v2.169.0/migrations/20221125140132_backfill_email_identity.up.sql new file mode 100644 index 0000000..cd06425 --- /dev/null +++ b/auth_v2.169.0/migrations/20221125140132_backfill_email_identity.up.sql @@ -0,0 +1,11 @@ +-- backfill the auth.identities table by adding an email identity +-- for all auth.users with an email and password + +do $$ +begin + insert into {{ index .Options "Namespace" }}.identities (id, user_id, identity_data, provider, last_sign_in_at, created_at, updated_at) + select id, id as user_id, jsonb_build_object('sub', id, 'email', email) as identity_data, 'email' as provider, null as last_sign_in_at, '2022-11-25' as created_at, '2022-11-25' as updated_at + from {{ index .Options "Namespace" }}.users as users + where encrypted_password != '' and email is not null and not exists(select user_id from {{ index .Options "Namespace" }}.identities where user_id = users.id); +end; +$$; diff --git a/auth_v2.169.0/migrations/20221208132122_backfill_email_last_sign_in_at.up.sql
b/auth_v2.169.0/migrations/20221208132122_backfill_email_last_sign_in_at.up.sql new file mode 100644 index 0000000..19ec79e --- /dev/null +++ b/auth_v2.169.0/migrations/20221208132122_backfill_email_last_sign_in_at.up.sql @@ -0,0 +1,13 @@ +-- previous backfill migration left last_sign_in_at to be null, which broke some projects + +do $$ +begin +update {{ index .Options "Namespace" }}.identities + set last_sign_in_at = '2022-11-25' + where + last_sign_in_at is null and + created_at = '2022-11-25' and + updated_at = '2022-11-25' and + provider = 'email' and + id = user_id::text; +end $$; diff --git a/auth_v2.169.0/migrations/20221215195500_modify_users_email_unique_index.up.sql b/auth_v2.169.0/migrations/20221215195500_modify_users_email_unique_index.up.sql new file mode 100644 index 0000000..c12de04 --- /dev/null +++ b/auth_v2.169.0/migrations/20221215195500_modify_users_email_unique_index.up.sql @@ -0,0 +1,23 @@ +-- this change is relatively temporary +-- it is meant to keep database consistency guarantees until there is proper +-- introduction of account linking / merging / delinking APIs, at which point +-- rows in the users table will allow duplicates but with programmatic control + +alter table only {{ index .Options "Namespace" }}.users + add column if not exists is_sso_user boolean not null default false; + +comment on column {{ index .Options "Namespace" }}.users.is_sso_user is 'Auth: Set this column to true when the account comes from SSO. These accounts can have duplicate emails.'; + +do $$ +begin + alter table only {{ index .Options "Namespace" }}.users + drop constraint if exists users_email_key; +exception +-- dependent object: https://www.postgresql.org/docs/current/errcodes-appendix.html +when SQLSTATE '2BP01' then + raise notice 'Unable to drop users_email_key constraint due to dependent objects, please resolve this manually or SSO may not work'; +end $$; + +create unique index if not exists users_email_partial_key on {{ index .Options "Namespace" }}.users (email) where (is_sso_user = false); + +comment on index {{ index .Options "Namespace" }}.users_email_partial_key is 'Auth: A partial unique index that applies only when is_sso_user is false'; diff --git a/auth_v2.169.0/migrations/20221215195800_add_identities_email_column.up.sql b/auth_v2.169.0/migrations/20221215195800_add_identities_email_column.up.sql new file mode 100644 index 0000000..eb60334 --- /dev/null +++ b/auth_v2.169.0/migrations/20221215195800_add_identities_email_column.up.sql @@ -0,0 +1,18 @@ +do $$ +begin + update + {{ index .Options "Namespace" }}.identities as identities + set + identity_data = identity_data || jsonb_build_object('email', (select email from {{ index .Options "Namespace" }}.users where id = identities.user_id)), + updated_at = '2022-11-25' + where identities.provider = 'email' and identity_data->>'email' is null; +end $$; + +alter table only {{ index .Options "Namespace" }}.identities + add column if not exists email text generated always as (lower(identity_data->>'email')) stored; + +comment on column {{ index .Options "Namespace" }}.identities.email is 'Auth: Email is a generated column that references the optional email property in the identity_data'; + +create index if not exists identities_email_idx on {{ index .Options "Namespace" }}.identities (email text_pattern_ops); + +comment on index {{ index .Options "Namespace" }}.identities_email_idx is 'Auth: Ensures indexed queries on the email column'; diff --git a/auth_v2.169.0/migrations/20221215195900_remove_sso_sessions.up.sql 
b/auth_v2.169.0/migrations/20221215195900_remove_sso_sessions.up.sql new file mode 100644 index 0000000..228302d --- /dev/null +++ b/auth_v2.169.0/migrations/20221215195900_remove_sso_sessions.up.sql @@ -0,0 +1,3 @@ +-- sso_sessions is not used as all of the necessary data is in sessions +drop table if exists {{ index .Options "Namespace" }}.sso_sessions; + diff --git a/auth_v2.169.0/migrations/20230116124310_alter_phone_type.up.sql b/auth_v2.169.0/migrations/20230116124310_alter_phone_type.up.sql new file mode 100644 index 0000000..fa846db --- /dev/null +++ b/auth_v2.169.0/migrations/20230116124310_alter_phone_type.up.sql @@ -0,0 +1,14 @@ +-- alter phone field column type to accommodate soft deletion + +do $$ +begin + alter table {{ index .Options "Namespace" }}.users + alter column phone type text, + alter column phone_change type text; +exception + -- SQLSTATE errcodes https://www.postgresql.org/docs/current/errcodes-appendix.html + when SQLSTATE '0A000' then + raise notice 'Unable to change data type of phone, phone_change columns due to use by a view or rule'; + when SQLSTATE '2BP01' then + raise notice 'Unable to change data type of phone, phone_change columns due to dependent objects'; +end $$; diff --git a/auth_v2.169.0/migrations/20230116124412_add_deleted_at.up.sql b/auth_v2.169.0/migrations/20230116124412_add_deleted_at.up.sql new file mode 100644 index 0000000..999abaa --- /dev/null +++ b/auth_v2.169.0/migrations/20230116124412_add_deleted_at.up.sql @@ -0,0 +1,4 @@ +-- adds deleted_at column to auth.users + +alter table {{ index .Options "Namespace" }}.users +add column if not exists deleted_at timestamptz null; diff --git a/auth_v2.169.0/migrations/20230131181311_backfill_invite_identities.up.sql b/auth_v2.169.0/migrations/20230131181311_backfill_invite_identities.up.sql new file mode 100644 index 0000000..2fcb358 --- /dev/null +++ b/auth_v2.169.0/migrations/20230131181311_backfill_invite_identities.up.sql @@ -0,0 +1,9 @@ +-- backfills the missing email identity for invited users + +do $$ +begin + insert into {{ index .Options "Namespace" }}.identities (id, user_id, identity_data, provider, last_sign_in_at, created_at, updated_at) + select id, id as user_id, jsonb_build_object('sub', id, 'email', email) as identity_data, 'email' as provider, null as last_sign_in_at, '2023-01-25' as created_at, '2023-01-25' as updated_at + from {{ index .Options "Namespace" }}.users as users + where invited_at is not null and not exists (select user_id from {{ index .Options "Namespace" }}.identities where user_id = users.id and provider = 'email'); +end $$; diff --git a/auth_v2.169.0/migrations/20230322519590_add_flow_state_table.up.sql b/auth_v2.169.0/migrations/20230322519590_add_flow_state_table.up.sql new file mode 100644 index 0000000..a8842e5 --- /dev/null +++ b/auth_v2.169.0/migrations/20230322519590_add_flow_state_table.up.sql @@ -0,0 +1,20 @@ +-- see: https://stackoverflow.com/questions/7624919/check-if-a-user-defined-type-already-exists-in-postgresql/48382296#48382296 +do $$ begin + create type code_challenge_method as enum('s256', 'plain'); +exception + when duplicate_object then null; +end $$; +create table if not exists {{ index .Options "Namespace" }}.flow_state( + id uuid primary key, + user_id uuid null, + auth_code text not null, + code_challenge_method code_challenge_method not null, + code_challenge text not null, + provider_type text not null, + provider_access_token text null, + provider_refresh_token text null, + created_at timestamptz null, + updated_at timestamptz
null +); +create index if not exists idx_auth_code on {{ index .Options "Namespace" }}.flow_state(auth_code); +comment on table {{ index .Options "Namespace" }}.flow_state is 'stores metadata for pkce logins'; diff --git a/auth_v2.169.0/migrations/20230402418590_add_authentication_method_to_flow_state_table.up.sql b/auth_v2.169.0/migrations/20230402418590_add_authentication_method_to_flow_state_table.up.sql new file mode 100644 index 0000000..e83af85 --- /dev/null +++ b/auth_v2.169.0/migrations/20230402418590_add_authentication_method_to_flow_state_table.up.sql @@ -0,0 +1,6 @@ +alter table {{index .Options "Namespace" }}.flow_state +add column if not exists authentication_method text not null; +create index if not exists idx_user_id_auth_method on {{index .Options "Namespace" }}.flow_state (user_id, authentication_method); + +-- Update comment as we have generalized the table +comment on table {{ index .Options "Namespace" }}.flow_state is 'stores metadata for pkce logins'; diff --git a/auth_v2.169.0/migrations/20230411005111_remove_duplicate_idx.up.sql b/auth_v2.169.0/migrations/20230411005111_remove_duplicate_idx.up.sql new file mode 100644 index 0000000..dc23931 --- /dev/null +++ b/auth_v2.169.0/migrations/20230411005111_remove_duplicate_idx.up.sql @@ -0,0 +1 @@ +drop index if exists {{index .Options "Namespace" }}.refresh_tokens_token_idx; diff --git a/auth_v2.169.0/migrations/20230508135423_add_cleanup_indexes.up.sql b/auth_v2.169.0/migrations/20230508135423_add_cleanup_indexes.up.sql new file mode 100644 index 0000000..162acee --- /dev/null +++ b/auth_v2.169.0/migrations/20230508135423_add_cleanup_indexes.up.sql @@ -0,0 +1,17 @@ +-- Indexes used for cleaning up old or stale objects. + +create index if not exists + refresh_tokens_updated_at_idx + on {{ index .Options "Namespace" }}.refresh_tokens (updated_at desc); + +create index if not exists + flow_state_created_at_idx + on {{ index .Options "Namespace" }}.flow_state (created_at desc); + +create index if not exists + saml_relay_states_created_at_idx + on {{ index .Options "Namespace" }}.saml_relay_states (created_at desc); + +create index if not exists + sessions_not_after_idx + on {{ index .Options "Namespace" }}.sessions (not_after desc); diff --git a/auth_v2.169.0/migrations/20230523124323_add_mfa_challenge_cleanup_index.up.sql b/auth_v2.169.0/migrations/20230523124323_add_mfa_challenge_cleanup_index.up.sql new file mode 100644 index 0000000..667d502 --- /dev/null +++ b/auth_v2.169.0/migrations/20230523124323_add_mfa_challenge_cleanup_index.up.sql @@ -0,0 +1,5 @@ +-- Index used to clean up mfa challenges + +create index if not exists + mfa_challenge_created_at_idx + on {{ index .Options "Namespace" }}.mfa_challenges (created_at desc); diff --git a/auth_v2.169.0/migrations/20230818113222_add_flow_state_to_relay_state.up.sql b/auth_v2.169.0/migrations/20230818113222_add_flow_state_to_relay_state.up.sql new file mode 100644 index 0000000..f940e70 --- /dev/null +++ b/auth_v2.169.0/migrations/20230818113222_add_flow_state_to_relay_state.up.sql @@ -0,0 +1 @@ +alter table {{ index .Options "Namespace" }}.saml_relay_states add column if not exists flow_state_id uuid references {{ index .Options "Namespace" }}.flow_state(id) on delete cascade default null; diff --git a/auth_v2.169.0/migrations/20230914180801_add_mfa_factors_user_id_idx.up.sql b/auth_v2.169.0/migrations/20230914180801_add_mfa_factors_user_id_idx.up.sql new file mode 100644 index 0000000..805c97c --- /dev/null +++ 
b/auth_v2.169.0/migrations/20230914180801_add_mfa_factors_user_id_idx.up.sql @@ -0,0 +1 @@ +create index if not exists mfa_factors_user_id_idx on {{ index .Options "Namespace" }}.mfa_factors(user_id); diff --git a/auth_v2.169.0/migrations/20231027141322_add_session_refresh_columns.up.sql b/auth_v2.169.0/migrations/20231027141322_add_session_refresh_columns.up.sql new file mode 100644 index 0000000..79efba9 --- /dev/null +++ b/auth_v2.169.0/migrations/20231027141322_add_session_refresh_columns.up.sql @@ -0,0 +1,4 @@ +alter table if exists {{ index .Options "Namespace" }}.sessions + add column if not exists refreshed_at timestamp without time zone, + add column if not exists user_agent text, + add column if not exists ip inet; diff --git a/auth_v2.169.0/migrations/20231114161723_add_sessions_tag.up.sql b/auth_v2.169.0/migrations/20231114161723_add_sessions_tag.up.sql new file mode 100644 index 0000000..7acf1bb --- /dev/null +++ b/auth_v2.169.0/migrations/20231114161723_add_sessions_tag.up.sql @@ -0,0 +1,2 @@ +alter table if exists {{ index .Options "Namespace" }}.sessions + add column if not exists tag text; diff --git a/auth_v2.169.0/migrations/20231117164230_add_id_pkey_identities.up.sql b/auth_v2.169.0/migrations/20231117164230_add_id_pkey_identities.up.sql new file mode 100644 index 0000000..31ed280 --- /dev/null +++ b/auth_v2.169.0/migrations/20231117164230_add_id_pkey_identities.up.sql @@ -0,0 +1,29 @@ +do $$ +begin + if not exists(select * + from information_schema.columns + where table_schema = '{{ index .Options "Namespace" }}' and table_name='identities' and column_name='provider_id') + then + alter table if exists {{ index .Options "Namespace" }}.identities + rename column id to provider_id; + end if; +end$$; + +alter table if exists {{ index .Options "Namespace" }}.identities + drop constraint if exists identities_pkey, + add column if not exists id uuid default gen_random_uuid() primary key; + +do $$ +begin + if not exists + (select constraint_name + from information_schema.table_constraints + where table_schema = '{{ index .Options "Namespace" }}' + and table_name = 'identities' + and constraint_name = 'identities_provider_id_provider_unique') + then + alter table if exists {{ index .Options "Namespace" }}.identities + add constraint identities_provider_id_provider_unique + unique(provider_id, provider); + end if; +end $$; diff --git a/auth_v2.169.0/migrations/20240115144230_remove_ip_address_from_saml_relay_state.up.sql b/auth_v2.169.0/migrations/20240115144230_remove_ip_address_from_saml_relay_state.up.sql new file mode 100644 index 0000000..169ec37 --- /dev/null +++ b/auth_v2.169.0/migrations/20240115144230_remove_ip_address_from_saml_relay_state.up.sql @@ -0,0 +1,7 @@ +do $$ +begin + if exists (select from information_schema.columns where table_schema = '{{ index .Options "Namespace" }}' and table_name = 'saml_relay_states' and column_name = 'from_ip_address') then + alter table {{ index .Options "Namespace" }}.saml_relay_states drop column from_ip_address; + end if; +end +$$; diff --git a/auth_v2.169.0/migrations/20240214120130_add_is_anonymous_column.up.sql b/auth_v2.169.0/migrations/20240214120130_add_is_anonymous_column.up.sql new file mode 100644 index 0000000..6ef963f --- /dev/null +++ b/auth_v2.169.0/migrations/20240214120130_add_is_anonymous_column.up.sql @@ -0,0 +1,8 @@ +do $$ +begin + alter table {{ index .Options "Namespace" }}.users + add column if not exists is_anonymous boolean not null default false; + + create index if not exists users_is_anonymous_idx on {{ 
index .Options "Namespace" }}.users using btree (is_anonymous); +end +$$; diff --git a/auth_v2.169.0/migrations/20240306115329_add_issued_at_to_flow_state.up.sql b/auth_v2.169.0/migrations/20240306115329_add_issued_at_to_flow_state.up.sql new file mode 100644 index 0000000..d6eff15 --- /dev/null +++ b/auth_v2.169.0/migrations/20240306115329_add_issued_at_to_flow_state.up.sql @@ -0,0 +1,3 @@ +do $$ begin +alter table {{ index .Options "Namespace" }}.flow_state add column if not exists auth_code_issued_at timestamptz null; +end $$ diff --git a/auth_v2.169.0/migrations/20240314092811_add_saml_name_id_format.up.sql b/auth_v2.169.0/migrations/20240314092811_add_saml_name_id_format.up.sql new file mode 100644 index 0000000..0196250 --- /dev/null +++ b/auth_v2.169.0/migrations/20240314092811_add_saml_name_id_format.up.sql @@ -0,0 +1,3 @@ +do $$ begin +alter table {{ index .Options "Namespace" }}.saml_providers add column if not exists name_id_format text null; +end $$ diff --git a/auth_v2.169.0/migrations/20240427152123_add_one_time_tokens_table.up.sql b/auth_v2.169.0/migrations/20240427152123_add_one_time_tokens_table.up.sql new file mode 100644 index 0000000..be73126 --- /dev/null +++ b/auth_v2.169.0/migrations/20240427152123_add_one_time_tokens_table.up.sql @@ -0,0 +1,37 @@ +do $$ begin + create type one_time_token_type as enum ( + 'confirmation_token', + 'reauthentication_token', + 'recovery_token', + 'email_change_token_new', + 'email_change_token_current', + 'phone_change_token' + ); +exception + when duplicate_object then null; +end $$; + + +do $$ begin + create table if not exists {{ index .Options "Namespace" }}.one_time_tokens ( + id uuid primary key, + user_id uuid not null references {{ index .Options "Namespace" }}.users on delete cascade, + token_type one_time_token_type not null, + token_hash text not null, + relates_to text not null, + created_at timestamp without time zone not null default now(), + updated_at timestamp without time zone not null default now(), + check (char_length(token_hash) > 0) + ); + + begin + create index if not exists one_time_tokens_token_hash_hash_idx on {{ index .Options "Namespace" }}.one_time_tokens using hash (token_hash); + create index if not exists one_time_tokens_relates_to_hash_idx on {{ index .Options "Namespace" }}.one_time_tokens using hash (relates_to); + exception when others then + -- Fallback to btree indexes if hash creation fails + create index if not exists one_time_tokens_token_hash_hash_idx on {{ index .Options "Namespace" }}.one_time_tokens using btree (token_hash); + create index if not exists one_time_tokens_relates_to_hash_idx on {{ index .Options "Namespace" }}.one_time_tokens using btree (relates_to); + end; + + create unique index if not exists one_time_tokens_user_id_token_type_key on {{ index .Options "Namespace" }}.one_time_tokens (user_id, token_type); +end $$; diff --git a/auth_v2.169.0/migrations/20240612123726_enable_rls_update_grants.up.sql b/auth_v2.169.0/migrations/20240612123726_enable_rls_update_grants.up.sql new file mode 100644 index 0000000..9201e84 --- /dev/null +++ b/auth_v2.169.0/migrations/20240612123726_enable_rls_update_grants.up.sql @@ -0,0 +1,36 @@ +do $$ begin + -- enable RLS policy on auth tables + alter table {{ index .Options "Namespace" }}.schema_migrations enable row level security; + alter table {{ index .Options "Namespace" }}.instances enable row level security; + alter table {{ index .Options "Namespace" }}.users enable row level security; + alter table {{ index .Options "Namespace" 
}}.audit_log_entries enable row level security; + alter table {{ index .Options "Namespace" }}.saml_relay_states enable row level security; + alter table {{ index .Options "Namespace" }}.refresh_tokens enable row level security; + alter table {{ index .Options "Namespace" }}.mfa_factors enable row level security; + alter table {{ index .Options "Namespace" }}.sessions enable row level security; + alter table {{ index .Options "Namespace" }}.sso_providers enable row level security; + alter table {{ index .Options "Namespace" }}.sso_domains enable row level security; + alter table {{ index .Options "Namespace" }}.mfa_challenges enable row level security; + alter table {{ index .Options "Namespace" }}.mfa_amr_claims enable row level security; + alter table {{ index .Options "Namespace" }}.saml_providers enable row level security; + alter table {{ index .Options "Namespace" }}.flow_state enable row level security; + alter table {{ index .Options "Namespace" }}.identities enable row level security; + alter table {{ index .Options "Namespace" }}.one_time_tokens enable row level security; + -- allow postgres role to select from auth tables and allow it to grant select to other roles + grant select on {{ index .Options "Namespace" }}.schema_migrations to postgres with grant option; + grant select on {{ index .Options "Namespace" }}.instances to postgres with grant option; + grant select on {{ index .Options "Namespace" }}.users to postgres with grant option; + grant select on {{ index .Options "Namespace" }}.audit_log_entries to postgres with grant option; + grant select on {{ index .Options "Namespace" }}.saml_relay_states to postgres with grant option; + grant select on {{ index .Options "Namespace" }}.refresh_tokens to postgres with grant option; + grant select on {{ index .Options "Namespace" }}.mfa_factors to postgres with grant option; + grant select on {{ index .Options "Namespace" }}.sessions to postgres with grant option; + grant select on {{ index .Options "Namespace" }}.sso_providers to postgres with grant option; + grant select on {{ index .Options "Namespace" }}.sso_domains to postgres with grant option; + grant select on {{ index .Options "Namespace" }}.mfa_challenges to postgres with grant option; + grant select on {{ index .Options "Namespace" }}.mfa_amr_claims to postgres with grant option; + grant select on {{ index .Options "Namespace" }}.saml_providers to postgres with grant option; + grant select on {{ index .Options "Namespace" }}.flow_state to postgres with grant option; + grant select on {{ index .Options "Namespace" }}.identities to postgres with grant option; + grant select on {{ index .Options "Namespace" }}.one_time_tokens to postgres with grant option; +end $$; diff --git a/auth_v2.169.0/migrations/20240729123726_add_mfa_phone_config.up.sql b/auth_v2.169.0/migrations/20240729123726_add_mfa_phone_config.up.sql new file mode 100644 index 0000000..ec94d7b --- /dev/null +++ b/auth_v2.169.0/migrations/20240729123726_add_mfa_phone_config.up.sql @@ -0,0 +1,12 @@ +do $$ begin + alter type {{ index .Options "Namespace" }}.factor_type add value 'phone'; +exception + when duplicate_object then null; +end $$; + + +alter table {{ index .Options "Namespace" }}.mfa_factors add column if not exists phone text unique default null; +alter table {{ index .Options "Namespace" }}.mfa_challenges add column if not exists otp_code text null; + + +create unique index if not exists unique_verified_phone_factor on {{ index .Options "Namespace" }}.mfa_factors (user_id, phone); diff --git 
a/auth_v2.169.0/migrations/20240802193726_add_mfa_factors_column_last_challenged_at.up.sql b/auth_v2.169.0/migrations/20240802193726_add_mfa_factors_column_last_challenged_at.up.sql new file mode 100644 index 0000000..bc3eea9 --- /dev/null +++ b/auth_v2.169.0/migrations/20240802193726_add_mfa_factors_column_last_challenged_at.up.sql @@ -0,0 +1 @@ +alter table {{ index .Options "Namespace" }}.mfa_factors add column if not exists last_challenged_at timestamptz unique default null; diff --git a/auth_v2.169.0/migrations/20240806073726_drop_uniqueness_constraint_on_phone.up.sql b/auth_v2.169.0/migrations/20240806073726_drop_uniqueness_constraint_on_phone.up.sql new file mode 100644 index 0000000..ade27ea --- /dev/null +++ b/auth_v2.169.0/migrations/20240806073726_drop_uniqueness_constraint_on_phone.up.sql @@ -0,0 +1,22 @@ +alter table {{ index .Options "Namespace" }}.mfa_factors drop constraint if exists mfa_factors_phone_key; +do $$ +begin + -- if both indexes exist, it means that the schema_migrations table was truncated and the migrations had to be rerun + if ( + select count(*) = 2 + from pg_indexes + where indexname in ('unique_verified_phone_factor', 'unique_phone_factor_per_user') + and schemaname = '{{ index .Options "Namespace" }}' + ) then + execute 'drop index {{ index .Options "Namespace" }}.unique_verified_phone_factor'; + end if; + + if exists ( + select 1 + from pg_indexes + where indexname = 'unique_verified_phone_factor' + and schemaname = '{{ index .Options "Namespace" }}' + ) then + execute 'alter index {{ index .Options "Namespace" }}.unique_verified_phone_factor rename to unique_phone_factor_per_user'; + end if; +end $$; diff --git a/auth_v2.169.0/migrations/20241009103726_add_web_authn.up.sql b/auth_v2.169.0/migrations/20241009103726_add_web_authn.up.sql new file mode 100644 index 0000000..04d8972 --- /dev/null +++ b/auth_v2.169.0/migrations/20241009103726_add_web_authn.up.sql @@ -0,0 +1,3 @@ +alter table {{ index .Options "Namespace" }}.mfa_factors add column if not exists web_authn_credential jsonb null; +alter table {{ index .Options "Namespace" }}.mfa_factors add column if not exists web_authn_aaguid uuid null; +alter table {{ index .Options "Namespace" }}.mfa_challenges add column if not exists web_authn_session_data jsonb null; diff --git a/auth_v2.169.0/openapi.yaml b/auth_v2.169.0/openapi.yaml new file mode 100644 index 0000000..1f52436 --- /dev/null +++ b/auth_v2.169.0/openapi.yaml @@ -0,0 +1,2349 @@ +openapi: 3.0.3 +info: + version: latest + title: GoTrue REST API (Supabase Auth) + description: |- + GoTrue is the software behind [Supabase Auth](https://supabase.com/auth). This is its REST API. + + **Notes:** + - HTTP 5XX errors are not listed for each endpoint. + These should be handled globally. Not all HTTP 5XX errors are generated from GoTrue, and they may serve non-JSON content. Make sure you inspect the `Content-Type` header before parsing as JSON. + - Error responses are somewhat inconsistent. + Avoid using the `msg` and HTTP status code to identify errors. HTTP 400 and 422 are used interchangeably in many APIs. + - If the server has CAPTCHA protection enabled, the verification token should be included in the request body. + - Rate limit errors are consistently raised with the HTTP 429 code. + - Enums are used only in request bodies / parameters and not in responses to ensure wide compatibility with code generators that fail to include an unknown enum case. + + **Backward compatibility:** + - Endpoints marked as _Experimental_ may change without notice. 
+ - Endpoints marked as _Deprecated_ will be supported for at least 3 months since being marked as deprecated. + - HTTP status codes like 400, 404, 422 may change for the same underlying error condition. + + termsOfService: https://supabase.com/terms + contact: + name: Ask a question about this API + url: https://github.com/supabase/supabase/discussions + license: + name: MIT License + url: https://github.com/supabase/gotrue/blob/master/LICENSE +externalDocs: + description: Learn more about Supabase Auth + url: https://supabase.com/docs/guides/auth/overview +servers: + - url: "https://{project}.supabase.co/auth/v1" + variables: + project: + description: > + Your Supabase project ID. + default: abcdefghijklmnopqrst +tags: + - name: auth + description: APIs for authentication and authorization. + - name: user + description: APIs used by a user to manage their account. + - name: oauth + description: APIs for dealing with OAuth flows. + - name: oidc + description: APIs for dealing with OIDC authentication flows. (Experimental.) + - name: sso + description: APIs for authenticating using SSO providers (SAML). (Experimental.) + - name: saml + description: SAML 2.0 Endpoints. (Experimental.) + - name: admin + description: Administration APIs requiring elevated access. + - name: general + description: General APIs. +paths: + /token: + post: + summary: Issues access and refresh tokens based on grant type. + tags: + - auth + - oidc + parameters: + - name: grant_type + in: query + required: true + description: > + What grant type should be used to issue an access and refresh token. Note that `id_token` is only offered in experimental mode. CAPTCHA protection is not effective on the `refresh_token` grant flow. + schema: + type: string + enum: + - password + - refresh_token + - id_token + - pkce + security: + - APIKeyAuth: [] + requestBody: + content: + application/json: + examples: + grant_type=password: + value: + email: user@example.com + password: password1 + grant_type=refresh_token: + value: + refresh_token: 4nYUCw0wZR_DNOTSDbSGMQ + grant_type=pkce: + value: + auth_code: 009e5066-fc11-4eca-8c8c-6fd82aa263f2 + code_verifier: ktPNXpR65N6JtgzQA8_5HHtH6PBSAahMNoLKRzQEa0Tzgl.vdV~b6lPk004XOd.4lR0inCde.NoQx5K63xPfzL8o7tJAjXncnhw5Niv9ycQ.QRV9JG.y3VapqbgLfIrJ + schema: + type: object + description: |- + For the refresh token flow, supply only `refresh_token`. + For the email/phone with password flow, supply `email` or `phone`, and `password`, with an optional `gotrue_meta_security`. + For the OIDC ID token flow, supply `id_token`, `nonce`, `provider`, `client_id`, `issuer` with an optional `gotrue_meta_security`. + properties: + refresh_token: + type: string + password: + type: string + email: + type: string + format: email + phone: + type: string + format: phone + id_token: + type: string + access_token: + type: string + description: Provide only when `grant_type` is `id_token` and the provided ID token requires the presence of an access token to be accepted (usually by having an `at_hash` claim). + nonce: + type: string + provider: + type: string + enum: + - google + - apple + - azure + - facebook + - keycloak + client_id: + type: string + issuer: + type: string + description: If `provider` is `azure` then you can specify any Azure OIDC issuer string here, which will be used for verification. + gotrue_meta_security: + $ref: "#/components/schemas/GoTrueMetaSecurity" + auth_code: + type: string + format: uuid + code_verifier: + type: string + responses: + 200: + description: > + An access and refresh token have been successfully issued. + content: + application/json: + schema: + $ref: "#/components/schemas/AccessTokenResponseSchema" + + 400: + $ref: "#/components/responses/BadRequestResponse" + 401: + $ref: "#/components/responses/UnauthorizedResponse" + 403: + $ref: "#/components/responses/ForbiddenResponse" + 429: + $ref: "#/components/responses/RateLimitResponse" + 500: + $ref: "#/components/responses/InternalServerErrorResponse"
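To make the grant types above concrete, here is a minimal TypeScript sketch of the password and refresh_token grants using the global fetch API. The base URL follows the servers entry above with a placeholder project ID; the `apikey` header name is an assumption, since the security scheme definitions are not part of this excerpt.

const BASE = "https://abcdefghijklmnopqrst.supabase.co/auth/v1"; // placeholder project
const API_KEY = "<api-key>"; // assumption: the APIKeyAuth scheme is an `apikey` header

async function token(grantType: "password" | "refresh_token", body: object) {
  // grant_type travels in the query string; credentials travel in the JSON body.
  const res = await fetch(`${BASE}/token?grant_type=${grantType}`, {
    method: "POST",
    headers: { apikey: API_KEY, "Content-Type": "application/json" },
    body: JSON.stringify(body),
  });
  if (!res.ok) throw new Error(`token failed: ${res.status}`); // 400/401/403/429/500 per above
  return res.json(); // AccessTokenResponseSchema: access_token, refresh_token, ...
}

// Usage, mirroring the request examples above:
// await token("password", { email: "user@example.com", password: "password1" });
// await token("refresh_token", { refresh_token: "4nYUCw0wZR_DNOTSDbSGMQ" });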
+ + /logout: + post: + summary: Logs out a user. + tags: + - auth + security: + - APIKeyAuth: [] + UserAuth: [] + parameters: + - name: scope + in: query + description: > + (Optional.) Determines how the user should be logged out. When `global` is used, the user is logged out from all active sessions. When `local` is used, the user is logged out from the current session. When `others` is used, the user is logged out from all other sessions except the current one. Clients should remove stored access and refresh tokens except when `others` is used. + schema: + type: string + enum: + - global + - local + - others + responses: + 204: + description: No content returned on successful logout. + 401: + $ref: "#/components/responses/UnauthorizedResponse" + + /verify: + get: + summary: Authenticate by verifying the possession of a one-time token. Usually for use as clickable links. + tags: + - auth + parameters: + - name: token + in: query + required: true + schema: + type: string + - name: type + in: query + required: true + schema: + type: string + enum: + - signup + - invite + - recovery + - magiclink + - email_change + - name: redirect_to + in: query + description: > + (Optional) URL to redirect back into the app after verification completes successfully. If not specified, or not allowed per the allow list, the "Site URL" configuration option is used instead. + schema: + type: string + format: uri + security: + - APIKeyAuth: [] + responses: + 302: + $ref: "#/components/responses/AccessRefreshTokenRedirectResponse" + post: + summary: Authenticate by verifying the possession of a one-time token. + tags: + - auth + security: + - APIKeyAuth: [] + requestBody: + content: + application/json: + schema: + type: object + properties: + type: + type: string + enum: + - signup + - recovery + - invite + - magiclink + - email_change + - sms + - phone_change + token: + type: string + token_hash: + type: string + description: > + The hashed value of the token. Applicable only if used with `type` and nothing else. + email: + type: string + format: email + description: > + Applicable only if `type` relates to an email address. + phone: + type: string + format: phone + description: > + Applicable only if `type` relates to a phone number. + redirect_to: + type: string + format: uri + description: > + (Optional) URL to redirect back into the app after verification completes successfully. If not specified, or not allowed per the allow list, the "Site URL" configuration option is used instead. + + responses: + 200: + description: An access and refresh token. + content: + application/json: + schema: + $ref: "#/components/schemas/AccessTokenResponseSchema" + 429: + $ref: "#/components/responses/RateLimitResponse"
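A sketch of the POST variant, verifying an emailed one-time token. The field names come straight from the request schema above; the base URL, API key header, and the choice of `magiclink` as the `type` (pick the enum value matching how the token was issued) are the same assumptions as in the earlier /token sketch.

// Exchange a one-time token the user received by email for a session.
async function verifyEmailToken(email: string, token: string) {
  const res = await fetch(`${BASE}/verify`, {
    method: "POST",
    headers: { apikey: API_KEY, "Content-Type": "application/json" },
    body: JSON.stringify({ type: "magiclink", email, token }),
  });
  if (!res.ok) throw new Error(`verify failed: ${res.status}`);
  return res.json(); // AccessTokenResponseSchema on success
}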
+ + /authorize: + get: + summary: Redirects to an external OAuth provider. Usually for use as clickable links. + tags: + - oauth + security: + - APIKeyAuth: [] + parameters: + - name: provider + in: query + description: Name of the OAuth provider. + example: google + required: true + schema: + type: string + pattern: "^[a-zA-Z0-9]+$" + - name: scopes + in: query + required: true + description: Space-separated list of OAuth scopes to pass on to `provider`. + schema: + type: string + pattern: "[^ ]+( +[^ ]+)*" + - name: invite_token + in: query + description: (Optional) A token representing a previous invitation of the user. A successful sign-in with OAuth will mark the invitation as completed. + schema: + type: string + - name: redirect_to + in: query + description: > + (Optional) URL to redirect back into the app after OAuth sign-in completes, successfully or not. If not specified, or not allowed per the allow list, the "Site URL" configuration option is used instead. + schema: + type: string + format: uri + - name: code_challenge_method + in: query + description: (Optional) Method used to derive the code challenge from the code verifier. Can be `plain` (no transformation) or `s256` (SHA-256). Using `s256` is always recommended. + schema: + type: string + enum: + - plain + - s256 + responses: + 302: + $ref: "#/components/responses/OAuthAuthorizeRedirectResponse" + + /signup: + post: + summary: Signs a user up. + description: > + Creates a new user. + tags: + - auth + security: + - APIKeyAuth: [] + requestBody: + content: + application/json: + examples: + "email+password": + value: + email: user@example.com + password: password1 + "phone+password": + value: + phone: "+1234567890" + password: password1 + "phone+password+whatsapp": + value: + phone: "+1234567890" + password: password1 + channel: whatsapp + "email+password+pkce": + value: + email: user@example.com + password: password1 + code_challenge_method: s256 + code_challenge: elU6u5zyqQT2f92GRQUq6PautAeNDf4DQPayyR0ek_c + schema: + type: object + properties: + email: + type: string + format: email + phone: + type: string + format: phone + channel: + type: string + enum: + - sms + - whatsapp + password: + type: string + data: + type: object + code_challenge: + type: string + code_challenge_method: + type: string + enum: + - plain + - s256 + gotrue_meta_security: + $ref: "#/components/schemas/GoTrueMetaSecurity" + responses: + 200: + description: > + Returned when a user already exists but is not confirmed (a user object is returned), or when a new user is signed up. If email or phone confirmation is enabled, a user object is returned; if confirmation is disabled, an access and refresh token response is returned. + content: + application/json: + schema: + oneOf: + - $ref: "#/components/schemas/AccessTokenResponseSchema" + - $ref: "#/components/schemas/UserSchema" + 400: + $ref: "#/components/responses/BadRequestResponse" + 429: + $ref: "#/components/responses/RateLimitResponse"
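The email+password example above, as a fetch call under the same assumptions as the earlier sketches. Note that the shape of the 200 response depends on whether confirmation is enabled.

async function signUp(email: string, password: string) {
  const res = await fetch(`${BASE}/signup`, {
    method: "POST",
    headers: { apikey: API_KEY, "Content-Type": "application/json" },
    body: JSON.stringify({ email, password }),
  });
  if (!res.ok) throw new Error(`signup failed: ${res.status}`);
  // Either a user object (confirmation enabled) or an access/refresh token response.
  return res.json();
}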
+ + /recover: + post: + summary: Request password recovery. + description: > + Users that have forgotten their password can have it reset with this API. + tags: + - auth + security: + - APIKeyAuth: [] + requestBody: + content: + application/json: + schema: + type: object + required: + - email + properties: + email: + type: string + format: email + code_challenge: + type: string + code_challenge_method: + type: string + enum: + - plain + - s256 + gotrue_meta_security: + $ref: "#/components/schemas/GoTrueMetaSecurity" + responses: + 200: + description: A recovery email has been sent to the address. An empty JSON object is returned. To obfuscate whether such an email address already exists in the system, this response is sent regardless of whether the address exists. + content: + application/json: + schema: + type: object + 400: + $ref: "#/components/responses/BadRequestResponse" + 422: + description: Returned when unable to validate the email address. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + 429: + $ref: "#/components/responses/RateLimitResponse" + + /resend: + post: + summary: Resends a one-time password (OTP) through email or SMS. + description: > + Allows a user to resend an existing signup, sms, email_change or phone_change OTP. + tags: + - auth + security: + - APIKeyAuth: [] + requestBody: + content: + application/json: + schema: + type: object + properties: + email: + type: string + format: email + description: > + Applicable only if `type` relates to an email address. + phone: + type: string + format: phone + description: > + Applicable only if `type` relates to a phone number. + type: + type: string + enum: + - signup + - email_change + - sms + - phone_change + gotrue_meta_security: + $ref: "#/components/schemas/GoTrueMetaSecurity" + responses: + 200: + description: A One-Time Password was sent to the email or phone. To obfuscate whether such an address or number already exists in the system, this response is sent in both cases. + content: + application/json: + schema: + type: object + properties: + message_id: + type: string + description: Unique ID of the message as reported by the SMS sending provider. Useful for tracking deliverability problems. + 400: + $ref: "#/components/responses/BadRequestResponse" + 422: + description: Returned when unable to validate the email address or phone number. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + 429: + $ref: "#/components/responses/RateLimitResponse" + + /magiclink: + post: + summary: Authenticate a user by sending them a magic link. + description: > + A magic link is a special type of URL that includes a One-Time Password. When a user visits this link in a browser they are immediately authenticated. + tags: + - auth + security: + - APIKeyAuth: [] + requestBody: + content: + application/json: + schema: + type: object + required: + - email + properties: + email: + type: string + format: email + data: + type: object + gotrue_meta_security: + $ref: "#/components/schemas/GoTrueMetaSecurity" + responses: + 200: + description: A magic link email has been sent to the address. An empty JSON object is returned. To obfuscate whether such an email address already exists in the system, this response is sent regardless of whether the address exists. + content: + application/json: + schema: + type: object + 400: + $ref: "#/components/responses/BadRequestResponse" + 422: + description: Returned when unable to validate the email address. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + 429: + $ref: "#/components/responses/RateLimitResponse"
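A sketch of requesting a magic link, under the same assumptions as the earlier sketches. As the /magiclink description above notes, the empty-object 200 deliberately reveals nothing about whether the address exists.

async function sendMagicLink(email: string) {
  const res = await fetch(`${BASE}/magiclink`, {
    method: "POST",
    headers: { apikey: API_KEY, "Content-Type": "application/json" },
    body: JSON.stringify({ email }),
  });
  if (!res.ok) throw new Error(`magiclink failed: ${res.status}`);
  return res.json(); // empty object on success
}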
+ + /otp: + post: + summary: Authenticate a user by sending them a One-Time Password over email or SMS. + tags: + - auth + security: + - APIKeyAuth: [] + requestBody: + content: + application/json: + schema: + type: object + properties: + email: + type: string + format: email + phone: + type: string + format: phone + channel: + type: string + enum: + - sms + - whatsapp + create_user: + type: boolean + data: + type: object + code_challenge_method: + type: string + enum: + - s256 + - plain + code_challenge: + type: string + gotrue_meta_security: + $ref: "#/components/schemas/GoTrueMetaSecurity" + responses: + 200: + description: A One-Time Password was sent to the email or phone. To obfuscate whether such an address or number already exists in the system, this response is sent in both cases. + content: + application/json: + schema: + type: object + properties: + message_id: + type: string + description: Unique ID of the message as reported by the SMS sending provider. Useful for tracking deliverability problems. + 400: + $ref: "#/components/responses/BadRequestResponse" + 422: + description: Returned when unable to validate the email or phone number. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + 429: + $ref: "#/components/responses/RateLimitResponse" + + /user: + get: + summary: Fetch the latest user account information. + tags: + - user + security: + - APIKeyAuth: [] + UserAuth: [] + responses: + 200: + description: User's account information. + content: + application/json: + schema: + $ref: "#/components/schemas/UserSchema" + put: + summary: Update certain properties of the current user account. + tags: + - user + security: + - APIKeyAuth: [] + UserAuth: [] + requestBody: + content: + application/json: + schema: + type: object + properties: + email: + type: string + format: email + phone: + type: string + format: phone + password: + type: string + nonce: + type: string + data: + type: object + app_metadata: + type: object + channel: + type: string + enum: + - sms + - whatsapp + responses: + 200: + description: User's updated account information. + content: + application/json: + schema: + $ref: "#/components/schemas/UserSchema" + 400: + $ref: "#/components/responses/BadRequestResponse" + 429: + $ref: "#/components/responses/RateLimitResponse" + + /reauthenticate: + post: + summary: Reauthenticates the possession of an email or phone number for the purpose of password change. + description: > + For a password to be changed on a user account, the user's email or phone number needs to be confirmed before they are allowed to set a new password. This requirement is configurable. This API sends a confirmation email or SMS message. A nonce in this message can be provided in `PUT /user` to change the password on the account. + tags: + - user + security: + - APIKeyAuth: [] + UserAuth: [] + responses: + 200: + description: A One-Time Password was sent to the user's email or phone. + content: + application/json: + schema: + type: object + 400: + $ref: "#/components/responses/BadRequestResponse" + 429: + $ref: "#/components/responses/RateLimitResponse"
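The /reauthenticate description above implies a two-step password change; a sketch of the second step follows, with the same assumptions as the earlier sketches plus one more: that the UserAuth scheme is a Bearer access token.

async function changePassword(accessToken: string, newPassword: string, nonce: string) {
  // Step 1 (done earlier): POST /reauthenticate with these same headers,
  // which sends the nonce to the user's email or phone.
  // Step 2: supply that nonce alongside the new password.
  const res = await fetch(`${BASE}/user`, {
    method: "PUT",
    headers: {
      apikey: API_KEY,
      Authorization: `Bearer ${accessToken}`, // assumption: UserAuth is a Bearer token
      "Content-Type": "application/json",
    },
    body: JSON.stringify({ password: newPassword, nonce }),
  });
  if (!res.ok) throw new Error(`password change failed: ${res.status}`);
  return res.json(); // updated UserSchema
}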
+ + /factors: + post: + summary: Begin enrolling a new factor for MFA. + tags: + - user + security: + - APIKeyAuth: [] + UserAuth: [] + requestBody: + content: + application/json: + schema: + type: object + required: + - factor_type + properties: + factor_type: + type: string + enum: + - totp + - phone + - webauthn + friendly_name: + type: string + issuer: + type: string + format: uri + phone: + type: string + format: phone + responses: + 200: + description: > + A new factor was created in the unverified state. Call `POST /factors/{factorId}/verify` to verify it. + content: + application/json: + schema: + type: object + properties: + id: + type: string + type: + type: string + enum: + - totp + - phone + - webauthn + totp: + type: object + properties: + qr_code: + type: string + secret: + type: string + uri: + type: string + phone: + type: string + format: phone + + 400: + $ref: "#/components/responses/BadRequestResponse" + + /factors/{factorId}/challenge: + post: + summary: Create a new challenge for an MFA factor. + tags: + - user + security: + - APIKeyAuth: [] + UserAuth: [] + parameters: + - name: factorId + in: path + required: true + example: 2b306a77-21dc-4110-ba71-537cb56b9e98 + schema: + type: string + format: uuid + requestBody: + content: + application/json: + schema: + type: object + properties: + channel: + type: string + enum: + - sms + - whatsapp + + responses: + 200: + description: > + A new challenge was generated for the factor. Use `POST /factors/{factorId}/verify` to verify the challenge. + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/TOTPPhoneChallengeResponse' + - $ref: '#/components/schemas/WebAuthnChallengeResponse' + 400: + $ref: "#/components/responses/BadRequestResponse" + 429: + $ref: "#/components/responses/RateLimitResponse" + + /factors/{factorId}/verify: + post: + summary: Verify a challenge on a factor. + tags: + - user + security: + - APIKeyAuth: [] + UserAuth: [] + parameters: + - name: factorId + in: path + required: true + example: 2b306a77-21dc-4110-ba71-537cb56b9e98 + schema: + type: string + format: uuid + requestBody: + content: + application/json: + schema: + type: object + required: + - challenge_id + properties: + challenge_id: + type: string + format: uuid + code: + type: string + responses: + 200: + description: > + This challenge has been verified. Client libraries should replace their stored access and refresh tokens with the ones provided in this response. These new credentials have an increased Authenticator Assurance Level (AAL). + content: + application/json: + schema: + $ref: "#/components/schemas/AccessTokenResponseSchema" + 400: + $ref: "#/components/responses/BadRequestResponse" + 429: + $ref: "#/components/responses/RateLimitResponse"
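The three endpoints above form one flow: enroll, challenge, verify. A compressed TOTP sketch under the same assumptions as the earlier sketches; `getCodeFromUser` is a placeholder for your UI, and reading `challenge.id` is an assumption, since the challenge response schemas are not shown in this excerpt.

async function enrollAndVerifyTotp(accessToken: string, getCodeFromUser: () => Promise<string>) {
  const headers = {
    apikey: API_KEY,
    Authorization: `Bearer ${accessToken}`,
    "Content-Type": "application/json",
  };
  // 1. Enroll: returns an unverified factor plus the TOTP secret / QR code.
  const factor = await (await fetch(`${BASE}/factors`, {
    method: "POST", headers, body: JSON.stringify({ factor_type: "totp" }),
  })).json();
  // 2. Create a challenge for the new factor.
  const challenge = await (await fetch(`${BASE}/factors/${factor.id}/challenge`, {
    method: "POST", headers, body: JSON.stringify({}),
  })).json();
  // 3. Verify with the code from the user's authenticator app; the response
  //    carries new tokens at a higher AAL, which should replace the old ones.
  const code = await getCodeFromUser();
  return (await fetch(`${BASE}/factors/${factor.id}/verify`, {
    method: "POST", headers, body: JSON.stringify({ challenge_id: challenge.id, code }),
  })).json();
}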
+ + /factors/{factorId}: + delete: + summary: Remove an MFA factor from a user. + tags: + - user + security: + - APIKeyAuth: [] + UserAuth: [] + parameters: + - name: factorId + in: path + required: true + example: 2b306a77-21dc-4110-ba71-537cb56b9e98 + schema: + type: string + format: uuid + responses: + 200: + description: > + This MFA factor is removed (unenrolled) and cannot be used for increasing the AAL of the user's sessions. Client libraries should use the `POST /token?grant_type=refresh_token` endpoint to get a new access and refresh token with a decreased AAL. + content: + application/json: + schema: + type: object + properties: + id: + type: string + format: uuid + example: 2b306a77-21dc-4110-ba71-537cb56b9e98 + 400: + $ref: "#/components/responses/BadRequestResponse" + + /callback: + get: + summary: Redirects OAuth flow errors to the frontend app. + description: > + When an OAuth sign-in flow fails for any reason, the error message needs to be delivered to the frontend app requesting the flow. This callback delivers the errors as `error` and `error_description` query params. Usually this request is not called directly. + tags: + - oauth + security: + - APIKeyAuth: [] + responses: + 302: + $ref: "#/components/responses/OAuthCallbackRedirectResponse" + post: + summary: Redirects OAuth flow errors to the frontend app. + description: > + When an OAuth sign-in flow fails for any reason, the error message needs to be delivered to the frontend app requesting the flow. This callback delivers the errors as `error` and `error_description` query params. Usually this request is not called directly. + tags: + - oauth + responses: + 302: + $ref: "#/components/responses/OAuthCallbackRedirectResponse" + + /sso: + post: + summary: Initiate a Single Sign-On flow. + tags: + - sso + security: + - APIKeyAuth: [] + requestBody: + content: + application/json: + schema: + type: object + properties: + domain: + type: string + format: hostname + description: Email address domain used to identify the SSO provider. + provider_id: + type: string + format: uuid + example: 40451fc2-4997-429c-bf7f-cc6f33c788e6 + redirect_to: + type: string + format: uri + skip_http_redirect: + type: boolean + description: Set to `true` if the response to this request should not be an HTTP 303 redirect -- useful for browser-based applications. + code_challenge: + type: string + code_challenge_method: + type: string + enum: + - plain + - s256 + gotrue_meta_security: + $ref: "#/components/schemas/GoTrueMetaSecurity" + responses: + 200: + description: > + Returned only when `skip_http_redirect` is `true` and the SSO provider could be identified from the `provider_id` or `domain`. Client libraries should use the returned URL to redirect or open a browser. + content: + application/json: + schema: + type: object + properties: + url: + type: string + format: uri + 303: + description: > + Returned only when `skip_http_redirect` is `false` or not present and the SSO provider could be identified from the `provider_id` or `domain`. Client libraries should follow the redirect. 303 is used instead of 302 because the request should be executed with a `GET` verb. + headers: + Location: + schema: + type: string + format: uri + 400: + $ref: "#/components/responses/BadRequestResponse" + 404: + description: > + Returned when the SSO provider could not be identified. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema"
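A sketch of starting SSO from a browser app using `skip_http_redirect`, per the 200 response above; same assumptions as the earlier sketches.

async function startSso(domain: string) {
  const res = await fetch(`${BASE}/sso`, {
    method: "POST",
    headers: { apikey: API_KEY, "Content-Type": "application/json" },
    body: JSON.stringify({ domain, skip_http_redirect: true }),
  });
  if (!res.ok) throw new Error(`sso failed: ${res.status}`); // 404 if no provider matches
  const { url } = await res.json();
  window.location.assign(url); // send the user to the identity provider
}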
+ + /saml/metadata: + get: + summary: Returns the SAML 2.0 Metadata XML. + description: > + The metadata XML can be downloaded or used for the SAML 2.0 Metadata URL discovery mechanism. This URL is the SAML 2.0 EntityID of the Service Provider implemented by this server. + tags: + - saml + security: + - APIKeyAuth: [] + parameters: + - name: download + in: query + description: > + If set to `true` will add a `Content-Disposition` header to the response which will trigger a download dialog on the browser. + schema: + type: boolean + responses: + 200: + description: > + A valid SAML 2.0 Metadata XML document. Should be cached according to the `Cache-Control` header and/or caching data specified in the document itself. + headers: + Content-Disposition: + description: > + Present if `download=true`, which triggers the browser to show a download dialog. + schema: + type: string + example: attachment; filename="metadata.xml" + Cache-Control: + description: > + Should be parsed and obeyed to avoid putting strain on the server. + schema: + type: string + example: public, max-age=600 + + /saml/acs: + post: + summary: SAML 2.0 Assertion Consumer Service (ACS) endpoint. + description: > + Implements the SAML 2.0 Assertion Consumer Service (ACS) endpoint supporting the POST and Artifact bindings. + tags: + - saml + security: [] + parameters: + - name: RelayState + in: query + schema: + oneOf: + - type: string + format: uri + description: URL to take the user to after the ACS has been verified. Often sent by Identity Provider initiated login requests. + - type: string + format: uuid + description: UUID of the SAML Relay State stored in the database, used to identify the Service Provider initiated login request. + - name: SAMLArt + in: query + description: > + See the SAML 2.0 ACS specification. Cannot be used without a UUID `RelayState` parameter. + schema: + type: string + - name: SAMLResponse + in: query + description: > + See the SAML 2.0 ACS specification. Must be present unless `SAMLArt` is specified. If `RelayState` is not a UUID, the SAML Response is unpacked and the identity provider is identified from the response. + schema: + type: string + responses: + 302: + $ref: "#/components/responses/AccessRefreshTokenRedirectResponse" + 400: + $ref: "#/components/responses/BadRequestResponse" + 429: + $ref: "#/components/responses/RateLimitResponse" + + /invite: + post: + summary: Invite a user by email. + description: > + Sends an invitation email which contains a link that allows the user to sign in. + tags: + - admin + security: + - APIKeyAuth: [] + requestBody: + content: + application/json: + schema: + type: object + required: + - email + properties: + email: + type: string + data: + type: object + responses: + 200: + description: An invitation has been sent to the user. + content: + application/json: + schema: + $ref: "#/components/schemas/UserSchema" + 400: + $ref: "#/components/responses/BadRequestResponse" + 422: + description: User already exists and has confirmed their address. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + + /admin/generate_link: + post: + summary: Generate a link to send in an email message. + tags: + - admin + security: + - APIKeyAuth: [] + AdminAuth: [] + requestBody: + content: + application/json: + schema: + type: object + required: + - type + - email + properties: + type: + type: string + enum: + - magiclink + - signup + - recovery + - email_change_current + - email_change_new + email: + type: string + format: email + new_email: + type: string + format: email + password: + type: string + data: + type: object + redirect_to: + type: string + format: uri + responses: + 200: + description: User profile and generated link information.
+ content: + application/json: + schema: + type: object + additionalProperties: true + properties: + action_link: + type: string + format: uri + email_otp: + type: string + hashed_token: + type: string + verification_type: + type: string + redirect_to: + type: string + format: uri + 400: + $ref: "#/components/responses/BadRequestResponse" + 401: + $ref: "#/components/responses/UnauthorizedResponse" + 403: + $ref: "#/components/responses/ForbiddenResponse" + 404: + description: There is no such user. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + 422: + description: > + Has multiple meanings: + - User already exists + - Provided password does not meet minimum criteria + - Secure email change not enabled + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + + /admin/audit: + get: + summary: Fetch audit log events. + tags: + - admin + security: + - APIKeyAuth: [] + AdminAuth: [] + parameters: + - name: page + in: query + schema: + type: integer + minimum: 1 + default: 1 + - name: per_page + in: query + schema: + type: integer + minimum: 1 + default: 50 + responses: + 200: + description: List of audit logs. + content: + application/json: + schema: + type: array + items: + type: object + properties: + id: + type: string + format: uuid + payload: + type: object + properties: + actor_id: + type: string + actor_via_sso: + type: boolean + description: Whether the actor used a SSO protocol (like SAML 2.0 or OIDC) to authenticate. + actor_username: + type: string + actor_name: + type: string + traits: + type: object + action: + type: string + description: |- + Usually one of these values: + - login + - logout + - invite_accepted + - user_signedup + - user_invited + - user_deleted + - user_modified + - user_recovery_requested + - user_reauthenticate_requested + - user_confirmation_requested + - user_repeated_signup + - user_updated_password + - token_revoked + - token_refreshed + - generate_recovery_codes + - factor_in_progress + - factor_unenrolled + - challenge_created + - verification_attempted + - factor_deleted + - recovery_codes_deleted + - factor_updated + - mfa_code_login + log_type: + type: string + description: |- + Usually one of these values: + - account + - team + - token + - user + - factor + - recovery_codes + created_at: + type: string + format: date-time + ip_address: + type: string + 401: + $ref: "#/components/responses/UnauthorizedResponse" + 403: + $ref: "#/components/responses/ForbiddenResponse" + + /admin/users: + get: + summary: Fetch a listing of users. + tags: + - admin + security: + - APIKeyAuth: [] + AdminAuth: [] + parameters: + - name: page + in: query + schema: + type: integer + minimum: 1 + default: 1 + - name: per_page + in: query + schema: + type: integer + minimum: 1 + default: 50 + responses: + 200: + description: A page of users. + content: + application/json: + schema: + type: object + properties: + aud: + type: string + deprecated: true + users: + type: array + items: + $ref: "#/components/schemas/UserSchema" + 401: + $ref: "#/components/responses/UnauthorizedResponse" + 403: + $ref: "#/components/responses/ForbiddenResponse" + + /admin/users/{userId}: + parameters: + - name: userId + in: path + required: true + schema: + type: string + format: uuid + get: + summary: Fetch user account data for a user. + tags: + - admin + security: + - APIKeyAuth: [] + AdminAuth: [] + responses: + 200: + description: User's account data. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/UserSchema" + 401: + $ref: "#/components/responses/UnauthorizedResponse" + 403: + $ref: "#/components/responses/ForbiddenResponse" + 404: + description: There is no such user. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + put: + summary: Update user's account data. + tags: + - admin + security: + - APIKeyAuth: [] + AdminAuth: [] + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/UserSchema" + responses: + 200: + description: User's account data was updated. + content: + application/json: + schema: + $ref: "#/components/schemas/UserSchema" + 401: + $ref: "#/components/responses/UnauthorizedResponse" + 403: + $ref: "#/components/responses/ForbiddenResponse" + 404: + description: There is no such user. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + delete: + summary: Delete a user. + tags: + - admin + security: + - APIKeyAuth: [] + AdminAuth: [] + responses: + 200: + description: User's account data. + content: + application/json: + schema: + $ref: "#/components/schemas/UserSchema" + 401: + $ref: "#/components/responses/UnauthorizedResponse" + 403: + $ref: "#/components/responses/ForbiddenResponse" + 404: + description: There is no such user. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + + /admin/users/{userId}/factors: + parameters: + - name: userId + in: path + required: true + schema: + type: string + format: uuid + get: + summary: List all of the MFA factors for a user. + tags: + - admin + security: + - APIKeyAuth: [] + AdminAuth: [] + responses: + 200: + description: User's MFA factors. + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/MFAFactorSchema" + 401: + $ref: "#/components/responses/UnauthorizedResponse" + 403: + $ref: "#/components/responses/ForbiddenResponse" + 404: + description: There is no such user. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + + /admin/users/{userId}/factors/{factorId}: + parameters: + - name: userId + in: path + required: true + schema: + type: string + format: uuid + - name: factorId + in: path + required: true + schema: + type: string + format: uuid + put: + summary: Update a user's MFA factor. + tags: + - admin + security: + - APIKeyAuth: [] + AdminAuth: [] + requestBody: + content: + application/json: + schema: + type: object + responses: + 200: + description: User's MFA factor. + content: + application/json: + schema: + $ref: "#/components/schemas/MFAFactorSchema" + 401: + $ref: "#/components/responses/UnauthorizedResponse" + 403: + $ref: "#/components/responses/ForbiddenResponse" + 404: + description: There is no such user and/or factor. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + delete: + summary: Remove a user's MFA factor. + tags: + - admin + security: + - APIKeyAuth: [] + AdminAuth: [] + responses: + 200: + description: User's MFA factor. + content: + application/json: + schema: + $ref: "#/components/schemas/MFAFactorSchema" + 401: + $ref: "#/components/responses/UnauthorizedResponse" + 403: + $ref: "#/components/responses/ForbiddenResponse" + 404: + description: There is no such user and/or factor. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + + /admin/sso/providers: + get: + summary: Fetch a list of all registered SSO providers. 
+ tags: + - admin + security: + - APIKeyAuth: [] + AdminAuth: [] + responses: + 200: + description: A list of all providers. + content: + application/json: + schema: + type: object + properties: + items: + type: array + items: + $ref: "#/components/schemas/SSOProviderSchema" + post: + summary: Register a new SSO provider. + tags: + - admin + security: + - APIKeyAuth: [] + AdminAuth: [] + requestBody: + content: + application/json: + schema: + type: object + required: + - type + properties: + type: + type: string + enum: + - saml + metadata_url: + type: string + format: uri + metadata_xml: + type: string + domains: + type: array + items: + type: string + format: hostname + attribute_mapping: + $ref: "#/components/schemas/SAMLAttributeMappingSchema" + responses: + 200: + description: SSO provider was created. + content: + application/json: + schema: + $ref: "#/components/schemas/SSOProviderSchema" + 400: + $ref: "#/components/responses/BadRequestResponse" + 401: + $ref: "#/components/responses/UnauthorizedResponse" + 403: + $ref: "#/components/responses/ForbiddenResponse" + + /admin/sso/providers/{ssoProviderId}: + parameters: + - name: ssoProviderId + in: path + required: true + schema: + type: string + format: uuid + get: + summary: Fetch SSO provider details. + tags: + - admin + security: + - APIKeyAuth: [] + AdminAuth: [] + responses: + 200: + description: SSO provider exists with these details. + content: + application/json: + schema: + $ref: "#/components/schemas/SSOProviderSchema" + 401: + $ref: "#/components/responses/UnauthorizedResponse" + 403: + $ref: "#/components/responses/ForbiddenResponse" + 404: + description: A provider with this UUID does not exist. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + put: + summary: Update details about an SSO provider. + description: > + You can update only one of `metadata_url` or `metadata_xml` at a time. The SAML Metadata represented by these updates must advertise the same Identity Provider EntityID. Do not include the `domains` or `attribute_mapping` property to keep the existing database values. + tags: + - admin + security: + - APIKeyAuth: [] + AdminAuth: [] + requestBody: + content: + application/json: + schema: + type: object + properties: + metadata_url: + type: string + format: uri + metadata_xml: + type: string + domains: + type: array + items: + type: string + pattern: "[a-z0-9-]+([.][a-z0-9-]+)*" + attribute_mapping: + $ref: "#/components/schemas/SAMLAttributeMappingSchema" + responses: + 200: + description: SSO provider details were updated. + content: + application/json: + schema: + $ref: "#/components/schemas/SSOProviderSchema" + 400: + $ref: "#/components/responses/BadRequestResponse" + 401: + $ref: "#/components/responses/UnauthorizedResponse" + 403: + $ref: "#/components/responses/ForbiddenResponse" + 404: + description: A provider with this UUID does not exist. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + delete: + summary: Remove an SSO provider. + tags: + - admin + security: + - APIKeyAuth: [] + AdminAuth: [] + responses: + 200: + description: SSO provider was removed. + content: + application/json: + schema: + $ref: "#/components/schemas/SSOProviderSchema" + 401: + $ref: "#/components/responses/UnauthorizedResponse" + 403: + $ref: "#/components/responses/ForbiddenResponse" + 404: + description: A provider with this UUID does not exist.
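+          # A sketch of registering a SAML provider via the POST endpoint above (the metadata URL and domain are hypothetical placeholders):
+          #
+          #   POST /admin/sso/providers
+          #   {"type": "saml",
+          #    "metadata_url": "https://idp.example.com/metadata.xml",
+          #    "domains": ["example.com"]}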
+ content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + + /health: + get: + summary: Service healthcheck. + description: Ping this endpoint to receive information about the health of the service. + tags: + - general + security: + - APIKeyAuth: [] + responses: + 200: + description: > + Service is healthy. + content: + application/json: + schema: + type: object + properties: + version: + type: string + example: v2.40.1 + name: + type: string + example: GoTrue + description: + type: string + example: GoTrue is a user registration and authentication API + + 500: + description: > + Service is not healthy. Retriable with exponential backoff. + 502: + description: > + Service is not healthy: infrastructure issue. Usually not retriable. + 503: + description: > + Service is not healthy: infrastructure issue. Retriable with exponential backoff. + 504: + description: > + Service is not healthy: request timed out. Retriable with exponential backoff. + + /settings: + get: + summary: Retrieve some of the public settings of the server. + description: > + Use this endpoint to configure parts of any authentication UIs depending on the configured settings. + tags: + - general + security: + - APIKeyAuth: [] + responses: + 200: + description: > + Currently applicable settings of the server. + content: + application/json: + schema: + type: object + properties: + disable_signup: + type: boolean + example: false + description: Whether new accounts can be created. (Valid for all providers.) + mailer_autoconfirm: + type: boolean + example: false + description: Whether new email addresses need to be confirmed before sign-in is possible. + phone_autoconfirm: + type: boolean + example: false + description: Whether new phone numbers need to be confirmed before sign-in is possible. + sms_provider: + type: string + optional: true + example: twilio + description: Which SMS provider is being used to send messages to phone numbers. + saml_enabled: + type: boolean + example: true + description: Whether SAML is enabled on this API server. Defaults to false. + external: + type: object + description: Which external identity providers are enabled. + example: + github: true + apple: true + email: true + phone: true + patternProperties: + "[a-zA-Z0-9]+": + type: boolean + +components: + securitySchemes: + UserAuth: + type: http + scheme: bearer + description: > + An access token in the form of a JWT issued by this server. + + AdminAuth: + type: http + scheme: bearer + description: > + A special admin JWT. + + APIKeyAuth: + type: apiKey + in: header + name: apikey + description: > + When deployed on Supabase, this server requires an `apikey` header containing a valid Supabase-issued API key to call any endpoint. + + schemas: + GoTrueMetaSecurity: + type: object + description: > + Use this property to pass a CAPTCHA token only if you have enabled CAPTCHA protection. + properties: + captcha_token: + type: string + + ErrorSchema: + type: object + properties: + error: + type: string + description: |- + Certain responses will contain this property with the provided values. + + Usually one of these: + - invalid_request + - unauthorized_client + - access_denied + - server_error + - temporarily_unavailable + - unsupported_otp_type + error_description: + type: string + description: > + Certain responses that have an `error` property may have this property which describes the error. + code: + type: integer + description: > + The HTTP status code. Usually missing if `error` is present.
+ example: 400 + msg: + type: string + description: > + A basic message describing the problem with the request. Usually missing if `error` is present. + weak_password: + type: object + description: > + Only returned on the `/signup` endpoint if the password used is too weak. Inspect the `reasons` and `msg` properties to identify the causes. + properties: + reasons: + type: array + items: + type: string + enum: + - length + - characters + - pwned + + UserSchema: + type: object + description: Object describing the user related to the issued access and refresh tokens. + properties: + id: + type: string + format: uuid + aud: + type: string + deprecated: true + role: + type: string + email: + type: string + description: User's primary contact email. In most cases you can uniquely identify a user by their email address, but not always. + email_confirmed_at: + type: string + format: date-time + phone: + type: string + format: phone + description: User's primary contact phone number. In most cases you can uniquely identify a user by their phone number, but not always. + phone_confirmed_at: + type: string + format: date-time + confirmation_sent_at: + type: string + format: date-time + confirmed_at: + type: string + format: date-time + recovery_sent_at: + type: string + format: date-time + new_email: + type: string + format: email + email_change_sent_at: + type: string + format: date-time + new_phone: + type: string + format: phone + phone_change_sent_at: + type: string + format: date-time + reauthentication_sent_at: + type: string + format: date-time + last_sign_in_at: + type: string + format: date-time + app_metadata: + type: object + user_metadata: + type: object + factors: + type: array + items: + $ref: "#/components/schemas/MFAFactorSchema" + identities: + type: array + items: + $ref: "#/components/schemas/IdentitySchema" + banned_until: + type: string + format: date-time + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + deleted_at: + type: string + format: date-time + is_anonymous: + type: boolean + + SAMLAttributeMappingSchema: + type: object + properties: + keys: + type: object + patternProperties: + ".+": + type: object + properties: + name: + type: string + names: + type: array + items: + type: string + default: + oneOf: + - type: string + - type: number + - type: boolean + - type: object + + SSOProviderSchema: + type: object + properties: + id: + type: string + format: uuid + sso_domains: + type: array + items: + type: object + properties: + domain: + type: string + format: hostname + saml: + type: object + properties: + entity_id: + type: string + metadata_xml: + type: string + metadata_url: + type: string + attribute_mapping: + $ref: "#/components/schemas/SAMLAttributeMappingSchema" + + AccessTokenResponseSchema: + type: object + properties: + access_token: + type: string + description: A valid JWT that will expire in `expires_in` seconds. + refresh_token: + type: string + description: An opaque string that can be used once to obtain a new access and refresh token. + token_type: + type: string + description: What type of token this is. Only `bearer` is returned; this may change in the future. + expires_in: + type: integer + description: Number of seconds after which the `access_token` should be renewed by using the refresh token with the `refresh_token` grant type. + expires_at: + type: integer + description: UNIX timestamp after which the `access_token` should be renewed by using the refresh token with the `refresh_token` grant type.
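+        # A sketch of the JSON this schema describes (all values below are hypothetical placeholders):
+        #
+        #   {"access_token": "eyJhbGciOi...", "token_type": "bearer",
+        #    "expires_in": 3600, "expires_at": 1674844517,
+        #    "refresh_token": "o9WpYkTKk...", "user": {...}}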
+ weak_password: + type: object + description: Only returned on the `/token?grant_type=password` endpoint. When present, it indicates that the password used is weak. Inspect the `reasons` and/or `message` properties to identify why. + properties: + reasons: + type: array + items: + type: string + enum: + - length + - characters + - pwned + message: + type: string + user: + $ref: "#/components/schemas/UserSchema" + + MFAFactorSchema: + type: object + description: Represents an MFA factor. + properties: + id: + type: string + format: uuid + status: + type: string + description: |- + Usually one of: + - verified + - unverified + friendly_name: + type: string + factor_type: + type: string + description: |- + Usually one of: + - totp + - phone + - webauthn + web_authn_credential: + type: object + phone: + type: string + format: phone + nullable: true + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + last_challenged_at: + type: string + format: date-time + nullable: true + + + IdentitySchema: + type: object + properties: + identity_id: + type: string + format: uuid + id: + type: string + format: uuid + user_id: + type: string + format: uuid + identity_data: + type: object + provider: + type: string + last_sign_in_at: + type: string + format: date-time + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + email: + type: string + format: email + TOTPPhoneChallengeResponse: + type: object + required: + - id + - type + - expires_at + properties: + id: + type: string + format: uuid + example: 14c1560e-2749-4522-bb62-d1458451830a + description: ID of the challenge. + type: + type: string + enum: [totp, phone] + description: Type of the challenge. + expires_at: + type: integer + example: 1674840917 + description: UNIX seconds of the timestamp past which the challenge should not be verified. + + WebAuthnChallengeResponse: + type: object + required: + - id + - type + - expires_at + - credential_options + properties: + id: + type: string + format: uuid + example: 14c1560e-2749-4522-bb62-d1458451830a + description: ID of the challenge. + type: + type: string + enum: [webauthn] + description: Type of the challenge. + expires_at: + type: integer + example: 1674840917 + description: UNIX seconds of the timestamp past which the challenge should not be verified.
+ credential_request_options: + $ref: '#/components/schemas/CredentialRequestOptions' + credential_creation_options: + $ref: '#/components/schemas/CredentialCreationOptions' + + CredentialAssertion: + type: object + description: WebAuthn credential assertion options + required: + - challenge + - rpId + - allowCredentials + - timeout + properties: + challenge: + type: string + description: A random challenge generated by the server, base64url encoded + example: "Y2hhbGxlbmdlAyv-5P0kw1SG-OxhLbSHpRLdWaVR1w" + rpId: + type: string + description: The relying party's identifier (usually the domain name) + example: "example.com" + allowCredentials: + type: array + description: List of credentials acceptable for this authentication + items: + type: object + required: + - id + - type + properties: + id: + type: string + description: Credential ID, base64url encoded + example: "AXwyVxYT7BgNKwNq0YqUXaHHIdRK6OdFGCYgZF9K6zNu" + type: + type: string + enum: [public-key] + description: Type of the credential + timeout: + type: integer + description: Time (in milliseconds) that the user has to respond to the authentication prompt + example: 60000 + userVerification: + type: string + enum: [required, preferred, discouraged] + description: The relying party's requirements for user verification + default: preferred + extensions: + type: object + description: Additional parameters requesting additional processing by the client + status: + type: string + enum: [ok, failed] + description: Status of the credential assertion + errorMessage: + type: string + description: Error message if the assertion failed + userHandle: + type: string + description: User handle, base64url encoded + authenticatorAttachment: + type: string + enum: [platform, cross-platform] + description: Type of authenticator to use + + CredentialRequest: + type: object + description: WebAuthn credential request (for the response from the client) + required: + - id + - rawId + - type + - response + properties: + id: + type: string + description: Base64url encoding of the credential ID + example: "AXwyVxYT7BgNKwNq0YqUXaHHIdRK6OdFGCYgZF9K6zNu" + rawId: + type: string + description: Base64url encoding of the credential ID (same as id) + example: "AXwyVxYT7BgNKwNq0YqUXaHHIdRK6OdFGCYgZF9K6zNu" + type: + type: string + enum: [public-key] + description: Type of the credential + response: + type: object + required: + - clientDataJSON + - authenticatorData + - signature + - userHandle + properties: + clientDataJSON: + type: string + description: Base64url encoding of the client data + example: "eyJ0eXBlIjoid2ViYXV0aG4uZ2V0IiwiY2hhbGxlbmdlIjoiY2hhbGxlbmdlIiwib3JpZ2luIjoiaHR0cHM6Ly9leGFtcGxlLmNvbSJ9" + authenticatorData: + type: string + description: Base64url encoding of the authenticator data + example: "SZYN5YgOjGh0NBcPZHZgW4_krrmihjLHmVzzuoMdl2MBAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAXwyVxYT7BgNKwNq0YqUXaHHIdRK6OdFGCYgZF9K6zNu" + signature: + type: string + description: Base64url encoding of the signature + example: "MEUCIQCx5cJVAB3kGP6bqCIoAV6CkBpVAf8rcx0WSZ22fIxXvQIgCKFt9pEu1vK8U4JKYTfn6tGjvGNfx2F4uXrHSXlefvM" + userHandle: + type: string + description: Base64url encoding of the user handle + example: "MQ" + clientExtensionResults: + type: object + description: Client extension results + + CredentialRequestOptions: + type: object + description: Options for requesting an assertion + properties: + challenge: + type: string + format: byte + description: A challenge to be signed by the authenticator + timeout: + type: integer + description: Time (in milliseconds) 
that the caller is willing to wait for the call to complete + rpId: + type: string + description: Relying Party ID + allowCredentials: + type: array + items: + $ref: '#/components/schemas/PublicKeyCredentialDescriptor' + userVerification: + type: string + enum: [required, preferred, discouraged] + description: User verification requirement + + CredentialCreationOptions: + type: object + description: Options for creating a new credential + properties: + rp: + type: object + properties: + id: + type: string + name: + type: string + user: + $ref: '#/components/schemas/UserSchema' + + challenge: + type: string + format: byte + description: A challenge to be signed by the authenticator + pubKeyCredParams: + type: array + items: + type: object + properties: + type: + type: string + enum: [public-key] + alg: + type: integer + timeout: + type: integer + description: Time (in milliseconds) that the caller is willing to wait for the call to complete + excludeCredentials: + type: array + items: + $ref: '#/components/schemas/PublicKeyCredentialDescriptor' + authenticatorSelection: + type: object + properties: + authenticatorAttachment: + type: string + enum: [platform, cross-platform] + requireResidentKey: + type: boolean + userVerification: + type: string + enum: [required, preferred, discouraged] + attestation: + type: string + enum: [none, indirect, direct] + description: Preferred attestation conveyance + + PublicKeyCredentialDescriptor: + type: object + properties: + type: + type: string + enum: [public-key] + id: + type: string + format: byte + description: Credential ID + transports: + type: array + items: + type: string + enum: [usb, nfc, ble, internal] + + responses: + OAuthCallbackRedirectResponse: + description: > + HTTP Redirect to a URL containing the `error` and `error_description` query parameters which should be shown to the user requesting the OAuth sign-in flow. + headers: + Location: + description: > + URL containing the `error` and `error_description` query parameters. + schema: + type: string + format: uri + example: https://example.com/?error=server_error&error_description=User%20does%20not%20exist. + + OAuthAuthorizeRedirectResponse: + description: > + HTTP Redirect to the OAuth identity provider's authorization URL. + headers: + Location: + description: > + URL to which the user agent should redirect (or open in a browser for mobile apps). + schema: + type: string + format: uri + + RateLimitResponse: + description: > + HTTP Too Many Requests response, when a rate limiter has been breached. + content: + application/json: + schema: + type: object + properties: + code: + type: integer + example: 429 + msg: + type: string + description: A basic message describing the rate limit breach. Do not use as an error code identifier. + example: Too many requests. Please try again in a few seconds. + + BadRequestResponse: + description: > + HTTP Bad Request response. Can occur if the passed in JSON cannot be unmarshalled properly or when CAPTCHA verification was not successful. In certain cases can also occur when features are disabled on the server (e.g. sign ups). It may also mean that the operation failed due to some constraint not being met (such as a user already existing, for example). + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + + UnauthorizedResponse: + description: > + HTTP Unauthorized response. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + + ForbiddenResponse: + description: > + HTTP Forbidden response.
+ content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + + InternalServerErrorResponse: + description: > + HTTP Internal Server Error. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorSchema" + + AccessRefreshTokenRedirectResponse: + description: > + HTTP See Other redirect response where `Location` is a specially formatted URL that includes an `access_token`, `refresh_token`, `expires_in` as URL query encoded values in the URL fragment (anything after `#`). These values are encoded in the fragment as this value is only visible to the browser handling the redirect and is not sent to the server. + headers: + Location: + schema: + type: string + format: uri + example: https://example.com/#access_token=...&refresh_token=...&expires_in=... diff --git a/docker-kong_v2.8.1/.editorconfig b/docker-kong_v2.8.1/.editorconfig new file mode 100644 index 0000000..878f31a --- /dev/null +++ b/docker-kong_v2.8.1/.editorconfig @@ -0,0 +1,18 @@ +root = true + +[*] +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true +charset = utf-8 + +[*.sh] +indent_style = space +indent_size = 2 + +[Dockerfile] +indent_style = space +indent_size = 2 + +[Makefile] +indent_style = tab diff --git a/docker-kong_v2.8.1/.gitignore b/docker-kong_v2.8.1/.gitignore new file mode 100644 index 0000000..26f21b2 --- /dev/null +++ b/docker-kong_v2.8.1/.gitignore @@ -0,0 +1,6 @@ +.DS_Store +.che +kong-build-tools +kong +*.bak +submit diff --git a/docker-kong_v2.8.1/Dockerfile.apk b/docker-kong_v2.8.1/Dockerfile.apk new file mode 100644 index 0000000..06f16b6 --- /dev/null +++ b/docker-kong_v2.8.1/Dockerfile.apk @@ -0,0 +1,54 @@ +FROM alpine:3.15 + +ARG KONG_VERSION=2.8.1 +ENV KONG_VERSION $KONG_VERSION + +ARG KONG_AMD64_SHA="ccda33bf02803b6b8dd46b22990f92265fe61d900ba94e3e0fa26db0433098c0" +ARG KONG_ARM64_SHA="d21690332a89adf9900f7266e083f41f565eb009f2771ef112f3564878eeff53" + +ARG ASSET=remote +ARG EE_PORTS + +COPY kong.apk.tar.gz /tmp/kong.apk.tar.gz + +RUN set -ex; \ + apk add bash curl ca-certificates; \ + arch="$(apk --print-arch)"; \ + case "${arch}" in \ + x86_64) export ARCH='amd64'; KONG_SHA256=$KONG_AMD64_SHA ;; \ + aarch64) export ARCH='arm64'; KONG_SHA256=$KONG_ARM64_SHA ;; \ + esac; \ + if [ "$ASSET" = "remote" ] ; then \ + curl -fL "https://download.konghq.com/gateway-${KONG_VERSION%%.*}.x-alpine/kong-${KONG_VERSION}.${ARCH}.apk.tar.gz" -o /tmp/kong.apk.tar.gz \ + && echo "$KONG_SHA256 /tmp/kong.apk.tar.gz" | sha256sum -c -; \ + fi \ + && apk add --no-cache --virtual .build-deps tar gzip \ + && tar -C / -xzf /tmp/kong.apk.tar.gz \ + && apk add --no-cache libstdc++ libgcc openssl pcre perl tzdata libcap zlib zlib-dev bash \ + && adduser -S kong \ + && addgroup -S kong \ + && mkdir -p "/usr/local/kong" \ + && chown -R kong:0 /usr/local/kong \ + && chown kong:0 /usr/local/bin/kong \ + && chmod -R g=u /usr/local/kong \ + && rm -rf /tmp/kong.apk.tar.gz \ + && ln -s /usr/local/openresty/bin/resty /usr/local/bin/resty \ + && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/luajit \ + && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/lua \ + && ln -s /usr/local/openresty/nginx/sbin/nginx /usr/local/bin/nginx \ + && apk del .build-deps \ + && kong version + +COPY docker-entrypoint.sh /docker-entrypoint.sh + +USER kong + +ENTRYPOINT ["/docker-entrypoint.sh"] + +EXPOSE 8000 8443 8001 8444 $EE_PORTS + +STOPSIGNAL SIGQUIT + +HEALTHCHECK --interval=10s --timeout=10s --retries=10 CMD kong health + +CMD ["kong", "docker-start"] diff 
--git a/docker-kong_v2.8.1/Dockerfile.deb b/docker-kong_v2.8.1/Dockerfile.deb new file mode 100644 index 0000000..6ad4625 --- /dev/null +++ b/docker-kong_v2.8.1/Dockerfile.deb @@ -0,0 +1,47 @@ +FROM debian:bullseye-slim + +ARG KONG_VERSION=2.8.1 +ENV KONG_VERSION $KONG_VERSION + +ARG KONG_SHA256="a57852f49eb6aea6b672a99d1361457b208e8a2d63df3fa362b2d2d6ac979b4c" + +ARG ASSET=remote +ARG EE_PORTS + +COPY kong.deb /tmp/kong.deb + +RUN set -ex; \ + apt-get update; \ + apt-get install -y curl; \ + if [ "$ASSET" = "remote" ] ; then \ + CODENAME=$(cat /etc/os-release | grep VERSION_CODENAME | cut -d = -f 2) \ + && DOWNLOAD_URL="https://download.konghq.com/gateway-${KONG_VERSION%%.*}.x-debian-${CODENAME}/pool/all/k/kong/kong_${KONG_VERSION}_amd64.deb" \ + && curl -fL $DOWNLOAD_URL -o /tmp/kong.deb \ + && echo "$KONG_SHA256 /tmp/kong.deb" | sha256sum -c -; \ + fi \ + && apt-get update \ + && apt-get install --yes /tmp/kong.deb \ + && rm -rf /var/lib/apt/lists/* \ + && rm -rf /tmp/kong.deb \ + && chown kong:0 /usr/local/bin/kong \ + && chown -R kong:0 /usr/local/kong \ + && ln -s /usr/local/openresty/bin/resty /usr/local/bin/resty \ + && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/luajit \ + && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/lua \ + && ln -s /usr/local/openresty/nginx/sbin/nginx /usr/local/bin/nginx \ + && kong version \ + && apt-get purge curl -y + +COPY docker-entrypoint.sh /docker-entrypoint.sh + +USER kong + +ENTRYPOINT ["/docker-entrypoint.sh"] + +EXPOSE 8000 8443 8001 8444 $EE_PORTS + +STOPSIGNAL SIGQUIT + +HEALTHCHECK --interval=10s --timeout=10s --retries=10 CMD kong health + +CMD ["kong", "docker-start"] diff --git a/docker-kong_v2.8.1/Dockerfile.rpm b/docker-kong_v2.8.1/Dockerfile.rpm new file mode 100644 index 0000000..33687d6 --- /dev/null +++ b/docker-kong_v2.8.1/Dockerfile.rpm @@ -0,0 +1,43 @@ +FROM redhat/ubi8 + +ARG KONG_VERSION=2.8.1 +ENV KONG_VERSION $KONG_VERSION + +ARG KONG_SHA256="8bae61982b8d439e12037e24432adc185f87113989c27c89521d8cc024b38ddc" + +ARG ASSET=remote +ARG EE_PORTS + +COPY kong.rpm /tmp/kong.rpm + +# hadolint ignore=DL3015 +RUN set -ex; \ + if [ "$ASSET" = "remote" ] ; then \ + VERSION=$(grep '^VERSION_ID' /etc/os-release | cut -d = -f 2 | sed -e 's/^"//' -e 's/"$//' | cut -d . 
-f 1) \ + && DOWNLOAD_URL="https://download.konghq.com/gateway-${KONG_VERSION%%.*}.x-rhel-$VERSION/Packages/k/kong-$KONG_VERSION.rhel$VERSION.amd64.rpm" \ + && curl -fL $DOWNLOAD_URL -o /tmp/kong.rpm \ + && echo "$KONG_SHA256 /tmp/kong.rpm" | sha256sum -c -; \ + fi \ + && yum install -y /tmp/kong.rpm \ + && rm /tmp/kong.rpm \ + && chown kong:0 /usr/local/bin/kong \ + && chown -R kong:0 /usr/local/kong \ + && ln -s /usr/local/openresty/bin/resty /usr/local/bin/resty \ + && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/luajit \ + && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/lua \ + && ln -s /usr/local/openresty/nginx/sbin/nginx /usr/local/bin/nginx \ + && kong version + +COPY docker-entrypoint.sh /docker-entrypoint.sh + +USER kong + +ENTRYPOINT ["/docker-entrypoint.sh"] + +EXPOSE 8000 8443 8001 8444 $EE_PORTS + +STOPSIGNAL SIGQUIT + +HEALTHCHECK --interval=10s --timeout=10s --retries=10 CMD kong health + +CMD ["kong", "docker-start"] diff --git a/docker-kong_v2.8.1/LICENSE b/docker-kong_v2.8.1/LICENSE new file mode 100644 index 0000000..7f21d9a --- /dev/null +++ b/docker-kong_v2.8.1/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2018 Kong Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/docker-kong_v2.8.1/Makefile b/docker-kong_v2.8.1/Makefile new file mode 100644 index 0000000..112fbff --- /dev/null +++ b/docker-kong_v2.8.1/Makefile @@ -0,0 +1,25 @@ +# DO NOT update KONG_BUILD_TOOLS manually - it's set by update.sh +# to ensure same version is used here and in the respective kong version +KONG_BUILD_TOOLS?=4.25.3 +PACKAGE?=apk +BASE?=alpine +ASSET_LOCATION?=remote + +build: + docker build --no-cache -t kong-$(BASE) $(BASE)/ + +build_v2: + docker build --no-cache --build-arg ASSET=$(ASSET_LOCATION) -t kong-$(PACKAGE) -f Dockerfile.$(PACKAGE) . 
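+# For example, to build the Debian package variant from the remote asset
+# (these are the variables defined above, with non-default values):
+#   make build_v2 PACKAGE=deb ASSET_LOCATION=remote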
+ +.PHONY: test +test: + if cd kong-build-tools; \ + then git pull; \ + else git clone https://github.com/Kong/kong-build-tools.git; fi + cd kong-build-tools && git reset --hard $(KONG_BUILD_TOOLS) + BASE=$(BASE) ./tests/test.sh --suite "Docker-Kong test suite" + +release-rhel: build + echo $$RHEL_REGISTRY_KEY | docker login -u unused scan.connect.redhat.com --password-stdin + docker tag kong-rhel scan.connect.redhat.com/ospid-dd198cd0-ed8b-41bd-9c18-65fd85059d31/kong:$$TAG + docker push scan.connect.redhat.com/ospid-dd198cd0-ed8b-41bd-9c18-65fd85059d31/kong:$$TAG diff --git a/docker-kong_v2.8.1/README.md b/docker-kong_v2.8.1/README.md new file mode 100644 index 0000000..6c4fb45 --- /dev/null +++ b/docker-kong_v2.8.1/README.md @@ -0,0 +1,53 @@ +![Build Status](https://github.com/kong/docker-kong/actions/workflows/test.yml/badge.svg) + +# About this Repo + +This is the Git repo of the Docker +[official image](https://docs.docker.com/docker-hub/official_repos/) for +[kong](https://registry.hub.docker.com/_/kong/). +See [the Docker Hub page](https://registry.hub.docker.com/_/kong/) +for the full readme on how to use this Docker image and for information +regarding contributing and issues. + +The full readme is generated over in [docker-library/docs](https://github.com/docker-library/docs), +specifically in [docker-library/docs/kong](https://github.com/docker-library/docs/tree/master/kong). + +See a change merged here that doesn't show up on the Docker Hub yet? +Check [the "library/kong" manifest file in the docker-library/official-images +repo](https://github.com/docker-library/official-images/blob/master/library/kong), +especially [PRs with the "library/kong" label on that +repo](https://github.com/docker-library/official-images/labels/library%2Fkong). For more information about the official images process, see the [docker-library/official-images readme](https://github.com/docker-library/official-images/blob/master/README.md). + +# For Kong developers + +## Pushing a Kong patch release (x.y.Z) update + +If the update does not require changes to the Dockerfiles other than +pointing to the latest Kong code, the process can be semi-automated as follows: + +1. Check out this repository. + +2. Run `./update.sh x.y.z` + + This will create a release branch, modify the relevant files automatically, + give you a chance to review the changes and press "y", then + it will push the branch and open a browser with the PR + to this repository. + +3. Peer review, run CI and merge the submitted PR. + +4. Run `./submit.sh -p x.y.z` + + Once the internal PR is merged, this script will do the same + for the [official-images](https://github.com/docker-library/official-images) + repository. It will clone [Kong's fork](https://github.com/kong/official-images), + create a branch, modify the relevant files automatically, + give you a chance to review the changes and press "y", then + it will push the branch and open a browser with the PR + to the docker-library repository. + +## Pushing a Kong minor release (x.Y.0) update + +Not semi-automated yet. Note that minor releases are more likely to require more +extensive changes to the Dockerfiles. 
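+
+For reference, the semi-automated patch-release flow described above might
+look like this end to end (the version number is illustrative):
+
+```shell
+$ ./update.sh 2.8.2      # branch, modify files, review, push, open the PR
+# ...peer review, CI, merge...
+$ ./submit.sh -p 2.8.2   # repeat the flow against Kong's official-images fork
+```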
+ diff --git a/docker-kong_v2.8.1/alpine/Dockerfile b/docker-kong_v2.8.1/alpine/Dockerfile new file mode 100644 index 0000000..6f7102a --- /dev/null +++ b/docker-kong_v2.8.1/alpine/Dockerfile @@ -0,0 +1,68 @@ +FROM alpine:3.16 + +LABEL maintainer="Kong " + +ARG ASSET=ce +ENV ASSET $ASSET + +ARG EE_PORTS + +# hadolint ignore=DL3010 +COPY kong.tar.gz /tmp/kong.tar.gz + +ARG KONG_VERSION=2.8.1 +ENV KONG_VERSION $KONG_VERSION + +ARG KONG_AMD64_SHA="ccda33bf02803b6b8dd46b22990f92265fe61d900ba94e3e0fa26db0433098c0" +ARG KONG_ARM64_SHA="d21690332a89adf9900f7266e083f41f565eb009f2771ef112f3564878eeff53" + +# hadolint ignore=DL3018 +RUN set -eux; \ + arch="$(apk --print-arch)"; \ + case "${arch}" in \ + x86_64) arch='amd64'; KONG_SHA256=$KONG_AMD64_SHA ;; \ + aarch64) arch='arm64'; KONG_SHA256=$KONG_ARM64_SHA ;; \ + esac; \ + if [ "$ASSET" = "ce" ] ; then \ + apk add --no-cache --virtual .build-deps curl wget tar ca-certificates \ + && curl -fL "https://download.konghq.com/gateway-${KONG_VERSION%%.*}.x-alpine/kong-$KONG_VERSION.$arch.apk.tar.gz" -o /tmp/kong.tar.gz \ + && echo "$KONG_SHA256 /tmp/kong.tar.gz" | sha256sum -c - \ + && apk del .build-deps; \ + else \ + # this needs to stay inside this "else" block so that it does not become part of the "official images" builds (https://github.com/docker-library/official-images/pull/11532#issuecomment-996219700) + apk upgrade; \ + fi; \ + mkdir /kong \ + && tar -C /kong -xzf /tmp/kong.tar.gz \ + && mv /kong/usr/local/* /usr/local \ + && mv /kong/etc/* /etc \ + && rm -rf /kong \ + && apk add --no-cache libstdc++ libgcc openssl pcre perl tzdata libcap zip bash zlib zlib-dev git ca-certificates \ + && adduser -S kong \ + && addgroup -S kong \ + && mkdir -p "/usr/local/kong" \ + && chown -R kong:0 /usr/local/kong \ + && chown kong:0 /usr/local/bin/kong \ + && chmod -R g=u /usr/local/kong \ + && rm -rf /tmp/kong.tar.gz \ + && ln -s /usr/local/openresty/bin/resty /usr/local/bin/resty \ + && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/luajit \ + && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/lua \ + && ln -s /usr/local/openresty/nginx/sbin/nginx /usr/local/bin/nginx \ + && if [ "$ASSET" = "ce" ] ; then \ + kong version; \ + fi + +COPY docker-entrypoint.sh /docker-entrypoint.sh + +USER kong + +ENTRYPOINT ["/docker-entrypoint.sh"] + +EXPOSE 8000 8443 8001 8444 $EE_PORTS + +STOPSIGNAL SIGQUIT + +HEALTHCHECK --interval=10s --timeout=10s --retries=10 CMD kong health + +CMD ["kong", "docker-start"] diff --git a/docker-kong_v2.8.1/alpine/docker-entrypoint.sh b/docker-kong_v2.8.1/alpine/docker-entrypoint.sh new file mode 100644 index 0000000..f37496e --- /dev/null +++ b/docker-kong_v2.8.1/alpine/docker-entrypoint.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash +set -Eeo pipefail + +# usage: file_env VAR [DEFAULT] +# ie: file_env 'XYZ_DB_PASSWORD' 'example' +# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of +# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature) +file_env() { + local var="$1" + local fileVar="${var}_FILE" + local def="${2:-}" + # Do not continue if _FILE env is not set + if ! 
[ "${!fileVar:-}" ]; then + return + elif [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then + echo >&2 "error: both $var and $fileVar are set (but are exclusive)" + exit 1 + fi + local val="$def" + if [ "${!var:-}" ]; then + val="${!var}" + elif [ "${!fileVar:-}" ]; then + val="$(< "${!fileVar}")" + fi + export "$var"="$val" + unset "$fileVar" +} + +export KONG_NGINX_DAEMON=${KONG_NGINX_DAEMON:=off} + +if [[ "$1" == "kong" ]]; then + + all_kong_options="/usr/local/share/lua/5.1/kong/templates/kong_defaults.lua" + set +Eeo pipefail + while IFS='' read -r LINE || [ -n "${LINE}" ]; do + opt=$(echo "$LINE" | grep "=" | sed "s/=.*$//" | sed "s/ //" | tr '[:lower:]' '[:upper:]') + file_env "KONG_$opt" + done < $all_kong_options + set -Eeo pipefail + + file_env KONG_PASSWORD + PREFIX=${KONG_PREFIX:=/usr/local/kong} + + if [[ "$2" == "docker-start" ]]; then + kong prepare -p "$PREFIX" "$@" + + ln -sf /dev/stdout $PREFIX/logs/access.log + ln -sf /dev/stdout $PREFIX/logs/admin_access.log + ln -sf /dev/stderr $PREFIX/logs/error.log + + exec /usr/local/openresty/nginx/sbin/nginx \ + -p "$PREFIX" \ + -c nginx.conf + fi +fi + +exec "$@" diff --git a/docker-kong_v2.8.1/alpine/kong.tar.gz b/docker-kong_v2.8.1/alpine/kong.tar.gz new file mode 100644 index 0000000..e69de29 diff --git a/docker-kong_v2.8.1/centos/Dockerfile b/docker-kong_v2.8.1/centos/Dockerfile new file mode 100644 index 0000000..e94e796 --- /dev/null +++ b/docker-kong_v2.8.1/centos/Dockerfile @@ -0,0 +1,57 @@ +FROM centos:8 +LABEL maintainer="Kong " + +ARG ASSET=ce +ENV ASSET $ASSET + +ARG EE_PORTS + +COPY kong.rpm /tmp/kong.rpm + +ARG KONG_VERSION=2.7.1 +ENV KONG_VERSION $KONG_VERSION + +ARG KONG_SHA256="d3769c15297d1b1b20cf684792a664ac851977b2c466f2776f2ae705708539e6" + +# hadolint ignore=DL3033 +RUN set -ex; \ + if [ "$ASSET" = "ce" ] ; then \ + curl -fL https://download.konghq.com/gateway-${KONG_VERSION%%.*}.x-centos-8/Packages/k/kong-$KONG_VERSION.el8.amd64.rpm -o /tmp/kong.rpm \ + && echo "$KONG_SHA256 /tmp/kong.rpm" | sha256sum -c -; \ + else \ + # this needs to stay inside this "else" block so that it does not become part of the "official images" builds (https://github.com/docker-library/official-images/pull/11532#issuecomment-996219700) + yum update -y \ + && yum upgrade -y ; \ + fi; \ + yum install -y -q unzip shadow-utils git \ + && yum clean all -q \ + && rm -fr /var/cache/yum/* /tmp/yum_save*.yumtx /root/.pki \ + # Please update the centos install docs if the below line is changed so that + # end users can properly install Kong along with its required dependencies + # and that our CI does not diverge from our docs. 
+ && yum install -y /tmp/kong.rpm \ + && yum clean all \ + && rm /tmp/kong.rpm \ + && chown kong:0 /usr/local/bin/kong \ + && chown -R kong:0 /usr/local/kong \ + && ln -s /usr/local/openresty/bin/resty /usr/local/bin/resty \ + && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/luajit \ + && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/lua \ + && ln -s /usr/local/openresty/nginx/sbin/nginx /usr/local/bin/nginx \ + && if [ "$ASSET" = "ce" ] ; then \ + kong version; \ + fi + +COPY docker-entrypoint.sh /docker-entrypoint.sh + +USER kong + +ENTRYPOINT ["/docker-entrypoint.sh"] + +EXPOSE 8000 8443 8001 8444 $EE_PORTS + +STOPSIGNAL SIGQUIT + +HEALTHCHECK --interval=10s --timeout=10s --retries=10 CMD kong health + +CMD ["kong", "docker-start"] diff --git a/docker-kong_v2.8.1/centos/docker-entrypoint.sh b/docker-kong_v2.8.1/centos/docker-entrypoint.sh new file mode 100644 index 0000000..f37496e --- /dev/null +++ b/docker-kong_v2.8.1/centos/docker-entrypoint.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash +set -Eeo pipefail + +# usage: file_env VAR [DEFAULT] +# ie: file_env 'XYZ_DB_PASSWORD' 'example' +# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of +# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature) +file_env() { + local var="$1" + local fileVar="${var}_FILE" + local def="${2:-}" + # Do not continue if _FILE env is not set + if ! [ "${!fileVar:-}" ]; then + return + elif [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then + echo >&2 "error: both $var and $fileVar are set (but are exclusive)" + exit 1 + fi + local val="$def" + if [ "${!var:-}" ]; then + val="${!var}" + elif [ "${!fileVar:-}" ]; then + val="$(< "${!fileVar}")" + fi + export "$var"="$val" + unset "$fileVar" +} + +export KONG_NGINX_DAEMON=${KONG_NGINX_DAEMON:=off} + +if [[ "$1" == "kong" ]]; then + + all_kong_options="/usr/local/share/lua/5.1/kong/templates/kong_defaults.lua" + set +Eeo pipefail + while IFS='' read -r LINE || [ -n "${LINE}" ]; do + opt=$(echo "$LINE" | grep "=" | sed "s/=.*$//" | sed "s/ //" | tr '[:lower:]' '[:upper:]') + file_env "KONG_$opt" + done < $all_kong_options + set -Eeo pipefail + + file_env KONG_PASSWORD + PREFIX=${KONG_PREFIX:=/usr/local/kong} + + if [[ "$2" == "docker-start" ]]; then + kong prepare -p "$PREFIX" "$@" + + ln -sf /dev/stdout $PREFIX/logs/access.log + ln -sf /dev/stdout $PREFIX/logs/admin_access.log + ln -sf /dev/stderr $PREFIX/logs/error.log + + exec /usr/local/openresty/nginx/sbin/nginx \ + -p "$PREFIX" \ + -c nginx.conf + fi +fi + +exec "$@" diff --git a/docker-kong_v2.8.1/centos/kong.rpm b/docker-kong_v2.8.1/centos/kong.rpm new file mode 100644 index 0000000..e69de29 diff --git a/docker-kong_v2.8.1/compose/Makefile b/docker-kong_v2.8.1/compose/Makefile new file mode 100644 index 0000000..b9a6398 --- /dev/null +++ b/docker-kong_v2.8.1/compose/Makefile @@ -0,0 +1,9 @@ +kong-postgres: + COMPOSE_PROFILES=database KONG_DATABASE=postgres docker-compose up -d + +kong-dbless: + docker-compose up -d + +clean: + docker-compose kill + docker-compose rm -f diff --git a/docker-kong_v2.8.1/compose/POSTGRES_PASSWORD b/docker-kong_v2.8.1/compose/POSTGRES_PASSWORD new file mode 100644 index 0000000..c6964ec --- /dev/null +++ b/docker-kong_v2.8.1/compose/POSTGRES_PASSWORD @@ -0,0 +1 @@ +kong \ No newline at end of file diff --git a/docker-kong_v2.8.1/compose/README.md b/docker-kong_v2.8.1/compose/README.md new file mode 100644 index 0000000..4d7016a --- /dev/null +++ b/docker-kong_v2.8.1/compose/README.md @@ -0,0 +1,41 @@ +# Kong in Docker Compose + 
+This is the official Docker Compose template for [Kong][kong-site-url]. + +# What is Kong? + +You can find the official Docker distribution for Kong at [https://hub.docker.com/_/kong](https://hub.docker.com/_/kong). + +# How to use this template + +This Docker Compose template provisions a Kong container with a Postgres database, plus an nginx load-balancer. After running the template, the `nginx-lb` load-balancer will be the entrypoint to Kong. + +To run this template execute: + +```shell +$ docker-compose up +``` + +To scale Kong (i.e., to three instances) execute: + +```shell +$ docker-compose scale kong=3 +``` + +Kong will be available through the `nginx-lb` instance on ports `8000` and `8001`. You can customize the template with your own environment variables or datastore configuration. + +Kong's documentation can be found at [https://docs.konghq.com/][kong-docs-url]. + +## Issues + +If you have any problems with or questions about this image, please contact us through a [GitHub issue][github-new-issue]. + +## Contributing + +You are invited to contribute new features, fixes, or updates, large or small; we are always thrilled to receive pull requests, and do our best to process them as fast as we can. + +Before you start to code, we recommend discussing your plans through a [GitHub issue][github-new-issue], especially for more ambitious contributions. This gives other contributors a chance to point you in the right direction, give you feedback on your design, and help you find out if someone else is working on the same thing. + +[kong-site-url]: https://konghq.com/ +[kong-docs-url]: https://docs.konghq.com/ +[github-new-issue]: https://github.com/Kong/docker-kong/issues/new diff --git a/docker-kong_v2.8.1/compose/config/kong.yaml b/docker-kong_v2.8.1/compose/config/kong.yaml new file mode 100644 index 0000000..c96cf73 --- /dev/null +++ b/docker-kong_v2.8.1/compose/config/kong.yaml @@ -0,0 +1,3 @@ +# a very minimal declarative config file +_format_version: "2.1" +_transform: true diff --git a/docker-kong_v2.8.1/compose/docker-compose.yml b/docker-kong_v2.8.1/compose/docker-compose.yml new file mode 100644 index 0000000..f426aa4 --- /dev/null +++ b/docker-kong_v2.8.1/compose/docker-compose.yml @@ -0,0 +1,121 @@ +version: '3.9' + +x-kong-config: &kong-env + KONG_DATABASE: ${KONG_DATABASE:-off} + KONG_PG_DATABASE: ${KONG_PG_DATABASE:-kong} + KONG_PG_HOST: db + KONG_PG_USER: ${KONG_PG_USER:-kong} + KONG_PG_PASSWORD_FILE: /run/secrets/kong_postgres_password + +volumes: + kong_data: {} + kong_prefix_vol: + driver_opts: + type: tmpfs + device: tmpfs + kong_tmp_vol: + driver_opts: + type: tmpfs + device: tmpfs + +networks: + kong-net: + external: false + +services: + kong-migrations: + image: "${KONG_DOCKER_TAG:-kong:latest}" + command: kong migrations bootstrap + profiles: ["database"] + depends_on: + - db + environment: + <<: *kong-env + secrets: + - kong_postgres_password + networks: + - kong-net + restart: on-failure + + kong-migrations-up: + image: "${KONG_DOCKER_TAG:-kong:latest}" + command: kong migrations up && kong migrations finish + profiles: ["database"] + depends_on: + - db + environment: + <<: *kong-env + secrets: + - kong_postgres_password + networks: + - kong-net + restart: on-failure + + kong: + image: "${KONG_DOCKER_TAG:-kong:latest}" + user: "${KONG_USER:-kong}" + environment: + <<: *kong-env + KONG_ADMIN_ACCESS_LOG: /dev/stdout + KONG_ADMIN_ERROR_LOG: /dev/stderr + KONG_PROXY_LISTEN: "${KONG_PROXY_LISTEN:-0.0.0.0:8000}" + KONG_ADMIN_LISTEN: 
"${KONG_ADMIN_LISTEN:-0.0.0.0:8001}" + KONG_PROXY_ACCESS_LOG: /dev/stdout + KONG_PROXY_ERROR_LOG: /dev/stderr + KONG_PREFIX: ${KONG_PREFIX:-/var/run/kong} + KONG_DECLARATIVE_CONFIG: "/opt/kong/kong.yaml" + secrets: + - kong_postgres_password + networks: + - kong-net + ports: + # The following two environment variables default to an insecure value (0.0.0.0) + # according to the CIS Security test. + - "${KONG_INBOUND_PROXY_LISTEN:-0.0.0.0}:8000:8000/tcp" + - "${KONG_INBOUND_SSL_PROXY_LISTEN:-0.0.0.0}:8443:8443/tcp" + # Making them mandatory but undefined, like so would be backwards-breaking: + # - "${KONG_INBOUND_PROXY_LISTEN?Missing inbound proxy host}:8000:8000/tcp" + # - "${KONG_INBOUND_SSL_PROXY_LISTEN?Missing inbound proxy ssl host}:8443:8443/tcp" + # Alternative is deactivating check 5.13 in the security bench, if we consider Kong's own config to be enough security here + + - "127.0.0.1:8001:8001/tcp" + - "127.0.0.1:8444:8444/tcp" + healthcheck: + test: ["CMD", "kong", "health"] + interval: 10s + timeout: 10s + retries: 10 + restart: on-failure:5 + read_only: true + volumes: + - kong_prefix_vol:${KONG_PREFIX:-/var/run/kong} + - kong_tmp_vol:/tmp + - ./config:/opt/kong + security_opt: + - no-new-privileges + + db: + image: postgres:9.5 + profiles: ["database"] + environment: + POSTGRES_DB: ${KONG_PG_DATABASE:-kong} + POSTGRES_USER: ${KONG_PG_USER:-kong} + POSTGRES_PASSWORD_FILE: /run/secrets/kong_postgres_password + secrets: + - kong_postgres_password + healthcheck: + test: ["CMD", "pg_isready", "-U", "${KONG_PG_USER:-kong}"] + interval: 30s + timeout: 30s + retries: 3 + restart: on-failure + stdin_open: true + tty: true + networks: + - kong-net + volumes: + - kong_data:/var/lib/postgresql/data + +secrets: + kong_postgres_password: + file: ./POSTGRES_PASSWORD diff --git a/docker-kong_v2.8.1/customize/Dockerfile b/docker-kong_v2.8.1/customize/Dockerfile new file mode 100644 index 0000000..872cf9d --- /dev/null +++ b/docker-kong_v2.8.1/customize/Dockerfile @@ -0,0 +1,56 @@ +ARG KONG_BASE=kong:latest + +FROM ${KONG_BASE} AS build + +ARG PLUGINS +ENV INJECTED_PLUGINS=${PLUGINS} + +ARG TEMPLATE=empty_file +ENV TEMPLATE=${TEMPLATE} + +ARG ROCKS_DIR=empty_file +ENV ROCKS_DIR=${ROCKS_DIR} + +ARG KONG_LICENSE_DATA +ENV KONG_LICENSE_DATA=${KONG_LICENSE_DATA} + +COPY $TEMPLATE /custom_nginx.conf +COPY $ROCKS_DIR /rocks-server +COPY packer.lua /packer.lua + +USER root + +RUN /usr/local/openresty/luajit/bin/luajit /packer.lua -- "$INJECTED_PLUGINS" + +FROM ${KONG_BASE} + +USER root + +# Workarounds used: +# 1 - the "custom_nginx.conf*" wildcard ensures it doesn't fail if it doesn't exist +# 2 - "RUN true", see https://stackoverflow.com/questions/51115856/docker-failed-to-export-image-failed-to-create-image-failed-to-get-layer/62409523#62409523 + +# replace the entrypoint, add the custom template if it exists +COPY --from=build /docker-entrypoint.sh /old-entrypoint.sh /custom_nginx.conf* / +RUN true + +# add the Lua files (.lua) +COPY --from=build /usr/local/share/lua/5.1 /usr/local/share/lua/5.1 +RUN true + +# add the compiled libraries (.so) +COPY --from=build /usr/local/lib/lua/5.1 /usr/local/lib/lua/5.1 +RUN true + +# add the Luarocks manifest +COPY --from=build /usr/local/lib/luarocks/rocks-5.1 /usr/local/lib/luarocks/rocks-5.1 +RUN true + +# add any Lua commandline scripts +# TODO: should commandline scripts be copied at all? 
+#COPY --from=build /usr/local/bin /usr/local/bin + + +HEALTHCHECK --interval=10s --timeout=10s --retries=10 CMD kong health + +USER kong diff --git a/docker-kong_v2.8.1/customize/empty_file b/docker-kong_v2.8.1/customize/empty_file new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/docker-kong_v2.8.1/customize/empty_file @@ -0,0 +1 @@ + diff --git a/docker-kong_v2.8.1/customize/example.sh b/docker-kong_v2.8.1/customize/example.sh new file mode 100644 index 0000000..0b685eb --- /dev/null +++ b/docker-kong_v2.8.1/customize/example.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# pick a plugin repo, and pack the rock, and its dependencies (clear first) +# because we use a local LuaRocks repo, we also need the dependencies in there +# since the public one will not be available +pushd ~/code/kong-plugin-enterprise-request-validator +luarocks remove kong-plugin-request-validator --force +luarocks remove net-url --force +luarocks remove lua-resty-ljsonschema --force +luarocks make +luarocks pack kong-plugin-request-validator +luarocks pack net-url +luarocks pack lua-resty-ljsonschema +popd + +# create a LuaRocks repo, and copy the rocks in there. This directory will be +# used as the base LuaRocks server we're installing from. These, and only these, +# rocks can be installed. +rm -rf ./rocksdir +mkdir ./rocksdir +mv ~/code/kong-plugin-enterprise-request-validator/*.rock ./rocksdir/ + +#build the custom image +docker build \ + --build-arg "KONG_LICENSE_DATA=$KONG_LICENSE_DATA" \ + --build-arg KONG_BASE="kong-ee" \ + --build-arg PLUGINS="kong-plugin-request-validator" \ + --build-arg ROCKS_DIR="./rocksdir" \ + --tag "your_new_image" . + + diff --git a/docker-kong_v2.8.1/customize/packer.lua b/docker-kong_v2.8.1/customize/packer.lua new file mode 100644 index 0000000..ad936a2 --- /dev/null +++ b/docker-kong_v2.8.1/customize/packer.lua @@ -0,0 +1,387 @@ +local split = require("pl.utils").split +local pretty = require("pl.pretty").write +local strip = require("pl.stringx").strip +local lines = require("pl.stringx").splitlines +local _execex = require("pl.utils").executeex +local _exec = require("pl.utils").execute +local directories = require("pl.dir").getdirectories +local writefile = require("pl.utils").writefile +local readfile = require("pl.utils").readfile +local is_dir = require("pl.path").isdir +local is_file = require("pl.path").isfile + +local CUSTOM_TEMPLATE="/custom_nginx.conf" + +io.stdout:setvbuf("no") +io.stderr:setvbuf("no") + + +local function stderr(...) + io.stderr:write(...) + io.stderr:write("\n") +end + + +local function stdout(...) + io.stdout:write(...) + io.stdout:write("\n") +end + + +local function fail(msg) + stderr(msg) + os.exit(1) +end + + +local function header(msg) + local fill1 = math.floor((80 - 2 - #msg)/2) + local fill2 = 80 - 2 - #msg - fill1 + stdout( + ("*"):rep(80).."\n".. + "*"..(" "):rep(fill1)..msg..(" "):rep(fill2).."*\n".. 
+ ("*"):rep(80) + ) +end + + +local platforms = { + { + check = "apk -V", -- check for alpine + commands = { -- run before anything else in build container + "apk update", + "apk add git", + "apk add wget", + "apk add zip", + "apk add gcc", + "apk add musl-dev", + }, + }, { + check = "yum --version", -- check for rhel + commands = { -- run before anything else in build container + "yum -y install git", + "yum -y install unzip", + "yum -y install zip", + "yum -y install gcc gcc-c++ make", + }, + }, { + check = "apt -v", -- check for Ubuntu + commands = { -- run before anything else in build container + "apt update", + "apt install -y zip", + "apt install -y wget", + "apt install -y build-essential", + }, + }, +} + + +local execex = function(cmd, ...) + stdout("\027[32m", "[packer exec] ", cmd, "\027[0m") + return _execex(cmd, ...) +end + + +local exec = function(cmd, ...) + stdout("\027[32m", "[packer exec] ", cmd, "\027[0m") + return _exec(cmd, ...) +end + + +local function prep_platform() + for _, platform in ipairs(platforms) do + local ok = exec(platform.check) + if not ok then + stdout(("platform test '%s' was negative"):format(platform.check)) + else + stdout(("platform test '%s' was positive"):format(platform.check)) + for _, cmd in ipairs(platform.commands) do + stdout(cmd) + ok = exec(cmd) + if not ok then + fail(("failed executing '%s'"):format(cmd)) + end + end + return true + end + end + stderr("WARNING: no platform match!") +end + + +local function is_empty_file(filename) + local t = readfile(filename) + if t then + if t:gsub("\n", ""):gsub("\t", ""):gsub(" ","") == "" then + return true + end + end + return false +end + + +local function get_args() + if not arg or + not arg[1] or + arg[1] == "--" and not arg[2] then + -- no args, but maybe a custom config file? + + if is_empty_file(CUSTOM_TEMPLATE) then + fail("no arguments to parse, commandline: " .. pretty(arg or {})) + else + stdout("no plugins specified, but a custom template exists") + return + end + end + + local list = {} + for i = 1, #arg do + if arg[i] and arg[i] ~= "--" then + local sp = split(arg[i], ",") + for n = 1, #sp do + local rock = strip(sp[n]) + if rock ~= "" then + table.insert(list, rock) + end + end + end + end + + if #list == 0 then + if is_empty_file(CUSTOM_TEMPLATE) then + fail("no arguments to parse, commandline: " .. pretty(arg)) + else + stdout("no plugins specified, but a custom template exists") + end + end + + stdout("rocks to install: " .. pretty(list)) + return list +end + + +local function get_plugins() + local plugins = {} + local cnt = 0 + + for i = 1, 2 do + local pattern, paths, extension + if i == 1 then + pattern = "%?%.lua$" + extension = ".lua" + paths = split(package.path, ";") + else + pattern = "%?%.so$" + extension = ".so" + paths = split(package.cpath, ";") + end + + for _, path in ipairs(paths) do + path = path:gsub(pattern, "kong/plugins/") + if is_dir(path) then + for _, dir in ipairs(directories(path)) do + local plugin_name = dir:sub(#path + 1, -1) + if is_file(dir .. "/handler" .. 
extension) then + plugins[plugin_name] = true + cnt = cnt + 1 + end + end + end + end + end + + stdout("Found ", cnt, " plugins installed") + return plugins +end + + +local function get_rocks() + local cmd = "luarocks list --tree=system --porcelain" + local ok, _, sout, serr = execex(cmd) + if not ok then + fail(("failed to retrieve list of installed rocks: '%s' failed with\n%s\n%s"):format( + cmd, sout, serr)) + end + + local rocks = {} + local cnt = 0 + for _, rock in ipairs(lines(sout)) do + cnt = cnt + 1 + local name, spec = rock:match("^(.-)\t(.-)\t") + local rock_id = name.."-"..spec + rocks[rock_id] = { name = name, spec = spec } + end + stdout("Found ", cnt, " rocks installed") + return rocks +end + + +local function install_plugins(plugins, lr_flag) + local cmd = "luarocks install --tree=system %s " .. lr_flag + for _, rock in ipairs(plugins) do + stdout(cmd:format(rock)) + + local ok = exec(cmd:format(rock)) + if not ok then + fail(("failed installing rock: '%s' failed"):format(cmd:format(rock))) + end + + stdout("installed: "..rock) + exec("luarocks show "..rock) + end +end + + +local function check_custom_template() + if is_empty_file(CUSTOM_TEMPLATE) then + -- it's the empty_file, delete it + os.remove(CUSTOM_TEMPLATE) + stdout("No custom template found") + return + end + stdout("Found a custom template") +end + + +local function start_rocks_server() + if is_empty_file("/rocks-server") then + stdout("No custom rocks found, using public luarocks.org as server") + return "" + end + assert(exec("luarocks-admin make_manifest /rocks-server")) + stdout("Local LuaRocks server manifest created") + assert(exec("mkdir /nginx")) + assert(exec("mkdir /nginx/logs")) + assert(writefile("/nginx/nginx.conf", [[ +events { +} + +http { + server { + listen 127.0.0.1:8080; + + location / { + root /rocks-server; + } + } +} +]])) + assert(exec("touch /nginx/logs/error.log")) + assert(exec("/usr/local/openresty/nginx/sbin/nginx " .. + "-c /nginx/nginx.conf " .. 
+ "-p /nginx")) + stdout("Nginx started as local LuaRocks server") + stdout("List of locally available rocks:") + assert(exec("luarocks search --all --porcelain --only-server=http://localhost:8080")) + return " --only-server=http://localhost:8080 " +end + + +-- ********************************************************** +-- Do the actual work +-- ********************************************************** +header("Set up platform") +prep_platform() + +header("Set up LuaRocks server") +local lr_flag = start_rocks_server() + +header("Get arguments") +local rocks = get_args() + + +header("Get existing rocks") +local pre_installed_rocks = get_rocks() + + +header("Get existing plugin list") +local pre_installed_plugins = get_plugins() + + +header("Getting custom template") +check_custom_template() + + +header("Install the requested plugins") +install_plugins(rocks, lr_flag) + + +header("Get post-install rocks list and get the delta") +local added_rocks +do + local post_installed_rocks = get_rocks() + for k in pairs(pre_installed_rocks) do + if post_installed_rocks[k] then + post_installed_rocks[k] = nil -- remove the ones we already had + end + end + added_rocks = post_installed_rocks +end +if (not next(added_rocks)) then + if is_empty_file(CUSTOM_TEMPLATE) then + fail("no additional rocks were added, nor a custom template specified") + end + stdout("No rocks were added") +else + for k in pairs(added_rocks) do + stdout("added rock: "..k) + end +end + + +header("Get post-install plugin list and get the delta") +local plugins = {} +for plugin_name in pairs(get_plugins()) do + if not pre_installed_plugins[plugin_name] then + table.insert(plugins, plugin_name) + stdout("added plugin: "..plugin_name) + end +end +if not next(plugins) then + stdout("No plugins were added") +end + + +header("Write new entry-point script") +assert(exec("mv /docker-entrypoint.sh /old-entrypoint.sh")) +local entrypoint = [=[ +#!/bin/sh +set -e + +if [ "$KONG_PLUGINS" = "" ]; then + KONG_PLUGINS="bundled" +fi +# replace 'bundled' with the new set, including the custom ones +export KONG_PLUGINS=$(echo ",$KONG_PLUGINS," | sed "s/,bundled,/,bundled,%s,/" | sed 's/^,//' | sed 's/,$//') + +# prefix the custom template option, since the last one on the command line +# wins, so the user can still override this template +INITIAL="$1 $2" +if [ -f /custom_nginx.conf ]; then + # only for these commands support "--nginx-conf" + echo 1: $INITIAL + if [ "$INITIAL" = "kong prepare" ] || \ + [ "$INITIAL" = "kong reload" ] || \ + [ "$INITIAL" = "kong restart" ] || \ + [ "$INITIAL" = "kong start" ] ; then + INITIAL="$1 $2 --nginx-conf=/custom_nginx.conf" + fi +fi +# shift 1 by 1; if there is only 1 arg, then "shift 2" fails +if [ ! "$1" = "" ]; then + shift +fi +if [ ! "$1" = "" ]; then + shift +fi + +exec /old-entrypoint.sh $INITIAL "$@" +]=] +entrypoint = entrypoint:format(table.concat(plugins, ",")) +assert(writefile("/docker-entrypoint.sh", entrypoint)) +assert(exec("chmod +x /docker-entrypoint.sh")) +stdout(entrypoint) + + +header("Completed building plugins, rocks and/or template") + diff --git a/docker-kong_v2.8.1/customize/readme.md b/docker-kong_v2.8.1/customize/readme.md new file mode 100644 index 0000000..1e5a75e --- /dev/null +++ b/docker-kong_v2.8.1/customize/readme.md @@ -0,0 +1,62 @@ +# Customize Kong by injecting plugins and templates + +This dockerfile takes an existing Kong image and adds custom plugins +and/or a custom template file to it. 
+
+```
+docker build \
+    --build-arg KONG_BASE="kong:0.14.1-alpine" \
+    --build-arg PLUGINS="kong-http-to-https,kong-upstream-jwt" \
+    --build-arg TEMPLATE="/mykong/nginx.conf" \
+    --build-arg "KONG_LICENSE_DATA=$KONG_LICENSE_DATA" \
+    --tag "your_new_image" .
+```
+
+The above command takes the `kong:0.14.1-alpine` image and adds the plugins
+(as known on [luarocks.org](https://luarocks.org)) `kong-http-to-https` and
+`kong-upstream-jwt` to it. It also injects the custom template ([used for rendering the
+underlying nginx configuration file](https://docs.konghq.com/latest/configuration/#custom-nginx-templates--embedding-kong))
+located at `/mykong/nginx.conf`.
+The resulting new image will be tagged as `your_new_image`.
+
+When starting a container from the newly created image, the added plugins and
+template will automatically be applied, so there is no need to specify the
+environment variable `KONG_PLUGINS` or the `--nginx-conf` command line
+switch to enable them.
+
+# Curated list of plugins
+
+This tool relies on the LuaRocks package manager to include all plugin
+dependencies. The `ROCKS_DIR` variable allows you to install from a curated
+list of rocks only (instead of the public ones).
+
+It will generate a local LuaRocks server and will not allow any public ones to
+be used. For an example of how to use it, see the `example.sh` script.
+
+## Arguments:
+
+ - `KONG_BASE` the base image to use, defaults to `kong:latest`.
+ - `PLUGINS` a comma-separated list of the plugin names (NOT rock files!) that you wish to add to the image. All
+   dependencies will also be installed.
+ - `ROCKS_DIR` a local directory where the allowed plugins/rocks are located. If
+   specified, only rocks from this location will be allowed to be installed. If
+   not specified, then the public `luarocks.org` server is used.
+ - `TEMPLATE` the custom configuration template to use.
+ - `KONG_LICENSE_DATA` this is required when the base image is an Enterprise
+   version of Kong.
+
+Note that the `PLUGINS` entries are simply LuaRocks commands used as:
+`luarocks install <plugin>`. So anything that LuaRocks accepts can be added
+there, including command-line options. For example:
+
+```
+--build-arg PLUGINS="luassert --deps-mode=none"
+```
+
+This will add the `luassert` module without resolving dependencies (not useful
+in itself, but it demonstrates how the mechanism works).
+
+
+## Limitations
+
+- Only works for pure-Lua modules for now.
diff --git a/docker-kong_v2.8.1/docker-entrypoint.sh b/docker-kong_v2.8.1/docker-entrypoint.sh
new file mode 100644
index 0000000..f37496e
--- /dev/null
+++ b/docker-kong_v2.8.1/docker-entrypoint.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+set -Eeo pipefail
+
+# usage: file_env VAR [DEFAULT]
+# ie: file_env 'XYZ_DB_PASSWORD' 'example'
+# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of
+# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature)
+file_env() {
+  local var="$1"
+  local fileVar="${var}_FILE"
+  local def="${2:-}"
+  # Do not continue if _FILE env is not set
+  if !
[ "${!fileVar:-}" ]; then + return + elif [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then + echo >&2 "error: both $var and $fileVar are set (but are exclusive)" + exit 1 + fi + local val="$def" + if [ "${!var:-}" ]; then + val="${!var}" + elif [ "${!fileVar:-}" ]; then + val="$(< "${!fileVar}")" + fi + export "$var"="$val" + unset "$fileVar" +} + +export KONG_NGINX_DAEMON=${KONG_NGINX_DAEMON:=off} + +if [[ "$1" == "kong" ]]; then + + all_kong_options="/usr/local/share/lua/5.1/kong/templates/kong_defaults.lua" + set +Eeo pipefail + while IFS='' read -r LINE || [ -n "${LINE}" ]; do + opt=$(echo "$LINE" | grep "=" | sed "s/=.*$//" | sed "s/ //" | tr '[:lower:]' '[:upper:]') + file_env "KONG_$opt" + done < $all_kong_options + set -Eeo pipefail + + file_env KONG_PASSWORD + PREFIX=${KONG_PREFIX:=/usr/local/kong} + + if [[ "$2" == "docker-start" ]]; then + kong prepare -p "$PREFIX" "$@" + + ln -sf /dev/stdout $PREFIX/logs/access.log + ln -sf /dev/stdout $PREFIX/logs/admin_access.log + ln -sf /dev/stderr $PREFIX/logs/error.log + + exec /usr/local/openresty/nginx/sbin/nginx \ + -p "$PREFIX" \ + -c nginx.conf + fi +fi + +exec "$@" diff --git a/docker-kong_v2.8.1/hadolint.yaml b/docker-kong_v2.8.1/hadolint.yaml new file mode 100644 index 0000000..1c6391b --- /dev/null +++ b/docker-kong_v2.8.1/hadolint.yaml @@ -0,0 +1,5 @@ +ignored: + - DL3008 + - DL3027 + - SC2046 + - DL4006 diff --git a/docker-kong_v2.8.1/kong.apk.tar.gz b/docker-kong_v2.8.1/kong.apk.tar.gz new file mode 100644 index 0000000..e69de29 diff --git a/docker-kong_v2.8.1/kong.deb b/docker-kong_v2.8.1/kong.deb new file mode 100644 index 0000000..e69de29 diff --git a/docker-kong_v2.8.1/kong.rpm b/docker-kong_v2.8.1/kong.rpm new file mode 100644 index 0000000..e69de29 diff --git a/docker-kong_v2.8.1/rhel/Dockerfile b/docker-kong_v2.8.1/rhel/Dockerfile new file mode 100644 index 0000000..de56d0a --- /dev/null +++ b/docker-kong_v2.8.1/rhel/Dockerfile @@ -0,0 +1,71 @@ +ARG RHEL_VERSION=7 + +FROM registry.access.redhat.com/ubi${RHEL_VERSION}/ubi + +MAINTAINER Kong + +ARG KONG_VERSION=2.8.1 +ENV KONG_VERSION $KONG_VERSION + +ARG KONG_SHA256="4f2d073122c97be80de301e6037d0913f15de1d8bb6eea2871542e9a4c164c72" + +LABEL name="Kong" \ + vendor="Kong" \ + version="$KONG_VERSION" \ + release="1" \ + url="https://konghq.com" \ + summary="Next-Generation API Platform for Modern Architectures" \ + description="Next-Generation API Platform for Modern Architectures" + +COPY LICENSE /licenses/ + +ARG ASSET=ce +ENV ASSET $ASSET + +ARG EE_PORTS + +COPY kong.rpm /tmp/kong.rpm + +ARG RHEL_VERSION +ENV RHEL_VERSION $RHEL_VERSION + +RUN set -ex; \ + if [ "$ASSET" = "ce" ] ; then \ + curl -fL "https://download.konghq.com/gateway-${KONG_VERSION%%.*}.x-rhel-$RHEL_VERSION/Packages/k/kong-$KONG_VERSION.rhel${RHEL_VERSION}.amd64.rpm" -o /tmp/kong.rpm \ + && echo "$KONG_SHA256 /tmp/kong.rpm" | sha256sum -c -; \ + else \ + yum update -y \ + && yum upgrade -y ; \ + fi; \ + yum install -y -q unzip shadow-utils \ + && yum clean all -q \ + && rm -fr /var/cache/yum/* /tmp/yum_save*.yumtx /root/.pki \ + # Please update the rhel install docs if the below line is changed so that + # end users can properly install Kong along with its required dependencies + # and that our CI does not diverge from our docs. 
+ && yum install -y /tmp/kong.rpm \ + && yum clean all \ + && rm /tmp/kong.rpm \ + && chown kong:0 /usr/local/bin/kong \ + && chown -R kong:0 /usr/local/kong \ + && ln -s /usr/local/openresty/bin/resty /usr/local/bin/resty \ + && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/luajit \ + && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/lua \ + && ln -s /usr/local/openresty/nginx/sbin/nginx /usr/local/bin/nginx \ + && if [ "$ASSET" = "ce" ] ; then \ + kong version ; \ + fi + +COPY docker-entrypoint.sh /docker-entrypoint.sh + +USER kong + +ENTRYPOINT ["/docker-entrypoint.sh"] + +EXPOSE 8000 8443 8001 8444 $EE_PORTS + +STOPSIGNAL SIGQUIT + +HEALTHCHECK --interval=10s --timeout=10s --retries=10 CMD kong health + +CMD ["kong", "docker-start"] diff --git a/docker-kong_v2.8.1/rhel/LICENSE b/docker-kong_v2.8.1/rhel/LICENSE new file mode 100644 index 0000000..7f21d9a --- /dev/null +++ b/docker-kong_v2.8.1/rhel/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2018 Kong Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/docker-kong_v2.8.1/rhel/docker-entrypoint.sh b/docker-kong_v2.8.1/rhel/docker-entrypoint.sh new file mode 100644 index 0000000..393a8a3 --- /dev/null +++ b/docker-kong_v2.8.1/rhel/docker-entrypoint.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash +set -Eeo pipefail + +# usage: file_env VAR [DEFAULT] +# ie: file_env 'XYZ_DB_PASSWORD' 'example' +# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of +# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature) +file_env() { + local var="$1" + local fileVar="${var}_FILE" + local def="${2:-}" + # Do not continue if _FILE env is not set + if ! [ "${!fileVar:-}" ]; then + return + elif [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then + echo >&2 "error: both $var and $fileVar are set (but are exclusive)" + exit 1 + fi + local val="$def" + if [ "${!var:-}" ]; then + val="${!var}" + elif [ "${!fileVar:-}" ]; then + val="$(< "${!fileVar}")" + fi + export "$var"="$val" + unset "$fileVar" +} + +export KONG_NGINX_DAEMON=${KONG_NGINX_DAEMON:=off} + +if [[ "$1" == "kong" ]]; then + + all_kong_options="/usr/local/share/lua/5.1/kong/templates/kong_defaults.lua" + set +Eeo pipefail + while IFS='' read -r LINE || [ -n "${LINE}" ]; do + opt=$(echo "$LINE" | grep "=" | sed "s/=.*$//" | sed "s/ //" | tr '[:lower:]' '[:upper:]') + file_env "KONG_$opt" + done < $all_kong_options + set -Eeo pipefail + + file_env KONG_PASSWORD + PREFIX=${KONG_PREFIX:=/usr/local/kong} + + if [[ "$2" == "docker-start" ]]; then + kong prepare -p "$PREFIX" + + ln -sf /dev/stdout $PREFIX/logs/access.log + ln -sf /dev/stdout $PREFIX/logs/admin_access.log + ln -sf /dev/stderr $PREFIX/logs/error.log + + exec /usr/local/openresty/nginx/sbin/nginx \ + -p "$PREFIX" \ + -c nginx.conf + fi +fi + +exec "$@" diff --git a/docker-kong_v2.8.1/rhel/kong.rpm b/docker-kong_v2.8.1/rhel/kong.rpm new file mode 100644 index 0000000..e69de29 diff --git a/docker-kong_v2.8.1/submit.sh b/docker-kong_v2.8.1/submit.sh new file mode 100644 index 0000000..e31d81d --- /dev/null +++ b/docker-kong_v2.8.1/submit.sh @@ -0,0 +1,257 @@ +#!/usr/bin/env bash +set -e + +gawk --version &> /dev/null || { + echo "gawk is required to run this script." 
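+    # (the library/kong rewrites below are driven by multi-line gawk programs)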
+ exit 1 +} + +mode= +version= +force= + +function usage() { + echo "usage: $0 <-p|-m> " + echo " -p for patch release (x.y.Z)" + echo " -m for minor release (x.Y.0)" + echo " -r for release candidate (x.Y.0rcZ)" + echo "example: $0 -p 1.1.2" +} + +while [ "$1" ] +do + case "$1" in + --help) + usage + exit 0 + ;; + -p) + mode=patch + ;; + -r) + mode=rc + ;; + -m) + mode=minor + ;; + -f) + force=yes + ;; + [0-9]*) + version=$1 + ;; + esac + shift +done + +function red() { + echo -e "\033[1;31m$@\033[0m" +} + +function die() { + red "*** $@" + echo "See also: $0 --help" + echo + exit 1 +} + +hub --version &> /dev/null || die "hub is not in PATH. Get it from https://github.com/github/hub" + +if [ "$mode" = "" ] +then + die "Error: mode flag is mandatory" +fi + +if ! [ "$version" ] +then + die "Error: missing version" +fi + +git checkout master +git pull + +if ! grep -q "$version" alpine/Dockerfile +then + if [[ "$force" = "yes" ]] + then + echo "Forcing to use the tag even though it is not in master." + + git checkout "$version" + + if ! grep -q "$version$" alpine/Dockerfile + then + die "Error: version in build script doesn't match required version." + fi + else + echo "****************************************" + echo "Error: this script should be run only after the" + echo "desired release is merged in master of docker-kong." + echo "" + echo "For making releases based on old versions," + echo "Use -f to override and submit from the tag anyway." + echo "****************************************" + die "Failed." + fi +fi + +xy=${version%.*} +z=${version#$xy.} + +if [ "$mode" = "rc" ] +then + rc=${version#*rc} + z=${z%rc*} +fi + +commit=$(git show "$version" | grep "^commit" | head -n 1 | cut -b8-48) + +if [ "$mode" = "patch" ] +then + prev="$xy.$[z-1]" + prevcommit=$(git show "$prev" | grep "^commit" | head -n 1 | cut -b8-48) +elif [ "$mode" = "rc" -a "$rc" -gt 1 ] +then + prev="$xy.${z}rc$[rc-1]" + prevcommit=$(git show "$prev" | grep "^commit" | head -n 1 | cut -b8-48) +fi + +rm -rf submit +mkdir submit +cd submit +git clone https://github.com/kong/official-images +cd official-images +git remote add upstream http://github.com/docker-library/official-images +git fetch upstream +git checkout master +git merge upstream/master + +git checkout -b release/$version + +if [ "$mode" = "patch" ] +then + sed "s|$prev-alpine|$version-alpine|; + s|$prev-ubuntu|$version-ubuntu|; + s|$prev,|$version,|; + s|$prevcommit|$commit|; + s|refs/tags/$prev|refs/tags/$version|" library/kong > library/kong.new + mv library/kong.new library/kong + +elif [ "$mode" = "rc" -a "$rc" -gt 1 ] +then + sed "s|$prev-alpine|$version-alpine|; + s|$prev-ubuntu|$version-ubuntu|; + s|, ${xy}rc$[rc-1]|, ${xy}rc${rc}|; + s|$prev,|$version,|; + s|$prevcommit|$commit|; + s|refs/tags/$prev|refs/tags/$version|" library/kong > library/kong.new + mv library/kong.new library/kong + +elif [ "$mode" = "rc" -a "$rc" -eq 1 ] +then + gawk ' + BEGIN { + reset = 0 + not_yet_first = 1 + } + /^Tags/ { + if (not_yet_first == 1) { + not_yet_first = 0 + before_first = 1 + } + } + { + if (before_first == 1) { + v = "'$version'" + xy = "'$xy'" + commit = "'$commit'" + print "Tags: " v "-alpine, " v ", " xy ", alpine" + print "GitCommit: " commit + print "GitFetch: refs/tags/" v + print "Directory: alpine" + print "Architectures: amd64" + print "" + print "Tags: " v "-ubuntu" + print "GitCommit: " commit + print "GitFetch: refs/tags/" v + print "Directory: ubuntu" + print "Architectures: amd64, arm64v8" + print "" + before_first = 0 + } else { + print + 
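+        # every other line of library/kong passes through unchanged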
} + } + ' library/kong > library/kong.new + mv library/kong.new library/kong + +elif [ "$mode" = "minor" ] +then + gawk ' + BEGIN { + reset = 0 + not_yet_first = 1 + } + /^Tags/ { + if (not_yet_first == 1) { + not_yet_first = 0 + before_first = 1 + } + } + /Tags: .*[0-9]rc[0-9].*/ { + in_rc_tag = 1 + } + /^ *$/ { + if (in_rc_tag == 1) { + reset = 1 + } + } + { + if (before_first == 1) { + v = "'$version'" + xy = "'$xy'" + commit = "'$commit'" + print "Tags: " v "-alpine, " v ", " xy ", alpine, latest" + print "GitCommit: " commit + print "GitFetch: refs/tags/" v + print "Directory: alpine" + print "Architectures: amd64, arm64v8" + print "" + print "Tags: " v "-ubuntu, " xy "-ubuntu, ubuntu" + print "GitCommit: " commit + print "GitFetch: refs/tags/" v + print "Directory: ubuntu" + print "Architectures: amd64, arm64v8" + print "" + before_first = 0 + } + if (!(in_rc_tag == 1)) { + gsub(", latest", "") + gsub(", alpine", "") + gsub(", ubuntu", "") + print + } + if (reset == 1) { + in_rc_tag = 0 + reset = 0 + } + } + ' library/kong > library/kong.new + mv library/kong.new library/kong +fi + +echo "****************************************" +git diff +echo "****************************************" + +echo "Everything looks all right? (y/n)" +echo "(Answering y will commit, push the branch, and submit the PR)" +read +if ! [ "$REPLY" == "y" ] +then + exit 1 +fi + +git commit -av -m "kong $version" +git push --set-upstream origin release/$version + +hub pull-request -b docker-library:master -h "release/$version" -m "bump Kong to $version" diff --git a/docker-kong_v2.8.1/tests/01-image.test.sh b/docker-kong_v2.8.1/tests/01-image.test.sh new file mode 100644 index 0000000..22a53ef --- /dev/null +++ b/docker-kong_v2.8.1/tests/01-image.test.sh @@ -0,0 +1,131 @@ +#!/usr/bin/env bash + +function run_test { + tinitialize "Docker-Kong test suite" "${BASH_SOURCE[0]}" + + if [[ -f Dockerfile.$BASE ]]; then + docker run -i --rm -v $PWD/hadolint.yaml:/.config/hadolint.yaml hadolint/hadolint:2.7.0 < Dockerfile.$BASE + fi + + if [[ -f $BASE/Dockerfile ]]; then + docker run -i --rm -v $PWD/hadolint.yaml:/.config/hadolint.yaml hadolint/hadolint:2.7.0 < $BASE/Dockerfile + fi + + if [[ ! -z "${SNYK_SCAN_TOKEN}" ]]; then + docker scan --accept-license --login --token "${SNYK_SCAN_TOKEN}" + docker scan --accept-license --exclude-base --severity=high --file $BASE/Dockerfile kong-$BASE + fi + + # Test the proper version was buid + tchapter "test $BASE image" + ttest "the proper version was build" + + if [[ -f Dockerfile.$BASE ]]; then + version_given="$(grep 'ARG KONG_VERSION' Dockerfile.$BASE | awk -F "=" '{print $2}')" + fi + + if [[ -f $BASE/Dockerfile ]]; then + version_given="$(grep 'ARG KONG_VERSION' $BASE/Dockerfile | awk -F "=" '{print $2}')" + fi + + version_built="$(docker run -i --rm kong-$BASE kong version | tr -d '[:space:]')" + + if [[ "$version_given" != "$version_built" ]]; then + echo "Kong version mismatch:"; + echo "\tVersion given is $version_given"; + echo "\tVersion built is $version_built"; + tfailure + else + tsuccess + fi + + ttest "Dbless Test" + + pushd compose + docker-compose up -d + until docker ps -f health=healthy | grep -q ${KONG_DOCKER_TAG}; do + docker-compose up -d + docker ps + sleep 15 + done + + curl -I localhost:8001 | grep -E '(openresty|kong)' + if [ $? 
-eq 0 ]; then + tsuccess + else + tfailure + fi + + docker-compose kill + docker-compose rm -f + sleep 5 + docker volume prune -f + popd + + ttest "Upgrade Test" + + export COMPOSE_PROFILES=database + export KONG_DATABASE=postgres + pushd compose + curl -fsSL https://raw.githubusercontent.com/Kong/docker-kong/1.5.0/swarm/docker-compose.yml | KONG_DOCKER_TAG=kong:1.5.0 docker-compose -p kong -f - up -d + until docker ps -f health=healthy | grep -q kong:1.5.0; do + curl -fsSL https://raw.githubusercontent.com/Kong/docker-kong/1.5.0/swarm/docker-compose.yml | docker-compose -p kong -f - ps + docker ps + sleep 15 + curl -fsSL https://raw.githubusercontent.com/Kong/docker-kong/1.5.0/swarm/docker-compose.yml | KONG_DOCKER_TAG=kong:1.5.0 docker-compose -p kong -f - up -d + done + curl -I localhost:8001 | grep 'Server: openresty' + sed -i -e 's/127.0.0.1://g' docker-compose.yml + + KONG_DOCKER_TAG=${KONG_DOCKER_TAG} docker-compose -p kong up -d + until docker ps -f health=healthy | grep -q ${KONG_DOCKER_TAG}; do + docker-compose -p kong ps + docker ps + sleep 15 + done + + curl -I localhost:8001 | grep -E '(openresty|kong)' + if [ $? -eq 0 ]; then + tsuccess + else + tfailure + fi + + echo "cleanup" + + docker-compose -p kong kill + docker-compose -p kong rm -f + sleep 5 + docker volume prune -f + docker system prune -y + git checkout -- docker-compose.yml + popd + + # Run Kong functional tests + ttest "Kong functional test" + + git clone https://github.com/Kong/kong.git || true + pushd kong + git checkout $version_given || git checkout next + popd + + pushd kong-build-tools + rm -rf test/tests/01-package + docker tag kong-$BASE $BASE:$BASE + KONG_VERSION=$version_given KONG_TEST_IMAGE_NAME=kong-$BASE RESTY_IMAGE_BASE=$BASE RESTY_IMAGE_TAG=$BASE make test + if [ $? -eq 0 ]; then + tsuccess + else + tfailure + fi + popd + + + tfinish +} + +# No need to modify anything below this comment + +# shellcheck disable=SC1090 # do not follow source +[[ "$T_PROJECT_NAME" == "" ]] && set -e && if [[ -f "${1:-$(dirname "$(realpath "$0")")/test.sh}" ]]; then source "${1:-$(dirname "$(realpath "$0")")/test.sh}"; else source "${1:-$(dirname "$(realpath "$0")")/run.sh}"; fi && set +e +run_test diff --git a/docker-kong_v2.8.1/tests/02-customize.test.sh b/docker-kong_v2.8.1/tests/02-customize.test.sh new file mode 100644 index 0000000..a88e317 --- /dev/null +++ b/docker-kong_v2.8.1/tests/02-customize.test.sh @@ -0,0 +1,260 @@ +#!/usr/bin/env bash + +# to run this test locally do the following: +# > docker pull kong:latest +# > docker tag kong:latest kong-alpine +# > BASE=alpine tests/02-customize.test.sh + + +function build_custom_image { + # arg1: plugins; eg. "kong-http-to-https,kong-upstream-jwt" + # arg2: template; eg. "/mykong/nginx.conf" + # arg3: path to local rockserver dir; eg. "/some/dir/rockserver" + local plugins + local template + local rockserver + if [[ ! "$1" == "" ]]; then + plugins="--build-arg PLUGINS=$1" + fi + if [[ ! "$2" == "" ]]; then + cp $2 ./custom.conf + template="--build-arg TEMPLATE=./custom.conf" + fi + if [[ ! "$3" == "" ]]; then + # rockserver must be within docker build context, so copy files there + mkdir rockserver + ls $3 + cp -r -v "$3" . + rockserver="--build-arg ROCKS_DIR=./rockserver" + fi + #export BUILDKIT_PROGRESS=plain + docker build --build-arg KONG_BASE="kong-$BASE" \ + --build-arg "KONG_LICENSE_DATA=$KONG_LICENSE_DATA" \ + $plugins \ + $template \ + $rockserver \ + --tag "kong-$BASE-customize" \ + . + local result=$? 
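+  # (capture docker build's exit status before the cleanup below can overwrite $?)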
+  # cleanup the temporary files/directories
+  if [ -d rockserver ]; then
+    rm -rf rockserver
+  fi
+  if [ -f custom.conf ]; then
+    rm custom.conf
+  fi
+  return $result
+}
+
+function delete_custom_image {
+  docker rmi "kong-$BASE-customize" > /dev/null 2>&1
+}
+
+unset TEST_CMD_OPTS
+function run_kong_cmd {
+  docker run -ti --rm $TEST_CMD_OPTS "kong-$BASE-customize" $1
+}
+
+
+
+function run_test {
+  # the suite name below will only be used when running this file directly; when
+  # running through "test.sh" it must be provided using the "--suite" option.
+  tinitialize "Docker-Kong test suite" "${BASH_SOURCE[0]}"
+
+  local mypath
+  mypath=$(dirname "$(realpath "$0")")
+  pushd "$mypath/../customize"
+
+
+
+  tchapter "Customize $BASE"
+
+  ttest "injects a plugin, pure-Lua"
+  local test_plugin_name="kong-upstream-jwt"
+  build_custom_image "$test_plugin_name"
+  if [ ! $? -eq 0 ]; then
+    tfailure
+  else
+    run_kong_cmd "luarocks list --porcelain" | grep $test_plugin_name
+    if [ ! $? -eq 0 ]; then
+      tmessage "injected plugin '$test_plugin_name' was not found"
+      tfailure
+    else
+      tsuccess
+    fi
+  fi
+  delete_custom_image
+
+
+
+  ttest "injects a plugin, with self-contained C code (no binding)"
+  local test_plugin_name="lua-protobuf"
+  build_custom_image "$test_plugin_name"
+  if [ ! $? -eq 0 ]; then
+    tfailure
+  else
+    run_kong_cmd "luarocks list --porcelain" | grep $test_plugin_name
+    if [ ! $? -eq 0 ]; then
+      tmessage "injected plugin '$test_plugin_name' was not found"
+      tfailure
+    else
+      tsuccess
+    fi
+  fi
+  delete_custom_image
+
+
+
+  ttest "injects a plugin with local rockserver"
+  local test_plugin_name="kong-plugin-myplugin"
+  build_custom_image "$test_plugin_name" "" "$mypath/rockserver"
+  if [ ! $? -eq 0 ]; then
+    tfailure
+  else
+    run_kong_cmd "luarocks list --porcelain" | grep $test_plugin_name
+    if [ ! $? -eq 0 ]; then
+      tmessage "injected plugin '$test_plugin_name' was not found"
+      tfailure
+    else
+      tsuccess
+    fi
+  fi
+  delete_custom_image
+
+
+
+  ttest "build image to test KONG_PLUGINS settings"
+  local test_plugin_name="kong-plugin-myplugin"
+  build_custom_image "$test_plugin_name" "" "$mypath/rockserver"
+  if [ ! $? -eq 0 ]; then
+    tfailure
+  else
+    tsuccess
+  fi
+
+  ttest "injected plugins are added to KONG_PLUGINS if not set"
+  unset TEST_CMD_OPTS
+  run_kong_cmd "printenv" | grep "bundled,myplugin"
+  if [ ! $? -eq 0 ]; then
+    tmessage "injected plugin '$test_plugin_name' was not found in KONG_PLUGINS"
+    tfailure
+  else
+    tsuccess
+  fi
+
+  ttest "injected plugins are added to KONG_PLUGINS if set with 'bundled'"
+  TEST_CMD_OPTS="-e KONG_PLUGINS=bundled,custom-one"
+  run_kong_cmd "printenv" | grep "bundled,myplugin,custom-one"
+  if [ ! $? -eq 0 ]; then
+    tmessage "injected plugin '$test_plugin_name' was not found in KONG_PLUGINS"
+    tfailure
+  else
+    tsuccess
+  fi
+
+  ttest "injected plugins are NOT added to KONG_PLUGINS if set without 'bundled'"
+  TEST_CMD_OPTS="-e KONG_PLUGINS=custom-one,custom-two"
+  run_kong_cmd "printenv" | grep "$test_plugin_name"
+  if [ $? -eq 0 ]; then
+    tmessage "injected plugin '$test_plugin_name' was found in KONG_PLUGINS, but was not expected"
+    tfailure
+  else
+    tsuccess
+  fi
+
+  # cleanup
+  unset TEST_CMD_OPTS
+  delete_custom_image
+
+
+
+  ttest "fails injecting an unavailable plugin with local rockserver"
+  # the plugin is PUBLICLY available, but NOT on our local one, so this should fail
+  local test_plugin_name="kong-upstream-jwt"
+  build_custom_image "$test_plugin_name" "" "$mypath/rockserver"
+  if [ ! $?
-eq 0 ]; then + tsuccess + else + tmessage "injected plugin '$test_plugin_name' which was not on the local rockserver" + tfailure + fi + delete_custom_image + + + + ttest "injects a custom template" + build_custom_image "" "$mypath/bad_file.conf" + if [ ! $? -eq 0 ]; then + tfailure + else + docker run -it -d \ + -e "KONG_DATABASE=off" \ + --name "kong-testsuite-container" \ + "kong-$BASE-customize:latest" kong start + + sleep 3 + OUTPUT=$(docker logs kong-testsuite-container) + echo "$OUTPUT" + echo "$OUTPUT" | grep "nginx configuration is invalid" + + if [ $? -eq 0 ]; then + tmessage "container failed to start because of invalid config, as expected" + tsuccess + else + tmessage "container is running, while it should have failed to start" + tfailure + fi + docker rm --force kong-testsuite-container + fi + delete_custom_image + + + + ttest "injects a custom template and a plugin" + local test_plugin_name="kong-plugin-myplugin" + build_custom_image "$test_plugin_name" "$mypath/bad_file.conf" "$mypath/rockserver" + if [ ! $? -eq 0 ]; then + tfailure + else + # check if plugin was injected + run_kong_cmd "luarocks list --porcelain" | grep $test_plugin_name + if [ ! $? -eq 0 ]; then + tmessage "injected plugin '$test_plugin_name' was not found" + tfailure + else + # now check if the template was added + docker run -it -d \ + -e "KONG_DATABASE=off" \ + --name "kong-testsuite-container" \ + "kong-$BASE-customize:latest" kong start + + sleep 3 + OUTPUT=$(docker logs kong-testsuite-container) + echo "$OUTPUT" + echo "$OUTPUT" | grep "nginx configuration is invalid" + + if [ $? -eq 0 ]; then + tmessage "container failed to start because of invalid config, as expected" + tsuccess + else + tmessage "container is running, while it should have failed to start" + tfailure + fi + docker rm --force kong-testsuite-container + fi + fi + delete_custom_image + + + + popd + tfinish +} + + +# No need to modify anything below this comment + +# shellcheck disable=SC1090 # do not follow source +[[ "$T_PROJECT_NAME" == "" ]] && set -e && if [[ -f "${1:-$(dirname "$(realpath "$0")")/test.sh}" ]]; then source "${1:-$(dirname "$(realpath "$0")")/test.sh}"; else source "${1:-$(dirname "$(realpath "$0")")/run.sh}"; fi && set +e +run_test diff --git a/docker-kong_v2.8.1/tests/03-cis-sec.test.sh b/docker-kong_v2.8.1/tests/03-cis-sec.test.sh new file mode 100644 index 0000000..690b3d6 --- /dev/null +++ b/docker-kong_v2.8.1/tests/03-cis-sec.test.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash + +function run_test { + # the suite name below will only be used when rtunning this file directly, when + # running through "test.sh" it must be provided using the "--suite" option. + tinitialize "Docker-Kong test suite" "${BASH_SOURCE[0]}" + + tchapter "CIS-Sec tests $KONG_DOCKER_TAG" + ttest "CIS-Sec for docker-compose" + + docker kill $(docker ps -q) + docker run -d kong-$BASE tail -f /dev/null + docker rmi kong:1.5.0 + docker rmi postgres:9.5 + docker images + + rm -rf tests/docker-bench-security + + LOG_OUTPUT=docker-bench-security.log + + # * 5.1 is "apparmor". That option is not available in docker compose 3.x + # * 5.10 is "mem_limit". That option is not available in docker compose 3.x (it has moved to resources) + # * 5.11 is "cpu_shares". That option is not available in docker compose 3.x + # * 5.28 is "pids_limit". 
That option is also not available in docker compose 3.x + # * See https://github.com/docker/compose/issues/4513 for more examples of incompatibilities + LINUX_EXCLUDE_TESTS=5_1,5_10,5_11,5_28 + + if [[ -f /lib/systemd/system/docker.service ]]; then # Ubuntu + mkdir tests/docker-bench-security + pushd tests/docker-bench-security + docker run --rm --net host --pid host --userns host --cap-add audit_control \ + -e DOCKER_CONTENT_TRUST=$DOCKER_CONTENT_TRUST \ + -v /etc:/etc:ro \ + -v /lib/systemd/system:/lib/systemd/system:ro \ + -v /usr/bin/containerd:/usr/bin/containerd:ro \ + -v /usr/bin/runc:/usr/bin/runc:ro \ + -v /usr/lib/systemd:/usr/lib/systemd:ro \ + -v /var/lib:/var/lib:ro \ + -v /var/run/docker.sock:/var/run/docker.sock:ro \ + --label docker_bench_security \ + docker/docker-bench-security -e $LINUX_EXCLUDE_TESTS > $LOG_OUTPUT + + else # all other linux distros + mkdir tests/docker-bench-security + pushd tests/docker-bench-security + docker run --rm --net host --pid host --userns host --cap-add audit_control \ + -e DOCKER_CONTENT_TRUST=$DOCKER_CONTENT_TRUST \ + -v /etc:/etc:ro \ + -v /usr/bin/containerd:/usr/bin/containerd:ro \ + -v /usr/bin/runc:/usr/bin/runc:ro \ + -v /usr/lib/systemd:/usr/lib/systemd:ro \ + -v /var/lib:/var/lib:ro \ + -v /var/run/docker.sock:/var/run/docker.sock:ro \ + --label docker_bench_security \ + docker/docker-bench-security -e $LINUX_EXCLUDE_TESTS > $LOG_OUTPUT + fi + + if cat "$LOG_OUTPUT" | grep WARN | grep kong -B 1; then + tmessage "Found warnings in docker-bench-security report" + tfailure + else + tsuccess + fi + + popd + rm -rf tests/docker-bench-security + + tfinish +} + +# No need to modify anything below this comment + +# shellcheck disable=SC1090 # do not follow source +[[ "$T_PROJECT_NAME" == "" ]] && set -e && if [[ -f "${1:-$(dirname "$(realpath "$0")")/test.sh}" ]]; then source "${1:-$(dirname "$(realpath "$0")")/test.sh}"; else source "${1:-$(dirname "$(realpath "$0")")/run.sh}"; fi && set +e +run_test diff --git a/docker-kong_v2.8.1/tests/04-executables.test.sh b/docker-kong_v2.8.1/tests/04-executables.test.sh new file mode 100644 index 0000000..6f869a8 --- /dev/null +++ b/docker-kong_v2.8.1/tests/04-executables.test.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash + +function run_test { + # the suite name below will only be used when rtunning this file directly, when + # running through "test.sh" it must be provided using the "--suite" option. + tinitialize "Docker-Kong test suite" "${BASH_SOURCE[0]}" + + tchapter "makes executables available" + + ttest "resty is in the system path" + docker run -ti --rm "kong-$BASE" resty -V + if [ $? -eq 0 ]; then + tsuccess + else + tmessage "resty wasn't found in the system path" + tfailure + fi + + ttest "luajit is in the system path" + docker run -ti --rm "kong-$BASE" luajit -v + if [ $? -eq 0 ]; then + tsuccess + else + tmessage "luajit wasn't found in the system path" + tfailure + fi + + ttest "lua is in the system path" + docker run -ti --rm "kong-$BASE" lua -v + if [ $? -eq 0 ]; then + tsuccess + else + tmessage "lua wasn't found in the system path" + tfailure + fi + + ttest "nginx is in the system path" + docker run -ti --rm "kong-$BASE" nginx -v + if [ $? -eq 0 ]; then + tsuccess + else + tmessage "nginx wasn't found in the system path" + tfailure + fi + + ttest "luarocks is in the system path" + docker run -ti --rm "kong-$BASE" luarocks --version + if [ $? 
-eq 0 ]; then + tsuccess + else + tmessage "luarocks wasn't found in the system path" + tfailure + fi + + tfinish +} + +# No need to modify anything below this comment + +# shellcheck disable=SC1090 # do not follow source +[[ "$T_PROJECT_NAME" == "" ]] && set -e && if [[ -f "${1:-$(dirname "$(realpath "$0")")/test.sh}" ]]; then source "${1:-$(dirname "$(realpath "$0")")/test.sh}"; else source "${1:-$(dirname "$(realpath "$0")")/run.sh}"; fi && set +e +run_test diff --git a/docker-kong_v2.8.1/tests/README.md b/docker-kong_v2.8.1/tests/README.md new file mode 100644 index 0000000..55a14a0 --- /dev/null +++ b/docker-kong_v2.8.1/tests/README.md @@ -0,0 +1,7 @@ +To create a new testfile run: + +```shell +./test.sh --create mytestfile "Docker-Kong test suite" +``` + +instructions will be in the generated file. diff --git a/docker-kong_v2.8.1/tests/bad_file.conf b/docker-kong_v2.8.1/tests/bad_file.conf new file mode 100644 index 0000000..badf962 --- /dev/null +++ b/docker-kong_v2.8.1/tests/bad_file.conf @@ -0,0 +1 @@ +this is a bad conf file diff --git a/docker-kong_v2.8.1/tests/rockserver/kong-plugin-myplugin-0.1.0-1.all.rock b/docker-kong_v2.8.1/tests/rockserver/kong-plugin-myplugin-0.1.0-1.all.rock new file mode 100644 index 0000000..778ddcc Binary files /dev/null and b/docker-kong_v2.8.1/tests/rockserver/kong-plugin-myplugin-0.1.0-1.all.rock differ diff --git a/docker-kong_v2.8.1/tests/test.sh b/docker-kong_v2.8.1/tests/test.sh new file mode 100644 index 0000000..618a631 --- /dev/null +++ b/docker-kong_v2.8.1/tests/test.sh @@ -0,0 +1,448 @@ +#!/usr/bin/env bash + +# source: https://github.com/Tieske/test.sh + +if [[ "$T_INIT_COUNT" == "" ]]; then + # first time, initialize global variables + T_PROJECT_NAME="" + T_FILE_NAME="" + T_COUNT_FAILURE=0 + T_COUNT_SUCCESS=0 + T_CURRENT_CHAPTER="" + T_CHAPTER_START_FAILURES=0 + T_CHAPTER_START_SUCCESSES=0 + T_CURRENT_TEST="" + T_COLOR_YELLOW="\033[1;33m" + T_COLOR_RED="\033[0;31m" + T_COLOR_GREEN="\033[1;32m" + T_COLOR_CLEAR="\033[0m" + T_INIT_COUNT=0 + T_FAILURE_ARRAY=() + T_DEBUGGING="" + + figlet -v > /dev/null 2>&1 + T_FIGLET_AVAILABLE=$? 
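+  # remember whether figlet is available (exit status 0); the banner helpers
+  # below fall back to plain-text output when it is not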
+else
+  # assuming we're being sourced again, just exit
+  return 0
+fi
+
+
+function texit {
+  # internal function only
+  unset T_PROJECT_NAME
+  unset T_FILE_NAME
+  unset T_COUNT_FAILURE
+  unset T_COUNT_SUCCESS
+  unset T_CURRENT_CHAPTER
+  unset T_CHAPTER_START_FAILURES
+  unset T_CHAPTER_START_SUCCESSES
+  unset T_CURRENT_TEST
+  unset T_COLOR_YELLOW
+  unset T_COLOR_RED
+  unset T_COLOR_GREEN
+  unset T_COLOR_CLEAR
+  unset T_INIT_COUNT
+  unset T_FAILURE_ARRAY
+  unset T_FIGLET_AVAILABLE
+  unset T_DEBUGGING
+  exit "$1"
+}
+
+
+function tfooter {
+  # internal function only
+  # Arguments:
+  #   1) successes
+  #   2) failures
+  #   3) [optional] boolean; if set Project title, otherwise Chapter title
+  local indent=""
+  if [[ "$3" == "" ]]; then
+    indent=" "
+    local chapter
+    if [[ "$T_FILE_NAME" == "" ]]; then
+      chapter=$T_CURRENT_CHAPTER
+    else
+      chapter="$T_CURRENT_CHAPTER ($T_FILE_NAME)"
+    fi
+    echo "------------------------------------------------------------------------------------------------------------------------"
+    echo -e "$T_COLOR_YELLOW$indent""Chapter : $chapter$T_COLOR_CLEAR"
+  else
+    echo "========================================================================================================================"
+    echo -e "$T_COLOR_YELLOW$indent""Project : $T_PROJECT_NAME$T_COLOR_CLEAR"
+  fi
+  echo -e "$T_COLOR_YELLOW$indent""Successes: $1$T_COLOR_CLEAR"
+  echo -e "$T_COLOR_YELLOW$indent""Failures : $2$T_COLOR_CLEAR"
+}
+
+
+function theader {
+  # internal function only
+  local header=$1
+  echo "========================================================================================================================"
+  if [ $T_FIGLET_AVAILABLE -eq 0 ]; then
+    figlet -c -w 120 "$header"
+  else
+    printf "%*s\n" $(( (${#header} + 120) / 2)) "$header"
+  fi
+  echo "========================================================================================================================"
+}
+
+
+function tmessage {
+  echo -e "$T_COLOR_YELLOW$T_PROJECT_NAME [ info ] $*$T_COLOR_CLEAR"
+}
+
+
+function tdebug {
+  if [[ "$T_CURRENT_TEST" == "" ]]; then
+    echo "calling tdebug without a test, call ttest first"
+    exit 1
+  fi
+  T_DEBUGGING=true
+  set -x
+}
+
+
+function tinitialize {
+  # Initializes either a test suite or a single test file. Every tinitialize
+  # MUST be followed by a tfinish, after the tests are completed.
+  # Arguments:
+  #   1) [required] name of the test suite (ignored if already set)
+  #   2) [optional] filename of the testfile
+  if [[ ! "$T_CURRENT_CHAPTER" == "" ]]; then
+    echo "calling tinitialize after testing already started"
+    exit 1
+  fi
+  if [[ "$1" == "" ]]; then
+    echo "calling tinitialize without project name"
+    exit 1
+  fi
+
+  if [[ $T_INIT_COUNT -eq 0 ]]; then
+    # first time we're being initialized
+    T_PROJECT_NAME=$1
+    theader "$T_PROJECT_NAME"
+    ((T_INIT_COUNT = T_INIT_COUNT + 1))
+  else
+    # we're being called multiple times, because multiple files run in a row
+    # do not initialize again, just update the counter
+    ((T_INIT_COUNT = T_INIT_COUNT + 1))
+  fi
+  T_FILE_NAME=$2
+}
+
+
+function tchapter {
+  # Initializes a test chapter.
+  # Call after tinitialize, and before ttest.
+  if [[ ! "$T_CURRENT_TEST" == "" ]]; then
+    echo "calling tchapter while test is unfinished, call tfailure or tsuccess first"
+    exit 1
+  fi
+  if [[ "$1" == "" ]]; then
+    echo "calling tchapter without chapter name"
+    exit 1
+  fi
+
+  if [[ !
"$T_CURRENT_CHAPTER" == "" ]]; then + tfooter $((T_COUNT_SUCCESS - T_CHAPTER_START_SUCCESSES)) $((T_COUNT_FAILURE - T_CHAPTER_START_FAILURES)) + fi + + T_CURRENT_CHAPTER="$*" + T_CHAPTER_START_FAILURES=$T_COUNT_FAILURE + T_CHAPTER_START_SUCCESSES=$T_COUNT_SUCCESS + + theader "$T_CURRENT_CHAPTER" +} + + +function ttest { + # Marks the start of a test. + # The test MUST be finished with either tsuccess or tfailure. + # Arguments: + # 1) name of the test + if [[ "$T_CURRENT_CHAPTER" == "" ]]; then + echo "calling ttest without chapter, call tchapter first" + exit 1 + fi + if [[ "$1" == "" ]]; then + echo "calling ttest without test description" + exit 1 + fi + T_CURRENT_TEST="$*" + echo -e "$T_COLOR_YELLOW$T_PROJECT_NAME [ start ] $T_CURRENT_CHAPTER: $T_CURRENT_TEST$T_COLOR_CLEAR" +} + + +function tfailure { + # Marks the end of a test, with a failure. + # no arguments + if [[ "$T_DEBUGGING" == "true" ]]; then set +x; T_DEBUGGING=""; fi + if [[ "$T_CURRENT_TEST" == "" ]]; then + echo "calling tfailure without a test, call ttest first" + exit 1 + fi + [[ ! "$1" == "" ]] && tmessage "$*" + local failure="$T_CURRENT_CHAPTER: $T_CURRENT_TEST" + echo -e "$T_COLOR_YELLOW$T_PROJECT_NAME$T_COLOR_RED [ failed ]$T_COLOR_YELLOW $failure$T_COLOR_CLEAR" + + if [[ ! $T_FILE_NAME == "" ]]; then + failure="$failure ($T_FILE_NAME)" + fi + + T_FAILURE_ARRAY+=("$failure") + + ((T_COUNT_FAILURE = T_COUNT_FAILURE + 1)) + T_CURRENT_TEST="" +} + + +function tsuccess { + # Marks the end of a test, as a success. + # no arguments + if [[ "$T_DEBUGGING" == "true" ]]; then set +x; T_DEBUGGING=""; fi + if [[ "$T_CURRENT_TEST" == "" ]]; then + echo "calling tsuccess without a test, call ttest first" + exit 1 + fi + [[ ! "$1" == "" ]] && tmessage "$*" + echo -e "$T_COLOR_YELLOW$T_PROJECT_NAME$T_COLOR_GREEN [ success ]$T_COLOR_YELLOW $T_CURRENT_CHAPTER: $T_CURRENT_TEST$T_COLOR_CLEAR" + ((T_COUNT_SUCCESS = T_COUNT_SUCCESS + 1)) + T_CURRENT_TEST="" +} + + +function tfinish { + # Finishes either a test suite or a single test file. + # no arguments + if [[ ! "$T_CURRENT_TEST" == "" ]]; then + echo "calling tfinish while test is unfinished, call tfailure or tsuccess first" + exit 1 + fi + + if [[ ! "$T_CURRENT_CHAPTER" == "" ]]; then + tfooter $((T_COUNT_SUCCESS - T_CHAPTER_START_SUCCESSES)) $((T_COUNT_FAILURE - T_CHAPTER_START_FAILURES)) + T_CURRENT_CHAPTER="" + fi + + ((T_INIT_COUNT = T_INIT_COUNT - 1)) + if [[ $T_INIT_COUNT -eq 0 ]]; then + # this was the last testfile running, so actually wrap it up + tfooter $((T_COUNT_SUCCESS)) $((T_COUNT_FAILURE)) Project + local failure + for failure in "${T_FAILURE_ARRAY[@]}"; do + echo -e "$T_COLOR_YELLOW $failure$T_COLOR_CLEAR" + done + + if [ "$T_COUNT_FAILURE" -eq 0 ] && [ "$T_COUNT_SUCCESS" -gt 0 ]; then + if [ $T_FIGLET_AVAILABLE -eq 0 ]; then + # split in lines and colorize each individually for CI + figlet -c -w 120 "Success!" | while IFS= read -r line; do echo -e "$T_COLOR_GREEN$line $T_COLOR_CLEAR"; done + else + echo -e "$T_COLOR_GREEN Overall succes!$T_COLOR_CLEAR" + fi + texit 0 + else + if [ $T_FIGLET_AVAILABLE -eq 0 ]; then + # split in lines and colorize each individually for CI + figlet -c -w 120 "Failed!" | while IFS= read -r line; do echo -e "$T_COLOR_RED$line $T_COLOR_CLEAR"; done + else + echo -e "$T_COLOR_RED Overall failed!$T_COLOR_CLEAR" + fi + texit 1 + fi + else + # we've finished a file, but not the last one yet. + T_FILE_NAME="" + fi +} + + +function tcreate { + # Creates a new testfile from template. 
+  # Arguments:
+  #   1) filename of the new testfile (.test.sh extension auto-appended)
+  #   2) test suite name
+  if [ "$1" == "" ]; then
+    echo "first argument missing: filename to create"
+    texit 1
+  elif [ "$2" == "" ]; then
+    echo "second argument missing: test suite name"
+    texit 1
+  elif [ ! "$3" == "" ]; then
+    echo "too many arguments"
+    texit 1
+  fi
+
+  local FILENAME="$1"
+  if [[ "$FILENAME" != *.test.sh ]]; then
+    FILENAME=$FILENAME.test.sh
+  fi
+
+  if [ -f "$FILENAME" ]; then
+    echo "file already exists: $FILENAME"
+    texit 1
+  fi
+
+cat <<EOF > "$FILENAME"
+#!/usr/bin/env bash
+
+: '
+There is one dependency: "test.sh"; the "figlet" utility is optional.
+
+Usage test.sh:
+  1: ./test.sh [--suite <name>] [files/dirs...]
+  2: ./test.sh --create <testfile> <suite-name>
+
+  1: Runs tests. When "files/dirs" is not provided, it will run all "*.test.sh" files
+     located in the same directory as "test.sh". The suite name defaults to "unknown
+     test suite".
+
+  2: Creates a new template test file
+
+Usage test files:
+  ./this.test.sh [path-to-test.sh]
+
+  When "path-to-test.sh" is not provided it defaults to the same directory where
+  the test file is located.
+
+Assuming "test.sh" is in the same directory as this file:
+
+  /some/path/this.test.sh                        # runs only this file
+  /some/path/test.sh                             # runs all "/some/path/*.test.sh" files
+  /some/path/test.sh this.test.sh that.test.sh   # runs "this.test.sh" and "that.test.sh" files
+
+When not in the same directory
+
+  /some/path/this.test.sh other/path/test.sh     # runs only this file
+  /other/path/test.sh /some/path/this.test.sh    # runs only this file
+
+
+The tests themselves are located below this comment in the "run_test" function
+'
+
+function run_test {
+  # the suite name below will only be used when running this file directly; when
+  # running through "test.sh" it must be provided using the "--suite" option.
+  tinitialize "$2" "\${BASH_SOURCE[0]}"
+
+  tchapter "great tests"
+
+  ttest "ensures the file exists"
+  tdebug # this enables 'set -x' until the first tfailure or tsuccess call
+  if [ ! -f "some/local/file" ]; then
+    tmessage "The file did not exist"
+    tfailure
+  else
+    tmessage "The file was found"
+    tsuccess
+  fi
+
+  tchapter "awesome tests"
+
+  ttest "ensures the file does NOT exist"
+  if [ ! -f "some/local/file" ]; then
+    tsuccess "a success message"
+  else
+    tfailure "a failure message"
+  fi
+
+  tfinish
+}
+
+# No need to modify anything below this comment
+
+# shellcheck disable=SC1090 # do not follow source
+[[ "\$T_PROJECT_NAME" == "" ]] && set -e && if [[ -f "\${1:-\$(dirname "\$(realpath "\$0")")/test.sh}" ]]; then source "\${1:-\$(dirname "\$(realpath "\$0")")/test.sh}"; else source "\${1:-\$(dirname "\$(realpath "\$0")")/run.sh}"; fi && set +e
+run_test
+EOF
+
+  if [ ! $? -eq 0 ]; then
+    echo "failed to write file $FILENAME"
+    texit 1
+  fi
+  chmod +x "$FILENAME"
+  #bash ./test.sh "$FILENAME"
+
+  echo "Successfully created a new test file: $FILENAME"
+  echo "Instructions are in the file."
+  texit 0
+}
+
+
+function main {
+  # usage: test.sh [--suite <name>] [filenames...]
+  # if no filenames given then will execute all "*.test.sh" files in the
+  # same directory
+  local FILE_LIST=()
+  local FILENAME
+  local SUITE_NAME
+
+  if [[ "$1" == "--create" ]]; then
+    shift
+    tcreate "$@"
+    texit 0
+  elif [[ "$1" == "--suite" ]]; then
+    shift
+    SUITE_NAME=$1
+    shift
+  else
+    SUITE_NAME="unknown test suite"
+  fi
+
+  if [[ $# -gt 0 ]]; then
+    # filenames passed; only execute filenames passed in
+    while [[ $# -gt 0 ]]; do
+      if [ ! -f "$1" ] ; then
+        if [ !
-d "$1" ] ; then + echo "test file not found: $1" + texit 1 + fi + # it's a directory, add all files in it + for FILENAME in "$1"/*.test.sh; do + if [ -f "$FILENAME" ] ; then + FILE_LIST+=("$FILENAME") + fi + done + else + # add a single file + FILE_LIST+=("$1") + fi + shift + done + + else + # no parameters passed, go execute all test files we can find + local MY_PATH + if [[ "$0" == "/dev/stdin" ]]; then + # script is executed from stdin, probably through run.sh + MY_PATH=$PWD + else + MY_PATH=$(dirname "$(realpath "$0")") + fi + for FILENAME in "$MY_PATH"/*.test.sh; do + if [[ "$FILENAME" != "$(realpath "$0")" ]]; then + if [ -f "$FILENAME" ] ; then + FILE_LIST+=("$FILENAME") + fi + fi + done + fi + + tinitialize "$SUITE_NAME" + + for FILENAME in ${FILE_LIST[*]}; do + # shellcheck disable=SC1090 + source "$FILENAME" + done + + tfinish +} + + +# see 'main' for usage +if [[ $0 == "${BASH_SOURCE[0]}" ]]; then + # this script is executed, not sourced, so initiate a test run + main "$@" +fi diff --git a/docker-kong_v2.8.1/ubuntu/Dockerfile b/docker-kong_v2.8.1/ubuntu/Dockerfile new file mode 100644 index 0000000..f8d5ead --- /dev/null +++ b/docker-kong_v2.8.1/ubuntu/Dockerfile @@ -0,0 +1,64 @@ +FROM ubuntu:focal + +ARG ASSET=ce +ENV ASSET $ASSET + +ARG EE_PORTS + +COPY kong.deb /tmp/kong.deb + +ARG KONG_VERSION=2.8.1 +ENV KONG_VERSION $KONG_VERSION + +ARG KONG_AMD64_SHA="10d12d23e5890414d666663094d51a42de41f8a9806fbc0baaf9ac4d37794361" +ARG KONG_ARM64_SHA="61c13219ef64dac9aeae5ae775411e8cfcd406f068cf3e75d463f916ae6513cb" + +# hadolint ignore=DL3015 +RUN set -ex; \ + arch=$(dpkg --print-architecture); \ + case "${arch}" in \ + amd64) KONG_SHA256=$KONG_AMD64_SHA ;; \ + arm64) KONG_SHA256=$KONG_ARM64_SHA ;; \ + esac; \ + apt-get update \ + && if [ "$ASSET" = "ce" ] ; then \ + apt-get install -y curl \ + && UBUNTU_CODENAME=$(cat /etc/os-release | grep UBUNTU_CODENAME | cut -d = -f 2) \ + && KONG_REPO=$(echo ${KONG_VERSION%.*} | sed 's/\.//') \ + && curl -fL https://packages.konghq.com/public/gateway-$KONG_REPO/deb/ubuntu/pool/$UBUNTU_CODENAME/main/k/ko/kong_$KONG_VERSION/kong_${KONG_VERSION}_$arch.deb -o /tmp/kong.deb \ + && apt-get purge -y curl \ + && echo "$KONG_SHA256 /tmp/kong.deb" | sha256sum -c -; \ + else \ + # this needs to stay inside this "else" block so that it does not become part of the "official images" builds (https://github.com/docker-library/official-images/pull/11532#issuecomment-996219700) + apt-get upgrade -y ; \ + fi; \ + apt-get install -y --no-install-recommends unzip git \ + # Please update the ubuntu install docs if the below line is changed so that + # end users can properly install Kong along with its required dependencies + # and that our CI does not diverge from our docs. 
+ && apt install --yes /tmp/kong.deb \ + && rm -rf /var/lib/apt/lists/* \ + && rm -rf /tmp/kong.deb \ + && chown kong:0 /usr/local/bin/kong \ + && chown -R kong:0 /usr/local/kong \ + && ln -s /usr/local/openresty/bin/resty /usr/local/bin/resty \ + && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/luajit \ + && ln -s /usr/local/openresty/luajit/bin/luajit /usr/local/bin/lua \ + && ln -s /usr/local/openresty/nginx/sbin/nginx /usr/local/bin/nginx \ + && if [ "$ASSET" = "ce" ] ; then \ + kong version ; \ + fi + +COPY docker-entrypoint.sh /docker-entrypoint.sh + +USER kong + +ENTRYPOINT ["/docker-entrypoint.sh"] + +EXPOSE 8000 8443 8001 8444 $EE_PORTS + +STOPSIGNAL SIGQUIT + +HEALTHCHECK --interval=10s --timeout=10s --retries=10 CMD kong health + +CMD ["kong", "docker-start"] diff --git a/docker-kong_v2.8.1/ubuntu/docker-entrypoint.sh b/docker-kong_v2.8.1/ubuntu/docker-entrypoint.sh new file mode 100644 index 0000000..f37496e --- /dev/null +++ b/docker-kong_v2.8.1/ubuntu/docker-entrypoint.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash +set -Eeo pipefail + +# usage: file_env VAR [DEFAULT] +# ie: file_env 'XYZ_DB_PASSWORD' 'example' +# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of +# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature) +file_env() { + local var="$1" + local fileVar="${var}_FILE" + local def="${2:-}" + # Do not continue if _FILE env is not set + if ! [ "${!fileVar:-}" ]; then + return + elif [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then + echo >&2 "error: both $var and $fileVar are set (but are exclusive)" + exit 1 + fi + local val="$def" + if [ "${!var:-}" ]; then + val="${!var}" + elif [ "${!fileVar:-}" ]; then + val="$(< "${!fileVar}")" + fi + export "$var"="$val" + unset "$fileVar" +} + +export KONG_NGINX_DAEMON=${KONG_NGINX_DAEMON:=off} + +if [[ "$1" == "kong" ]]; then + + all_kong_options="/usr/local/share/lua/5.1/kong/templates/kong_defaults.lua" + set +Eeo pipefail + while IFS='' read -r LINE || [ -n "${LINE}" ]; do + opt=$(echo "$LINE" | grep "=" | sed "s/=.*$//" | sed "s/ //" | tr '[:lower:]' '[:upper:]') + file_env "KONG_$opt" + done < $all_kong_options + set -Eeo pipefail + + file_env KONG_PASSWORD + PREFIX=${KONG_PREFIX:=/usr/local/kong} + + if [[ "$2" == "docker-start" ]]; then + kong prepare -p "$PREFIX" "$@" + + ln -sf /dev/stdout $PREFIX/logs/access.log + ln -sf /dev/stdout $PREFIX/logs/admin_access.log + ln -sf /dev/stderr $PREFIX/logs/error.log + + exec /usr/local/openresty/nginx/sbin/nginx \ + -p "$PREFIX" \ + -c nginx.conf + fi +fi + +exec "$@" diff --git a/docker-kong_v2.8.1/ubuntu/kong.deb b/docker-kong_v2.8.1/ubuntu/kong.deb new file mode 100644 index 0000000..80931f4 Binary files /dev/null and b/docker-kong_v2.8.1/ubuntu/kong.deb differ diff --git a/docker-kong_v2.8.1/update.sh b/docker-kong_v2.8.1/update.sh new file mode 100644 index 0000000..1d7d0c5 --- /dev/null +++ b/docker-kong_v2.8.1/update.sh @@ -0,0 +1,135 @@ +#!/usr/bin/env bash +set -e + +if ! 
[ "$1" ] +then + echo "usage: $0 " + echo "example: $0 1.2.3" + exit 1 +fi + +version=$1 + +if [[ "$version" =~ "rc" ]]; then + version="${version//-}" +fi + +function red() { + echo -e "\033[1;31m$@\033[0m" +} + +function die() { + red "*** $@" + echo "See also: $0 --help" + echo + exit 1 +} + +# get kong url from dockerfile +# and fill it up with needed args +function get_url() { + dockerfile=$1 + arch=$2 + args=$3 + + eval $args + + raw_url=$(egrep -o 'https?://download.konghq.com/gateway-[^ ]+' $dockerfile | sed 's/\"//g') + + # set variables contained in raw url + KONG_VERSION=$version + + eval echo $raw_url +} + +hub --version &> /dev/null || die "hub is not in PATH. Get it from https://github.com/github/hub" + +kbt_in_kong_v=$(curl -sL https://raw.githubusercontent.com/Kong/kong/$version/.requirements | grep 'KONG_BUILD_TOOLS_VERSION\=' | awk -F"=" '{print $2}' | tr -d "'[:space:]") +if [[ -n "$kbt_in_kong_v" ]]; then + sed -i.bak 's/KONG_BUILD_TOOLS?=.*/KONG_BUILD_TOOLS?='$kbt_in_kong_v'/g' Makefile +fi + +pushd alpine + url=$(get_url Dockerfile amd64) + echo $url + curl -fL $url -o /tmp/kong + new_sha=$(sha256sum /tmp/kong | cut -b1-64) + + sed -i.bak 's/ARG KONG_AMD64_SHA=.*/ARG KONG_AMD64_SHA=\"'$new_sha'\"/g' Dockerfile + sed -i.bak 's/ARG KONG_VERSION=.*/ARG KONG_VERSION='$version'/g' Dockerfile + + sed -i.bak 's/ARG KONG_AMD64_SHA=.*/ARG KONG_AMD64_SHA=\"'$new_sha'\"/g' ../Dockerfile.apk + sed -i.bak 's/ARG KONG_VERSION=.*/ARG KONG_VERSION='$version'/g' ../Dockerfile.apk + + url=$(get_url Dockerfile arm64) + echo $url + curl -fL $url -o /tmp/kong + new_sha=$(sha256sum /tmp/kong | cut -b1-64) + + sed -i.bak 's/ARG KONG_ARM64_SHA=.*/ARG KONG_ARM64_SHA=\"'$new_sha'\"/g' Dockerfile + sed -i.bak 's/ARG KONG_VERSION=.*/ARG KONG_VERSION='$version'/g' Dockerfile + + sed -i.bak 's/ARG KONG_ARM64_SHA=.*/ARG KONG_ARM64_SHA=\"'$new_sha'\"/g' ../Dockerfile.apk + sed -i.bak 's/ARG KONG_VERSION=.*/ARG KONG_VERSION='$version'/g' ../Dockerfile.apk +popd + +pushd rhel + url=$(get_url Dockerfile amd64 "RHEL_VERSION=7") + echo $url + curl -fL $url -o /tmp/kong + new_sha=$(sha256sum /tmp/kong | cut -b1-64) + + sed -i.bak 's/ARG KONG_SHA256=.*/ARG KONG_SHA256=\"'$new_sha'\"/g' Dockerfile + sed -i.bak 's/ARG KONG_VERSION=.*/ARG KONG_VERSION='$version'/g' Dockerfile +popd + +url=$(get_url Dockerfile.rpm amd64 "VERSION=8") +echo $url +curl -fL $url -o /tmp/kong +new_sha=$(sha256sum /tmp/kong | cut -b1-64) + +sed -i.bak 's/ARG KONG_SHA256=.*/ARG KONG_SHA256=\"'$new_sha'\"/g' Dockerfile.rpm +sed -i.bak 's/ARG KONG_VERSION=.*/ARG KONG_VERSION='$version'/g' Dockerfile.rpm + +pushd ubuntu + url=$(get_url Dockerfile amd64 "UBUNTU_CODENAME=focal") + echo $url + curl -fL $url -o /tmp/kong + new_sha=$(sha256sum /tmp/kong | cut -b1-64) + + sed -i.bak 's/ARG KONG_AMD64_SHA=.*/ARG KONG_AMD64_SHA=\"'$new_sha'\"/g' Dockerfile + + url=$(get_url Dockerfile arm64 "UBUNTU_CODENAME=focal") + echo $url + curl -fL $url -o /tmp/kong + new_sha=$(sha256sum /tmp/kong | cut -b1-64) + + sed -i.bak 's/ARG KONG_ARM64_SHA=.*/ARG KONG_ARM64_SHA=\"'$new_sha'\"/g' Dockerfile + + sed -i.bak 's/ARG KONG_VERSION=.*/ARG KONG_VERSION='$version'/g' Dockerfile +popd + +url=$(get_url Dockerfile.deb amd64 "CODENAME=bullseye") +echo $url +curl -fL $url -o /tmp/kong +new_sha=$(sha256sum /tmp/kong | cut -b1-64) + +sed -i.bak 's/ARG KONG_SHA256=.*/ARG KONG_SHA256=\"'$new_sha'\"/g' Dockerfile.deb +sed -i.bak 's/ARG KONG_VERSION=.*/ARG KONG_VERSION='$version'/g' Dockerfile.deb + +echo "****************************************" +git diff +echo 
"****************************************" + +echo "Everything looks all right? (y/n)" +echo "(Answering y will commit, push the branch, and open a browser with the PR)" +read +if ! [ "$REPLY" == "y" ] +then + exit 1 +fi + +git commit -av -m "chore(*) bump to Kong $version" +git push --set-upstream origin release/$version + +hub pull-request -b master -h "$branch" -m "Release: $version" diff --git a/postgres_15.8.1.044/.gitignore b/postgres_15.8.1.044/.gitignore new file mode 100644 index 0000000..005d3ec --- /dev/null +++ b/postgres_15.8.1.044/.gitignore @@ -0,0 +1,26 @@ +.DS_Store +.python-version +.mise.toml +venv/ +*.swp +docker/cache/ + +ansible/image-manifest*.json +testinfra-aio-container-logs.log + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +#nix related +result* +.env-local +.history + + +#IDE +.idea/ +.vscode/ + +db/schema.sql diff --git a/postgres_15.8.1.044/CONTRIBUTING.md b/postgres_15.8.1.044/CONTRIBUTING.md new file mode 100644 index 0000000..a2c0976 --- /dev/null +++ b/postgres_15.8.1.044/CONTRIBUTING.md @@ -0,0 +1,46 @@ +# Welcome to Supabase Postgres contributing guide + +## Adding a new extension + +Extensions can either be built from source or installed through a debian package. In general, you want to add the installation commands for your extension to the [Dockerfile](Dockerfile) following the steps below. + +1. Create a [build stage](Dockerfile#L777) named after your extension. +2. Add build args that specify the extension's [release version](Dockerfile#L37). +3. If your extension is published as a package, download it to `/tmp/.deb` using the [ADD command](Dockerfile#L705). +4. If you need to build the extensions from source, use [checkinstall](Dockerfile#L791) to create a `/tmp/.deb` package. +5. Copy your extension's package from build stage to [extensions stage](Dockerfile#L851). + +Here's a minimal example: + +```dockerfile +ARG pg_graphql_release=1.1.0 + +#################### +# 19-pg_graphql.yml +#################### +FROM base as pg_graphql +# Download package archive +ARG pg_graphql_release +ADD "https://github.com/supabase/pg_graphql/releases/download/v${pg_graphql_release}/pg_graphql-v${pg_graphql_release}-pg${postgresql_major}-${TARGETARCH}-linux-gnu.deb" \ + /tmp/pg_graphql.deb + +#################### +# Collect extension packages +#################### +FROM scratch as extensions +COPY --from=pg_graphql /tmp/*.deb /tmp/ +``` + +Using this process maximises the effectiveness of Docker layer caching, which significantly speeds up our CI builds. + +## Testing an extension + +Extensions can be tested automatically using pgTAP. Start by creating a new file in [migrations/tests/extensions](migrations/tests/extensions). For example: + +```sql +BEGIN; +create extension if not exists wrappers with schema "extensions"; +ROLLBACK; +``` + +This test will be run as part of CI to check that your extension can be enabled successfully from the final Docker image. 
diff --git a/postgres_15.8.1.044/Dockerfile-15 b/postgres_15.8.1.044/Dockerfile-15 new file mode 100644 index 0000000..ee9db3c --- /dev/null +++ b/postgres_15.8.1.044/Dockerfile-15 @@ -0,0 +1,221 @@ +# syntax=docker/dockerfile:1.6 +ARG postgresql_major=15 +ARG postgresql_release=${postgresql_major}.1 + +# Bump default build arg to build a package from source +# Bump vars.yml to specify runtime package version +ARG sfcgal_release=1.3.10 +ARG postgis_release=3.3.2 +ARG pgrouting_release=3.4.1 +ARG pgtap_release=1.2.0 +ARG pg_cron_release=1.6.2 +ARG pgaudit_release=1.7.0 +ARG pgjwt_release=9742dab1b2f297ad3811120db7b21451bca2d3c9 +ARG pgsql_http_release=1.5.0 +ARG plpgsql_check_release=2.2.5 +ARG pg_safeupdate_release=1.4 +ARG timescaledb_release=2.9.1 +ARG wal2json_release=2_5 +ARG pljava_release=1.6.4 +ARG plv8_release=3.1.5 +ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492 +ARG pg_net_release=0.7.1 +ARG rum_release=1.3.13 +ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 +ARG libsodium_release=1.0.18 +ARG pgsodium_release=3.1.6 +ARG pg_graphql_release=1.5.11 +ARG pg_stat_monitor_release=1.1.1 +ARG pg_jsonschema_release=0.1.4 +ARG pg_repack_release=1.4.8 +ARG vault_release=0.2.8 +ARG groonga_release=12.0.8 +ARG pgroonga_release=2.4.0 +ARG wrappers_release=0.3.0 +ARG hypopg_release=1.3.1 +ARG pgvector_release=0.4.0 +ARG pg_tle_release=1.3.2 +ARG index_advisor_release=0.2.0 +ARG supautils_release=2.2.0 +ARG wal_g_release=2.0.1 + +FROM ubuntu:focal as base + +RUN apt update -y && apt install -y \ + curl \ + gnupg \ + lsb-release \ + software-properties-common \ + wget \ + sudo \ + && apt clean + + +RUN adduser --system --home /var/lib/postgresql --no-create-home --shell /bin/bash --group --gecos "PostgreSQL administrator" postgres +RUN adduser --system --no-create-home --shell /bin/bash --group wal-g +RUN curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install linux \ +--init none \ +--no-confirm \ +--extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \ +--extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" + +ENV PATH="${PATH}:/nix/var/nix/profiles/default/bin" + +COPY . 
/nixpg + +WORKDIR /nixpg + +RUN nix profile install .#psql_15/bin + + + +WORKDIR / + + +RUN mkdir -p /usr/lib/postgresql/bin \ + /usr/lib/postgresql/share/postgresql \ + /usr/share/postgresql \ + /var/lib/postgresql \ + && chown -R postgres:postgres /usr/lib/postgresql \ + && chown -R postgres:postgres /var/lib/postgresql \ + && chown -R postgres:postgres /usr/share/postgresql + +# Create symbolic links +RUN ln -s /nix/var/nix/profiles/default/bin/* /usr/lib/postgresql/bin/ \ + && ln -s /nix/var/nix/profiles/default/bin/* /usr/bin/ \ + && chown -R postgres:postgres /usr/bin + +# Create symbolic links for PostgreSQL shares +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/share/postgresql/ +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ +RUN chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/ +RUN chown -R postgres:postgres /usr/share/postgresql/ +# Create symbolic links for contrib directory +RUN mkdir -p /usr/lib/postgresql/share/postgresql/contrib \ + && find /nix/var/nix/profiles/default/share/postgresql/contrib/ -mindepth 1 -type d -exec sh -c 'for dir do ln -s "$dir" "/usr/lib/postgresql/share/postgresql/contrib/$(basename "$dir")"; done' sh {} + \ + && chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/contrib/ + +RUN chown -R postgres:postgres /usr/lib/postgresql + +RUN ln -sf /usr/lib/postgresql/share/postgresql/timezonesets /usr/share/postgresql/timezonesets + + +RUN apt-get update && \ + apt-get install -y --no-install-recommends tzdata + +RUN ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime && \ + dpkg-reconfigure --frontend noninteractive tzdata + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + build-essential \ + checkinstall \ + cmake + +ENV PGDATA=/var/lib/postgresql/data + +#################### +# setup-wal-g.yml +#################### +FROM base as walg +ARG wal_g_release +# ADD "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-${TARGETARCH}.tar.gz" /tmp/wal-g.tar.gz +RUN arch=$([ "$TARGETARCH" = "arm64" ] && echo "aarch64" || echo "$TARGETARCH") && \ + apt-get update && apt-get install -y --no-install-recommends curl && \ + curl -kL "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-aarch64.tar.gz" -o /tmp/wal-g.tar.gz && \ + tar -xvf /tmp/wal-g.tar.gz -C /tmp && \ + rm -rf /tmp/wal-g.tar.gz && \ + mv /tmp/wal-g-pg-ubuntu*20.04-aarch64 /tmp/wal-g + +# #################### +# # Download gosu for easy step-down from root +# #################### +FROM base as gosu +ARG TARGETARCH +# Install dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + gnupg \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* +# Download binary +ARG GOSU_VERSION=1.16 +ARG GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4 +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH \ + /usr/local/bin/gosu +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH.asc \ + /usr/local/bin/gosu.asc +# Verify checksum +RUN gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys $GOSU_GPG_KEY && \ + gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ + gpgconf --kill all && \ + chmod +x /usr/local/bin/gosu + +# #################### +# # Build final image +# #################### +FROM gosu as production +RUN id postgres || (echo "postgres user does not exist" && exit 1) +# # Setup extensions +COPY --from=walg 
/tmp/wal-g /usr/local/bin/ + +# # Initialise configs +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql.conf.j2 /etc/postgresql/postgresql.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_hba.conf.j2 /etc/postgresql/pg_hba.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql-stdout-log.conf /etc/postgresql/logging.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/supautils.conf.j2 /etc/postgresql-custom/supautils.conf +COPY --chown=postgres:postgres ansible/files/postgresql_extension_custom_scripts /etc/postgresql-custom/extension-custom-scripts +COPY --chown=postgres:postgres ansible/files/pgsodium_getkey_urandom.sh.j2 /usr/lib/postgresql/bin/pgsodium_getkey.sh +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_read_replica.conf.j2 /etc/postgresql-custom/read-replica.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_walg.conf.j2 /etc/postgresql-custom/wal-g.conf +COPY --chown=postgres:postgres ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh +COPY ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh + +RUN sed -i \ + -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ + -e "s|#session_preload_libraries = ''|session_preload_libraries = 'supautils'|g" \ + -e "s|#include = '/etc/postgresql-custom/supautils.conf'|include = '/etc/postgresql-custom/supautils.conf'|g" \ + -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ + echo "cron.database_name = 'postgres'" >> /etc/postgresql/postgresql.conf && \ + #echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-${TARGETARCH}/lib/server/libjvm.so'" >> /etc/postgresql/postgresql.conf && \ + echo "pgsodium.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo 'auto_explain.log_min_duration = 10s' >> /etc/postgresql/postgresql.conf && \ + usermod -aG postgres wal-g && \ + mkdir -p /etc/postgresql-custom && \ + chown postgres:postgres /etc/postgresql-custom + +# # Include schema migrations +COPY migrations/db /docker-entrypoint-initdb.d/ +COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql +COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql + +# # Add upstream entrypoint script +COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu +ADD --chmod=0755 \ + https://github.com/docker-library/postgres/raw/master/15/bullseye/docker-entrypoint.sh \ + /usr/local/bin/ + +RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql + +ENTRYPOINT ["docker-entrypoint.sh"] + +HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost +STOPSIGNAL SIGINT +EXPOSE 5432 + +ENV POSTGRES_HOST=/var/run/postgresql +ENV POSTGRES_USER=supabase_admin +ENV POSTGRES_DB=postgres +RUN apt-get update && apt-get install -y --no-install-recommends \ + locales \ + && rm -rf /var/lib/apt/lists/* && \ + localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ + && localedef -i C -c -f UTF-8 -A /usr/share/locale/locale.alias C.UTF-8 +RUN echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> 
/etc/locale.gen && locale-gen +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 +ENV LC_CTYPE=C.UTF-8 +ENV LC_COLLATE=C.UTF-8 +ENV LOCALE_ARCHIVE /usr/lib/locale/locale-archive +CMD ["postgres", "-D", "/etc/postgresql"] diff --git a/postgres_15.8.1.044/Dockerfile-kubernetes b/postgres_15.8.1.044/Dockerfile-kubernetes new file mode 100644 index 0000000..716e72b --- /dev/null +++ b/postgres_15.8.1.044/Dockerfile-kubernetes @@ -0,0 +1,9 @@ +FROM alpine:3.21 + +ADD ./output-cloudimg/packer-cloudimg /disk/focal.qcow2 + +RUN apk add --no-cache qemu-system-aarch64 qemu-img openssh-client nftables cloud-utils-localds aavmf +# dev stuff +# RUN apk add --no-cache iproute2 + +CMD exec /bin/sh -c "trap : TERM INT; sleep 9999999999d & wait" diff --git a/postgres_15.8.1.044/Dockerfile-orioledb-17 b/postgres_15.8.1.044/Dockerfile-orioledb-17 new file mode 100644 index 0000000..f5b4678 --- /dev/null +++ b/postgres_15.8.1.044/Dockerfile-orioledb-17 @@ -0,0 +1,234 @@ +# syntax=docker/dockerfile:1.6 +ARG postgresql_major=17-orioledb +ARG postgresql_release=${postgresql_major}.1 + +# Bump default build arg to build a package from source +# Bump vars.yml to specify runtime package version +ARG sfcgal_release=1.3.10 +ARG postgis_release=3.3.2 +ARG pgrouting_release=3.4.1 +ARG pgtap_release=1.2.0 +ARG pg_cron_release=1.6.2 +ARG pgaudit_release=1.7.0 +ARG pgjwt_release=9742dab1b2f297ad3811120db7b21451bca2d3c9 +ARG pgsql_http_release=1.5.0 +ARG plpgsql_check_release=2.2.5 +ARG pg_safeupdate_release=1.4 +ARG timescaledb_release=2.9.1 +ARG wal2json_release=2_5 +ARG pljava_release=1.6.4 +ARG plv8_release=3.1.5 +ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492 +ARG pg_net_release=0.7.1 +ARG rum_release=1.3.13 +ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 +ARG libsodium_release=1.0.18 +ARG pgsodium_release=3.1.6 +ARG pg_graphql_release=1.5.11 +ARG pg_stat_monitor_release=1.1.1 +ARG pg_jsonschema_release=0.1.4 +ARG pg_repack_release=1.4.8 +ARG vault_release=0.2.8 +ARG groonga_release=12.0.8 +ARG pgroonga_release=2.4.0 +ARG wrappers_release=0.3.0 +ARG hypopg_release=1.3.1 +ARG pgvector_release=0.4.0 +ARG pg_tle_release=1.3.2 +ARG index_advisor_release=0.2.0 +ARG supautils_release=2.2.0 +ARG wal_g_release=2.0.1 + +FROM ubuntu:focal as base + +RUN apt update -y && apt install -y \ + curl \ + gnupg \ + lsb-release \ + software-properties-common \ + wget \ + sudo \ + tree \ + && apt clean + + +RUN adduser --system --home /var/lib/postgresql --no-create-home --shell /bin/bash --group --gecos "PostgreSQL administrator" postgres +RUN adduser --system --no-create-home --shell /bin/bash --group wal-g +RUN curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install linux \ +--init none \ +--no-confirm \ +--extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \ +--extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" + +ENV PATH="${PATH}:/nix/var/nix/profiles/default/bin" + +COPY . 
/nixpg + +WORKDIR /nixpg + +RUN nix profile install .#psql_orioledb-17/bin + + + +WORKDIR / + + +RUN mkdir -p /usr/lib/postgresql/bin \ + /usr/lib/postgresql/share/postgresql \ + /usr/share/postgresql \ + /var/lib/postgresql \ + && chown -R postgres:postgres /usr/lib/postgresql \ + && chown -R postgres:postgres /var/lib/postgresql \ + && chown -R postgres:postgres /usr/share/postgresql + +# Create symbolic links +RUN ln -s /nix/var/nix/profiles/default/bin/* /usr/lib/postgresql/bin/ \ + && ln -s /nix/var/nix/profiles/default/bin/* /usr/bin/ \ + && chown -R postgres:postgres /usr/bin + +# Create symbolic links for PostgreSQL shares +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/share/postgresql/ +RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ +RUN chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/ +RUN chown -R postgres:postgres /usr/share/postgresql/ +# Create symbolic links for contrib directory +RUN tree /nix > /tmp/tree.txt && cat /tmp/tree.txt && cat /tmp/tree.txt >&2 + +RUN chown -R postgres:postgres /usr/lib/postgresql + +RUN ln -sf /usr/lib/postgresql/share/postgresql/timezonesets /usr/share/postgresql/timezonesets + + +RUN apt-get update && \ + apt-get install -y --no-install-recommends tzdata + +RUN ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime && \ + dpkg-reconfigure --frontend noninteractive tzdata + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + build-essential \ + checkinstall \ + cmake + +ENV PGDATA=/var/lib/postgresql/data + +#################### +# setup-wal-g.yml +#################### +FROM base as walg +ARG wal_g_release +# ADD "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-${TARGETARCH}.tar.gz" /tmp/wal-g.tar.gz +RUN arch=$([ "$TARGETARCH" = "arm64" ] && echo "aarch64" || echo "$TARGETARCH") && \ + apt-get update && apt-get install -y --no-install-recommends curl && \ + curl -kL "https://github.com/wal-g/wal-g/releases/download/v${wal_g_release}/wal-g-pg-ubuntu-20.04-aarch64.tar.gz" -o /tmp/wal-g.tar.gz && \ + tar -xvf /tmp/wal-g.tar.gz -C /tmp && \ + rm -rf /tmp/wal-g.tar.gz && \ + mv /tmp/wal-g-pg-ubuntu*20.04-aarch64 /tmp/wal-g + +# #################### +# # Download gosu for easy step-down from root +# #################### +FROM base as gosu +ARG TARGETARCH +# Install dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + gnupg \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* +# Download binary +ARG GOSU_VERSION=1.16 +ARG GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4 +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH \ + /usr/local/bin/gosu +ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH.asc \ + /usr/local/bin/gosu.asc +# Verify checksum +RUN gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys $GOSU_GPG_KEY && \ + gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ + gpgconf --kill all && \ + chmod +x /usr/local/bin/gosu + +# #################### +# # Build final image +# #################### +FROM gosu as production +RUN id postgres || (echo "postgres user does not exist" && exit 1) +# # Setup extensions +COPY --from=walg /tmp/wal-g /usr/local/bin/ + +# # Initialise configs +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql.conf.j2 /etc/postgresql/postgresql.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_hba.conf.j2 
/etc/postgresql/pg_hba.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql-stdout-log.conf /etc/postgresql/logging.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/supautils.conf.j2 /etc/postgresql-custom/supautils.conf +COPY --chown=postgres:postgres ansible/files/postgresql_extension_custom_scripts /etc/postgresql-custom/extension-custom-scripts +COPY --chown=postgres:postgres ansible/files/pgsodium_getkey_urandom.sh.j2 /usr/lib/postgresql/bin/pgsodium_getkey.sh +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_read_replica.conf.j2 /etc/postgresql-custom/read-replica.conf +COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_walg.conf.j2 /etc/postgresql-custom/wal-g.conf +COPY --chown=postgres:postgres ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh +COPY ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh + +RUN sed -i \ + -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ + -e "s|#session_preload_libraries = ''|session_preload_libraries = 'supautils'|g" \ + -e "s|#include = '/etc/postgresql-custom/supautils.conf'|include = '/etc/postgresql-custom/supautils.conf'|g" \ + -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ + echo "cron.database_name = 'postgres'" >> /etc/postgresql/postgresql.conf && \ + #echo "pljava.libjvm_location = '/usr/lib/jvm/java-11-openjdk-${TARGETARCH}/lib/server/libjvm.so'" >> /etc/postgresql/postgresql.conf && \ + echo "pgsodium.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo 'auto_explain.log_min_duration = 10s' >> /etc/postgresql/postgresql.conf && \ + usermod -aG postgres wal-g && \ + mkdir -p /etc/postgresql-custom && \ + chown postgres:postgres /etc/postgresql-custom + + # Remove items from postgresql.conf +RUN sed -i 's/ timescaledb,//g;' "/etc/postgresql/postgresql.conf" + #as of pg 16.4 + this db_user_namespace totally deprecated and will break the server if setting is present +RUN sed -i 's/db_user_namespace = off/#db_user_namespace = off/g;' "/etc/postgresql/postgresql.conf" +RUN sed -i 's/ timescaledb,//g; s/ plv8,//g; s/ postgis,//g; s/ pgrouting,//g' "/etc/postgresql-custom/supautils.conf" +RUN sed -i 's/\(shared_preload_libraries.*\)'\''\(.*\)$/\1, orioledb'\''\2/' "/etc/postgresql/postgresql.conf" +RUN echo "default_table_access_method = 'orioledb'" >> "/etc/postgresql/postgresql.conf" + + + +# # Include schema migrations +COPY migrations/db /docker-entrypoint-initdb.d/ +COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql +COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql +# Enable orioledb extension first +RUN echo "CREATE EXTENSION orioledb;" > /docker-entrypoint-initdb.d/init-scripts/00-pre-init.sql && \ + chown postgres:postgres /docker-entrypoint-initdb.d/init-scripts/00-pre-init.sql + +# # Add upstream entrypoint script +COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu +ADD --chmod=0755 \ + https://github.com/docker-library/postgres/raw/master/17/bullseye/docker-entrypoint.sh \ + /usr/local/bin/ + +RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql + 
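+# The two settings above ("default_table_access_method = 'orioledb'" and the
+# 00-pre-init.sql script that creates the extension before any migration runs)
+# are what put new tables on orioledb storage. A manual sanity check against a
+# running container (a sketch, not part of the build) could run this SQL:
+#   SHOW default_table_access_method;                             -- expect: orioledb
+#   SELECT extname FROM pg_extension WHERE extname = 'orioledb';  -- expect: one row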
+ENTRYPOINT ["docker-entrypoint.sh"] + +HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost +STOPSIGNAL SIGINT +EXPOSE 5432 + +ENV POSTGRES_HOST=/var/run/postgresql +ENV POSTGRES_USER=supabase_admin +ENV POSTGRES_DB=postgres +ENV POSTGRES_INITDB_ARGS="--allow-group-access --locale-provider=icu --encoding=UTF-8 --icu-locale=en_US.UTF-8" +RUN apt-get update && apt-get install -y --no-install-recommends \ + locales \ + && rm -rf /var/lib/apt/lists/* && \ + localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ + && localedef -i C -c -f UTF-8 -A /usr/share/locale/locale.alias C.UTF-8 +RUN echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen && locale-gen +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 +ENV LC_CTYPE=C.UTF-8 +ENV LC_COLLATE=C.UTF-8 +ENV LOCALE_ARCHIVE /usr/lib/locale/locale-archive +CMD ["postgres", "-D", "/etc/postgresql"] diff --git a/postgres_15.8.1.044/LICENSE b/postgres_15.8.1.044/LICENSE new file mode 100644 index 0000000..ab15a1e --- /dev/null +++ b/postgres_15.8.1.044/LICENSE @@ -0,0 +1,9 @@ +The PostgreSQL License + +Copyright (c) 2020, Supabase + +Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. + +IN NO EVENT SHALL Supabase BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF Supabase HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Supabase SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND Supabase HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. diff --git a/postgres_15.8.1.044/Makefile b/postgres_15.8.1.044/Makefile new file mode 100644 index 0000000..5bef8a4 --- /dev/null +++ b/postgres_15.8.1.044/Makefile @@ -0,0 +1,20 @@ +UPSTREAM_NIX_GIT_SHA := $(shell git rev-parse HEAD) +GIT_SHA := $(shell git describe --tags --always --dirty) + +init: qemu-arm64-nix.pkr.hcl + packer init qemu-arm64-nix.pkr.hcl + +output-cloudimg/packer-cloudimg: ansible qemu-arm64-nix.pkr.hcl + packer build -var "git_sha=$(UPSTREAM_NIX_GIT_SHA)" qemu-arm64-nix.pkr.hcl + +disk/focal-raw.img: output-cloudimg/packer-cloudimg + mkdir -p disk + sudo qemu-img convert -O raw output-cloudimg/packer-cloudimg disk/focal-raw.img + +alpine-image: output-cloudimg/packer-cloudimg + sudo nerdctl build . -t supabase-postgres-test:$(GIT_SHA) -f ./Dockerfile-kubernetes + +clean: + rm -rf output-cloudimg + +.PHONY: alpine-image init clean diff --git a/postgres_15.8.1.044/README.md b/postgres_15.8.1.044/README.md new file mode 100644 index 0000000..254e62b --- /dev/null +++ b/postgres_15.8.1.044/README.md @@ -0,0 +1,131 @@ +# Postgres + goodies + +Unmodified Postgres with some useful plugins. Our goal with this repo is not to modify Postgres, but to provide some of the most common extensions with a one-click install. + +## Primary Features +- ✅ Postgres [postgresql-15.8](https://www.postgresql.org/docs/15/index.html) +- ✅ Postgres [orioledb-postgresql-17_5](https://github.com/orioledb/orioledb) +- ✅ Ubuntu 20.04 (Focal Fossa). 
+- ✅ [wal_level](https://www.postgresql.org/docs/current/runtime-config-wal.html) = logical and [max_replication_slots](https://www.postgresql.org/docs/current/runtime-config-replication.html) = 5. Ready for replication. +- ✅ [Large Systems Extensions](https://github.com/aws/aws-graviton-getting-started#building-for-graviton-and-graviton2). Enabled for ARM images. +## Extensions + +### PostgreSQL 15 Extensions +| Extension | Version | Description | +| ------------- | :-------------: | ------------- | +| [hypopg](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | [1.4.1](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | Hypothetical Indexes for PostgreSQL | +| [index_advisor](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | [0.2.0](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | Recommend indexes to improve query performance in PostgreSQL | +| [pg-safeupdate](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | [1.4](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | A simple extension to PostgreSQL that requires criteria for UPDATE and DELETE | +| [pg_backtrace](https://github.com/pashkinelfe/pg_backtrace/archive/d100bac815a7365e199263f5b3741baf71b14c70.tar.gz) | [1.1](https://github.com/pashkinelfe/pg_backtrace/archive/d100bac815a7365e199263f5b3741baf71b14c70.tar.gz) | Updated fork of pg_backtrace | +| [pg_cron](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | [1.6.4](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | Run Cron jobs through PostgreSQL | +| [pg_graphql](https://github.com/supabase/pg_graphql/archive/v1.5.9.tar.gz) | [1.5.9](https://github.com/supabase/pg_graphql/archive/v1.5.9.tar.gz) | GraphQL support for PostreSQL | +| [pg_hashids](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | [cd0e1b31d52b394a0df64079406a14a4f7387cd6](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | Generate short unique IDs in PostgreSQL | +| [pg_jsonschema](https://github.com/supabase/pg_jsonschema/archive/v0.3.3.tar.gz) | [0.3.3](https://github.com/supabase/pg_jsonschema/archive/v0.3.3.tar.gz) | JSON Schema Validation for PostgreSQL | +| [pg_net](https://github.com/supabase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | [0.14.0](https://github.com/supabase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | Async networking for Postgres | +| [pg_plan_filter](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | [5081a7b5cb890876e67d8e7486b6a64c38c9a492](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | Filter PostgreSQL statements by execution plans | +| [pg_repack](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | [1.5.2](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | Reorganize tables in PostgreSQL databases with minimal locks | +| [pg_stat_monitor](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | [2.1.0](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | Query Performance Monitoring Tool for PostgreSQL | +| [pg_tle](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | [1.4.0](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | Framework for 'Trusted Language Extensions' in PostgreSQL | +| [pgaudit](https://github.com/pgaudit/pgaudit/archive/1.7.0.tar.gz) | 
[1.7.0](https://github.com/pgaudit/pgaudit/archive/1.7.0.tar.gz) | Open Source PostgreSQL Audit Logging | +| [pgjwt](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | [9742dab1b2f297ad3811120db7b21451bca2d3c9](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | PostgreSQL implementation of JSON Web Tokens | +| [pgmq](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | [1.4.4](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | A lightweight message queue. Like AWS SQS and RSMQ but on Postgres. | +| [pgroonga](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | [3.2.5](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | A PostgreSQL extension to use Groonga as the index | +| [pgrouting](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | [3.4.1](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | A PostgreSQL/PostGIS extension that provides geospatial routing functionality | +| [pgsodium](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | [3.1.8](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | Modern cryptography for PostgreSQL | +| [pgsql-http](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | [1.6.1](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | HTTP client for Postgres | +| [pgtap](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | [1.2.0](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | A unit testing framework for PostgreSQL | +| [pgvector](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | [0.8.0](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | Open-source vector similarity search for Postgres | +| [plpgsql-check](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | [2.7.11](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | Linter tool for language PL/pgSQL | +| [plv8](https://github.com/plv8/plv8/archive/v3.1.10.tar.gz) | [3.1.10](https://github.com/plv8/plv8/archive/v3.1.10.tar.gz) | V8 Engine Javascript Procedural Language add-on for PostgreSQL | +| [postgis](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | [3.3.7](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | Geographic Objects for PostgreSQL | +| [rum](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | [1.3.14](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | Full text search index method for PostgreSQL | +| [supabase-wrappers](https://github.com/supabase/wrappers/archive/v0.4.4.tar.gz) | [0.4.4](https://github.com/supabase/wrappers/archive/v0.4.4.tar.gz) | Various Foreign Data Wrappers (FDWs) for PostreSQL | +| [supautils](https://github.com/supabase/supautils/archive/refs/tags/v2.6.0.tar.gz) | [2.6.0](https://github.com/supabase/supautils/archive/refs/tags/v2.6.0.tar.gz) | PostgreSQL extension for enhanced security | +| [timescaledb-apache](https://github.com/timescale/timescaledb/archive/2.16.1.tar.gz) | [2.16.1](https://github.com/timescale/timescaledb/archive/2.16.1.tar.gz) | Scales PostgreSQL for time-series data via automatic partitioning across time and space | +| [vault](https://github.com/supabase/vault/archive/refs/tags/v0.2.9.tar.gz) | [0.2.9](https://github.com/supabase/vault/archive/refs/tags/v0.2.9.tar.gz) | Store encrypted secrets in PostgreSQL | +| [wal2json](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | 
[2_6](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | PostgreSQL JSON output plugin for changeset extraction | + +### PostgreSQL orioledb-17 Extensions +| Extension | Version | Description | +| ------------- | :-------------: | ------------- | +| [hypopg](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | [1.4.1](https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.1.tar.gz) | Hypothetical Indexes for PostgreSQL | +| [index_advisor](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | [0.2.0](https://github.com/olirice/index_advisor/archive/v0.2.0.tar.gz) | Recommend indexes to improve query performance in PostgreSQL | +| [orioledb](https://github.com/orioledb/orioledb/archive/beta9.tar.gz) | [orioledb](https://github.com/orioledb/orioledb/archive/beta9.tar.gz) | orioledb | +| [pg-safeupdate](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | [1.4](https://github.com/eradman/pg-safeupdate/archive/1.4.tar.gz) | A simple extension to PostgreSQL that requires criteria for UPDATE and DELETE | +| [pg_backtrace](https://github.com/pashkinelfe/pg_backtrace/archive/d100bac815a7365e199263f5b3741baf71b14c70.tar.gz) | [1.1](https://github.com/pashkinelfe/pg_backtrace/archive/d100bac815a7365e199263f5b3741baf71b14c70.tar.gz) | Updated fork of pg_backtrace | +| [pg_cron](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | [1.6.4](https://github.com/citusdata/pg_cron/archive/v1.6.4.tar.gz) | Run Cron jobs through PostgreSQL | +| [pg_graphql](https://github.com/supabase/pg_graphql/archive/v1.5.9.tar.gz) | [1.5.9](https://github.com/supabase/pg_graphql/archive/v1.5.9.tar.gz) | GraphQL support for PostreSQL | +| [pg_hashids](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | [cd0e1b31d52b394a0df64079406a14a4f7387cd6](https://github.com/iCyberon/pg_hashids/archive/cd0e1b31d52b394a0df64079406a14a4f7387cd6.tar.gz) | Generate short unique IDs in PostgreSQL | +| [pg_jsonschema](https://github.com/supabase/pg_jsonschema/archive/v0.3.3.tar.gz) | [0.3.3](https://github.com/supabase/pg_jsonschema/archive/v0.3.3.tar.gz) | JSON Schema Validation for PostgreSQL | +| [pg_net](https://github.com/supabase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | [0.14.0](https://github.com/supabase/pg_net/archive/refs/tags/v0.14.0.tar.gz) | Async networking for Postgres | +| [pg_plan_filter](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | [5081a7b5cb890876e67d8e7486b6a64c38c9a492](https://github.com/pgexperts/pg_plan_filter/archive/5081a7b5cb890876e67d8e7486b6a64c38c9a492.tar.gz) | Filter PostgreSQL statements by execution plans | +| [pg_repack](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | [1.5.2](https://github.com/reorg/pg_repack/archive/ver_1.5.2.tar.gz) | Reorganize tables in PostgreSQL databases with minimal locks | +| [pg_stat_monitor](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | [2.1.0](https://github.com/percona/pg_stat_monitor/archive/refs/tags/2.1.0.tar.gz) | Query Performance Monitoring Tool for PostgreSQL | +| [pg_tle](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | [1.4.0](https://github.com/aws/pg_tle/archive/refs/tags/v1.4.0.tar.gz) | Framework for 'Trusted Language Extensions' in PostgreSQL | +| [pgaudit](https://github.com/pgaudit/pgaudit/archive/17.0.tar.gz) | [17.0](https://github.com/pgaudit/pgaudit/archive/17.0.tar.gz) | Open Source PostgreSQL Audit Logging | +| 
[pgjwt](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | [9742dab1b2f297ad3811120db7b21451bca2d3c9](https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz) | PostgreSQL implementation of JSON Web Tokens | +| [pgmq](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | [1.4.4](https://github.com/tembo-io/pgmq/archive/v1.4.4.tar.gz) | A lightweight message queue. Like AWS SQS and RSMQ but on Postgres. | +| [pgroonga](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | [3.2.5](https://packages.groonga.org/source/pgroonga/pgroonga-3.2.5.tar.gz) | A PostgreSQL extension to use Groonga as the index | +| [pgrouting](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | [3.4.1](https://github.com/pgRouting/pgrouting/archive/v3.4.1.tar.gz) | A PostgreSQL/PostGIS extension that provides geospatial routing functionality | +| [pgsodium](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | [3.1.8](https://github.com/michelp/pgsodium/archive/refs/tags/v3.1.8.tar.gz) | Modern cryptography for PostgreSQL | +| [pgsql-http](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | [1.6.1](https://github.com/pramsey/pgsql-http/archive/refs/tags/v1.6.1.tar.gz) | HTTP client for Postgres | +| [pgtap](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | [1.2.0](https://github.com/theory/pgtap/archive/v1.2.0.tar.gz) | A unit testing framework for PostgreSQL | +| [pgvector](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | [0.8.0](https://github.com/pgvector/pgvector/archive/refs/tags/v0.8.0.tar.gz) | Open-source vector similarity search for Postgres | +| [plpgsql-check](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | [2.7.11](https://github.com/okbob/plpgsql_check/archive/v2.7.11.tar.gz) | Linter tool for language PL/pgSQL | +| [postgis](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | [3.3.7](https://download.osgeo.org/postgis/source/postgis-3.3.7.tar.gz) | Geographic Objects for PostgreSQL | +| [rum](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | [1.3.14](https://github.com/postgrespro/rum/archive/1.3.14.tar.gz) | Full text search index method for PostgreSQL | +| [supabase-wrappers](https://github.com/supabase/wrappers/archive/v0.4.4.tar.gz) | [0.4.4](https://github.com/supabase/wrappers/archive/v0.4.4.tar.gz) | Various Foreign Data Wrappers (FDWs) for PostreSQL | +| [supautils](https://github.com/supabase/supautils/archive/refs/tags/v2.6.0.tar.gz) | [2.6.0](https://github.com/supabase/supautils/archive/refs/tags/v2.6.0.tar.gz) | PostgreSQL extension for enhanced security | +| [vault](https://github.com/supabase/vault/archive/refs/tags/v0.2.9.tar.gz) | [0.2.9](https://github.com/supabase/vault/archive/refs/tags/v0.2.9.tar.gz) | Store encrypted secrets in PostgreSQL | +| [wal2json](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | [2_6](https://github.com/eulerto/wal2json/archive/wal2json_2_6.tar.gz) | PostgreSQL JSON output plugin for changeset extraction | +## Additional Goodies +*This is only available for our AWS EC2* + +| Goodie | Version | Description | +| ------------- | :-------------: | ------------- | +| [PgBouncer](https://www.pgbouncer.org/) | [1.16.1](http://www.pgbouncer.org/changelog.html#pgbouncer-116x) | Set up Connection Pooling. 
+| [PostgREST](https://postgrest.org/en/stable/) | [v12.2.3](https://github.com/PostgREST/postgrest/releases/tag/v12.2.3) | Instantly transform your database into a RESTful API. |
+| [WAL-G](https://github.com/wal-g/wal-g#wal-g) | [v2.0.1](https://github.com/wal-g/wal-g/releases/tag/v2.0.1) | Tool for physical database backup and recovery. | -->
+
+## Install
+
+See all installation instructions in the [repo wiki](https://github.com/supabase/postgres/wiki).
+
+[![Docker](https://github.com/supabase/postgres/blob/develop/docs/img/docker.png)](https://github.com/supabase/postgres/wiki/Docker)
+[![AWS](https://github.com/supabase/postgres/blob/develop/docs/img/aws.png)](https://github.com/supabase/postgres/wiki/AWS-EC2)
+
+
+
+## Motivation
+
+- Make it fast and simple to get started with Postgres.
+- Show off a few of Postgres' most exciting features.
+- This is the same build we offer at [Supabase](https://supabase.io).
+- Open a GitHub issue if you have a feature request.
+
+## License
+
+[The PostgreSQL License](https://opensource.org/licenses/postgresql). We realize that licensing is tricky since we are bundling all the various plugins. If we have infringed on any license, let us know and we will make the necessary changes (or remove that extension from this repo).
+
+## Sponsors
+
+We are building the features of Firebase using enterprise-grade, open source products. We support existing communities wherever possible, and if the products don’t exist we build them and open source them ourselves.
+
+[![New Sponsor](https://user-images.githubusercontent.com/10214025/90518111-e74bbb00-e198-11ea-8f88-c9e3c1aa4b5b.png)](https://github.com/sponsors/supabase)
\ No newline at end of file
diff --git a/postgres_15.8.1.044/amazon-arm64-nix.pkr.hcl b/postgres_15.8.1.044/amazon-arm64-nix.pkr.hcl
new file mode 100644
index 0000000..ec427ff
--- /dev/null
+++ b/postgres_15.8.1.044/amazon-arm64-nix.pkr.hcl
@@ -0,0 +1,277 @@
+variable "ami" {
+  type    = string
+  default = "ubuntu/images/hvm-ssd/ubuntu-focal-20.04-arm64-server-*"
+}
+
+variable "profile" {
+  type    = string
+  default = "${env("AWS_PROFILE")}"
+}
+
+variable "ami_name" {
+  type    = string
+  default = "supabase-postgres"
+}
+
+variable "ami_regions" {
+  type    = list(string)
+  default = ["ap-southeast-2"]
+}
+
+variable "ansible_arguments" {
+  type    = string
+  default = "--skip-tags install-postgrest,install-pgbouncer,install-supabase-internal"
+}
+
+variable "aws_access_key" {
+  type    = string
+  default = ""
+}
+
+variable "aws_secret_key" {
+  type    = string
+  default = ""
+}
+
+variable "environment" {
+  type    = string
+  default = "prod"
+}
+
+variable "region" {
+  type = string
+}
+
+variable "build-vol" {
+  type    = string
+  default = "xvdc"
+}
+
+# ccache docker image details
+variable "docker_user" {
+  type    = string
+  default = ""
+}
+
+variable "docker_passwd" {
+  type    = string
+  default = ""
+}
+
+variable "docker_image" {
+  type    = string
+  default = ""
+}
+
+variable "docker_image_tag" {
+  type    = string
+  default = "latest"
+}
+
+locals {
+  creator = "packer"
+}
+
+variable "postgres-version" {
+  type    = string
+  default = ""
+}
+
+variable "git-head-version" {
+  type    = string
+  default = "unknown"
+}
+
+variable "packer-execution-id" {
+  type    = string
+  default = "unknown"
+}
+
+variable "force-deregister" {
+  type    = bool
+  default = false
+}
+
+packer {
+  required_plugins {
+    amazon = {
+      source  = "github.com/hashicorp/amazon"
+      version = "~> 1"
+    }
+  }
+}
+
+# source block
+source "amazon-ebssurrogate" "source" {
+  profile = "${var.profile}"
+
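+  # The amazon-ebssurrogate builder launches a temporary "surrogate" instance,
+  # provisions the separately attached EBS volumes declared below, and snapshots
+  # those volumes into the AMI rather than imaging the instance's own boot disk.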
#access_key = "${var.aws_access_key}" + #ami_name = "${var.ami_name}-arm64-${formatdate("YYYY-MM-DD-hhmm", timestamp())}" + ami_name = "${var.ami_name}-${var.postgres-version}-stage-1" + ami_virtualization_type = "hvm" + ami_architecture = "arm64" + ami_regions = "${var.ami_regions}" + instance_type = "c6g.4xlarge" + region = "${var.region}" + #secret_key = "${var.aws_secret_key}" + force_deregister = var.force-deregister + + # Use latest official ubuntu focal ami owned by Canonical. + source_ami_filter { + filters = { + virtualization-type = "hvm" + name = "${var.ami}" + root-device-type = "ebs" + } + owners = [ "099720109477" ] + most_recent = true + } + ena_support = true + launch_block_device_mappings { + device_name = "/dev/xvdf" + delete_on_termination = true + volume_size = 10 + volume_type = "gp3" + } + + launch_block_device_mappings { + device_name = "/dev/xvdh" + delete_on_termination = true + volume_size = 8 + volume_type = "gp3" + } + + launch_block_device_mappings { + device_name = "/dev/${var.build-vol}" + delete_on_termination = true + volume_size = 16 + volume_type = "gp2" + omit_from_artifact = true + } + + run_tags = { + creator = "packer" + appType = "postgres" + packerExecutionId = "${var.packer-execution-id}" + } + run_volume_tags = { + creator = "packer" + appType = "postgres" + } + snapshot_tags = { + creator = "packer" + appType = "postgres" + } + tags = { + creator = "packer" + appType = "postgres" + postgresVersion = "${var.postgres-version}-stage1" + sourceSha = "${var.git-head-version}" + } + + communicator = "ssh" + ssh_pty = true + ssh_username = "ubuntu" + ssh_timeout = "5m" + + ami_root_device { + source_device_name = "/dev/xvdf" + device_name = "/dev/xvda" + delete_on_termination = true + volume_size = 10 + volume_type = "gp2" + } + + associate_public_ip_address = true +} + +# a build block invokes sources and runs provisioning steps on them. 
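+# A minimal sketch of how this template might be invoked; the values below are
+# placeholders (only `region` is strictly required, since it has no default):
+#
+#   packer build \
+#     -var "region=ap-southeast-2" \
+#     -var "postgres-version=15.8.1.044" \
+#     amazon-arm64-nix.pkr.hcl
+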
+build {
+  sources = ["source.amazon-ebssurrogate.source"]
+
+  provisioner "file" {
+    source      = "ebssurrogate/files/sources-arm64.cfg"
+    destination = "/tmp/sources.list"
+  }
+
+  provisioner "file" {
+    source      = "ebssurrogate/files/ebsnvme-id"
+    destination = "/tmp/ebsnvme-id"
+  }
+
+  provisioner "file" {
+    source      = "ebssurrogate/files/70-ec2-nvme-devices.rules"
+    destination = "/tmp/70-ec2-nvme-devices.rules"
+  }
+
+  provisioner "file" {
+    source      = "ebssurrogate/scripts/chroot-bootstrap-nix.sh"
+    destination = "/tmp/chroot-bootstrap-nix.sh"
+  }
+
+  provisioner "file" {
+    source      = "ebssurrogate/files/cloud.cfg"
+    destination = "/tmp/cloud.cfg"
+  }
+
+  provisioner "file" {
+    source      = "ebssurrogate/files/vector.timer"
+    destination = "/tmp/vector.timer"
+  }
+
+  provisioner "file" {
+    source      = "ebssurrogate/files/apparmor_profiles"
+    destination = "/tmp"
+  }
+
+  provisioner "file" {
+    source      = "migrations"
+    destination = "/tmp"
+  }
+
+  provisioner "file" {
+    source      = "ebssurrogate/files/unit-tests"
+    destination = "/tmp"
+  }
+
+  # Copy ansible playbook
+  provisioner "shell" {
+    inline = ["mkdir /tmp/ansible-playbook"]
+  }
+
+  provisioner "file" {
+    source      = "ansible"
+    destination = "/tmp/ansible-playbook"
+  }
+
+  provisioner "file" {
+    source      = "scripts"
+    destination = "/tmp/ansible-playbook"
+  }
+
+  provisioner "file" {
+    source      = "ansible/vars.yml"
+    destination = "/tmp/ansible-playbook/vars.yml"
+  }
+
+  provisioner "shell" {
+    environment_vars = [
+      "ARGS=${var.ansible_arguments}",
+      "DOCKER_USER=${var.docker_user}",
+      "DOCKER_PASSWD=${var.docker_passwd}",
+      "DOCKER_IMAGE=${var.docker_image}",
+      "DOCKER_IMAGE_TAG=${var.docker_image_tag}",
+      "POSTGRES_SUPABASE_VERSION=${var.postgres-version}"
+    ]
+    use_env_var_file = true
+    script = "ebssurrogate/scripts/surrogate-bootstrap-nix.sh"
+    execute_command = "sudo -S sh -c '. {{.EnvVarFile}} && cd /tmp/ansible-playbook && {{.Path}}'"
+    start_retry_timeout = "5m"
+    skip_clean = true
+  }
+
+  provisioner "file" {
+    source      = "/tmp/ansible.log"
+    destination = "/tmp/ansible.log"
+    direction   = "download"
+  }
+}
diff --git a/postgres_15.8.1.044/ansible.cfg b/postgres_15.8.1.044/ansible.cfg
new file mode 100644
index 0000000..5410ed8
--- /dev/null
+++ b/postgres_15.8.1.044/ansible.cfg
@@ -0,0 +1,3 @@
+[defaults]
+
+callback_whitelist = profile_tasks
diff --git a/postgres_15.8.1.044/ansible/files/admin_api_scripts/grow_fs.sh b/postgres_15.8.1.044/ansible/files/admin_api_scripts/grow_fs.sh
new file mode 100644
index 0000000..1bca017
--- /dev/null
+++ b/postgres_15.8.1.044/ansible/files/admin_api_scripts/grow_fs.sh
@@ -0,0 +1,32 @@
+#! /usr/bin/env bash
+
+set -euo pipefail
+
+VOLUME_TYPE=${1:-data}
+
+# guard against running concurrently with an in-flight resize2fs
+if pgrep resize2fs; then
+    echo "resize2fs is already running"
+    exit 1
+fi
+
+if [ -b /dev/nvme1n1 ] ; then
+    if [[ "${VOLUME_TYPE}" == "data" ]]; then
+        resize2fs /dev/nvme1n1
+
+    elif [[ "${VOLUME_TYPE}" == "root" ]] ; then
+        PLACEHOLDER_FL=/home/ubuntu/50M_PLACEHOLDER
+        rm -f "${PLACEHOLDER_FL}" || true
+        growpart /dev/nvme0n1 2
+        resize2fs /dev/nvme0n1p2
+        if [[ !
-f "${PLACEHOLDER_FL}" ]] ; then + fallocate -l50M "${PLACEHOLDER_FL}" + fi + else + echo "Invalid disk specified: ${VOLUME_TYPE}" + exit 1 + fi +else + growpart /dev/nvme0n1 2 + resize2fs /dev/nvme0n1p2 +fi +echo "Done resizing disk" diff --git a/postgres_15.8.1.044/ansible/files/admin_api_scripts/manage_readonly_mode.sh b/postgres_15.8.1.044/ansible/files/admin_api_scripts/manage_readonly_mode.sh new file mode 100644 index 0000000..41c9f5a --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/admin_api_scripts/manage_readonly_mode.sh @@ -0,0 +1,45 @@ +#! /usr/bin/env bash + +set -euo pipefail + +SUBCOMMAND=$1 + +function set_mode { + MODE=$1 + psql -h localhost -U supabase_admin -d postgres -c "ALTER SYSTEM SET default_transaction_read_only to ${MODE};" + psql -h localhost -U supabase_admin -d postgres -c "SELECT pg_reload_conf();" +} + +function check_override { + COMMAND=$(cat < 220.235.16.223.62599: Flags [S.], cksum 0x5de3 (incorrect -> 0x63da), seq 2314200657, ack 2071735457, win 62643, options [mss 8961,sackOK,TS val 3358598837 ecr 1277499190,nop,wscale 7], length 0 +# 1674013833.989257 IP (tos 0x0, ttl 64, id 24975, offset 0, flags [DF], proto TCP (6), length 52) +# 10.112.101.122.5432 > 220.235.16.223.62599: Flags [.], cksum 0x5ddb (incorrect -> 0xa25b), seq 1, ack 9, win 490, options [nop,nop,TS val 3358598885 ecr 1277499232], length 0 +# +# Sample IPv6 input lines: +# +# 1706483718.836526 IP6 (flowlabel 0x0bf27, hlim 64, next-header TCP (6) payload length: 125) 2406:da18:4fd:9b00:959:c52:ce68:10c8.5432 > 2406:da12:d78:f501:1273:296c:2482:c7a7.50530: Flags [P.], seq 25:118, ack 125, win 488, options [nop,nop,TS val 1026340732 ecr 1935666426], length 93 +# 1706483718.912083 IP6 (flowlabel 0x0bf27, hlim 64, next-header TCP (6) payload length: 501) 2406:da18:4fd:9b00:959:c52:ce68:10c8.5432 > 2406:da12:d78:f501:1273:296c:2482:c7a7.50530: Flags [P.], seq 118:587, ack 234, win 488, options [nop,nop,TS val 1026340807 ecr 1935666497], length 469 +# 1706483718.984001 IP6 (flowlabel 0x0bf27, hlim 64, next-header TCP (6) payload length: 151) 2406:da18:4fd:9b00:959:c52:ce68:10c8.5432 > 2406:da12:d78:f501:1273:296c:2482:c7a7.50530: Flags [P.], seq 587:706, ack 448, win 487, options [nop,nop,TS val 1026340879 ecr 1935666569], length 119 +sub extract_packet_length { + my ($line) = @_; + + #print("debug: >> " . 
$line); + + if ($line =~ /^.*, length (\d+)$/) { + # extract tcp packet length and add it up + my $len = $1; + $captured_len += $len; + } +} + +# write total length to file +sub write_file { + my ($output) = @_; + + my $now = strftime "%F %T", localtime time; + print "[$now] write captured len $captured_len to $output\n"; + + open(my $fh, "+>", $output) or die "Could not open file '$output' $!"; + print $fh "$captured_len"; + close($fh) or die "Could not write file '$output' $!"; +} + +# main +sub main { + # get arguments + GetOptions( + "interval:i" => \(my $interval = 60), + "output:s" => \(my $output = "/tmp/pg_egress_collect.txt"), + "help" => sub { HelpMessage(0) }, + ) or HelpMessage(1); + + my $loop = IO::Async::Loop->new; + + # tcpdump extractor + my $extractor = IO::Async::Stream->new_for_stdin( + on_read => sub { + my ($self, $buffref, $eof) = @_; + + while($$buffref =~ s/^(.*\n)//) { + my $line = $1; + extract_packet_length($line); + } + + return 0; + }, + ); + + # schedule file writer per minute + my $writer = IO::Async::Timer::Periodic->new( + interval => $interval, + on_tick => sub { + write_file($output); + + # reset total captured length + $captured_len = 0; + }, + ); + $writer->start; + + print "pg_egress_collect started, egress data will be saved to $output at interval $interval seconds.\n"; + + $loop->add($extractor); + $loop->add($writer); + $loop->run; +} + +main(); + +__END__ + +=head1 NAME + +pg_egress_collect.pl - collect egress from tcpdump output, extract TCP packet length, aggregate in specified interval and write to output file. + +=head1 SYNOPSIS + +pg_egress_collect.pl [-i interval] [-o output] + +Options: + + -i, --interval interval + output file write interval, in seconds, default is 60 seconds + + -o, --output output + output file path, default is /tmp/pg_egress_collect.txt + + -h, --help + print this help message + +=cut diff --git a/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/check.sh b/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/check.sh new file mode 100644 index 0000000..f85e957 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/check.sh @@ -0,0 +1,16 @@ +#! /usr/bin/env bash +## This script provides a method to check the status of the database upgrade +## process, which is updated in /tmp/pg-upgrade-status by initiate.sh +## This runs on the old (source) instance. + +set -euo pipefail + +STATUS_FILE="/tmp/pg-upgrade-status" + +if [ -f "${STATUS_FILE}" ]; then + STATUS=$(cat "${STATUS_FILE}") + echo -n "${STATUS}" +else + echo -n "unknown" +fi + diff --git a/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh b/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh new file mode 100644 index 0000000..e9e3afe --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh @@ -0,0 +1,561 @@ +#! 
/usr/bin/env bash + +# Common functions and variables used by initiate.sh and complete.sh + +REPORTING_PROJECT_REF="ihmaxnjpcccasmrbkpvo" +REPORTING_CREDENTIALS_FILE="/root/upgrade-reporting-credentials" + +REPORTING_ANON_KEY="" +if [ -f "$REPORTING_CREDENTIALS_FILE" ]; then + REPORTING_ANON_KEY=$(cat "$REPORTING_CREDENTIALS_FILE") +fi + +# shellcheck disable=SC2120 +# Arguments are passed in other files +function run_sql { + psql -h localhost -U supabase_admin -d postgres "$@" +} + +function ship_logs { + LOG_FILE=$1 + + if [ -z "$REPORTING_ANON_KEY" ]; then + echo "No reporting key found. Skipping log upload." + return 0 + fi + + if [ ! -f "$LOG_FILE" ]; then + echo "No log file found. Skipping log upload." + return 0 + fi + + if [ ! -s "$LOG_FILE" ]; then + echo "Log file is empty. Skipping log upload." + return 0 + fi + + HOSTNAME=$(hostname) + DERIVED_REF="${HOSTNAME##*-}" + + printf -v BODY '{ "ref": "%s", "step": "%s", "content": %s }' "$DERIVED_REF" "completion" "$(cat "$LOG_FILE" | jq -Rs '.')" + curl -sf -X POST "https://$REPORTING_PROJECT_REF.supabase.co/rest/v1/error_logs" \ + -H "apikey: ${REPORTING_ANON_KEY}" \ + -H 'Content-type: application/json' \ + -d "$BODY" +} + +function retry { + local retries=$1 + shift + + local count=0 + until "$@"; do + exit=$? + wait=$((2 ** (count + 1))) + count=$((count + 1)) + if [ $count -lt "$retries" ]; then + echo "Command $* exited with code $exit, retrying..." + sleep $wait + else + echo "Command $* exited with code $exit, no more retries left." + return $exit + fi + done + return 0 +} + +CI_stop_postgres() { + BINDIR=$(pg_config --bindir) + ARG=${1:-""} + + if [ "$ARG" = "--new-bin" ]; then + BINDIR="/tmp/pg_upgrade_bin/$PG_MAJOR_VERSION/bin" + fi + + su postgres -c "$BINDIR/pg_ctl stop -o '-c config_file=/etc/postgresql/postgresql.conf' -l /tmp/postgres.log" +} + +CI_start_postgres() { + BINDIR=$(pg_config --bindir) + ARG=${1:-""} + + if [ "$ARG" = "--new-bin" ]; then + BINDIR="/tmp/pg_upgrade_bin/$PG_MAJOR_VERSION/bin" + fi + + su postgres -c "$BINDIR/pg_ctl start -o '-c config_file=/etc/postgresql/postgresql.conf' -l /tmp/postgres.log" +} + +swap_postgres_and_supabase_admin() { + run_sql <<'EOSQL' +alter database postgres connection limit 0; +select pg_terminate_backend(pid) from pg_stat_activity where backend_type = 'client backend' and pid != pg_backend_pid(); +EOSQL + + if [ -z "$IS_CI" ]; then + retry 5 systemctl restart postgresql + else + CI_start_postgres "" + fi + + retry 8 pg_isready -h localhost -U supabase_admin + + run_sql <<'EOSQL' +set statement_timeout = '600s'; +begin; +create role supabase_tmp superuser; +set session authorization supabase_tmp; + +-- to handle snowflakes that happened in the past +revoke supabase_admin from authenticator; + +do $$ +begin + if exists (select from pg_extension where extname = 'timescaledb') then + execute(format('select %s.timescaledb_pre_restore()', (select pronamespace::regnamespace from pg_proc where proname = 'timescaledb_pre_restore'))); + end if; +end +$$; + +do $$ +declare + postgres_rolpassword text := (select rolpassword from pg_authid where rolname = 'postgres'); + supabase_admin_rolpassword text := (select rolpassword from pg_authid where rolname = 'supabase_admin'); + role_settings jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('database', d.datname, 'role', a.rolname, 'configs', s.setconfig)), '{}') + from pg_db_role_setting s + left join pg_database d on d.oid = s.setdatabase + join pg_authid a on a.oid = s.setrole + where a.rolname in ('postgres', 
'supabase_admin') + ); + event_triggers jsonb[] := (select coalesce(array_agg(jsonb_build_object('name', evtname)), '{}') from pg_event_trigger where evtowner = 'postgres'::regrole); + user_mappings jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', um.oid, 'role', a.rolname, 'server', s.srvname, 'options', um.umoptions)), '{}') + from pg_user_mapping um + join pg_authid a on a.oid = um.umuser + join pg_foreign_server s on s.oid = um.umserver + where a.rolname in ('postgres', 'supabase_admin') + ); + -- Objects can have initial privileges either by having those privileges set + -- when the system is initialized (by initdb) or when the object is created + -- during a CREATE EXTENSION and the extension script sets initial + -- privileges using the GRANT system. (https://www.postgresql.org/docs/current/catalog-pg-init-privs.html) + -- We only care about swapping init_privs for extensions. + init_privs jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('objoid', objoid, 'classoid', classoid, 'initprivs', initprivs::text)), '{}') + from pg_init_privs + where privtype = 'e' + ); + default_acls jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', d.oid, 'role', a.rolname, 'schema', n.nspname, 'objtype', d.defaclobjtype, 'acl', defaclacl::text)), '{}') + from pg_default_acl d + join pg_authid a on a.oid = d.defaclrole + left join pg_namespace n on n.oid = d.defaclnamespace + ); + schemas jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', n.oid, 'owner', a.rolname, 'acl', nspacl::text)), '{}') + from pg_namespace n + join pg_authid a on a.oid = n.nspowner + where true + and n.nspname != 'information_schema' + and not starts_with(n.nspname, 'pg_') + ); + types jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', t.oid, 'owner', a.rolname, 'acl', t.typacl::text)), '{}') + from pg_type t + join pg_namespace n on n.oid = t.typnamespace + join pg_authid a on a.oid = t.typowner + where true + and n.nspname != 'information_schema' + and not starts_with(n.nspname, 'pg_') + and ( + t.typrelid = 0 + or ( + select + c.relkind = 'c' + from + pg_class c + where + c.oid = t.typrelid + ) + ) + and not exists ( + select + from + pg_type el + where + el.oid = t.typelem + and el.typarray = t.oid + ) + ); + functions jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', p.oid, 'owner', a.rolname, 'kind', p.prokind, 'acl', p.proacl::text)), '{}') + from pg_proc p + join pg_namespace n on n.oid = p.pronamespace + join pg_authid a on a.oid = p.proowner + where true + and n.nspname != 'information_schema' + and not starts_with(n.nspname, 'pg_') + ); + relations jsonb[] := ( + select coalesce(array_agg(jsonb_build_object('oid', c.oid, 'owner', a.rolname, 'acl', c.relacl::text)), '{}') + from ( + -- Sequences must appear after tables, so we order by relkind + select * from pg_class order by relkind desc + ) c + join pg_namespace n on n.oid = c.relnamespace + join pg_authid a on a.oid = c.relowner + where true + and n.nspname != 'information_schema' + and not starts_with(n.nspname, 'pg_') + and c.relkind not in ('c', 'i', 'I') + ); + rec record; + obj jsonb; +begin + set local search_path = ''; + + if exists (select from pg_event_trigger where evtname = 'pgsodium_trg_mask_update') then + alter event trigger pgsodium_trg_mask_update disable; + end if; + + alter role postgres rename to supabase_admin_; + alter role supabase_admin rename to postgres; + alter role supabase_admin_ rename to supabase_admin; + + -- role grants + for rec in + select * 
from pg_auth_members + loop + execute(format('revoke %s from %s;', rec.roleid::regrole, rec.member::regrole)); + execute(format( + 'grant %s to %s %s granted by %s;', + case + when rec.roleid = 'postgres'::regrole then 'supabase_admin' + when rec.roleid = 'supabase_admin'::regrole then 'postgres' + else rec.roleid::regrole + end, + case + when rec.member = 'postgres'::regrole then 'supabase_admin' + when rec.member = 'supabase_admin'::regrole then 'postgres' + else rec.member::regrole + end, + case + when rec.admin_option then 'with admin option' + else '' + end, + case + when rec.grantor = 'postgres'::regrole then 'supabase_admin' + when rec.grantor = 'supabase_admin'::regrole then 'postgres' + else rec.grantor::regrole + end + )); + end loop; + + -- role passwords + execute(format('alter role postgres password %L;', postgres_rolpassword)); + execute(format('alter role supabase_admin password %L;', supabase_admin_rolpassword)); + + -- role settings + foreach obj in array role_settings + loop + execute(format('alter role %I %s reset all', + case when obj->>'role' = 'postgres' then 'supabase_admin' else 'postgres' end, + case when obj->>'database' is null then '' else format('in database %I', obj->>'database') end + )); + end loop; + foreach obj in array role_settings + loop + for rec in + select split_part(value, '=', 1) as key, substr(value, strpos(value, '=') + 1) as value + from jsonb_array_elements_text(obj->'configs') + loop + execute(format('alter role %I %s set %I to %s', + obj->>'role', + case when obj->>'database' is null then '' else format('in database %I', obj->>'database') end, + rec.key, + -- https://github.com/postgres/postgres/blob/70d1c664f4376fd3499e3b0c6888cf39b65d722b/src/bin/pg_dump/dumputils.c#L861 + case + when rec.key in ('local_preload_libraries', 'search_path', 'session_preload_libraries', 'shared_preload_libraries', 'temp_tablespaces', 'unix_socket_directories') + then rec.value + else quote_literal(rec.value) + end + )); + end loop; + end loop; + + reassign owned by postgres to supabase_admin; + + -- databases + for rec in + select * from pg_database where datname not in ('template0') + loop + execute(format('alter database %I owner to postgres;', rec.datname)); + end loop; + + -- event triggers + foreach obj in array event_triggers + loop + execute(format('alter event trigger %I owner to postgres;', obj->>'name')); + end loop; + + -- publications + for rec in + select * from pg_publication + loop + execute(format('alter publication %I owner to postgres;', rec.pubname)); + end loop; + + -- FDWs + for rec in + select * from pg_foreign_data_wrapper + loop + execute(format('alter foreign data wrapper %I owner to postgres;', rec.fdwname)); + end loop; + + -- foreign servers + for rec in + select * from pg_foreign_server + loop + execute(format('alter server %I owner to postgres;', rec.srvname)); + end loop; + + -- user mappings + foreach obj in array user_mappings + loop + execute(format('drop user mapping for %I server %I', case when obj->>'role' = 'postgres' then 'supabase_admin' else 'postgres' end, obj->>'server')); + end loop; + foreach obj in array user_mappings + loop + execute(format('create user mapping for %I server %I', obj->>'role', obj->>'server')); + for rec in + select split_part(value, '=', 1) as key, substr(value, strpos(value, '=') + 1) as value + from jsonb_array_elements_text(obj->'options') + loop + execute(format('alter user mapping for %I server %I options (%I %L)', obj->>'role', obj->>'server', rec.key, rec.value)); + end loop; + end loop; + 
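+  -- Illustrative example of the user-mapping swap above (server name
+  -- hypothetical): a mapping originally stored for role "postgres" on server
+  -- "acme_fdw" is dropped under that role's post-rename name (supabase_admin),
+  -- then re-created for "postgres" with each stored option re-applied via
+  -- ALTER USER MAPPING ... OPTIONS.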
+ -- init privs + foreach obj in array init_privs + loop + -- We need to modify system catalog directly here because there's no ALTER INIT PRIVILEGES. + update pg_init_privs set initprivs = (obj->>'initprivs')::aclitem[] where objoid = (obj->>'objoid')::oid and classoid = (obj->>'classoid')::oid; + end loop; + + -- default acls + foreach obj in array default_acls + loop + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + loop + if obj->>'role' in ('postgres', 'supabase_admin') or rec.grantee::regrole in ('postgres', 'supabase_admin') then + execute(format('alter default privileges for role %I %s revoke %s on %s from %s' + , case when obj->>'role' = 'postgres' then 'supabase_admin' + when obj->>'role' = 'supabase_admin' then 'postgres' + else obj->>'role' + end + , case when obj->>'schema' is null then '' + else format('in schema %I', obj->>'schema') + end + , rec.privilege_type + , case when obj->>'objtype' = 'r' then 'tables' + when obj->>'objtype' = 'S' then 'sequences' + when obj->>'objtype' = 'f' then 'functions' + when obj->>'objtype' = 'T' then 'types' + when obj->>'objtype' = 'n' then 'schemas' + end + , case when rec.grantee = 'postgres'::regrole then 'supabase_admin' + when rec.grantee = 'supabase_admin'::regrole then 'postgres' + when rec.grantee = 0 then 'public' + else rec.grantee::regrole::text + end + )); + end if; + end loop; + end loop; + + foreach obj in array default_acls + loop + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + loop + if obj->>'role' in ('postgres', 'supabase_admin') or rec.grantee::regrole in ('postgres', 'supabase_admin') then + execute(format('alter default privileges for role %I %s grant %s on %s to %s %s' + , obj->>'role' + , case when obj->>'schema' is null then '' + else format('in schema %I', obj->>'schema') + end + , rec.privilege_type + , case when obj->>'objtype' = 'r' then 'tables' + when obj->>'objtype' = 'S' then 'sequences' + when obj->>'objtype' = 'f' then 'functions' + when obj->>'objtype' = 'T' then 'types' + when obj->>'objtype' = 'n' then 'schemas' + end + , case when rec.grantee = 0 then 'public' else rec.grantee::regrole::text end + , case when rec.is_grantable then 'with grant option' else '' end + )); + end if; + end loop; + end loop; + + -- schemas + foreach obj in array schemas + loop + if obj->>'owner' = 'postgres' then + execute(format('alter schema %s owner to postgres;', (obj->>'oid')::regnamespace)); + end if; + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'supabase_admin') + loop + execute(format('revoke %s on schema %s from %I', rec.privilege_type, (obj->>'oid')::regnamespace, case when rec.grantee = 'postgres'::regrole then 'supabase_admin' else 'postgres' end)); + end loop; + end loop; + foreach obj in array schemas + loop + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'supabase_admin') + loop + execute(format('grant %s on schema %s to %s %s', rec.privilege_type, (obj->>'oid')::regnamespace, rec.grantee::regrole, case when rec.is_grantable then 'with grant option' else '' end)); + end loop; + end loop; + + -- types + foreach obj in array types + loop + if obj->>'owner' = 'postgres' then + execute(format('alter type %s owner to postgres;', (obj->>'oid')::regtype)); + end if; + for rec 
in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'supabase_admin') + loop + execute(format('revoke %s on type %s from %I', rec.privilege_type, (obj->>'oid')::regtype, case when rec.grantee = 'postgres'::regrole then 'supabase_admin' else 'postgres' end)); + end loop; + end loop; + foreach obj in array types + loop + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'supabase_admin') + loop + execute(format('grant %s on type %s to %s %s', rec.privilege_type, (obj->>'oid')::regtype, rec.grantee::regrole, case when rec.is_grantable then 'with grant option' else '' end)); + end loop; + end loop; + + -- functions + foreach obj in array functions + loop + if obj->>'owner' = 'postgres' then + execute(format('alter %s %s(%s) owner to postgres;' + , case when obj->>'kind' = 'p' then 'procedure' else 'function' end + , (obj->>'oid')::regproc + , pg_get_function_identity_arguments((obj->>'oid')::regproc))); + end if; + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'supabase_admin') + loop + execute(format('revoke %s on %s %s(%s) from %I' + , rec.privilege_type + , case + when obj->>'kind' = 'p' then 'procedure' + else 'function' + end + , (obj->>'oid')::regproc + , pg_get_function_identity_arguments((obj->>'oid')::regproc) + , case when rec.grantee = 'postgres'::regrole then 'supabase_admin' else 'postgres' end + )); + end loop; + end loop; + foreach obj in array functions + loop + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'supabase_admin') + loop + execute(format('grant %s on %s %s(%s) to %s %s' + , rec.privilege_type + , case + when obj->>'kind' = 'p' then 'procedure' + else 'function' + end + , (obj->>'oid')::regproc + , pg_get_function_identity_arguments((obj->>'oid')::regproc) + , rec.grantee::regrole + , case when rec.is_grantable then 'with grant option' else '' end + )); + end loop; + end loop; + + -- relations + foreach obj in array relations + loop + -- obj->>'oid' (text) needs to be casted to oid first for some reason + + if obj->>'owner' = 'postgres' then + execute(format('alter table %s owner to postgres;', (obj->>'oid')::oid::regclass)); + end if; + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'supabase_admin') + loop + execute(format('revoke %s on table %s from %I', rec.privilege_type, (obj->>'oid')::oid::regclass, case when rec.grantee = 'postgres'::regrole then 'supabase_admin' else 'postgres' end)); + end loop; + end loop; + foreach obj in array relations + loop + -- obj->>'oid' (text) needs to be casted to oid first for some reason + + for rec in + select grantor, grantee, privilege_type, is_grantable + from aclexplode((obj->>'acl')::aclitem[]) + where grantee::regrole in ('postgres', 'supabase_admin') + loop + execute(format('grant %s on table %s to %s %s', rec.privilege_type, (obj->>'oid')::oid::regclass, rec.grantee::regrole, case when rec.is_grantable then 'with grant option' else '' end)); + end loop; + end loop; + + if exists (select from pg_event_trigger where evtname = 'pgsodium_trg_mask_update') then + alter event trigger pgsodium_trg_mask_update enable; + end 
if; +end +$$; + +do $$ +begin + if exists (select from pg_extension where extname = 'timescaledb') then + execute(format('select %s.timescaledb_post_restore()', (select pronamespace::regnamespace from pg_proc where proname = 'timescaledb_post_restore'))); + end if; +end +$$; + +alter database postgres connection limit -1; + +-- #incident-2024-09-12-project-upgrades-are-temporarily-disabled +do $$ +begin + if exists (select from pg_authid where rolname = 'pg_read_all_data') then + execute('grant pg_read_all_data to postgres'); + end if; +end +$$; +grant pg_signal_backend to postgres; + +set session authorization supabase_admin; +drop role supabase_tmp; +commit; +EOSQL +} diff --git a/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh b/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh new file mode 100644 index 0000000..515c490 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh @@ -0,0 +1,276 @@ +#! /usr/bin/env bash + +## This script is run on the newly launched instance which is to be promoted to +## become the primary database instance once the upgrade successfully completes. +## The following commands copy custom PG configs and enable previously disabled +## extensions, containing regtypes referencing system OIDs. + +set -eEuo pipefail + +SCRIPT_DIR=$(dirname -- "$0";) +# shellcheck disable=SC1091 +source "$SCRIPT_DIR/common.sh" + +IS_CI=${IS_CI:-} +LOG_FILE="/var/log/pg-upgrade-complete.log" + +function cleanup { + UPGRADE_STATUS=${1:-"failed"} + EXIT_CODE=${?:-0} + + echo "$UPGRADE_STATUS" > /tmp/pg-upgrade-status + + ship_logs "$LOG_FILE" || true + + exit "$EXIT_CODE" +} + +function execute_extension_upgrade_patches { + if [ -f "/var/lib/postgresql/extension/wrappers--0.3.1--0.4.1.sql" ] && [ ! -f "/usr/share/postgresql/15/extension/wrappers--0.3.0--0.4.1.sql" ]; then + cp /var/lib/postgresql/extension/wrappers--0.3.1--0.4.1.sql /var/lib/postgresql/extension/wrappers--0.3.0--0.4.1.sql + ln -s /var/lib/postgresql/extension/wrappers--0.3.0--0.4.1.sql /usr/share/postgresql/15/extension/wrappers--0.3.0--0.4.1.sql + fi +} + +function execute_patches { + # Patch pg_net grants + PG_NET_ENABLED=$(run_sql -A -t -c "select count(*) > 0 from pg_extension where extname = 'pg_net';") + + if [ "$PG_NET_ENABLED" = "t" ]; then + PG_NET_GRANT_QUERY=$(cat < 0 from pg_extension where extname = 'pg_cron' and extowner::regrole::text = 'postgres';") + + if [ "$HAS_PG_CRON_OWNED_BY_POSTGRES" = "t" ]; then + RECREATE_PG_CRON_QUERY=$(cat < 0 from pg_extension where extname = 'pgmq';") + if [ "$HAS_PGMQ" = "t" ]; then + PATCH_PGMQ_QUERY=$(cat < /tmp/pg-upgrade-status + + echo "1. Mounting data disk" + if [ -z "$IS_CI" ]; then + retry 8 mount -a -v + else + echo "Skipping mount -a -v" + fi + + # copying custom configurations + echo "2. Copying custom configurations" + retry 3 copy_configs + + echo "3. Starting postgresql" + if [ -z "$IS_CI" ]; then + retry 3 service postgresql start + else + CI_start_postgres --new-bin + fi + + execute_extension_upgrade_patches || true + + echo "4. Running generated SQL files" + retry 3 run_generated_sql + + echo "4.1. Applying patches" + execute_patches || true + + run_sql -c "ALTER USER postgres WITH NOSUPERUSER;" + + echo "4.2. Applying authentication scheme updates" + retry 3 apply_auth_scheme_updates + + sleep 5 + + echo "5. Restarting postgresql" + if [ -z "$IS_CI" ]; then + retry 3 service postgresql restart + + echo "5.1. 
Restarting gotrue and postgrest" + retry 3 service gotrue restart + retry 3 service postgrest restart + else + retry 3 CI_stop_postgres || true + retry 3 CI_start_postgres + fi + + echo "6. Starting vacuum analyze" + retry 3 start_vacuum_analyze +} + +function copy_configs { + cp -R /data/conf/* /etc/postgresql-custom/ + chown -R postgres:postgres /var/lib/postgresql/data + chown -R postgres:postgres /data/pgdata + chmod -R 0750 /data/pgdata +} + +function run_generated_sql { + if [ -d /data/sql ]; then + for FILE in /data/sql/*.sql; do + if [ -f "$FILE" ]; then + run_sql -f "$FILE" || true + fi + done + fi +} + +# Projects which had their passwords hashed using md5 need to have their passwords reset +# Passwords for managed roles are already present in /etc/postgresql.schema.sql +function apply_auth_scheme_updates { + PASSWORD_ENCRYPTION_SETTING=$(run_sql -A -t -c "SHOW password_encryption;") + if [ "$PASSWORD_ENCRYPTION_SETTING" = "md5" ]; then + run_sql -c "ALTER SYSTEM SET password_encryption TO 'scram-sha-256';" + run_sql -c "SELECT pg_reload_conf();" + + if [ -z "$IS_CI" ]; then + run_sql -f /etc/postgresql.schema.sql + fi + fi +} + +function start_vacuum_analyze { + echo "complete" > /tmp/pg-upgrade-status + + # shellcheck disable=SC1091 + if [ -f "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" ]; then + # shellcheck disable=SC1091 + source "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" + fi + vacuumdb --all --analyze-in-stages -U supabase_admin -h localhost -p 5432 + echo "Upgrade job completed" +} + +trap cleanup ERR + +echo "C.UTF-8 UTF-8" > /etc/locale.gen +echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen +locale-gen + +if [ -z "$IS_CI" ]; then + complete_pg_upgrade >> $LOG_FILE 2>&1 & +else + CI_stop_postgres || true + + rm -f /tmp/pg-upgrade-status + mv /data_migration /data + + rm -rf /var/lib/postgresql/data + ln -s /data/pgdata /var/lib/postgresql/data + + complete_pg_upgrade +fi diff --git a/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh b/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh new file mode 100644 index 0000000..4e11f2d --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh @@ -0,0 +1,470 @@ +#! /usr/bin/env bash + +## This script is run on the old (source) instance, mounting the data disk +## of the newly launched instance, disabling extensions containing regtypes, +## and running pg_upgrade. +## It reports the current status of the upgrade process to /tmp/pg-upgrade-status, +## which can then be subsequently checked through check.sh. + +# Extensions to disable before running pg_upgrade. +# Running an upgrade with these extensions enabled will result in errors due to +# them depending on regtypes referencing system OIDs or outdated library files. 
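+# As a reference sketch, which of these extensions are currently installed can
+# be checked over the same supabase_admin connection used throughout this
+# script, e.g.:
+#   psql -h localhost -U supabase_admin -d postgres -A -t \
+#     -c "select extname from pg_extension where extname in ('pg_graphql', 'pg_stat_monitor')"
+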
+EXTENSIONS_TO_DISABLE=( + "pg_graphql" + "pg_stat_monitor" +) + +PG14_EXTENSIONS_TO_DISABLE=( + "wrappers" + "pgrouting" +) + +PG13_EXTENSIONS_TO_DISABLE=( + "pgrouting" +) + +set -eEuo pipefail + +SCRIPT_DIR=$(dirname -- "$0";) +# shellcheck disable=SC1091 +source "$SCRIPT_DIR/common.sh" + +IS_CI=${IS_CI:-} +IS_LOCAL_UPGRADE=${IS_LOCAL_UPGRADE:-} +IS_NIX_UPGRADE=${IS_NIX_UPGRADE:-} +IS_NIX_BASED_SYSTEM="false" + +PGVERSION=$1 +MOUNT_POINT="/data_migration" +LOG_FILE="/var/log/pg-upgrade-initiate.log" + +POST_UPGRADE_EXTENSION_SCRIPT="/tmp/pg_upgrade/pg_upgrade_extensions.sql" +POST_UPGRADE_POSTGRES_PERMS_SCRIPT="/tmp/pg_upgrade/pg_upgrade_postgres_perms.sql" +OLD_PGVERSION=$(run_sql -A -t -c "SHOW server_version;") + +SERVER_LC_COLLATE=$(run_sql -A -t -c "SHOW lc_collate;") +SERVER_LC_CTYPE=$(run_sql -A -t -c "SHOW lc_ctype;") +SERVER_ENCODING=$(run_sql -A -t -c "SHOW server_encoding;") + +POSTGRES_CONFIG_PATH="/etc/postgresql/postgresql.conf" +PGBINOLD="/usr/lib/postgresql/bin" + +PG_UPGRADE_BIN_DIR="/tmp/pg_upgrade_bin/$PGVERSION" +NIX_INSTALLER_PATH="/tmp/persistent/nix-installer" +NIX_INSTALLER_PACKAGE_PATH="$NIX_INSTALLER_PATH.tar.gz" + +if [ -L "$PGBINOLD/pg_upgrade" ]; then + BINARY_PATH=$(readlink -f "$PGBINOLD/pg_upgrade") + if [[ "$BINARY_PATH" == *"nix"* ]]; then + IS_NIX_BASED_SYSTEM="true" + fi +fi + +# If upgrading from older major PG versions, disable specific extensions +if [[ "$OLD_PGVERSION" =~ ^14.* ]]; then + EXTENSIONS_TO_DISABLE+=("${PG14_EXTENSIONS_TO_DISABLE[@]}") +elif [[ "$OLD_PGVERSION" =~ ^13.* ]]; then + EXTENSIONS_TO_DISABLE+=("${PG13_EXTENSIONS_TO_DISABLE[@]}") +elif [[ "$OLD_PGVERSION" =~ ^12.* ]]; then + POSTGRES_CONFIG_PATH="/etc/postgresql/12/main/postgresql.conf" + PGBINOLD="/usr/lib/postgresql/12/bin" +fi + +if [ -n "$IS_CI" ]; then + PGBINOLD="$(pg_config --bindir)" + echo "Running in CI mode; using pg_config bindir: $PGBINOLD" + echo "PGVERSION: $PGVERSION" +fi + +OLD_BOOTSTRAP_USER=$(run_sql -A -t -c "select rolname from pg_authid where oid = 10;") + +cleanup() { + UPGRADE_STATUS=${1:-"failed"} + EXIT_CODE=${?:-0} + + if [ "$UPGRADE_STATUS" = "failed" ]; then + EXIT_CODE=1 + fi + + if [ "$UPGRADE_STATUS" = "failed" ]; then + echo "Upgrade job failed. Cleaning up and exiting." 
+ fi + + if [ -d "${MOUNT_POINT}/pgdata/pg_upgrade_output.d/" ]; then + echo "Copying pg_upgrade output to /var/log" + cp -R "${MOUNT_POINT}/pgdata/pg_upgrade_output.d/" /var/log/ || true + chown -R postgres:postgres /var/log/pg_upgrade_output.d/ + chmod -R 0750 /var/log/pg_upgrade_output.d/ + ship_logs "$LOG_FILE" || true + tail -n +1 /var/log/pg_upgrade_output.d/*/* > /var/log/pg_upgrade_output.d/pg_upgrade.log || true + ship_logs "/var/log/pg_upgrade_output.d/pg_upgrade.log" || true + fi + + if [ -L "/usr/share/postgresql/${PGVERSION}" ]; then + rm "/usr/share/postgresql/${PGVERSION}" + + if [ -f "/usr/share/postgresql/${PGVERSION}.bak" ]; then + mv "/usr/share/postgresql/${PGVERSION}.bak" "/usr/share/postgresql/${PGVERSION}" + fi + + if [ -d "/usr/share/postgresql/${PGVERSION}.bak" ]; then + mv "/usr/share/postgresql/${PGVERSION}.bak" "/usr/share/postgresql/${PGVERSION}" + fi + fi + + echo "Restarting postgresql" + if [ -z "$IS_CI" ]; then + systemctl enable postgresql + retry 5 systemctl restart postgresql + else + CI_start_postgres + fi + + retry 8 pg_isready -h localhost -U supabase_admin + + echo "Re-enabling extensions" + if [ -f $POST_UPGRADE_EXTENSION_SCRIPT ]; then + retry 5 run_sql -f $POST_UPGRADE_EXTENSION_SCRIPT + fi + + echo "Removing SUPERUSER grant from postgres" + retry 5 run_sql -c "ALTER USER postgres WITH NOSUPERUSER;" + + echo "Resetting postgres database connection limit" + retry 5 run_sql -c "ALTER DATABASE postgres CONNECTION LIMIT -1;" + + echo "Making sure postgres still has access to pg_shadow" + cat << EOF >> $POST_UPGRADE_POSTGRES_PERMS_SCRIPT +DO \$\$ +begin + if exists (select from pg_authid where rolname = 'pg_read_all_data') then + execute('grant pg_read_all_data to postgres'); + end if; +end +\$\$; +grant pg_signal_backend to postgres; +EOF + + if [ -f $POST_UPGRADE_POSTGRES_PERMS_SCRIPT ]; then + retry 5 run_sql -f $POST_UPGRADE_POSTGRES_PERMS_SCRIPT + fi + + if [ -z "$IS_CI" ] && [ -z "$IS_LOCAL_UPGRADE" ]; then + echo "Unmounting data disk from ${MOUNT_POINT}" + retry 3 umount $MOUNT_POINT + fi + echo "$UPGRADE_STATUS" > /tmp/pg-upgrade-status + + if [ -z "$IS_CI" ]; then + exit "$EXIT_CODE" + else + echo "CI run complete with code ${EXIT_CODE}. Exiting." 
+ exit "$EXIT_CODE" + fi +} + +function handle_extensions { + if [ -z "$IS_CI" ]; then + retry 5 systemctl restart postgresql + else + CI_start_postgres + fi + + retry 8 pg_isready -h localhost -U supabase_admin + + rm -f $POST_UPGRADE_EXTENSION_SCRIPT + touch $POST_UPGRADE_EXTENSION_SCRIPT + + PASSWORD_ENCRYPTION_SETTING=$(run_sql -A -t -c "SHOW password_encryption;") + if [ "$PASSWORD_ENCRYPTION_SETTING" = "md5" ]; then + echo "ALTER SYSTEM SET password_encryption = 'md5';" >> $POST_UPGRADE_EXTENSION_SCRIPT + fi + + cat << EOF >> $POST_UPGRADE_EXTENSION_SCRIPT +ALTER SYSTEM SET jit = off; +SELECT pg_reload_conf(); +EOF + + # Disable extensions if they're enabled + # Generate SQL script to re-enable them after upgrade + for EXTENSION in "${EXTENSIONS_TO_DISABLE[@]}"; do + EXTENSION_ENABLED=$(run_sql -A -t -c "SELECT EXISTS(SELECT 1 FROM pg_extension WHERE extname = '${EXTENSION}');") + if [ "$EXTENSION_ENABLED" = "t" ]; then + echo "Disabling extension ${EXTENSION}" + run_sql -c "DROP EXTENSION IF EXISTS ${EXTENSION} CASCADE;" + cat << EOF >> $POST_UPGRADE_EXTENSION_SCRIPT +DO \$\$ +BEGIN + IF EXISTS (SELECT 1 FROM pg_available_extensions WHERE name = '${EXTENSION}') THEN + CREATE EXTENSION IF NOT EXISTS ${EXTENSION} CASCADE; + END IF; +END; +\$\$; +EOF + fi + done +} + +function initiate_upgrade { + mkdir -p "$MOUNT_POINT" + SHARED_PRELOAD_LIBRARIES=$(cat "$POSTGRES_CONFIG_PATH" | grep shared_preload_libraries | sed "s/shared_preload_libraries =\s\{0,1\}'\(.*\)'.*/\1/") + + # Wrappers officially launched in PG15; PG14 version is incompatible + if [[ "$OLD_PGVERSION" =~ 14* ]]; then + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/wrappers//" | xargs) + fi + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/pg_cron//" | xargs) + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/pg_net//" | xargs) + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/check_role_membership//" | xargs) + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/safeupdate//" | xargs) + + # Exclude empty-string entries, as well as leading/trailing commas and spaces resulting from the above lib exclusions + # i.e. " , pg_stat_statements, , pgsodium, " -> "pg_stat_statements, pgsodium" + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | tr ',' ' ' | tr -s ' ' | tr ' ' ', ') + + # Account for trailing comma + # eg. "...,auto_explain,pg_tle,plan_filter," -> "...,auto_explain,pg_tle,plan_filter" + if [[ "${SHARED_PRELOAD_LIBRARIES: -1}" = "," ]]; then + # clean up trailing comma + SHARED_PRELOAD_LIBRARIES=$(echo "$SHARED_PRELOAD_LIBRARIES" | sed "s/.$//" | xargs) + fi + + PGDATAOLD=$(cat "$POSTGRES_CONFIG_PATH" | grep data_directory | sed "s/data_directory = '\(.*\)'.*/\1/") + + PGDATANEW="$MOUNT_POINT/pgdata" + + # running upgrade using at least 1 cpu core + WORKERS=$(nproc | awk '{ print ($1 == 1 ? 1 : $1 - 1) }') + + # To make nix-based upgrades work for testing, create a pg binaries tarball with the following contents: + # - nix_flake_version - a7189a68ed4ea78c1e73991b5f271043636cf074 + # Where the value is the commit hash of the nix flake that contains the binaries + + if [ -n "$IS_LOCAL_UPGRADE" ]; then + mkdir -p "$PG_UPGRADE_BIN_DIR" + mkdir -p /tmp/persistent/ + echo "a7189a68ed4ea78c1e73991b5f271043636cf074" > "$PG_UPGRADE_BIN_DIR/nix_flake_version" + tar -czf "/tmp/persistent/pg_upgrade_bin.tar.gz" -C "/tmp/pg_upgrade_bin" . + rm -rf /tmp/pg_upgrade_bin/ + fi + + echo "1. 
Extracting pg_upgrade binaries"
+  mkdir -p "/tmp/pg_upgrade_bin"
+  tar zxf "/tmp/persistent/pg_upgrade_bin.tar.gz" -C "/tmp/pg_upgrade_bin"
+
+  PGSHARENEW="$PG_UPGRADE_BIN_DIR/share"
+
+  if [ -f "$PG_UPGRADE_BIN_DIR/nix_flake_version" ]; then
+    IS_NIX_UPGRADE="true"
+    NIX_FLAKE_VERSION=$(cat "$PG_UPGRADE_BIN_DIR/nix_flake_version")
+
+    if [ "$IS_NIX_BASED_SYSTEM" = "false" ]; then
+      if [ ! -f /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]; then
+        if ! command -v nix > /dev/null; then
+          echo "1.1. Nix is not installed; installing."
+
+          if [ -f "$NIX_INSTALLER_PACKAGE_PATH" ]; then
+            echo "1.1.1. Installing Nix using the provided installer"
+            tar -xzf "$NIX_INSTALLER_PACKAGE_PATH" -C /tmp/persistent/
+            chmod +x "$NIX_INSTALLER_PATH"
+            "$NIX_INSTALLER_PATH" install --no-confirm \
+              --extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \
+              --extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
+          else
+            echo "1.1.1. Installing Nix using the official installer"
+
+            curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install --no-confirm \
+              --extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \
+              --extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
+          fi
+        else
+          echo "1.1. Nix is installed; moving on."
+        fi
+      fi
+    fi
+
+    echo "1.2. Installing flake revision: $NIX_FLAKE_VERSION"
+    # shellcheck disable=SC1091
+    source /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh
+    nix-collect-garbage -d > /tmp/pg_upgrade-nix-gc.log 2>&1 || true
+    PG_UPGRADE_BIN_DIR=$(nix build "github:supabase/postgres/${NIX_FLAKE_VERSION}#psql_15/bin" --no-link --print-out-paths --extra-experimental-features nix-command --extra-experimental-features flakes)
+    PGSHARENEW="$PG_UPGRADE_BIN_DIR/share/postgresql"
+  fi
+
+  PGBINNEW="$PG_UPGRADE_BIN_DIR/bin"
+  PGLIBNEW="$PG_UPGRADE_BIN_DIR/lib"
+
+  # copy upgrade-specific pgsodium_getkey script into the share dir
+  chmod +x "$SCRIPT_DIR/pgsodium_getkey.sh"
+  mkdir -p "$PGSHARENEW/extension"
+  cp "$SCRIPT_DIR/pgsodium_getkey.sh" "$PGSHARENEW/extension/pgsodium_getkey"
+  if [ -d "/var/lib/postgresql/extension/" ]; then
+    cp "$SCRIPT_DIR/pgsodium_getkey.sh" "/var/lib/postgresql/extension/pgsodium_getkey"
+    chown postgres:postgres "/var/lib/postgresql/extension/pgsodium_getkey"
+  fi
+
+  chown -R postgres:postgres "/tmp/pg_upgrade_bin/$PGVERSION"
+
+  # upgrade job outputs a log in the cwd; needs write permissions
+  mkdir -p /tmp/pg_upgrade/
+  chown -R postgres:postgres /tmp/pg_upgrade/
+  cd /tmp/pg_upgrade/
+
+  # Fixing errors generated by previous dpkg executions (package upgrades etc.)
+  echo "2. Fixing potential errors generated by dpkg"
+  DEBIAN_FRONTEND=noninteractive dpkg --configure -a --force-confold || true # handle errors generated by dpkg
+
+  # Needed for PostGIS, since it's compiled with Protobuf-C support now
+  echo "3. Installing libprotobuf-c1 and libicu66 if missing"
+  if [[ ! "$(apt list --installed libprotobuf-c1 | grep "installed")" ]]; then
+    apt-get update -y
+    apt --fix-broken install -y libprotobuf-c1 libicu66 || true
+  fi
+
+  echo "4. Set up locale if required"
+  if ! grep -q "^en_US.UTF-8" /etc/locale.gen ; then
+    echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen
+  fi
+  if !
grep -q "^C.UTF-8" /etc/locale.gen ; then + echo "C.UTF-8 UTF-8" >> /etc/locale.gen + fi + locale-gen + + if [ -z "$IS_CI" ] && [ -z "$IS_LOCAL_UPGRADE" ]; then + # awk NF==3 prints lines with exactly 3 fields, which are the block devices currently not mounted anywhere + # excluding nvme0 since it is the root disk + echo "5. Determining block device to mount" + BLOCK_DEVICE=$(lsblk -dprno name,size,mountpoint,type | grep "disk" | grep -v "nvme0" | awk 'NF==3 { print $1; }') + echo "Block device found: $BLOCK_DEVICE" + + mkdir -p "$MOUNT_POINT" + echo "6. Mounting block device" + + sleep 5 + e2fsck -pf "$BLOCK_DEVICE" + + sleep 1 + mount "$BLOCK_DEVICE" "$MOUNT_POINT" + + sleep 1 + resize2fs "$BLOCK_DEVICE" + else + mkdir -p "$MOUNT_POINT" + fi + + if [ -f "$MOUNT_POINT/pgsodium_root.key" ]; then + cp "$MOUNT_POINT/pgsodium_root.key" /etc/postgresql-custom/pgsodium_root.key + chown postgres:postgres /etc/postgresql-custom/pgsodium_root.key + chmod 600 /etc/postgresql-custom/pgsodium_root.key + fi + + echo "7. Disabling extensions and generating post-upgrade script" + handle_extensions + + echo "8.1. Granting SUPERUSER to postgres user" + run_sql -c "ALTER USER postgres WITH SUPERUSER;" + + if [ "$OLD_BOOTSTRAP_USER" = "postgres" ]; then + echo "8.2. Swap postgres & supabase_admin roles as we're upgrading a project with postgres as bootstrap user" + swap_postgres_and_supabase_admin + fi + + if [ -z "$IS_NIX_UPGRADE" ]; then + if [ -d "/usr/share/postgresql/${PGVERSION}" ]; then + mv "/usr/share/postgresql/${PGVERSION}" "/usr/share/postgresql/${PGVERSION}.bak" + fi + + ln -s "$PGSHARENEW" "/usr/share/postgresql/${PGVERSION}" + cp --remove-destination "$PGLIBNEW"/*.control "$PGSHARENEW/extension/" + cp --remove-destination "$PGLIBNEW"/*.sql "$PGSHARENEW/extension/" + + export LD_LIBRARY_PATH="${PGLIBNEW}" + fi + + echo "9. Creating new data directory, initializing database" + chown -R postgres:postgres "$MOUNT_POINT/" + rm -rf "${PGDATANEW:?}/" + + if [ "$IS_NIX_UPGRADE" = "true" ]; then + LC_ALL=en_US.UTF-8 LC_CTYPE=$SERVER_LC_CTYPE LC_COLLATE=$SERVER_LC_COLLATE LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LOCALE_ARCHIVE=/usr/lib/locale/locale-archive su -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && $PGBINNEW/initdb --encoding=$SERVER_ENCODING --lc-collate=$SERVER_LC_COLLATE --lc-ctype=$SERVER_LC_CTYPE -L $PGSHARENEW -D $PGDATANEW/ --username=supabase_admin" -s "$SHELL" postgres + else + su -c "$PGBINNEW/initdb -L $PGSHARENEW -D $PGDATANEW/ --username=supabase_admin" -s "$SHELL" postgres + fi + + # This line avoids the need to supply the supabase_admin password on the old + # instance, since pg_upgrade connects to the db as supabase_admin using unix + # sockets, which is gated behind scram-sha-256 per pg_hba.conf.j2. The new + # instance is unaffected. + if ! 
grep -q "local all supabase_admin trust" /etc/postgresql/pg_hba.conf; then + echo "local all supabase_admin trust +$(cat /etc/postgresql/pg_hba.conf)" > /etc/postgresql/pg_hba.conf + run_sql -c "select pg_reload_conf();" + fi + + UPGRADE_COMMAND=$(cat < /tmp/pg-upgrade-status +if [ -z "$IS_CI" ] && [ -z "$IS_LOCAL_UPGRADE" ]; then + initiate_upgrade >> "$LOG_FILE" 2>&1 & + echo "Upgrade initiate job completed" +else + rm -f /tmp/pg-upgrade-status + initiate_upgrade +fi diff --git a/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/pgsodium_getkey.sh b/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/pgsodium_getkey.sh new file mode 100644 index 0000000..5a5a90e --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/pgsodium_getkey.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +set -euo pipefail + +KEY_FILE=/etc/postgresql-custom/pgsodium_root.key + +# if key file doesn't exist (project previously didn't use pgsodium), generate a new key +if [[ ! -f "${KEY_FILE}" ]]; then + head -c 32 /dev/urandom | od -A n -t x1 | tr -d ' \n' > $KEY_FILE +fi + +cat $KEY_FILE diff --git a/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/prepare.sh b/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/prepare.sh new file mode 100644 index 0000000..7d7eb98 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/admin_api_scripts/pg_upgrade_scripts/prepare.sh @@ -0,0 +1,15 @@ +#! /usr/bin/env bash +## This script is runs in advance of the database version upgrade, on the newly +## launched instance which will eventually be promoted to become the primary +## database instance once the upgrade successfully completes, terminating the +## previous (source) instance. +## The following commands safely stop the Postgres service and unmount +## the data disk off the newly launched instance, to be re-attached to the +## source instance and run the upgrade there. 
+ +set -euo pipefail + +systemctl stop postgresql + +cp /etc/postgresql-custom/pgsodium_root.key /data/pgsodium_root.key +umount /data diff --git a/postgres_15.8.1.044/ansible/files/adminapi.service.j2 b/postgres_15.8.1.044/ansible/files/adminapi.service.j2 new file mode 100644 index 0000000..6078f3d --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/adminapi.service.j2 @@ -0,0 +1,13 @@ +[Unit] +Description=AdminAPI + +[Service] +Type=simple +ExecStart=/opt/supabase-admin-api +User=adminapi +Restart=always +RestartSec=3 +Environment="AWS_USE_DUALSTACK_ENDPOINT=true" + +[Install] +WantedBy=multi-user.target diff --git a/postgres_15.8.1.044/ansible/files/adminapi.sudoers.conf b/postgres_15.8.1.044/ansible/files/adminapi.sudoers.conf new file mode 100644 index 0000000..ae55377 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/adminapi.sudoers.conf @@ -0,0 +1,30 @@ +Cmnd_Alias ENVOY = /bin/systemctl start envoy.service, /bin/systemctl stop envoy.service, /bin/systemctl restart envoy.service, /bin/systemctl disable envoy.service, /bin/systemctl enable envoy.service, /bin/systemctl reload envoy.service, /bin/systemctl try-restart envoy.service +Cmnd_Alias KONG = /bin/systemctl start kong.service, /bin/systemctl stop kong.service, /bin/systemctl restart kong.service, /bin/systemctl disable kong.service, /bin/systemctl enable kong.service, /bin/systemctl reload kong.service, /bin/systemctl try-restart kong.service +Cmnd_Alias POSTGREST = /bin/systemctl start postgrest.service, /bin/systemctl stop postgrest.service, /bin/systemctl restart postgrest.service, /bin/systemctl disable postgrest.service, /bin/systemctl enable postgrest.service, /bin/systemctl try-restart postgrest.service +Cmnd_Alias GOTRUE = /bin/systemctl start gotrue.service, /bin/systemctl stop gotrue.service, /bin/systemctl restart gotrue.service, /bin/systemctl disable gotrue.service, /bin/systemctl enable gotrue.service, /bin/systemctl try-restart gotrue.service +Cmnd_Alias PGBOUNCER = /bin/systemctl start pgbouncer.service, /bin/systemctl stop pgbouncer.service, /bin/systemctl restart pgbouncer.service, /bin/systemctl disable pgbouncer.service, /bin/systemctl enable pgbouncer.service, /bin/systemctl reload pgbouncer.service, /bin/systemctl try-restart pgbouncer.service + +%adminapi ALL= NOPASSWD: /root/grow_fs.sh +%adminapi ALL= NOPASSWD: /root/manage_readonly_mode.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/prepare.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/initiate.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/complete.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/check.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/common.sh +%adminapi ALL= NOPASSWD: /etc/adminapi/pg_upgrade_scripts/pgsodium_getkey.sh +%adminapi ALL= NOPASSWD: /usr/bin/systemctl daemon-reload +%adminapi ALL= NOPASSWD: /usr/bin/systemctl reload postgresql.service +%adminapi ALL= NOPASSWD: /usr/bin/systemctl restart postgresql.service +%adminapi ALL= NOPASSWD: /usr/bin/systemctl show -p NRestarts postgresql.service +%adminapi ALL= NOPASSWD: /usr/bin/systemctl restart adminapi.service +%adminapi ALL= NOPASSWD: /usr/bin/systemctl is-active commence-backup.service +%adminapi ALL= NOPASSWD: /usr/bin/systemctl start commence-backup.service +%adminapi ALL= NOPASSWD: /bin/systemctl daemon-reload +%adminapi ALL= NOPASSWD: /bin/systemctl restart services.slice +%adminapi ALL= NOPASSWD: /usr/sbin/nft -f /etc/nftables/supabase_managed.conf +%adminapi ALL= NOPASSWD: /usr/bin/admin-mgr 
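+# The Cmnd_Alias groups defined above expand here; for example, membership in
+# %adminapi permits: sudo /bin/systemctl restart envoy.service (via ENVOY).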
+%adminapi ALL= NOPASSWD: ENVOY +%adminapi ALL= NOPASSWD: KONG +%adminapi ALL= NOPASSWD: POSTGREST +%adminapi ALL= NOPASSWD: GOTRUE +%adminapi ALL= NOPASSWD: PGBOUNCER diff --git a/postgres_15.8.1.044/ansible/files/ansible-pull.service b/postgres_15.8.1.044/ansible/files/ansible-pull.service new file mode 100644 index 0000000..3e061b3 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/ansible-pull.service @@ -0,0 +1,20 @@ +[Unit] +Description=Ansible pull + +[Service] +Type=simple +User=ubuntu + +ExecStart=/usr/bin/ansible-pull --private-key "$SSH_READ_KEY_FILE" -U "$REPO" --accept-host-key -t "$REGION,db-all" -i localhost --clean --full "$PLAYBOOK" -v -o -C "$REPO_BRANCH" + +# --verify-commit +# temporarily disable commit verification, while we figure out how we want to balance commit signatures +# and PR reviews; an --ff-only merge option would have allowed us to use this pretty nicely + +MemoryAccounting=true +MemoryMax=30% + +StandardOutput=append:/var/log/ansible-pull.stdout +StandardError=append:/var/log/ansible-pull.error + +TimeoutStopSec=600 diff --git a/postgres_15.8.1.044/ansible/files/ansible-pull.timer b/postgres_15.8.1.044/ansible/files/ansible-pull.timer new file mode 100644 index 0000000..27ce24b --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/ansible-pull.timer @@ -0,0 +1,11 @@ +[Unit] +Description=Run ansible roughly every 3 hours + +[Timer] +OnBootSec=1h +OnUnitActiveSec=3h +RandomizedDelaySec=1h +Persistent=true + +[Install] +WantedBy=timers.target diff --git a/postgres_15.8.1.044/ansible/files/apt_periodic b/postgres_15.8.1.044/ansible/files/apt_periodic new file mode 100644 index 0000000..7587020 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/apt_periodic @@ -0,0 +1,4 @@ +APT::Periodic::Update-Package-Lists "1"; +APT::Periodic::Download-Upgradeable-Packages "1"; +APT::Periodic::AutocleanInterval "7"; +APT::Periodic::Unattended-Upgrade "1"; \ No newline at end of file diff --git a/postgres_15.8.1.044/ansible/files/commence-backup.service.j2 b/postgres_15.8.1.044/ansible/files/commence-backup.service.j2 new file mode 100644 index 0000000..9d4ad0c --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/commence-backup.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=Async commence physical backup + +[Service] +Type=simple +User=adminapi +ExecStart=/usr/bin/admin-mgr commence-backup --run-as-service true +Restart=no +OOMScoreAdjust=-1000 + +[Install] +WantedBy=multi-user.target diff --git a/postgres_15.8.1.044/ansible/files/cron.deny b/postgres_15.8.1.044/ansible/files/cron.deny new file mode 100644 index 0000000..3b5199b --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/cron.deny @@ -0,0 +1,2 @@ +ubuntu +postgres diff --git a/postgres_15.8.1.044/ansible/files/database-optimizations.service.j2 b/postgres_15.8.1.044/ansible/files/database-optimizations.service.j2 new file mode 100644 index 0000000..f25fc09 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/database-optimizations.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=Postgresql optimizations + +[Service] +Type=oneshot +# we do not want failures from these commands to cause downstream service startup to fail +ExecStart=-/opt/supabase-admin-api optimize db --destination-config-file-path /etc/postgresql-custom/generated-optimizations.conf +ExecStart=-/opt/supabase-admin-api optimize pgbouncer --destination-config-file-path /etc/pgbouncer-custom/generated-optimizations.ini +User=adminapi + +[Install] +WantedBy=multi-user.target diff --git a/postgres_15.8.1.044/ansible/files/default.sysstat
b/postgres_15.8.1.044/ansible/files/default.sysstat new file mode 100644 index 0000000..1b029ba --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/default.sysstat @@ -0,0 +1,9 @@ +# +# Default settings for /etc/init.d/sysstat, /etc/cron.d/sysstat +# and /etc/cron.daily/sysstat files +# + +# Should sadc collect system activity information? Valid values +# are "true" and "false". Please do not put other values, they +# will be overwritten by debconf! +ENABLED="true" diff --git a/postgres_15.8.1.044/ansible/files/envoy.service b/postgres_15.8.1.044/ansible/files/envoy.service new file mode 100644 index 0000000..d739ffd --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/envoy.service @@ -0,0 +1,31 @@ +[Unit] +Description=Envoy +After=postgrest.service gotrue.service adminapi.service +Wants=postgrest.service gotrue.service adminapi.service +Conflicts=kong.service + +[Service] +Type=simple + +ExecStartPre=sh -c 'if ss -lnt | grep -Eq ":(80|443) "; then echo "Port 80 or 443 already in use"; exit 1; fi' + +# Need to run via a restarter script to support hot restart when using a process +# manager, see: +# https://www.envoyproxy.io/docs/envoy/latest/operations/hot_restarter +ExecStart=/opt/envoy-hot-restarter.py /opt/start-envoy.sh + +ExecReload=/bin/kill -HUP $MAINPID +ExecStop=/bin/kill -TERM $MAINPID +User=envoy +Slice=services.slice +Restart=always +RestartSec=3 +LimitNOFILE=100000 + +# The envoy user is unprivileged and thus not permitted to bind on ports < 1024 +# Via systemd we grant the process a set of privileges to bind to 80/443 +# See http://archive.vn/36zJU +AmbientCapabilities=CAP_NET_BIND_SERVICE + +[Install] +WantedBy=multi-user.target diff --git a/postgres_15.8.1.044/ansible/files/envoy_config/cds.yaml b/postgres_15.8.1.044/ansible/files/envoy_config/cds.yaml new file mode 100644 index 0000000..48fd1b9 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/envoy_config/cds.yaml @@ -0,0 +1,86 @@ +resources: + - '@type': type.googleapis.com/envoy.config.cluster.v3.Cluster + name: admin_api + load_assignment: + cluster_name: admin_api + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8085 + circuit_breakers: + thresholds: + - priority: DEFAULT + max_connections: 10000 + max_pending_requests: 10000 + max_requests: 10000 + retry_budget: + budget_percent: + value: 100 + min_retry_concurrency: 100 + - '@type': type.googleapis.com/envoy.config.cluster.v3.Cluster + name: gotrue + load_assignment: + cluster_name: gotrue + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 9999 + circuit_breakers: + thresholds: + - priority: DEFAULT + max_connections: 10000 + max_pending_requests: 10000 + max_requests: 10000 + retry_budget: + budget_percent: + value: 100 + min_retry_concurrency: 100 + - '@type': type.googleapis.com/envoy.config.cluster.v3.Cluster + name: postgrest + load_assignment: + cluster_name: postgrest + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 3000 + circuit_breakers: + thresholds: + - priority: DEFAULT + max_connections: 10000 + max_pending_requests: 10000 + max_requests: 10000 + retry_budget: + budget_percent: + value: 100 + min_retry_concurrency: 100 + - '@type': type.googleapis.com/envoy.config.cluster.v3.Cluster + name: postgrest_admin + load_assignment: + cluster_name: postgrest_admin + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 3001
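+    # Each cluster above repeats the circuit-breaker block that follows: thresholds of 10000 with a 100% retry budget, which effectively defers connection limiting to the overload manager configured in envoy.yaml.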
+ circuit_breakers: + thresholds: + - priority: DEFAULT + max_connections: 10000 + max_pending_requests: 10000 + max_requests: 10000 + retry_budget: + budget_percent: + value: 100 + min_retry_concurrency: 100 + diff --git a/postgres_15.8.1.044/ansible/files/envoy_config/envoy.yaml b/postgres_15.8.1.044/ansible/files/envoy_config/envoy.yaml new file mode 100644 index 0000000..3d25c13 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/envoy_config/envoy.yaml @@ -0,0 +1,23 @@ +dynamic_resources: + cds_config: + path_config_source: + path: /etc/envoy/cds.yaml + resource_api_version: V3 + lds_config: + path_config_source: + path: /etc/envoy/lds.yaml + resource_api_version: V3 +node: + cluster: cluster_0 + id: node_0 +overload_manager: + resource_monitors: + - name: envoy.resource_monitors.global_downstream_max_connections + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.resource_monitors.downstream_connections.v3.DownstreamConnectionsConfig + max_active_downstream_connections: 30000 +stats_config: + stats_matcher: + reject_all: true + diff --git a/postgres_15.8.1.044/ansible/files/envoy_config/lds.supabase.yaml b/postgres_15.8.1.044/ansible/files/envoy_config/lds.supabase.yaml new file mode 100644 index 0000000..40d3d46 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/envoy_config/lds.supabase.yaml @@ -0,0 +1,455 @@ +resources: + - '@type': type.googleapis.com/envoy.config.listener.v3.Listener + name: http_listener + address: + socket_address: + address: '::' + port_value: 80 + ipv4_compat: true + filter_chains: + - filters: &ref_1 + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + - name: envoy.access_loggers.stdout + filter: + status_code_filter: + comparison: + op: GE + value: + default_value: 400 + runtime_key: unused + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.access_loggers.stream.v3.StdoutAccessLog + generate_request_id: false + http_filters: + - name: envoy.filters.http.cors + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.cors.v3.Cors + - name: envoy.filters.http.rbac + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC + rules: + action: DENY + policies: + api_key_missing: + permissions: + - any: true + principals: + - not_id: + or_ids: + ids: + - header: + name: apikey + present_match: true + - header: + name: ':path' + string_match: + contains: apikey= + api_key_not_valid: + permissions: + - any: true + principals: + - not_id: + or_ids: + ids: + - header: + name: apikey + string_match: + exact: anon_key + - header: + name: apikey + string_match: + exact: service_key + - header: + name: apikey + string_match: + exact: supabase_admin_key + - header: + name: ':path' + string_match: + contains: apikey=anon_key + - header: + name: ':path' + string_match: + contains: apikey=service_key + - header: + name: ':path' + string_match: + contains: apikey=supabase_admin_key + origin_protection_key_missing: + permissions: + - any: true + principals: + - not_id: + header: + name: sb-opk + present_match: true + origin_protection_key_not_valid: + permissions: + - any: true + principals: + - not_id: + or_ids: + ids: + - header: + name: sb-opk + string_match: + exact: supabase_origin_protection_key + - name: envoy.filters.http.lua + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + 
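# The two Lua snippets defined below are referenced per-route to strip apikey and empty query parameters from :path, so API keys are not forwarded to the upstream services.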
source_codes: + remove_apikey_and_empty_key_query_parameters: + inline_string: |- + function envoy_on_request(request_handle) + local path = request_handle:headers():get(":path") + request_handle + :headers() + :replace(":path", path:gsub("&=[^&]*", ""):gsub("?=[^&]*$", ""):gsub("?=[^&]*&", "?"):gsub("&apikey=[^&]*", ""):gsub("?apikey=[^&]*$", ""):gsub("?apikey=[^&]*&", "?")) + end + remove_empty_key_query_parameters: + inline_string: |- + function envoy_on_request(request_handle) + local path = request_handle:headers():get(":path") + request_handle + :headers() + :replace(":path", path:gsub("&=[^&]*", ""):gsub("?=[^&]*$", ""):gsub("?=[^&]*&", "?")) + end + - name: envoy.filters.http.compressor.brotli + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + response_direction_config: + common_config: + min_content_length: 100 + content_type: + - application/vnd.pgrst.object+json + - application/vnd.pgrst.array+json + - application/openapi+json + - application/geo+json + - text/csv + - application/vnd.pgrst.plan + - application/vnd.pgrst.object + - application/vnd.pgrst.array + - application/javascript + - application/json + - application/xhtml+xml + - image/svg+xml + - text/css + - text/html + - text/plain + - text/xml + disable_on_etag_header: true + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_compressor_enabled + compressor_library: + name: text_optimized + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.compression.brotli.compressor.v3.Brotli + - name: envoy.filters.http.compressor.gzip + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + response_direction_config: + common_config: + min_content_length: 100 + content_type: + - application/vnd.pgrst.object+json + - application/vnd.pgrst.array+json + - application/openapi+json + - application/geo+json + - text/csv + - application/vnd.pgrst.plan + - application/vnd.pgrst.object + - application/vnd.pgrst.array + - application/javascript + - application/json + - application/xhtml+xml + - image/svg+xml + - text/css + - text/html + - text/plain + - text/xml + disable_on_etag_header: true + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_compressor_enabled + compressor_library: + name: text_optimized + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip + - name: envoy.filters.http.router + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + dynamic_stats: false + local_reply_config: + mappers: + - filter: + and_filter: + filters: + - status_code_filter: + comparison: + value: + default_value: 403 + runtime_key: unused + - header_filter: + header: + name: ':path' + string_match: + prefix: /customer/v1/privileged/ + status_code: 401 + body: + inline_string: Unauthorized + headers_to_add: + - header: + key: WWW-Authenticate + value: Basic realm="Unknown" + - filter: + and_filter: + filters: + - status_code_filter: + comparison: + value: + default_value: 403 + runtime_key: unused + - header_filter: + header: + name: ':path' + string_match: + prefix: /metrics/aggregated + invert_match: true + status_code: 401 + body_format_override: + json_format: + message: >- + `apikey` request header or query parameter is either + missing or invalid. Double check your Supabase `anon` + or `service_role` API key. 
+ hint: '%RESPONSE_CODE_DETAILS%' + json_format_options: + sort_properties: false + merge_slashes: true + route_config: + name: route_config_0 + virtual_hosts: + - name: virtual_host_0 + domains: + - '*' + typed_per_filter_config: + envoy.filters.http.cors: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.cors.v3.CorsPolicy + allow_origin_string_match: + - safe_regex: + regex: \* + allow_methods: GET,HEAD,PUT,PATCH,POST,DELETE,OPTIONS,TRACE,CONNECT + allow_headers: apikey,authorization,x-client-info + max_age: '3600' + routes: + - match: + path: /health + direct_response: + status: 200 + body: + inline_string: Healthy + typed_per_filter_config: &ref_0 + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + - match: + safe_regex: + google_re2: + max_program_size: 150 + regex: >- + /auth/v1/(verify|callback|authorize|sso/saml/(acs|metadata|slo)|\.well-known/(openid-configuration|jwks\.json)) + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: gotrue + regex_rewrite: + pattern: + regex: ^/auth/v1 + substitution: '' + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 35s + typed_per_filter_config: *ref_0 + - match: + prefix: /auth/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: gotrue + prefix_rewrite: / + timeout: 35s + - match: + prefix: /rest/v1/ + query_parameters: + - name: apikey + present_match: true + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: / + timeout: 125s + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_apikey_and_empty_key_query_parameters + - match: + prefix: /rest/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: / + timeout: 125s + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_empty_key_query_parameters + - match: + prefix: /rest-admin/v1/ + query_parameters: + - name: apikey + present_match: true + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest_admin + prefix_rewrite: / + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_apikey_and_empty_key_query_parameters + - match: + prefix: /rest-admin/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest_admin + prefix_rewrite: / + - match: + path: /graphql/v1 + request_headers_to_add: + header: + key: Content-Profile + value: graphql_public + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: /rpc/graphql + timeout: 125s + - match: + prefix: /admin/v1/ + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: / + timeout: 600s + - match: + prefix: /customer/v1/privileged/ + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: /privileged/ + typed_per_filter_config: + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + rbac: + rules: + action: DENY + policies: + basic_auth: + permissions: + - any: true + principals: + - header: + name: authorization + invert_match: true + string_match: + exact: Basic c2VydmljZV9yb2xlOnNlcnZpY2Vfa2V5 + treat_missing_header_as_empty: 
true + - match: + prefix: /metrics/aggregated + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: /supabase-internal/metrics + typed_per_filter_config: + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + rbac: + rules: + action: DENY + policies: + not_private_ip: + permissions: + - any: true + principals: + - not_id: + direct_remote_ip: + address_prefix: 10.0.0.0 + prefix_len: 8 + include_attempt_count_in_response: true + retry_policy: + num_retries: 5 + retry_back_off: + base_interval: 0.1s + max_interval: 1s + retry_on: gateway-error + stat_prefix: ingress_http + - '@type': type.googleapis.com/envoy.config.listener.v3.Listener + name: https_listener + address: + socket_address: + address: '::' + port_value: 443 + ipv4_compat: true + filter_chains: + - filters: *ref_1 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: + filename: /etc/envoy/fullChain.pem + private_key: + filename: /etc/envoy/privKey.pem + diff --git a/postgres_15.8.1.044/ansible/files/envoy_config/lds.yaml b/postgres_15.8.1.044/ansible/files/envoy_config/lds.yaml new file mode 100644 index 0000000..2fc7cae --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/envoy_config/lds.yaml @@ -0,0 +1,436 @@ +resources: + - '@type': type.googleapis.com/envoy.config.listener.v3.Listener + name: http_listener + address: + socket_address: + address: '::' + port_value: 80 + ipv4_compat: true + filter_chains: + - filters: &ref_1 + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + - name: envoy.access_loggers.stdout + filter: + status_code_filter: + comparison: + op: GE + value: + default_value: 400 + runtime_key: unused + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.access_loggers.stream.v3.StdoutAccessLog + generate_request_id: false + http_filters: + - name: envoy.filters.http.cors + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.cors.v3.Cors + - name: envoy.filters.http.rbac + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC + rules: + action: DENY + policies: + api_key_missing: + permissions: + - any: true + principals: + - not_id: + or_ids: + ids: + - header: + name: apikey + present_match: true + - header: + name: ':path' + string_match: + contains: apikey= + api_key_not_valid: + permissions: + - any: true + principals: + - not_id: + or_ids: + ids: + - header: + name: apikey + string_match: + exact: anon_key + - header: + name: apikey + string_match: + exact: service_key + - header: + name: apikey + string_match: + exact: supabase_admin_key + - header: + name: ':path' + string_match: + contains: apikey=anon_key + - header: + name: ':path' + string_match: + contains: apikey=service_key + - header: + name: ':path' + string_match: + contains: apikey=supabase_admin_key + - name: envoy.filters.http.lua + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + source_codes: + remove_apikey_and_empty_key_query_parameters: + inline_string: |- + function envoy_on_request(request_handle) + local path = request_handle:headers():get(":path") + request_handle + :headers() + 
:replace(":path", path:gsub("&=[^&]*", ""):gsub("?=[^&]*$", ""):gsub("?=[^&]*&", "?"):gsub("&apikey=[^&]*", ""):gsub("?apikey=[^&]*$", ""):gsub("?apikey=[^&]*&", "?")) + end + remove_empty_key_query_parameters: + inline_string: |- + function envoy_on_request(request_handle) + local path = request_handle:headers():get(":path") + request_handle + :headers() + :replace(":path", path:gsub("&=[^&]*", ""):gsub("?=[^&]*$", ""):gsub("?=[^&]*&", "?")) + end + - name: envoy.filters.http.compressor.brotli + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + response_direction_config: + common_config: + min_content_length: 100 + content_type: + - application/vnd.pgrst.object+json + - application/vnd.pgrst.array+json + - application/openapi+json + - application/geo+json + - text/csv + - application/vnd.pgrst.plan + - application/vnd.pgrst.object + - application/vnd.pgrst.array + - application/javascript + - application/json + - application/xhtml+xml + - image/svg+xml + - text/css + - text/html + - text/plain + - text/xml + disable_on_etag_header: true + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_compressor_enabled + compressor_library: + name: text_optimized + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.compression.brotli.compressor.v3.Brotli + - name: envoy.filters.http.compressor.gzip + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + response_direction_config: + common_config: + min_content_length: 100 + content_type: + - application/vnd.pgrst.object+json + - application/vnd.pgrst.array+json + - application/openapi+json + - application/geo+json + - text/csv + - application/vnd.pgrst.plan + - application/vnd.pgrst.object + - application/vnd.pgrst.array + - application/javascript + - application/json + - application/xhtml+xml + - image/svg+xml + - text/css + - text/html + - text/plain + - text/xml + disable_on_etag_header: true + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_compressor_enabled + compressor_library: + name: text_optimized + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip + - name: envoy.filters.http.router + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + dynamic_stats: false + local_reply_config: + mappers: + - filter: + and_filter: + filters: + - status_code_filter: + comparison: + value: + default_value: 403 + runtime_key: unused + - header_filter: + header: + name: ':path' + string_match: + prefix: /customer/v1/privileged/ + status_code: 401 + body: + inline_string: Unauthorized + headers_to_add: + - header: + key: WWW-Authenticate + value: Basic realm="Unknown" + - filter: + and_filter: + filters: + - status_code_filter: + comparison: + value: + default_value: 403 + runtime_key: unused + - header_filter: + header: + name: ':path' + string_match: + prefix: /metrics/aggregated + invert_match: true + status_code: 401 + body_format_override: + json_format: + message: >- + `apikey` request header or query parameter is either + missing or invalid. Double check your Supabase `anon` + or `service_role` API key. 
+ hint: '%RESPONSE_CODE_DETAILS%' + json_format_options: + sort_properties: false + merge_slashes: true + route_config: + name: route_config_0 + virtual_hosts: + - name: virtual_host_0 + domains: + - '*' + typed_per_filter_config: + envoy.filters.http.cors: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.cors.v3.CorsPolicy + allow_origin_string_match: + - safe_regex: + regex: \* + allow_methods: GET,HEAD,PUT,PATCH,POST,DELETE,OPTIONS,TRACE,CONNECT + allow_headers: apikey,authorization,x-client-info + max_age: '3600' + routes: + - match: + path: /health + direct_response: + status: 200 + body: + inline_string: Healthy + typed_per_filter_config: &ref_0 + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + - match: + safe_regex: + google_re2: + max_program_size: 150 + regex: >- + /auth/v1/(verify|callback|authorize|sso/saml/(acs|metadata|slo)|\.well-known/(openid-configuration|jwks\.json)) + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: gotrue + regex_rewrite: + pattern: + regex: ^/auth/v1 + substitution: '' + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 35s + typed_per_filter_config: *ref_0 + - match: + prefix: /auth/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: gotrue + prefix_rewrite: / + timeout: 35s + - match: + prefix: /rest/v1/ + query_parameters: + - name: apikey + present_match: true + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: / + timeout: 125s + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_apikey_and_empty_key_query_parameters + - match: + prefix: /rest/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: / + timeout: 125s + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_empty_key_query_parameters + - match: + prefix: /rest-admin/v1/ + query_parameters: + - name: apikey + present_match: true + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest_admin + prefix_rewrite: / + typed_per_filter_config: + envoy.filters.http.lua: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: remove_apikey_and_empty_key_query_parameters + - match: + prefix: /rest-admin/v1/ + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest_admin + prefix_rewrite: / + - match: + path: /graphql/v1 + request_headers_to_add: + header: + key: Content-Profile + value: graphql_public + request_headers_to_remove: + - apikey + - sb-opk + route: + cluster: postgrest + prefix_rewrite: /rpc/graphql + timeout: 125s + - match: + prefix: /admin/v1/ + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: / + timeout: 600s + - match: + prefix: /customer/v1/privileged/ + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: /privileged/ + typed_per_filter_config: + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + rbac: + rules: + action: DENY + policies: + basic_auth: + permissions: + - any: true + principals: + - header: + name: authorization + invert_match: true + string_match: + exact: Basic c2VydmljZV9yb2xlOnNlcnZpY2Vfa2V5 + treat_missing_header_as_empty: 
true + - match: + prefix: /metrics/aggregated + request_headers_to_remove: + - sb-opk + route: + cluster: admin_api + prefix_rewrite: /supabase-internal/metrics + typed_per_filter_config: + envoy.filters.http.rbac: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute + rbac: + rules: + action: DENY + policies: + not_private_ip: + permissions: + - any: true + principals: + - not_id: + direct_remote_ip: + address_prefix: 10.0.0.0 + prefix_len: 8 + include_attempt_count_in_response: true + retry_policy: + num_retries: 5 + retry_back_off: + base_interval: 0.1s + max_interval: 1s + retry_on: gateway-error + stat_prefix: ingress_http + - '@type': type.googleapis.com/envoy.config.listener.v3.Listener + name: https_listener + address: + socket_address: + address: '::' + port_value: 443 + ipv4_compat: true + filter_chains: + - filters: *ref_1 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: + filename: /etc/envoy/fullChain.pem + private_key: + filename: /etc/envoy/privKey.pem + diff --git a/postgres_15.8.1.044/ansible/files/fail2ban_config/fail2ban.service.conf b/postgres_15.8.1.044/ansible/files/fail2ban_config/fail2ban.service.conf new file mode 100644 index 0000000..431d1db --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/fail2ban_config/fail2ban.service.conf @@ -0,0 +1,6 @@ +[Unit] +After=nftables.service +Wants=nftables.service + +[Service] +ExecStartPost=/bin/bash -c "sleep 5 && chmod g+w /var/run/fail2ban/fail2ban.sock" diff --git a/postgres_15.8.1.044/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2 b/postgres_15.8.1.044/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2 new file mode 100644 index 0000000..3a3a52e --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/fail2ban_config/filter-pgbouncer.conf.j2 @@ -0,0 +1,3 @@ +[Definition] +failregex = ^.+@:.+password authentication failed$ +journalmatch = _SYSTEMD_UNIT=pgbouncer.service diff --git a/postgres_15.8.1.044/ansible/files/fail2ban_config/filter-postgresql.conf.j2 b/postgres_15.8.1.044/ansible/files/fail2ban_config/filter-postgresql.conf.j2 new file mode 100644 index 0000000..fd0895a --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/fail2ban_config/filter-postgresql.conf.j2 @@ -0,0 +1,3 @@ +[Definition] +failregex = ^.*,.*,.*,.*,":.*password authentication failed for user.*$ +ignoreregex = ^.*,.*,.*,.*,"127\.0\.0\.1.*password authentication failed for user.*$ \ No newline at end of file diff --git a/postgres_15.8.1.044/ansible/files/fail2ban_config/jail-pgbouncer.conf.j2 b/postgres_15.8.1.044/ansible/files/fail2ban_config/jail-pgbouncer.conf.j2 new file mode 100644 index 0000000..60a9eb3 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/fail2ban_config/jail-pgbouncer.conf.j2 @@ -0,0 +1,7 @@ +[pgbouncer] +enabled = true +port = 6543 +protocol = tcp +filter = pgbouncer +backend = systemd[journalflags=1] +maxretry = 3 diff --git a/postgres_15.8.1.044/ansible/files/fail2ban_config/jail-postgresql.conf.j2 b/postgres_15.8.1.044/ansible/files/fail2ban_config/jail-postgresql.conf.j2 new file mode 100644 index 0000000..a021035 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/fail2ban_config/jail-postgresql.conf.j2 @@ -0,0 +1,8 @@ +[postgresql] +enabled = true +port = 5432 +protocol = tcp +filter = postgresql +logpath = /var/log/postgresql/auth-failures.csv +maxretry = 3 +ignoreip = 192.168.0.0/16 
172.17.1.0/20 diff --git a/postgres_15.8.1.044/ansible/files/fail2ban_config/jail-ssh.conf b/postgres_15.8.1.044/ansible/files/fail2ban_config/jail-ssh.conf new file mode 100644 index 0000000..5476c30 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/fail2ban_config/jail-ssh.conf @@ -0,0 +1,4 @@ +[sshd] + +backend = systemd +mode = aggressive diff --git a/postgres_15.8.1.044/ansible/files/fail2ban_config/jail.local b/postgres_15.8.1.044/ansible/files/fail2ban_config/jail.local new file mode 100644 index 0000000..44e8210 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/fail2ban_config/jail.local @@ -0,0 +1,4 @@ +[DEFAULT] + +banaction = nftables-multiport +banaction_allports = nftables-allports diff --git a/postgres_15.8.1.044/ansible/files/gotrue-optimizations.service.j2 b/postgres_15.8.1.044/ansible/files/gotrue-optimizations.service.j2 new file mode 100644 index 0000000..d9c2f01 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/gotrue-optimizations.service.j2 @@ -0,0 +1,11 @@ +[Unit] +Description=GoTrue (Auth) optimizations + +[Service] +Type=oneshot +# we don't want failures from this command to cause PG startup to fail +ExecStart=/bin/bash -c "/opt/supabase-admin-api optimize auth --destination-config-file-path /etc/gotrue/gotrue.generated.env ; exit 0" +User=postgrest + +[Install] +WantedBy=multi-user.target diff --git a/postgres_15.8.1.044/ansible/files/gotrue.service.j2 b/postgres_15.8.1.044/ansible/files/gotrue.service.j2 new file mode 100644 index 0000000..c1f7f58 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/gotrue.service.j2 @@ -0,0 +1,22 @@ +[Unit] +Description=Gotrue + +[Service] +Type=simple +WorkingDirectory=/opt/gotrue +ExecStart=/opt/gotrue/gotrue +User=gotrue +Restart=always +RestartSec=3 + +MemoryAccounting=true +MemoryMax=50% + +EnvironmentFile=-/etc/gotrue.generated.env +EnvironmentFile=/etc/gotrue.env +EnvironmentFile=-/etc/gotrue.overrides.env + +Slice=services.slice + +[Install] +WantedBy=multi-user.target diff --git a/postgres_15.8.1.044/ansible/files/journald.conf b/postgres_15.8.1.044/ansible/files/journald.conf new file mode 100644 index 0000000..2eb89f9 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/journald.conf @@ -0,0 +1,6 @@ +[Journal] +Storage=persistent +SystemMaxUse=3G +SystemKeepFree=3G +SystemMaxFileSize=200M +ForwardToSyslog=no diff --git a/postgres_15.8.1.044/ansible/files/kong_config/kong.conf.j2 b/postgres_15.8.1.044/ansible/files/kong_config/kong.conf.j2 new file mode 100644 index 0000000..3906757 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/kong_config/kong.conf.j2 @@ -0,0 +1,7 @@ +database = off +declarative_config = /etc/kong/kong.yml + +# plugins defined in the dockerfile +plugins = request-transformer,cors,key-auth,http-log + +proxy_listen = 0.0.0.0:80 reuseport backlog=16384, 0.0.0.0:443 http2 ssl reuseport backlog=16384, [::]:80 reuseport backlog=16384, [::]:443 http2 ssl reuseport backlog=16384 diff --git a/postgres_15.8.1.044/ansible/files/kong_config/kong.env.j2 b/postgres_15.8.1.044/ansible/files/kong_config/kong.env.j2 new file mode 100644 index 0000000..57613fd --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/kong_config/kong.env.j2 @@ -0,0 +1,8 @@ +KONG_NGINX_HTTP_GZIP=on +KONG_NGINX_HTTP_GZIP_COMP_LEVEL=6 +KONG_NGINX_HTTP_GZIP_MIN_LENGTH=256 +KONG_NGINX_HTTP_GZIP_PROXIED=any +KONG_NGINX_HTTP_GZIP_VARY=on +KONG_NGINX_HTTP_GZIP_TYPES=text/plain application/xml application/openapi+json application/json +KONG_PROXY_ERROR_LOG=syslog:server=unix:/dev/log
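+# Kong's proxy error log above goes to the local syslog socket (collected by journald); the admin error log below uses the same target.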
+KONG_ADMIN_ERROR_LOG=syslog:server=unix:/dev/log diff --git a/postgres_15.8.1.044/ansible/files/kong_config/kong.service.j2 b/postgres_15.8.1.044/ansible/files/kong_config/kong.service.j2 new file mode 100644 index 0000000..6a36520 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/kong_config/kong.service.j2 @@ -0,0 +1,28 @@ +[Unit] +Description=Kong server +After=postgrest.service gotrue.service adminapi.service +Wants=postgrest.service gotrue.service adminapi.service +Conflicts=envoy.service + +# Ensures that Kong service is stopped before Envoy service is started +Before=envoy.service + +[Service] +Type=forking +ExecStart=/usr/local/bin/kong start -c /etc/kong/kong.conf +ExecReload=/usr/local/bin/kong reload -c /etc/kong/kong.conf +ExecStop=/usr/local/bin/kong quit +User=kong +EnvironmentFile=/etc/kong/kong.env +Slice=services.slice +Restart=always +RestartSec=3 +LimitNOFILE=100000 + +# The kong user is unprivileged and thus not permitted to bind on ports < 1024 +# Via systemd we grant the process a set of privileges to bind to 80/443 +# See http://archive.vn/36zJU +AmbientCapabilities=CAP_NET_BIND_SERVICE + +[Install] +WantedBy=multi-user.target diff --git a/postgres_15.8.1.044/ansible/files/logind.conf b/postgres_15.8.1.044/ansible/files/logind.conf new file mode 100644 index 0000000..732900f --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/logind.conf @@ -0,0 +1,2 @@ +[Login] +RemoveIPC=no diff --git a/postgres_15.8.1.044/ansible/files/logrotate_config/logrotate-postgres-auth.conf b/postgres_15.8.1.044/ansible/files/logrotate_config/logrotate-postgres-auth.conf new file mode 100644 index 0000000..050210e --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/logrotate_config/logrotate-postgres-auth.conf @@ -0,0 +1,8 @@ +/var/log/postgresql/auth-failures.csv { + size 10M + rotate 5 + compress + delaycompress + notifempty + missingok +} diff --git a/postgres_15.8.1.044/ansible/files/logrotate_config/logrotate-postgres-csv.conf b/postgres_15.8.1.044/ansible/files/logrotate_config/logrotate-postgres-csv.conf new file mode 100644 index 0000000..e5418e8 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/logrotate_config/logrotate-postgres-csv.conf @@ -0,0 +1,11 @@ +/var/log/postgresql/postgresql.csv { + size 50M + rotate 9 + compress + delaycompress + notifempty + missingok + postrotate + sudo -u postgres /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data logrotate + endscript +} diff --git a/postgres_15.8.1.044/ansible/files/logrotate_config/logrotate-postgres.conf b/postgres_15.8.1.044/ansible/files/logrotate_config/logrotate-postgres.conf new file mode 100644 index 0000000..c802320 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/logrotate_config/logrotate-postgres.conf @@ -0,0 +1,9 @@ +/var/log/postgresql/postgresql.log { + size 50M + rotate 3 + copytruncate + delaycompress + compress + notifempty + missingok +} diff --git a/postgres_15.8.1.044/ansible/files/logrotate_config/logrotate-walg.conf b/postgres_15.8.1.044/ansible/files/logrotate_config/logrotate-walg.conf new file mode 100644 index 0000000..49eeb59 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/logrotate_config/logrotate-walg.conf @@ -0,0 +1,9 @@ +/var/log/wal-g/*.log { + size 50M + rotate 3 + copytruncate + delaycompress + compress + notifempty + missingok +} diff --git a/postgres_15.8.1.044/ansible/files/manifest.json b/postgres_15.8.1.044/ansible/files/manifest.json new file mode 100644 index 0000000..3a20e76 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/manifest.json @@ -0,0 +1 @@ 
+{{ vars | to_json }} diff --git a/postgres_15.8.1.044/ansible/files/nginx.service.j2 b/postgres_15.8.1.044/ansible/files/nginx.service.j2 new file mode 100644 index 0000000..872e334 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/nginx.service.j2 @@ -0,0 +1,22 @@ +[Unit] +Description=nginx server +After=postgrest.service gotrue.service adminapi.service +Wants=postgrest.service gotrue.service adminapi.service + +[Service] +Type=forking +ExecStart=/usr/local/nginx/sbin/nginx -c /etc/nginx/nginx.conf +ExecReload=/usr/local/nginx/sbin/nginx -s reload -c /etc/nginx/nginx.conf +ExecStop=/usr/local/nginx/sbin/nginx -s quit +User=nginx +Slice=services.slice +Restart=always +RestartSec=3 +LimitNOFILE=100000 + +# Via systemd we grant the process a set of privileges to bind to 80/443 +# See http://archive.vn/36zJU +AmbientCapabilities=CAP_NET_BIND_SERVICE + +[Install] +WantedBy=multi-user.target diff --git a/postgres_15.8.1.044/ansible/files/permission_check.py b/postgres_15.8.1.044/ansible/files/permission_check.py new file mode 100644 index 0000000..5bf4210 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/permission_check.py @@ -0,0 +1,223 @@ +import subprocess +import json +import sys +import argparse + + +# Expected groups for each user +expected_results = { + "postgres": [ + {"groupname": "postgres", "username": "postgres"}, + {"groupname": "ssl-cert", "username": "postgres"}, + ], + "ubuntu": [ + {"groupname": "adm", "username": "ubuntu"}, + {"groupname": "audio", "username": "ubuntu"}, + {"groupname": "cdrom", "username": "ubuntu"}, + {"groupname": "dialout", "username": "ubuntu"}, + {"groupname": "dip", "username": "ubuntu"}, + {"groupname": "floppy", "username": "ubuntu"}, + {"groupname": "lxd", "username": "ubuntu"}, + {"groupname": "netdev", "username": "ubuntu"}, + {"groupname": "plugdev", "username": "ubuntu"}, + {"groupname": "sudo", "username": "ubuntu"}, + {"groupname": "ubuntu", "username": "ubuntu"}, + {"groupname": "video", "username": "ubuntu"}, + ], + "root": [{"groupname": "root", "username": "root"}], + "daemon": [{"groupname": "daemon", "username": "daemon"}], + "bin": [{"groupname": "bin", "username": "bin"}], + "sys": [{"groupname": "sys", "username": "sys"}], + "sync": [{"groupname": "nogroup", "username": "sync"}], + "games": [{"groupname": "games", "username": "games"}], + "man": [{"groupname": "man", "username": "man"}], + "lp": [{"groupname": "lp", "username": "lp"}], + "mail": [{"groupname": "mail", "username": "mail"}], + "news": [{"groupname": "news", "username": "news"}], + "uucp": [{"groupname": "uucp", "username": "uucp"}], + "proxy": [{"groupname": "proxy", "username": "proxy"}], + "www-data": [{"groupname": "www-data", "username": "www-data"}], + "backup": [{"groupname": "backup", "username": "backup"}], + "list": [{"groupname": "list", "username": "list"}], + "irc": [{"groupname": "irc", "username": "irc"}], + "gnats": [{"groupname": "gnats", "username": "gnats"}], + "nobody": [{"groupname": "nogroup", "username": "nobody"}], + "systemd-network": [ + {"groupname": "systemd-network", "username": "systemd-network"} + ], + "systemd-resolve": [ + {"groupname": "systemd-resolve", "username": "systemd-resolve"} + ], + "systemd-timesync": [ + {"groupname": "systemd-timesync", "username": "systemd-timesync"} + ], + "messagebus": [{"groupname": "messagebus", "username": "messagebus"}], + "ec2-instance-connect": [ + {"groupname": "nogroup", "username": "ec2-instance-connect"} + ], + "sshd": [{"groupname": "nogroup", "username": "sshd"}], + "wal-g": [ + 
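# wal-g is expected in the postgres group in addition to its own group, presumably so it can access postgres-owned WAL and data files.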
{"groupname": "postgres", "username": "wal-g"}, + {"groupname": "wal-g", "username": "wal-g"}, + ], + "pgbouncer": [ + {"groupname": "pgbouncer", "username": "pgbouncer"}, + {"groupname": "postgres", "username": "pgbouncer"}, + {"groupname": "ssl-cert", "username": "pgbouncer"}, + ], + "gotrue": [{"groupname": "gotrue", "username": "gotrue"}], + "envoy": [{"groupname": "envoy", "username": "envoy"}], + "kong": [{"groupname": "kong", "username": "kong"}], + "nginx": [{"groupname": "nginx", "username": "nginx"}], + "vector": [ + {"groupname": "adm", "username": "vector"}, + {"groupname": "postgres", "username": "vector"}, + {"groupname": "systemd-journal", "username": "vector"}, + {"groupname": "vector", "username": "vector"}, + ], + "adminapi": [ + {"groupname": "admin", "username": "adminapi"}, + {"groupname": "adminapi", "username": "adminapi"}, + {"groupname": "envoy", "username": "adminapi"}, + {"groupname": "kong", "username": "adminapi"}, + {"groupname": "pgbouncer", "username": "adminapi"}, + {"groupname": "postgres", "username": "adminapi"}, + {"groupname": "postgrest", "username": "adminapi"}, + {"groupname": "root", "username": "adminapi"}, + {"groupname": "systemd-journal", "username": "adminapi"}, + {"groupname": "vector", "username": "adminapi"}, + {"groupname": "wal-g", "username": "adminapi"}, + ], + "postgrest": [{"groupname": "postgrest", "username": "postgrest"}], + "tcpdump": [{"groupname": "tcpdump", "username": "tcpdump"}], + "systemd-coredump": [ + {"groupname": "systemd-coredump", "username": "systemd-coredump"} + ], +} + + +# This program depends on osquery being installed on the system +# Function to run osquery +def run_osquery(query): + process = subprocess.Popen( + ["osqueryi", "--json", query], stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + output, error = process.communicate() + return output.decode("utf-8") + + +def parse_json(json_str): + try: + return json.loads(json_str) + except json.JSONDecodeError as e: + print("Error decoding JSON:", e) + sys.exit(1) + + +def compare_results(username, query_result): + expected_result = expected_results.get(username) + if expected_result is None: + print(f"No expected result defined for user '{username}'") + sys.exit(1) + + if query_result == expected_result: + print(f"The query result for user '{username}' matches the expected result.") + else: + print( + f"The query result for user '{username}' does not match the expected result." + ) + print("Expected:", expected_result) + print("Got:", query_result) + sys.exit(1) + + +def check_nixbld_users(): + query = """ + SELECT u.username, g.groupname + FROM users u + JOIN user_groups ug ON u.uid = ug.uid + JOIN groups g ON ug.gid = g.gid + WHERE u.username LIKE 'nixbld%'; + """ + query_result = run_osquery(query) + parsed_result = parse_json(query_result) + + for user in parsed_result: + if user["groupname"] != "nixbld": + print( + f"User '{user['username']}' is in group '{user['groupname']}' instead of 'nixbld'." 
+ ) + sys.exit(1) + + print("All nixbld users are in the 'nixbld' group.") + + +def main(): + parser = argparse.ArgumentParser( + prog="Supabase Postgres Artifact Permissions Checker", + description="Checks the Postgres Artifact for the appropriate users and group memberships", + ) + parser.add_argument( + "-q", + "--qemu", + action="store_true", + help="Whether we are checking a QEMU artifact", + ) + args = parser.parse_args() + qemu_artifact = args.qemu or False + + # Define usernames for which you want to compare results + usernames = [ + "postgres", + "ubuntu", + "root", + "daemon", + "bin", + "sys", + "sync", + "games", + "man", + "lp", + "mail", + "news", + "uucp", + "proxy", + "www-data", + "backup", + "list", + "irc", + "gnats", + "nobody", + "systemd-network", + "systemd-resolve", + "systemd-timesync", + "messagebus", + "sshd", + "wal-g", + "pgbouncer", + "gotrue", + "envoy", + "kong", + "nginx", + "vector", + "adminapi", + "postgrest", + "tcpdump", + "systemd-coredump", + ] + if not qemu_artifact: + usernames.append("ec2-instance-connect") + + # Iterate over usernames, run the query, and compare results + for username in usernames: + query = f"SELECT u.username, g.groupname FROM users u JOIN user_groups ug ON u.uid = ug.uid JOIN groups g ON ug.gid = g.gid WHERE u.username = '{username}' ORDER BY g.groupname;" + query_result = run_osquery(query) + parsed_result = parse_json(query_result) + compare_results(username, parsed_result) + + # Check if all nixbld users are in the nixbld group + check_nixbld_users() + + +if __name__ == "__main__": + main() diff --git a/postgres_15.8.1.044/ansible/files/pg_egress_collect.service.j2 b/postgres_15.8.1.044/ansible/files/pg_egress_collect.service.j2 new file mode 100644 index 0000000..7ac04f4 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/pg_egress_collect.service.j2 @@ -0,0 +1,13 @@ +[Unit] +Description=Postgres Egress Collector + +[Service] +Type=simple +ExecStart=/bin/bash -c "tcpdump -s 128 -Q out -nn -tt -vv -p -l 'tcp and (port 5432 or port 6543)' | perl /root/pg_egress_collect.pl" +User=root +Slice=services.slice +Restart=always +RestartSec=3 + +[Install] +WantedBy=multi-user.target diff --git a/postgres_15.8.1.044/ansible/files/pgbouncer_config/pgbouncer.ini.j2 b/postgres_15.8.1.044/ansible/files/pgbouncer_config/pgbouncer.ini.j2 new file mode 100644 index 0000000..e4518c0 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/pgbouncer_config/pgbouncer.ini.j2 @@ -0,0 +1,364 @@ +;;; +;;; PgBouncer configuration file +;;; + +;; database name = connect string +;; +;; connect string params: +;; dbname= host= port= user= password= auth_user= +;; client_encoding= datestyle= timezone= +;; pool_size= reserve_pool= max_db_connections= +;; pool_mode= connect_query= application_name= +[databases] +* = host=localhost auth_user=pgbouncer + +;; foodb over Unix socket +;foodb = + +;; redirect bardb to bazdb on localhost +;bardb = host=localhost dbname=bazdb + +;; access to dest database will go with single user +;forcedb = host=localhost port=300 user=baz password=foo client_encoding=UNICODE datestyle=ISO connect_query='SELECT 1' + +;; use custom pool sizes +;nondefaultdb = pool_size=50 reserve_pool=10 + +;; use auth_user with auth_query if user not present in auth_file +;; auth_user must exist in auth_file +; foodb = auth_user=bar + +;; fallback connect string +;* = host=testserver + +;; User-specific configuration +[users] + +;user1 = pool_mode=transaction max_user_connections=10 + +;; Configuration section +[pgbouncer] + +;;; +;;; 
Administrative settings +;;; + +;logfile = /var/log/pgbouncer.log +pidfile = /var/run/pgbouncer/pgbouncer.pid + +;;; +;;; Where to wait for clients +;;; + +;; IP address or * which means all IPs +listen_addr = * +listen_port = 6543 + +;; Unix socket is also used for -R. +;; On Debian it should be /var/run/postgresql +unix_socket_dir = /tmp +;unix_socket_mode = 0777 +;unix_socket_group = + +;;; +;;; TLS settings for accepting clients +;;; + +;; disable, allow, require, verify-ca, verify-full +;client_tls_sslmode = disable + +;; Path to file that contains trusted CA certs +;client_tls_ca_file = + +;; Private key and cert to present to clients. +;; Required for accepting TLS connections from clients. +;client_tls_key_file = +;client_tls_cert_file = + +;; fast, normal, secure, legacy, +;client_tls_ciphers = fast + +;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3 +;client_tls_protocols = secure + +;; none, auto, legacy +;client_tls_dheparams = auto + +;; none, auto, +;client_tls_ecdhcurve = auto + +;;; +;;; TLS settings for connecting to backend databases +;;; + +;; disable, allow, require, verify-ca, verify-full +;server_tls_sslmode = disable + +;; Path to file that contains trusted CA certs +;server_tls_ca_file = + +;; Private key and cert to present to backend. +;; Needed only if the backend server requires a client cert. +;server_tls_key_file = +;server_tls_cert_file = + +;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3 +;server_tls_protocols = secure + +;; fast, normal, secure, legacy, +;server_tls_ciphers = fast + +;;; +;;; Authentication settings +;;; + +;; any, trust, plain, md5, cert, hba, pam +auth_type = scram-sha-256 +auth_file = /etc/pgbouncer/userlist.txt + +;; Path to HBA-style auth config +;auth_hba_file = + +;; Query to use to fetch password from database. Result +;; must have 2 columns - username and password hash. +auth_query = SELECT * FROM pgbouncer.get_auth($1) + +;;; +;;; Users allowed into database 'pgbouncer' +;;; + +;; comma-separated list of users who are allowed to change settings +admin_users = pgbouncer + +;; comma-separated list of users who are just allowed to use SHOW command +stats_users = pgbouncer + +;;; +;;; Pooler personality questions +;;; + +;; When server connection is released back to pool: +;; session - after client disconnects (default) +;; transaction - after transaction finishes +;; statement - after statement finishes +pool_mode = transaction + +;; Query for cleaning connection immediately after releasing from +;; client. No need to put ROLLBACK here, pgbouncer does not reuse +;; connections where transaction is left open. +;server_reset_query = DISCARD ALL + +;; Whether server_reset_query should run in all pooling modes. If it +;; is off, server_reset_query is used only for session-pooling. +;server_reset_query_always = 0 + +;; Comma-separated list of parameters to ignore when given in startup +;; packet. Newer JDBC versions require the extra_float_digits here. +ignore_startup_parameters = extra_float_digits + +;; When taking an idle server into use, this query is run first. +;server_check_query = select 1 + +;; If server was used more recently than this many seconds ago, +;; skip the check query. With 0, the check may or may not run immediately. +;server_check_delay = 30 + +;; Close servers in session pooling mode after a RECONNECT, RELOAD, +;; etc. when they are idle instead of at the end of the session. +;server_fast_close = 0 + +;; Use as application_name on server. +;application_name_add_host = 0 + +;; Period for updating aggregated stats.
+;stats_period = 60 + +;;; +;;; Connection limits +;;; + +;; Total number of clients that can connect +;max_client_conn = 100 + +;; Default pool size. 20 is a good number when transaction pooling +;; is in use, in session pooling it needs to be the number of +;; max clients you want to handle at any moment +default_pool_size = 15 + +;; Minimum number of server connections to keep in pool. +;min_pool_size = 0 + +; how many additional connections to allow in case of trouble +;reserve_pool_size = 0 + +;; If a client needs to wait more than this many seconds, use reserve +;; pool. +;reserve_pool_timeout = 5 + +;; Maximum number of server connections for a database +;max_db_connections = 0 + +;; Maximum number of server connections for a user +;max_user_connections = 0 + +;; If off, then server connections are reused in LIFO manner +;server_round_robin = 0 + +;;; +;;; Logging +;;; + +;; Syslog settings +;syslog = 0 +;syslog_facility = daemon +;syslog_ident = pgbouncer + +;; log if client connects or server connection is made +;log_connections = 1 + +;; log if and why connection was closed +;log_disconnections = 1 + +;; log error messages pooler sends to clients +;log_pooler_errors = 1 + +;; write aggregated stats into log +;log_stats = 1 + +;; Logging verbosity. Same as -v switch on command line. +;verbose = 0 + +;;; +;;; Timeouts +;;; + +;; Close server connection if it has been connected longer than this. +;server_lifetime = 3600 + +;; Close server connection if it has not been used in this time. Allows +;; cleaning unnecessary connections from the pool after a peak. +;server_idle_timeout = 600 + +;; Cancel connection attempt if connecting to the server takes longer than this. +;server_connect_timeout = 15 + +;; If server login failed (server_connect_timeout or auth failure) +;; then wait this many seconds. +;server_login_retry = 15 + +;; Dangerous. Server connection is closed if query does not return in +;; this time. Should be used to survive network problems, _not_ as +;; statement_timeout. (default: 0) +;query_timeout = 0 + +;; Dangerous. Client connection is closed if the query is not +;; assigned to a server in this time. Should be used to limit the +;; number of queued queries in case of a database or network +;; failure. (default: 120) +;query_wait_timeout = 120 + +;; Dangerous. Client connection is closed if no activity in this +;; time. Should be used to survive network problems. (default: 0) +;client_idle_timeout = 0 + +;; Disconnect clients who have not managed to log in after connecting +;; in this many seconds. +;client_login_timeout = 60 + +;; Clean automatically created database entries (via "*") if they stay +;; unused in this many seconds. +; autodb_idle_timeout = 3600 + +;; Close connections which are in "IDLE in transaction" state longer +;; than this many seconds. +;idle_transaction_timeout = 0 + +;; How long SUSPEND/-R waits for buffer flush before closing +;; connection. +;suspend_timeout = 10 + +;;; +;;; Low-level tuning options +;;; + +;; buffer for streaming packets +;pkt_buf = 4096 + +;; man 2 listen +;listen_backlog = 128 + +;; Max number of pkt_buf to process in one event loop. +;sbuf_loopcnt = 5 + +;; Maximum PostgreSQL protocol packet size. +;max_packet_size = 2147483647 + +;; Set SO_REUSEPORT socket option +;so_reuseport = 0 + +;; networking options, for info: man 7 tcp + +;; Linux: Notify program about new connection only if there is also +;; data received. (Seconds to wait.) On Linux the default is 45, on +;; other OSes 0.
+;tcp_defer_accept = 0 + +;; In-kernel buffer size (Linux default: 4096) +;tcp_socket_buffer = 0 + +;; Whether TCP keepalive should be turned on (0/1) +;tcp_keepalive = 1 + +;; The following options are Linux-specific. They also require +;; tcp_keepalive=1. + +;; Count of keepalive packets +;tcp_keepcnt = 0 + +;; How long the connection can be idle before sending keepalive +;; packets +;tcp_keepidle = 0 + +;; The time between individual keepalive probes +;tcp_keepintvl = 0 + +;; How long transmitted data may remain unacknowledged before the TCP +;; connection is closed (in milliseconds) +;tcp_user_timeout = 0 + +;; DNS lookup caching time +;dns_max_ttl = 15 + +;; DNS zone SOA lookup period +;dns_zone_check_period = 0 + +;; DNS negative result caching time +;dns_nxdomain_ttl = 15 + +;; Custom resolv.conf file, to set custom DNS servers or other options +;; (default: empty = use OS settings) +;resolv_conf = /etc/pgbouncer/resolv.conf + +;;; +;;; Random stuff +;;; + +;; Hackish security feature. Helps against SQL injection: when PQexec +;; is disabled, multi-statement queries cannot be made. +;disable_pqexec = 0 + +;; Config file to use for next RELOAD/SIGHUP +;; By default contains config file from command line. +;conffile + +;; Windows service name to register as. job_name is an alias for +;; service_name, used by some Skytools scripts. +;service_name = pgbouncer +;job_name = pgbouncer + +;; Read additional config from other file +;%include /etc/pgbouncer/pgbouncer-other.ini + +%include /etc/pgbouncer-custom/generated-optimizations.ini +%include /etc/pgbouncer-custom/custom-overrides.ini +%include /etc/pgbouncer-custom/ssl-config.ini diff --git a/postgres_15.8.1.044/ansible/files/pgbouncer_config/pgbouncer.service.j2 b/postgres_15.8.1.044/ansible/files/pgbouncer_config/pgbouncer.service.j2 new file mode 100644 index 0000000..1ec5ea3 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/pgbouncer_config/pgbouncer.service.j2 @@ -0,0 +1,20 @@ +[Unit] +Description=connection pooler for PostgreSQL +Documentation=man:pgbouncer(1) +Documentation=https://www.pgbouncer.org/ +After=network.target +{% if supabase_internal is defined %} +Requires=database-optimizations.service +After=database-optimizations.service +{% endif %} + +[Service] +Type=notify +User=pgbouncer +ExecStart=/usr/local/bin/pgbouncer /etc/pgbouncer/pgbouncer.ini +ExecReload=/bin/kill -HUP $MAINPID +KillSignal=SIGINT +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/postgres_15.8.1.044/ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql b/postgres_15.8.1.044/ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql new file mode 100644 index 0000000..c10ce44 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql @@ -0,0 +1,20 @@ +CREATE USER pgbouncer; + +REVOKE ALL PRIVILEGES ON SCHEMA public FROM pgbouncer; + +CREATE SCHEMA pgbouncer AUTHORIZATION pgbouncer; + +CREATE OR REPLACE FUNCTION pgbouncer.get_auth(p_usename TEXT) +RETURNS TABLE(username TEXT, password TEXT) AS +$$ +BEGIN + RAISE WARNING 'PgBouncer auth request: %', p_usename; + + RETURN QUERY + SELECT usename::TEXT, passwd::TEXT FROM pg_catalog.pg_shadow + WHERE usename = p_usename; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; + +REVOKE ALL ON FUNCTION pgbouncer.get_auth(p_usename TEXT) FROM PUBLIC; +GRANT EXECUTE ON FUNCTION pgbouncer.get_auth(p_usename TEXT) TO pgbouncer; diff --git a/postgres_15.8.1.044/ansible/files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2
b/postgres_15.8.1.044/ansible/files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2
new file mode 100644
index 0000000..d5d2cd4
--- /dev/null
+++ b/postgres_15.8.1.044/ansible/files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2
@@ -0,0 +1,2 @@
+# Directory for PgBouncer sockets, lockfiles and stats tempfiles
+d /run/pgbouncer 2775 pgbouncer postgres - -
\ No newline at end of file
diff --git a/postgres_15.8.1.044/ansible/files/pgsodium_getkey_readonly.sh.j2 b/postgres_15.8.1.044/ansible/files/pgsodium_getkey_readonly.sh.j2
new file mode 100644
index 0000000..e0a7273
--- /dev/null
+++ b/postgres_15.8.1.044/ansible/files/pgsodium_getkey_readonly.sh.j2
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -euo pipefail
+
+KEY_FILE=/etc/postgresql-custom/pgsodium_root.key
+
+# On the hosted platform, the root key is generated and managed for each project.
+# If for some reason the key is missing, we want to fail loudly,
+# rather than generating a new one.
+if [[ ! -f "${KEY_FILE}" ]]; then
+  echo "Key file ${KEY_FILE} does not exist." >&2
+  exit 1
+fi
+cat "${KEY_FILE}"
diff --git a/postgres_15.8.1.044/ansible/files/pgsodium_getkey_urandom.sh.j2 b/postgres_15.8.1.044/ansible/files/pgsodium_getkey_urandom.sh.j2
new file mode 100644
index 0000000..e8039d0
--- /dev/null
+++ b/postgres_15.8.1.044/ansible/files/pgsodium_getkey_urandom.sh.j2
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -euo pipefail
+
+KEY_FILE=/etc/postgresql-custom/pgsodium_root.key
+
+if [[ ! -f "${KEY_FILE}" ]]; then
+  head -c 32 /dev/urandom | od -A n -t x1 | tr -d ' \n' > "${KEY_FILE}"
+fi
+cat "${KEY_FILE}"
diff --git a/postgres_15.8.1.044/ansible/files/postgres_exporter.service.j2 b/postgres_15.8.1.044/ansible/files/postgres_exporter.service.j2
new file mode 100644
index 0000000..0066a76
--- /dev/null
+++ b/postgres_15.8.1.044/ansible/files/postgres_exporter.service.j2
@@ -0,0 +1,14 @@
+[Unit]
+Description=Postgres Exporter
+
+[Service]
+Type=simple
+ExecStart=/opt/postgres_exporter/postgres_exporter --disable-settings-metrics --extend.query-path="/opt/postgres_exporter/queries.yml" --disable-default-metrics --no-collector.locks --no-collector.replication --no-collector.replication_slot --no-collector.stat_bgwriter --no-collector.stat_database --no-collector.stat_user_tables --no-collector.statio_user_tables --no-collector.wal
+User=postgres
+Group=postgres
+Restart=always
+RestartSec=3
+Environment="DATA_SOURCE_NAME=host=localhost dbname=postgres sslmode=disable user=supabase_admin pg_stat_statements.track=none application_name=postgres_exporter"
+
+[Install]
+WantedBy=multi-user.target
diff --git a/postgres_15.8.1.044/ansible/files/postgres_prestart.sh.j2 b/postgres_15.8.1.044/ansible/files/postgres_prestart.sh.j2
new file mode 100644
index 0000000..3ffe54c
--- /dev/null
+++ b/postgres_15.8.1.044/ansible/files/postgres_prestart.sh.j2
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+check_orioledb_enabled() {
+  local pg_conf="/etc/postgresql/postgresql.conf"
+  if [ ! -f "$pg_conf" ]; then
+    # emit a count so callers can do a numeric comparison on our output
+    echo 0
+    return 0
+  fi
+  grep "^shared_preload_libraries" "$pg_conf" | grep -c "orioledb" || return 0
+}
+
+get_shared_buffers() {
+  local opt_conf="/etc/postgresql-custom/generated-optimizations.conf"
-f "$opt_conf" ]; then + return 0 + fi + grep "^shared_buffers = " "$opt_conf" | cut -d "=" -f2 | tr -d ' ' || return 0 +} + +update_orioledb_buffers() { + local pg_conf="/etc/postgresql/postgresql.conf" + local value="$1" + if grep -q "^orioledb.main_buffers = " "$pg_conf"; then + sed -i "s/^orioledb.main_buffers = .*/orioledb.main_buffers = $value/" "$pg_conf" + else + echo "orioledb.main_buffers = $value" >> "$pg_conf" + fi +} + +main() { + local has_orioledb=$(check_orioledb_enabled) + if [ "$has_orioledb" -lt 1 ]; then + return 0 + fi + local shared_buffers_value=$(get_shared_buffers) + if [ ! -z "$shared_buffers_value" ]; then + update_orioledb_buffers "$shared_buffers_value" + fi +} + +# Initial locale setup +if [ $(cat /etc/locale.gen | grep -c en_US.UTF-8) -eq 0 ]; then + echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen +fi + +if [ $(locale -a | grep -c en_US.utf8) -eq 0 ]; then + locale-gen +fi + +main diff --git a/postgres_15.8.1.044/ansible/files/postgresql_config/custom_read_replica.conf.j2 b/postgres_15.8.1.044/ansible/files/postgresql_config/custom_read_replica.conf.j2 new file mode 100644 index 0000000..7d52f92 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_config/custom_read_replica.conf.j2 @@ -0,0 +1,5 @@ +# hot_standby = on +# restore_command = '/usr/bin/admin-mgr wal-fetch %f %p >> /var/log/wal-g/wal-fetch.log 2>&1' +# recovery_target_timeline = 'latest' + +# primary_conninfo = 'host=localhost port=6543 user=replication' diff --git a/postgres_15.8.1.044/ansible/files/postgresql_config/custom_walg.conf.j2 b/postgres_15.8.1.044/ansible/files/postgresql_config/custom_walg.conf.j2 new file mode 100644 index 0000000..7ef7256 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_config/custom_walg.conf.j2 @@ -0,0 +1,21 @@ +# - Archiving - + +#archive_mode = on +#archive_command = '/usr/bin/admin-mgr wal-push %p >> /var/log/wal-g/wal-push.log 2>&1' +#archive_timeout = 120 + + +# - Archive Recovery - + +#restore_command = '/usr/bin/admin-mgr wal-fetch %f %p >> /var/log/wal-g/wal-fetch.log 2>&1' + +# - Recovery Target - + +#recovery_target_lsn = '' +#recovery_target_time = '' +#recovery_target_action = 'promote' +#recovery_target_timeline = 'current' +#recovery_target_inclusive = off + +# - Hot Standby - +hot_standby = off diff --git a/postgres_15.8.1.044/ansible/files/postgresql_config/pg_hba.conf.j2 b/postgres_15.8.1.044/ansible/files/postgresql_config/pg_hba.conf.j2 new file mode 100644 index 0000000..9cafd41 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_config/pg_hba.conf.j2 @@ -0,0 +1,94 @@ +# PostgreSQL Client Authentication Configuration File +# =================================================== +# +# Refer to the "Client Authentication" section in the PostgreSQL +# documentation for a complete description of this file. A short +# synopsis follows. +# +# This file controls: which hosts are allowed to connect, how clients +# are authenticated, which PostgreSQL user names they can use, which +# databases they can access. Records take one of these forms: +# +# local DATABASE USER METHOD [OPTIONS] +# host DATABASE USER ADDRESS METHOD [OPTIONS] +# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# +# (The uppercase items must be replaced by actual values.) 
+# +# The first field is the connection type: "local" is a Unix-domain +# socket, "host" is either a plain or SSL-encrypted TCP/IP socket, +# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a +# non-SSL TCP/IP socket. Similarly, "hostgssenc" uses a +# GSSAPI-encrypted TCP/IP socket, while "hostnogssenc" uses a +# non-GSSAPI socket. +# +# DATABASE can be "all", "sameuser", "samerole", "replication", a +# database name, or a comma-separated list thereof. The "all" +# keyword does not match "replication". Access to replication +# must be enabled in a separate record (see example below). +# +# USER can be "all", a user name, a group name prefixed with "+", or a +# comma-separated list thereof. In both the DATABASE and USER fields +# you can also write a file name prefixed with "@" to include names +# from a separate file. +# +# ADDRESS specifies the set of hosts the record matches. It can be a +# host name, or it is made up of an IP address and a CIDR mask that is +# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that +# specifies the number of significant bits in the mask. A host name +# that starts with a dot (.) matches a suffix of the actual host name. +# Alternatively, you can write an IP address and netmask in separate +# columns to specify the set of hosts. Instead of a CIDR-address, you +# can write "samehost" to match any of the server's own IP addresses, +# or "samenet" to match any address in any subnet that the server is +# directly connected to. +# +# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256", +# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert". +# Note that "password" sends passwords in clear text; "md5" or +# "scram-sha-256" are preferred since they send encrypted passwords. +# +# OPTIONS are a set of options for the authentication in the format +# NAME=VALUE. The available options depend on the different +# authentication methods -- refer to the "Client Authentication" +# section in the documentation for a list of which options are +# available for which authentication methods. +# +# Database and user names containing spaces, commas, quotes and other +# special characters must be quoted. Quoting one of the keywords +# "all", "sameuser", "samerole" or "replication" makes the name lose +# its special character, and just match a database or username with +# that name. +# +# This file is read on server startup and when the server receives a +# SIGHUP signal. If you edit the file on a running system, you have to +# SIGHUP the server for the changes to take effect, run "pg_ctl reload", +# or execute "SELECT pg_reload_conf()". +# +# Put your actual configuration here +# ---------------------------------- +# +# If you want to allow non-local connections, you need to add more +# "host" records. In that case you will also need to make PostgreSQL +# listen on a non-local interface via the listen_addresses +# configuration parameter, or via the -i or -h command line switches. 
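+#
+# Illustrative example (editor's sketch, not part of the upstream template):
+# a record such as
+#
+#   host    mydb    appuser    10.1.2.0/24    scram-sha-256
+#
+# accepts TCP connections to database "mydb" as role "appuser" from the
+# 10.1.2.0/24 network and authenticates them with SCRAM ("mydb", "appuser"
+# and the network here are placeholders). Records are tried top to bottom
+# and the first match decides the method, so specific rules must precede
+# broad ones such as 0.0.0.0/0.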
+ +# TYPE DATABASE USER ADDRESS METHOD + +# trust local connections +local all supabase_admin scram-sha-256 +local all all peer map=supabase_map +host all all 127.0.0.1/32 trust +host all all ::1/128 trust + +# IPv4 external connections +host all all 10.0.0.0/8 scram-sha-256 +host all all 172.16.0.0/12 scram-sha-256 +host all all 192.168.0.0/16 scram-sha-256 +host all all 0.0.0.0/0 scram-sha-256 + +# IPv6 external connections +host all all ::0/0 scram-sha-256 diff --git a/postgres_15.8.1.044/ansible/files/postgresql_config/pg_ident.conf.j2 b/postgres_15.8.1.044/ansible/files/postgresql_config/pg_ident.conf.j2 new file mode 100644 index 0000000..d8891f4 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_config/pg_ident.conf.j2 @@ -0,0 +1,50 @@ +# PostgreSQL User Name Maps +# ========================= +# +# Refer to the PostgreSQL documentation, chapter "Client +# Authentication" for a complete description. A short synopsis +# follows. +# +# This file controls PostgreSQL user name mapping. It maps external +# user names to their corresponding PostgreSQL user names. Records +# are of the form: +# +# MAPNAME SYSTEM-USERNAME PG-USERNAME +# +# (The uppercase quantities must be replaced by actual values.) +# +# MAPNAME is the (otherwise freely chosen) map name that was used in +# pg_hba.conf. SYSTEM-USERNAME is the detected user name of the +# client. PG-USERNAME is the requested PostgreSQL user name. The +# existence of a record specifies that SYSTEM-USERNAME may connect as +# PG-USERNAME. +# +# If SYSTEM-USERNAME starts with a slash (/), it will be treated as a +# regular expression. Optionally this can contain a capture (a +# parenthesized subexpression). The substring matching the capture +# will be substituted for \1 (backslash-one) if present in +# PG-USERNAME. +# +# Multiple maps may be specified in this file and used by pg_hba.conf. +# +# No map names are defined in the default configuration. If all +# system user names and PostgreSQL user names are the same, you don't +# need anything in this file. +# +# This file is read on server startup and when the postmaster receives +# a SIGHUP signal. If you edit the file on a running system, you have +# to SIGHUP the postmaster for the changes to take effect. You can +# use "pg_ctl reload" to do that. + +# Put your actual configuration here +# ---------------------------------- + +# MAPNAME SYSTEM-USERNAME PG-USERNAME +supabase_map postgres postgres +supabase_map root postgres +supabase_map ubuntu postgres + +# supabase-specific users +supabase_map gotrue supabase_auth_admin +supabase_map postgrest authenticator +supabase_map adminapi postgres diff --git a/postgres_15.8.1.044/ansible/files/postgresql_config/postgresql-csvlog.conf b/postgres_15.8.1.044/ansible/files/postgresql_config/postgresql-csvlog.conf new file mode 100644 index 0000000..b8d64da --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_config/postgresql-csvlog.conf @@ -0,0 +1,33 @@ +# - Where to Log - + +log_destination = 'csvlog' # Valid values are combinations of + # stderr, csvlog, syslog, and eventlog, + # depending on platform. csvlog + # requires logging_collector to be on. + +# This is used when logging to stderr: +logging_collector = on # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. 
+ # (change requires restart) + +# These are only used if logging_collector is on: +log_directory = '/var/log/postgresql' # directory where log files are written, + # can be absolute or relative to PGDATA +log_filename = 'postgresql.log' # log file name pattern, + # can include strftime() escapes +log_file_mode = 0640 # creation mode for log files, + # begin with 0 to use octal notation +log_rotation_age = 0 # Automatic rotation of logfiles will + # happen after that time. 0 disables. +log_rotation_size = 0 # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. diff --git a/postgres_15.8.1.044/ansible/files/postgresql_config/postgresql-stdout-log.conf b/postgres_15.8.1.044/ansible/files/postgresql_config/postgresql-stdout-log.conf new file mode 100644 index 0000000..6ae4ff4 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_config/postgresql-stdout-log.conf @@ -0,0 +1,4 @@ +logging_collector = off # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. + # (change requires restart) diff --git a/postgres_15.8.1.044/ansible/files/postgresql_config/postgresql.conf.j2 b/postgres_15.8.1.044/ansible/files/postgresql_config/postgresql.conf.j2 new file mode 100644 index 0000000..6d2df60 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_config/postgresql.conf.j2 @@ -0,0 +1,778 @@ +# ----------------------------- +# PostgreSQL configuration file +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, run "pg_ctl reload", or execute +# "SELECT pg_reload_conf()". Some parameters, which are marked below, +# require a server shutdown and restart to take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". Some parameters can be changed at run time +# with the "SET" SQL command. +# +# Memory units: B = bytes Time units: us = microseconds +# kB = kilobytes ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. 
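+#
+# Illustrative note (editor's addition, not upstream text): starting the
+# server with "postgres -D /var/lib/postgresql/data" makes that directory
+# ConfigDir; this file instead pins data_directory, hba_file and ident_file
+# to absolute paths below, so the configuration can live outside the data
+# directory.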
+ +data_directory = '/var/lib/postgresql/data' # use data in another directory + # (change requires restart) +hba_file = '/etc/postgresql/pg_hba.conf' # host-based authentication file + # (change requires restart) +ident_file = '/etc/postgresql/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. +#external_pid_file = '' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' # what IP address(es) to listen on; + # comma-separated list of addresses; + # defaults to 'localhost'; use '*' for all + # (change requires restart) +#port = 5432 # (change requires restart) +#max_connections = 100 # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +#unix_socket_directories = '/tmp' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - TCP settings - +# see "man tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default +#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; + # 0 selects the system default + +#client_connection_check_interval = 0 # time between checks for client + # disconnection while running queries; + # 0 for never + +# - Authentication - + +authentication_timeout = 1min # 1s-600s +password_encryption = scram-sha-256 # scram-sha-256 or md5 +db_user_namespace = off + +# GSSAPI using Kerberos +#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' +#krb_caseins_users = off + +# - SSL - + +ssl = off +ssl_ca_file = '' +ssl_cert_file = '' +ssl_crl_file = '' +ssl_crl_dir = '' +ssl_key_file = '' +ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers +ssl_prefer_server_ciphers = on +ssl_ecdh_curve = 'prime256v1' +ssl_min_protocol_version = 'TLSv1.2' +ssl_max_protocol_version = '' +ssl_dh_params_file = '' +ssl_passphrase_command = '' +ssl_passphrase_command_supports_reload = off + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 128MB # min 128kB + # (change requires restart) +#huge_pages = try # on, off, or try + # (change requires restart) +#huge_page_size = 0 # zero for system default + # (change requires restart) +#temp_buffers = 8MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. 
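+# Illustrative note (editor's addition, not upstream text): work_mem is
+# allocated per sort/hash operation rather than per connection, so a single
+# complex query can use several multiples of it; a rough worst case is
+#   max_connections * work_mem * sort-or-hash-nodes-per-query
+# so size it against peak concurrency, not total RAM.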
+#work_mem = 4MB # min 64kB +#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem +#maintenance_work_mem = 64MB # min 1MB +#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +#logical_decoding_work_mem = 64MB # min 64kB +#max_stack_depth = 2MB # min 100kB +#shared_memory_type = mmap # the default is the first option + # supported by the operating system: + # mmap + # sysv + # windows + # (change requires restart) +#dynamic_shared_memory_type = posix # the default is the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # (change requires restart) +#min_dynamic_shared_memory = 0MB # (change requires restart) + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kilobytes, or -1 for no limit + +# - Kernel Resources - + +#max_files_per_process = 1000 # min 64 + # (change requires restart) + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables) +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 2 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 0 # measured in pages, 0 disables + +# - Asynchronous Behavior - + +#backend_flush_after = 0 # measured in pages, 0 disables +#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching +#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching +#max_worker_processes = 8 # (change requires restart) +#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers +#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers +#max_parallel_workers = 8 # maximum number of max_worker_processes that + # can be used in parallel operations +#parallel_leader_participation = on +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) + + +#------------------------------------------------------------------------------ +# WRITE-AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +wal_level = logical # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux and FreeBSD) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +#wal_log_hints = off # also do full page writes of non-critical updates + # (change requires restart) +#wal_compression = off # enable compression of full-page writes +#wal_init_zero = on # zero-fill new WAL files +#wal_recycle = on # recycle WAL files +#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables +#wal_skip_threshold = 2MB + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +#checkpoint_timeout = 5min # 
range 30s-1d +checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 +checkpoint_flush_after = 256kB # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables +#max_wal_size = 1GB +#min_wal_size = 80MB + +# - Archiving - + +#archive_mode = off # enables archiving; off, on, or always + # (change requires restart) +#archive_command = '' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a logfile segment switch after this + # number of seconds; 0 disables + +# - Archive Recovery - + +# These are only used in recovery mode. + +#restore_command = '' # command to use to restore an archived logfile segment + # placeholders: %p = path of file to restore + # %f = file name only + # e.g. 'cp /mnt/server/archivedir/%f %p' +#archive_cleanup_command = '' # command to execute at every restartpoint +#recovery_end_command = '' # command to execute at completion of recovery + +# - Recovery Target - + +# Set these only when performing a targeted recovery. + +#recovery_target = '' # 'immediate' to end recovery as soon as a + # consistent state is reached + # (change requires restart) +#recovery_target_name = '' # the named restore point to which recovery will proceed + # (change requires restart) +#recovery_target_time = '' # the time stamp up to which recovery will proceed + # (change requires restart) +#recovery_target_xid = '' # the transaction ID up to which recovery will proceed + # (change requires restart) +#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed + # (change requires restart) +#recovery_target_inclusive = on # Specifies whether to stop: + # just after the specified recovery target (on) + # just before the recovery target (off) + # (change requires restart) +#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID + # (change requires restart) +#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' + # (change requires restart) + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Servers - + +# Set these on the primary and on any standby that will send replication data. + +max_wal_senders = 10 # max number of walsender processes + # (change requires restart) +max_replication_slots = 5 # max number of replication slots + # (change requires restart) +#wal_keep_size = 0 # in megabytes; 0 disables +max_slot_wal_keep_size = 4096 # in megabytes; -1 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Primary Server - + +# These settings are ignored on a standby server. + +#synchronous_standby_names = '' # standby servers that provide sync rep + # method to choose sync standbys, number of sync standbys, + # and comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a primary server. 
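+
+# Illustrative example (editor's sketch, not upstream text): a minimal
+# streaming standby would override the defaults below with something like
+#   primary_conninfo = 'host=10.0.0.1 port=5432 user=replication'
+#   primary_slot_name = 'standby_1'
+# where the host, user and slot name are placeholders.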
+ +#primary_conninfo = '' # connection string to sending server +#primary_slot_name = '' # replication slot on sending server +#promote_trigger_file = '' # file name whose presence ends recovery +#hot_standby = on # "off" disallows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name + # is not set +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from primary + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt +#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery + +# - Subscribers - + +# These settings are ignored on a publisher. + +#max_logical_replication_workers = 4 # taken from max_worker_processes + # (change requires restart) +#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_async_append = on +#enable_bitmapscan = on +#enable_gathermerge = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_incremental_sort = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_resultcache = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_parallel_append = on +#enable_parallel_hash = on +#enable_partition_pruning = on +#enable_partitionwise_join = off +#enable_partitionwise_aggregate = off +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +#random_page_cost = 4.0 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#min_parallel_table_scan_size = 8MB +#min_parallel_index_scan_size = 512kB +effective_cache_size = 128MB + +#jit_above_cost = 100000 # perform JIT compilation if available + # and query more expensive than this; + # -1 disables +#jit_inline_above_cost = 500000 # inline small functions if query is + # more expensive than this; -1 disables +#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if + # query is more expensive than this; + # -1 disables + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +#default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#jit = on # allow JIT compilation 
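+# Illustrative note (editor's addition, not upstream text): the jit_*_cost
+# thresholds under "Planner Cost Constants" above are compared against the
+# planner's total cost estimate, so JIT only engages for expensive queries;
+# raising them (or setting jit = off) avoids compilation overhead on cheap
+# OLTP statements.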
+#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses +#plan_cache_mode = auto # auto, force_generic_plan or + # force_custom_plan + + +#------------------------------------------------------------------------------ +# REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +include = '/etc/postgresql/logging.conf' + +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (Windows): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + +#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements + # and their durations, > 0 logs only a sample of + # statements running at least this number + # of milliseconds; + # sample fraction is determined by log_statement_sample_rate + +#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding + # log_min_duration_sample to be logged; + # 1.0 logs all such statements, 0.0 never logs + + +#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements + # are logged regardless of their duration; 1.0 logs all + # statements from all transactions, 0.0 never logs + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_autovacuum_min_duration = -1 # log autovacuum activity; + # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. +#log_checkpoints = off +#log_connections = off +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +log_line_prefix = '%h %m [%p] %q%u@%d ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %b = backend type + # %p = process ID + # %P = process ID of parallel group leader + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %Q = query ID (0 if none or not computed) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. 
'<%u%%%d> ' +#log_lock_waits = off # log lock waits >= deadlock_timeout +#log_recovery_conflict_waits = off # log standby recovery conflict waits + # >= deadlock_timeout +#log_parameter_max_length = -1 # when logging statements, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_parameter_max_length_on_error = 0 # when logging an error, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +log_statement = 'ddl' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'UTC' + +#------------------------------------------------------------------------------ +# PROCESS TITLE +#------------------------------------------------------------------------------ + +cluster_name = 'main' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# STATISTICS +#------------------------------------------------------------------------------ + +# - Query and Index Statistics Collector - + +#track_activities = on +#track_activity_query_size = 1024 # (change requires restart) +#track_counts = on +#track_io_timing = off +#track_wal_io_timing = off +#track_functions = none # none, pl, all +#stats_temp_directory = 'pg_stat_tmp' + + +# - Monitoring - + +#compute_query_id = auto +#log_statement_stats = off +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM +#------------------------------------------------------------------------------ + +#autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. 
+#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts + # before vacuum; -1 disables insert + # vacuums +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table + # size before insert vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error +#search_path = '"$user", public' # schema names +row_security = on +#default_table_access_method = 'heap' +#default_tablespace = '' # a tablespace name, '' uses the default +#default_toast_compression = 'pglz' # 'pglz' or 'lz4' +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#idle_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_table_age = 150000000 +#vacuum_freeze_min_age = 50000000 +#vacuum_failsafe_age = 1600000000 +#vacuum_multixact_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_failsafe_age = 1600000000 +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_pending_list_limit = 4MB + +# - Locale and Formatting - + +#datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +timezone = 'UTC' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +extra_float_digits = 0 # min -15, max 3; any value >0 actually + # selects precise output mode +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. 
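+# (Illustrative note, editor's addition: the values in effect can be checked
+# at runtime with "SHOW lc_messages;" or
+# "SELECT name, setting FROM pg_settings WHERE name LIKE 'lc%';".)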
+lc_messages = 'en_US.UTF-8' # locale for system error message + # strings +lc_monetary = 'en_US.UTF-8' # locale for monetary formatting +lc_numeric = 'en_US.UTF-8' # locale for number formatting +lc_time = 'en_US.UTF-8' # locale for time formatting + +# default configuration for text search +default_text_search_config = 'pg_catalog.english' + +# - Shared Library Preloading - + +#local_preload_libraries = '' +#session_preload_libraries = '' + +shared_preload_libraries = 'pg_stat_statements, pgaudit, plpgsql, plpgsql_check, pg_cron, pg_net, pgsodium, timescaledb, auto_explain, pg_tle, plan_filter' # (change requires restart) +jit_provider = 'llvmjit' # JIT library to use + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#gin_fuzzy_search_limit = 0 + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +#max_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_relation = -2 # negative values mean + # (max_pred_locks_per_transaction + # / -max_pred_locks_per_relation) - 1 +#max_pred_locks_per_page = 2 # min 0 + + +#------------------------------------------------------------------------------ +# VERSION AND PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#escape_string_warning = on +#lo_compat_privileges = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? +#data_sync_retry = off # retry or panic on failure to fsync + # data? + # (change requires restart) +#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+) + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. Note that these are directives, not variable +# assignments, so they can usefully be given more than once. + +#include_dir = '...' # include files ending in '.conf' from + # a directory, e.g., 'conf.d' +#include_if_exists = '...' # include file only if it exists +#include = '...' 
# include file + +# Automatically generated optimizations +#include = '/etc/postgresql-custom/generated-optimizations.conf' +# User-supplied custom parameters, override any automatically generated ones +#include = '/etc/postgresql-custom/custom-overrides.conf' + +# WAL-G specific configurations +#include = '/etc/postgresql-custom/wal-g.conf' + +# read replica specific configurations +include = '/etc/postgresql-custom/read-replica.conf' + +# supautils specific configurations +#include = '/etc/postgresql-custom/supautils.conf' + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here +auto_explain.log_min_duration = 10s +cron.database_name = 'postgres' diff --git a/postgres_15.8.1.044/ansible/files/postgresql_config/postgresql.service.j2 b/postgres_15.8.1.044/ansible/files/postgresql_config/postgresql.service.j2 new file mode 100644 index 0000000..c056ac4 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_config/postgresql.service.j2 @@ -0,0 +1,25 @@ +[Unit] +Description=PostgreSQL database server +Documentation=man:postgres(1) +{% if supabase_internal is defined %} +Requires=database-optimizations.service +After=database-optimizations.service +{% endif %} + +[Service] +Type=notify +User=postgres +ExecStart=/usr/lib/postgresql/bin/postgres -D /etc/postgresql +ExecStartPre=+/usr/local/bin/postgres_prestart.sh +ExecReload=/bin/kill -HUP $MAINPID +KillMode=mixed +KillSignal=SIGINT +TimeoutStopSec=90 +TimeoutStartSec=86400 +Restart=always +RestartSec=5 +OOMScoreAdjust=-1000 +EnvironmentFile=-/etc/environment.d/postgresql.env +LimitNOFILE=16384 +[Install] +WantedBy=multi-user.target diff --git a/postgres_15.8.1.044/ansible/files/postgresql_config/supautils.conf.j2 b/postgres_15.8.1.044/ansible/files/postgresql_config/supautils.conf.j2 new file mode 100644 index 0000000..1edd4ab --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_config/supautils.conf.j2 @@ -0,0 +1,14 @@ +supautils.extensions_parameter_overrides = '{"pg_cron":{"schema":"pg_catalog"}}' +supautils.policy_grants = '{"postgres":["auth.audit_log_entries","auth.identities","auth.refresh_tokens","auth.sessions","auth.users","realtime.messages","storage.buckets","storage.migrations","storage.objects","storage.s3_multipart_uploads","storage.s3_multipart_uploads_parts"]}' +supautils.drop_trigger_grants = '{"postgres":["auth.audit_log_entries","auth.identities","auth.refresh_tokens","auth.sessions","auth.users","realtime.messages","storage.buckets","storage.migrations","storage.objects","storage.s3_multipart_uploads","storage.s3_multipart_uploads_parts"]}' +# full list: address_standardizer, address_standardizer_data_us, adminpack, amcheck, autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, file_fdw, fuzzystrmatch, hstore, http, hypopg, index_advisor, insert_username, intagg, intarray, isn, lo, ltree, moddatetime, old_snapshot, orioledb, pageinspect, pg_buffercache, pg_cron, pg_freespacemap, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_prewarm, pg_repack, pg_stat_monitor, pg_stat_statements, pg_surgery, pg_tle, pg_trgm, pg_visibility, pg_walinspect, pgaudit, pgcrypto, pgjwt, pgmq, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgsodium, pgstattuple, pgtap, plcoffee, pljava, plls, plpgsql, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, 
postgres_fdw, refint, rum, seg, sslinfo, supabase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, vector, wrappers, xml2 +# omitted because may be unsafe: adminpack, amcheck, file_fdw, lo, old_snapshot, pageinspect, pg_buffercache, pg_freespacemap, pg_surgery, pg_visibility +# omitted because deprecated: intagg, xml2 +# omitted because doesn't require superuser: pgmq +supautils.privileged_extensions = 'address_standardizer, address_standardizer_data_us, autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, fuzzystrmatch, hstore, http, hypopg, index_advisor, insert_username, intarray, isn, ltree, moddatetime, orioledb, pg_cron, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_prewarm, pg_repack, pg_stat_monitor, pg_stat_statements, pg_tle, pg_trgm, pg_walinspect, pgaudit, pgcrypto, pgjwt, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgsodium, pgstattuple, pgtap, plcoffee, pljava, plls, plpgsql, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, postgres_fdw, refint, rum, seg, sslinfo, supabase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, vector, wrappers' +supautils.privileged_extensions_custom_scripts_path = '/etc/postgresql-custom/extension-custom-scripts' +supautils.privileged_extensions_superuser = 'supabase_admin' +supautils.privileged_role = 'postgres' +supautils.privileged_role_allowed_configs = 'auto_explain.*, log_lock_waits, log_min_duration_statement, log_min_messages, log_statement, log_temp_files, pg_net.batch_size, pg_net.ttl, pg_stat_statements.*, pgaudit.log, pgaudit.log_catalog, pgaudit.log_client, pgaudit.log_level, pgaudit.log_relation, pgaudit.log_rows, pgaudit.log_statement, pgaudit.log_statement_once, pgaudit.role, pgrst.*, plan_filter.*, safeupdate.enabled, session_replication_role, track_io_timing, wal_compression' +supautils.reserved_memberships = 'pg_read_server_files, pg_write_server_files, pg_execute_server_program, supabase_admin, supabase_auth_admin, supabase_storage_admin, supabase_read_only_user, supabase_realtime_admin, supabase_replication_admin, dashboard_user, pgbouncer, authenticator' +supautils.reserved_roles = 'supabase_admin, supabase_auth_admin, supabase_storage_admin, supabase_read_only_user, supabase_realtime_admin, supabase_replication_admin, dashboard_user, pgbouncer, service_role*, authenticator*, authenticated*, anon*' diff --git a/postgres_15.8.1.044/ansible/files/postgresql_config/tmpfiles.postgresql.conf b/postgres_15.8.1.044/ansible/files/postgresql_config/tmpfiles.postgresql.conf new file mode 100644 index 0000000..b5ea549 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_config/tmpfiles.postgresql.conf @@ -0,0 +1,5 @@ +# unchanged from upstream package +d /run/postgresql 2775 postgres postgres - - +# Log directory - ensure that our logging setup gets preserved +# and that vector can keep writing to a file here as well +d /var/log/postgresql 1775 postgres postgres - - diff --git a/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/before-create.sql b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/before-create.sql new file mode 100644 index 0000000..f2f2386 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/before-create.sql @@ -0,0 +1,84 @@ +-- If the following are true: +-- * the extension to be created is a TLE +-- * the extension 
is created with `cascade` +-- +-- then we pre-`create` all nested extension dependencies which are part of +-- `supautils.privileged_extensions`. This is because supautils can't intercept +-- the extension creation for dependencies - it can only intercept the `create +-- extension` statement. +do $$ +declare + _extname text := @extname@; + _extschema text := @extschema@; + _extversion text := @extversion@; + _extcascade bool := @extcascade@; + _r record; +begin + if not _extcascade then + return; + end if; + + if not exists (select from pg_extension where extname = 'pg_tle') then + return; + end if; + + if not exists (select from pgtle.available_extensions() where name = _extname) then + return; + end if; + + if _extversion is null then + select default_version + from pgtle.available_extensions() + where name = _extname + into _extversion; + end if; + + if _extschema is null then + select schema + from pgtle.available_extension_versions() + where name = _extname and version = _extversion + into _extschema; + end if; + + for _r in ( + with recursive available_extensions(name, default_version) as ( + select name, default_version + from pg_available_extensions + union + select name, default_version + from pgtle.available_extensions() + ) + , available_extension_versions(name, version, requires) as ( + select name, version, requires + from pg_available_extension_versions + union + select name, version, requires + from pgtle.available_extension_versions() + ) + , all_dependencies(name, dependency) as ( + select e.name, unnest(ev.requires) as dependency + from available_extensions e + join available_extension_versions ev on ev.name = e.name and ev.version = e.default_version + ) + , dependencies(name) AS ( + select unnest(requires) + from available_extension_versions + where name = _extname and version = _extversion + union + select all_dependencies.dependency + from all_dependencies + join dependencies d on d.name = all_dependencies.name + ) + select name + from dependencies + intersect + select name + from regexp_split_to_table(current_setting('supautils.privileged_extensions', true), '\s*,\s*') as t(name) + ) loop + if _extschema is null then + execute(format('create extension if not exists %I cascade', _r.name)); + else + execute(format('create extension if not exists %I schema %I cascade', _r.name, _extschema)); + end if; + end loop; +end $$; diff --git a/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/dblink/after-create.sql b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/dblink/after-create.sql new file mode 100644 index 0000000..22261bc --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/dblink/after-create.sql @@ -0,0 +1,14 @@ +do $$ +declare + r record; +begin + for r in (select oid, (aclexplode(proacl)).grantee from pg_proc where proname = 'dblink_connect_u') loop + continue when r.grantee = 'supabase_admin'::regrole; + execute( + format( + 'revoke all on function %s(%s) from %s;', r.oid::regproc, pg_get_function_identity_arguments(r.oid), r.grantee::regrole + ) + ); + end loop; +end +$$; diff --git a/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pg_cron/after-create.sql b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pg_cron/after-create.sql new file mode 100644 index 0000000..6ac9d6b --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pg_cron/after-create.sql @@ -0,0 +1,13 @@ +grant usage on schema cron to postgres 
with grant option;
+grant all on all functions in schema cron to postgres with grant option;
+
+alter default privileges for user supabase_admin in schema cron grant all
+    on sequences to postgres with grant option;
+alter default privileges for user supabase_admin in schema cron grant all
+    on tables to postgres with grant option;
+alter default privileges for user supabase_admin in schema cron grant all
+    on functions to postgres with grant option;
+
+grant all privileges on all tables in schema cron to postgres with grant option;
+revoke all on table cron.job from postgres;
+grant select on table cron.job to postgres with grant option;
diff --git a/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pg_repack/after-create.sql b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pg_repack/after-create.sql
new file mode 100644
index 0000000..b0ec306
--- /dev/null
+++ b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pg_repack/after-create.sql
@@ -0,0 +1,4 @@
+grant all on all tables in schema repack to postgres;
+grant all on schema repack to postgres;
+alter default privileges in schema repack grant all on tables to postgres;
+alter default privileges in schema repack grant all on sequences to postgres;
diff --git a/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pg_tle/after-create.sql b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pg_tle/after-create.sql
new file mode 100644
index 0000000..eb8aeff
--- /dev/null
+++ b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pg_tle/after-create.sql
@@ -0,0 +1 @@
+grant pgtle_admin to postgres;
diff --git a/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pgmq/after-create.sql b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pgmq/after-create.sql
new file mode 100644
index 0000000..050e07d
--- /dev/null
+++ b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pgmq/after-create.sql
@@ -0,0 +1,173 @@
+do $$
+declare
+  extoid oid := (select oid from pg_extension where extname = 'pgmq');
+  r record;
+  cls pg_class%rowtype;
+begin
+
+  set local search_path = '';
+
+/*
+  Override pgmq.drop_queue to check whether the relevant tables are owned
+  by the pgmq extension before attempting to run
+  `alter extension pgmq drop table ...`.
+  This is necessary because, to enable nightly logical backups to include
+  user queues, we automatically detach them from pgmq.
+
+  This update is backwards compatible with version 1.4.4 but should be
+  removed once we're on physical backups everywhere.
+*/
+-- Detach and delete the official function
+alter extension pgmq drop function pgmq.drop_queue;
+drop function pgmq.drop_queue;
+
+-- Create and reattach the patched function
+CREATE FUNCTION pgmq.drop_queue(queue_name TEXT)
+RETURNS BOOLEAN AS $func$
+DECLARE
+    qtable TEXT := pgmq.format_table_name(queue_name, 'q');
+    qtable_seq TEXT := qtable || '_msg_id_seq';
+    fq_qtable TEXT := 'pgmq.' || qtable;
+    atable TEXT := pgmq.format_table_name(queue_name, 'a');
+    fq_atable TEXT := 'pgmq.'
|| atable; + partitioned BOOLEAN; +BEGIN + EXECUTE FORMAT( + $QUERY$ + SELECT is_partitioned FROM pgmq.meta WHERE queue_name = %L + $QUERY$, + queue_name + ) INTO partitioned; + + -- NEW CONDITIONAL CHECK + if exists ( + select 1 + from pg_class c + join pg_depend d on c.oid = d.objid + join pg_extension e on d.refobjid = e.oid + where c.relname = qtable and e.extname = 'pgmq' + ) then + + EXECUTE FORMAT( + $QUERY$ + ALTER EXTENSION pgmq DROP TABLE pgmq.%I + $QUERY$, + qtable + ); + + end if; + + -- NEW CONDITIONAL CHECK + if exists ( + select 1 + from pg_class c + join pg_depend d on c.oid = d.objid + join pg_extension e on d.refobjid = e.oid + where c.relname = qtable_seq and e.extname = 'pgmq' + ) then + EXECUTE FORMAT( + $QUERY$ + ALTER EXTENSION pgmq DROP SEQUENCE pgmq.%I + $QUERY$, + qtable_seq + ); + + end if; + + -- NEW CONDITIONAL CHECK + if exists ( + select 1 + from pg_class c + join pg_depend d on c.oid = d.objid + join pg_extension e on d.refobjid = e.oid + where c.relname = atable and e.extname = 'pgmq' + ) then + + EXECUTE FORMAT( + $QUERY$ + ALTER EXTENSION pgmq DROP TABLE pgmq.%I + $QUERY$, + atable + ); + + end if; + + -- NO CHANGES PAST THIS POINT + + EXECUTE FORMAT( + $QUERY$ + DROP TABLE IF EXISTS pgmq.%I + $QUERY$, + qtable + ); + + EXECUTE FORMAT( + $QUERY$ + DROP TABLE IF EXISTS pgmq.%I + $QUERY$, + atable + ); + + IF EXISTS ( + SELECT 1 + FROM information_schema.tables + WHERE table_name = 'meta' and table_schema = 'pgmq' + ) THEN + EXECUTE FORMAT( + $QUERY$ + DELETE FROM pgmq.meta WHERE queue_name = %L + $QUERY$, + queue_name + ); + END IF; + + IF partitioned THEN + EXECUTE FORMAT( + $QUERY$ + DELETE FROM %I.part_config where parent_table in (%L, %L) + $QUERY$, + pgmq._get_pg_partman_schema(), fq_qtable, fq_atable + ); + END IF; + + RETURN TRUE; +END; +$func$ LANGUAGE plpgsql; + +alter extension pgmq add function pgmq.drop_queue; + + + update pg_extension set extowner = 'postgres'::regrole where extname = 'pgmq'; + + for r in (select * from pg_depend where refobjid = extoid) loop + + + if r.classid = 'pg_type'::regclass then + + -- store the type's relkind + select * into cls from pg_class c where c.reltype = r.objid; + + if r.objid::regtype::text like '%[]' then + -- do nothing (skipping array type) + + elsif cls.relkind in ('r', 'p', 'f', 'm') then + -- table-like objects (regular table, partitioned, foreign, materialized view) + execute format('alter table pgmq.%I owner to postgres;', cls.relname); + + else + execute(format('alter type %s owner to postgres;', r.objid::regtype)); + + end if; + + elsif r.classid = 'pg_proc'::regclass then + execute(format('alter function %s(%s) owner to postgres;', r.objid::regproc, pg_get_function_identity_arguments(r.objid))); + + elsif r.classid = 'pg_class'::regclass then + execute(format('alter table %s owner to postgres;', r.objid::regclass)); + + else + raise exception 'error on pgmq after-create script: unexpected object type %', r.classid; + + end if; + end loop; +end $$; diff --git a/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pgsodium/after-create.sql b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pgsodium/after-create.sql new file mode 100644 index 0000000..38242ab --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pgsodium/after-create.sql @@ -0,0 +1,26 @@ +grant execute on function pgsodium.crypto_aead_det_decrypt(bytea, bytea, uuid, bytea) to service_role; +grant execute on function pgsodium.crypto_aead_det_encrypt(bytea, 
bytea, uuid, bytea) to service_role; +grant execute on function pgsodium.crypto_aead_det_keygen to service_role; + +CREATE OR REPLACE FUNCTION pgsodium.mask_role(masked_role regrole, source_name text, view_name text) +RETURNS void +LANGUAGE plpgsql +SECURITY DEFINER +SET search_path TO '' +AS $function$ +BEGIN + EXECUTE format( + 'GRANT SELECT ON pgsodium.key TO %s', + masked_role); + + EXECUTE format( + 'GRANT pgsodium_keyiduser, pgsodium_keyholder TO %s', + masked_role); + + EXECUTE format( + 'GRANT ALL ON %I TO %s', + view_name, + masked_role); + RETURN; +END +$function$; diff --git a/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pgsodium/before-create.sql b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pgsodium/before-create.sql new file mode 100644 index 0000000..fb82a46 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/pgsodium/before-create.sql @@ -0,0 +1,9 @@ +do $$ +declare + _extversion text := @extversion@; + _r record; +begin + if _extversion is not null and _extversion != '3.1.8' then + raise exception 'only pgsodium 3.1.8 is supported'; + end if; +end $$; diff --git a/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/postgis_tiger_geocoder/after-create.sql b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/postgis_tiger_geocoder/after-create.sql new file mode 100644 index 0000000..f8ec163 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/postgis_tiger_geocoder/after-create.sql @@ -0,0 +1,10 @@ +-- These schemas are created by the extension to house all tiger-related functions, owned by supabase_admin +grant usage on schema tiger, tiger_data to postgres with grant option; +-- Give postgres permissions on all existing entities; this also allows postgres to grant access to other roles +grant all on all tables in schema tiger, tiger_data to postgres with grant option; +grant all on all routines in schema tiger, tiger_data to postgres with grant option; +grant all on all sequences in schema tiger, tiger_data to postgres with grant option; +-- Update default privileges so that new entities are also accessible by postgres +alter default privileges in schema tiger, tiger_data grant all on tables to postgres with grant option; +alter default privileges in schema tiger, tiger_data grant all on routines to postgres with grant option; +alter default privileges in schema tiger, tiger_data grant all on sequences to postgres with grant option; diff --git a/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/postgres_fdw/after-create.sql b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/postgres_fdw/after-create.sql new file mode 100644 index 0000000..1e83ee9 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgresql_extension_custom_scripts/postgres_fdw/after-create.sql @@ -0,0 +1,21 @@ +do $$ +declare + is_super boolean; +begin + is_super = ( + select usesuper + from pg_user + where usename = 'postgres' + ); + + -- Need to be superuser to own FDWs, so we temporarily make postgres superuser.
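+  -- Flow: grant superuser only if it is missing, hand the FDW over to postgres, then revert so postgres keeps its usual non-superuser profile.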
+ if not is_super then + alter role postgres superuser; + end if; + + alter foreign data wrapper postgres_fdw owner to postgres; + + if not is_super then + alter role postgres nosuperuser; + end if; +end $$; diff --git a/postgres_15.8.1.044/ansible/files/postgrest-optimizations.service.j2 b/postgres_15.8.1.044/ansible/files/postgrest-optimizations.service.j2 new file mode 100644 index 0000000..38604b6 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgrest-optimizations.service.j2 @@ -0,0 +1,11 @@ +[Unit] +Description=Postgrest optimizations + +[Service] +Type=oneshot +# we don't want failures from this command to cause PG startup to fail +ExecStart=/bin/bash -c "/opt/supabase-admin-api optimize postgrest --destination-config-file-path /etc/postgrest/generated.conf ; exit 0" +User=postgrest + +[Install] +WantedBy=multi-user.target diff --git a/postgres_15.8.1.044/ansible/files/postgrest.service.j2 b/postgres_15.8.1.044/ansible/files/postgrest.service.j2 new file mode 100644 index 0000000..290f077 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/postgrest.service.j2 @@ -0,0 +1,18 @@ +[Unit] +Description=PostgREST +Requires=postgrest-optimizations.service +After=postgrest-optimizations.service + +[Service] +Type=simple +# We allow the base config (sent from the worker) to override the generated config +ExecStartPre=/etc/postgrest/merge.sh /etc/postgrest/generated.conf /etc/postgrest/base.conf +ExecStart=/opt/postgrest /etc/postgrest/merged.conf +User=postgrest +Slice=services.slice +Restart=always +RestartSec=3 +LimitNOFILE=100000 + +[Install] +WantedBy=multi-user.target diff --git a/postgres_15.8.1.044/ansible/files/services.slice.j2 b/postgres_15.8.1.044/ansible/files/services.slice.j2 new file mode 100644 index 0000000..d45187f --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/services.slice.j2 @@ -0,0 +1,6 @@ +# Used for general services grouping for easy visibility when running +# systemctl status +# See http://archive.vn/94IGa for an in depth article on systemd slices +[Unit] +Description=Slice used for PostgreSQL +Before=slices.target diff --git a/postgres_15.8.1.044/ansible/files/sodium_extension.sql b/postgres_15.8.1.044/ansible/files/sodium_extension.sql new file mode 100644 index 0000000..a19cabf --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/sodium_extension.sql @@ -0,0 +1,6 @@ +create schema if not exists pgsodium; +create extension if not exists pgsodium with schema pgsodium cascade; + +grant pgsodium_keyiduser to postgres with admin option; +grant pgsodium_keyholder to postgres with admin option; +grant pgsodium_keymaker to postgres with admin option; diff --git a/postgres_15.8.1.044/ansible/files/start-envoy.sh b/postgres_15.8.1.044/ansible/files/start-envoy.sh new file mode 100644 index 0000000..edd6fe0 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/start-envoy.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +set -eou pipefail + +if [[ $(cat /sys/module/ipv6/parameters/disable) = 1 ]]; then + sed -i -e "s/address: '::'/address: '0.0.0.0'/" -e 's/ipv4_compat: true/ipv4_compat: false/' /etc/envoy/lds.yaml +else + sed -i -e "s/address: '0.0.0.0'/address: '::'/" -e 's/ipv4_compat: false/ipv4_compat: true/' /etc/envoy/lds.yaml +fi + +# Workaround using `tee` to get `/dev/stdout` access logging to work, see: +# https://github.com/envoyproxy/envoy/issues/8297#issuecomment-620659781 +exec /opt/envoy --config-path /etc/envoy/envoy.yaml --restart-epoch "${RESTART_EPOCH}" 2>&1 | tee diff --git a/postgres_15.8.1.044/ansible/files/stat_extension.sql 
b/postgres_15.8.1.044/ansible/files/stat_extension.sql new file mode 100644 index 0000000..9378340 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/stat_extension.sql @@ -0,0 +1,2 @@ +CREATE SCHEMA IF NOT EXISTS extensions; +CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA extensions; diff --git a/postgres_15.8.1.044/ansible/files/supabase_facts.ini b/postgres_15.8.1.044/ansible/files/supabase_facts.ini new file mode 100644 index 0000000..44e01b4 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/supabase_facts.ini @@ -0,0 +1,2 @@ +[general] +postgres_version=15 diff --git a/postgres_15.8.1.044/ansible/files/sysstat.sysstat b/postgres_15.8.1.044/ansible/files/sysstat.sysstat new file mode 100644 index 0000000..52b7d07 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/sysstat.sysstat @@ -0,0 +1,36 @@ +# How long to keep log files (in days). +# Used by sa2(8) script +# If value is greater than 28, then use sadc's option -D to prevent older +# data files from being overwritten. See sadc(8) and sysstat(5) manual pages. +HISTORY=7 + +# Compress (using xz, gzip or bzip2) sa and sar files older than (in days): +COMPRESSAFTER=10 + +# Parameters for the system activity data collector (see sadc(8) manual page) +# which are used for the generation of log files. +# By default contains the `-S DISK' option responsible for generating disk +# statistics. Use `-S XALL' to collect all available statistics. +SADC_OPTIONS="-S DISK" + +# Directory where sa and sar files are saved. The directory must exist. +SA_DIR=/var/log/sysstat + +# Compression program to use. +ZIP="xz" + +# By default sa2 script generates yesterday's summary, since the cron job +# usually runs right after midnight. If you want sa2 to generate the summary +# of the same day (for example when cron job runs at 23:53) set this variable. +#YESTERDAY=no + +# By default sa2 script generates reports files (the so called sarDD files). +# Set this variable to false to disable reports generation. +#REPORTS=false + +# The sa1 and sa2 scripts generate system activity data and report files in +# the /var/log/sysstat directory. By default the files are created with umask 0022 +# and are therefore readable for all users. Change this variable to restrict +# the permissions on the files (e.g. use 0027 to adhere to more strict +# security standards). +UMASK=0022 diff --git a/postgres_15.8.1.044/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.service b/postgres_15.8.1.044/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.service new file mode 100644 index 0000000..5e70943 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.service @@ -0,0 +1,11 @@ +[Unit] +Description=Check if systemd-networkd has broken NDisc routes and fix them +Requisite=systemd-networkd.service +After=systemd-networkd.service + +[Service] +Type=oneshot +# This needs to be root for the service restart to work +User=root +Group=root +ExecStart=/usr/local/bin/systemd-networkd-check-and-fix.sh diff --git a/postgres_15.8.1.044/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.sh b/postgres_15.8.1.044/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.sh new file mode 100644 index 0000000..af00b41 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# Check for occurrences of an NDisc log error +# NOTE: the --since window must match the cadence of the systemd timer unit; otherwise there is a risk of repeat matches and a restart loop
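+# journalctl should exit 0 here only when --grep matched at least one entry within the window; that exit code is captured below (worth re-verifying on the target systemd version)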
+journalctl --no-pager --unit systemd-networkd --since "1 minute ago" --grep "Could not set NDisc route" >/dev/null +NDISC_ERROR=$? + +if systemctl is-active --quiet systemd-networkd.service && [ "${NDISC_ERROR}" -eq 0 ]; then + echo "$(date) systemd-networkd is running but NDisc routes are broken. Restarting systemd-networkd.service" + /usr/bin/systemctl restart systemd-networkd.service + exit # no need to check further +fi + +# check for routes +ROUTES=$(ip -6 route list) + +if ! echo "${ROUTES}" | grep default >/dev/null || ! echo "${ROUTES}" | grep "::1 dev lo">/dev/null; then + echo "IPv6 routing table is broken. Restarting systemd-networkd.service" + /usr/bin/systemctl restart systemd-networkd.service +fi diff --git a/postgres_15.8.1.044/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.timer b/postgres_15.8.1.044/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.timer new file mode 100644 index 0000000..93c0836 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/systemd-networkd/systemd-networkd-check-and-fix.timer @@ -0,0 +1,9 @@ +[Unit] +Description=Check if systemd-networkd has broken NDisc routes and fix them + +[Timer] +# NOTE: the cadence must match that of the journalctl search window (--since); otherwise there is a risk of repeat matches and a restart loop +OnCalendar=minutely + +[Install] +WantedBy=timers.target diff --git a/postgres_15.8.1.044/ansible/files/systemd-resolved.conf b/postgres_15.8.1.044/ansible/files/systemd-resolved.conf new file mode 100644 index 0000000..9280d88 --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/systemd-resolved.conf @@ -0,0 +1,8 @@ +# the default is RestartSec=0. If the service fails to start because +# of a systemic issue (e.g. rare case when disk is full) it will +# quickly hit the burst limit (default of 5 failures within 10secs) +# and thereafter be placed in a failed state. By increasing the +# restart interval, we avoid that, and ensure that the service will be +# started back up once any underlying issues are resolved. +[Service] +RestartSec=3 diff --git a/postgres_15.8.1.044/ansible/files/ufw.service.conf b/postgres_15.8.1.044/ansible/files/ufw.service.conf new file mode 100644 index 0000000..83b82ef --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/ufw.service.conf @@ -0,0 +1,4 @@ +[Unit] +After=nftables.service +Requires=nftables.service +PartOf=nftables.service diff --git a/postgres_15.8.1.044/ansible/files/vector.service.j2 b/postgres_15.8.1.044/ansible/files/vector.service.j2 new file mode 100644 index 0000000..1c88baa --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/vector.service.j2 @@ -0,0 +1,20 @@ +[Unit] +Description=Vector +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +User=vector +Group=vector +ExecStartPre=/usr/bin/vector validate --config-yaml /etc/vector/vector.yaml +ExecStart=/usr/bin/vector --config-yaml /etc/vector/vector.yaml +ExecReload=/usr/bin/vector validate --config-yaml /etc/vector/vector.yaml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +RestartSec=3 +AmbientCapabilities=CAP_NET_BIND_SERVICE +EnvironmentFile=-/etc/default/vector + +[Install] +WantedBy=multi-user.target diff --git a/postgres_15.8.1.044/ansible/files/walg_helper_scripts/wal_change_ownership.sh b/postgres_15.8.1.044/ansible/files/walg_helper_scripts/wal_change_ownership.sh new file mode 100644 index 0000000..3f0112d --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/walg_helper_scripts/wal_change_ownership.sh @@ -0,0 +1,42 @@ +#!
/usr/bin/env bash + +set -euo pipefail + +# default to empty so the guard below handles a missing argument cleanly under set -u +filename=${1:-} + +if [[ -z "$filename" ]]; then + echo "Nothing supplied. Exiting." + exit 1 +fi + +full_path=/tmp/wal_fetch_dir/$filename + +num_paths=$(readlink -f "$full_path" | wc -l) + +# Checks if the supplied filename string contains multiple paths +# For example, "correct/path /var/lib/injected/path /var/lib/etc" +if [[ "$num_paths" -gt 1 ]]; then + echo "Multiple paths supplied. Exiting." + exit 1 +fi + +base_dir=$(readlink -f "$full_path" | cut -d'/' -f2) + +# Checks if the directory/file to be manipulated +# is indeed within the /tmp directory +# For example, "/tmp/../var/lib/postgresql/..." +# will return "var" as the value for $base_dir +if [[ "$base_dir" != "tmp" ]]; then + echo "Attempt to manipulate a file not in /tmp. Exiting." + exit 1 +fi + +# Checks if change of ownership will be applied to a file +# If not, exit +if [[ ! -f $full_path ]]; then + echo "Either file does not exist or is a directory. Exiting." + exit 1 +fi + +# once valid, proceed to change ownership +chown postgres:postgres "$full_path" diff --git a/postgres_15.8.1.044/ansible/files/walg_helper_scripts/wal_fetch.sh b/postgres_15.8.1.044/ansible/files/walg_helper_scripts/wal_fetch.sh new file mode 100644 index 0000000..33448ac --- /dev/null +++ b/postgres_15.8.1.044/ansible/files/walg_helper_scripts/wal_fetch.sh @@ -0,0 +1,12 @@ +#! /usr/bin/env bash + +set -euo pipefail + +# Fetch the WAL file and temporarily store it in /tmp +sudo -u wal-g wal-g wal-fetch "$1" /tmp/wal_fetch_dir/"$1" --config /etc/wal-g/config.json + +# Ensure the WAL file is owned by the postgres Linux user +sudo -u root /root/wal_change_ownership.sh "$1" + +# Move file to its final destination +mv /tmp/wal_fetch_dir/"$1" /var/lib/postgresql/data/"$2" diff --git a/postgres_15.8.1.044/ansible/manifest-playbook.yml b/postgres_15.8.1.044/ansible/manifest-playbook.yml new file mode 100644 index 0000000..5c1c650 --- /dev/null +++ b/postgres_15.8.1.044/ansible/manifest-playbook.yml @@ -0,0 +1,75 @@ +- hosts: localhost + gather_facts: no + + vars_files: + - ./vars.yml + + tasks: + - name: Write out image manifest + action: template src=files/manifest.json dest=./image-manifest-{{ ami_release_version }}.json + + - name: Upload image manifest + shell: | + aws s3 cp ./image-manifest-{{ ami_release_version }}.json s3://{{ internal_artifacts_bucket }}/manifests/postgres-{{ ami_release_version }}/software-manifest.json + + # upload software artifacts of interest + # Generally - download, extract, repack as xz archive, upload + # currently, we upload gotrue, adminapi, postgrest + - name: gotrue - download commit archive + get_url: + url: "https://github.com/supabase/gotrue/releases/download/v{{ gotrue_release }}/auth-v{{ gotrue_release }}-arm64.tar.gz" + dest: /tmp/auth-v{{ gotrue_release }}-arm64.tar.gz + checksum: "{{ gotrue_release_checksum }}" + timeout: 60 + + - name: PostgREST - download ubuntu binary archive (arm) + get_url: + url: "https://github.com/PostgREST/postgrest/releases/download/v{{ postgrest_release }}/postgrest-v{{ postgrest_release }}-ubuntu-aarch64.tar.xz" + dest: /tmp/postgrest-{{ postgrest_release }}-arm64.tar.xz + checksum: "{{ postgrest_arm_release_checksum }}" + timeout: 60 + + - name: Download adminapi archive + get_url: + url: "https://supabase-public-artifacts-bucket.s3.amazonaws.com/supabase-admin-api/v{{ adminapi_release }}/supabase-admin-api_{{ adminapi_release }}_linux_arm64.tar.gz" + dest: "/tmp/adminapi.tar.gz" + timeout: 90 + + - name: adminapi - unpack archive in /tmp + unarchive: +
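+      # unpacked here only so the binary can be re-packed below as a .tar.xz upgrade artifact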
remote_src: yes + src: /tmp/adminapi.tar.gz + dest: /tmp + + - name: adminapi - pack archive + shell: | + cd /tmp && tar -cJf supabase-admin-api-{{ adminapi_release }}-arm64.tar.xz supabase-admin-api + + - name: Download admin-mgr archive + get_url: + url: "https://supabase-public-artifacts-bucket.s3.amazonaws.com/admin-mgr/v{{ adminmgr_release }}/admin-mgr_{{ adminmgr_release }}_linux_arm64.tar.gz" + dest: "/tmp/admin-mgr.tar.gz" + timeout: 90 + + - name: admin-mgr - unpack archive in /tmp + unarchive: + remote_src: yes + src: /tmp/admin-mgr.tar.gz + dest: /tmp + + - name: admin-mgr - pack archive + shell: | + cd /tmp && tar -cJf admin-mgr-{{ adminmgr_release }}-arm64.tar.xz admin-mgr + + - name: upload archives + shell: | + aws s3 cp /tmp/{{ item.file }} s3://{{ internal_artifacts_bucket }}/upgrades/{{ item.service }}/{{ item.file }} + with_items: + - service: gotrue + file: auth-v{{ gotrue_release }}-arm64.tar.gz + - service: postgrest + file: postgrest-{{ postgrest_release }}-arm64.tar.xz + - service: supabase-admin-api + file: supabase-admin-api-{{ adminapi_release }}-arm64.tar.xz + - service: admin-mgr + file: admin-mgr-{{ adminmgr_release }}-arm64.tar.xz diff --git a/postgres_15.8.1.044/ansible/playbook.yml b/postgres_15.8.1.044/ansible/playbook.yml new file mode 100644 index 0000000..5c5f8da --- /dev/null +++ b/postgres_15.8.1.044/ansible/playbook.yml @@ -0,0 +1,229 @@ +- hosts: all + become: yes + + pre_tasks: + - import_tasks: tasks/setup-system.yml + vars_files: + - ./vars.yml + + vars: + sql_files: + - { + source: "pgbouncer_config/pgbouncer_auth_schema.sql", + dest: "00-schema.sql", + } + - { source: "stat_extension.sql", dest: "01-extension.sql" } + + environment: + PATH: /usr/lib/postgresql/bin:{{ ansible_env.PATH }} + + tasks: + - set_fact: + supabase_internal: true + tags: + - install-supabase-internal + + - set_fact: + parallel_jobs: 16 + + - name: Install Postgres from source + import_tasks: tasks/setup-postgres.yml + + - name: Install PgBouncer + import_tasks: tasks/setup-pgbouncer.yml + tags: + - install-pgbouncer + - install-supabase-internal + when: debpkg_mode or nixpkg_mode + + - name: Install WAL-G + import_tasks: tasks/setup-wal-g.yml + when: debpkg_mode or nixpkg_mode + + - name: Install Gotrue + import_tasks: tasks/setup-gotrue.yml + tags: + - install-gotrue + - install-supabase-internal + when: debpkg_mode or nixpkg_mode + + - name: Install PostgREST + import_tasks: tasks/setup-postgrest.yml + tags: + - install-postgrest + - install-supabase-internal + when: debpkg_mode or nixpkg_mode + + - name: Install Envoy + import_tasks: tasks/setup-envoy.yml + tags: + - install-supabase-internal + when: debpkg_mode or nixpkg_mode + + - name: Install Kong + import_tasks: tasks/setup-kong.yml + tags: + - install-supabase-internal + when: debpkg_mode or nixpkg_mode + + - name: Install nginx + import_tasks: tasks/setup-nginx.yml + tags: + - install-supabase-internal + when: debpkg_mode or nixpkg_mode + + - name: Install Supabase specific content + import_tasks: tasks/setup-supabase-internal.yml + tags: + - install-supabase-internal + when: debpkg_mode or nixpkg_mode + + - name: Fix IPv6 NDisc issues + import_tasks: tasks/fix_ipv6_ndisc.yml + tags: + - install-supabase-internal + when: debpkg_mode or nixpkg_mode + + - name: Start Postgres Database without Systemd + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start + when: debpkg_mode + + - name: Adjust APT update intervals + copy: + src: files/apt_periodic + 
dest: /etc/apt/apt.conf.d/10periodic + when: debpkg_mode or nixpkg_mode + + - name: Transfer init SQL files + copy: + src: files/{{ item.source }} + dest: /tmp/{{ item.dest }} + loop: "{{ sql_files }}" + when: debpkg_mode or stage2_nix + + - name: Create postgres role + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/psql --username=supabase_admin -d postgres -c "create role postgres superuser login; alter database postgres owner to postgres;" + when: debpkg_mode or stage2_nix + + - name: Execute init SQL files + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/psql -f /tmp/{{ item.dest }} + loop: "{{ sql_files }}" + when: debpkg_mode or stage2_nix + + - name: Delete SQL scripts + file: + path: /tmp/{{ item.dest }} + state: absent + loop: "{{ sql_files }}" + when: debpkg_mode or stage2_nix + + - name: First boot optimizations + import_tasks: tasks/internal/optimizations.yml + tags: + - install-supabase-internal + when: debpkg_mode or stage2_nix + + - name: Finalize AMI + import_tasks: tasks/finalize-ami.yml + tags: + - install-supabase-internal + when: debpkg_mode or nixpkg_mode + + - name: Enhance fail2ban + import_tasks: tasks/setup-fail2ban.yml + when: debpkg_mode or nixpkg_mode + + + # Install EC2 instance connect + # Only for AWS images + - name: install EC2 instance connect + become: yes + apt: + pkg: + - ec2-instance-connect + tags: + - aws-only + when: qemu_mode is undefined + + # Install this at the end to prevent it from kicking in during the apt process, causing conflicts + - name: Install security tools + become: yes + apt: + pkg: + - unattended-upgrades + update_cache: yes + cache_valid_time: 3600 + + - name: Clean out build dependencies + import_tasks: tasks/clean-build-dependencies.yml + + - name: Restart Postgres Database without Systemd + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data restart -o "-c shared_preload_libraries='pg_tle'" + when: debpkg_mode + + - name: Run migrations + import_tasks: tasks/setup-migrations.yml + tags: + - migrations + when: debpkg_mode or stage2_nix + + - name: Stop Postgres Database without Systemd + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data stop + when: debpkg_mode + + - name: Run unit tests + import_tasks: tasks/test-image.yml + tags: + - unit-tests + when: debpkg_mode or stage2_nix + + - name: Collect Postgres binaries + import_tasks: tasks/internal/collect-pg-binaries.yml + tags: + - collect-binaries + when: debpkg_mode + + - name: Remove snapd + become: yes + shell: | + apt autoremove -y --purge snapd + when: stage2_nix + + - name: Install osquery from nixpkgs binary cache + become: yes + shell: | + sudo -u ubuntu bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:nixos/nixpkgs/f98ec4f73c762223d62bee706726138cb6ea27cc#osquery" + when: stage2_nix + + - name: Run osquery permission checks + become: yes + shell: | + sudo -u ubuntu bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && /usr/bin/python3 /tmp/ansible-playbook/ansible/files/permission_check.py {{ '--qemu' if qemu_mode is defined else '' }}" + when: stage2_nix + + - name: Remove osquery + become: yes + shell: | + sudo -u ubuntu bash -c ".
/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile remove osquery" + when: stage2_nix + + - name: nix collect garbage + become: yes + shell: | + sudo -u ubuntu bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix-collect-garbage -d" + when: stage2_nix diff --git a/postgres_15.8.1.044/ansible/tasks/clean-build-dependencies.yml b/postgres_15.8.1.044/ansible/tasks/clean-build-dependencies.yml new file mode 100644 index 0000000..43ec051 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/clean-build-dependencies.yml @@ -0,0 +1,21 @@ +- name: Remove build dependencies + apt: + pkg: + - bison + - build-essential + - clang-11 + - cmake + - cpp + - flex + - g++ + - g++-10 + - g++-9 + - gcc-10 + - make + - manpages + - manpages-dev + - ninja-build + - patch + - python2 + state: absent + autoremove: yes diff --git a/postgres_15.8.1.044/ansible/tasks/finalize-ami.yml b/postgres_15.8.1.044/ansible/tasks/finalize-ami.yml new file mode 100644 index 0000000..7f0de3a --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/finalize-ami.yml @@ -0,0 +1,81 @@ +- name: PG logging conf + template: + src: files/postgresql_config/postgresql-csvlog.conf + dest: /etc/postgresql/logging.conf + group: postgres + +- name: UFW - Allow SSH connections + ufw: + rule: allow + name: OpenSSH + +- name: UFW - Allow connections to PostgreSQL (5432) + ufw: + rule: allow + port: "5432" + +- name: UFW - Allow connections to PostgreSQL (6543) + ufw: + rule: allow + port: "6543" + tags: + - install-pgbouncer + +- name: UFW - Allow connections to http (80) + ufw: + rule: allow + port: http + tags: + - install-supabase-internal + +- name: UFW - Allow connections to https (443) + ufw: + rule: allow + port: https + tags: + - install-supabase-internal + +- name: UFW - Deny all other incoming traffic by default + ufw: + state: enabled + policy: deny + direction: incoming + +- name: Move logrotate files to /etc/logrotate.d/ + copy: + src: "files/logrotate_config/{{ item.file }}" + dest: "/etc/logrotate.d/{{ item.file }}" + mode: "0700" + owner: root + loop: + - { file: "logrotate-postgres-csv.conf" } + - { file: "logrotate-postgres.conf" } + - { file: "logrotate-walg.conf" } + - { file: "logrotate-postgres-auth.conf" } + +- name: Ensure default Postgres logrotate config is removed + file: + path: /etc/logrotate.d/postgresql-common + state: absent + +- name: Disable cron access + copy: + src: files/cron.deny + dest: /etc/cron.deny + +- name: Configure logrotate to run every 5 minutes + shell: + cmd: | + cp /usr/lib/systemd/system/logrotate.timer /etc/systemd/system/logrotate.timer + sed -i -e 's;daily;*:0/5;' /etc/systemd/system/logrotate.timer + systemctl reenable logrotate.timer + become: yes + +- name: import pgsodium_getkey script + template: + src: files/pgsodium_getkey_readonly.sh.j2 + dest: "{{ pg_bindir }}/pgsodium_getkey.sh" + owner: postgres + group: postgres + mode: 0700 + when: debpkg_mode or stage2_nix diff --git a/postgres_15.8.1.044/ansible/tasks/fix_ipv6_ndisc.yml b/postgres_15.8.1.044/ansible/tasks/fix_ipv6_ndisc.yml new file mode 100644 index 0000000..7489a2f --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/fix_ipv6_ndisc.yml @@ -0,0 +1,33 @@ +--- +- name: fix Network - systemd timer file + copy: + dest: /etc/systemd/system/systemd-networkd-check-and-fix.timer + src: "files/systemd-networkd/systemd-networkd-check-and-fix.timer" + owner: root + group: root + mode: 0644 + +- name: fix Network - systemd service file
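+  # the minutely timer above fires the oneshot service below, which runs the detect script installed next; together they restart systemd-networkd when NDisc routes break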
copy: + dest: /etc/systemd/system/systemd-networkd-check-and-fix.service + src: "files/systemd-networkd/systemd-networkd-check-and-fix.service" + owner: root + group: root + mode: 0644 + +- name: fix Network - detect script + copy: + dest: /usr/local/bin/systemd-networkd-check-and-fix.sh + src: "files/systemd-networkd/systemd-networkd-check-and-fix.sh" + owner: root + group: root + mode: 0700 + +- name: fix Network - reload systemd + systemd: + daemon_reload: yes + +- name: fix Network - enable systemd timer + systemd: + name: systemd-networkd-check-and-fix.timer + enabled: true diff --git a/postgres_15.8.1.044/ansible/tasks/internal/admin-api.yml b/postgres_15.8.1.044/ansible/tasks/internal/admin-api.yml new file mode 100644 index 0000000..da93fef --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/internal/admin-api.yml @@ -0,0 +1,97 @@ +- name: adminapi - system user + user: + name: adminapi + groups: root,admin,envoy,kong,pgbouncer,postgres,postgrest,systemd-journal,vector,wal-g + append: yes + +- name: Move shell scripts to /root dir + copy: + src: "files/admin_api_scripts/{{ item.file }}" + dest: "/root/{{ item.file }}" + mode: "0700" + owner: root + loop: + - { file: "grow_fs.sh" } + - { file: "manage_readonly_mode.sh" } + - { file: "pg_egress_collect.pl" } + +- name: give adminapi user permissions + copy: + src: files/adminapi.sudoers.conf + dest: /etc/sudoers.d/adminapi + mode: "0644" + +- name: perms for adminapi + shell: | + chmod g+w /etc + +- name: Setting arch (x86) + set_fact: + arch: "x86" + when: platform == "amd64" + +- name: Setting arch (arm) + set_fact: + arch: "arm64" + when: platform == "arm64" + +- name: Download adminapi archive + get_url: + url: "https://supabase-public-artifacts-bucket.s3.amazonaws.com/supabase-admin-api/v{{ adminapi_release }}/supabase-admin-api_{{ adminapi_release }}_linux_{{ arch }}.tar.gz" + dest: "/tmp/adminapi.tar.gz" + timeout: 90 + +- name: adminapi - unpack archive in /opt + unarchive: + remote_src: yes + src: /tmp/adminapi.tar.gz + dest: /opt + owner: adminapi + +- name: adminapi - config dir + file: + path: /etc/adminapi + owner: adminapi + state: directory + +- name: adminapi - pg_upgrade scripts dir + file: + path: /etc/adminapi/pg_upgrade_scripts + owner: adminapi + state: directory + +- name: Move shell scripts to /etc/adminapi/pg_upgrade_scripts/ + copy: + src: "files/admin_api_scripts/pg_upgrade_scripts/{{ item.file }}" + dest: "/etc/adminapi/pg_upgrade_scripts/{{ item.file }}" + mode: "0755" + owner: adminapi + loop: + - { file: "check.sh" } + - { file: "complete.sh" } + - { file: "initiate.sh" } + - { file: "prepare.sh" } + - { file: "pgsodium_getkey.sh" } + - { file: "common.sh" } + +- name: adminapi - create service file + template: + src: files/adminapi.service.j2 + dest: /etc/systemd/system/adminapi.service + +- name: adminapi - create service file for commence backup process + template: + src: files/commence-backup.service.j2 + dest: /etc/systemd/system/commence-backup.service + +- name: UFW - Allow connections to adminapi ports + ufw: + rule: allow + port: "8085" + +- name: adminapi - reload systemd + systemd: + daemon_reload: yes + +- name: adminapi - grant extra privileges to user + shell: chmod 775 /etc && chmod 775 /etc/kong diff --git a/postgres_15.8.1.044/ansible/tasks/internal/admin-mgr.yml b/postgres_15.8.1.044/ansible/tasks/internal/admin-mgr.yml new file mode 100644 index 0000000..073b866 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/internal/admin-mgr.yml @@ -0,0 +1,22 @@ +- name: Setting arch (x86) + set_fact:
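+    # note: admin-mgr release artifacts use Debian-style arch names (amd64), unlike the x86 naming used for adminapi above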
arch: "amd64" + when: platform == "amd64" + +- name: Setting arch (arm) + set_fact: + arch: "arm64" + when: platform == "arm64" + +- name: Download admin-mgr archive + get_url: + url: "https://supabase-public-artifacts-bucket.s3.amazonaws.com/admin-mgr/v{{ adminmgr_release }}/admin-mgr_{{ adminmgr_release }}_linux_{{ arch }}.tar.gz" + dest: "/tmp/admin-mgr.tar.gz" + timeout: 90 + +- name: admin-mgr - unpack archive in /usr/bin/ + unarchive: + remote_src: yes + src: /tmp/admin-mgr.tar.gz + dest: /usr/bin/ + owner: root diff --git a/postgres_15.8.1.044/ansible/tasks/internal/collect-pg-binaries.yml b/postgres_15.8.1.044/ansible/tasks/internal/collect-pg-binaries.yml new file mode 100644 index 0000000..7f652f7 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/internal/collect-pg-binaries.yml @@ -0,0 +1,49 @@ +- name: Collect Postgres binaries - create collection directory + file: + path: /tmp/pg_binaries/{{ postgresql_major }}/ + state: directory + +- name: Collect Postgres binaries - collect binaries and libraries + copy: + remote_src: yes + src: /usr/lib/postgresql/{{ postgresql_major }}/{{ item }}/ + dest: /tmp/pg_binaries/{{ postgresql_major }}/{{ item }}/ + with_items: + - bin + - lib + +- name: Collect Postgres libraries - collect libraries which are in /usr/lib/postgresql/lib/ + copy: + remote_src: yes + src: /usr/lib/postgresql/lib/ + dest: /tmp/pg_binaries/{{ postgresql_major }}/lib/ + +- name: Collect Postgres libraries - collect libraries which are in /var/lib/postgresql/extension/ + copy: + remote_src: yes + src: /var/lib/postgresql/extension/ + dest: /tmp/pg_binaries/{{ postgresql_major }}/lib/ + +- name: Collect Postgres libraries - collect latest libpq + copy: + remote_src: yes + src: /usr/lib/aarch64-linux-gnu/libpq.so.5 + dest: /tmp/pg_binaries/{{ postgresql_major }}/lib/libpq.so.5 + +- name: Collect Postgres binaries - collect shared files + copy: + remote_src: yes + src: /usr/share/postgresql/{{ postgresql_major }}/ + dest: /tmp/pg_binaries/{{ postgresql_major }}/share/ + +- name: Collect Postgres binaries - create tarfile + archive: + path: /tmp/pg_binaries/ + dest: /tmp/pg_binaries.tar.gz + remove: yes + +- name: Fetch tarfile to local + fetch: + src: /tmp/pg_binaries.tar.gz + dest: /tmp/ + flat: true diff --git a/postgres_15.8.1.044/ansible/tasks/internal/install-salt.yml b/postgres_15.8.1.044/ansible/tasks/internal/install-salt.yml new file mode 100644 index 0000000..73cd6ee --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/internal/install-salt.yml @@ -0,0 +1,47 @@ +- name: Add apt repository for Saltstack (arm) + block: + - name: Ensure /etc/apt/keyrings directory exists + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' + + - name: salt gpg key + ansible.builtin.get_url: + url: https://packages.broadcom.com/artifactory/api/security/keypair/SaltProjectKey/public + dest: /etc/apt/keyrings/salt-archive-keyring-2023.pgp + mode: '0644' + + - name: salt apt repo + ansible.builtin.apt_repository: + repo: "deb [signed-by=/etc/apt/keyrings/salt-archive-keyring-2023.pgp arch=arm64] https://packages.broadcom.com/artifactory/saltproject-deb/ stable main" + filename: 'salt.list' + state: present + when: platform == "arm64" + +- name: Add apt repository for Saltstack (amd) + block: + - name: Ensure /etc/apt/keyrings directory exists + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' + + - name: salt gpg key + ansible.builtin.get_url: + url: https://packages.broadcom.com/artifactory/api/security/keypair/SaltProjectKey/public + dest: 
/etc/apt/keyrings/salt-archive-keyring-2023.pgp + mode: '0644' + + - name: salt apt repo + ansible.builtin.apt_repository: + repo: "deb [signed-by=/etc/apt/keyrings/salt-archive-keyring-2023.pgp arch=amd64] https://packages.broadcom.com/artifactory/saltproject-deb/ stable main" + filename: 'salt.list' + state: present + when: platform == "amd64" + +- name: Salt minion install + apt: + name: salt-minion + state: present + update_cache: yes diff --git a/postgres_15.8.1.044/ansible/tasks/internal/optimizations.yml b/postgres_15.8.1.044/ansible/tasks/internal/optimizations.yml new file mode 100644 index 0000000..42a0a24 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/internal/optimizations.yml @@ -0,0 +1,39 @@ +- name: ensure services are stopped and disabled for first boot debian build + systemd: + enabled: no + name: '{{ item }}' + state: stopped + with_items: + - postgresql + - pgbouncer + - fail2ban + - motd-news + - vector + - lvm2-monitor + - salt-minion + when: debpkg_mode + +- name: ensure services are stopped and disabled for first boot nix build + systemd: + enabled: no + name: '{{ item }}' + state: stopped + with_items: + - postgresql + - pgbouncer + - fail2ban + - motd-news + - vector + - salt-minion + when: stage2_nix + +- name: disable man-db + become: yes + file: + state: absent + path: "/etc/cron.daily/{{ item }}" + with_items: + - man-db + - popularity-contest + - ubuntu-advantage-tools + when: debpkg_mode or stage2_nix diff --git a/postgres_15.8.1.044/ansible/tasks/internal/pg_egress_collect.yml b/postgres_15.8.1.044/ansible/tasks/internal/pg_egress_collect.yml new file mode 100644 index 0000000..be9fefe --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/internal/pg_egress_collect.yml @@ -0,0 +1,15 @@ +- name: pg_egress_collect - install tcpdump and perl async lib + apt: + pkg: + - tcpdump + - libio-async-perl + +- name: pg_egress_collect - create service file + template: + src: files/pg_egress_collect.service.j2 + dest: /etc/systemd/system/pg_egress_collect.service + +- name: pg_egress_collect - reload systemd + systemd: + daemon_reload: yes + diff --git a/postgres_15.8.1.044/ansible/tasks/internal/postgres-exporter.yml b/postgres_15.8.1.044/ansible/tasks/internal/postgres-exporter.yml new file mode 100644 index 0000000..0292157 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/internal/postgres-exporter.yml @@ -0,0 +1,48 @@ +- name: UFW - Allow connections to exporter for prometheus + ufw: + rule: allow + port: "9187" + +- name: create directories - systemd unit + file: + state: directory + path: /etc/systemd/system/postgres_exporter.service.d + owner: root + mode: '0700' + become: yes + +- name: create directories - service files + file: + state: directory + path: /opt/postgres_exporter + owner: postgres + group: postgres + mode: '0775' + become: yes + +- name: download postgres exporter + get_url: + url: "https://github.com/prometheus-community/postgres_exporter/releases/download/v{{ postgres_exporter_release }}/postgres_exporter-{{ postgres_exporter_release }}.linux-{{ platform }}.tar.gz" + dest: /tmp/postgres_exporter.tar.gz + checksum: "{{ postgres_exporter_release_checksum[platform] }}" + timeout: 60 + +- name: expand postgres exporter + unarchive: + remote_src: yes + src: /tmp/postgres_exporter.tar.gz + dest: /opt/postgres_exporter + extra_opts: [--strip-components=1] + become: yes + +- name: exporter create a service + template: + src: files/postgres_exporter.service.j2 + dest: /etc/systemd/system/postgres_exporter.service + +- name: exporter ensure 
service is present + systemd: + enabled: no + name: postgres_exporter + daemon_reload: yes + state: stopped diff --git a/postgres_15.8.1.044/ansible/tasks/internal/postgresql-prestart.yml b/postgres_15.8.1.044/ansible/tasks/internal/postgresql-prestart.yml new file mode 100644 index 0000000..46671d5 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/internal/postgresql-prestart.yml @@ -0,0 +1,7 @@ +- name: postgres_prestart - create prestart script + template: + src: files/postgres_prestart.sh.j2 + dest: /usr/local/bin/postgres_prestart.sh + mode: a+x + owner: root + group: root diff --git a/postgres_15.8.1.044/ansible/tasks/internal/setup-ansible-pull.yml b/postgres_15.8.1.044/ansible/tasks/internal/setup-ansible-pull.yml new file mode 100644 index 0000000..7cce74a --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/internal/setup-ansible-pull.yml @@ -0,0 +1,29 @@ +- name: install ansible + shell: + cmd: | + apt install -y software-properties-common + add-apt-repository --yes --update ppa:ansible/ansible + apt install -y ansible + sed -i -e 's/#callback_whitelist.*/callback_whitelist = profile_tasks/' /etc/ansible/ansible.cfg + +- name: ansible pull systemd units + copy: + src: files/{{ item }} + dest: /etc/systemd/system/{{ item }} + with_items: + - ansible-pull.service + - ansible-pull.timer + +- name: create facts dir + file: + path: /etc/ansible/facts.d + state: directory + +- name: ansible facts + copy: + src: files/supabase_facts.ini + dest: /etc/ansible/facts.d/supabase.fact + +- name: reload systemd + systemd: + daemon_reload: yes diff --git a/postgres_15.8.1.044/ansible/tasks/internal/setup-nftables.yml b/postgres_15.8.1.044/ansible/tasks/internal/setup-nftables.yml new file mode 100644 index 0000000..fc8d023 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/internal/setup-nftables.yml @@ -0,0 +1,34 @@ +- name: nftables overrides + file: + state: directory + path: /etc/nftables + owner: adminapi + +- name: nftables empty config + file: + state: touch + path: /etc/nftables/supabase_managed.conf + owner: adminapi + +- name: include managed config + shell: | + cat >> "/etc/nftables.conf" << EOF + table inet supabase_managed { } + include "/etc/nftables/supabase_managed.conf"; + + EOF + +- name: ufw overrides dir + file: + state: directory + path: /etc/systemd/system/ufw.service.d + owner: root + +- name: Custom systemd overrides + copy: + src: files/ufw.service.conf + dest: /etc/systemd/system/ufw.service.d/overrides.conf + +- name: reload systemd + systemd: + daemon_reload: yes diff --git a/postgres_15.8.1.044/ansible/tasks/internal/supautils.yml b/postgres_15.8.1.044/ansible/tasks/internal/supautils.yml new file mode 100644 index 0000000..33811b5 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/internal/supautils.yml @@ -0,0 +1,77 @@ +# supautils +- name: supautils - download & install dependencies + apt: + pkg: + - build-essential + - clang-11 + update_cache: yes + cache_valid_time: 3600 + +- name: supautils - download latest release + get_url: + url: "https://github.com/supabase/supautils/archive/refs/tags/v{{ supautils_release }}.tar.gz" + dest: /tmp/supautils-{{ supautils_release }}.tar.gz + checksum: "{{ supautils_release_checksum }}" + timeout: 60 + +- name: supautils - unpack archive + unarchive: + remote_src: yes + src: /tmp/supautils-{{ supautils_release }}.tar.gz + dest: /tmp + become: yes + +- name: supautils - build + make: + chdir: /tmp/supautils-{{ supautils_release }} + become: yes + +- name: supautils - install + make: + chdir: /tmp/supautils-{{
supautils_release }} + target: install + become: yes + +- name: supautils - add supautils to session_preload_libraries + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#session_preload_libraries = ''" + replace: session_preload_libraries = 'supautils' + +- name: supautils - write custom supautils.conf + template: + src: "files/postgresql_config/supautils.conf.j2" + dest: /etc/postgresql-custom/supautils.conf + mode: 0664 + owner: postgres + group: postgres + +- name: supautils - copy extension custom scripts + copy: + src: files/postgresql_extension_custom_scripts/ + dest: /etc/postgresql-custom/extension-custom-scripts + become: yes + +- name: supautils - chown extension custom scripts + file: + mode: 0775 + owner: postgres + group: postgres + path: /etc/postgresql-custom/extension-custom-scripts + recurse: yes + become: yes + +- name: supautils - include /etc/postgresql-custom/supautils.conf in postgresql.conf + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#include = '/etc/postgresql-custom/supautils.conf'" + replace: "include = '/etc/postgresql-custom/supautils.conf'" + +- name: supautils - remove build dependencies + apt: + pkg: + - build-essential + - clang-11 + state: absent diff --git a/postgres_15.8.1.044/ansible/tasks/setup-docker.yml b/postgres_15.8.1.044/ansible/tasks/setup-docker.yml new file mode 100644 index 0000000..7b37f70 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/setup-docker.yml @@ -0,0 +1,80 @@ +- name: Copy extension packages + copy: + src: files/extensions/ + dest: /tmp/extensions/ + when: debpkg_mode + +# Builtin apt module does not support wildcard for deb paths +- name: Install extensions + shell: | + set -e + apt-get update + apt-get install -y --no-install-recommends /tmp/extensions/*.deb + when: debpkg_mode + +- name: pgsodium - determine postgres bin directory + shell: pg_config --bindir + register: pg_bindir_output + when: debpkg_mode + +- set_fact: + pg_bindir: "{{ pg_bindir_output.stdout }}" + when: debpkg_mode + +- name: pgsodium - set pgsodium.getkey_script + become: yes + lineinfile: + path: /etc/postgresql/postgresql.conf + state: present + # script is expected to be placed by finalization tasks for different target platforms + line: pgsodium.getkey_script= '{{ pg_bindir }}/pgsodium_getkey.sh' + when: debpkg_mode + +# supautils +- name: supautils - add supautils to session_preload_libraries + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#session_preload_libraries = ''" + replace: session_preload_libraries = 'supautils' + when: debpkg_mode or stage2_nix + +- name: supautils - write custom supautils.conf + template: + src: "files/postgresql_config/supautils.conf.j2" + dest: /etc/postgresql-custom/supautils.conf + mode: 0664 + owner: postgres + group: postgres + when: debpkg_mode or stage2_nix + +- name: supautils - copy extension custom scripts + copy: + src: files/postgresql_extension_custom_scripts/ + dest: /etc/postgresql-custom/extension-custom-scripts + become: yes + when: debpkg_mode or stage2_nix + +- name: supautils - chown extension custom scripts + file: + mode: 0775 + owner: postgres + group: postgres + path: /etc/postgresql-custom/extension-custom-scripts + recurse: yes + become: yes + when: debpkg_mode or stage2_nix + +- name: supautils - include /etc/postgresql-custom/supautils.conf in postgresql.conf + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#include = '/etc/postgresql-custom/supautils.conf'" + replace: "include = 
'/etc/postgresql-custom/supautils.conf'" + when: debpkg_mode or stage2_nix + +- name: Cleanup - extension packages + file: + path: /tmp/extensions + state: absent + when: debpkg_mode diff --git a/postgres_15.8.1.044/ansible/tasks/setup-envoy.yml b/postgres_15.8.1.044/ansible/tasks/setup-envoy.yml new file mode 100644 index 0000000..9843b55 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/setup-envoy.yml @@ -0,0 +1,60 @@ +- name: Envoy - system user + ansible.builtin.user: + name: envoy + +- name: Envoy - download binary + ansible.builtin.get_url: + checksum: "{{ envoy_release_checksum }}" + dest: /opt/envoy + group: envoy + mode: u+x + owner: envoy + # yamllint disable-line rule:line-length + url: "https://github.com/envoyproxy/envoy/releases/download/v{{ envoy_release }}/envoy-{{ envoy_release }}-linux-aarch_64" + +- name: Envoy - download hot restarter script + ansible.builtin.get_url: + checksum: "{{ envoy_hot_restarter_release_checksum }}" + dest: /opt/envoy-hot-restarter.py + group: envoy + mode: u+x + owner: envoy + # yamllint disable-line rule:line-length + url: https://raw.githubusercontent.com/envoyproxy/envoy/v{{ envoy_release }}/restarter/hot-restarter.py + +- name: Envoy - bump up ulimit + community.general.pam_limits: + domain: envoy + limit_item: nofile + limit_type: soft + value: 4096 + +- name: Envoy - create script to start envoy + ansible.builtin.copy: + dest: /opt/start-envoy.sh + group: envoy + mode: u+x + owner: envoy + src: files/start-envoy.sh + +- name: Envoy - create configuration files + ansible.builtin.copy: + dest: /etc/envoy/ + directory_mode: u=rwx,g=rwx,o=rx + group: envoy + mode: u=rw,g=rw,o=r + owner: envoy + src: files/envoy_config/ + +- name: Envoy - create service file + ansible.builtin.copy: + dest: /etc/systemd/system/envoy.service + mode: u=rw,g=r,o=r + src: files/envoy.service + +- name: Envoy - disable service + ansible.builtin.systemd: + daemon_reload: true + enabled: false + name: envoy + state: stopped diff --git a/postgres_15.8.1.044/ansible/tasks/setup-extensions.yml b/postgres_15.8.1.044/ansible/tasks/setup-extensions.yml new file mode 100644 index 0000000..a560ae8 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/setup-extensions.yml @@ -0,0 +1,91 @@ +- name: Install plv8 + import_tasks: tasks/postgres-extensions/13-plv8.yml + +- name: Install pg_jsonschema + import_tasks: tasks/postgres-extensions/22-pg_jsonschema.yml + +- name: Install postgis + import_tasks: tasks/postgres-extensions/01-postgis.yml + +- name: Install pgrouting + import_tasks: tasks/postgres-extensions/02-pgrouting.yml + +- name: Install pgtap + import_tasks: tasks/postgres-extensions/03-pgtap.yml + +- name: Install pg_cron + import_tasks: tasks/postgres-extensions/04-pg_cron.yml + +- name: Install pgaudit + import_tasks: tasks/postgres-extensions/05-pgaudit.yml + +- name: Install pgjwt + import_tasks: tasks/postgres-extensions/06-pgjwt.yml + +- name: Install pgsql-http + import_tasks: tasks/postgres-extensions/07-pgsql-http.yml + +- name: Install plpgsql_check + import_tasks: tasks/postgres-extensions/08-plpgsql_check.yml + +- name: Install pg-safeupdate + import_tasks: tasks/postgres-extensions/09-pg-safeupdate.yml + +- name: Install timescaledb + import_tasks: tasks/postgres-extensions/10-timescaledb.yml + +- name: Install wal2json + import_tasks: tasks/postgres-extensions/11-wal2json.yml + +- name: Install pljava + import_tasks: tasks/postgres-extensions/12-pljava.yml + tags: + - legacy-incompatible + +- name: Install pg_plan_filter + import_tasks: 
tasks/postgres-extensions/14-pg_plan_filter.yml + +- name: Install pg_net + import_tasks: tasks/postgres-extensions/15-pg_net.yml + +- name: Install rum + import_tasks: tasks/postgres-extensions/16-rum.yml + +- name: Install pg_hashids + import_tasks: tasks/postgres-extensions/17-pg_hashids.yml + +- name: Install pgsodium + import_tasks: tasks/postgres-extensions/18-pgsodium.yml + +- name: Install pg_graphql + import_tasks: tasks/postgres-extensions/19-pg_graphql.yml + tags: + - legacy-incompatible + +- name: Install pg_stat_monitor + import_tasks: tasks/postgres-extensions/20-pg_stat_monitor.yml + +- name: Install vault + import_tasks: tasks/postgres-extensions/23-vault.yml + +- name: Install PGroonga + import_tasks: tasks/postgres-extensions/24-pgroonga.yml + +- name: Install wrappers + import_tasks: tasks/postgres-extensions/25-wrappers.yml + +- name: Install hypopg + import_tasks: tasks/postgres-extensions/26-hypopg.yml + +- name: Install pg_repack + import_tasks: tasks/postgres-extensions/27-pg_repack.yml + +- name: Install pgvector + import_tasks: tasks/postgres-extensions/28-pgvector.yml + +- name: Install Trusted Language Extensions + import_tasks: tasks/postgres-extensions/29-pg_tle.yml + +- name: Verify async task status + import_tasks: tasks/postgres-extensions/99-finish_async_tasks.yml + when: async_mode diff --git a/postgres_15.8.1.044/ansible/tasks/setup-fail2ban.yml b/postgres_15.8.1.044/ansible/tasks/setup-fail2ban.yml new file mode 100644 index 0000000..7d9088d --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/setup-fail2ban.yml @@ -0,0 +1,73 @@ +# set default bantime to 1 hour +- name: extend bantime + become: yes + replace: + path: /etc/fail2ban/jail.conf + regexp: bantime = 10m + replace: bantime = 3600 + when: debpkg_mode or nixpkg_mode + +- name: Configure journald + copy: + src: files/fail2ban_config/jail-ssh.conf + dest: /etc/fail2ban/jail.d/sshd.local + when: debpkg_mode or nixpkg_mode + +- name: configure fail2ban to use nftables + copy: + src: files/fail2ban_config/jail.local + dest: /etc/fail2ban/jail.local + when: debpkg_mode or nixpkg_mode + +# postgresql +- name: import jail.d/postgresql.conf + template: + src: files/fail2ban_config/jail-postgresql.conf.j2 + dest: /etc/fail2ban/jail.d/postgresql.conf + become: yes + when: debpkg_mode or nixpkg_mode + +- name: import filter.d/postgresql.conf + template: + src: files/fail2ban_config/filter-postgresql.conf.j2 + dest: /etc/fail2ban/filter.d/postgresql.conf + become: yes + when: debpkg_mode or nixpkg_mode + +- name: create overrides dir + file: + state: directory + owner: root + group: root + path: /etc/systemd/system/fail2ban.service.d + mode: '0700' + when: debpkg_mode or nixpkg_mode + +- name: Custom systemd overrides + copy: + src: files/fail2ban_config/fail2ban.service.conf + dest: /etc/systemd/system/fail2ban.service.d/overrides.conf + when: debpkg_mode or nixpkg_mode + +- name: add in Supabase-specific ignore filters + lineinfile: + path: /etc/fail2ban/filter.d/postgresql.conf + state: present + line: "{{ item.line }}" + loop: + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""supabase_admin".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""supabase_auth_admin".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""supabase_storage_admin".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""authenticator".*$' } + - { line: ' ^.*,.*,.*,.*,":.*password authentication failed for user ""pgbouncer".*$' } +
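+  # the doubled "" around role names match PostgreSQL csvlog quote-escaping in the auth failure messages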
become: yes + tags: + - install-supabase-internal + when: debpkg_mode or nixpkg_mode + +- name: fail2ban - disable service + systemd: + name: fail2ban + enabled: no + daemon_reload: yes + when: debpkg_mode or nixpkg_mode diff --git a/postgres_15.8.1.044/ansible/tasks/setup-gotrue.yml b/postgres_15.8.1.044/ansible/tasks/setup-gotrue.yml new file mode 100644 index 0000000..0998468 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/setup-gotrue.yml @@ -0,0 +1,59 @@ +- name: UFW - Allow connections to GoTrue metrics exporter + ufw: + rule: allow + port: "9122" + +# use this user for the Gotrue build and for running the service +- name: Gotrue - system user + user: name=gotrue + +- name: Setting arch (x86) + set_fact: + arch: "x86" + when: platform == "amd64" + +- name: Setting arch (arm) + set_fact: + arch: "arm64" + when: platform == "arm64" + +- name: gotrue - download commit archive + get_url: + url: "https://github.com/supabase/gotrue/releases/download/v{{ gotrue_release }}/auth-v{{ gotrue_release }}-{{ arch }}.tar.gz" + dest: /tmp/gotrue.tar.gz + checksum: "{{ gotrue_release_checksum }}" + +- name: gotrue - create /opt/gotrue + file: + path: /opt/gotrue + state: directory + owner: gotrue + mode: 0775 + +- name: gotrue - unpack archive in /opt/gotrue + unarchive: + remote_src: yes + src: /tmp/gotrue.tar.gz + dest: /opt/gotrue + owner: gotrue + +# libpq is a C library that enables user programs to communicate with +# the PostgreSQL database server. +# - name: gotrue - system dependencies +# apt: +# pkg: +# - libpq-dev + +- name: gotrue - create service file + template: + src: files/gotrue.service.j2 + dest: /etc/systemd/system/gotrue.service + +- name: gotrue - create optimizations file + template: + src: files/gotrue-optimizations.service.j2 + dest: /etc/systemd/system/gotrue-optimizations.service + +- name: gotrue - reload systemd + systemd: + daemon_reload: yes diff --git a/postgres_15.8.1.044/ansible/tasks/setup-kong.yml b/postgres_15.8.1.044/ansible/tasks/setup-kong.yml new file mode 100644 index 0000000..b34f96e --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/setup-kong.yml @@ -0,0 +1,62 @@ +- name: Kong - system user + user: name=kong + +# Kong installation steps from http://archive.vn/3HRQx +- name: Kong - system dependencies + apt: + pkg: + - openssl + - libpcre3 + - procps + - perl + +- name: Kong - download deb package + get_url: + url: "https://packages.konghq.com/public/gateway-28/deb/ubuntu/pool/{{ kong_release_target }}/main/k/ko/kong_2.8.1/{{ kong_deb }}" + dest: /tmp/kong.deb + checksum: "{{ kong_deb_checksum }}" + +- name: Kong - deb installation + apt: deb=file:///tmp/kong.deb + +- name: Kong - ensure it is NOT autoremoved + shell: | + set -e + apt-mark manual kong zlib1g* + +- name: Kong - configuration + template: + src: files/kong_config/kong.conf.j2 + dest: /etc/kong/kong.conf + +- name: Kong - hand over ownership of /usr/local/kong to user kong + file: + path: /usr/local/kong + recurse: yes + owner: kong + +# [warn] ulimit is currently set to "1024". 
For better performance set it to at least +# "4096" using "ulimit -n" +- name: Kong - bump up ulimit + pam_limits: + limit_item: nofile + limit_type: soft + domain: kong + value: "4096" + +- name: Kong - create env file + template: + src: files/kong_config/kong.env.j2 + dest: /etc/kong/kong.env + +- name: Kong - create service file + template: + src: files/kong_config/kong.service.j2 + dest: /etc/systemd/system/kong.service + +- name: Kong - disable service + systemd: + enabled: no + name: kong + state: stopped + daemon_reload: yes diff --git a/postgres_15.8.1.044/ansible/tasks/setup-migrations.yml b/postgres_15.8.1.044/ansible/tasks/setup-migrations.yml new file mode 100644 index 0000000..6eea684 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/setup-migrations.yml @@ -0,0 +1,13 @@ +- name: Run migrate.sh script + shell: ./migrate.sh + register: retval + when: debpkg_mode or stage2_nix + args: + chdir: /tmp/migrations/db + failed_when: retval.rc != 0 + +- name: Create /root/MIGRATION-AMI file + file: + path: "/root/MIGRATION-AMI" + state: touch + when: debpkg_mode or stage2_nix diff --git a/postgres_15.8.1.044/ansible/tasks/setup-nginx.yml b/postgres_15.8.1.044/ansible/tasks/setup-nginx.yml new file mode 100644 index 0000000..77fb770 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/setup-nginx.yml @@ -0,0 +1,82 @@ +- name: nginx - system user + user: name=nginx + +# nginx is built from source below (configure/build/install) +- name: nginx - system dependencies + apt: + pkg: + - openssl + - libpcre3-dev + - libssl-dev + - zlib1g-dev + +- name: nginx - download source + get_url: + url: "https://nginx.org/download/nginx-{{ nginx_release }}.tar.gz" + dest: /tmp/nginx-{{ nginx_release }}.tar.gz + checksum: "{{ nginx_release_checksum }}" + +- name: nginx - unpack archive + unarchive: + remote_src: yes + src: /tmp/nginx-{{ nginx_release }}.tar.gz + dest: /tmp + +- name: nginx - configure + shell: + chdir: /tmp/nginx-{{ nginx_release }} + cmd: | + set -e + + ./configure \ + --prefix=/usr/local/nginx \ + --conf-path=/etc/nginx/nginx.conf \ + --with-http_ssl_module \ + --with-http_realip_module \ + --with-threads + become: yes + +- name: nginx - build + community.general.make: + target: build + chdir: /tmp/nginx-{{ nginx_release }} + jobs: "{{ parallel_jobs | default(omit) }}" + become: yes + +- name: nginx - install + make: + chdir: /tmp/nginx-{{ nginx_release }} + target: install + become: yes + +- name: nginx - hand over ownership of /usr/local/nginx to user nginx + file: + path: /usr/local/nginx + recurse: yes + owner: nginx + +- name: nginx - hand over ownership of /etc/nginx to user nginx + file: + path: /etc/nginx + recurse: yes + owner: nginx + +# [warn] ulimit is currently set to "1024".
For better performance set it to at least +# "4096" using "ulimit -n" +- name: nginx - bump up ulimit + pam_limits: + limit_item: nofile + limit_type: soft + domain: nginx + value: "4096" + +- name: nginx - create service file + template: + src: files/nginx.service.j2 + dest: /etc/systemd/system/nginx.service + +# Keep it dormant for the timebeing + +# - name: nginx - reload systemd +# systemd: +# daemon_reload: yes diff --git a/postgres_15.8.1.044/ansible/tasks/setup-pgbouncer.yml b/postgres_15.8.1.044/ansible/tasks/setup-pgbouncer.yml new file mode 100644 index 0000000..4381ba2 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/setup-pgbouncer.yml @@ -0,0 +1,135 @@ +# PgBouncer +- name: PgBouncer - download & install dependencies + apt: + pkg: + - build-essential + - libssl-dev + - pkg-config + - libevent-dev + - libsystemd-dev + update_cache: yes + cache_valid_time: 3600 + +- name: PgBouncer - download latest release + get_url: + url: "https://www.pgbouncer.org/downloads/files/{{ pgbouncer_release }}/pgbouncer-{{ pgbouncer_release }}.tar.gz" + dest: /tmp/pgbouncer-{{ pgbouncer_release }}.tar.gz + checksum: "{{ pgbouncer_release_checksum }}" + timeout: 60 + +- name: PgBouncer - unpack archive + unarchive: + remote_src: yes + src: /tmp/pgbouncer-{{ pgbouncer_release }}.tar.gz + dest: /tmp + become: yes + +- name: PgBouncer - configure + shell: + cmd: "./configure --prefix=/usr/local --with-systemd" + chdir: /tmp/pgbouncer-{{ pgbouncer_release }} + become: yes + +- name: PgBouncer - build + make: + chdir: /tmp/pgbouncer-{{ pgbouncer_release }} + become: yes + +- name: PgBouncer - install + make: + chdir: /tmp/pgbouncer-{{ pgbouncer_release }} + target: install + become: yes + +- name: Create pgbouncer user + user: + name: pgbouncer + shell: /bin/false + comment: PgBouncer user + groups: postgres,ssl-cert + +- name: PgBouncer - create a directory if it does not exist + file: + path: /etc/pgbouncer + state: directory + owner: pgbouncer + group: pgbouncer + mode: '0700' + +- name: PgBouncer - create a directory if it does not exist + file: + state: directory + owner: pgbouncer + group: pgbouncer + path: '{{ item }}' + mode: '0775' + with_items: + - '/etc/pgbouncer-custom' + +- name: create placeholder config files + file: + path: '/etc/pgbouncer-custom/{{ item }}' + state: touch + owner: pgbouncer + group: pgbouncer + mode: 0664 + with_items: + - 'generated-optimizations.ini' + - 'custom-overrides.ini' + - 'ssl-config.ini' + +- name: PgBouncer - adjust pgbouncer.ini + copy: + src: files/pgbouncer_config/pgbouncer.ini.j2 + dest: /etc/pgbouncer/pgbouncer.ini + owner: pgbouncer + mode: '0700' + +- name: PgBouncer - create a directory if it does not exist + file: + path: /etc/pgbouncer/userlist.txt + state: touch + owner: pgbouncer + mode: '0700' + +- name: import /etc/tmpfiles.d/pgbouncer.conf + template: + src: files/pgbouncer_config/tmpfiles.d-pgbouncer.conf.j2 + dest: /etc/tmpfiles.d/pgbouncer.conf + become: yes + +- name: PgBouncer - By default allow ssl connections. 
+ become: yes + copy: + dest: /etc/pgbouncer-custom/ssl-config.ini + content: | + client_tls_sslmode = allow + +- name: Grant pg_hba and pgbouncer grp perm for adminapi updates + shell: | + chmod g+w /etc/postgresql/pg_hba.conf + chmod g+w /etc/pgbouncer-custom/ssl-config.ini + +# Add fail2ban filter +- name: import jail.d/pgbouncer.conf + template: + src: files/fail2ban_config/jail-pgbouncer.conf.j2 + dest: /etc/fail2ban/jail.d/pgbouncer.conf + become: yes + +- name: import filter.d/pgbouncer.conf + template: + src: files/fail2ban_config/filter-pgbouncer.conf.j2 + dest: /etc/fail2ban/filter.d/pgbouncer.conf + become: yes + +# Add systemd file for PgBouncer +- name: PgBouncer - import postgresql.service + template: + src: files/pgbouncer_config/pgbouncer.service.j2 + dest: /etc/systemd/system/pgbouncer.service + become: yes + +- name: PgBouncer - reload systemd + systemd: + daemon_reload: yes diff --git a/postgres_15.8.1.044/ansible/tasks/setup-postgres.yml b/postgres_15.8.1.044/ansible/tasks/setup-postgres.yml new file mode 100644 index 0000000..a45b7a5 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/setup-postgres.yml @@ -0,0 +1,321 @@ +- name: Postgres - copy package + copy: + src: files/postgres/ + dest: /tmp/build/ + when: debpkg_mode + +- name: Postgres - add PPA + apt_repository: + repo: "deb [ trusted=yes ] file:///tmp/build ./" + state: present + when: debpkg_mode + +- name: Postgres - install commons + apt: + name: postgresql-common + install_recommends: no + when: debpkg_mode + +- name: Do not create main cluster + shell: + cmd: sed -ri 's/#(create_main_cluster) .*$/\1 = false/' /etc/postgresql-common/createcluster.conf + when: debpkg_mode + +- name: Postgres - install server + apt: + name: postgresql-{{ postgresql_major }}={{ postgresql_release }}-1.pgdg20.04+1 + install_recommends: no + when: debpkg_mode + +- name: Postgres - remove PPA + apt_repository: + repo: "deb [ trusted=yes ] file:///tmp/build ./" + state: absent + when: debpkg_mode + +- name: Postgres - cleanup package + file: + path: /tmp/build + state: absent + when: debpkg_mode + +- name: install locales + apt: + name: locales + state: present + become: yes + when: stage2_nix + +- name: configure locales + command: echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen + become: yes + when: stage2_nix + +- name: locale-gen + command: sudo locale-gen + when: stage2_nix + +- name: update-locale + command: sudo update-locale + when: stage2_nix + +- name: Create symlink to /usr/lib/postgresql/bin + shell: + cmd: ln -s /usr/lib/postgresql/{{ postgresql_major }}/bin /usr/lib/postgresql/bin + when: debpkg_mode + +- name: create ssl-cert group + group: + name: ssl-cert + state: present + when: nixpkg_mode +# the old method of installing from debian creates this group, but we must create it explicitly +# for the nix built version + +- name: create postgres group + group: + name: postgres + state: present + when: nixpkg_mode + +- name: create postgres user + shell: adduser --system --home /var/lib/postgresql --no-create-home --shell /bin/bash --group --gecos "PostgreSQL administrator" postgres + args: + executable: /bin/bash + become: yes + when: nixpkg_mode + +- name: add postgres user to postgres group + shell: usermod -a -G ssl-cert postgres + args: + executable: /bin/bash + become: yes + when: nixpkg_mode + +- name: Create relevant directories + file: + path: '{{ item }}' + recurse: yes + state: directory + owner: postgres + group: postgres + with_items: + - '/home/postgres' + - 
'/var/log/postgresql' + - '/var/lib/postgresql' + when: debpkg_mode or nixpkg_mode + +- name: Allow adminapi to write custom config + file: + path: '{{ item }}' + recurse: yes + state: directory + owner: postgres + group: postgres + mode: 0775 + with_items: + - '/etc/postgresql' + - '/etc/postgresql-custom' + when: debpkg_mode or nixpkg_mode + +- name: create placeholder config files + file: + path: '/etc/postgresql-custom/{{ item }}' + state: touch + owner: postgres + group: postgres + mode: 0664 + with_items: + - 'generated-optimizations.conf' + - 'custom-overrides.conf' + when: debpkg_mode or nixpkg_mode + +# Move Postgres configuration files into /etc/postgresql +# Add postgresql.conf +- name: import postgresql.conf + template: + src: files/postgresql_config/postgresql.conf.j2 + dest: /etc/postgresql/postgresql.conf + group: postgres + when: debpkg_mode or nixpkg_mode + +# Add pg_hba.conf +- name: import pg_hba.conf + template: + src: files/postgresql_config/pg_hba.conf.j2 + dest: /etc/postgresql/pg_hba.conf + group: postgres + when: debpkg_mode or nixpkg_mode + +# Add pg_ident.conf +- name: import pg_ident.conf + template: + src: files/postgresql_config/pg_ident.conf.j2 + dest: /etc/postgresql/pg_ident.conf + group: postgres + when: debpkg_mode or nixpkg_mode + +# Add custom config for read replicas set up +- name: Move custom read-replica.conf file to /etc/postgresql-custom/read-replica.conf + template: + src: "files/postgresql_config/custom_read_replica.conf.j2" + dest: /etc/postgresql-custom/read-replica.conf + mode: 0664 + owner: postgres + group: postgres + when: debpkg_mode or nixpkg_mode + +# Install extensions before init +- name: Install Postgres extensions + import_tasks: tasks/setup-docker.yml + when: debpkg_mode or stage2_nix + +#stage 2 postgres tasks +- name: stage2 postgres tasks + import_tasks: tasks/stage2-setup-postgres.yml + when: stage2_nix + +# init DB +- name: Create directory on data volume + file: + path: '{{ item }}' + recurse: yes + state: directory + owner: postgres + group: postgres + mode: 0750 + with_items: + - "/data/pgdata" + when: debpkg_mode or nixpkg_mode + +- name: Link database data_dir to data volume directory + file: + src: "/data/pgdata" + path: "/var/lib/postgresql/data" + state: link + force: yes + when: debpkg_mode or nixpkg_mode + +- name: Initialize the database + become: yes + become_user: postgres + shell: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb -o "--allow-group-access" -o "--username=supabase_admin" + vars: + ansible_command_timeout: 60 + when: debpkg_mode + +- name: Make sure .bashrc exists + file: + path: /var/lib/postgresql/.bashrc + state: touch + owner: postgres + group: postgres + when: nixpkg_mode + +- name: Check psql_version and modify supautils.conf and postgresql.conf if necessary + block: + - name: Check if psql_version is psql_orioledb + set_fact: + is_psql_oriole: "{{ psql_version in ['psql_orioledb-16', 'psql_orioledb-17'] }}" + + - name: Initialize the database stage2_nix (non-orioledb) + become: yes + become_user: postgres + shell: source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb -o "--allow-group-access" -o "--username=supabase_admin" + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + vars: + ansible_command_timeout: 60 + when: stage2_nix and not is_psql_oriole + + - name: Initialize the database 
stage2_nix (orioledb) + become: yes + become_user: postgres + shell: > + source /var/lib/postgresql/.bashrc && initdb -D /var/lib/postgresql/data + --allow-group-access + --username=supabase_admin + --locale-provider=icu + --encoding=UTF-8 + --icu-locale=en_US.UTF-8 + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + vars: + ansible_command_timeout: 60 + when: stage2_nix and is_psql_oriole + +- name: copy PG systemd unit + template: + src: files/postgresql_config/postgresql.service.j2 + dest: /etc/systemd/system/postgresql.service + when: debpkg_mode or stage2_nix + +- name: copy optimizations systemd unit + template: + src: files/database-optimizations.service.j2 + dest: /etc/systemd/system/database-optimizations.service + when: debpkg_mode or stage2_nix + +- name: initialize pg required state + become: yes + shell: | + mkdir -p /run/postgresql + chown -R postgres:postgres /run/postgresql + when: stage2_nix and qemu_mode is defined + +- name: Restart Postgres Database without Systemd + become: yes + become_user: postgres + shell: | + source /var/lib/postgresql/.bashrc + /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + when: stage2_nix + + +# Reload +- name: System - systemd reload + systemd: + enabled: yes + name: postgresql + daemon_reload: yes + when: debpkg_mode or stage2_nix + + +- name: Add LOCALE_ARCHIVE to .bashrc + lineinfile: + dest: "/var/lib/postgresql/.bashrc" + line: 'export LOCALE_ARCHIVE=/usr/lib/locale/locale-archive' + create: yes + become: yes + when: nixpkg_mode + +- name: Add LANG items to .bashrc + lineinfile: + dest: "/var/lib/postgresql/.bashrc" + line: "{{ item }}" + loop: + - 'export LANG="en_US.UTF-8"' + - 'export LANGUAGE="en_US.UTF-8"' + - 'export LC_ALL="en_US.UTF-8"' + - 'export LANG="en_US.UTF-8"' + - 'export LC_CTYPE="en_US.UTF-8"' + become: yes + when: nixpkg_mode diff --git a/postgres_15.8.1.044/ansible/tasks/setup-postgrest.yml b/postgres_15.8.1.044/ansible/tasks/setup-postgrest.yml new file mode 100644 index 0000000..a98d199 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/setup-postgrest.yml @@ -0,0 +1,109 @@ +- name: PostgREST - system user + user: name=postgrest + +- name: PostgREST - add Postgres PPA gpg key + apt_key: + url: https://www.postgresql.org/media/keys/ACCC4CF8.asc + state: present + +- name: PostgREST - add Postgres PPA + apt_repository: + repo: "deb http://apt.postgresql.org/pub/repos/apt/ focal-pgdg {{ postgresql_major }}" + state: present + +- name: PostgREST - update apt cache + apt: + update_cache: yes + +# libpq is a C library that enables user programs to communicate with +# the PostgreSQL database server. 
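+# (Illustrative aside, not a task in this playbook: once libpq5 is installed by +# the task below, a quick spot check that the runtime library is resolvable is +# `ldconfig -p | grep libpq.so.5`.)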
+- name: PostgREST - system dependencies + apt: + pkg: + - libpq5 + - libnuma-dev + +- name: PostgREST - remove Postgres PPA gpg key + apt_key: + url: https://www.postgresql.org/media/keys/ACCC4CF8.asc + state: absent + +- name: PostgREST - remove Postgres PPA + apt_repository: + repo: "deb http://apt.postgresql.org/pub/repos/apt/ focal-pgdg {{ postgresql_major }}" + state: absent + +- name: postgis - ensure dependencies do not get autoremoved + shell: | + set -e + apt-mark manual libpq5* + apt-mark manual libnuma* + apt-mark auto libnuma*-dev + +- name: PostgREST - download ubuntu binary archive (arm) + get_url: + url: "https://github.com/PostgREST/postgrest/releases/download/v{{ postgrest_release }}/postgrest-v{{ postgrest_release }}-ubuntu-aarch64.tar.xz" + dest: /tmp/postgrest.tar.xz + checksum: "{{ postgrest_arm_release_checksum }}" + timeout: 60 + when: platform == "arm64" + +- name: PostgREST - download ubuntu binary archive (x86) + get_url: + url: "https://github.com/PostgREST/postgrest/releases/download/v{{ postgrest_release }}/postgrest-v{{ postgrest_release }}-linux-static-x64.tar.xz" + dest: /tmp/postgrest.tar.xz + checksum: "{{ postgrest_x86_release_checksum }}" + timeout: 60 + when: platform == "amd64" + +- name: PostgREST - unpack archive in /opt + unarchive: + remote_src: yes + src: /tmp/postgrest.tar.xz + dest: /opt + owner: postgrest + mode: '0755' + +- name: create directories + file: + state: directory + owner: postgrest + group: postgrest + mode: '0775' + path: /etc/postgrest + +- name: empty files + file: + state: touch + owner: postgrest + group: postgrest + path: /etc/postgrest/{{ item }} + with_items: + - base.conf + - generated.conf + +- name: create conf merging script + copy: + content: | + #! /usr/bin/env bash + set -euo pipefail + set -x + + cd "$(dirname "$0")" + cat $@ > merged.conf + dest: /etc/postgrest/merge.sh + mode: 0750 + owner: postgrest + group: postgrest + +- name: PostgREST - create service files + template: + src: files/{{ item }}.j2 + dest: /etc/systemd/system/{{ item }} + with_items: + - postgrest.service + - postgrest-optimizations.service + +- name: PostgREST - reload systemd + systemd: + daemon_reload: yes diff --git a/postgres_15.8.1.044/ansible/tasks/setup-supabase-internal.yml b/postgres_15.8.1.044/ansible/tasks/setup-supabase-internal.yml new file mode 100644 index 0000000..7aa9317 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/setup-supabase-internal.yml @@ -0,0 +1,119 @@ +- name: AWS CLI dep + apt: + pkg: + - unzip + - jq + install_recommends: no + +- name: AWS CLI (arm) + get_url: + url: "https://awscli.amazonaws.com/awscli-exe-linux-aarch64-{{ aws_cli_release }}.zip" + dest: "/tmp/awscliv2.zip" + timeout: 60 + when: platform == "arm64" + +- name: AWS CLI (x86) + get_url: + url: "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-{{ aws_cli_release }}.zip" + dest: "/tmp/awscliv2.zip" + timeout: 60 + when: platform == "amd64" + +- name: AWS CLI - expand + unarchive: + remote_src: yes + src: "/tmp/awscliv2.zip" + dest: "/tmp" + +- name: AWS CLI - install + shell: "/tmp/aws/install --update" + become: true + +- name: AWS CLI - configure ipv6 support for s3 + shell: | + aws configure set default.s3.use_dualstack_endpoint true + +- name: install Vector for logging + become: yes + apt: + deb: "{{ vector_x86_deb }}" + when: platform == "amd64" + +- name: install Vector for logging + become: yes + apt: + deb: "{{ vector_arm_deb }}" + when: platform == "arm64" + +- name: add Vector to postgres group + become: yes + shell: + cmd: | + 
usermod -a -G postgres vector + +- name: create service files for Vector + template: + src: files/vector.service.j2 + dest: /etc/systemd/system/vector.service + +- name: configure tmpfiles for postgres - overwrites upstream package + template: + src: files/postgresql_config/tmpfiles.postgresql.conf + dest: /etc/tmpfiles.d/postgresql-common.conf + +- name: fix permissions for vector config to be managed + shell: + cmd: | + chown -R vector:vector /etc/vector + chmod 0775 /etc/vector + +- name: vector - reload systemd + systemd: + daemon_reload: yes + +- name: Create checkpoints dir + become: yes + file: + path: /var/lib/vector + state: directory + owner: vector + +- name: Include file for generated optimizations in postgresql.conf + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#include = '/etc/postgresql-custom/generated-optimizations.conf'" + replace: "include = '/etc/postgresql-custom/generated-optimizations.conf'" + +- name: Include file for custom overrides in postgresql.conf + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#include = '/etc/postgresql-custom/custom-overrides.conf'" + replace: "include = '/etc/postgresql-custom/custom-overrides.conf'" + +- name: Install Postgres exporter + import_tasks: internal/postgres-exporter.yml + +- name: Install admin-mgr + import_tasks: internal/admin-mgr.yml + +- name: Install adminapi + import_tasks: internal/admin-api.yml + +- name: Init nftabless + import_tasks: internal/setup-nftables.yml + +- name: Install pg_egress_collect + import_tasks: internal/pg_egress_collect.yml + +- name: Install PostgreSQL prestart script + import_tasks: internal/postgresql-prestart.yml + +- name: Install salt minion + import_tasks: internal/install-salt.yml + tags: + - aws-only + +- name: Envoy - use lds.supabase.yaml for /etc/envoy/lds.yaml + command: mv /etc/envoy/lds.supabase.yaml /etc/envoy/lds.yaml diff --git a/postgres_15.8.1.044/ansible/tasks/setup-system.yml b/postgres_15.8.1.044/ansible/tasks/setup-system.yml new file mode 100644 index 0000000..c1285bf --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/setup-system.yml @@ -0,0 +1,199 @@ +- name: System - apt update and apt upgrade + apt: update_cache=yes upgrade=yes + when: debpkg_mode or nixpkg_mode + # SEE http://archive.vn/DKJjs#parameter-upgrade + +- name: Install required security updates + apt: + pkg: + - tzdata + - linux-libc-dev + when: debpkg_mode or nixpkg_mode +# SEE https://github.com/georchestra/ansible/issues/55#issuecomment-588313638 +# Without this, a similar error is faced +- name: Install Ansible dependencies + apt: + pkg: + - acl + when: debpkg_mode or nixpkg_mode + +- name: Install security tools + apt: + pkg: + - nftables + - fail2ban + update_cache: yes + cache_valid_time: 3600 + when: debpkg_mode or nixpkg_mode + +- name: Use nftables backend + shell: | + update-alternatives --set iptables /usr/sbin/iptables-nft + update-alternatives --set ip6tables /usr/sbin/ip6tables-nft + update-alternatives --set arptables /usr/sbin/arptables-nft + update-alternatives --set ebtables /usr/sbin/ebtables-nft + systemctl restart ufw + when: debpkg_mode or nixpkg_mode + +- name: Create Sysstat log directory + file: + path: /var/log/sysstat + state: directory + when: debpkg_mode or nixpkg_mode + +- name: Install other useful tools + apt: + pkg: + - bwm-ng + - htop + - net-tools + - ngrep + - sysstat + - vim-tiny + update_cache: yes + when: debpkg_mode or nixpkg_mode + +- name: Configure sysstat + copy: + src: files/sysstat.sysstat + dest: 
/etc/sysstat/sysstat + when: debpkg_mode or nixpkg_mode + +- name: Configure default sysstat + copy: + src: files/default.sysstat + dest: /etc/default/sysstat + when: debpkg_mode or nixpkg_mode + + +- name: Adjust APT update intervals + copy: + src: files/apt_periodic + dest: /etc/apt/apt.conf.d/10periodic + when: debpkg_mode or nixpkg_mode + +# Find platform architecture and set as a variable +- name: finding platform architecture + shell: if [ $(uname -m) = "aarch64" ]; then echo "arm64"; else echo "amd64"; fi + register: platform_output + tags: + - update + - update-only +- set_fact: + platform: "{{ platform_output.stdout }}" + tags: + - update + - update-only + when: debpkg_mode or nixpkg_mode or stage2_nix + +- name: create overrides dir + file: + state: directory + owner: root + group: root + path: /etc/systemd/system/systemd-resolved.service.d + mode: '0700' + when: debpkg_mode or nixpkg_mode + +- name: Custom systemd overrides for resolved + copy: + src: files/systemd-resolved.conf + dest: /etc/systemd/system/systemd-resolved.service.d/override.conf + when: debpkg_mode or nixpkg_mode + +- name: System - Create services.slice + template: + src: files/services.slice.j2 + dest: /etc/systemd/system/services.slice + when: debpkg_mode or nixpkg_mode + + +- name: System - systemd reload + systemd: daemon_reload=yes + when: debpkg_mode or nixpkg_mode + +- name: Configure journald + copy: + src: files/journald.conf + dest: /etc/systemd/journald.conf + when: debpkg_mode or nixpkg_mode + +- name: reload systemd-journald + systemd: + name: systemd-journald + state: restarted + when: debpkg_mode or nixpkg_mode + +- name: Configure logind + copy: + src: files/logind.conf + dest: /etc/systemd/logind.conf + when: debpkg_mode or nixpkg_mode + +- name: reload systemd-logind + systemd: + name: systemd-logind + state: restarted + when: debpkg_mode or nixpkg_mode + +- name: enable timestamps for shell history + copy: + content: | + export HISTTIMEFORMAT='%d/%m/%y %T ' + dest: /etc/profile.d/09-history-timestamps.sh + mode: 0644 + owner: root + group: root + when: debpkg_mode or nixpkg_mode + +- name: set hosts file + copy: + content: | + 127.0.0.1 localhost + ::1 localhost + dest: /etc/hosts + mode: 0644 + owner: root + group: root + when: debpkg_mode or stage2_nix + +#Set Sysctl params for restarting the OS on oom after 10 +- name: Set vm.panic_on_oom=1 + ansible.builtin.sysctl: + name: vm.panic_on_oom + value: '1' + state: present + reload: yes + when: debpkg_mode or nixpkg_mode + +- name: Set kernel.panic=10 + ansible.builtin.sysctl: + name: kernel.panic + value: '10' + state: present + reload: yes + when: debpkg_mode or nixpkg_mode + +- name: configure system + ansible.posix.sysctl: + name: 'net.core.somaxconn' + value: 16834 + +- name: configure system + ansible.posix.sysctl: + name: 'net.ipv4.ip_local_port_range' + value: '1025 65000' + +#Set Sysctl params specific to keepalives +- name: Set net.ipv4.tcp_keepalive_time=1800 + ansible.builtin.sysctl: + name: net.ipv4.tcp_keepalive_time + value: 1800 + state: present + when: debpkg_mode or nixpkg_mode +- name: Set net.ipv4.tcp_keepalive_intvl=60 + ansible.builtin.sysctl: + name: net.ipv4.tcp_keepalive_intvl + value: 60 + state: present + when: debpkg_mode or nixpkg_mode diff --git a/postgres_15.8.1.044/ansible/tasks/setup-wal-g.yml b/postgres_15.8.1.044/ansible/tasks/setup-wal-g.yml new file mode 100644 index 0000000..bbc64cd --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/setup-wal-g.yml @@ -0,0 +1,130 @@ +# Downloading dependencies +- name: 
wal-g dependencies + become: yes + apt: + pkg: + - libbrotli-dev + - liblzo2-dev + - libsodium-dev + - cmake + +# install go dependency for WAL-G +- name: wal-g go dependency + get_url: + url: "https://golang.org/dl/go{{ golang_version }}.linux-{{ platform }}.tar.gz" + dest: /tmp + checksum: "{{ golang_version_checksum[platform] }}" + timeout: 60 + +- name: unpack go archive + unarchive: + remote_src: yes + src: "/tmp/go{{ golang_version }}.linux-{{ platform }}.tar.gz" + dest: /usr/local + +# Download WAL-G +- name: wal-g - download latest version + git: + repo: https://github.com/wal-g/wal-g.git + dest: /tmp/wal-g + version: "v{{ wal_g_release }}" + become: yes + +- name: wal-g - pg_clean + make: + chdir: /tmp/wal-g + target: pg_clean + params: + GOBIN: "/usr/local/bin" + PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" + USE_LIBSODIUM: true + become: yes + ignore_errors: yes + +- name: wal-g - deps + make: + chdir: /tmp/wal-g + target: deps + params: + GOBIN: "/usr/local/bin" + PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" + USE_LIBSODIUM: true + become: yes + ignore_errors: yes + +- name: wal-g - build and install + community.general.make: + chdir: /tmp/wal-g + target: pg_install + jobs: "{{ parallel_jobs | default(omit) }}" + params: + GOBIN: "/usr/local/bin" + PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" + USE_LIBSODIUM: true + become: yes + +- name: Create wal-g group + group: + name: wal-g + state: present + +- name: Create wal-g user + user: + name: wal-g + shell: /bin/false + comment: WAL-G user + group: wal-g + groups: wal-g, postgres + +- name: Create a config directory owned by wal-g + file: + path: /etc/wal-g + state: directory + owner: wal-g + group: wal-g + mode: '0770' + +- name: Create /etc/wal-g/config.json + file: + path: /etc/wal-g/config.json + state: touch + owner: wal-g + group: wal-g + mode: '0664' + +- name: Move custom wal-g.conf file to /etc/postgresql-custom/wal-g.conf + template: + src: "files/postgresql_config/custom_walg.conf.j2" + dest: /etc/postgresql-custom/wal-g.conf + mode: 0664 + owner: postgres + group: postgres + +- name: Add script to be run for restore_command + template: + src: "files/walg_helper_scripts/wal_fetch.sh" + dest: /home/postgres/wal_fetch.sh + mode: 0500 + owner: postgres + group: postgres + +- name: Add helper script for wal_fetch.sh + template: + src: "files/walg_helper_scripts/wal_change_ownership.sh" + dest: /root/wal_change_ownership.sh + mode: 0700 + owner: root + +- name: Include /etc/postgresql-custom/wal-g.conf in postgresql.conf + become: yes + replace: + path: /etc/postgresql/postgresql.conf + regexp: "#include = '/etc/postgresql-custom/wal-g.conf'" + replace: "include = '/etc/postgresql-custom/wal-g.conf'" + +# Clean up Go +- name: Uninstall Go + become: yes + file: + path: /usr/local/go + state: absent diff --git a/postgres_15.8.1.044/ansible/tasks/stage2-setup-postgres.yml b/postgres_15.8.1.044/ansible/tasks/stage2-setup-postgres.yml new file mode 100644 index 0000000..d2877e2 --- /dev/null +++ b/postgres_15.8.1.044/ansible/tasks/stage2-setup-postgres.yml @@ -0,0 +1,282 @@ +# - name: Install openjdk11 for pljava from nix binary cache +# become: yes +# shell: | +# sudo -u postgres bash -c ". 
/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install nixpkgs#openjdk11" +# It was decided to leave pljava disabled at https://github.com/supabase/postgres/pull/690 therefore removing this task + +- name: Check psql_version and modify supautils.conf and postgresql.conf if necessary + block: + - name: Check if psql_version is psql_orioledb-16 + set_fact: + is_psql_oriole: "{{ psql_version in ['psql_orioledb-16', 'psql_orioledb-17'] }}" + + - name: Remove specified extensions from postgresql.conf if oriole-16 build + ansible.builtin.command: + cmd: > + sed -i 's/ timescaledb,//g' + /etc/postgresql/postgresql.conf + when: is_psql_oriole and stage2_nix + become: yes + + - name: Remove specified extensions from supautils.conf if oriole-16 build + ansible.builtin.command: + cmd: > + sed -i 's/ timescaledb,//g; s/ vector,//g; s/ plv8,//g; s/ postgis,//g; s/ pgrouting,//g' + /etc/postgresql-custom/supautils.conf + when: is_psql_oriole and stage2_nix + become: yes + + - name: Remove db_user_namespace from postgresql.conf if oriole-xx build + ansible.builtin.command: + cmd: > + sed -i 's/db_user_namespace = off/#db_user_namespace = off/g;' + /etc/postgresql/postgresql.conf + when: is_psql_oriole and stage2_nix + become: yes + + - name: Append orioledb to shared_preload_libraries append within closing quote + ansible.builtin.command: + cmd: > + sed -i 's/\(shared_preload_libraries.*\)'\''\(.*\)$/\1, orioledb'\''\2/' + /etc/postgresql/postgresql.conf + when: is_psql_oriole and stage2_nix + become: yes + + - name: Add default_table_access_method setting + ansible.builtin.lineinfile: + path: /etc/postgresql/postgresql.conf + line: "default_table_access_method = 'orioledb'" + state: present + when: is_psql_oriole and stage2_nix + become: yes + + - name: Add ORIOLEDB_ENABLED environment variable + ansible.builtin.lineinfile: + path: /etc/environment + line: 'ORIOLEDB_ENABLED=true' + when: is_psql_oriole and stage2_nix + become: yes + +- name: Install Postgres from nix binary cache + become: yes + shell: | + sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:supabase/postgres/{{ git_commit_sha }}#{{psql_version}}/bin" + when: stage2_nix + +- name: Install pg_prove from nix binary cache + become: yes + shell: | + sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:supabase/postgres/{{ git_commit_sha }}#pg_prove" + when: stage2_nix + +- name: Install supabase-groonga from nix binary cache + become: yes + shell: | + sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:supabase/postgres/{{ git_commit_sha }}#supabase-groonga" + when: stage2_nix + +- name: Install debug symbols for postgres version + become: yes + shell: | + sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:supabase/postgres/{{ git_commit_sha }}#{{postgresql_version}}_debug" + when: stage2_nix + +- name: Install source files for postgresql version + become: yes + shell: | + sudo -u postgres bash -c ". 
/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:supabase/postgres/{{ git_commit_sha }}#{{postgresql_version}}_src" + when: stage2_nix + +- name: Set ownership and permissions for /etc/ssl/private + become: yes + file: + path: /etc/ssl/private + owner: root + group: postgres + mode: '0750' + when: stage2_nix + +- name: Set permissions for postgresql.env + become: yes + file: + path: /etc/environment.d/postgresql.env + owner: postgres + group: postgres + mode: '0644' + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/bin directory exists + file: + path: /usr/lib/postgresql/bin + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share directory exists + file: + path: /usr/lib/postgresql/share/postgresql + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/contrib directory exists + file: + path: /usr/lib/postgresql/share/postgresql/contrib + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/timezonesets directory exists + file: + path: /usr/lib/postgresql/share/postgresql/timezonesets + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/tsearch_data directory exists + file: + path: /usr/lib/postgresql/share/postgresql/tsearch_data + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/extension directory exists + file: + path: /usr/lib/postgresql/share/postgresql/extension + state: directory + owner: postgres + group: postgres + when: stage2_nix + +# - name: Ensure /usr/lib/postgresql/share/postgresql/pljava directory exists +# file: +# path: /usr/lib/postgresql/share/postgresql/pljava +# state: directory +# owner: postgres +# group: postgres +# when: stage2_nix +# It was decided to leave pljava disabled at https://github.com/supabase/postgres/pull/690 therefore removing this task + +- name: import pgsodium_getkey script + template: + src: files/pgsodium_getkey_readonly.sh.j2 + dest: "/usr/lib/postgresql/bin/pgsodium_getkey.sh" + owner: postgres + group: postgres + mode: 0700 + when: stage2_nix + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/bin to /usr/lib/postgresql/bin + shell: >- + find /var/lib/postgresql/.nix-profile/bin/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "{{ item }}/$(basename $0)"' {} \; + loop: + - /usr/lib/postgresql/bin + - /usr/bin + become: yes + when: stage2_nix + +- name: Check if /usr/bin/pg_config exists + stat: + path: /usr/bin/pg_config + register: pg_config_stat + when: stage2_nix + +- name: Remove existing /usr/bin/pg_config if it is not a symlink + file: + path: /usr/bin/pg_config + state: absent + when: pg_config_stat.stat.exists and not pg_config_stat.stat.islnk and stage2_nix + become: yes + +- name: Ensure postgres user has ownership of symlink + shell: >- + find /var/lib/postgresql/.nix-profile/bin/ -maxdepth 1 -type f,l -exec chown postgres:postgres "/usr/bin/$(basename {})" \; + become: yes + when: stage2_nix + +# - name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/pljava to /usr/lib/postgresql/share/postgresql/pljava +# file: +# src: "{{ item }}" +# dest: "/usr/lib/postgresql/share/postgresql/pljava/{{ item | basename }}" +# state: link +# with_fileglob: +# - "/var/lib/postgresql/.nix-profile/share/pljava/*" +# become: yes +# It was decided to leave 
pljava disabled at https://github.com/supabase/postgres/pull/690 therefore removing this task + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql to /usr/lib/postgresql/share/postgresql + shell: >- + find /var/lib/postgresql/.nix-profile/share/postgresql/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/$(basename $0)"' {} \; + become: yes + when: stage2_nix + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/extension to /usr/lib/postgresql/share/postgresql/extension + shell: >- + find /var/lib/postgresql/.nix-profile/share/postgresql/extension/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/extension/$(basename $0)"' {} \; + become: yes + when: stage2_nix + +- name: create destination directory + file: + path: /usr/lib/postgresql/share/postgresql/contrib/ + state: directory + recurse: yes + when: stage2_nix + +- name: Check psql_version and run postgis linking if not oriole-xx + block: + - name: Check if psql_version is psql_orioledb-17 + set_fact: + is_psql_oriole: "{{ psql_version == 'psql_orioledb-17' }}" + + - name: Recursively create symbolic links and set permissions for the contrib/postgis-* dir + shell: > + sudo mkdir -p /usr/lib/postgresql/share/postgresql/contrib && \ + sudo find /var/lib/postgresql/.nix-profile/share/postgresql/contrib/ -mindepth 1 -type d -exec sh -c 'for dir do sudo ln -s "$dir" "/usr/lib/postgresql/share/postgresql/contrib/$(basename "$dir")"; done' sh {} + \ + && chown -R postgres:postgres "/usr/lib/postgresql/share/postgresql/contrib/" + become: yes + when: stage2_nix and not is_psql_oriole + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/timezonesets to /usr/lib/postgresql/share/postgresql/timeszonesets + shell: >- + find /var/lib/postgresql/.nix-profile/share/postgresql/timezonesets/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/timezonesets/$(basename $0)"' {} \; + become: yes + when: stage2_nix + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/tsearch_data to /usr/lib/postgresql/share/postgresql/tsearch_data + shell: >- + find /var/lib/postgresql/.nix-profile/share/postgresql/tsearch_data/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/tsearch_data/$(basename $0)"' {} \; + become: yes + when: stage2_nix + +- set_fact: + pg_bindir: "/usr/lib/postgresql/bin" + when: stage2_nix + +- name: pgsodium - set pgsodium.getkey_script + become: yes + lineinfile: + path: /etc/postgresql/postgresql.conf + state: present + # script is expected to be placed by finalization tasks for different target platforms + line: pgsodium.getkey_script= '{{ pg_bindir }}/pgsodium_getkey.sh' + when: stage2_nix + +- name: Create symbolic link for pgsodium_getkey script + file: + src: "/usr/lib/postgresql/bin/pgsodium_getkey.sh" + dest: "/usr/lib/postgresql/share/postgresql/extension/pgsodium_getkey" + state: link + become: yes + when: stage2_nix + +- name: Append GRN_PLUGINS_DIR to /etc/environment.d/postgresql.env + ansible.builtin.lineinfile: + path: /etc/environment.d/postgresql.env + line: 'GRN_PLUGINS_DIR=/var/lib/postgresql/.nix-profile/lib/groonga/plugins' + become: yes diff --git a/postgres_15.8.1.044/ansible/tasks/test-image.yml b/postgres_15.8.1.044/ansible/tasks/test-image.yml new file mode 100644 index 0000000..9a8d4fa --- /dev/null +++ 
b/postgres_15.8.1.044/ansible/tasks/test-image.yml @@ -0,0 +1,131 @@ +- name: install pg_prove + apt: + pkg: + - libtap-parser-sourcehandler-pgtap-perl + when: debpkg_mode + +# - name: Temporarily disable PG Sodium references in config +# become: yes +# become_user: postgres +# shell: +# cmd: sed -i.bak -e "s/pg_net,\ pgsodium,\ timescaledb/pg_net,\ timescaledb/g" -e "s/pgsodium.getkey_script=/#pgsodium.getkey_script=/g" /etc/postgresql/postgresql.conf +# when: debpkg_mode or stage2_nix + +- name: Temporarily disable PG Sodium references in config + become: yes + become_user: postgres + shell: + cmd: > + sed -i.bak + -e 's/\(shared_preload_libraries = '\''.*\)pgsodium,\(.*'\''\)/\1\2/' + -e 's/pgsodium.getkey_script=/#pgsodium.getkey_script=/' + /etc/postgresql/postgresql.conf + when: debpkg_mode or stage2_nix + +- name: Start Postgres Database to load all extensions. + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start "-o -c config_file=/etc/postgresql/postgresql.conf" + when: debpkg_mode + +- name: Stop Postgres Database in stage 2 + become: yes + become_user: postgres + shell: source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data stop + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + when: stage2_nix + +- name: Start Postgres Database to load all extensions. + become: yes + become_user: postgres + shell: source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start "-o -c config_file=/etc/postgresql/postgresql.conf" + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + when: stage2_nix + + +- name: Check psql_version and modify migrations if oriole-xx + block: + - name: Check if psql_version is psql_orioledb-xx + set_fact: + is_psql_oriole: "{{ psql_version in ['psql_orioledb-16', 'psql_orioledb-17'] }}" + + - name: Remove specified extensions from SQL file + ansible.builtin.command: + cmd: > + sed -i '/\\ir.*\(timescaledb\|postgis\|pgrouting\|plv8\).*\.sql/d' /tmp/migrations/tests/extensions/test.sql + when: is_psql_oriole + become: yes + + - name: Remove specified extension files from extensions directory + ansible.builtin.find: + paths: /tmp/migrations/tests/extensions + patterns: + - '*timescaledb*.sql' + - '*plv8*.sql' + - '*postgis*.sql' + - '*pgrouting*.sql' + register: files_to_remove + when: is_psql_oriole + + - name: Delete matched extension files + ansible.builtin.file: + path: "{{ item.path }}" + state: absent + loop: "{{ files_to_remove.files }}" + when: is_psql_oriole + become: yes + +- name: Run Unit tests (with filename unit-test-*) on Postgres Database + shell: /usr/bin/pg_prove -U postgres -h localhost -d postgres -v /tmp/unit-tests/unit-test-*.sql + register: retval + failed_when: retval.rc != 0 + when: debpkg_mode or stage2_nix + +- name: Run migrations tests + shell: /usr/bin/pg_prove -U supabase_admin -h localhost -d postgres -v tests/test.sql + register: retval + failed_when: retval.rc != 0 + when: debpkg_mode or stage2_nix + args: + chdir: /tmp/migrations + +- name: Re-enable PG Sodium references in config + become: yes + become_user: postgres + shell: + cmd: mv /etc/postgresql/postgresql.conf.bak /etc/postgresql/postgresql.conf + when: debpkg_mode 
or stage2_nix + +- name: Reset db stats + shell: /usr/lib/postgresql/bin/psql --no-password --no-psqlrc -d postgres -h localhost -U supabase_admin -c 'SELECT pg_stat_statements_reset(); SELECT pg_stat_reset();' + when: debpkg_mode or stage2_nix + +- name: remove pg_prove + apt: + pkg: + - libtap-parser-sourcehandler-pgtap-perl + state: absent + autoremove: yes + when: debpkg_mode + +- name: Stop Postgres Database + become: yes + become_user: postgres + shell: + cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data stop + when: debpkg_mode or stage2_nix diff --git a/postgres_15.8.1.044/ansible/vars.yml b/postgres_15.8.1.044/ansible/vars.yml new file mode 100644 index 0000000..2f3defa --- /dev/null +++ b/postgres_15.8.1.044/ansible/vars.yml @@ -0,0 +1,59 @@ +supabase_internal: true +ebssurrogate_mode: true +async_mode: true + +postgres_major: + - "15" + - "orioledb-17" + +# Full version strings for each major version +postgres_release: + postgresorioledb-17: "17.0.1.039-orioledb" + postgres15: "15.8.1.044" + +# Non Postgres Extensions +pgbouncer_release: "1.19.0" +pgbouncer_release_checksum: sha256:af0b05e97d0e1fd9ad45fe00ea6d2a934c63075f67f7e2ccef2ca59e3d8ce682 + +# to get these use +# wget https://github.com/PostgREST/postgrest/releases/download/v12.2.3/postgrest-v12.2.3-ubuntu-aarch64.tar.xz -q -O- | sha1sum +# wget https://github.com/PostgREST/postgrest/releases/download/v12.2.3/postgrest-v12.2.3-linux-static-x64.tar.xz -q -O- | sha1sum +postgrest_release: "12.2.3" +postgrest_arm_release_checksum: sha1:fbfd6613d711ce1afa25c42d5df8f1b017f396f9 +postgrest_x86_release_checksum: sha1:61c513f91a8931be4062587b9d4a18b42acf5c05 + +gotrue_release: 2.169.0 +gotrue_release_checksum: sha1:1419b94683aac7ddc30355408b8e8b79e61146c4 + +aws_cli_release: "2.23.11" + +salt_minion_version: 3007 + +golang_version: "1.19.3" +golang_version_checksum: + arm64: sha256:99de2fe112a52ab748fb175edea64b313a0c8d51d6157dba683a6be163fd5eab + amd64: sha256:74b9640724fd4e6bb0ed2a1bc44ae813a03f1e72a4c76253e2d5c015494430ba + +envoy_release: 1.28.0 +envoy_release_checksum: sha1:b0a06e9cfb170f1993f369beaa5aa9d7ec679ce5 +envoy_hot_restarter_release_checksum: sha1:6d43b89d266fb2427a4b51756b649883b0617eda + +kong_release_target: focal # if it works, it works +kong_deb: kong_2.8.1_arm64.deb +kong_deb_checksum: sha1:2086f6ccf8454fe64435252fea4d29d736d7ec61 + +nginx_release: 1.22.0 +nginx_release_checksum: sha1:419efb77b80f165666e2ee406ad8ae9b845aba93 + +wal_g_release: "2.0.1" + +postgres_exporter_release: "0.15.0" +postgres_exporter_release_checksum: + arm64: sha256:29ba62d538b92d39952afe12ee2e1f4401250d678ff4b354ff2752f4321c87a0 + amd64: sha256:cb89fc5bf4485fb554e0d640d9684fae143a4b2d5fa443009bd29c59f9129e84 + +adminapi_release: 0.74.0 +adminmgr_release: 0.24.1 + +vector_x86_deb: "https://packages.timber.io/vector/0.22.3/vector_0.22.3-1_amd64.deb" +vector_arm_deb: "https://packages.timber.io/vector/0.22.3/vector_0.22.3-1_arm64.deb" diff --git a/postgres_15.8.1.044/development-arm.vars.pkr.hcl b/postgres_15.8.1.044/development-arm.vars.pkr.hcl new file mode 100644 index 0000000..6772bf6 --- /dev/null +++ b/postgres_15.8.1.044/development-arm.vars.pkr.hcl @@ -0,0 +1,7 @@ +arch = "arm64" +ami_regions = ["us-east-1"] +environment = "dev" +instance-type = "c6g.4xlarge" +region= "us-east-1" +ubuntu-2004 = "ami-0b49a4a6e8e22fa16" + diff --git a/postgres_15.8.1.044/docker/Dockerfile b/postgres_15.8.1.044/docker/Dockerfile new file mode 100644 index 0000000..116377b --- /dev/null +++ b/postgres_15.8.1.044/docker/Dockerfile @@ 
-0,0 +1,78 @@ +ARG ubuntu_release=focal +FROM ubuntu:${ubuntu_release} as base + +ARG ubuntu_release=focal +ARG ubuntu_release_no=20.04 +ARG postgresql_major=15 +ARG postgresql_release=${postgresql_major}.1 + +FROM base as pg-source + +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + gnupg \ + dpkg-dev \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +# Add Postgres PPA +# In the off-chance that the key in the repository expires, it can be replaced by running the following in the repository's root: +# gpg --keyserver hkps://keyserver.ubuntu.com --recv-keys $NEW_POSTGRESQL_GPG_KEY +# gpg --export --armor $NEW_POSTGRESQL_GPG_KEY > postgresql.gpg.key +COPY postgresql.gpg.key /tmp/postgresql.gpg.key +RUN apt-key add /tmp/postgresql.gpg.key && \ + echo "deb https://apt-archive.postgresql.org/pub/repos/apt ${ubuntu_release}-pgdg-archive main" > /etc/apt/sources.list.d/pgdg.list && \ + echo "deb-src https://apt-archive.postgresql.org/pub/repos/apt ${ubuntu_release}-pgdg-archive main" >> /etc/apt/sources.list.d/pgdg.list + +# Create local PPA +WORKDIR /tmp/build +RUN echo "deb [ trusted=yes ] file:///tmp/build ./" > /etc/apt/sources.list.d/temp.list && \ + dpkg-scanpackages . > Packages && \ + apt-get -o Acquire::GzipIndexes=false update + +ENV DEBIAN_FRONTEND=noninteractive +ENV PYTHONDONTWRITEBYTECODE=1 +ENV DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" + +# Configure processor optimised build +ARG CPPFLAGS="" +ENV DEB_CPPFLAGS_APPEND="${CPPFLAGS} -fsigned-char" +ENV DEB_CFLAGS_APPEND="-g3" +ARG DEB_BUILD_PROFILES="pkg.postgresql.nozstd" +ENV DEB_BUILD_PROFILES="${DEB_BUILD_PROFILES}" + +RUN apt-get -o Acquire::GzipIndexes=false update && apt-get build-dep -y postgresql-common pgdg-keyring && \ + apt-get source --compile postgresql-common pgdg-keyring && \ + dpkg-scanpackages . > Packages && \ + apt-get -o Acquire::GzipIndexes=false update + +RUN apt-get build-dep -y "postgresql-${postgresql_major}=${postgresql_release}-1.pgdg${ubuntu_release_no}+1" && \ + apt-get source --compile "postgresql-${postgresql_major}=${postgresql_release}-1.pgdg${ubuntu_release_no}+1" && \ + dpkg-scanpackages . 
> Packages && \ + apt-get -o Acquire::GzipIndexes=false update + +# Remove source directories +RUN rm -rf /tmp/build/*/ + +FROM base as pg + +# Inherit args from base stage +ARG postgresql_major +ARG postgresql_release + +COPY --from=pg-source /tmp/build /tmp/build + +ENV DEBIAN_FRONTEND=noninteractive +RUN echo "deb [ trusted=yes ] file:///tmp/build ./" > /etc/apt/sources.list.d/temp.list && \ + apt-get -o Acquire::GzipIndexes=false update && \ + apt-get install -y --no-install-recommends postgresql-common && \ + sed -ri 's/#(create_main_cluster) .*$/\1 = false/' /etc/postgresql-common/createcluster.conf && \ + apt-get install -y --no-install-recommends "postgresql-${postgresql_major}=${postgresql_release}-1.pgdg${ubuntu_release_no}+1" && \ + rm -rf /var/lib/apt/lists/* && \ + rm -rf /tmp/build /etc/apt/sources.list.d/temp.list + +ENV PATH $PATH:/usr/lib/postgresql/${postgresql_major}/bin + +FROM scratch as pg-deb + +COPY --from=pg-source /tmp/build /tmp diff --git a/postgres_15.8.1.044/docker/docker-compose.yml b/postgres_15.8.1.044/docker/docker-compose.yml new file mode 100644 index 0000000..8775d6b --- /dev/null +++ b/postgres_15.8.1.044/docker/docker-compose.yml @@ -0,0 +1,9 @@ +version: "3" + +services: + db: + image: supabase/postgres + ports: + - "5432:5432" + environment: + POSTGRES_PASSWORD: postgres diff --git a/postgres_15.8.1.044/docker/nix/Dockerfile b/postgres_15.8.1.044/docker/nix/Dockerfile new file mode 100644 index 0000000..2269079 --- /dev/null +++ b/postgres_15.8.1.044/docker/nix/Dockerfile @@ -0,0 +1,16 @@ +FROM nixpkgs/nix-flakes + +RUN echo "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" >> /etc/nix/nix.conf + +RUN echo "trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=" >> /etc/nix/nix.conf + + +USER $USER + +RUN mkdir -p /workspace + +COPY ./ /workspace + +RUN chmod +x /workspace/docker/nix/build_nix.sh + +RUN chown -R $USER:$USER /workspace \ No newline at end of file diff --git a/postgres_15.8.1.044/docker/nix/build_nix.sh b/postgres_15.8.1.044/docker/nix/build_nix.sh new file mode 100644 index 0000000..b0eef98 --- /dev/null +++ b/postgres_15.8.1.044/docker/nix/build_nix.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +set -euo pipefail + +nix --version +if [ -d "/workspace" ]; then + cd /workspace +fi + +SYSTEM=$(nix-instantiate --eval -E builtins.currentSystem | tr -d '"') + +nix build .#checks.$SYSTEM.psql_15 -L --no-link +nix build .#checks.$SYSTEM.psql_orioledb-17 -L --no-link +nix build .#psql_15/bin -o psql_15 + +nix build .#psql_orioledb-17/bin -o psql_orioledb_17 + +# Copy to S3 +nix copy --to s3://nix-postgres-artifacts?secret-key=nix-secret-key ./psql_15 +nix copy --to s3://nix-postgres-artifacts?secret-key=nix-secret-key ./psql_orioledb_17 +if [ "$SYSTEM" = "aarch64-linux" ]; then + nix build .#postgresql_15_debug -o ./postgresql_15_debug + nix build .#postgresql_15_src -o ./postgresql_15_src + nix build .#postgresql_orioledb-17_debug -o ./postgresql_orioledb-17_debug + nix build .#postgresql_orioledb-17_src -o ./postgresql_orioledb-17_src + nix copy --to s3://nix-postgres-artifacts?secret-key=nix-secret-key ./postgresql_15_debug-debug + nix copy --to s3://nix-postgres-artifacts?secret-key=nix-secret-key ./postgresql_15_src + nix copy --to s3://nix-postgres-artifacts?secret-key=nix-secret-key ./postgresql_orioledb-17_debug-debug + nix copy --to s3://nix-postgres-artifacts?secret-key=nix-secret-key 
./postgresql_orioledb-17_src +fi diff --git a/postgres_15.8.1.044/docs/img/aws.png b/postgres_15.8.1.044/docs/img/aws.png new file mode 100644 index 0000000..35ba656 Binary files /dev/null and b/postgres_15.8.1.044/docs/img/aws.png differ diff --git a/postgres_15.8.1.044/docs/img/digital-ocean.png b/postgres_15.8.1.044/docs/img/digital-ocean.png new file mode 100644 index 0000000..b34e563 Binary files /dev/null and b/postgres_15.8.1.044/docs/img/digital-ocean.png differ diff --git a/postgres_15.8.1.044/docs/img/docker.png b/postgres_15.8.1.044/docs/img/docker.png new file mode 100644 index 0000000..74d951a Binary files /dev/null and b/postgres_15.8.1.044/docs/img/docker.png differ diff --git a/postgres_15.8.1.044/ebssurrogate/USAGE.md b/postgres_15.8.1.044/ebssurrogate/USAGE.md new file mode 100644 index 0000000..9a63a08 --- /dev/null +++ b/postgres_15.8.1.044/ebssurrogate/USAGE.md @@ -0,0 +1,50 @@ +## Ext4 amd64 AMI creation + +`packer build -var "aws_access_key=$AWS_ACCESS_KEY_ID" -var "aws_secret_key=$AWS_SECRET_ACCESS_KEY" -var "region=$AWS_REGION" \ +-var "docker_passwd=$DOCKER_PASSWD" -var "docker_user=$DOCKER_USER" -var "docker_image=$DOCKER_IMAGE" -var "docker_image_tag=$DOCKER_IMAGE_TAG" \ +amazon-amd64.pkr.hcl` + +## Ext4 arm64 AMI creation + +`packer build -var "aws_access_key=$AWS_ACCESS_KEY_ID" -var "aws_secret_key=$AWS_SECRET_ACCESS_KEY" -var "region=$AWS_REGION" \ +-var "docker_passwd=$DOCKER_PASSWD" -var "docker_user=$DOCKER_USER" -var "docker_image=$DOCKER_IMAGE" -var "docker_image_tag=$DOCKER_IMAGE_TAG" \ +amazon-arm64.pkr.hcl` + +## Docker Image + + DOCKER_IMAGE is used to store ccache data during the build process. This can be any image; you can create your own using: + + ``` + docker pull ubuntu + docker tag ubuntu /ccache + docker push /ccache + ``` + + For ARM64 builds + + ``` + docker pull arm64v8/ubuntu + docker tag arm64v8/ubuntu:latest /ccache-arm64v8 + docker push /ccache-arm64v8 + ``` + + Now set DOCKER_IMAGE="/ccache" or DOCKER_IMAGE="/ccache-arm64v8" based on your AMI architecture. + + +## EBS-Surrogate File layout + +``` +$ tree ebssurrogate/ +ebssurrogate/ +├── files +│   ├── 70-ec2-nvme-devices.rules +│   ├── cloud.cfg # cloud.cfg for cloud-init +│   ├── ebsnvme-id +│   ├── sources-arm64.cfg # apt/sources.list for arm64 +│   ├── sources.cfg # apt/sources.list for amd64 +│   ├── vector.timer # systemd timer to delay vector execution +│   └── zfs-growpart-root.cfg +└── scripts + ├── chroot-bootstrap.sh # Installs grub and other packages required for the build. Configures target AMI settings + └── surrogate-bootstrap.sh # Formats the disk and sets up the chroot environment. Runs Ansible tasks within the chrooted environment. +``` diff --git a/postgres_15.8.1.044/ebssurrogate/files/70-ec2-nvme-devices.rules b/postgres_15.8.1.044/ebssurrogate/files/70-ec2-nvme-devices.rules new file mode 100644 index 0000000..62a5deb --- /dev/null +++ b/postgres_15.8.1.044/ebssurrogate/files/70-ec2-nvme-devices.rules @@ -0,0 +1,25 @@ +# Copyright (C) 2006-2016 Amazon.com, Inc. or its affiliates. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS +# OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the +# License. + +#nvme-ns-* devices +KERNEL=="nvme[0-9]*n[0-9]*", ENV{DEVTYPE}=="disk", ATTRS{serial}=="?*", ATTRS{model}=="?*", SYMLINK+="disk/by-id/nvme-$attr{model}_$attr{serial}-ns-%n", OPTIONS+="string_escape=replace" + +#nvme partitions +KERNEL=="nvme[0-9]*n[0-9]*p[0-9]*", ENV{DEVTYPE}=="partition", ATTRS{serial}=="?*", ATTRS{model}=="?*", IMPORT{program}="ec2nvme-nsid %k" +KERNEL=="nvme[0-9]*n[0-9]*p[0-9]*", ENV{DEVTYPE}=="partition", ATTRS{serial}=="?*", ATTRS{model}=="?*", ENV{_NS_ID}=="?*", SYMLINK+="disk/by-id/nvme-$attr{model}_$attr{serial}-ns-$env{_NS_ID}-part%n", OPTIONS+="string_escape=replace" + +# ebs nvme devices +KERNEL=="nvme[0-9]*n[0-9]*", ENV{DEVTYPE}=="disk", ATTRS{model}=="Amazon Elastic Block Store", PROGRAM="/sbin/ebsnvme-id -u /dev/%k", SYMLINK+="%c" +KERNEL=="nvme[0-9]*n[0-9]*p[0-9]*", ENV{DEVTYPE}=="partition", ATTRS{model}=="Amazon Elastic Block Store", PROGRAM="/sbin/ebsnvme-id -u /dev/%k", SYMLINK+="%c%n" diff --git a/postgres_15.8.1.044/ebssurrogate/files/apparmor_profiles/opt.gotrue.gotrue b/postgres_15.8.1.044/ebssurrogate/files/apparmor_profiles/opt.gotrue.gotrue new file mode 100644 index 0000000..7b9594a --- /dev/null +++ b/postgres_15.8.1.044/ebssurrogate/files/apparmor_profiles/opt.gotrue.gotrue @@ -0,0 +1,15 @@ +#include + +/opt/gotrue/gotrue { + #include + #include + #include + + /opt/gotrue/gotrue r, + /opt/gotrue/migrations/ r, + /etc/ssl/certs/java/* r, + /opt/gotrue/migrations/** rw, + /proc/sys/net/core/somaxconn r, + /sys/kernel/mm/transparent_hugepage/hpage_pmd_size r, + owner /etc/gotrue.env r, +} diff --git a/postgres_15.8.1.044/ebssurrogate/files/apparmor_profiles/opt.postgrest b/postgres_15.8.1.044/ebssurrogate/files/apparmor_profiles/opt.postgrest new file mode 100644 index 0000000..c738a65 --- /dev/null +++ b/postgres_15.8.1.044/ebssurrogate/files/apparmor_profiles/opt.postgrest @@ -0,0 +1,12 @@ +#include + +/opt/postgrest { + #include + #include + #include + + /etc/gss/mech.d/ r, + /sys/devices/system/node/ r, + /sys/devices/system/node/node0/meminfo r, + owner /etc/postgrest/merged.conf r, +} diff --git a/postgres_15.8.1.044/ebssurrogate/files/apparmor_profiles/usr.bin.vector b/postgres_15.8.1.044/ebssurrogate/files/apparmor_profiles/usr.bin.vector new file mode 100644 index 0000000..b8a7eb2 --- /dev/null +++ b/postgres_15.8.1.044/ebssurrogate/files/apparmor_profiles/usr.bin.vector @@ -0,0 +1,35 @@ +#include + +/usr/bin/vector flags=(attach_disconnected) { + #include + #include + #include + #include + #include + #include + #include + #include + + deny @{HOME}/** rwx, + /etc/machine-id r, + /etc/vector/** r, + /proc/*/sched r, + /proc/cmdline r, + /proc/sys/kernel/osrelease r, + /run/log/journal/ r, + /var/log/journal/** r, + /run/systemd/notify rw, + /sys/firmware/efi/efivars/SecureBoot-8be4df61-93ca-11d2-aa0d-00e098032b8c r, + /sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us r, + /sys/kernel/mm/transparent_hugepage/enabled r, + /usr/bin/journalctl mrix, + /usr/bin/vector mrix, + /var/lib/vector/** rw, + /var/log/journal/ r, + /var/log/postgresql/ r, + /var/log/postgresql/** rw, + /var/run/systemd/notify rw, + owner /proc/*/cgroup r, + owner /proc/*/mountinfo r, + owner /proc/*/stat r, +} diff --git a/postgres_15.8.1.044/ebssurrogate/files/apparmor_profiles/usr.lib.postgresql.bin.postgres b/postgres_15.8.1.044/ebssurrogate/files/apparmor_profiles/usr.lib.postgresql.bin.postgres new file mode 100644 index 0000000..8e2efc3 --- /dev/null +++ 
b/postgres_15.8.1.044/ebssurrogate/files/apparmor_profiles/usr.lib.postgresql.bin.postgres @@ -0,0 +1,55 @@ +#include + +profile /usr/lib/postgresql/bin/postgres flags=(attach_disconnected) { +#include +#include +#include +#include +#include +#include +#include + +capability dac_override, +capability dac_read_search, + +deny @{HOME}/** rwx, + +/data/pgdata/** r, +/dev/shm rw, +/etc/java-11-openjdk/logging.properties r, +/etc/java-11-openjdk/security/default.policy r, +/etc/java-11-openjdk/security/java.policy r, +/etc/java-11-openjdk/security/java.security r, +/etc/mecabrc r, +/etc/postgresql-custom/** r, +/etc/postgresql/** r, +/etc/timezone r, +/etc/wal-g/config.json r, +/run/systemd/notify rw, +/usr/bin/cat rix, +/usr/bin/dash rix, +/usr/bin/mknod rix, +/usr/bin/admin-mgr Ux, +/usr/lib/postgresql/bin/* mrix, +/usr/local/bin/wal-g rix, +/usr/local/lib/groonga/plugins/tokenizers/mecab.so mr, +/usr/local/lib/libSFCGAL.so.* mr, +/usr/local/lib/libgroonga.so.* mr, +/usr/local/pgsql/etc/pljava.policy r, +/usr/share/postgresql/** r, +/var/lib/mecab/** r, +/var/lib/postgresql/** rwl, +/var/log/postgresql/** rw, +/var/log/wal-g/** w, +/var/run/systemd/notify rw, +/{,var/}run/postgresql/** rw, +owner /data/pgdata/ r, +owner /data/pgdata/** rwl, +owner /data/pgdata/pgroonga.log k, +owner /dev/shm/ rw, +owner /dev/shm/PostgreSQL.* rw, +owner /sys/kernel/mm/transparent_hugepage/hpage_pmd_size r, +owner /var/log/wal-g/** rw, +owner @{PROC}/[0-9]*/oom_adj rw, + +} diff --git a/postgres_15.8.1.044/ebssurrogate/files/apparmor_profiles/usr.local.bin.pgbouncer b/postgres_15.8.1.044/ebssurrogate/files/apparmor_profiles/usr.local.bin.pgbouncer new file mode 100644 index 0000000..7bf6d09 --- /dev/null +++ b/postgres_15.8.1.044/ebssurrogate/files/apparmor_profiles/usr.local.bin.pgbouncer @@ -0,0 +1,20 @@ +#include +profile /usr/local/bin/pgbouncer flags=(attach_disconnected) { + #include + #include + #include + #include + #include + #include + #include + + deny @{HOME}/** rwx, + /etc/pgbouncer-custom/** r, + /etc/pgbouncer/** r, + /proc/sys/kernel/random/uuid r, + /run/systemd/notify rw, + /usr/local/bin/pgbouncer mrix, + /var/log/pgbouncer.log rw, + /var/run/systemd/notify rw, + /{,var/}run/pgbouncer/** rw, +} diff --git a/postgres_15.8.1.044/ebssurrogate/files/cloud.cfg b/postgres_15.8.1.044/ebssurrogate/files/cloud.cfg new file mode 100644 index 0000000..678b5b6 --- /dev/null +++ b/postgres_15.8.1.044/ebssurrogate/files/cloud.cfg @@ -0,0 +1,137 @@ +# The top level settings are used as module +# and system configuration. +# A set of users which may be applied and/or used by various modules +# when a 'default' entry is found it will reference the 'default_user' +# from the distro configuration specified below +users: + - default + + +# If this is set, 'root' will not be able to ssh in and they +# will get a message to login instead as the default $user +disable_root: true + +# This will cause the set+update hostname module to not operate (if true) +preserve_hostname: false + +# If you use datasource_list array, keep array items in a single line. +# If you use multi line array, ds-identify script won't read array items. 
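+# For example, ds-identify reads a single-line form like:
+#   datasource_list: [ Ec2, None ]
+# but may skip a multi-line form like:
+#   datasource_list:
+#     - Ec2
+#     - None
+# (the datasource names above are illustrative)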
+# Example datasource config +# datasource: +# Ec2: +# metadata_urls: [ 'blah.com' ] +# timeout: 5 # (defaults to 50 seconds) +# max_wait: 10 # (defaults to 120 seconds) + + + +# The modules that run in the 'init' stage +cloud_init_modules: +# - migrator +# - seed_random +# - bootcmd + - write-files +# - growpart +# - resizefs +# - disk_setup +# - mounts + - set_hostname + - update_hostname + - update_etc_hosts +# - ca-certs +# - rsyslog + - users-groups + - ssh + +# The modules that run in the 'config' stage +cloud_config_modules: +# Emit the cloud config ready event +# this can be used by upstart jobs for 'start on cloud-config'. +# - emit_upstart +# - snap +# - ssh-import-id +# - locale +# - set-passwords +# - grub-dpkg +# - apt-pipelining +# - apt-configure +# - ubuntu-advantage + - ntp + - timezone + - disable-ec2-metadata + - runcmd +# - byobu + +# The modules that run in the 'final' stage +cloud_final_modules: +# - package-update-upgrade-install +# - fan +# - landscape +# - lxd +# - ubuntu-drivers +# - puppet +# - chef +# - mcollective +# - salt-minion + - reset_rmc + - refresh_rmc_and_interface +# - rightscale_userdata + - scripts-vendor + - scripts-per-once + - scripts-per-boot + - scripts-per-instance + - scripts-user +# - ssh-authkey-fingerprints +# - keys-to-console +# - phone-home + - final-message + - power-state-change + +# System and/or distro specific settings +# (not accessible to handlers/transforms) +system_info: + # This will affect which distro class gets used + distro: ubuntu + # Default user name + that default users groups (if added/used) + default_user: + name: ubuntu + lock_passwd: True + gecos: Ubuntu + groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video] + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + shell: /bin/bash + network: + renderers: ['netplan', 'eni', 'sysconfig'] + # Automatically discover the best ntp_client + ntp_client: auto + # Other config here will be given to the distro class and/or path classes + paths: + cloud_dir: /var/lib/cloud/ + templates_dir: /etc/cloud/templates/ + upstart_dir: /etc/init/ + package_mirrors: + - arches: [i386, amd64] + failsafe: + primary: http://archive.ubuntu.com/ubuntu + security: http://security.ubuntu.com/ubuntu + search: + primary: + - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/ + - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/ + - http://%(region)s.clouds.archive.ubuntu.com/ubuntu/ + security: [] + - arches: [arm64, armel, armhf] + failsafe: + primary: http://ports.ubuntu.com/ubuntu-ports + security: http://ports.ubuntu.com/ubuntu-ports + search: + primary: + - http://%(ec2_region)s.ec2.ports.ubuntu.com/ubuntu-ports/ + - http://%(availability_zone)s.clouds.ports.ubuntu.com/ubuntu-ports/ + - http://%(region)s.clouds.ports.ubuntu.com/ubuntu-ports/ + security: [] + - arches: [default] + failsafe: + primary: http://ports.ubuntu.com/ubuntu-ports + security: http://ports.ubuntu.com/ubuntu-ports + ssh_svcname: ssh diff --git a/postgres_15.8.1.044/ebssurrogate/files/ebsnvme-id b/postgres_15.8.1.044/ebssurrogate/files/ebsnvme-id new file mode 100644 index 0000000..b543c0d --- /dev/null +++ b/postgres_15.8.1.044/ebssurrogate/files/ebsnvme-id @@ -0,0 +1,173 @@ +#!/usr/bin/env python2.7 + +# Copyright (C) 2017 Amazon.com, Inc. or its affiliates. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. 
+# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS +# OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the +# License. + +""" +Usage: +Read EBS device information and provide information about +the volume. +""" + +import argparse +from ctypes import * +from fcntl import ioctl +import sys + +NVME_ADMIN_IDENTIFY = 0x06 +NVME_IOCTL_ADMIN_CMD = 0xC0484E41 +AMZN_NVME_VID = 0x1D0F +AMZN_NVME_EBS_MN = "Amazon Elastic Block Store" + +class nvme_admin_command(Structure): + _pack_ = 1 + _fields_ = [("opcode", c_uint8), # op code + ("flags", c_uint8), # fused operation + ("cid", c_uint16), # command id + ("nsid", c_uint32), # namespace id + ("reserved0", c_uint64), + ("mptr", c_uint64), # metadata pointer + ("addr", c_uint64), # data pointer + ("mlen", c_uint32), # metadata length + ("alen", c_uint32), # data length + ("cdw10", c_uint32), + ("cdw11", c_uint32), + ("cdw12", c_uint32), + ("cdw13", c_uint32), + ("cdw14", c_uint32), + ("cdw15", c_uint32), + ("reserved1", c_uint64)] + +class nvme_identify_controller_amzn_vs(Structure): + _pack_ = 1 + _fields_ = [("bdev", c_char * 32), # block device name + ("reserved0", c_char * (1024 - 32))] + +class nvme_identify_controller_psd(Structure): + _pack_ = 1 + _fields_ = [("mp", c_uint16), # maximum power + ("reserved0", c_uint16), + ("enlat", c_uint32), # entry latency + ("exlat", c_uint32), # exit latency + ("rrt", c_uint8), # relative read throughput + ("rrl", c_uint8), # relative read latency + ("rwt", c_uint8), # relative write throughput + ("rwl", c_uint8), # relative write latency + ("reserved1", c_char * 16)] + +class nvme_identify_controller(Structure): + _pack_ = 1 + _fields_ = [("vid", c_uint16), # PCI Vendor ID + ("ssvid", c_uint16), # PCI Subsystem Vendor ID + ("sn", c_char * 20), # Serial Number + ("mn", c_char * 40), # Module Number + ("fr", c_char * 8), # Firmware Revision + ("rab", c_uint8), # Recommend Arbitration Burst + ("ieee", c_uint8 * 3), # IEEE OUI Identifier + ("mic", c_uint8), # Multi-Interface Capabilities + ("mdts", c_uint8), # Maximum Data Transfer Size + ("reserved0", c_uint8 * (256 - 78)), + ("oacs", c_uint16), # Optional Admin Command Support + ("acl", c_uint8), # Abort Command Limit + ("aerl", c_uint8), # Asynchronous Event Request Limit + ("frmw", c_uint8), # Firmware Updates + ("lpa", c_uint8), # Log Page Attributes + ("elpe", c_uint8), # Error Log Page Entries + ("npss", c_uint8), # Number of Power States Support + ("avscc", c_uint8), # Admin Vendor Specific Command Configuration + ("reserved1", c_uint8 * (512 - 265)), + ("sqes", c_uint8), # Submission Queue Entry Size + ("cqes", c_uint8), # Completion Queue Entry Size + ("reserved2", c_uint16), + ("nn", c_uint32), # Number of Namespaces + ("oncs", c_uint16), # Optional NVM Command Support + ("fuses", c_uint16), # Fused Operation Support + ("fna", c_uint8), # Format NVM Attributes + ("vwc", c_uint8), # Volatile Write Cache + ("awun", c_uint16), # Atomic Write Unit Normal + ("awupf", c_uint16), # Atomic Write Unit Power Fail + ("nvscc", c_uint8), # NVM Vendor Specific Command Configuration + ("reserved3", c_uint8 * (704 - 531)), + ("reserved4", c_uint8 * (2048 - 704)), + ("psd", nvme_identify_controller_psd * 32), # Power State Descriptor + ("vs", nvme_identify_controller_amzn_vs)] # Vendor Specific + +class ebs_nvme_device: + def 
__init__(self, device): + self.device = device + self.ctrl_identify() + + def _nvme_ioctl(self, id_response, id_len): + admin_cmd = nvme_admin_command(opcode = NVME_ADMIN_IDENTIFY, + addr = id_response, + alen = id_len, + cdw10 = 1) + + with open(self.device, "rw") as nvme: + ioctl(nvme, NVME_IOCTL_ADMIN_CMD, admin_cmd) + + def ctrl_identify(self): + self.id_ctrl = nvme_identify_controller() + self._nvme_ioctl(addressof(self.id_ctrl), sizeof(self.id_ctrl)) + + if self.id_ctrl.vid != AMZN_NVME_VID or self.id_ctrl.mn.strip() != AMZN_NVME_EBS_MN: + raise TypeError("[ERROR] Not an EBS device: '{0}'".format(self.device)) + + def get_volume_id(self): + vol = self.id_ctrl.sn + + if vol.startswith("vol") and vol[3] != "-": + vol = "vol-" + vol[3:] + + return vol + + def get_block_device(self, stripped=False): + dev = self.id_ctrl.vs.bdev.strip() + + if stripped and dev.startswith("/dev/"): + dev = dev[5:] + + return dev + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Reads EBS information from NVMe devices.") + parser.add_argument("device", nargs=1, help="Device to query") + + display = parser.add_argument_group("Display Options") + display.add_argument("-v", "--volume", action="store_true", + help="Return volume-id") + display.add_argument("-b", "--block-dev", action="store_true", + help="Return block device mapping") + display.add_argument("-u", "--udev", action="store_true", + help="Output data in format suitable for udev rules") + + if len(sys.argv) < 2: + parser.print_help() + sys.exit(1) + + args = parser.parse_args() + + get_all = not (args.udev or args.volume or args.block_dev) + + try: + dev = ebs_nvme_device(args.device[0]) + except (IOError, TypeError) as err: + print >> sys.stderr, err + sys.exit(1) + + if get_all or args.volume: + print "Volume ID: {0}".format(dev.get_volume_id()) + if get_all or args.block_dev or args.udev: + print dev.get_block_device(args.udev) diff --git a/postgres_15.8.1.044/ebssurrogate/files/sources-arm64.cfg b/postgres_15.8.1.044/ebssurrogate/files/sources-arm64.cfg new file mode 100644 index 0000000..a236377 --- /dev/null +++ b/postgres_15.8.1.044/ebssurrogate/files/sources-arm64.cfg @@ -0,0 +1,10 @@ +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal main restricted +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal-updates main restricted +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal universe +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal-updates universe +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal multiverse +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal-updates multiverse +deb http://REGION.clouds.ports.ubuntu.com/ubuntu-ports/ focal-backports main restricted universe multiverse +deb http://ports.ubuntu.com/ubuntu-ports focal-security main restricted +deb http://ports.ubuntu.com/ubuntu-ports focal-security universe +deb http://ports.ubuntu.com/ubuntu-ports focal-security multiverse diff --git a/postgres_15.8.1.044/ebssurrogate/files/sources.cfg b/postgres_15.8.1.044/ebssurrogate/files/sources.cfg new file mode 100644 index 0000000..ec30118 --- /dev/null +++ b/postgres_15.8.1.044/ebssurrogate/files/sources.cfg @@ -0,0 +1,10 @@ +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal main restricted +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal-updates main restricted +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal universe +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal-updates universe +deb 
http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal multiverse +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal-updates multiverse +deb http://REGION.ec2.archive.ubuntu.com/ubuntu/ focal-backports main restricted universe multiverse +deb http://security.ubuntu.com/ubuntu focal-security main restricted +deb http://security.ubuntu.com/ubuntu focal-security universe +deb http://security.ubuntu.com/ubuntu focal-security multiverse diff --git a/postgres_15.8.1.044/ebssurrogate/files/unit-tests/unit-test-01.sql b/postgres_15.8.1.044/ebssurrogate/files/unit-tests/unit-test-01.sql new file mode 100644 index 0000000..f3d4745 --- /dev/null +++ b/postgres_15.8.1.044/ebssurrogate/files/unit-tests/unit-test-01.sql @@ -0,0 +1,64 @@ +BEGIN; +CREATE EXTENSION IF NOT EXISTS pgtap; + +DO $$ +DECLARE + extension_array text[]; + orioledb_available boolean; +BEGIN + -- Check if orioledb is available + SELECT EXISTS ( + SELECT 1 FROM pg_available_extensions WHERE name = 'orioledb' + ) INTO orioledb_available; + + -- If available, create it and add to the expected extensions list + IF orioledb_available THEN + CREATE EXTENSION IF NOT EXISTS orioledb; + extension_array := ARRAY[ + 'plpgsql', + 'pg_stat_statements', + 'pgsodium', + 'pgtap', + 'pg_graphql', + 'pgcrypto', + 'pgjwt', + 'uuid-ossp', + 'supabase_vault', + 'orioledb' + ]; + ELSE + extension_array := ARRAY[ + 'plpgsql', + 'pg_stat_statements', + 'pgsodium', + 'pgtap', + 'pg_graphql', + 'pgcrypto', + 'pgjwt', + 'uuid-ossp', + 'supabase_vault' + ]; + END IF; + + -- Set the array as a temporary variable to use in the test + PERFORM set_config('myapp.extensions', array_to_string(extension_array, ','), false); +END $$; + +SELECT plan(8); + +SELECT extensions_are( + string_to_array(current_setting('myapp.extensions'), ',')::text[] +); + + +SELECT has_schema('pg_toast'); +SELECT has_schema('pg_catalog'); +SELECT has_schema('information_schema'); +SELECT has_schema('public'); + +SELECT function_privs_are('pgsodium', 'crypto_aead_det_decrypt', array['bytea', 'bytea', 'uuid', 'bytea'], 'service_role', array['EXECUTE']); +SELECT function_privs_are('pgsodium', 'crypto_aead_det_encrypt', array['bytea', 'bytea', 'uuid', 'bytea'], 'service_role', array['EXECUTE']); +SELECT function_privs_are('pgsodium', 'crypto_aead_det_keygen', array[]::text[], 'service_role', array['EXECUTE']); + +SELECT * FROM finish(); +ROLLBACK; \ No newline at end of file diff --git a/postgres_15.8.1.044/ebssurrogate/files/vector.timer b/postgres_15.8.1.044/ebssurrogate/files/vector.timer new file mode 100644 index 0000000..68bb4d6 --- /dev/null +++ b/postgres_15.8.1.044/ebssurrogate/files/vector.timer @@ -0,0 +1,9 @@ +[Unit] +Description= Defer running the vector.service 60secs after boot up + +[Timer] +OnBootSec=60s +Unit=vector.service + +[Install] +WantedBy=multi-user.target diff --git a/postgres_15.8.1.044/ebssurrogate/scripts/chroot-bootstrap-nix.sh b/postgres_15.8.1.044/ebssurrogate/scripts/chroot-bootstrap-nix.sh new file mode 100644 index 0000000..cda6bd2 --- /dev/null +++ b/postgres_15.8.1.044/ebssurrogate/scripts/chroot-bootstrap-nix.sh @@ -0,0 +1,219 @@ +#!/usr/bin/env bash +# +# This script runs inside chrooted environment. It installs grub and its +# Configuration file. 
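+# (surrogate-bootstrap.sh copies this script into the target filesystem and
+#  runs it as: chroot /mnt /tmp/chroot-bootstrap-nix.sh)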
+#
+
+set -o errexit
+set -o pipefail
+set -o xtrace
+
+export DEBIAN_FRONTEND=noninteractive
+
+export APT_OPTIONS="-oAPT::Install-Recommends=false \
+    -oAPT::Install-Suggests=false \
+    -oAcquire::Languages=none"
+
+if [ $(dpkg --print-architecture) = "amd64" ];
+then
+    ARCH="amd64";
+else
+    ARCH="arm64";
+fi
+
+
+
+function update_install_packages {
+    source /etc/os-release
+
+    # Update APT with new sources
+    cat /etc/apt/sources.list
+    apt-get $APT_OPTIONS update && apt-get $APT_OPTIONS --yes dist-upgrade
+
+    # Do not configure grub during package install
+    if [ "${ARCH}" = "amd64" ]; then
+        echo 'grub-pc grub-pc/install_devices_empty select true' | debconf-set-selections
+        echo 'grub-pc grub-pc/install_devices select' | debconf-set-selections
+        # Install various packages needed for a booting system
+        apt-get install -y \
+            linux-aws \
+            grub-pc \
+            e2fsprogs
+    else
+        apt-get install -y e2fsprogs
+    fi
+    # Install standard packages
+    apt-get install -y \
+        sudo \
+        wget \
+        cloud-init \
+        acpid \
+        ec2-hibinit-agent \
+        ec2-instance-connect \
+        hibagent \
+        ncurses-term \
+        ssh-import-id
+
+    # apt upgrade
+    apt-get upgrade -y
+
+    # Install OpenSSH and other packages
+    sudo add-apt-repository universe
+    apt-get update
+    apt-get install -y --no-install-recommends \
+        openssh-server \
+        git \
+        ufw \
+        cron \
+        logrotate \
+        fail2ban \
+        locales \
+        at \
+        less \
+        python3-systemd
+
+    if [ "${ARCH}" = "arm64" ]; then
+        apt-get $APT_OPTIONS --yes install linux-aws initramfs-tools dosfstools
+    fi
+}
+
+function setup_locale {
+cat << EOF >> /etc/locale.gen
+en_US.UTF-8 UTF-8
+EOF
+
+cat << EOF > /etc/default/locale
+LANG="C.UTF-8"
+LC_CTYPE="C.UTF-8"
+EOF
+    locale-gen en_US.UTF-8
+}
+
+function setup_postgesql_env {
+    # Create the directory if it doesn't exist
+    sudo mkdir -p /etc/environment.d
+
+    # Define the contents of the PostgreSQL environment file
+    cat <<EOF | sudo tee /etc/environment.d/postgresql.env >/dev/null
+LOCALE_ARCHIVE=/usr/lib/locale/locale-archive
+LANG="en_US.UTF-8"
+LANGUAGE="en_US.UTF-8"
+LC_ALL="en_US.UTF-8"
+LC_CTYPE="en_US.UTF-8"
+EOF
+}
+
+function install_packages_for_build {
+    apt-get install -y --no-install-recommends linux-libc-dev \
+        acl \
+        magic-wormhole sysstat \
+        build-essential libreadline-dev zlib1g-dev flex bison libxml2-dev libxslt-dev libssl-dev libsystemd-dev libpq-dev libxml2-utils uuid-dev xsltproc ssl-cert \
+        gcc-10 g++-10 \
+        libgeos-dev libproj-dev libgdal-dev libjson-c-dev libboost-all-dev libcgal-dev libmpfr-dev libgmp-dev cmake \
+        libkrb5-dev \
+        maven default-jre default-jdk \
+        curl gpp apt-transport-https cmake libc++-dev libc++abi-dev libc++1 libglib2.0-dev libtinfo5 libc++abi1 ninja-build python \
+        liblzo2-dev
+
+    source /etc/os-release
+
+    apt-get install -y --no-install-recommends llvm-11-dev clang-11
+    # Mark llvm as manual to prevent auto removal
+    apt-mark manual libllvm11:arm64
+}
+
+function setup_apparmor {
+    apt-get install -y apparmor apparmor-utils auditd
+
+    # Copy apparmor profiles
+    cp -rv /tmp/apparmor_profiles/* /etc/apparmor.d/
+}
+
+function setup_grub_conf_arm64 {
+cat << EOF > /etc/default/grub
+GRUB_DEFAULT=0
+GRUB_TIMEOUT=0
+GRUB_TIMEOUT_STYLE="hidden"
+GRUB_DISTRIBUTOR="Supabase postgresql"
+GRUB_CMDLINE_LINUX_DEFAULT="nomodeset console=tty1 console=ttyS0 ipv6.disable=0"
+EOF
+}
+
+# Install GRUB
+function install_configure_grub {
+    if [ "${ARCH}" = "arm64" ]; then
+        apt-get $APT_OPTIONS --yes install cloud-guest-utils fdisk grub-efi-arm64 efibootmgr
+        setup_grub_conf_arm64
+        rm -rf /etc/grub.d/30_os-prober
+        sleep 1
+    fi
+    grub-install /dev/xvdf &&
update-grub +} + +# skip fsck for first boot +function disable_fsck { + touch /fastboot +} + +# Don't request hostname during boot but set hostname +function setup_hostname { + sed -i 's/gethostname()/ubuntu /g' /etc/dhcp/dhclient.conf + sed -i 's/host-name,//g' /etc/dhcp/dhclient.conf + echo "ubuntu" > /etc/hostname + chmod 644 /etc/hostname +} + +# Set options for the default interface +function setup_eth0_interface { +cat << EOF > /etc/netplan/eth0.yaml +network: + version: 2 + ethernets: + eth0: + dhcp4: true +EOF +} + +function disable_sshd_passwd_auth { + sed -i -E -e 's/^#?\s*PasswordAuthentication\s+(yes|no)\s*$/PasswordAuthentication no/g' \ + -e 's/^#?\s*ChallengeResponseAuthentication\s+(yes|no)\s*$/ChallengeResponseAuthentication no/g' \ + /etc/ssh/sshd_config +} + +function create_admin_account { + groupadd admin +} + +#Set default target as multi-user +function set_default_target { + rm -f /etc/systemd/system/default.target + ln -s /lib/systemd/system/multi-user.target /etc/systemd/system/default.target +} + +# Setup ccache +function setup_ccache { + apt-get install ccache -y + mkdir -p /tmp/ccache + export PATH=/usr/lib/ccache:$PATH + echo "PATH=$PATH" >> /etc/environment +} + +# Clear apt caches +function cleanup_cache { + apt-get clean +} + +update_install_packages +setup_locale +setup_postgesql_env +#install_packages_for_build +install_configure_grub +setup_apparmor +setup_hostname +create_admin_account +set_default_target +setup_eth0_interface +disable_sshd_passwd_auth +disable_fsck +#setup_ccache +cleanup_cache diff --git a/postgres_15.8.1.044/ebssurrogate/scripts/qemu-bootstrap-nix.sh b/postgres_15.8.1.044/ebssurrogate/scripts/qemu-bootstrap-nix.sh new file mode 100644 index 0000000..464c4f0 --- /dev/null +++ b/postgres_15.8.1.044/ebssurrogate/scripts/qemu-bootstrap-nix.sh @@ -0,0 +1,146 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail +set -o xtrace + +if [ $(dpkg --print-architecture) = "amd64" ]; then + ARCH="amd64" +else + ARCH="arm64" +fi + +function waitfor_boot_finished { + export DEBIAN_FRONTEND=noninteractive + + echo "args: ${ARGS}" + # Wait for cloudinit on the surrogate to complete before making progress + while [[ ! -f /var/lib/cloud/instance/boot-finished ]]; do + echo 'Waiting for cloud-init...' 
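+        # /var/lib/cloud/instance/boot-finished is written by cloud-init at the
+        # end of its final stage, so this loop exits once first boot completes.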
+        sleep 1
+    done
+}
+
+function install_packages {
+    apt-get update && sudo apt-get install software-properties-common e2fsprogs nfs-common -y
+    add-apt-repository --yes --update ppa:ansible/ansible && sudo apt-get install ansible -y
+    ansible-galaxy collection install community.general
+}
+
+function execute_playbook {
+
+    tee /etc/ansible/ansible.cfg <<EOF
+EOF
+    # (the ansible.cfg contents and the ansible-playbook invocation were lost in this dump)
+}
+
+function setup_postgesql_env {
+    cat <<EOF | sudo tee /etc/environment.d/postgresql.env >/dev/null
+LOCALE_ARCHIVE=/usr/lib/locale/locale-archive
+LANG="en_US.UTF-8"
+LANGUAGE="en_US.UTF-8"
+LC_ALL="en_US.UTF-8"
+LC_CTYPE="en_US.UTF-8"
+EOF
+}
+
+function setup_locale {
+    cat <<EOF >>/etc/locale.gen
+en_US.UTF-8 UTF-8
+EOF
+
+    cat <<EOF >/etc/default/locale
+LANG="C.UTF-8"
+LC_CTYPE="C.UTF-8"
+EOF
+    locale-gen en_US.UTF-8
+}
+
+sed -i 's/- hosts: all/- hosts: localhost/' ansible/playbook.yml
+
+waitfor_boot_finished
+install_packages
+setup_postgesql_env
+setup_locale
+execute_playbook
+
+####################
+# stage 2 things
+####################
+
+function install_nix() {
+    sudo su -c "curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install --no-confirm \
+    --extra-conf \"substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com\" \
+    --extra-conf \"trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=\" " -s /bin/bash root
+    . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh
+
+}
+
+function execute_stage2_playbook {
+    sudo tee /etc/ansible/ansible.cfg < sda
+
+    # Create /dev/xvd* device symlink
+    if [[ ! -z "$mapping" ]] && [[ -b "${blkdev}" ]] && [[ ! -L "${mapping}" ]]; then
+        ln -s "$blkdev" "$mapping"
+
+        blkdev_mappings["$blkdev"]="$mapping"
+    fi
+    done
+
+    create_partition_table
+
+    # NVMe EBS launch device partition mappings (symlinks): /dev/nvme*n*p* to /dev/xvd*[0-9]+
+    declare -A partdev_mappings
+    for blkdev in "${!blkdev_mappings[@]}"; do # /dev/nvme*n*
+        mapping="${blkdev_mappings[$blkdev]}"
+
+        # Create /dev/xvd*[0-9]+ partition device symlink
+        for partdev in "${blkdev}"p*; do
+            partnum=${partdev##*p}
+            if [[ ! -L "${mapping}${partnum}" ]]; then
+                ln -s "${blkdev}p${partnum}" "${mapping}${partnum}"
+
+                partdev_mappings["${blkdev}p${partnum}"]="${mapping}${partnum}"
+            fi
+        done
+    done
+}
+
+
+# Download and install latest e2fsprogs for the fast_commit feature, if required.
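+# Sizing note for the function below: mkfs.ext4 -m0.1 keeps only 0.1% of blocks
+# reserved for root, while the data volume gets an explicit reserve passed to
+# tune2fs -r of 100 MiB / 4096-byte blocks = 25600 blocks.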
+function format_and_mount_rootfs { + mkfs.ext4 -m0.1 /dev/xvdf2 + + mount -o noatime,nodiratime /dev/xvdf2 /mnt + if [ "${ARCH}" = "arm64" ]; then + mkfs.fat -F32 /dev/xvdf1 + mkdir -p /mnt/boot/efi + sleep 2 + mount /dev/xvdf1 /mnt/boot/efi + fi + + mkfs.ext4 /dev/xvdh + + # Explicitly reserving 100MiB worth of blocks for the data volume + RESERVED_DATA_VOLUME_BLOCK_COUNT=$((100 * 1024 * 1024 / 4096)) + tune2fs -r $RESERVED_DATA_VOLUME_BLOCK_COUNT /dev/xvdh + + mkdir -p /mnt/data + mount -o defaults,discard /dev/xvdh /mnt/data +} + +function create_swapfile { + fallocate -l 1G /mnt/swapfile + chmod 600 /mnt/swapfile + mkswap /mnt/swapfile +} + +function format_build_partition { + mkfs.ext4 -O ^has_journal /dev/xvdc +} +function pull_docker { + apt-get install -y docker.io + docker run -itd --name ccachedata "${DOCKER_IMAGE}:${DOCKER_IMAGE_TAG}" sh + docker exec -itd ccachedata mkdir -p /build/ccache +} + +# Create fstab +function create_fstab { + FMT="%-42s %-11s %-5s %-17s %-5s %s" +cat > "/mnt/etc/fstab" << EOF +$(printf "${FMT}" "# DEVICE UUID" "MOUNTPOINT" "TYPE" "OPTIONS" "DUMP" "FSCK") +$(findmnt -no SOURCE /mnt | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/", "ext4", "defaults,discard", "0", "1" ) }') +$(findmnt -no SOURCE /mnt/boot/efi | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/boot/efi", "vfat", "umask=0077", "0", "1" ) }') +$(findmnt -no SOURCE /mnt/data | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/data", "ext4", "defaults,discard", "0", "2" ) }') +$(printf "$FMT" "/swapfile" "none" "swap" "sw" "0" "0") +EOF + unset FMT +} + +function setup_chroot_environment { + UBUNTU_VERSION=$(lsb_release -cs) # 'focal' for Ubuntu 20.04 + + # Bootstrap Ubuntu into /mnt + debootstrap --arch ${ARCH} --variant=minbase "$UBUNTU_VERSION" /mnt + + # Update ec2-region + REGION=$(curl --silent --fail http://169.254.169.254/latest/meta-data/placement/availability-zone | sed -E 's|[a-z]+$||g') + sed -i "s/REGION/${REGION}/g" /tmp/sources.list + cp /tmp/sources.list /mnt/etc/apt/sources.list + + if [ "${ARCH}" = "arm64" ]; then + create_fstab + fi + + # Create mount points and mount the filesystem + mkdir -p /mnt/{dev,proc,sys} + mount --rbind /dev /mnt/dev + mount --rbind /proc /mnt/proc + mount --rbind /sys /mnt/sys + + # Create build mount point and mount + mkdir -p /mnt/tmp + mount /dev/xvdc /mnt/tmp + chmod 777 /mnt/tmp + + # Copy apparmor profiles + chmod 644 /tmp/apparmor_profiles/* + cp -r /tmp/apparmor_profiles /mnt/tmp/ + + # Copy migrations + cp -r /tmp/migrations /mnt/tmp/ + + # Copy unit tests + cp -r /tmp/unit-tests /mnt/tmp/ + + # Copy the bootstrap script into place and execute inside chroot + cp /tmp/chroot-bootstrap-nix.sh /mnt/tmp/chroot-bootstrap-nix.sh + chroot /mnt /tmp/chroot-bootstrap-nix.sh + rm -f /mnt/tmp/chroot-bootstrap-nix.sh + echo "${POSTGRES_SUPABASE_VERSION}" > /mnt/root/supabase-release + + # Copy the nvme identification script into /sbin inside the chroot + mkdir -p /mnt/sbin + cp /tmp/ebsnvme-id /mnt/sbin/ebsnvme-id + chmod +x /mnt/sbin/ebsnvme-id + + # Copy the udev rules for identifying nvme devices into the chroot + mkdir -p /mnt/etc/udev/rules.d + cp /tmp/70-ec2-nvme-devices.rules \ + /mnt/etc/udev/rules.d/70-ec2-nvme-devices.rules + + #Copy custom cloud-init + rm -f /mnt/etc/cloud/cloud.cfg + cp /tmp/cloud.cfg /mnt/etc/cloud/cloud.cfg + + sleep 2 +} + +function download_ccache { + docker cp ccachedata:/build/ccache/. 
/mnt/tmp/ccache +} + +function execute_playbook { + +tee /etc/ansible/ansible.cfg </dev/null; then + echo "Error: Could not create key directory $KEY_DIR" >&2 + exit 1 + fi + chmod 1777 "$KEY_DIR" + + if [[ ! -f "$KEY_FILE" ]]; then + if ! (dd if=/dev/urandom bs=32 count=1 2>/dev/null | od -A n -t x1 | tr -d ' \n' > "$KEY_FILE"); then + if ! (openssl rand -hex 32 > "$KEY_FILE"); then + echo "00000000000000000000000000000000" > "$KEY_FILE" + echo "Warning: Using fallback key" >&2 + fi + fi + chmod 644 "$KEY_FILE" + fi + + if [[ -f "$KEY_FILE" && -r "$KEY_FILE" ]]; then + cat "$KEY_FILE" + else + echo "Error: Cannot read key file $KEY_FILE" >&2 + exit 1 + fi + ''; + + # Use the shared setup but with a test-specific name + start-postgres-server-bin = makePostgresDevSetup { + inherit pkgs; + name = "start-postgres-server-test"; + extraSubstitutions = { + PGSODIUM_GETKEY = "${getkey-script}/bin/pgsodium-getkey"; + }; + }; + + getVersionArg = pkg: + let + name = pkg.version; + in + if builtins.match "15.*" name != null then "15" + else if builtins.match "17.*" name != null then "orioledb-17" + else throw "Unsupported PostgreSQL version: ${name}"; + + # Helper function to filter SQL files based on version + filterTestFiles = version: dir: + let + files = builtins.readDir dir; + isValidFile = name: + let + isVersionSpecific = builtins.match "z_([0-9]+)_.*" name != null; + matchesVersion = + if isVersionSpecific + then builtins.match ("z_" + version + "_.*") name != null + else true; + in + pkgs.lib.hasSuffix ".sql" name && matchesVersion; + in + pkgs.lib.filterAttrs (name: _: isValidFile name) files; + + # Get the major version for filtering + majorVersion = + if builtins.match ".*17.*" pgpkg.version != null + then "17" + else "15"; + + # Filter SQL test files + filteredSqlTests = filterTestFiles majorVersion ./nix/tests/sql; + + # Convert filtered tests to a sorted list of basenames (without extension) + testList = pkgs.lib.mapAttrsToList (name: _: + builtins.substring 0 (pkgs.lib.stringLength name - 4) name + ) filteredSqlTests; + sortedTestList = builtins.sort (a: b: a < b) testList; + + in + pkgs.runCommand "postgres-${pgpkg.version}-check-harness" + { + nativeBuildInputs = with pkgs; [ + coreutils bash perl pgpkg pg_prove pg_regress procps + start-postgres-server-bin which getkey-script supabase-groonga + ]; + } '' + set -e + + #First we need to create a generic pg cluster for pgtap tests and run those + export GRN_PLUGINS_DIR=${supabase-groonga}/lib/groonga/plugins + PGTAP_CLUSTER=$(mktemp -d) + initdb --locale=C --username=supabase_admin -D "$PGTAP_CLUSTER" + substitute ${./nix/tests/postgresql.conf.in} "$PGTAP_CLUSTER"/postgresql.conf \ + --subst-var-by PGSODIUM_GETKEY_SCRIPT "${getkey-script}/bin/pgsodium-getkey" + echo "listen_addresses = '*'" >> "$PGTAP_CLUSTER"/postgresql.conf + echo "port = 5435" >> "$PGTAP_CLUSTER"/postgresql.conf + echo "host all all 127.0.0.1/32 trust" >> $PGTAP_CLUSTER/pg_hba.conf + # Remove timescaledb if running orioledb-17 check + echo "I AM ${pgpkg.version}====================================================" + if [[ "${pgpkg.version}" == *"17"* ]]; then + perl -pi -e 's/ timescaledb,//g' "$PGTAP_CLUSTER/postgresql.conf" + fi + #NOTE in the future we may also need to add the orioledb extension to the cluster when cluster is oriole + echo "PGTAP_CLUSTER directory contents:" + ls -la "$PGTAP_CLUSTER" + + # Check if postgresql.conf exists + if [ ! -f "$PGTAP_CLUSTER/postgresql.conf" ]; then + echo "postgresql.conf is missing!" 
+      exit 1
+    fi
+
+    # PostgreSQL startup
+    if [[ "$(uname)" == "Darwin" ]]; then
+      pg_ctl -D "$PGTAP_CLUSTER" -l "$PGTAP_CLUSTER"/postgresql.log -o "-k "$PGTAP_CLUSTER" -p 5435 -d 5" start 2>&1
+    else
+      mkdir -p "$PGTAP_CLUSTER/sockets"
+      pg_ctl -D "$PGTAP_CLUSTER" -l "$PGTAP_CLUSTER"/postgresql.log -o "-k $PGTAP_CLUSTER/sockets -p 5435 -d 5" start 2>&1
+    fi || {
+      echo "pg_ctl failed to start PostgreSQL"
+      echo "Contents of postgresql.log:"
+      cat "$PGTAP_CLUSTER"/postgresql.log
+      exit 1
+    }
+    for i in {1..60}; do
+      if pg_isready -h localhost -p 5435; then
+        echo "PostgreSQL is ready"
+        break
+      fi
+      sleep 1
+      if [ $i -eq 60 ]; then
+        echo "PostgreSQL is not ready after 60 seconds"
+        echo "PostgreSQL status:"
+        pg_ctl -D "$PGTAP_CLUSTER" status
+        echo "PostgreSQL log content:"
+        cat "$PGTAP_CLUSTER"/postgresql.log
+        exit 1
+      fi
+    done
+    createdb -p 5435 -h localhost --username=supabase_admin testing
+    if ! psql -p 5435 -h localhost --username=supabase_admin -d testing -v ON_ERROR_STOP=1 -Xaf ${./nix/tests/prime.sql}; then
+      echo "Error executing SQL file. PostgreSQL log content:"
+      cat "$PGTAP_CLUSTER"/postgresql.log
+      pg_ctl -D "$PGTAP_CLUSTER" stop
+      exit 1
+    fi
+    SORTED_DIR=$(mktemp -d)
+    for t in $(printf "%s\n" ${builtins.concatStringsSep " " sortedTestList}); do
+      psql -p 5435 -h localhost --username=supabase_admin -d testing -f "${./nix/tests/sql}/$t.sql" || true
+    done
+    rm -rf "$SORTED_DIR"
+    pg_ctl -D "$PGTAP_CLUSTER" stop
+    rm -rf $PGTAP_CLUSTER
+
+    # End of pgtap tests.
+    # From here on out we are running pg_regress tests, using a different cluster
+    # which is started by the start-postgres-server-bin script; that script closely
+    # matches our AMI setup, configuration, and migrations.
+
+    # Ensure pgsodium key directory exists with proper permissions
+    if [[ "$(uname)" == "Darwin" ]]; then
+      mkdir -p /private/tmp/pgsodium
+      chmod 1777 /private/tmp/pgsodium
+    fi
+    unset GRN_PLUGINS_DIR
+    ${start-postgres-server-bin}/bin/start-postgres-server ${getVersionArg pgpkg} --daemonize
+
+    for i in {1..60}; do
+      if pg_isready -h localhost -p 5435 -U supabase_admin -q; then
+        echo "PostgreSQL is ready"
+        break
+      fi
+      sleep 1
+      if [ $i -eq 60 ]; then
+        echo "PostgreSQL failed to start"
+        exit 1
+      fi
+    done
+
+    if ! psql -p 5435 -h localhost --no-password --username=supabase_admin -d postgres -v ON_ERROR_STOP=1 -Xaf ${./nix/tests/prime.sql}; then
+      echo "Error executing SQL file"
+      exit 1
+    fi
+
+    mkdir -p $out/regression_output
+    if ! pg_regress \
+      --use-existing \
+      --dbname=postgres \
+      --inputdir=${./nix/tests} \
+      --outputdir=$out/regression_output \
+      --host=localhost \
+      --port=5435 \
+      --user=supabase_admin \
+      ${builtins.concatStringsSep " " sortedTestList}; then
+      echo "pg_regress tests failed"
+      exit 1
+    fi
+
+    # Copy logs to output
+    for logfile in $(find /tmp -name postgresql.log -type f); do
+      cp "$logfile" $out/postgresql.log
+    done
+    exit 0
+  '';
+  in
+  rec {
+    # The list of all packages that can be built with 'nix build'. The list
+    # of names that can be used can be shown with 'nix flake show'
+    packages = flake-utils.lib.flattenTree basePackages // {
+      # Any extra packages we might want to include in our package
+      # set can go here.
+      inherit (pkgs);
+    };
+
+    # The list of exported 'checks' that are run with every run of 'nix
+    # flake check'. This is run in the CI system, as well.
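+    # For example, `nix flake check` runs all of the checks below; a single
+    # check can also be built directly, e.g. `nix build .#checks.x86_64-linux.psql_15`
+    # (the system name here is illustrative).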
+ checks = { + psql_15 = makeCheckHarness basePackages.psql_15.bin; + psql_orioledb-17 = makeCheckHarness basePackages.psql_orioledb-17.bin; + }; + + # Apps is a list of names of things that can be executed with 'nix run'; + # these are distinct from the things that can be built with 'nix build', + # so they need to be listed here too. + apps = + let + mkApp = attrName: binName: { + type = "app"; + program = "${basePackages."${attrName}"}/bin/${binName}"; + }; + in + { + start-server = mkApp "start-server" "start-postgres-server"; + start-client = mkApp "start-client" "start-postgres-client"; + start-replica = mkApp "start-replica" "start-postgres-replica"; + migrate-postgres = mkApp "migrate-tool" "migrate-postgres"; + sync-exts-versions = mkApp "sync-exts-versions" "sync-exts-versions"; + pg-restore = mkApp "pg-restore" "pg-restore"; + local-infra-bootstrap = mkApp "local-infra-bootstrap" "local-infra-bootstrap"; + dbmate-tool = mkApp "dbmate-tool" "dbmate-tool"; + update-readme = mkApp "update-readme" "update-readme"; + }; + + # 'devShells.default' lists the set of packages that are included in the + # ambient $PATH environment when you run 'nix develop'. This is useful + # for development and puts many convenient devtools instantly within + # reach. + + devShells = let + mkCargoPgrxDevShell = { pgrxVersion, rustVersion }: pkgs.mkShell { + packages = with pkgs; [ + basePackages."cargo-pgrx_${pgrxVersion}" + (rust-bin.stable.${rustVersion}.default.override { + extensions = [ "rust-src" ]; + }) + ]; + shellHook = '' + export HISTFILE=.history + ''; + }; + in { + default = pkgs.mkShell { + packages = with pkgs; [ + coreutils + just + nix-update + #pg_prove + shellcheck + ansible + ansible-lint + (packer.overrideAttrs (oldAttrs: { + version = "1.7.8"; + })) + + basePackages.start-server + basePackages.start-client + basePackages.start-replica + basePackages.migrate-tool + basePackages.sync-exts-versions + dbmate + nushell + ]; + shellHook = '' + export HISTFILE=.history + ''; + }; + cargo-pgrx_0_11_3 = mkCargoPgrxDevShell { + pgrxVersion = "0_11_3"; + rustVersion = "1.80.0"; + }; + cargo-pgrx_0_12_6 = mkCargoPgrxDevShell { + pgrxVersion = "0_12_6"; + rustVersion = "1.80.0"; + }; + }; + } + ); +} diff --git a/postgres_15.8.1.044/http/.gitkeep b/postgres_15.8.1.044/http/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/postgres_15.8.1.044/meta-data b/postgres_15.8.1.044/meta-data new file mode 100644 index 0000000..0551428 --- /dev/null +++ b/postgres_15.8.1.044/meta-data @@ -0,0 +1,2 @@ +instance-id: iid-local01 +local-hostname: packer-ubuntu diff --git a/postgres_15.8.1.044/migrations/.env b/postgres_15.8.1.044/migrations/.env new file mode 100644 index 0000000..81974b7 --- /dev/null +++ b/postgres_15.8.1.044/migrations/.env @@ -0,0 +1,2 @@ +POSTGRES_PASSWORD=password +DATABASE_URL="postgres://postgres:${POSTGRES_PASSWORD}@localhost:5478/postgres?sslmode=disable" diff --git a/postgres_15.8.1.044/migrations/Dockerfile.dbmate b/postgres_15.8.1.044/migrations/Dockerfile.dbmate new file mode 100644 index 0000000..29c80e6 --- /dev/null +++ b/postgres_15.8.1.044/migrations/Dockerfile.dbmate @@ -0,0 +1,23 @@ +FROM debian:bullseye-slim + +RUN apt-get update && apt-get install -y curl wget gnupg2 lsb-release + +RUN ARCH=$(dpkg --print-architecture); \ + case ${ARCH} in \ + amd64) DBMATE_ARCH="linux-amd64" ;; \ + arm64) DBMATE_ARCH="linux-arm64" ;; \ + *) echo "Unsupported architecture: ${ARCH}"; exit 1 ;; \ + esac && \ + curl -fsSL -o /usr/local/bin/dbmate \ + 
https://github.com/amacneil/dbmate/releases/latest/download/dbmate-${DBMATE_ARCH} && \ + chmod +x /usr/local/bin/dbmate + +RUN wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - +RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list +RUN apt-get update && apt-get install -y postgresql-client-%VERSION% + +ENV PATH="/usr/lib/postgresql/%VERSION%/bin:${PATH}" + +RUN dbmate --version + +ENTRYPOINT ["dbmate"] diff --git a/postgres_15.8.1.044/migrations/README.md b/postgres_15.8.1.044/migrations/README.md new file mode 100644 index 0000000..19d2bf4 --- /dev/null +++ b/postgres_15.8.1.044/migrations/README.md @@ -0,0 +1,115 @@ +# Usage + +from the root of the `supabase/postgres` project, you can run the following commands: + + +```shell +Usage: nix run .#dbmate-tool -- [options] + +Options: + -v, --version [15|16|orioledb-17|all] Specify the PostgreSQL version to use (required defaults to --version all) + -p, --port PORT Specify the port number to use (default: 5435) + -h, --help Show this help message + +Description: + Runs 'dbmate up' against a locally running the version of database you specify. Or 'all' to run against all versions. + NOTE: To create a migration, you must run 'nix develop' and then 'dbmate new ' to create a new migration file. + +Examples: + nix run .#dbmate-tool + nix run .#dbmate-tool -- --version 15 + nix run .#dbmate-tool -- --version 16 --port 5433 + +``` + +This can also be run from a github "flake url" for example: + +```shell +nix run github:supabase/postgres#dbmate-tool -- --version 15 + +or + +nix run github:supabase/postgres/mybranch#dbmate-tool -- --version 15 +``` +# supabase/migrations + +`supabase/migrations` is a consolidation of SQL migrations from: + +- supabase/postgres +- supabase/supabase +- supabase/cli +- supabase/infrastructure (internal) + +aiming to provide a single source of truth for migrations on the platform that can be depended upon by those components. For more information on goals see [the RFC](https://www.notion.so/supabase/Centralize-SQL-Migrations-cd3847ae027d4f2bba9defb2cc82f69a) + + + +## How it was Created + +Migrations were pulled (in order) from: + +1. [init-scripts/postgres](https://github.com/supabase/infrastructure/tree/develop/init-scripts/postgres) => [db/init-scripts](db/init-scripts) +2. [init-scripts/migrations](https://github.com/supabase/infrastructure/tree/develop/init-scripts/migrations) => [db/migrations](db/migrations) + +For compatibility with hosted projects, we include [migrate.sh](migrate.sh) that executes migrations in the same order as ami build: + +1. Run all `db/init-scripts` with `postgres` superuser role. +2. Run all `db/migrations` with `supabase_admin` superuser role. +3. Finalize role passwords with `/etc/postgresql.schema.sql` if present. + +Additionally, [supabase/postgres](https://github.com/supabase/postgres/blob/develop/ansible/playbook-docker.yml#L9) image contains several migration scripts to configure default extensions. These are run first by docker entrypoint and included in ami by ansible. + + + +## Guidelines + +- Migrations are append only. Never edit existing migrations once they are on master. +- Migrations in `migrations/db/migrations` have to be idempotent. +- Self contained components (gotrue, storage, realtime) may contain their own migrations. +- Self hosted Supabase users should update role passwords separately after running all migrations. 
+- Prod release is done by publishing a new GitHub release on master branch. + +## Requirements + +- [dbmate](https://github.com/amacneil/dbmate) +- [docker-compose](https://docs.docker.com/compose/) + +## Usage + +### Add a Migration + +```shell +# Start the database server +docker-compose up + +# create a new migration +dbmate new '' +``` + +Then, populate the migration at `./db/migrations/xxxxxxxxx_` and make sure it execute sucessfully with + +```shell +dbmate up +``` + +### Adding a migration with docker-compose + +dbmate can optionally be run locally using docker: + +```shell +# Start the database server +docker-compose up + +# create a new migration +docker-compose run --rm dbmate new '' +``` + +Then, populate the migration at `./db/migrations/xxxxxxxxx_` and make sure it execute sucessfully with + +```shell +docker-compose run --rm dbmate up +``` + +## Testing + +Migrations are tested in CI to ensure they do not raise an exception against previously released `supabase/postgres` docker images. The full version matrix is at [test.yml](./.github/workflows/test.yml) in the `supabase-version` variable. diff --git a/postgres_15.8.1.044/migrations/db/init-scripts/00000000000000-initial-schema.sql b/postgres_15.8.1.044/migrations/db/init-scripts/00000000000000-initial-schema.sql new file mode 100644 index 0000000..ecce79a --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/init-scripts/00000000000000-initial-schema.sql @@ -0,0 +1,57 @@ +-- migrate:up + +-- Set up realtime +-- defaults to empty publication +create publication supabase_realtime; + +-- Supabase super admin +alter user supabase_admin with superuser createdb createrole replication bypassrls; + +-- Supabase replication user +create user supabase_replication_admin with login replication; + +-- Supabase read-only user +create role supabase_read_only_user with login bypassrls; +grant pg_read_all_data to supabase_read_only_user; + +-- Extension namespacing +create schema if not exists extensions; +create extension if not exists "uuid-ossp" with schema extensions; +create extension if not exists pgcrypto with schema extensions; +create extension if not exists pgjwt with schema extensions; + +-- Set up auth roles for the developer +create role anon nologin noinherit; +create role authenticated nologin noinherit; -- "logged in" user: web_user, app_user, etc +create role service_role nologin noinherit bypassrls; -- allow developers to create JWT's that bypass their policies + +create user authenticator noinherit; +grant anon to authenticator; +grant authenticated to authenticator; +grant service_role to authenticator; +grant supabase_admin to authenticator; + +grant usage on schema public to postgres, anon, authenticated, service_role; +alter default privileges in schema public grant all on tables to postgres, anon, authenticated, service_role; +alter default privileges in schema public grant all on functions to postgres, anon, authenticated, service_role; +alter default privileges in schema public grant all on sequences to postgres, anon, authenticated, service_role; + +-- Allow Extensions to be used in the API +grant usage on schema extensions to postgres, anon, authenticated, service_role; + +-- Set up namespacing +alter user supabase_admin SET search_path TO public, extensions; -- don't include the "auth" schema + +-- These are required so that the users receive grants whenever "supabase_admin" creates tables/function +alter default privileges for user supabase_admin in schema public grant all + on sequences to postgres, anon, 
authenticated, service_role; +alter default privileges for user supabase_admin in schema public grant all + on tables to postgres, anon, authenticated, service_role; +alter default privileges for user supabase_admin in schema public grant all + on functions to postgres, anon, authenticated, service_role; + +-- Set short statement/query timeouts for API roles +alter role anon set statement_timeout = '3s'; +alter role authenticated set statement_timeout = '8s'; + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/init-scripts/00000000000001-auth-schema.sql b/postgres_15.8.1.044/migrations/db/init-scripts/00000000000001-auth-schema.sql new file mode 100644 index 0000000..ad47aad --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/init-scripts/00000000000001-auth-schema.sql @@ -0,0 +1,123 @@ +-- migrate:up + +CREATE SCHEMA IF NOT EXISTS auth AUTHORIZATION supabase_admin; + +-- auth.users definition + +CREATE TABLE auth.users ( + instance_id uuid NULL, + id uuid NOT NULL UNIQUE, + aud varchar(255) NULL, + "role" varchar(255) NULL, + email varchar(255) NULL UNIQUE, + encrypted_password varchar(255) NULL, + confirmed_at timestamptz NULL, + invited_at timestamptz NULL, + confirmation_token varchar(255) NULL, + confirmation_sent_at timestamptz NULL, + recovery_token varchar(255) NULL, + recovery_sent_at timestamptz NULL, + email_change_token varchar(255) NULL, + email_change varchar(255) NULL, + email_change_sent_at timestamptz NULL, + last_sign_in_at timestamptz NULL, + raw_app_meta_data jsonb NULL, + raw_user_meta_data jsonb NULL, + is_super_admin bool NULL, + created_at timestamptz NULL, + updated_at timestamptz NULL, + CONSTRAINT users_pkey PRIMARY KEY (id) +); +CREATE INDEX users_instance_id_email_idx ON auth.users USING btree (instance_id, email); +CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id); +comment on table auth.users is 'Auth: Stores user login data within a secure schema.'; + +-- auth.refresh_tokens definition + +CREATE TABLE auth.refresh_tokens ( + instance_id uuid NULL, + id bigserial NOT NULL, + "token" varchar(255) NULL, + user_id varchar(255) NULL, + revoked bool NULL, + created_at timestamptz NULL, + updated_at timestamptz NULL, + CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id) +); +CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id); +CREATE INDEX refresh_tokens_instance_id_user_id_idx ON auth.refresh_tokens USING btree (instance_id, user_id); +CREATE INDEX refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token); +comment on table auth.refresh_tokens is 'Auth: Store of tokens used to refresh JWT tokens once they expire.'; + +-- auth.instances definition + +CREATE TABLE auth.instances ( + id uuid NOT NULL, + uuid uuid NULL, + raw_base_config text NULL, + created_at timestamptz NULL, + updated_at timestamptz NULL, + CONSTRAINT instances_pkey PRIMARY KEY (id) +); +comment on table auth.instances is 'Auth: Manages users across multiple sites.'; + +-- auth.audit_log_entries definition + +CREATE TABLE auth.audit_log_entries ( + instance_id uuid NULL, + id uuid NOT NULL, + payload json NULL, + created_at timestamptz NULL, + CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id) +); +CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id); +comment on table auth.audit_log_entries is 'Auth: Audit trail for user actions.'; + +-- auth.schema_migrations definition + +CREATE TABLE auth.schema_migrations ( + "version" varchar(255) NOT NULL, + CONSTRAINT 
schema_migrations_pkey PRIMARY KEY ("version") +); +comment on table auth.schema_migrations is 'Auth: Manages updates to the auth system.'; + +INSERT INTO auth.schema_migrations (version) +VALUES ('20171026211738'), + ('20171026211808'), + ('20171026211834'), + ('20180103212743'), + ('20180108183307'), + ('20180119214651'), + ('20180125194653'); + +-- Gets the User ID from the request cookie +create or replace function auth.uid() returns uuid as $$ + select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid; +$$ language sql stable; + +-- Gets the User ID from the request cookie +create or replace function auth.role() returns text as $$ + select nullif(current_setting('request.jwt.claim.role', true), '')::text; +$$ language sql stable; + +-- Gets the User email +create or replace function auth.email() returns text as $$ + select nullif(current_setting('request.jwt.claim.email', true), '')::text; +$$ language sql stable; + +-- usage on auth functions to API roles +GRANT USAGE ON SCHEMA auth TO anon, authenticated, service_role; + +-- Supabase super admin +CREATE USER supabase_auth_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; +GRANT ALL PRIVILEGES ON SCHEMA auth TO supabase_auth_admin; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA auth TO supabase_auth_admin; +GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA auth TO supabase_auth_admin; +ALTER USER supabase_auth_admin SET search_path = "auth"; +ALTER table "auth".users OWNER TO supabase_auth_admin; +ALTER table "auth".refresh_tokens OWNER TO supabase_auth_admin; +ALTER table "auth".audit_log_entries OWNER TO supabase_auth_admin; +ALTER table "auth".instances OWNER TO supabase_auth_admin; +ALTER table "auth".schema_migrations OWNER TO supabase_auth_admin; + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/init-scripts/00000000000002-storage-schema.sql b/postgres_15.8.1.044/migrations/db/init-scripts/00000000000002-storage-schema.sql new file mode 100644 index 0000000..40503e4 --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/init-scripts/00000000000002-storage-schema.sql @@ -0,0 +1,120 @@ +-- migrate:up + +CREATE SCHEMA IF NOT EXISTS storage AUTHORIZATION supabase_admin; + +grant usage on schema storage to postgres, anon, authenticated, service_role; +alter default privileges in schema storage grant all on tables to postgres, anon, authenticated, service_role; +alter default privileges in schema storage grant all on functions to postgres, anon, authenticated, service_role; +alter default privileges in schema storage grant all on sequences to postgres, anon, authenticated, service_role; + +CREATE TABLE "storage"."buckets" ( + "id" text not NULL, + "name" text NOT NULL, + "owner" uuid, + "created_at" timestamptz DEFAULT now(), + "updated_at" timestamptz DEFAULT now(), + CONSTRAINT "buckets_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id"), + PRIMARY KEY ("id") +); +CREATE UNIQUE INDEX "bname" ON "storage"."buckets" USING BTREE ("name"); + +CREATE TABLE "storage"."objects" ( + "id" uuid NOT NULL DEFAULT extensions.uuid_generate_v4(), + "bucket_id" text, + "name" text, + "owner" uuid, + "created_at" timestamptz DEFAULT now(), + "updated_at" timestamptz DEFAULT now(), + "last_accessed_at" timestamptz DEFAULT now(), + "metadata" jsonb, + CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY ("bucket_id") REFERENCES "storage"."buckets"("id"), + CONSTRAINT "objects_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id"), + PRIMARY KEY ("id") +); +CREATE UNIQUE INDEX "bucketid_objname" ON 
"storage"."objects" USING BTREE ("bucket_id","name"); +CREATE INDEX name_prefix_search ON storage.objects(name text_pattern_ops); + +ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY; + +CREATE FUNCTION storage.foldername(name text) + RETURNS text[] + LANGUAGE plpgsql +AS $function$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[1:array_length(_parts,1)-1]; +END +$function$; + +CREATE FUNCTION storage.filename(name text) + RETURNS text + LANGUAGE plpgsql +AS $function$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[array_length(_parts,1)]; +END +$function$; + +CREATE FUNCTION storage.extension(name text) + RETURNS text + LANGUAGE plpgsql +AS $function$ +DECLARE +_parts text[]; +_filename text; +BEGIN + select string_to_array(name, '/') into _parts; + select _parts[array_length(_parts,1)] into _filename; + -- @todo return the last part instead of 2 + return split_part(_filename, '.', 2); +END +$function$; + +CREATE FUNCTION storage.search(prefix text, bucketname text, limits int DEFAULT 100, levels int DEFAULT 1, offsets int DEFAULT 0) + RETURNS TABLE ( + name text, + id uuid, + updated_at TIMESTAMPTZ, + created_at TIMESTAMPTZ, + last_accessed_at TIMESTAMPTZ, + metadata jsonb + ) + LANGUAGE plpgsql +AS $function$ +DECLARE +_bucketId text; +BEGIN + -- will be replaced by migrations when server starts + -- saving space for cloud-init +END +$function$; + +-- create migrations table +-- https://github.com/ThomWright/postgres-migrations/blob/master/src/migrations/0_create-migrations-table.sql +-- we add this table here and not let it be auto-created so that the permissions are properly applied to it +CREATE TABLE IF NOT EXISTS storage.migrations ( + id integer PRIMARY KEY, + name varchar(100) UNIQUE NOT NULL, + hash varchar(40) NOT NULL, -- sha1 hex encoded hash of the file name and contents, to ensure it hasn't been altered since applying the migration + executed_at timestamp DEFAULT current_timestamp +); + +CREATE USER supabase_storage_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; +GRANT ALL PRIVILEGES ON SCHEMA storage TO supabase_storage_admin; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA storage TO supabase_storage_admin; +GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA storage TO supabase_storage_admin; +ALTER USER supabase_storage_admin SET search_path = "storage"; +ALTER table "storage".objects owner to supabase_storage_admin; +ALTER table "storage".buckets owner to supabase_storage_admin; +ALTER table "storage".migrations OWNER TO supabase_storage_admin; +ALTER function "storage".foldername(text) owner to supabase_storage_admin; +ALTER function "storage".filename(text) owner to supabase_storage_admin; +ALTER function "storage".extension(text) owner to supabase_storage_admin; +ALTER function "storage".search(text,text,int,int,int) owner to supabase_storage_admin; + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/init-scripts/00000000000003-post-setup.sql b/postgres_15.8.1.044/migrations/db/init-scripts/00000000000003-post-setup.sql new file mode 100644 index 0000000..47cdd13 --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/init-scripts/00000000000003-post-setup.sql @@ -0,0 +1,119 @@ +-- migrate:up + +ALTER ROLE supabase_admin SET search_path TO "\$user",public,auth,extensions; +ALTER ROLE postgres SET search_path TO "\$user",public,extensions; + +-- Trigger for pg_cron +CREATE OR REPLACE FUNCTION extensions.grant_pg_cron_access() +RETURNS event_trigger +LANGUAGE 
plpgsql +AS $$ +DECLARE + schema_is_cron bool; +BEGIN + schema_is_cron = ( + SELECT n.nspname = 'cron' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_namespace AS n + ON ev.objid = n.oid + ); + + IF schema_is_cron + THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user supabase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user supabase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user supabase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + + END IF; + +END; +$$; +CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end WHEN TAG in ('CREATE SCHEMA') +EXECUTE PROCEDURE extensions.grant_pg_cron_access(); +COMMENT ON FUNCTION extensions.grant_pg_cron_access IS 'Grants access to pg_cron'; + +-- Event trigger for pg_net +CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access() +RETURNS event_trigger +LANGUAGE plpgsql +AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_functions_admin' + ) + THEN + CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; +END; +$$; +COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net'; + +DO +$$ +BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM pg_event_trigger + WHERE evtname = 'issue_pg_net_access' + ) THEN + CREATE EVENT TRIGGER issue_pg_net_access + ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE PROCEDURE extensions.grant_pg_net_access(); + END IF; +END +$$; + +-- Supabase dashboard user +CREATE ROLE dashboard_user NOSUPERUSER CREATEDB CREATEROLE REPLICATION; +GRANT 
ALL ON DATABASE postgres TO dashboard_user; +GRANT ALL ON SCHEMA auth TO dashboard_user; +GRANT ALL ON SCHEMA extensions TO dashboard_user; +GRANT ALL ON SCHEMA storage TO dashboard_user; +GRANT ALL ON ALL TABLES IN SCHEMA auth TO dashboard_user; +GRANT ALL ON ALL TABLES IN SCHEMA extensions TO dashboard_user; +-- GRANT ALL ON ALL TABLES IN SCHEMA storage TO dashboard_user; +GRANT ALL ON ALL SEQUENCES IN SCHEMA auth TO dashboard_user; +GRANT ALL ON ALL SEQUENCES IN SCHEMA storage TO dashboard_user; +GRANT ALL ON ALL SEQUENCES IN SCHEMA extensions TO dashboard_user; +GRANT ALL ON ALL ROUTINES IN SCHEMA auth TO dashboard_user; +GRANT ALL ON ALL ROUTINES IN SCHEMA storage TO dashboard_user; +GRANT ALL ON ALL ROUTINES IN SCHEMA extensions TO dashboard_user; + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/migrate.sh b/postgres_15.8.1.044/migrations/db/migrate.sh new file mode 100644 index 0000000..0a84d1e --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrate.sh @@ -0,0 +1,72 @@ +#!/bin/sh +set -eu + +####################################### +# Used by both ami and docker builds to initialise database schema. +# Env vars: +# POSTGRES_DB defaults to postgres +# POSTGRES_HOST defaults to localhost +# POSTGRES_PORT defaults to 5432 +# POSTGRES_PASSWORD defaults to "" +# USE_DBMATE defaults to "" +# Exit code: +# 0 if migration succeeds, non-zero on error. +####################################### + +export PGDATABASE="${POSTGRES_DB:-postgres}" +export PGHOST="${POSTGRES_HOST:-localhost}" +export PGPORT="${POSTGRES_PORT:-5432}" +export PGPASSWORD="${POSTGRES_PASSWORD:-}" + +# if args are supplied, simply forward to dbmate +connect="$PGPASSWORD@$PGHOST:$PGPORT/$PGDATABASE?sslmode=disable" +if [ "$#" -ne 0 ]; then + export DATABASE_URL="${DATABASE_URL:-postgres://supabase_admin:$connect}" + exec dbmate "$@" + exit 0 +fi + +db=$( cd -- "$( dirname -- "$0" )" > /dev/null 2>&1 && pwd ) +if [ -z "${USE_DBMATE:-}" ]; then + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U supabase_admin <= 14 THEN + RETURN jsonb_build_object( + 'data', null::jsonb, + 'errors', array['pg_graphql extension is not enabled.'] + ); + ELSE + RETURN jsonb_build_object( + 'data', null::jsonb, + 'errors', array['pg_graphql is only available on projects running Postgres 14 onwards.'] + ); + END IF; + END; +$$; + +grant usage on schema graphql_public to postgres, anon, authenticated, service_role; +alter default privileges in schema graphql_public grant all on tables to postgres, anon, authenticated, service_role; +alter default privileges in schema graphql_public grant all on functions to postgres, anon, authenticated, service_role; +alter default privileges in schema graphql_public grant all on sequences to postgres, anon, authenticated, service_role; + +alter default privileges for user supabase_admin in schema graphql_public grant all + on sequences to postgres, anon, authenticated, service_role; +alter default privileges for user supabase_admin in schema graphql_public grant all + on tables to postgres, anon, authenticated, service_role; +alter default privileges for user supabase_admin in schema graphql_public grant all + on functions to postgres, anon, authenticated, service_role; + +-- Trigger upon enabling pg_graphql +CREATE OR REPLACE FUNCTION extensions.grant_pg_graphql_access() +RETURNS event_trigger +LANGUAGE plpgsql +AS $func$ + DECLARE + func_is_graphql_resolve bool; + BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN 
pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant all on function graphql.resolve to postgres, anon, authenticated, service_role; + + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + SELECT graphql.resolve(query, coalesce(variables, '{}')); + $$; + + grant execute on function graphql.resolve to postgres, anon, authenticated, service_role; + END IF; + + END; +$func$; + +DROP EVENT TRIGGER IF EXISTS issue_pg_graphql_access; +CREATE EVENT TRIGGER issue_pg_graphql_access ON ddl_command_end WHEN TAG in ('CREATE FUNCTION') +EXECUTE PROCEDURE extensions.grant_pg_graphql_access(); +COMMENT ON FUNCTION extensions.grant_pg_graphql_access IS 'Grants access to pg_graphql'; + +-- Trigger upon dropping the pg_graphql extension +CREATE OR REPLACE FUNCTION extensions.set_graphql_placeholder() +RETURNS event_trigger +LANGUAGE plpgsql +AS $func$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'data', null::jsonb, + 'errors', array['pg_graphql extension is not enabled.'] + ); + ELSE + RETURN jsonb_build_object( + 'data', null::jsonb, + 'errors', array['pg_graphql is only available on projects running Postgres 14 onwards.'] + ); + END IF; + END; + $$; + END IF; + + END; +$func$; + +DROP EVENT TRIGGER IF EXISTS issue_graphql_placeholder; +CREATE EVENT TRIGGER issue_graphql_placeholder ON sql_drop WHEN TAG in ('DROP EXTENSION') +EXECUTE PROCEDURE extensions.set_graphql_placeholder(); +COMMENT ON FUNCTION extensions.set_graphql_placeholder IS 'Reintroduces placeholder function for graphql_public.graphql'; + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/migrations/20220321174452_fix-postgrest-alter-type-event-trigger.sql b/postgres_15.8.1.044/migrations/db/migrations/20220321174452_fix-postgrest-alter-type-event-trigger.sql new file mode 100644 index 0000000..339def9 --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20220321174452_fix-postgrest-alter-type-event-trigger.sql @@ -0,0 +1,70 @@ +-- migrate:up + +drop event trigger if exists api_restart; +drop function if exists extensions.notify_api_restart(); + +-- https://postgrest.org/en/latest/schema_cache.html#finer-grained-event-trigger +-- watch create and alter +CREATE OR REPLACE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger AS $$ 
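+-- Note: PostgREST keeps its schema cache in memory and LISTENs on the
+-- "pgrst" channel, so any session can force a rebuild by hand with:
+--   NOTIFY pgrst, 'reload schema';
+-- The body below just automates that NOTIFY for cache-relevant DDL.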
+DECLARE + cmd record; +BEGIN + FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + IF cmd.command_tag IN ( + 'CREATE SCHEMA', 'ALTER SCHEMA' + , 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE' + , 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE' + , 'CREATE VIEW', 'ALTER VIEW' + , 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW' + , 'CREATE FUNCTION', 'ALTER FUNCTION' + , 'CREATE TRIGGER' + , 'CREATE TYPE', 'ALTER TYPE' + , 'CREATE RULE' + , 'COMMENT' + ) + -- don't notify in case of CREATE TEMP table or other objects created on pg_temp + AND cmd.schema_name is distinct from 'pg_temp' + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$ LANGUAGE plpgsql; + +-- watch drop +CREATE OR REPLACE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type IN ( + 'schema' + , 'table' + , 'foreign table' + , 'view' + , 'materialized view' + , 'function' + , 'trigger' + , 'type' + , 'rule' + ) + AND obj.is_temporary IS false -- no pg_temp objects + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$ LANGUAGE plpgsql; + +DROP EVENT TRIGGER IF EXISTS pgrst_ddl_watch; +CREATE EVENT TRIGGER pgrst_ddl_watch + ON ddl_command_end + EXECUTE PROCEDURE extensions.pgrst_ddl_watch(); + +DROP EVENT TRIGGER IF EXISTS pgrst_drop_watch; +CREATE EVENT TRIGGER pgrst_drop_watch + ON sql_drop + EXECUTE PROCEDURE extensions.pgrst_drop_watch(); + + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/migrations/20220322085208_gotrue-session-limit.sql b/postgres_15.8.1.044/migrations/db/migrations/20220322085208_gotrue-session-limit.sql new file mode 100644 index 0000000..ef1a5ef --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20220322085208_gotrue-session-limit.sql @@ -0,0 +1,4 @@ +-- migrate:up +ALTER ROLE supabase_auth_admin SET idle_in_transaction_session_timeout TO 60000; + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/migrations/20220404205710_pg_graphql-on-by-default.sql b/postgres_15.8.1.044/migrations/db/migrations/20220404205710_pg_graphql-on-by-default.sql new file mode 100644 index 0000000..452901e --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20220404205710_pg_graphql-on-by-default.sql @@ -0,0 +1,161 @@ +-- migrate:up + +-- Update Trigger upon enabling pg_graphql +create or replace function extensions.grant_pg_graphql_access() + returns event_trigger + language plpgsql +AS $func$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant all on function graphql.resolve to postgres, anon, authenticated, service_role; + + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default 
null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + -- This changed + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + grant execute on function graphql.resolve to postgres, anon, authenticated, service_role; + END IF; + +END; +$func$; + +CREATE OR REPLACE FUNCTION extensions.set_graphql_placeholder() +RETURNS event_trigger +LANGUAGE plpgsql +AS $func$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' + ) + ) + ); + END IF; + END; + $$; + END IF; + + END; +$func$; + +-- GraphQL Placeholder Entrypoint +create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null +) + returns jsonb + language plpgsql +as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' + ) + ) + ); + END IF; + END; +$$; + + +drop extension if exists pg_graphql; +-- Avoids limitation of only being able to load the extension via dashboard +-- Only install as well if the extension is actually installed +DO $$ +DECLARE + graphql_exists boolean; +BEGIN + graphql_exists = ( + select count(*) = 1 + from pg_available_extensions + where name = 'pg_graphql' + ); + + IF graphql_exists + THEN + create extension if not exists pg_graphql; + END IF; +END $$; + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/migrations/20220609081115_grant-supabase-auth-admin-and-supabase-storage-admin-to-postgres.sql b/postgres_15.8.1.044/migrations/db/migrations/20220609081115_grant-supabase-auth-admin-and-supabase-storage-admin-to-postgres.sql new file mode 100644 index 0000000..6634993 --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20220609081115_grant-supabase-auth-admin-and-supabase-storage-admin-to-postgres.sql @@ -0,0 +1,10 @@ +-- migrate:up + +-- This is done so that the `postgres` role can manage auth tables triggers, +-- storage tables policies, etc. which unblocks the revocation of superuser +-- access. 
+--
+-- More context: https://www.notion.so/supabase/RFC-Postgres-Permissions-I-40cb4f61bd4145fd9e75ce657c0e31dd#bf5d853436384e6e8e339d0a2e684cbb
+grant supabase_auth_admin, supabase_storage_admin to postgres;
+
+-- migrate:down
diff --git a/postgres_15.8.1.044/migrations/db/migrations/20220613123923_pg_graphql-pg-dump-perms.sql b/postgres_15.8.1.044/migrations/db/migrations/20220613123923_pg_graphql-pg-dump-perms.sql
new file mode 100644
index 0000000..915b1c0
--- /dev/null
+++ b/postgres_15.8.1.044/migrations/db/migrations/20220613123923_pg_graphql-pg-dump-perms.sql
@@ -0,0 +1,74 @@
+-- migrate:up
+
+create or replace function extensions.grant_pg_graphql_access()
+    returns event_trigger
+    language plpgsql
+AS $func$
+DECLARE
+    func_is_graphql_resolve bool;
+BEGIN
+    func_is_graphql_resolve = (
+        SELECT n.proname = 'resolve'
+        FROM pg_event_trigger_ddl_commands() AS ev
+        LEFT JOIN pg_catalog.pg_proc AS n
+            ON ev.objid = n.oid
+    );
+
+    IF func_is_graphql_resolve
+    THEN
+        -- Update public wrapper to pass all arguments through to the pg_graphql resolve func
+        DROP FUNCTION IF EXISTS graphql_public.graphql;
+        create or replace function graphql_public.graphql(
+            "operationName" text default null,
+            query text default null,
+            variables jsonb default null,
+            extensions jsonb default null
+        )
+            returns jsonb
+            language sql
+        as $$
+            select graphql.resolve(
+                query := query,
+                variables := coalesce(variables, '{}'),
+                "operationName" := "operationName",
+                extensions := extensions
+            );
+        $$;
+
+        -- This hook executes when `graphql.resolve` is created. That is not necessarily the last
+        -- function in the extension so we need to grant permissions on existing entities AND
+        -- update default permissions to any others that are created after `graphql.resolve`
+        grant usage on schema graphql to postgres, anon, authenticated, service_role;
+        grant select on all tables in schema graphql to postgres, anon, authenticated, service_role;
+        grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role;
+        grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role;
+        alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role;
+        alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role;
+        alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role;
+    END IF;
+
+END;
+$func$;
+
+-- Cycle the extension off and back on to apply the permissions update.
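+-- The grant hook above only fires while `graphql.resolve` is being created,
+-- so re-installing the extension below is what replays the grants. Once
+-- pg_graphql is enabled again, clients reach it through the public wrapper,
+-- e.g. (illustrative query only, not part of the migration):
+--   select graphql_public.graphql(query := '{ __typename }');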
+ +drop extension if exists pg_graphql; +-- Avoids limitation of only being able to load the extension via dashboard +-- Only install as well if the extension is actually installed +DO $$ +DECLARE + graphql_exists boolean; +BEGIN + graphql_exists = ( + select count(*) = 1 + from pg_available_extensions + where name = 'pg_graphql' + ); + + IF graphql_exists + THEN + create extension if not exists pg_graphql; + END IF; +END $$; + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/migrations/20220713082019_pg_cron-pg_net-temp-perms-fix.sql b/postgres_15.8.1.044/migrations/db/migrations/20220713082019_pg_cron-pg_net-temp-perms-fix.sql new file mode 100644 index 0000000..f98c871 --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20220713082019_pg_cron-pg_net-temp-perms-fix.sql @@ -0,0 +1,74 @@ +-- migrate:up +DO $$ +DECLARE + pg_cron_installed boolean; +BEGIN + -- checks if pg_cron is enabled + pg_cron_installed = ( + select count(*) = 1 + from pg_available_extensions + where name = 'pg_cron' + and installed_version is not null + ); + + IF pg_cron_installed + THEN + grant usage on schema cron to postgres with grant option; + grant all on all functions in schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user supabase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user supabase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user supabase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + END IF; +END $$; + +DO $$ +DECLARE + pg_net_installed boolean; +BEGIN + -- checks if pg_net is enabled + pg_net_installed = ( + select count(*) = 1 + from pg_available_extensions + where name = 'pg_net' + and installed_version is not null + + ); + + IF pg_net_installed + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_functions_admin' + ) + THEN + CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, 
body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; +END $$; + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/migrations/20221028101028_set_authenticator_timeout.sql b/postgres_15.8.1.044/migrations/db/migrations/20221028101028_set_authenticator_timeout.sql new file mode 100644 index 0000000..d38c2bf --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20221028101028_set_authenticator_timeout.sql @@ -0,0 +1,5 @@ +-- migrate:up +alter role authenticator set statement_timeout = '8s'; + +-- migrate:down + diff --git a/postgres_15.8.1.044/migrations/db/migrations/20221103090837_revoke_admin.sql b/postgres_15.8.1.044/migrations/db/migrations/20221103090837_revoke_admin.sql new file mode 100644 index 0000000..84e71f5 --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20221103090837_revoke_admin.sql @@ -0,0 +1,5 @@ +-- migrate:up +revoke supabase_admin from authenticator; + +-- migrate:down + diff --git a/postgres_15.8.1.044/migrations/db/migrations/20221207154255_create_pgsodium_and_vault.sql b/postgres_15.8.1.044/migrations/db/migrations/20221207154255_create_pgsodium_and_vault.sql new file mode 100644 index 0000000..f93cc21 --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20221207154255_create_pgsodium_and_vault.sql @@ -0,0 +1,50 @@ +-- migrate:up + +DO $$ +DECLARE + pgsodium_exists boolean; + vault_exists boolean; +BEGIN + IF EXISTS (SELECT FROM pg_available_extensions WHERE name = 'supabase_vault' AND default_version != '0.2.8') THEN + CREATE EXTENSION IF NOT EXISTS supabase_vault; + + -- for some reason extension custom scripts aren't run during AMI build, so + -- we manually run it here + GRANT USAGE ON SCHEMA vault TO postgres WITH GRANT OPTION; + GRANT SELECT, DELETE ON vault.secrets, vault.decrypted_secrets TO postgres WITH GRANT OPTION; + GRANT EXECUTE ON FUNCTION vault.create_secret, vault.update_secret, vault._crypto_aead_det_decrypt TO postgres WITH GRANT OPTION; + ELSE + pgsodium_exists = ( + select count(*) = 1 + from pg_available_extensions + where name = 'pgsodium' + and default_version in ('3.1.6', '3.1.7', '3.1.8', '3.1.9') + ); + + vault_exists = ( + select count(*) = 1 + from pg_available_extensions + where name = 'supabase_vault' + ); + + IF pgsodium_exists + THEN + create extension if not exists pgsodium; + + grant pgsodium_keyiduser to postgres with admin option; + grant pgsodium_keyholder to postgres with admin option; + grant pgsodium_keymaker to postgres with admin option; + + grant execute on function pgsodium.crypto_aead_det_decrypt(bytea, bytea, uuid, bytea) to service_role; + grant execute on function pgsodium.crypto_aead_det_encrypt(bytea, bytea, uuid, bytea) to service_role; + grant execute on function pgsodium.crypto_aead_det_keygen to service_role; + + IF vault_exists + THEN + create extension if not exists supabase_vault; + END IF; + END IF; + END IF; +END $$; + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/migrations/20230201083204_grant_auth_roles_to_postgres.sql b/postgres_15.8.1.044/migrations/db/migrations/20230201083204_grant_auth_roles_to_postgres.sql new file mode 100644 index 0000000..f975813 --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20230201083204_grant_auth_roles_to_postgres.sql @@ -0,0 +1,5 @@ +-- migrate:up +grant anon, authenticated, service_role to postgres; + +-- migrate:down + diff --git 
a/postgres_15.8.1.044/migrations/db/migrations/20230224042246_grant_extensions_perms_for_postgres.sql b/postgres_15.8.1.044/migrations/db/migrations/20230224042246_grant_extensions_perms_for_postgres.sql new file mode 100644 index 0000000..f00fffe --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20230224042246_grant_extensions_perms_for_postgres.sql @@ -0,0 +1,10 @@ +-- migrate:up +grant all privileges on all tables in schema extensions to postgres with grant option; +grant all privileges on all routines in schema extensions to postgres with grant option; +grant all privileges on all sequences in schema extensions to postgres with grant option; +alter default privileges in schema extensions grant all on tables to postgres with grant option; +alter default privileges in schema extensions grant all on routines to postgres with grant option; +alter default privileges in schema extensions grant all on sequences to postgres with grant option; + +-- migrate:down + diff --git a/postgres_15.8.1.044/migrations/db/migrations/20230306081037_grant_pg_monitor_to_postgres.sql b/postgres_15.8.1.044/migrations/db/migrations/20230306081037_grant_pg_monitor_to_postgres.sql new file mode 100644 index 0000000..76c350e --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20230306081037_grant_pg_monitor_to_postgres.sql @@ -0,0 +1,5 @@ +-- migrate:up +grant pg_monitor to postgres; + +-- migrate:down + diff --git a/postgres_15.8.1.044/migrations/db/migrations/20230327032006_grant_auth_roles_to_supabase_storage_admin.sql b/postgres_15.8.1.044/migrations/db/migrations/20230327032006_grant_auth_roles_to_supabase_storage_admin.sql new file mode 100644 index 0000000..641d4c6 --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20230327032006_grant_auth_roles_to_supabase_storage_admin.sql @@ -0,0 +1,4 @@ +-- migrate:up +grant anon, authenticated, service_role to supabase_storage_admin; + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/migrations/20230529180330_alter_api_roles_for_inherit.sql b/postgres_15.8.1.044/migrations/db/migrations/20230529180330_alter_api_roles_for_inherit.sql new file mode 100644 index 0000000..d8291bb --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20230529180330_alter_api_roles_for_inherit.sql @@ -0,0 +1,15 @@ +-- migrate:up + +ALTER ROLE authenticated inherit; +ALTER ROLE anon inherit; +ALTER ROLE service_role inherit; + +DO $$ +BEGIN + IF EXISTS (SELECT FROM pg_roles WHERE rolname = 'pgsodium_keyholder') THEN + GRANT pgsodium_keyholder to service_role; + END IF; +END $$; + +-- migrate:down + diff --git a/postgres_15.8.1.044/migrations/db/migrations/20231013070755_grant_authenticator_to_supabase_storage_admin.sql b/postgres_15.8.1.044/migrations/db/migrations/20231013070755_grant_authenticator_to_supabase_storage_admin.sql new file mode 100644 index 0000000..7597f29 --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20231013070755_grant_authenticator_to_supabase_storage_admin.sql @@ -0,0 +1,5 @@ +-- migrate:up +grant authenticator to supabase_storage_admin; +revoke anon, authenticated, service_role from supabase_storage_admin; + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/migrations/20231017062225_grant_pg_graphql_permissions_for_custom_roles.sql b/postgres_15.8.1.044/migrations/db/migrations/20231017062225_grant_pg_graphql_permissions_for_custom_roles.sql new file mode 100644 index 0000000..ca204bb --- /dev/null +++ 
b/postgres_15.8.1.044/migrations/db/migrations/20231017062225_grant_pg_graphql_permissions_for_custom_roles.sql @@ -0,0 +1,78 @@ +-- migrate:up + +create or replace function extensions.grant_pg_graphql_access() + returns event_trigger + language plpgsql +AS $func$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + -- This hook executes when `graphql.resolve` is created. That is not necessarily the last + -- function in the extension so we need to grant permissions on existing entities AND + -- update default permissions to any others that are created after `graphql.resolve` + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant select on all tables in schema graphql to postgres, anon, authenticated, service_role; + grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role; + grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + -- Allow postgres role to allow granting usage on graphql and graphql_public schemas to custom roles + grant usage on schema graphql_public to postgres with grant option; + grant usage on schema graphql to postgres with grant option; + END IF; + +END; +$func$; + +-- Cycle the extension off and back on to apply the permissions update. 
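+-- Because the usage grants above carry `with grant option`, the managed
+-- `postgres` role can itself extend GraphQL access to user-defined roles,
+-- e.g. (hypothetical role name):
+--   grant usage on schema graphql_public to my_custom_role;
+--   grant usage on schema graphql to my_custom_role;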
+
+drop extension if exists pg_graphql;
+-- Avoids limitation of only being able to load the extension via dashboard
+-- Only install as well if the extension is actually installed
+DO $$
+DECLARE
+  graphql_exists boolean;
+BEGIN
+  graphql_exists = (
+      select count(*) = 1
+      from pg_available_extensions
+      where name = 'pg_graphql'
+  );
+
+  IF graphql_exists
+  THEN
+      create extension if not exists pg_graphql;
+  END IF;
+END $$;
+
+-- migrate:down
diff --git a/postgres_15.8.1.044/migrations/db/migrations/20231020085357_revoke_writes_on_cron_job_from_postgres.sql b/postgres_15.8.1.044/migrations/db/migrations/20231020085357_revoke_writes_on_cron_job_from_postgres.sql
new file mode 100644
index 0000000..25b8271
--- /dev/null
+++ b/postgres_15.8.1.044/migrations/db/migrations/20231020085357_revoke_writes_on_cron_job_from_postgres.sql
@@ -0,0 +1,47 @@
+-- migrate:up
+do $$
+begin
+  if exists (select from pg_extension where extname = 'pg_cron') then
+    revoke all on table cron.job from postgres;
+    grant select on table cron.job to postgres with grant option;
+  end if;
+end $$;
+
+CREATE OR REPLACE FUNCTION extensions.grant_pg_cron_access() RETURNS event_trigger
+  LANGUAGE plpgsql
+  AS $$
+BEGIN
+  IF EXISTS (
+    SELECT
+    FROM pg_event_trigger_ddl_commands() AS ev
+    JOIN pg_extension AS ext
+    ON ev.objid = ext.oid
+    WHERE ext.extname = 'pg_cron'
+  )
+  THEN
+    grant usage on schema cron to postgres with grant option;
+
+    alter default privileges in schema cron grant all on tables to postgres with grant option;
+    alter default privileges in schema cron grant all on functions to postgres with grant option;
+    alter default privileges in schema cron grant all on sequences to postgres with grant option;
+
+    alter default privileges for user supabase_admin in schema cron grant all
+        on sequences to postgres with grant option;
+    alter default privileges for user supabase_admin in schema cron grant all
+        on tables to postgres with grant option;
+    alter default privileges for user supabase_admin in schema cron grant all
+        on functions to postgres with grant option;
+
+    grant all privileges on all tables in schema cron to postgres with grant option;
+    revoke all on table cron.job from postgres;
+    grant select on table cron.job to postgres with grant option;
+  END IF;
+END;
+$$;
+
+drop event trigger if exists issue_pg_cron_access;
+CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end
+  WHEN TAG IN ('CREATE EXTENSION')
+  EXECUTE FUNCTION extensions.grant_pg_cron_access();
+
+-- migrate:down
diff --git a/postgres_15.8.1.044/migrations/db/migrations/20231130133139_set_lock_timeout_to_authenticator_role.sql b/postgres_15.8.1.044/migrations/db/migrations/20231130133139_set_lock_timeout_to_authenticator_role.sql
new file mode 100644
index 0000000..a0cee20
--- /dev/null
+++ b/postgres_15.8.1.044/migrations/db/migrations/20231130133139_set_lock_timeout_to_authenticator_role.sql
@@ -0,0 +1,4 @@
+-- migrate:up
+ALTER ROLE authenticator set lock_timeout to '8s';
+
+-- migrate:down
diff --git a/postgres_15.8.1.044/migrations/db/migrations/20240124080435_alter_lo_export_lo_import_owner.sql b/postgres_15.8.1.044/migrations/db/migrations/20240124080435_alter_lo_export_lo_import_owner.sql
new file mode 100644
index 0000000..7c0d57d
--- /dev/null
+++ b/postgres_15.8.1.044/migrations/db/migrations/20240124080435_alter_lo_export_lo_import_owner.sql
@@ -0,0 +1,6 @@
+-- migrate:up
+alter function pg_catalog.lo_export owner to supabase_admin;
+alter function pg_catalog.lo_import(text) owner to supabase_admin;
+alter function pg_catalog.lo_import(text, oid) owner to supabase_admin;
+
+-- migrate:down
diff --git a/postgres_15.8.1.044/migrations/db/migrations/20240606060239_grant_predefined_roles_to_postgres.sql b/postgres_15.8.1.044/migrations/db/migrations/20240606060239_grant_predefined_roles_to_postgres.sql
new file mode 100644
index 0000000..324e124
--- /dev/null
+++ b/postgres_15.8.1.044/migrations/db/migrations/20240606060239_grant_predefined_roles_to_postgres.sql
@@ -0,0 +1,4 @@
+-- migrate:up
+grant pg_read_all_data, pg_signal_backend to postgres;
+
+-- migrate:down
diff --git a/postgres_15.8.1.044/migrations/db/migrations/20241031003909_create_orioledb.sql b/postgres_15.8.1.044/migrations/db/migrations/20241031003909_create_orioledb.sql
new file mode 100644
index 0000000..694fbb9
--- /dev/null
+++ b/postgres_15.8.1.044/migrations/db/migrations/20241031003909_create_orioledb.sql
@@ -0,0 +1,11 @@
+-- migrate:up
+do $$
+begin
+  if exists (select 1 from pg_available_extensions where name = 'orioledb') then
+    if not exists (select 1 from pg_extension where extname = 'orioledb') then
+      create extension if not exists orioledb;
+    end if;
+  end if;
+end $$;
+
+-- migrate:down
diff --git a/postgres_15.8.1.044/migrations/db/migrations/20241215003910_backfill_pgmq_metadata.sql b/postgres_15.8.1.044/migrations/db/migrations/20241215003910_backfill_pgmq_metadata.sql
new file mode 100644
index 0000000..5785272
--- /dev/null
+++ b/postgres_15.8.1.044/migrations/db/migrations/20241215003910_backfill_pgmq_metadata.sql
@@ -0,0 +1,79 @@
+-- migrate:up
+do $$
+begin
+  -- Check if the pgmq.meta table exists
+  if exists (
+    select
+      1
+    from
+      pg_catalog.pg_class c
+      join pg_catalog.pg_namespace n
+        on c.relnamespace = n.oid
+    where
+      n.nspname = 'pgmq'
+      and c.relname = 'meta'
+      and c.relkind = 'r' -- regular table
+      -- Make sure only expected columns exist and are correctly named
+      and (
+        select array_agg(attname::text order by attname)
+        from pg_catalog.pg_attribute a
+        where
+          a.attnum > 0
+          and a.attrelid = c.oid
+      ) = array['created_at', 'is_partitioned', 'is_unlogged', 'queue_name']::text[]
+  ) then
+    -- Insert data into pgmq.meta for all tables matching the naming pattern 'pgmq.q_'
+    insert into pgmq.meta (queue_name, is_partitioned, is_unlogged, created_at)
+    select
+      substring(c.relname from 3) as queue_name,
+      false as is_partitioned,
+      case when c.relpersistence = 'u' then true else false end as is_unlogged,
+      now() as created_at
+    from
+      pg_catalog.pg_class c
+      join pg_catalog.pg_namespace n
+        on c.relnamespace = n.oid
+    where
+      n.nspname = 'pgmq'
+      and c.relname like 'q_%'
+      and c.relkind in ('r', 'p', 'u')
+    on conflict (queue_name) do nothing;
+  end if;
+end $$;
+
+-- For logical backups we detach the queue and archive tables from the pgmq extension
+-- prior to pausing. Once detached, pgmq.drop_queue breaks. This re-attaches them
+-- when a project is unpaused and allows pgmq.drop_queue to work normally.
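+-- Extension membership is tracked in pg_depend rows with deptype 'e'; the
+-- block below finds pgmq queue (q_*) and archive (a_*) tables missing that
+-- row and re-registers each one. For a queue named "jobs" (hypothetical
+-- name) the loop effectively executes:
+--   alter extension pgmq add table pgmq.q_jobs;
+--   alter extension pgmq add table pgmq.a_jobs;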
+do $$ +declare + ext_exists boolean; + tbl record; +begin + -- check if pgmq extension is installed + select exists(select 1 from pg_extension where extname = 'pgmq') into ext_exists; + + if ext_exists then + for tbl in + select c.relname as table_name + from pg_class c + join pg_namespace n on c.relnamespace = n.oid + where n.nspname = 'pgmq' + and c.relkind in ('r', 'u') -- include ordinary and unlogged tables + and (c.relname like 'q\_%' or c.relname like 'a\_%') + and c.oid not in ( + select d.objid + from pg_depend d + join pg_extension e on d.refobjid = e.oid + where e.extname = 'pgmq' + and d.classid = 'pg_class'::regclass + and d.deptype = 'e' + ) + loop + execute format('alter extension pgmq add table pgmq.%I', tbl.table_name); + end loop; + end if; +end; +$$; + + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/migrations/20250205060043_disable_log_statement_on_internal_roles.sql b/postgres_15.8.1.044/migrations/db/migrations/20250205060043_disable_log_statement_on_internal_roles.sql new file mode 100644 index 0000000..822a758 --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20250205060043_disable_log_statement_on_internal_roles.sql @@ -0,0 +1,6 @@ +-- migrate:up +alter role supabase_admin set log_statement = none; +alter role supabase_auth_admin set log_statement = none; +alter role supabase_storage_admin set log_statement = none; + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/migrations/20250218031949_pgsodium_mask_role.sql b/postgres_15.8.1.044/migrations/db/migrations/20250218031949_pgsodium_mask_role.sql new file mode 100644 index 0000000..f44fa98 --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20250218031949_pgsodium_mask_role.sql @@ -0,0 +1,31 @@ +-- migrate:up + +DO $$ +BEGIN + IF EXISTS (SELECT FROM pg_extension WHERE extname = 'pgsodium') THEN + CREATE OR REPLACE FUNCTION pgsodium.mask_role(masked_role regrole, source_name text, view_name text) + RETURNS void + LANGUAGE plpgsql + SECURITY DEFINER + SET search_path TO '' + AS $function$ + BEGIN + EXECUTE format( + 'GRANT SELECT ON pgsodium.key TO %s', + masked_role); + + EXECUTE format( + 'GRANT pgsodium_keyiduser, pgsodium_keyholder TO %s', + masked_role); + + EXECUTE format( + 'GRANT ALL ON %I TO %s', + view_name, + masked_role); + RETURN; + END + $function$; + END IF; +END $$; + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/db/migrations/20250220051611_pg_net_perms_fix.sql b/postgres_15.8.1.044/migrations/db/migrations/20250220051611_pg_net_perms_fix.sql new file mode 100644 index 0000000..cc8ffc2 --- /dev/null +++ b/postgres_15.8.1.044/migrations/db/migrations/20250220051611_pg_net_perms_fix.sql @@ -0,0 +1,64 @@ +-- migrate:up +CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access() +RETURNS event_trigger +LANGUAGE plpgsql +AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_functions_admin' + ) + THEN + CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + + IF EXISTS ( + SELECT FROM pg_extension + WHERE extname = 'pg_net' + -- all versions in use on existing projects as of 2025-02-20 + -- version 0.12.0 onwards don't need these applied + AND extversion IN ('0.2', '0.6', '0.7', '0.7.1', '0.8', 
'0.10.0', '0.11.0') + ) THEN + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END IF; +END; +$$; + +DO $$ +BEGIN + IF EXISTS (SELECT FROM pg_extension WHERE extname = 'pg_net') + THEN + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY INVOKER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY INVOKER; + + REVOKE EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM supabase_functions_admin, postgres, anon, authenticated, service_role; + REVOKE EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM supabase_functions_admin, postgres, anon, authenticated, service_role; + + GRANT ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO PUBLIC; + GRANT ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO PUBLIC; + END IF; +END $$; + +-- migrate:down diff --git a/postgres_15.8.1.044/migrations/docker-compose.yaml b/postgres_15.8.1.044/migrations/docker-compose.yaml new file mode 100644 index 0000000..1b3e8b1 --- /dev/null +++ b/postgres_15.8.1.044/migrations/docker-compose.yaml @@ -0,0 +1,46 @@ +# Usage +# Start: docker-compose up +# Stop: docker-compose down -v + +version: "3.8" + +services: + db: + image: supabase_postgres + restart: "no" + healthcheck: + test: pg_isready -U postgres -h localhost + interval: 2s + timeout: 2s + retries: 10 + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + + pg_prove: + image: horrendo/pg_prove + depends_on: + db: + condition: service_healthy + dbmate: + condition: service_completed_successfully + environment: + PGHOST: db + PGUSER: supabase_admin + PGDATABASE: postgres + PGPASSWORD: ${POSTGRES_PASSWORD} + volumes: + - ./tests:/tests + command: pg_prove /tests/test.sql + + dbmate: + build: + context: . 
+ dockerfile: Dockerfile.dbmate + depends_on: + db: + condition: service_healthy + volumes: + - ./schema.sql:/db/schema.sql + environment: + DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD}@db/postgres?sslmode=disable + command: dump diff --git a/postgres_15.8.1.044/migrations/schema-15.sql b/postgres_15.8.1.044/migrations/schema-15.sql new file mode 100644 index 0000000..33bba06 --- /dev/null +++ b/postgres_15.8.1.044/migrations/schema-15.sql @@ -0,0 +1,1072 @@ +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +-- +-- Name: auth; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA auth; + + +-- +-- Name: extensions; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA extensions; + + +-- +-- Name: graphql; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql; + + +-- +-- Name: graphql_public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql_public; + + +-- +-- Name: pgbouncer; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA pgbouncer; + + +-- +-- Name: pgsodium; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA pgsodium; + + +-- +-- Name: pgsodium; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgsodium WITH SCHEMA pgsodium; + + +-- +-- Name: EXTENSION pgsodium; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgsodium IS 'Pgsodium is a modern cryptography library for Postgres.'; + + +-- +-- Name: realtime; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA realtime; + + +-- +-- Name: storage; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA storage; + + +-- +-- Name: vault; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA vault; + + +-- +-- Name: pg_graphql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_graphql WITH SCHEMA graphql; + + +-- +-- Name: EXTENSION pg_graphql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_graphql IS 'pg_graphql: GraphQL support'; + + +-- +-- Name: pg_stat_statements; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pg_stat_statements; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_stat_statements IS 'track planning and execution statistics of all SQL statements executed'; + + +-- +-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions'; + + +-- +-- Name: pgjwt; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgjwt WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgjwt; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgjwt IS 'JSON Web Token API for Postgresql'; + + +-- +-- Name: supabase_vault; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS supabase_vault WITH SCHEMA vault; + + +-- +-- Name: EXTENSION supabase_vault; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION supabase_vault IS 'Supabase Vault Extension'; + + +-- +-- Name: uuid-ossp; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT 
EXISTS "uuid-ossp" WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION "uuid-ossp"; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION "uuid-ossp" IS 'generate universally unique identifiers (UUIDs)'; + + +-- +-- Name: email(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.email() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.email', true), '')::text; +$$; + + +-- +-- Name: role(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.role() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.role', true), '')::text; +$$; + + +-- +-- Name: uid(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.uid() RETURNS uuid + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid; +$$; + + +-- +-- Name: grant_pg_cron_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_cron_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_cron' + ) + THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user supabase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user supabase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user supabase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + revoke all on table cron.job from postgres; + grant select on table cron.job to postgres with grant option; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_cron_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_cron_access() IS 'Grants access to pg_cron'; + + +-- +-- Name: grant_pg_graphql_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_graphql_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + -- This hook executes when `graphql.resolve` is created. 
That is not necessarily the last + -- function in the extension so we need to grant permissions on existing entities AND + -- update default permissions to any others that are created after `graphql.resolve` + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant select on all tables in schema graphql to postgres, anon, authenticated, service_role; + grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role; + grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + -- Allow postgres role to allow granting usage on graphql and graphql_public schemas to custom roles + grant usage on schema graphql_public to postgres with grant option; + grant usage on schema graphql to postgres with grant option; + END IF; + +END; +$_$; + + +-- +-- Name: FUNCTION grant_pg_graphql_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_graphql_access() IS 'Grants access to pg_graphql'; + + +-- +-- Name: grant_pg_net_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_net_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_functions_admin' + ) + THEN + CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + + IF EXISTS ( + SELECT FROM pg_extension + WHERE extname = 'pg_net' + -- all versions in use on existing projects as of 2025-02-20 + -- version 0.12.0 onwards don't need these applied + AND extversion IN ('0.2', '0.6', '0.7', '0.7.1', '0.8', '0.10.0', '0.11.0') + ) THEN + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_net_access(); Type: COMMENT; 
Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_net_access() IS 'Grants access to pg_net'; + + +-- +-- Name: pgrst_ddl_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + cmd record; +BEGIN + FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + IF cmd.command_tag IN ( + 'CREATE SCHEMA', 'ALTER SCHEMA' + , 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE' + , 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE' + , 'CREATE VIEW', 'ALTER VIEW' + , 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW' + , 'CREATE FUNCTION', 'ALTER FUNCTION' + , 'CREATE TRIGGER' + , 'CREATE TYPE', 'ALTER TYPE' + , 'CREATE RULE' + , 'COMMENT' + ) + -- don't notify in case of CREATE TEMP table or other objects created on pg_temp + AND cmd.schema_name is distinct from 'pg_temp' + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: pgrst_drop_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type IN ( + 'schema' + , 'table' + , 'foreign table' + , 'view' + , 'materialized view' + , 'function' + , 'trigger' + , 'type' + , 'rule' + ) + AND obj.is_temporary IS false -- no pg_temp objects + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: set_graphql_placeholder(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.set_graphql_placeholder() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' 
+ ) + ) + ); + END IF; + END; + $$; + END IF; + + END; +$_$; + + +-- +-- Name: FUNCTION set_graphql_placeholder(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.set_graphql_placeholder() IS 'Reintroduces placeholder function for graphql_public.graphql'; + + +-- +-- Name: get_auth(text); Type: FUNCTION; Schema: pgbouncer; Owner: - +-- + +CREATE FUNCTION pgbouncer.get_auth(p_usename text) RETURNS TABLE(username text, password text) + LANGUAGE plpgsql SECURITY DEFINER + AS $$ +BEGIN + RAISE WARNING 'PgBouncer auth request: %', p_usename; + + RETURN QUERY + SELECT usename::TEXT, passwd::TEXT FROM pg_catalog.pg_shadow + WHERE usename = p_usename; +END; +$$; + + +-- +-- Name: extension(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.extension(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +_filename text; +BEGIN + select string_to_array(name, '/') into _parts; + select _parts[array_length(_parts,1)] into _filename; + -- @todo return the last part instead of 2 + return split_part(_filename, '.', 2); +END +$$; + + +-- +-- Name: filename(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.filename(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[array_length(_parts,1)]; +END +$$; + + +-- +-- Name: foldername(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.foldername(name text) RETURNS text[] + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[1:array_length(_parts,1)-1]; +END +$$; + + +-- +-- Name: search(text, text, integer, integer, integer); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.search(prefix text, bucketname text, limits integer DEFAULT 100, levels integer DEFAULT 1, offsets integer DEFAULT 0) RETURNS TABLE(name text, id uuid, updated_at timestamp with time zone, created_at timestamp with time zone, last_accessed_at timestamp with time zone, metadata jsonb) + LANGUAGE plpgsql + AS $$ +DECLARE +_bucketId text; +BEGIN + -- will be replaced by migrations when server starts + -- saving space for cloud-init +END +$$; + + +-- +-- Name: secrets_encrypt_secret_secret(); Type: FUNCTION; Schema: vault; Owner: - +-- + +CREATE FUNCTION vault.secrets_encrypt_secret_secret() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + new.secret = CASE WHEN new.secret IS NULL THEN NULL ELSE + CASE WHEN new.key_id IS NULL THEN NULL ELSE pg_catalog.encode( + pgsodium.crypto_aead_det_encrypt( + pg_catalog.convert_to(new.secret, 'utf8'), + pg_catalog.convert_to((new.id::text || new.description::text || new.created_at::text || new.updated_at::text)::text, 'utf8'), + new.key_id::uuid, + new.nonce + ), + 'base64') END END; + RETURN new; + END; + $$; + + +SET default_tablespace = ''; + +SET default_table_access_method = heap; + +-- +-- Name: audit_log_entries; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.audit_log_entries ( + instance_id uuid, + id uuid NOT NULL, + payload json, + created_at timestamp with time zone +); + + +-- +-- Name: TABLE audit_log_entries; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.audit_log_entries IS 'Auth: Audit trail for user actions.'; + + +-- +-- Name: instances; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.instances ( + id uuid NOT NULL, + uuid uuid, + raw_base_config text, + 
created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE instances; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.instances IS 'Auth: Manages users across multiple sites.'; + + +-- +-- Name: refresh_tokens; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.refresh_tokens ( + instance_id uuid, + id bigint NOT NULL, + token character varying(255), + user_id character varying(255), + revoked boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE refresh_tokens; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.refresh_tokens IS 'Auth: Store of tokens used to refresh JWT tokens once they expire.'; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE; Schema: auth; Owner: - +-- + +CREATE SEQUENCE auth.refresh_tokens_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE OWNED BY; Schema: auth; Owner: - +-- + +ALTER SEQUENCE auth.refresh_tokens_id_seq OWNED BY auth.refresh_tokens.id; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.schema_migrations ( + version character varying(255) NOT NULL +); + + +-- +-- Name: TABLE schema_migrations; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.schema_migrations IS 'Auth: Manages updates to the auth system.'; + + +-- +-- Name: users; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.users ( + instance_id uuid, + id uuid NOT NULL, + aud character varying(255), + role character varying(255), + email character varying(255), + encrypted_password character varying(255), + confirmed_at timestamp with time zone, + invited_at timestamp with time zone, + confirmation_token character varying(255), + confirmation_sent_at timestamp with time zone, + recovery_token character varying(255), + recovery_sent_at timestamp with time zone, + email_change_token character varying(255), + email_change character varying(255), + email_change_sent_at timestamp with time zone, + last_sign_in_at timestamp with time zone, + raw_app_meta_data jsonb, + raw_user_meta_data jsonb, + is_super_admin boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE users; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.users IS 'Auth: Stores user login data within a secure schema.'; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.schema_migrations ( + version character varying(128) NOT NULL +); + + +-- +-- Name: buckets; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.buckets ( + id text NOT NULL, + name text NOT NULL, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now() +); + + +-- +-- Name: migrations; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.migrations ( + id integer NOT NULL, + name character varying(100) NOT NULL, + hash character varying(40) NOT NULL, + executed_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP +); + + +-- +-- Name: objects; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.objects ( + id uuid DEFAULT extensions.uuid_generate_v4() NOT NULL, + bucket_id text, + name text, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now(), + last_accessed_at 
timestamp with time zone DEFAULT now(), + metadata jsonb +); + + +-- +-- Name: decrypted_secrets; Type: VIEW; Schema: vault; Owner: - +-- + +CREATE VIEW vault.decrypted_secrets AS + SELECT secrets.id, + secrets.name, + secrets.description, + secrets.secret, + CASE + WHEN (secrets.secret IS NULL) THEN NULL::text + ELSE + CASE + WHEN (secrets.key_id IS NULL) THEN NULL::text + ELSE convert_from(pgsodium.crypto_aead_det_decrypt(decode(secrets.secret, 'base64'::text), convert_to(((((secrets.id)::text || secrets.description) || (secrets.created_at)::text) || (secrets.updated_at)::text), 'utf8'::name), secrets.key_id, secrets.nonce), 'utf8'::name) + END + END AS decrypted_secret, + secrets.key_id, + secrets.nonce, + secrets.created_at, + secrets.updated_at + FROM vault.secrets; + + +-- +-- Name: refresh_tokens id; Type: DEFAULT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens ALTER COLUMN id SET DEFAULT nextval('auth.refresh_tokens_id_seq'::regclass); + + +-- +-- Name: audit_log_entries audit_log_entries_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.audit_log_entries + ADD CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id); + + +-- +-- Name: instances instances_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.instances + ADD CONSTRAINT instances_pkey PRIMARY KEY (id); + + +-- +-- Name: refresh_tokens refresh_tokens_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens + ADD CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: users users_email_key; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_email_key UNIQUE (email); + + +-- +-- Name: users users_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: buckets buckets_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_pkey PRIMARY KEY (id); + + +-- +-- Name: migrations migrations_name_key; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_name_key UNIQUE (name); + + +-- +-- Name: migrations migrations_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: objects objects_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_pkey PRIMARY KEY (id); + + +-- +-- Name: audit_logs_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_user_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_user_id_idx ON 
auth.refresh_tokens USING btree (instance_id, user_id); + + +-- +-- Name: refresh_tokens_token_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token); + + +-- +-- Name: users_instance_id_email_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_email_idx ON auth.users USING btree (instance_id, email); + + +-- +-- Name: users_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id); + + +-- +-- Name: bname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bname ON storage.buckets USING btree (name); + + +-- +-- Name: bucketid_objname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bucketid_objname ON storage.objects USING btree (bucket_id, name); + + +-- +-- Name: name_prefix_search; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE INDEX name_prefix_search ON storage.objects USING btree (name text_pattern_ops); + + +-- +-- Name: buckets buckets_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects objects_bucketId_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY (bucket_id) REFERENCES storage.buckets(id); + + +-- +-- Name: objects objects_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects; Type: ROW SECURITY; Schema: storage; Owner: - +-- + +ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY; + +-- +-- Name: supabase_realtime; Type: PUBLICATION; Schema: -; Owner: - +-- + +CREATE PUBLICATION supabase_realtime WITH (publish = 'insert, update, delete, truncate'); + + +-- +-- Name: issue_graphql_placeholder; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_graphql_placeholder ON sql_drop + WHEN TAG IN ('DROP EXTENSION') + EXECUTE FUNCTION extensions.set_graphql_placeholder(); + + +-- +-- Name: issue_pg_cron_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_cron_access(); + + +-- +-- Name: issue_pg_graphql_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_graphql_access ON ddl_command_end + WHEN TAG IN ('CREATE FUNCTION') + EXECUTE FUNCTION extensions.grant_pg_graphql_access(); + + +-- +-- Name: issue_pg_net_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_net_access(); + + +-- +-- Name: pgrst_ddl_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_ddl_watch ON ddl_command_end + EXECUTE FUNCTION extensions.pgrst_ddl_watch(); + + +-- +-- Name: pgrst_drop_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_drop_watch ON sql_drop + EXECUTE FUNCTION extensions.pgrst_drop_watch(); + + +-- +-- PostgreSQL database dump complete +-- + + +-- +-- Dbmate schema migrations +-- + diff --git a/postgres_15.8.1.044/migrations/schema-orioledb-17.sql 
b/postgres_15.8.1.044/migrations/schema-orioledb-17.sql new file mode 100644 index 0000000..28e756f --- /dev/null +++ b/postgres_15.8.1.044/migrations/schema-orioledb-17.sql @@ -0,0 +1,1087 @@ +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET transaction_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +-- +-- Name: auth; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA auth; + + +-- +-- Name: extensions; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA extensions; + + +-- +-- Name: graphql; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql; + + +-- +-- Name: graphql_public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql_public; + + +-- +-- Name: pgbouncer; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA pgbouncer; + + +-- +-- Name: pgsodium; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA pgsodium; + + +-- +-- Name: pgsodium; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgsodium WITH SCHEMA pgsodium; + + +-- +-- Name: EXTENSION pgsodium; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgsodium IS 'Pgsodium is a modern cryptography library for Postgres.'; + + +-- +-- Name: realtime; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA realtime; + + +-- +-- Name: storage; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA storage; + + +-- +-- Name: vault; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA vault; + + +-- +-- Name: orioledb; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS orioledb WITH SCHEMA public; + + +-- +-- Name: EXTENSION orioledb; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION orioledb IS 'OrioleDB -- the next generation transactional engine'; + + +-- +-- Name: pg_graphql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_graphql WITH SCHEMA graphql; + + +-- +-- Name: EXTENSION pg_graphql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_graphql IS 'pg_graphql: GraphQL support'; + + +-- +-- Name: pg_stat_statements; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pg_stat_statements; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_stat_statements IS 'track planning and execution statistics of all SQL statements executed'; + + +-- +-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions'; + + +-- +-- Name: pgjwt; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgjwt WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgjwt; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgjwt IS 'JSON Web Token API for Postgresql'; + + +-- +-- Name: supabase_vault; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS supabase_vault WITH SCHEMA vault; + + +-- +-- Name: EXTENSION supabase_vault; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION supabase_vault IS 'Supabase Vault Extension'; + + +-- +-- Name: uuid-ossp; Type: EXTENSION; 
Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION "uuid-ossp"; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION "uuid-ossp" IS 'generate universally unique identifiers (UUIDs)'; + + +-- +-- Name: email(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.email() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.email', true), '')::text; +$$; + + +-- +-- Name: role(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.role() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.role', true), '')::text; +$$; + + +-- +-- Name: uid(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.uid() RETURNS uuid + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid; +$$; + + +-- +-- Name: grant_pg_cron_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_cron_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_cron' + ) + THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user supabase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user supabase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user supabase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + revoke all on table cron.job from postgres; + grant select on table cron.job to postgres with grant option; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_cron_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_cron_access() IS 'Grants access to pg_cron'; + + +-- +-- Name: grant_pg_graphql_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_graphql_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + -- This hook executes when `graphql.resolve` is created. 
That is not necessarily the last + -- function in the extension so we need to grant permissions on existing entities AND + -- update default permissions to any others that are created after `graphql.resolve` + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant select on all tables in schema graphql to postgres, anon, authenticated, service_role; + grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role; + grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + -- Allow postgres role to allow granting usage on graphql and graphql_public schemas to custom roles + grant usage on schema graphql_public to postgres with grant option; + grant usage on schema graphql to postgres with grant option; + END IF; + +END; +$_$; + + +-- +-- Name: FUNCTION grant_pg_graphql_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_graphql_access() IS 'Grants access to pg_graphql'; + + +-- +-- Name: grant_pg_net_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_net_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_functions_admin' + ) + THEN + CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + + IF EXISTS ( + SELECT FROM pg_extension + WHERE extname = 'pg_net' + -- all versions in use on existing projects as of 2025-02-20 + -- version 0.12.0 onwards don't need these applied + AND extversion IN ('0.2', '0.6', '0.7', '0.7.1', '0.8', '0.10.0', '0.11.0') + ) THEN + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_net_access(); Type: COMMENT; 
Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_net_access() IS 'Grants access to pg_net'; + + +-- +-- Name: pgrst_ddl_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + cmd record; +BEGIN + FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + IF cmd.command_tag IN ( + 'CREATE SCHEMA', 'ALTER SCHEMA' + , 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE' + , 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE' + , 'CREATE VIEW', 'ALTER VIEW' + , 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW' + , 'CREATE FUNCTION', 'ALTER FUNCTION' + , 'CREATE TRIGGER' + , 'CREATE TYPE', 'ALTER TYPE' + , 'CREATE RULE' + , 'COMMENT' + ) + -- don't notify in case of CREATE TEMP table or other objects created on pg_temp + AND cmd.schema_name is distinct from 'pg_temp' + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: pgrst_drop_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type IN ( + 'schema' + , 'table' + , 'foreign table' + , 'view' + , 'materialized view' + , 'function' + , 'trigger' + , 'type' + , 'rule' + ) + AND obj.is_temporary IS false -- no pg_temp objects + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: set_graphql_placeholder(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.set_graphql_placeholder() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' 
+ ) + ) + ); + END IF; + END; + $$; + END IF; + + END; +$_$; + + +-- +-- Name: FUNCTION set_graphql_placeholder(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.set_graphql_placeholder() IS 'Reintroduces placeholder function for graphql_public.graphql'; + + +-- +-- Name: get_auth(text); Type: FUNCTION; Schema: pgbouncer; Owner: - +-- + +CREATE FUNCTION pgbouncer.get_auth(p_usename text) RETURNS TABLE(username text, password text) + LANGUAGE plpgsql SECURITY DEFINER + AS $$ +BEGIN + RAISE WARNING 'PgBouncer auth request: %', p_usename; + + RETURN QUERY + SELECT usename::TEXT, passwd::TEXT FROM pg_catalog.pg_shadow + WHERE usename = p_usename; +END; +$$; + + +-- +-- Name: extension(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.extension(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +_filename text; +BEGIN + select string_to_array(name, '/') into _parts; + select _parts[array_length(_parts,1)] into _filename; + -- @todo return the last part instead of 2 + return split_part(_filename, '.', 2); +END +$$; + + +-- +-- Name: filename(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.filename(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[array_length(_parts,1)]; +END +$$; + + +-- +-- Name: foldername(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.foldername(name text) RETURNS text[] + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[1:array_length(_parts,1)-1]; +END +$$; + + +-- +-- Name: search(text, text, integer, integer, integer); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.search(prefix text, bucketname text, limits integer DEFAULT 100, levels integer DEFAULT 1, offsets integer DEFAULT 0) RETURNS TABLE(name text, id uuid, updated_at timestamp with time zone, created_at timestamp with time zone, last_accessed_at timestamp with time zone, metadata jsonb) + LANGUAGE plpgsql + AS $$ +DECLARE +_bucketId text; +BEGIN + -- will be replaced by migrations when server starts + -- saving space for cloud-init +END +$$; + + +-- +-- Name: secrets_encrypt_secret_secret(); Type: FUNCTION; Schema: vault; Owner: - +-- + +CREATE FUNCTION vault.secrets_encrypt_secret_secret() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + new.secret = CASE WHEN new.secret IS NULL THEN NULL ELSE + CASE WHEN new.key_id IS NULL THEN NULL ELSE pg_catalog.encode( + pgsodium.crypto_aead_det_encrypt( + pg_catalog.convert_to(new.secret, 'utf8'), + pg_catalog.convert_to((new.id::text || new.description::text || new.created_at::text || new.updated_at::text)::text, 'utf8'), + new.key_id::uuid, + new.nonce + ), + 'base64') END END; + RETURN new; + END; + $$; + + +SET default_tablespace = ''; + +SET default_table_access_method = orioledb; + +-- +-- Name: audit_log_entries; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.audit_log_entries ( + instance_id uuid, + id uuid NOT NULL, + payload json, + created_at timestamp with time zone +); + + +-- +-- Name: TABLE audit_log_entries; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.audit_log_entries IS 'Auth: Audit trail for user actions.'; + + +-- +-- Name: instances; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.instances ( + id uuid NOT NULL, + uuid uuid, + raw_base_config text, + 
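-- Note (illustrative): because default_table_access_method is set to orioledb,
-- every CREATE TABLE that follows without a USING clause is stored under the
-- OrioleDB table access method rather than heap. The equivalent explicit form:
--   CREATE TABLE example (id bigint PRIMARY KEY) USING orioledb;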
created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE instances; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.instances IS 'Auth: Manages users across multiple sites.'; + + +-- +-- Name: refresh_tokens; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.refresh_tokens ( + instance_id uuid, + id bigint NOT NULL, + token character varying(255), + user_id character varying(255), + revoked boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE refresh_tokens; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.refresh_tokens IS 'Auth: Store of tokens used to refresh JWT tokens once they expire.'; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE; Schema: auth; Owner: - +-- + +CREATE SEQUENCE auth.refresh_tokens_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE OWNED BY; Schema: auth; Owner: - +-- + +ALTER SEQUENCE auth.refresh_tokens_id_seq OWNED BY auth.refresh_tokens.id; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.schema_migrations ( + version character varying(255) NOT NULL +); + + +-- +-- Name: TABLE schema_migrations; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.schema_migrations IS 'Auth: Manages updates to the auth system.'; + + +-- +-- Name: users; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.users ( + instance_id uuid, + id uuid NOT NULL, + aud character varying(255), + role character varying(255), + email character varying(255), + encrypted_password character varying(255), + confirmed_at timestamp with time zone, + invited_at timestamp with time zone, + confirmation_token character varying(255), + confirmation_sent_at timestamp with time zone, + recovery_token character varying(255), + recovery_sent_at timestamp with time zone, + email_change_token character varying(255), + email_change character varying(255), + email_change_sent_at timestamp with time zone, + last_sign_in_at timestamp with time zone, + raw_app_meta_data jsonb, + raw_user_meta_data jsonb, + is_super_admin boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE users; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.users IS 'Auth: Stores user login data within a secure schema.'; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.schema_migrations ( + version character varying(128) NOT NULL +); + + +-- +-- Name: buckets; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.buckets ( + id text NOT NULL, + name text NOT NULL, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now() +); + + +-- +-- Name: migrations; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.migrations ( + id integer NOT NULL, + name character varying(100) NOT NULL, + hash character varying(40) NOT NULL, + executed_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP +); + + +-- +-- Name: objects; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.objects ( + id uuid DEFAULT extensions.uuid_generate_v4() NOT NULL, + bucket_id text, + name text, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now(), + last_accessed_at 
timestamp with time zone DEFAULT now(), + metadata jsonb +); + + +-- +-- Name: decrypted_secrets; Type: VIEW; Schema: vault; Owner: - +-- + +CREATE VIEW vault.decrypted_secrets AS + SELECT id, + name, + description, + secret, + CASE + WHEN (secret IS NULL) THEN NULL::text + ELSE + CASE + WHEN (key_id IS NULL) THEN NULL::text + ELSE convert_from(pgsodium.crypto_aead_det_decrypt(decode(secret, 'base64'::text), convert_to(((((id)::text || description) || (created_at)::text) || (updated_at)::text), 'utf8'::name), key_id, nonce), 'utf8'::name) + END + END AS decrypted_secret, + key_id, + nonce, + created_at, + updated_at + FROM vault.secrets; + + +-- +-- Name: refresh_tokens id; Type: DEFAULT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens ALTER COLUMN id SET DEFAULT nextval('auth.refresh_tokens_id_seq'::regclass); + + +-- +-- Name: audit_log_entries audit_log_entries_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.audit_log_entries + ADD CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id); + + +-- +-- Name: instances instances_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.instances + ADD CONSTRAINT instances_pkey PRIMARY KEY (id); + + +-- +-- Name: refresh_tokens refresh_tokens_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens + ADD CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: users users_email_key; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_email_key UNIQUE (email); + + +-- +-- Name: users users_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: buckets buckets_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_pkey PRIMARY KEY (id); + + +-- +-- Name: migrations migrations_name_key; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_name_key UNIQUE (name); + + +-- +-- Name: migrations migrations_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: objects objects_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_pkey PRIMARY KEY (id); + + +-- +-- Name: audit_logs_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_user_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_user_id_idx ON auth.refresh_tokens USING btree (instance_id, user_id); + + +-- +-- Name: refresh_tokens_token_idx; Type: INDEX; Schema: auth; Owner: - +-- + 
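-- Illustrative round trip for the vault objects above (names and values are
-- examples only, and assume a usable default pgsodium key):
--   INSERT INTO vault.secrets (name, secret) VALUES ('service_key', 's3cr3t');
--   SELECT decrypted_secret FROM vault.decrypted_secrets
--    WHERE name = 'service_key';
-- secrets_encrypt_secret_secret() encrypts new.secret on write, binding the
-- row's id, description and timestamps as associated data; the view reverses
-- it with pgsodium.crypto_aead_det_decrypt.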
+CREATE INDEX refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token); + + +-- +-- Name: users_instance_id_email_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_email_idx ON auth.users USING btree (instance_id, email); + + +-- +-- Name: users_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id); + + +-- +-- Name: bname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bname ON storage.buckets USING btree (name); + + +-- +-- Name: bucketid_objname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bucketid_objname ON storage.objects USING btree (bucket_id, name); + + +-- +-- Name: name_prefix_search; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE INDEX name_prefix_search ON storage.objects USING btree (name text_pattern_ops); + + +-- +-- Name: buckets buckets_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects objects_bucketId_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY (bucket_id) REFERENCES storage.buckets(id); + + +-- +-- Name: objects objects_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects; Type: ROW SECURITY; Schema: storage; Owner: - +-- + +ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY; + +-- +-- Name: supabase_realtime; Type: PUBLICATION; Schema: -; Owner: - +-- + +CREATE PUBLICATION supabase_realtime WITH (publish = 'insert, update, delete, truncate'); + + +-- +-- Name: issue_graphql_placeholder; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_graphql_placeholder ON sql_drop + WHEN TAG IN ('DROP EXTENSION') + EXECUTE FUNCTION extensions.set_graphql_placeholder(); + + +-- +-- Name: issue_pg_cron_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_cron_access(); + + +-- +-- Name: issue_pg_graphql_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_graphql_access ON ddl_command_end + WHEN TAG IN ('CREATE FUNCTION') + EXECUTE FUNCTION extensions.grant_pg_graphql_access(); + + +-- +-- Name: issue_pg_net_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_net_access(); + + +-- +-- Name: pgrst_ddl_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_ddl_watch ON ddl_command_end + EXECUTE FUNCTION extensions.pgrst_ddl_watch(); + + +-- +-- Name: pgrst_drop_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_drop_watch ON sql_drop + EXECUTE FUNCTION extensions.pgrst_drop_watch(); + + +-- +-- PostgreSQL database dump complete +-- + + +-- +-- Dbmate schema migrations +-- + diff --git a/postgres_15.8.1.044/migrations/schema.sql b/postgres_15.8.1.044/migrations/schema.sql new file mode 100644 index 0000000..1bff8b9 --- /dev/null +++ b/postgres_15.8.1.044/migrations/schema.sql @@ -0,0 +1,1064 @@ +SET statement_timeout = 0; +SET 
lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +-- +-- Name: auth; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA auth; + + +-- +-- Name: extensions; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA extensions; + + +-- +-- Name: graphql; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql; + + +-- +-- Name: graphql_public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA graphql_public; + + +-- +-- Name: pgbouncer; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA pgbouncer; + + +-- +-- Name: pgsodium; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA pgsodium; + + +-- +-- Name: pgsodium; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgsodium WITH SCHEMA pgsodium; + + +-- +-- Name: EXTENSION pgsodium; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgsodium IS 'Pgsodium is a modern cryptography library for Postgres.'; + + +-- +-- Name: realtime; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA realtime; + + +-- +-- Name: storage; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA storage; + + +-- +-- Name: vault; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA vault; + + +-- +-- Name: pg_graphql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_graphql WITH SCHEMA graphql; + + +-- +-- Name: EXTENSION pg_graphql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_graphql IS 'pg_graphql: GraphQL support'; + + +-- +-- Name: pg_stat_statements; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pg_stat_statements WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pg_stat_statements; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pg_stat_statements IS 'track planning and execution statistics of all SQL statements executed'; + + +-- +-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions'; + + +-- +-- Name: pgjwt; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS pgjwt WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION pgjwt; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION pgjwt IS 'JSON Web Token API for Postgresql'; + + +-- +-- Name: supabase_vault; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS supabase_vault WITH SCHEMA vault; + + +-- +-- Name: EXTENSION supabase_vault; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION supabase_vault IS 'Supabase Vault Extension'; + + +-- +-- Name: uuid-ossp; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA extensions; + + +-- +-- Name: EXTENSION "uuid-ossp"; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION "uuid-ossp" IS 'generate universally unique identifiers (UUIDs)'; + + +-- +-- Name: email(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.email() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.email', true), '')::text; +$$; + + +-- +-- Name: role(); Type: FUNCTION; Schema: auth; Owner: - +-- 
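-- These claim helpers exist so row-level security policies can key on the
-- caller's JWT: auth.email() above, plus auth.role() and auth.uid() defined
-- next. A typical (illustrative) policy:
--   CREATE POLICY "owner can select" ON storage.objects
--     FOR SELECT USING (owner = auth.uid());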
+ +CREATE FUNCTION auth.role() RETURNS text + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.role', true), '')::text; +$$; + + +-- +-- Name: uid(); Type: FUNCTION; Schema: auth; Owner: - +-- + +CREATE FUNCTION auth.uid() RETURNS uuid + LANGUAGE sql STABLE + AS $$ + select nullif(current_setting('request.jwt.claim.sub', true), '')::uuid; +$$; + + +-- +-- Name: grant_pg_cron_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_cron_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_cron' + ) + THEN + grant usage on schema cron to postgres with grant option; + + alter default privileges in schema cron grant all on tables to postgres with grant option; + alter default privileges in schema cron grant all on functions to postgres with grant option; + alter default privileges in schema cron grant all on sequences to postgres with grant option; + + alter default privileges for user supabase_admin in schema cron grant all + on sequences to postgres with grant option; + alter default privileges for user supabase_admin in schema cron grant all + on tables to postgres with grant option; + alter default privileges for user supabase_admin in schema cron grant all + on functions to postgres with grant option; + + grant all privileges on all tables in schema cron to postgres with grant option; + revoke all on table cron.job from postgres; + grant select on table cron.job to postgres with grant option; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_cron_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_cron_access() IS 'Grants access to pg_cron'; + + +-- +-- Name: grant_pg_graphql_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_graphql_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ +DECLARE + func_is_graphql_resolve bool; +BEGIN + func_is_graphql_resolve = ( + SELECT n.proname = 'resolve' + FROM pg_event_trigger_ddl_commands() AS ev + LEFT JOIN pg_catalog.pg_proc AS n + ON ev.objid = n.oid + ); + + IF func_is_graphql_resolve + THEN + -- Update public wrapper to pass all arguments through to the pg_graphql resolve func + DROP FUNCTION IF EXISTS graphql_public.graphql; + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language sql + as $$ + select graphql.resolve( + query := query, + variables := coalesce(variables, '{}'), + "operationName" := "operationName", + extensions := extensions + ); + $$; + + -- This hook executes when `graphql.resolve` is created. 
That is not necessarily the last + -- function in the extension so we need to grant permissions on existing entities AND + -- update default permissions to any others that are created after `graphql.resolve` + grant usage on schema graphql to postgres, anon, authenticated, service_role; + grant select on all tables in schema graphql to postgres, anon, authenticated, service_role; + grant execute on all functions in schema graphql to postgres, anon, authenticated, service_role; + grant all on all sequences in schema graphql to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on tables to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on functions to postgres, anon, authenticated, service_role; + alter default privileges in schema graphql grant all on sequences to postgres, anon, authenticated, service_role; + + -- Allow postgres role to allow granting usage on graphql and graphql_public schemas to custom roles + grant usage on schema graphql_public to postgres with grant option; + grant usage on schema graphql to postgres with grant option; + END IF; + +END; +$_$; + + +-- +-- Name: FUNCTION grant_pg_graphql_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_graphql_access() IS 'Grants access to pg_graphql'; + + +-- +-- Name: grant_pg_net_access(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.grant_pg_net_access() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_functions_admin' + ) + THEN + CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; +END; +$$; + + +-- +-- Name: FUNCTION grant_pg_net_access(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.grant_pg_net_access() IS 'Grants access to pg_net'; + + +-- +-- Name: pgrst_ddl_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_ddl_watch() RETURNS event_trigger + LANGUAGE 
plpgsql + AS $$ +DECLARE + cmd record; +BEGIN + FOR cmd IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + IF cmd.command_tag IN ( + 'CREATE SCHEMA', 'ALTER SCHEMA' + , 'CREATE TABLE', 'CREATE TABLE AS', 'SELECT INTO', 'ALTER TABLE' + , 'CREATE FOREIGN TABLE', 'ALTER FOREIGN TABLE' + , 'CREATE VIEW', 'ALTER VIEW' + , 'CREATE MATERIALIZED VIEW', 'ALTER MATERIALIZED VIEW' + , 'CREATE FUNCTION', 'ALTER FUNCTION' + , 'CREATE TRIGGER' + , 'CREATE TYPE', 'ALTER TYPE' + , 'CREATE RULE' + , 'COMMENT' + ) + -- don't notify in case of CREATE TEMP table or other objects created on pg_temp + AND cmd.schema_name is distinct from 'pg_temp' + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: pgrst_drop_watch(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.pgrst_drop_watch() RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type IN ( + 'schema' + , 'table' + , 'foreign table' + , 'view' + , 'materialized view' + , 'function' + , 'trigger' + , 'type' + , 'rule' + ) + AND obj.is_temporary IS false -- no pg_temp objects + THEN + NOTIFY pgrst, 'reload schema'; + END IF; + END LOOP; +END; $$; + + +-- +-- Name: set_graphql_placeholder(); Type: FUNCTION; Schema: extensions; Owner: - +-- + +CREATE FUNCTION extensions.set_graphql_placeholder() RETURNS event_trigger + LANGUAGE plpgsql + AS $_$ + DECLARE + graphql_is_dropped bool; + BEGIN + graphql_is_dropped = ( + SELECT ev.schema_name = 'graphql_public' + FROM pg_event_trigger_dropped_objects() AS ev + WHERE ev.schema_name = 'graphql_public' + ); + + IF graphql_is_dropped + THEN + create or replace function graphql_public.graphql( + "operationName" text default null, + query text default null, + variables jsonb default null, + extensions jsonb default null + ) + returns jsonb + language plpgsql + as $$ + DECLARE + server_version float; + BEGIN + server_version = (SELECT (SPLIT_PART((select version()), ' ', 2))::float); + + IF server_version >= 14 THEN + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql extension is not enabled.' + ) + ) + ); + ELSE + RETURN jsonb_build_object( + 'errors', jsonb_build_array( + jsonb_build_object( + 'message', 'pg_graphql is only available on projects running Postgres 14 onwards.' 
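-- (Illustrative: with the placeholder installed, a call such as
--   SELECT graphql_public.graphql(query => '{ __typename }');
-- returns a GraphQL-style error document, e.g.
--   {"errors": [{"message": "pg_graphql extension is not enabled."}]}
-- on Postgres 14+, instead of an undefined-function error.)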
+ ) + ) + ); + END IF; + END; + $$; + END IF; + + END; +$_$; + + +-- +-- Name: FUNCTION set_graphql_placeholder(); Type: COMMENT; Schema: extensions; Owner: - +-- + +COMMENT ON FUNCTION extensions.set_graphql_placeholder() IS 'Reintroduces placeholder function for graphql_public.graphql'; + + +-- +-- Name: get_auth(text); Type: FUNCTION; Schema: pgbouncer; Owner: - +-- + +CREATE FUNCTION pgbouncer.get_auth(p_usename text) RETURNS TABLE(username text, password text) + LANGUAGE plpgsql SECURITY DEFINER + AS $$ +BEGIN + RAISE WARNING 'PgBouncer auth request: %', p_usename; + + RETURN QUERY + SELECT usename::TEXT, passwd::TEXT FROM pg_catalog.pg_shadow + WHERE usename = p_usename; +END; +$$; + + +-- +-- Name: extension(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.extension(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +_filename text; +BEGIN + select string_to_array(name, '/') into _parts; + select _parts[array_length(_parts,1)] into _filename; + -- @todo return the last part instead of 2 + return split_part(_filename, '.', 2); +END +$$; + + +-- +-- Name: filename(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.filename(name text) RETURNS text + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[array_length(_parts,1)]; +END +$$; + + +-- +-- Name: foldername(text); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.foldername(name text) RETURNS text[] + LANGUAGE plpgsql + AS $$ +DECLARE +_parts text[]; +BEGIN + select string_to_array(name, '/') into _parts; + return _parts[1:array_length(_parts,1)-1]; +END +$$; + + +-- +-- Name: search(text, text, integer, integer, integer); Type: FUNCTION; Schema: storage; Owner: - +-- + +CREATE FUNCTION storage.search(prefix text, bucketname text, limits integer DEFAULT 100, levels integer DEFAULT 1, offsets integer DEFAULT 0) RETURNS TABLE(name text, id uuid, updated_at timestamp with time zone, created_at timestamp with time zone, last_accessed_at timestamp with time zone, metadata jsonb) + LANGUAGE plpgsql + AS $$ +DECLARE +_bucketId text; +BEGIN + -- will be replaced by migrations when server starts + -- saving space for cloud-init +END +$$; + + +-- +-- Name: secrets_encrypt_secret_secret(); Type: FUNCTION; Schema: vault; Owner: - +-- + +CREATE FUNCTION vault.secrets_encrypt_secret_secret() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + new.secret = CASE WHEN new.secret IS NULL THEN NULL ELSE + CASE WHEN new.key_id IS NULL THEN NULL ELSE pg_catalog.encode( + pgsodium.crypto_aead_det_encrypt( + pg_catalog.convert_to(new.secret, 'utf8'), + pg_catalog.convert_to((new.id::text || new.description::text || new.created_at::text || new.updated_at::text)::text, 'utf8'), + new.key_id::uuid, + new.nonce + ), + 'base64') END END; + RETURN new; + END; + $$; + + +SET default_tablespace = ''; + +SET default_table_access_method = heap; + +-- +-- Name: audit_log_entries; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.audit_log_entries ( + instance_id uuid, + id uuid NOT NULL, + payload json, + created_at timestamp with time zone +); + + +-- +-- Name: TABLE audit_log_entries; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.audit_log_entries IS 'Auth: Audit trail for user actions.'; + + +-- +-- Name: instances; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.instances ( + id uuid NOT NULL, + uuid uuid, + raw_base_config text, + 
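-- Note (illustrative): pgbouncer.get_auth() above is shaped for PgBouncer's
-- auth_query hook; the matching pgbouncer.ini entries would look like:
--   auth_user  = pgbouncer
--   auth_query = SELECT username, password FROM pgbouncer.get_auth($1)
-- SECURITY DEFINER lets the low-privilege auth_user read pg_catalog.pg_shadow
-- without being granted direct access to it.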
created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE instances; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.instances IS 'Auth: Manages users across multiple sites.'; + + +-- +-- Name: refresh_tokens; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.refresh_tokens ( + instance_id uuid, + id bigint NOT NULL, + token character varying(255), + user_id character varying(255), + revoked boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE refresh_tokens; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.refresh_tokens IS 'Auth: Store of tokens used to refresh JWT tokens once they expire.'; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE; Schema: auth; Owner: - +-- + +CREATE SEQUENCE auth.refresh_tokens_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: refresh_tokens_id_seq; Type: SEQUENCE OWNED BY; Schema: auth; Owner: - +-- + +ALTER SEQUENCE auth.refresh_tokens_id_seq OWNED BY auth.refresh_tokens.id; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.schema_migrations ( + version character varying(255) NOT NULL +); + + +-- +-- Name: TABLE schema_migrations; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.schema_migrations IS 'Auth: Manages updates to the auth system.'; + + +-- +-- Name: users; Type: TABLE; Schema: auth; Owner: - +-- + +CREATE TABLE auth.users ( + instance_id uuid, + id uuid NOT NULL, + aud character varying(255), + role character varying(255), + email character varying(255), + encrypted_password character varying(255), + confirmed_at timestamp with time zone, + invited_at timestamp with time zone, + confirmation_token character varying(255), + confirmation_sent_at timestamp with time zone, + recovery_token character varying(255), + recovery_sent_at timestamp with time zone, + email_change_token character varying(255), + email_change character varying(255), + email_change_sent_at timestamp with time zone, + last_sign_in_at timestamp with time zone, + raw_app_meta_data jsonb, + raw_user_meta_data jsonb, + is_super_admin boolean, + created_at timestamp with time zone, + updated_at timestamp with time zone +); + + +-- +-- Name: TABLE users; Type: COMMENT; Schema: auth; Owner: - +-- + +COMMENT ON TABLE auth.users IS 'Auth: Stores user login data within a secure schema.'; + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.schema_migrations ( + version character varying(128) NOT NULL +); + + +-- +-- Name: buckets; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.buckets ( + id text NOT NULL, + name text NOT NULL, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now() +); + + +-- +-- Name: migrations; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.migrations ( + id integer NOT NULL, + name character varying(100) NOT NULL, + hash character varying(40) NOT NULL, + executed_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP +); + + +-- +-- Name: objects; Type: TABLE; Schema: storage; Owner: - +-- + +CREATE TABLE storage.objects ( + id uuid DEFAULT extensions.uuid_generate_v4() NOT NULL, + bucket_id text, + name text, + owner uuid, + created_at timestamp with time zone DEFAULT now(), + updated_at timestamp with time zone DEFAULT now(), + last_accessed_at 
timestamp with time zone DEFAULT now(), + metadata jsonb +); + + +-- +-- Name: decrypted_secrets; Type: VIEW; Schema: vault; Owner: - +-- + +CREATE VIEW vault.decrypted_secrets AS + SELECT secrets.id, + secrets.name, + secrets.description, + secrets.secret, + CASE + WHEN (secrets.secret IS NULL) THEN NULL::text + ELSE + CASE + WHEN (secrets.key_id IS NULL) THEN NULL::text + ELSE convert_from(pgsodium.crypto_aead_det_decrypt(decode(secrets.secret, 'base64'::text), convert_to(((((secrets.id)::text || secrets.description) || (secrets.created_at)::text) || (secrets.updated_at)::text), 'utf8'::name), secrets.key_id, secrets.nonce), 'utf8'::name) + END + END AS decrypted_secret, + secrets.key_id, + secrets.nonce, + secrets.created_at, + secrets.updated_at + FROM vault.secrets; + + +-- +-- Name: refresh_tokens id; Type: DEFAULT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens ALTER COLUMN id SET DEFAULT nextval('auth.refresh_tokens_id_seq'::regclass); + + +-- +-- Name: audit_log_entries audit_log_entries_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.audit_log_entries + ADD CONSTRAINT audit_log_entries_pkey PRIMARY KEY (id); + + +-- +-- Name: instances instances_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.instances + ADD CONSTRAINT instances_pkey PRIMARY KEY (id); + + +-- +-- Name: refresh_tokens refresh_tokens_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.refresh_tokens + ADD CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: users users_email_key; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_email_key UNIQUE (email); + + +-- +-- Name: users users_pkey; Type: CONSTRAINT; Schema: auth; Owner: - +-- + +ALTER TABLE ONLY auth.users + ADD CONSTRAINT users_pkey PRIMARY KEY (id); + + +-- +-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: buckets buckets_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_pkey PRIMARY KEY (id); + + +-- +-- Name: migrations migrations_name_key; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_name_key UNIQUE (name); + + +-- +-- Name: migrations migrations_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.migrations + ADD CONSTRAINT migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: objects objects_pkey; Type: CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_pkey PRIMARY KEY (id); + + +-- +-- Name: audit_logs_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX audit_logs_instance_id_idx ON auth.audit_log_entries USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_idx ON auth.refresh_tokens USING btree (instance_id); + + +-- +-- Name: refresh_tokens_instance_id_user_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_instance_id_user_id_idx ON 
auth.refresh_tokens USING btree (instance_id, user_id); + + +-- +-- Name: refresh_tokens_token_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX refresh_tokens_token_idx ON auth.refresh_tokens USING btree (token); + + +-- +-- Name: users_instance_id_email_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_email_idx ON auth.users USING btree (instance_id, email); + + +-- +-- Name: users_instance_id_idx; Type: INDEX; Schema: auth; Owner: - +-- + +CREATE INDEX users_instance_id_idx ON auth.users USING btree (instance_id); + + +-- +-- Name: bname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bname ON storage.buckets USING btree (name); + + +-- +-- Name: bucketid_objname; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE UNIQUE INDEX bucketid_objname ON storage.objects USING btree (bucket_id, name); + + +-- +-- Name: name_prefix_search; Type: INDEX; Schema: storage; Owner: - +-- + +CREATE INDEX name_prefix_search ON storage.objects USING btree (name text_pattern_ops); + + +-- +-- Name: buckets buckets_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.buckets + ADD CONSTRAINT buckets_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects objects_bucketId_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY (bucket_id) REFERENCES storage.buckets(id); + + +-- +-- Name: objects objects_owner_fkey; Type: FK CONSTRAINT; Schema: storage; Owner: - +-- + +ALTER TABLE ONLY storage.objects + ADD CONSTRAINT objects_owner_fkey FOREIGN KEY (owner) REFERENCES auth.users(id); + + +-- +-- Name: objects; Type: ROW SECURITY; Schema: storage; Owner: - +-- + +ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY; + +-- +-- Name: supabase_realtime; Type: PUBLICATION; Schema: -; Owner: - +-- + +CREATE PUBLICATION supabase_realtime WITH (publish = 'insert, update, delete, truncate'); + + +-- +-- Name: issue_graphql_placeholder; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_graphql_placeholder ON sql_drop + WHEN TAG IN ('DROP EXTENSION') + EXECUTE FUNCTION extensions.set_graphql_placeholder(); + + +-- +-- Name: issue_pg_cron_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_cron_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_cron_access(); + + +-- +-- Name: issue_pg_graphql_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_graphql_access ON ddl_command_end + WHEN TAG IN ('CREATE FUNCTION') + EXECUTE FUNCTION extensions.grant_pg_graphql_access(); + + +-- +-- Name: issue_pg_net_access; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end + WHEN TAG IN ('CREATE EXTENSION') + EXECUTE FUNCTION extensions.grant_pg_net_access(); + + +-- +-- Name: pgrst_ddl_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_ddl_watch ON ddl_command_end + EXECUTE FUNCTION extensions.pgrst_ddl_watch(); + + +-- +-- Name: pgrst_drop_watch; Type: EVENT TRIGGER; Schema: -; Owner: - +-- + +CREATE EVENT TRIGGER pgrst_drop_watch ON sql_drop + EXECUTE FUNCTION extensions.pgrst_drop_watch(); + + +-- +-- PostgreSQL database dump complete +-- + + +-- +-- Dbmate schema migrations +-- + diff --git a/postgres_15.8.1.044/migrations/tests/database/exists.sql 
b/postgres_15.8.1.044/migrations/tests/database/exists.sql new file mode 100644 index 0000000..54b2a38 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/database/exists.sql @@ -0,0 +1,8 @@ + +SELECT has_schema('public'); +SELECT has_schema('auth'); +SELECT has_schema('extensions'); +SELECT has_schema('graphql'); +SELECT has_schema('graphql_public'); +SELECT has_schema('realtime'); +SELECT has_schema('storage'); diff --git a/postgres_15.8.1.044/migrations/tests/database/privs.sql b/postgres_15.8.1.044/migrations/tests/database/privs.sql new file mode 100644 index 0000000..d164e0c --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/database/privs.sql @@ -0,0 +1,33 @@ +SELECT database_privs_are( + 'postgres', 'postgres', ARRAY['CONNECT', 'TEMPORARY', 'CREATE'] +); + +SELECT function_privs_are('pgsodium', 'crypto_aead_det_decrypt', array['bytea', 'bytea', 'uuid', 'bytea'], 'service_role', array['EXECUTE']); +SELECT function_privs_are('pgsodium', 'crypto_aead_det_encrypt', array['bytea', 'bytea', 'uuid', 'bytea'], 'service_role', array['EXECUTE']); +SELECT function_privs_are('pgsodium', 'crypto_aead_det_keygen', array[]::text[], 'service_role', array['EXECUTE']); + +-- Verify public schema privileges +SELECT schema_privs_are('public', 'postgres', array['CREATE', 'USAGE']); +SELECT schema_privs_are('public', 'anon', array['USAGE']); +SELECT schema_privs_are('public', 'authenticated', array['USAGE']); +SELECT schema_privs_are('public', 'service_role', array['USAGE']); + +set role postgres; +create table test_priv(); +SELECT table_owner_is('test_priv', 'postgres'); +SELECT table_privs_are('test_priv', 'supabase_admin', array['DELETE', 'INSERT', 'REFERENCES', 'SELECT', 'TRIGGER', 'TRUNCATE', 'UPDATE']); +SELECT table_privs_are('test_priv', 'postgres', array['DELETE', 'INSERT', 'REFERENCES', 'SELECT', 'TRIGGER', 'TRUNCATE', 'UPDATE']); +SELECT table_privs_are('test_priv', 'anon', array['DELETE', 'INSERT', 'REFERENCES', 'SELECT', 'TRIGGER', 'TRUNCATE', 'UPDATE']); +SELECT table_privs_are('test_priv', 'authenticated', array['DELETE', 'INSERT', 'REFERENCES', 'SELECT', 'TRIGGER', 'TRUNCATE', 'UPDATE']); +SELECT table_privs_are('test_priv', 'service_role', array['DELETE', 'INSERT', 'REFERENCES', 'SELECT', 'TRIGGER', 'TRUNCATE', 'UPDATE']); +reset role; + +-- Verify extensions schema privileges +SELECT schema_privs_are('extensions', 'postgres', array['CREATE', 'USAGE']); +SELECT schema_privs_are('extensions', 'anon', array['USAGE']); +SELECT schema_privs_are('extensions', 'authenticated', array['USAGE']); +SELECT schema_privs_are('extensions', 'service_role', array['USAGE']); + +-- Role memberships +SELECT is_member_of('pg_read_all_data', 'postgres'); +SELECT is_member_of('pg_signal_backend', 'postgres'); diff --git a/postgres_15.8.1.044/migrations/tests/database/test.sql b/postgres_15.8.1.044/migrations/tests/database/test.sql new file mode 100644 index 0000000..465c38a --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/database/test.sql @@ -0,0 +1,3 @@ + +\ir exists.sql +\ir privs.sql diff --git a/postgres_15.8.1.044/migrations/tests/extensions/01-postgis.sql b/postgres_15.8.1.044/migrations/tests/extensions/01-postgis.sql new file mode 100644 index 0000000..23fab50 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/01-postgis.sql @@ -0,0 +1,43 @@ +BEGIN; +create extension if not exists postgis_sfcgal with schema "extensions" cascade; +ROLLBACK; + +BEGIN; +create extension if not exists postgis_raster with schema "extensions" cascade; +ROLLBACK; + +BEGIN; +-- create 
postgis tiger as supabase_admin +create extension if not exists address_standardizer with schema extensions; +create extension if not exists postgis_tiger_geocoder cascade; + +-- \ir ansible/files/postgresql_extension_custom_scripts/postgis_tiger_geocoder/after-create.sql +grant usage on schema tiger, tiger_data to postgres with grant option; +grant all privileges on all tables in schema tiger, tiger_data to postgres with grant option; +grant all privileges on all routines in schema tiger, tiger_data to postgres with grant option; +grant all privileges on all sequences in schema tiger, tiger_data to postgres with grant option; +alter default privileges in schema tiger, tiger_data grant all on tables to postgres with grant option; +alter default privileges in schema tiger, tiger_data grant all on routines to postgres with grant option; +alter default privileges in schema tiger, tiger_data grant all on sequences to postgres with grant option; +SET search_path TO extensions, public, tiger, tiger_data; +-- postgres role should have access +set local role postgres; +select tiger.pprint_addy(tiger.pagc_normalize_address('710 E Ben White Blvd, Austin, TX 78704')); + +-- other roles can be granted access +grant usage on schema tiger, tiger_data to authenticated; +grant select on all tables in schema tiger, tiger_data to authenticated; +grant execute on all routines in schema tiger, tiger_data to authenticated; + +-- authenticated role should have access now +set local role authenticated; +select tiger.pprint_addy(tiger.pagc_normalize_address('710 E Ben White Blvd, Austin, TX 78704')); +ROLLBACK; + +BEGIN; +-- address standardizer creates a table in extensions schema, owned by supabase_admin +create extension if not exists address_standardizer_data_us with schema extensions; +-- postgres role should have access +set local role postgres; +select * from extensions.us_lex; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/02-pgrouting.sql b/postgres_15.8.1.044/migrations/tests/extensions/02-pgrouting.sql new file mode 100644 index 0000000..27dec0b --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/02-pgrouting.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pgrouting with schema "extensions" cascade; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/03-pgtap.sql b/postgres_15.8.1.044/migrations/tests/extensions/03-pgtap.sql new file mode 100644 index 0000000..ddce974 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/03-pgtap.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pgtap with schema "extensions"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/04-pg_cron.sql b/postgres_15.8.1.044/migrations/tests/extensions/04-pg_cron.sql new file mode 100644 index 0000000..7f6686f --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/04-pg_cron.sql @@ -0,0 +1,25 @@ +BEGIN; +-- create cron extension as supabase_admin +create extension if not exists pg_cron; + +-- \ir migrations/db/init-scripts/00000000000003-post-setup.sql +grant usage on schema cron to postgres with grant option; +alter default privileges in schema cron grant all on tables to postgres with grant option; +alter default privileges in schema cron grant all on routines to postgres with grant option; +alter default privileges in schema cron grant all on sequences to postgres with grant option; +grant all privileges on all tables in schema cron to postgres with grant option; +grant all privileges on all 
routines in schema cron to postgres with grant option; +grant all privileges on all sequences in schema cron to postgres with grant option; + +-- postgres role should have access +set local role postgres; +select * from cron.job; + +-- other roles can be granted access +grant usage on schema cron to authenticated; +grant select on all tables in schema cron to authenticated; + +-- authenticated role should have access now +set local role authenticated; +select * from cron.job; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/05-pgaudit.sql b/postgres_15.8.1.044/migrations/tests/extensions/05-pgaudit.sql new file mode 100644 index 0000000..70ee578 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/05-pgaudit.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pgaudit with schema "extensions"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/06-pgjwt.sql b/postgres_15.8.1.044/migrations/tests/extensions/06-pgjwt.sql new file mode 100644 index 0000000..36782fd --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/06-pgjwt.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pgjwt with schema "extensions" cascade; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/07-pgsql-http.sql b/postgres_15.8.1.044/migrations/tests/extensions/07-pgsql-http.sql new file mode 100644 index 0000000..8c37feb --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/07-pgsql-http.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists http with schema "extensions"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/08-plpgsql_check.sql b/postgres_15.8.1.044/migrations/tests/extensions/08-plpgsql_check.sql new file mode 100644 index 0000000..0bc8e8b --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/08-plpgsql_check.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists plpgsql_check with schema "extensions"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/09-pg-safeupdate.sql b/postgres_15.8.1.044/migrations/tests/extensions/09-pg-safeupdate.sql new file mode 100644 index 0000000..c08ec2e --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/09-pg-safeupdate.sql @@ -0,0 +1,3 @@ +BEGIN; +alter role postgres set session_preload_libraries = 'safeupdate'; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/10-timescaledb.sql b/postgres_15.8.1.044/migrations/tests/extensions/10-timescaledb.sql new file mode 100644 index 0000000..acf32b8 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/10-timescaledb.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists timescaledb with schema "extensions"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/11-wal2json.sql b/postgres_15.8.1.044/migrations/tests/extensions/11-wal2json.sql new file mode 100644 index 0000000..ab25131 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/11-wal2json.sql @@ -0,0 +1,6 @@ +BEGIN; +select pg_drop_replication_slot(slot_name) from pg_replication_slots where slot_name = 'test_slot'; +select * from pg_create_logical_replication_slot('test_slot', 'wal2json'); +-- a rollback of the txn does not remove the logical replication slot that gets created, so we need to manually drop it +select pg_drop_replication_slot(slot_name) from pg_replication_slots where slot_name = 'test_slot'; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/12-pljava.sql 
b/postgres_15.8.1.044/migrations/tests/extensions/12-pljava.sql new file mode 100644 index 0000000..b51c824 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/12-pljava.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pljava with schema "sqlj"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/13-plv8.sql b/postgres_15.8.1.044/migrations/tests/extensions/13-plv8.sql new file mode 100644 index 0000000..a407925 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/13-plv8.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists plv8 with schema "pg_catalog"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/14-pg_plan_filter.sql b/postgres_15.8.1.044/migrations/tests/extensions/14-pg_plan_filter.sql new file mode 100644 index 0000000..941e1d6 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/14-pg_plan_filter.sql @@ -0,0 +1,3 @@ +BEGIN; +alter role postgres set session_preload_libraries = 'plan_filter'; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/15-pg_net.sql b/postgres_15.8.1.044/migrations/tests/extensions/15-pg_net.sql new file mode 100644 index 0000000..121e11e --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/15-pg_net.sql @@ -0,0 +1,23 @@ +BEGIN; +-- create net extension as supabase_admin +create extension if not exists pg_net with schema "extensions"; + +-- \ir migrations/db/init-scripts/00000000000003-post-setup.sql +grant usage on schema net TO postgres, anon, authenticated, service_role; +alter function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) security definer; +alter function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) security definer; +alter function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; +alter function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; +revoke all on function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) from public; +revoke all on function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) from public; +grant execute on function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO postgres, anon, authenticated, service_role; +grant execute on function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO postgres, anon, authenticated, service_role; + +-- postgres role should have access +set local role postgres; +select net.http_get('http://localhost', null::jsonb, null::jsonb, 100); + +-- authenticated role should have access +set local role authenticated; +select net.http_get('http://localhost', null::jsonb, null::jsonb, 100); +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/16-rum.sql b/postgres_15.8.1.044/migrations/tests/extensions/16-rum.sql new file mode 100644 index 0000000..95b0845 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/16-rum.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists rum with schema "extensions"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/17-pg_hashids.sql b/postgres_15.8.1.044/migrations/tests/extensions/17-pg_hashids.sql new file mode 100644 index 0000000..594c7e5 --- /dev/null +++ 
b/postgres_15.8.1.044/migrations/tests/extensions/17-pg_hashids.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pg_hashids with schema "extensions"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/18-pgsodium.sql b/postgres_15.8.1.044/migrations/tests/extensions/18-pgsodium.sql new file mode 100644 index 0000000..1c2ff98 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/18-pgsodium.sql @@ -0,0 +1,4 @@ +BEGIN; +create schema if not exists "pgsodium"; +create extension if not exists pgsodium with schema "pgsodium"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/19-pg_graphql.sql b/postgres_15.8.1.044/migrations/tests/extensions/19-pg_graphql.sql new file mode 100644 index 0000000..f55e940 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/19-pg_graphql.sql @@ -0,0 +1,4 @@ +BEGIN; +create schema if not exists "graphql"; +create extension if not exists pg_graphql with schema "graphql"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/20-pg_stat_monitor.sql b/postgres_15.8.1.044/migrations/tests/extensions/20-pg_stat_monitor.sql new file mode 100644 index 0000000..f4075a2 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/20-pg_stat_monitor.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pg_stat_monitor with schema "extensions"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/21-auto_explain.sql b/postgres_15.8.1.044/migrations/tests/extensions/21-auto_explain.sql new file mode 100644 index 0000000..e69de29 diff --git a/postgres_15.8.1.044/migrations/tests/extensions/22-pg_jsonschema.sql b/postgres_15.8.1.044/migrations/tests/extensions/22-pg_jsonschema.sql new file mode 100644 index 0000000..d357b61 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/22-pg_jsonschema.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pg_jsonschema with schema "extensions"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/23-vault.sql b/postgres_15.8.1.044/migrations/tests/extensions/23-vault.sql new file mode 100644 index 0000000..a2a9086 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/23-vault.sql @@ -0,0 +1,4 @@ +BEGIN; +create schema if not exists "vault"; +create extension if not exists supabase_vault with schema "vault" cascade; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/24-pgroonga.sql b/postgres_15.8.1.044/migrations/tests/extensions/24-pgroonga.sql new file mode 100644 index 0000000..bf3fda7 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/24-pgroonga.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pgroonga with schema "extensions"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/25-wrappers.sql b/postgres_15.8.1.044/migrations/tests/extensions/25-wrappers.sql new file mode 100644 index 0000000..4f7f7ac --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/25-wrappers.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists wrappers with schema "extensions"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/26-hypopg.sql b/postgres_15.8.1.044/migrations/tests/extensions/26-hypopg.sql new file mode 100644 index 0000000..e6e0706 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/26-hypopg.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists hypopg with schema "extensions"; +ROLLBACK; diff --git 
a/postgres_15.8.1.044/migrations/tests/extensions/27-pg_repack.sql b/postgres_15.8.1.044/migrations/tests/extensions/27-pg_repack.sql new file mode 100644 index 0000000..200cf78 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/27-pg_repack.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists pg_repack with schema "extensions"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/28-pgvector.sql b/postgres_15.8.1.044/migrations/tests/extensions/28-pgvector.sql new file mode 100644 index 0000000..437bdae --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/28-pgvector.sql @@ -0,0 +1,3 @@ +BEGIN; +create extension if not exists vector with schema "extensions"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/29-pg_tle.sql b/postgres_15.8.1.044/migrations/tests/extensions/29-pg_tle.sql new file mode 100644 index 0000000..29a8fdc --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/29-pg_tle.sql @@ -0,0 +1,4 @@ +BEGIN; +create schema if not exists "pgtle"; +create extension if not exists pg_tle with schema "pgtle"; +ROLLBACK; diff --git a/postgres_15.8.1.044/migrations/tests/extensions/test.sql b/postgres_15.8.1.044/migrations/tests/extensions/test.sql new file mode 100644 index 0000000..7e0d1f3 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/extensions/test.sql @@ -0,0 +1,30 @@ + +\ir 01-postgis.sql +\ir 02-pgrouting.sql +\ir 03-pgtap.sql +\ir 04-pg_cron.sql +\ir 05-pgaudit.sql +\ir 06-pgjwt.sql +\ir 07-pgsql-http.sql +\ir 08-plpgsql_check.sql +\ir 09-pg-safeupdate.sql +\ir 10-timescaledb.sql +\ir 11-wal2json.sql +-- \ir 12-pljava.sql +\ir 13-plv8.sql +\ir 14-pg_plan_filter.sql +\ir 15-pg_net.sql +\ir 16-rum.sql +\ir 17-pg_hashids.sql +\ir 18-pgsodium.sql +\ir 19-pg_graphql.sql +\ir 20-pg_stat_monitor.sql +\ir 21-auto_explain.sql +\ir 22-pg_jsonschema.sql +\ir 23-vault.sql +\ir 24-pgroonga.sql +\ir 25-wrappers.sql +\ir 26-hypopg.sql +\ir 27-pg_repack.sql +\ir 28-pgvector.sql +\ir 29-pg_tle.sql diff --git a/postgres_15.8.1.044/migrations/tests/fixtures.sql b/postgres_15.8.1.044/migrations/tests/fixtures.sql new file mode 100644 index 0000000..f1d36e0 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/fixtures.sql @@ -0,0 +1,67 @@ +CREATE ROLE test_user_role; + +CREATE ROLE test_admin_role; + +GRANT authenticated TO test_user_role; + +GRANT postgres TO test_admin_role; + +INSERT INTO auth.users (id, "role", email) + VALUES (gen_random_uuid (), 'test_user_role', 'bob@supabase.com') +RETURNING + * \gset bob_ + +INSERT INTO auth.users (id, "role", email) + VALUES (gen_random_uuid (), 'test_user_role', 'alice@supabase.com') +RETURNING + * \gset alice_ + +INSERT INTO auth.users (id, "role", email) + VALUES (gen_random_uuid (), 'test_admin_role', 'admin@supabase.com') +RETURNING + * \gset admin_ + +CREATE OR REPLACE FUNCTION test_logout () + RETURNS void + LANGUAGE plpgsql + AS $$ +BEGIN + PERFORM + set_config('request.jwt.claim.sub', NULL, TRUE); + PERFORM + set_config('request.jwt.claim.role', NULL, TRUE); + PERFORM + set_config('request.jwt.claim.email', NULL, TRUE); + RESET ROLE; +END; +$$; + +CREATE OR REPLACE FUNCTION test_login (user_email text, logout_first boolean = TRUE) + RETURNS auth.users + LANGUAGE plpgsql + AS $$ +DECLARE + auth_user auth.users; +BEGIN + IF logout_first THEN + PERFORM + test_logout (); + END IF; + SELECT + * INTO auth_user + FROM + auth.users + WHERE + email = user_email; + PERFORM + set_config('request.jwt.claim.sub', (auth_user).id::text, TRUE); + PERFORM + 
set_config('request.jwt.claim.role', (auth_user).ROLE, TRUE); + PERFORM + set_config('request.jwt.claim.email', (auth_user).email, TRUE); + RAISE NOTICE '%', format( 'SET ROLE %I; -- Logging in as %L (%L)', (auth_user).ROLE, (auth_user).id, (auth_user).email); + EXECUTE format('SET ROLE %I', (auth_user).ROLE); + RETURN auth_user; +END; +$$; + diff --git a/postgres_15.8.1.044/migrations/tests/storage/exists.sql b/postgres_15.8.1.044/migrations/tests/storage/exists.sql new file mode 100644 index 0000000..fae3d10 --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/storage/exists.sql @@ -0,0 +1,13 @@ + +-- Sanity test object existence in storage schema + +select has_table('storage'::name, 'buckets'::name); +select has_table('storage'::name, 'objects'::name); +select has_table('storage'::name, 'migrations'::name); +select has_function('storage'::name, 'foldername'::name); +select has_function('storage'::name, 'filename'::name); +select has_function('storage'::name, 'extension'::name); +select has_function('storage'::name, 'search'::name); + +select todo('This test should probably fail.'); select schema_privs_are('storage', 'anon', ARRAY['USAGE']); + diff --git a/postgres_15.8.1.044/migrations/tests/storage/privs.sql b/postgres_15.8.1.044/migrations/tests/storage/privs.sql new file mode 100644 index 0000000..72d1d5c --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/storage/privs.sql @@ -0,0 +1 @@ +select is_member_of('authenticator', 'supabase_storage_admin'); diff --git a/postgres_15.8.1.044/migrations/tests/storage/test.sql b/postgres_15.8.1.044/migrations/tests/storage/test.sql new file mode 100644 index 0000000..465c38a --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/storage/test.sql @@ -0,0 +1,3 @@ + +\ir exists.sql +\ir privs.sql diff --git a/postgres_15.8.1.044/migrations/tests/test.sql b/postgres_15.8.1.044/migrations/tests/test.sql new file mode 100644 index 0000000..9682b4a --- /dev/null +++ b/postgres_15.8.1.044/migrations/tests/test.sql @@ -0,0 +1,26 @@ +-- Check and create OrioleDB if available +DO $$ +BEGIN + IF EXISTS (SELECT 1 FROM pg_available_extensions WHERE name = 'orioledb') THEN + IF NOT EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'orioledb') THEN + CREATE EXTENSION orioledb; + END IF; + END IF; +END $$; + +-- Create all extensions +\ir extensions/test.sql + +BEGIN; + +CREATE EXTENSION IF NOT EXISTS pgtap; + +SELECT no_plan(); + +\ir fixtures.sql +\ir database/test.sql +\ir storage/test.sql + +SELECT * FROM finish(); + +ROLLBACK; diff --git a/postgres_15.8.1.044/nix/cargo-pgrx/buildPgrxExtension.nix b/postgres_15.8.1.044/nix/cargo-pgrx/buildPgrxExtension.nix new file mode 100644 index 0000000..89293ab --- /dev/null +++ b/postgres_15.8.1.044/nix/cargo-pgrx/buildPgrxExtension.nix @@ -0,0 +1,161 @@ +# preBuildAndTest and some small other bits +# taken from https://github.com/tcdi/pgrx/blob/v0.9.4/nix/extension.nix +# (but now heavily modified) +# which uses MIT License with the following license file +# +# MIT License +# +# Portions Copyright 2019-2021 ZomboDB, LLC. +# Portions Copyright 2021-2022 Technology Concepts & Design, Inc. . +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +{ lib +, cargo-pgrx +, pkg-config +, rustPlatform +, stdenv +, Security +, writeShellScriptBin +}: + +# The idea behind: Use it mostly like rustPlatform.buildRustPackage and so +# we hand most of the arguments down. +# +# Additional arguments are: +# - `postgresql` postgresql package of the version of postgresql this extension should be build for. +# Needs to be the build platform variant. +# - `useFakeRustfmt` Whether to use a noop fake command as rustfmt. cargo-pgrx tries to call rustfmt. +# If the generated rust bindings aren't needed to use the extension, its a +# unnecessary and heavy dependency. If you set this to true, you also +# have to add `rustfmt` to `nativeBuildInputs`. + +{ buildAndTestSubdir ? null +, buildType ? "release" +, buildFeatures ? [ ] +, cargoBuildFlags ? [ ] +, postgresql +# cargo-pgrx calls rustfmt on generated bindings, this is not strictly necessary, so we avoid the +# dependency here. Set to false and provide rustfmt in nativeBuildInputs, if you need it, e.g. +# if you include the generated code in the output via postInstall. +, useFakeRustfmt ? true +, usePgTestCheckFeature ? true +, ... +} @ args: +let + rustfmtInNativeBuildInputs = lib.lists.any (dep: lib.getName dep == "rustfmt") (args.nativeBuildInputs or []); +in + +assert lib.asserts.assertMsg ((args.installPhase or "") == "") + "buildPgrxExtensions overwrites the installPhase, so providing one does nothing"; +assert lib.asserts.assertMsg ((args.buildPhase or "") == "") + "buildPgrxExtensions overwrites the buildPhase, so providing one does nothing"; +assert lib.asserts.assertMsg (useFakeRustfmt -> !rustfmtInNativeBuildInputs) + "The parameter useFakeRustfmt is set to true, but rustfmt is included in nativeBuildInputs. Either set useFakeRustfmt to false or remove rustfmt from nativeBuildInputs."; +assert lib.asserts.assertMsg (!useFakeRustfmt -> rustfmtInNativeBuildInputs) + "The parameter useFakeRustfmt is set to false, but rustfmt is not included in nativeBuildInputs. 
Either set useFakeRustfmt to true or add rustfmt from nativeBuildInputs."; + +let + fakeRustfmt = writeShellScriptBin "rustfmt" '' + exit 0 + ''; + maybeDebugFlag = lib.optionalString (buildType != "release") "--debug"; + maybeEnterBuildAndTestSubdir = lib.optionalString (buildAndTestSubdir != null) '' + export CARGO_TARGET_DIR="$(pwd)/target" + pushd "${buildAndTestSubdir}" + ''; + maybeLeaveBuildAndTestSubdir = lib.optionalString (buildAndTestSubdir != null) "popd"; + + pgrxPostgresMajor = lib.versions.major postgresql.version; + preBuildAndTest = '' + export PGRX_HOME=$(mktemp -d) + export PGDATA="$PGRX_HOME/data-${pgrxPostgresMajor}/" + cargo-pgrx pgrx init "--pg${pgrxPostgresMajor}" ${lib.getDev postgresql}/bin/pg_config + echo "unix_socket_directories = '$(mktemp -d)'" > "$PGDATA/postgresql.conf" + + # This is primarily for Mac or other Nix systems that don't use the nixbld user. + export USER="$(whoami)" + pg_ctl start + createuser -h localhost --superuser --createdb "$USER" || true + pg_ctl stop + ''; + + argsForBuildRustPackage = builtins.removeAttrs args [ "postgresql" "useFakeRustfmt" "usePgTestCheckFeature" ]; + + # so we don't accidentally `(rustPlatform.buildRustPackage argsForBuildRustPackage) // { ... }` because + # we forgot parentheses + finalArgs = argsForBuildRustPackage // { + buildInputs = (args.buildInputs or [ ]) ++ lib.optionals stdenv.hostPlatform.isDarwin [ Security ]; + + nativeBuildInputs = (args.nativeBuildInputs or [ ]) ++ [ + cargo-pgrx + postgresql + pkg-config + rustPlatform.bindgenHook + ] ++ lib.optionals useFakeRustfmt [ fakeRustfmt ]; + + buildPhase = '' + runHook preBuild + + echo "Executing cargo-pgrx buildPhase" + ${preBuildAndTest} + ${maybeEnterBuildAndTestSubdir} + + PGRX_BUILD_FLAGS="--frozen -j $NIX_BUILD_CORES ${builtins.concatStringsSep " " cargoBuildFlags}" \ + ${lib.optionalString stdenv.hostPlatform.isDarwin ''RUSTFLAGS="''${RUSTFLAGS:+''${RUSTFLAGS} }-Clink-args=-Wl,-undefined,dynamic_lookup"''} \ + cargo pgrx package \ + --pg-config ${lib.getDev postgresql}/bin/pg_config \ + ${maybeDebugFlag} \ + --features "${builtins.concatStringsSep " " buildFeatures}" \ + --out-dir "$out" + + ${maybeLeaveBuildAndTestSubdir} + + runHook postBuild + ''; + + preCheck = preBuildAndTest + args.preCheck or ""; + + installPhase = '' + runHook preInstall + + echo "Executing buildPgrxExtension install" + + ${maybeEnterBuildAndTestSubdir} + + cargo-pgrx pgrx stop all + + mv $out/${postgresql}/* $out + rm -rf $out/nix + + ${maybeLeaveBuildAndTestSubdir} + + runHook postInstall + ''; + + PGRX_PG_SYS_SKIP_BINDING_REWRITE = "1"; + CARGO_BUILD_INCREMENTAL = "false"; + RUST_BACKTRACE = "full"; + + checkNoDefaultFeatures = true; + checkFeatures = (args.checkFeatures or [ ]) ++ (lib.optionals usePgTestCheckFeature [ "pg_test" ]) ++ [ "pg${pgrxPostgresMajor}" ]; + }; +in +rustPlatform.buildRustPackage finalArgs diff --git a/postgres_15.8.1.044/nix/cargo-pgrx/default.nix b/postgres_15.8.1.044/nix/cargo-pgrx/default.nix new file mode 100644 index 0000000..64e1516 --- /dev/null +++ b/postgres_15.8.1.044/nix/cargo-pgrx/default.nix @@ -0,0 +1,75 @@ +{ lib +, darwin +, fetchCrate +, openssl +, pkg-config +, makeRustPlatform +, stdenv +, rust-bin +}: +let + rustVersion = "1.76.0"; + rustPlatform = makeRustPlatform { + cargo = rust-bin.stable.${rustVersion}.default; + rustc = rust-bin.stable.${rustVersion}.default; + }; + generic = + { version + , hash + , cargoHash + }: + rustPlatform.buildRustPackage rec { + pname = "cargo-pgrx"; + inherit version; + src = fetchCrate { + 
inherit version pname hash; + }; + inherit cargoHash; + nativeBuildInputs = lib.optionals stdenv.hostPlatform.isLinux [ + pkg-config + ]; + buildInputs = lib.optionals stdenv.hostPlatform.isLinux [ + openssl + ] ++ lib.optionals stdenv.hostPlatform.isDarwin [ + darwin.apple_sdk.frameworks.Security + ]; + + OPENSSL_DIR = "${openssl.dev}"; + OPENSSL_INCLUDE_DIR = "${openssl.dev}/include"; + OPENSSL_LIB_DIR = "${openssl.out}/lib"; + PKG_CONFIG_PATH = "${openssl.dev}/lib/pkgconfig"; + preCheck = '' + export PGRX_HOME=$(mktemp -d) + ''; + checkFlags = [ + # requires pgrx to be properly initialized with cargo pgrx init + "--skip=command::schema::tests::test_parse_managed_postmasters" + ]; + meta = with lib; { + description = "Build Postgres Extensions with Rust"; + homepage = "https://github.com/pgcentralfoundation/pgrx"; + changelog = "https://github.com/pgcentralfoundation/pgrx/releases/tag/v${version}"; + license = licenses.mit; + maintainers = with maintainers; [ happysalada ]; + mainProgram = "cargo-pgrx"; + }; + }; +in +{ + cargo-pgrx_0_11_3 = generic { + version = "0.11.3"; + hash = "sha256-UHIfwOdXoJvR4Svha6ud0FxahP1wPwUtviUwUnTmLXU="; + cargoHash = "sha256-j4HnD8Zt9uhlV5N7ldIy9564o9qFEqs5KfXHmnQ1WEw="; + }; + cargo-pgrx_0_12_6 = generic { + version = "0.12.6"; + hash = "sha256-7aQkrApALZe6EoQGVShGBj0UIATnfOy2DytFj9IWdEA="; + cargoHash = "sha256-Di4UldQwAt3xVyvgQT1gUhdvYUVp7n/a72pnX45kP0w="; + }; + cargo-pgrx_0_12_9 = generic { + version = "0.12.9"; + hash = "sha256-aR3DZAjeEEAjLQfZ0ZxkjLqTVMIEbU0UiZ62T4BkQq8="; + cargoHash = "sha256-53HKhvsKLTa2JCByLEcK3UzWXoM+LTatd98zvS1C9no="; + }; + inherit rustPlatform; +} diff --git a/postgres_15.8.1.044/nix/do-not-use-vendored-libraries.patch b/postgres_15.8.1.044/nix/do-not-use-vendored-libraries.patch new file mode 100644 index 0000000..6a00534 --- /dev/null +++ b/postgres_15.8.1.044/nix/do-not-use-vendored-libraries.patch @@ -0,0 +1,15 @@ +Do not use vendored libraries + +--- a/vendor/CMakeLists.txt ++++ b/vendor/CMakeLists.txt +@@ -14,10 +14,7 @@ + # License along with this library; if not, write to the Free Software + # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + + add_subdirectory(onigmo) +-add_subdirectory(mruby) +-add_subdirectory(mecab) +-add_subdirectory(message_pack) + if(GRN_WITH_MRUBY) + add_subdirectory(groonga-log) + endif() \ No newline at end of file diff --git a/postgres_15.8.1.044/nix/docker/init.sh.in b/postgres_15.8.1.044/nix/docker/init.sh.in new file mode 100644 index 0000000..5d39e7a --- /dev/null +++ b/postgres_15.8.1.044/nix/docker/init.sh.in @@ -0,0 +1,5 @@ +#!/bin/bash +# shellcheck shell=bash +/bin/initdb --locale=C -D /data/postgresql --username=supabase_admin +ln -s /etc/postgresql.conf /data/postgresql/postgresql.conf +/bin/postgres -p @PGSQL_DEFAULT_PORT@ -D /data/postgresql diff --git a/postgres_15.8.1.044/nix/docs/README.md b/postgres_15.8.1.044/nix/docs/README.md new file mode 100644 index 0000000..4006329 --- /dev/null +++ b/postgres_15.8.1.044/nix/docs/README.md @@ -0,0 +1,8 @@ +# Documentation + +This directory contains most of the "runbooks" and documentation on how to use +this repository. + +You probably want to start with the [starting guide](./start-here.md). Then, +learn how to play with `postgres` in the [build guide](./build-postgres.md). +After that, you can probe around a bit. 
diff --git a/postgres_15.8.1.044/nix/docs/adding-new-package.md b/postgres_15.8.1.044/nix/docs/adding-new-package.md new file mode 100644 index 0000000..30a85f7 --- /dev/null +++ b/postgres_15.8.1.044/nix/docs/adding-new-package.md @@ -0,0 +1,160 @@ +# Adding a new extension package + + +## Pre-packaging steps +1. Make sure you have nix installed: [Nix installer](https://github.com/DeterminateSystems/nix-installer) +2. Create a branch off of `develop` + + +## C/C++ postgres extensions + +If you are creating a C/C++ extension, the pattern found in https://github.com/supabase/postgres/blob/develop/nix/ext/pgvector.nix will work well. + +``` +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "pgvector"; + version = "0.7.4"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "pgvector"; + repo = pname; + rev = "refs/tags/v${version}"; + hash = "sha256-qwPaguQUdDHV8q6GDneLq5MuhVroPizpbqt7f08gKJI="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *.so $out/lib + cp sql/*.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Open-source vector similarity search for Postgres"; + homepage = "https://github.com/${src.owner}/${src.repo}"; + maintainers = with maintainers; [ olirice ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} +``` + +This uses `stdenv.mkDerivation`, a general nix builder for C and C++ projects (and others). It can auto-detect the Makefile and attempt to use it. ***It's good practice not to have steps in the Makefile of your project that deal with OS-specific system paths or make calls out to the internet, as Nix cannot use these steps to build your project.*** + +Your build should produce all of the sql and control files needed for the install phase. + +1. Once you have created this file, save it as `nix/ext/<extension-name>.nix`, then edit `flake.nix` and add it to the `ourExtensions` list. +2. `git add .`, as nix uses git to track changes. +3. In your package file, temporarily empty the hash, changing `hash = "sha256-<...>=";` to `hash = "";`, then save and `git add .` again. +4. Run `nix build .#psql_15/exts/<extension-name>` to trigger a build; nix will print the calculated sha256 value, which you can add back to the `hash` variable. Save the file again and re-run `nix build .#psql_15/exts/<extension-name>`. +5. Add any needed migrations into the `supabase/postgres` migrations directory (a sketch of such a migration follows this section). +6. You can then run tests locally to verify that the update of the package succeeded. +7. Now it's ready for PR review! + +## Extensions written in Rust that use the `buildPgrxExtension` builder + +Extensions like: + +* https://github.com/supabase/postgres/blob/develop/nix/ext/wrappers/default.nix +* https://github.com/supabase/postgres/blob/develop/nix/ext/pg_graphql.nix +* https://github.com/supabase/postgres/blob/develop/nix/ext/pg_jsonschema.nix + +are written in Rust, built with `cargo`, and need https://github.com/pgcentralfoundation/pgrx to build the extension.
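Before moving on to the pgrx builder: step 5 in the C/C++ section above mentions adding migrations. A typical migration for a newly packaged extension is often little more than a `create extension` statement. The sketch below is illustrative only — the file name, the extension name `my_extension`, and the target schema are assumptions for this example, not something the repo prescribes:

```sql
-- Hypothetical migration file, e.g.
-- migrations/db/migrations/20250101000000_add_my_extension.sql
-- ("my_extension" and the "extensions" schema are placeholders).
create extension if not exists my_extension
    with schema extensions;
```

Keeping the migration idempotent (`if not exists`) means re-running it against an already-provisioned database is harmless.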
+ +We in turn have a special nix package builder, sourced from `nixpkgs`, called `buildPgrxExtension`. + +A simple example is found in `pg_jsonschema`: + + +``` +{ lib, stdenv, fetchFromGitHub, postgresql, buildPgrxExtension_0_11_3, cargo }: + +buildPgrxExtension_0_11_3 rec { + pname = "pg_jsonschema"; + version = "0.3.1"; + inherit postgresql; + + src = fetchFromGitHub { + owner = "supabase"; + repo = pname; + rev = "v${version}"; + hash = "sha256-YdKpOEiDIz60xE7C+EzpYjBcH0HabnDbtZl23CYls6g="; + }; + + nativeBuildInputs = [ cargo ]; + buildInputs = [ postgresql ]; + # update the following array when the pg_jsonschema version is updated + # required to ensure that extension update scripts from previous versions are generated + + previousVersions = ["0.3.0" "0.2.0" "0.1.4" "0.1.4" "0.1.2" "0.1.1" "0.1.0"]; + CARGO="${cargo}/bin/cargo"; + env = lib.optionalAttrs stdenv.isDarwin { + POSTGRES_LIB = "${postgresql}/lib"; + RUSTFLAGS = "-C link-arg=-undefined -C link-arg=dynamic_lookup"; + }; + cargoHash = "sha256-VcS+efMDppofuFW2zNrhhsbC28By3lYekDFquHPta2g="; + + # FIXME (aseipp): testsuite tries to write files into /nix/store; we'll have + # to fix this a bit later. + doCheck = false; + + preBuild = '' + echo "Processing git tags..." + echo '${builtins.concatStringsSep "," previousVersions}' | sed 's/,/\n/g' > git_tags.txt + ''; + + postInstall = '' + echo "Creating SQL files for previous versions..." + current_version="${version}" + sql_file="$out/share/postgresql/extension/pg_jsonschema--$current_version.sql" + + if [ -f "$sql_file" ]; then + while read -r previous_version; do + if [ "$(printf '%s\n' "$previous_version" "$current_version" | sort -V | head -n1)" = "$previous_version" ] && [ "$previous_version" != "$current_version" ]; then + new_file="$out/share/postgresql/extension/pg_jsonschema--$previous_version--$current_version.sql" + echo "Creating $new_file" + cp "$sql_file" "$new_file" + fi + done < git_tags.txt + else + echo "Warning: $sql_file not found" + fi + rm git_tags.txt + ''; + + + meta = with lib; { + description = "JSON Schema Validation for PostgreSQL"; + homepage = "https://github.com/supabase/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} +``` + +Here we have built support in our overlay to pin `buildPgrxExtension` to a specific version (in this case `buildPgrxExtension_0_11_3`). This is currently the only version we can support, but our overlay https://github.com/supabase/postgres/blob/develop/nix/overlays/cargo-pgrx-0-11-3.nix can be extended to support other versions. + +A few things about `buildPgrxExtension_x`: + +* It doesn't support `buildPhase` or `installPhase`; those are implemented directly in the builder already +* It mostly just lets `cargo build` do its thing, but you may need to set env vars for the build process, as seen above +* It calculates a special `cargoHash`, which is generated after the `src` hash, when running `nix build .#psql_15/exts/<extension-name>` to build the extension + + +## Post Nix derivation release steps + + +1. You can add and run tests as described in https://github.com/supabase/postgres/blob/develop/nix/docs/adding-tests.md +2. You may need to add tests to our test.yml gh action workflow as well. +3. 
You can add the package name and version to `ansible/vars.yml`; it is not necessary to add the sha256 hash here, as the package is already built and cached in our release process before these vars are ever used. +4. To check that all your files will land in the overall build correctly, you can run `nix profile install .#psql_15/bin` on your machine and look in `~/.nix-profile/bin`, `~/.nix-profile/lib`, and `~/.nix-profile/share/postgresql/*`; you should see your lib, `.control`, and sql files there. +5. You can also run `nix run .#start-server 15` and, in a new terminal window, `nix run .#start-client-and-migrate 15`, then try to `CREATE EXTENSION <extension-name>` and work with it there. +6. Check that your extension works with the `pg_upgrade` process (TODO: documentation forthcoming). +7. Now you are ready to PR the extension. +8. From here, the release process should typically take care of the rest. \ No newline at end of file diff --git a/postgres_15.8.1.044/nix/docs/adding-tests.md b/postgres_15.8.1.044/nix/docs/adding-tests.md new file mode 100644 index 0000000..e828a3e --- /dev/null +++ b/postgres_15.8.1.044/nix/docs/adding-tests.md @@ -0,0 +1,100 @@ +There are basically three types of tests you can add: + +- pgTAP based tests, +- pg\_regress tests, and +- Migration tests. + +In all cases, a number of extensions may be installed into the database for +use; you can see those in both [postgresql.conf.in](../tests/postgresql.conf.in) +and [prime.sql](../tests/prime.sql) (extensions may be enabled in either place.) + +## pg\_regress tests + +pg\_regress tests are in [tests/sql](./../tests/sql/) with output in [tests/expected](./../tests/expected/). +To create a new test, create a new SQL file in [tests/sql](./../tests/sql/) and then run: + +``` +nix flake check -L +``` + +Next, review the logs to identify where the test output was written: + +``` +postgres> CREATE EXTENSION IF NOT EXISTS index_advisor; +postgres> CREATE EXTENSION +postgres> (using postmaster on localhost, port 5432) +postgres> ============== running regression test queries ============== +postgres> test new_test ... diff: /nix/store/5gk419ddz7mzzwhc9j6yj5i8lkw67pdl-tests/expected/new_test.out: No such file or directory +postgres> diff command failed with status 512: diff "/nix/store/5gk419ddz7mzzwhc9j6yj5i8lkw67pdl-tests/expected/new_test.out" "/nix/store/2fbrvnnr7iz6yigyf0rb0vxnyqvrgxzp-postgres-15.6-check-harness/regression_output/results/new_test.out" > "/nix/store/2fbrvnnr7iz6yigyf0rb0vxnyqvrgxzp-postgres-15.6-check-harness/regression_output/results/new_test.out.diff +``` + +and copy the `regression_output` directory to somewhere you can review it: + +``` +cp -r /nix/store/2fbrvnnr7iz6yigyf0rb0vxnyqvrgxzp-postgres-15.6-check-harness/regression_output . +``` + +Then you can review the contents of `regression_output/results/new_test.out` to see if it matches what you expected. + +If it does match your expectations, copy the file to [tests/expected](./../tests/expected/) and the test will pass on the next run. + +If the output does not match your expectations, update the `.sql` file, re-run with `nix flake check -L`, and try again. + + +## pgTAP tests + +These are super easy: simply add `.sql` files to the +[tests/smoke](./../tests/smoke/) directory, then: + +``` +nix flake check -L +``` + +(`-L` prints logs to stderr; for more details see `man nix`) + +These files are run using `pg_prove`; they behave pretty much exactly how +you expect, and you can read +[the pgTAP documentation](https://pgtap.org/documentation.html) for more.
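To make that concrete, here is a minimal smoke test of the kind that could live in `tests/smoke/` — a sketch only: the file name is hypothetical, and the objects it probes are simply the `storage` tables created by the schema dump earlier in this commit:

```sql
-- Hypothetical tests/smoke/my_smoke_test.sql, executed by pg_prove
-- when `nix flake check -L` runs the smoke-test suite.
begin;
select plan(3);  -- declare the number of assertions up front

select has_schema('storage'::name);
select has_table('storage'::name, 'buckets'::name);
select has_table('storage'::name, 'objects'::name);

select * from finish();  -- emit the TAP summary
rollback;
```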
+ +For a good example of a pgTAP test as a pull request, check out +[pull request #4](https://github.com/supabase/nix-postgres/pull/4/files). + +## Re-running tests + +`nix flake check` caches its results, so running it again won't rerun the tests. If you change a file, they will run again. + + + +Limitation: currently there's no way to rerun all the tests, so you have to specify the check attribute. + +To get the correct attribute (e.g. `#checks.x86_64-linux.psql_15`), you can do `nix flake show`. This will show a tree with all the output attributes. + +## Migration tests + +> **NOTE**: Currently, migration tests _do not happen in CI_. They can only be +> run manually. + +Migration tests are pretty simple in the sense that they follow a very simple +principle: + +- You put data in the database +- You run the migration procedure +- It should probably not fail + +Steps 1 and 2 are easy, and for various reasons (e.g. mistakes from upstream +extension authors), step 3 isn't guaranteed, so that's what the whole idea is +designed to test. + +To add data into the database, modify the +[data.sql](../nix/tests/migrations/data.sql) script and add whatever you want into +it. This script gets loaded into the old version of the database at startup, and +it's expected that the new version of the database can handle it. + +To run the `migration-test` tool, check out the documentation on +[migration-tests](./migration-tests.md). diff --git a/postgres_15.8.1.044/nix/docs/build-postgres.md b/postgres_15.8.1.044/nix/docs/build-postgres.md new file mode 100644 index 0000000..2805d44 --- /dev/null +++ b/postgres_15.8.1.044/nix/docs/build-postgres.md @@ -0,0 +1,124 @@ +# 01 — Using supabase nix + +Let's clone this repo: + +```bash +git clone https://github.com/supabase/postgres $HOME/supabase-postgres +cd $HOME/supabase-postgres +``` + +## Hashes for everyone + +But how do we build stuff within it? With `nix build`, of course! 
For example, +the following command will, when completed, create a symlink named `result` that +points to a path which contains an entire PostgreSQL 15 installation — +extensions and all: + +``` +nix build .#psql_15/bin +``` + +``` +$ readlink result +/nix/store/ybf48481x033649mgdzk5dyaqv9dppzx-postgresql-and-plugins-15.3 +``` + +``` +$ ls result +bin include lib share +``` + +``` +$ ll result/bin/ +total 9928 +dr-xr-xr-x 2 root root 4096 Dec 31 1969 ./ +dr-xr-xr-x 5 root root 4096 Dec 31 1969 ../ +lrwxrwxrwx 1 root root 79 Dec 31 1969 .initdb-wrapped -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/.initdb-wrapped* +-r-xr-xr-x 1 root root 9829624 Dec 31 1969 .postgres-wrapped* +lrwxrwxrwx 1 root root 73 Dec 31 1969 clusterdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/clusterdb* +lrwxrwxrwx 1 root root 72 Dec 31 1969 createdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/createdb* +lrwxrwxrwx 1 root root 74 Dec 31 1969 createuser -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/createuser* +lrwxrwxrwx 1 root root 70 Dec 31 1969 dropdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/dropdb* +lrwxrwxrwx 1 root root 72 Dec 31 1969 dropuser -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/dropuser* +lrwxrwxrwx 1 root root 68 Dec 31 1969 ecpg -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/ecpg* +lrwxrwxrwx 1 root root 70 Dec 31 1969 initdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/initdb* +lrwxrwxrwx 1 root root 72 Dec 31 1969 oid2name -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/oid2name* +lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_amcheck -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_amcheck* +lrwxrwxrwx 1 root root 81 Dec 31 1969 pg_archivecleanup -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_archivecleanup* +lrwxrwxrwx 1 root root 77 Dec 31 1969 pg_basebackup -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_basebackup* +lrwxrwxrwx 1 root root 76 Dec 31 1969 pg_checksums -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_checksums* +-r-xr-xr-x 1 root root 53432 Dec 31 1969 pg_config* +lrwxrwxrwx 1 root root 78 Dec 31 1969 pg_controldata -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_controldata* +-r-xr-xr-x 1 root root 82712 Dec 31 1969 pg_ctl* +lrwxrwxrwx 1 root root 71 Dec 31 1969 pg_dump -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_dump* +lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_dumpall -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_dumpall* +lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_isready -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_isready* +lrwxrwxrwx 1 root root 77 Dec 31 1969 pg_receivewal -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_receivewal* +lrwxrwxrwx 1 root root 78 Dec 31 1969 pg_recvlogical -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_recvlogical* +lrwxrwxrwx 1 root root 73 Dec 31 1969 pg_repack -> /nix/store/bi9i5ns4cqxk235qz3srs9p4x1qfxfna-pg_repack-1.4.8/bin/pg_repack* +lrwxrwxrwx 1 root root 75 Dec 31 1969 pg_resetwal -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_resetwal* +lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_restore -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_restore* +lrwxrwxrwx 1 root root 73 Dec 31 1969 
pg_rewind -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_rewind* +lrwxrwxrwx 1 root root 77 Dec 31 1969 pg_test_fsync -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_test_fsync* +lrwxrwxrwx 1 root root 78 Dec 31 1969 pg_test_timing -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_test_timing* +lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_upgrade -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_upgrade* +lrwxrwxrwx 1 root root 79 Dec 31 1969 pg_verifybackup -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_verifybackup* +lrwxrwxrwx 1 root root 74 Dec 31 1969 pg_waldump -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pg_waldump* +lrwxrwxrwx 1 root root 71 Dec 31 1969 pgbench -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/pgbench* +lrwxrwxrwx 1 root root 71 Dec 31 1969 pgsql2shp -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgsql2shp* +lrwxrwxrwx 1 root root 77 Dec 31 1969 pgsql2shp-3.3.3 -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgsql2shp-3.3.3* +lrwxrwxrwx 1 root root 75 Dec 31 1969 pgtopo_export -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgtopo_export* +lrwxrwxrwx 1 root root 81 Dec 31 1969 pgtopo_export-3.3.3 -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgtopo_export-3.3.3* +lrwxrwxrwx 1 root root 75 Dec 31 1969 pgtopo_import -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgtopo_import* +lrwxrwxrwx 1 root root 81 Dec 31 1969 pgtopo_import-3.3.3 -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/pgtopo_import-3.3.3* +-r-xr-xr-x 1 root root 286 Dec 31 1969 postgres* +lrwxrwxrwx 1 root root 74 Dec 31 1969 postmaster -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/postmaster* +lrwxrwxrwx 1 root root 68 Dec 31 1969 psql -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/psql* +lrwxrwxrwx 1 root root 74 Dec 31 1969 raster2pgsql -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/raster2pgsql* +lrwxrwxrwx 1 root root 80 Dec 31 1969 raster2pgsql-3.3.3 -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/raster2pgsql-3.3.3* +lrwxrwxrwx 1 root root 73 Dec 31 1969 reindexdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/reindexdb* +lrwxrwxrwx 1 root root 71 Dec 31 1969 shp2pgsql -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/shp2pgsql* +lrwxrwxrwx 1 root root 77 Dec 31 1969 shp2pgsql-3.3.3 -> /nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3/bin/shp2pgsql-3.3.3* +lrwxrwxrwx 1 root root 72 Dec 31 1969 vacuumdb -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/vacuumdb* +lrwxrwxrwx 1 root root 72 Dec 31 1969 vacuumlo -> /nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3/bin/vacuumlo* +``` + +As we can see, these files all point to paths under `/nix/store`. We're actually +looking at a "farm" of symlinks to various paths, but collectively they form an +entire installation directory we can reuse as much as we want. + +The path +`/nix/store/ybf48481x033649mgdzk5dyaqv9dppzx-postgresql-and-plugins-15.3` +ultimately is a cryptographically hashed, unique name for our installation of +PostgreSQL with those plugins. This hash includes _everything_ used to build it, +so even a single change anywhere to any extension or version would result in a +_new_ hash. 
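+
+You can poke at this structure directly. For example, `nix-store` can list what
+an installation depends on (a sketch; output truncated, with the store paths
+taken from the listing above):
+
+```
+$ nix-store --query --references ./result
+/nix/store/kdjdxnyhpwpvb11da8s99ylqilspcmzl-postgresql-15.3
+/nix/store/4wwzd3c136g6j7aqva2gyiqgwy784qjv-postgis-3.3.3
+/nix/store/bi9i5ns4cqxk235qz3srs9p4x1qfxfna-pg_repack-1.4.8
+...
+```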
+
+The ability to refer to a piece of data by its hash, by some notion of
+_content_, is a very powerful primitive, as we'll see later.
+
+## Build a different version: v16
+
+What if we wanted PostgreSQL 16 and plugins? Just replace `_15` with `_16`:
+
+```
+nix build .#psql_16/bin
+```
+
+You're done; `result` now points at a PostgreSQL 16 build (the exact version
+suffix in the store path depends on the pinned nixpkgs):
+
+```
+$ readlink result
+/nix/store/p7ziflx0000s28bfb213jsghrczknkc4-postgresql-and-plugins-16.x
+```
+
+
+## Using `nix develop`
+
+
+`nix develop .` will drop you in a subshell with the
+tools you need _ready to go instantly_. That's all you need to do! And once that
+shell goes away, Nix-installed tools disappear from your `$PATH` as well.
+
+There's an even easier way to do this
+[that is completely transparent to you, as well](./use-direnv.md).
diff --git a/postgres_15.8.1.044/nix/docs/docker.md b/postgres_15.8.1.044/nix/docs/docker.md
new file mode 100644
index 0000000..198c18d
--- /dev/null
+++ b/postgres_15.8.1.044/nix/docs/docker.md
@@ -0,0 +1,14 @@
+Docker images are pushed to `ghcr.io` on every commit. Try the following:
+
+```
+docker run --rm -it ghcr.io/supabase/nix-postgres-15:latest
+```
+
+Every Docker image that is built on every push is given a tag that exactly
+corresponds to a Git commit in the repository — for example commit
+[d3e0c39d34e1bb4d37e058175a7bc376620f6868](https://github.com/supabase/nix-postgres/commit/d3e0c39d34e1bb4d37e058175a7bc376620f6868)
+in this repository has a tag in the container registry which can be used to pull
+exactly that version.
+
+This just starts the server. Client container images are not provided; you can
+use `nix run` for that, as outlined [here](./start-client-server.md).
diff --git a/postgres_15.8.1.044/nix/docs/migration-tests.md b/postgres_15.8.1.044/nix/docs/migration-tests.md
new file mode 100644
index 0000000..d04bfeb
--- /dev/null
+++ b/postgres_15.8.1.044/nix/docs/migration-tests.md
@@ -0,0 +1,50 @@
+Migration tests are run similarly to the client and server; see
+[more on that here](./start-client-server.md).
+
+The difference is that you use the following format to specify the upgrade:
+
+```
+nix run .#migration-test <from-version> <to-version> [pg_dumpall|pg_upgrade]
+```
+
+The arguments are:
+
+- The version to upgrade from
+- The version to upgrade to
+- The upgrade mechanism: either `pg_dumpall` or `pg_upgrade`
+
+## Specifying the version
+
+The versions for upgrading can take one of two forms:
+
+- A major version number, e.g. `14` or `15`
+- A path under `/nix/store`, which may point to _any_ version of PostgreSQL, as
+  long as it has the expected layout of a PostgreSQL installation
+
+## Always use the latest version of the migration tool
+
+Unlike the method for starting the client or server, you probably always want to
+use the latest version of the `migration-test` tool from the repository. This is
+because it can ensure forwards and backwards compatibility if necessary.
+
+## Upgrading between arbitrary `/nix/store` versions
+
+If you want to test migrations from arbitrary versions built by the repository,
+you can combine `nix build` and `nix run` to do so. You can use the syntax from
+the runbook on [running the server & client](./start-client-server.md) to refer
+to arbitrary git revisions.
+
+For example, if you updated an extension in this repository, and you want to
+test a migration from PostgreSQL 14 to PostgreSQL 14 + (updated extension),
+using `pg_upgrade` — simply record the two git commits you want to
+compare, and you could do something like the following:
+
+```
+OLD_GIT_VERSION=...
+NEW_GIT_VERSION=...
+
+nix run github:supabase/nix-postgres#migration-test \
+    $(nix build --no-link --print-out-paths "github:supabase/nix-postgres/$OLD_GIT_VERSION#psql_14/bin") \
+    $(nix build --no-link --print-out-paths "github:supabase/nix-postgres/$NEW_GIT_VERSION#psql_14/bin") \
+    pg_upgrade
+```
+
+Note the `--no-link --print-out-paths` flags: plain `nix build` prints nothing
+on stdout, so without them the `$(...)` substitutions would be empty.
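+
+As a quick sanity check, the same tool also accepts plain major versions from
+the current checkout (a sketch using the major-version argument form described
+above):
+
+```
+nix run .#migration-test 14 15 pg_upgrade
+```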
diff --git a/postgres_15.8.1.044/nix/docs/new-major-postgres.md b/postgres_15.8.1.044/nix/docs/new-major-postgres.md
new file mode 100644
index 0000000..1c5a2df
--- /dev/null
+++ b/postgres_15.8.1.044/nix/docs/new-major-postgres.md
@@ -0,0 +1,34 @@
+PostgreSQL versions are managed in upstream nixpkgs.
+
+See [this example PR](https://github.com/NixOS/nixpkgs/pull/249030) for adding a
+new version of PostgreSQL; that PR is for 16 beta3, but any version is roughly
+the same. In short, you need to:
+
+- Add a new version and hash
+- Possibly patch the source code for minor refactorings
+  - In this example, an old patch had to be rewritten because a function was
+    split into two different functions; the patch is functionally equivalent but
+    textually different
+- Add the changes to `all-packages.nix`
+- Integrate inside the CI and get code review
+- Run `nix flake update` to get the new version, once it's ready
+
+## Adding the major version to this repository
+
+It isn't well abstracted, unfortunately. In short: look for the strings `14` and
+`15` under `flake.nix` and `nix/tools/`. More specifically:
+
+- Add `psql_XX` to `basePackages` in `flake.nix`
+- Ditto with `checks` in `flake.nix`
+- Modify the tools under `tools/` to understand the new major version
+- Make sure the CI is integrated in the GitHub Actions workflows.
+
+The third and fourth steps are the most annoying. The first two are easy, and by
+that point you can at least run `nix flake check` to test the build.
+
+## Other notes
+
+See also issue [#6](https://github.com/supabase/nix-postgres/issues/6), which
+would make it possible to define PostgreSQL versions inside this repository.
diff --git a/postgres_15.8.1.044/nix/docs/nix-overlays.md b/postgres_15.8.1.044/nix/docs/nix-overlays.md
new file mode 100644
index 0000000..90b6f22
--- /dev/null
+++ b/postgres_15.8.1.044/nix/docs/nix-overlays.md
@@ -0,0 +1,36 @@
+Overlays are a feature of Nixpkgs that allow you to:
+
+- Add new packages with new names to the namespace _without_ modifying upstream
+  - For example, if there is a package `foobar`, you might add `foobar-1_2_3` to
+    add a specific version for backwards compatibility
+- Globally override _existing_ package names, in terms of other packages.
+  - For example, if you want to globally override a package to enable a
+    disabled-by-default feature.
+
+First, you need to define a file for the overlay under
+[overlays/](../overlays/), and then import it in `flake.nix`. There is an
+example pull request in
+[#14](https://github.com/supabase/nix-postgres/issues/14) for this; an overlay
+typically looks like this:
+
+```
+final: prev: {
+  gdal = prev.gdalMinimal;
+}
+```
+
+This says "globally override `gdal` with a different version, named
+`gdalMinimal`". In this case `gdalMinimal` is a build with fewer features
+enabled.
+
+The most important part is that there is an equation of the form `lhs = rhs;`
+— if the `lhs` refers to an existing name, it's overwritten. If it refers
+to a new name, it's introduced. Overwriting an existing name acts as if you
+changed the files upstream: so the above example _globally_ overrides GDAL for
+anything that depends on it.
+
+The names `final` and `prev` are used to refer to packages in terms of other
+overlays: `prev` is the package set as it stood before this overlay was
+applied, and `final` is the fully-overlaid end result.
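+
+For instance, an overlay implementing the hypothetical `foobar-1_2_3` idea from
+the first bullet above might look like this (a sketch; the package and version
+are made up, and the real `src` override is elided):
+
+```
+final: prev: {
+  # keep the default foobar; add a pinned variant under a new name
+  foobar-1_2_3 = prev.foobar.overrideAttrs (old: {
+    version = "1.2.3";
+    # src would be overridden here too, pointing at the 1.2.3 sources
+  });
+}
+```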
+For more information about overlays and the `final`/`prev` pattern, see the
+[NixOS Wiki Page for Overlays](https://nixos.wiki/wiki/Overlays).
+
+We also use an overlay to override the default build recipe for
+`postgresql_16`, feeding it the specially patched Postgres needed by the
+orioledb extension. This experimental variant can be built with
+`nix build .#psql_orioledb_16/bin`, which builds the patched Postgres along
+with all extensions and wrappers currently known to work with orioledb.
diff --git a/postgres_15.8.1.044/nix/docs/receipt-files.md b/postgres_15.8.1.044/nix/docs/receipt-files.md
new file mode 100644
index 0000000..978f8a9
--- /dev/null
+++ b/postgres_15.8.1.044/nix/docs/receipt-files.md
@@ -0,0 +1,155 @@
+Every time you run `nix build` on this repository to build PostgreSQL, the
+installation directory comes with a _receipt_ file that tells you what's inside
+of it. Primarily, this tells you:
+
+- The version of PostgreSQL,
+- The installed extensions, and
+- The version of nixpkgs.
+
+The intent of the receipt file is to provide a mechanism for tooling to
+understand installation directories and provide things like upgrade paths or
+upgrade mechanisms.
+
+## Example receipt
+
+For example:
+
+```
+austin@GANON:~/work/nix-postgres$ nix build .#psql_15/bin
+austin@GANON:~/work/nix-postgres$ ls result
+bin include lib receipt.json share
+```
+
+The receipt is in JSON format, under `receipt.json`. Here's an example of what
+it would look like:
+
+```json
+{
+  "extensions": [
+    {
+      "name": "pgsql-http",
+      "version": "1.5.0"
+    },
+    {
+      "name": "pg_plan_filter",
+      "version": "unstable-2021-09-23"
+    },
+    {
+      "name": "pg_net",
+      "version": "0.7.2"
+    },
+    {
+      "name": "pg_hashids",
+      "version": "unstable-2022-09-17"
+    },
+    {
+      "name": "pgsodium",
+      "version": "3.1.8"
+    },
+    {
+      "name": "pg_graphql",
+      "version": "unstable-2023-08-01"
+    },
+    {
+      "name": "pg_stat_monitor",
+      "version": "1.0.1"
+    },
+    {
+      "name": "pg_jsonschema",
+      "version": "unstable-2023-07-23"
+    },
+    {
+      "name": "vault",
+      "version": "0.2.9"
+    },
+    {
+      "name": "hypopg",
+      "version": "1.3.1"
+    },
+    {
+      "name": "pg_tle",
+      "version": "1.0.4"
+    },
+    {
+      "name": "supabase-wrappers",
+      "version": "unstable-2023-07-31"
+    },
+    {
+      "name": "supautils",
+      "version": "1.7.3"
+    }
+  ],
+  "nixpkgs": {
+    "extensions": [
+      {
+        "name": "postgis",
+        "version": "3.3.3"
+      },
+      {
+        "name": "pgrouting",
+        "version": "3.5.0"
+      },
+      {
+        "name": "pgtap",
+        "version": "1.2.0"
+      },
+      {
+        "name": "pg_cron",
+        "version": "1.5.2"
+      },
+      {
+        "name": "pgaudit",
+        "version": "1.7.0"
+      },
+      {
+        "name": "pgjwt",
+        "version": "unstable-2021-11-13"
+      },
+      {
+        "name": "plpgsql_check",
+        "version": "2.3.4"
+      },
+      {
+        "name": "pg-safeupdate",
+        "version": "1.4"
+      },
+      {
+        "name": "timescaledb",
+        "version": "2.11.1"
+      },
+      {
+        "name": "wal2json",
+        "version": "2.5"
+      },
+      {
+        "name": "plv8",
+        "version": "3.1.5"
+      },
+      {
+        "name": "rum",
+        "version": "1.3.13"
+      },
+      {
+        "name": "pgvector",
+        "version": "0.4.4"
+      },
+      {
+        "name": "pg_repack",
+        "version": "1.4.8"
+      },
+      {
+        "name": "pgroonga",
+        "version": "3.0.8"
+      }
+    ],
+    "revision": "750fc50bfd132a44972aa15bb21937ae26303bc4"
+  },
+  "psql-version": "15.3",
+  "receipt-version": "1",
+  "revision": "vcs=d250647+20230814"
+}
+```
diff --git a/postgres_15.8.1.044/nix/docs/references.md b/postgres_15.8.1.044/nix/docs/references.md
new file mode 100644
index 0000000..fe5b791
--- /dev/null
+++ b/postgres_15.8.1.044/nix/docs/references.md
@@ -0,0 +1,31 @@
+Nix references and other useful tools:
+
+- **Zero to Nix**: Start here to get your feet wet with how Nix works, and how
+  to use Nixpkgs: https://zero-to-nix.com/
+- `nix-installer`: My recommended way to install Nix
+  - https://github.com/DeterminateSystems/nix-installer
+- Nix manual: https://nixos.org/manual/nix/stable/
+  - Useful primarily for option and command references
+- Flake schema reference: https://nixos.wiki/wiki/Flakes
+  - Useful to know what `flake.nix` is referring to
+- Example pull requests for this repo:
+  - Adding smoke tests for an extension:
+    https://github.com/supabase/nix-postgres/pull/2
+  - Extension smoke tests, part 2:
+    https://github.com/supabase/nix-postgres/pull/3
+  - Adding an extension and a smoke test at once:
+    https://github.com/supabase/nix-postgres/pull/4/files
+  - Updating an extension to trunk:
+    https://github.com/supabase/nix-postgres/pull/7
+  - Updating an extension to the latest release:
+    https://github.com/supabase/nix-postgres/pull/9
+- Contributing to [nixpkgs](https://github.com/nixos/nixpkgs)
+  - Adding a PGRX-powered extension:
+    https://github.com/NixOS/nixpkgs/pull/246803
+  - Adding a normal extension: https://github.com/NixOS/nixpkgs/pull/249000
+  - Adding new PostgreSQL versions: https://github.com/NixOS/nixpkgs/pull/249030
+- NixOS Discourse: https://discourse.nixos.org/
+  - Useful for community feedback, guidance, and help
+- `nix-update`: https://github.com/Mic92/nix-update
+  - Used in this repository to help update extensions
+- pgTAP for testing: https://pgtap.org/documentation.html
diff --git a/postgres_15.8.1.044/nix/docs/start-client-server.md b/postgres_15.8.1.044/nix/docs/start-client-server.md
new file mode 100644
index 0000000..5f1d0d2
--- /dev/null
+++ b/postgres_15.8.1.044/nix/docs/start-client-server.md
@@ -0,0 +1,93 @@
+## Running the server
+
+If you want to run a postgres server, just do this from the root of the
+repository:
+
+```
+nix run .#start-server 15
+```
+
+Replace the `15` with a `16`, and you'll be using a different version. You can
+optionally specify a second argument for the port.
+
+You likely already have a postgres running locally, so to avoid a conflict this
+uses port 5435 by default.
+
+Actually, you don't even need the repository. You can do this from arbitrary
+directories, if the left-hand side of the hash character (`.` in this case) is a
+valid "flake reference":
+
+```
+# from any arbitrary directory
+nix run github:supabase/postgres#start-server 15
+```
+
+### Arbitrary versions at arbitrary git revisions
+
+Let's say you want to use a PostgreSQL build from a specific version of the
+repository. You can change the syntax of the above to use _any_ version of the
+repository, at any time, by adding the commit hash after the repository name:
+
+```
+# use the postgresql 15 build at commit <commit>
+nix run github:supabase/postgres/<commit>#start-server 15
+```
+
+## Running the client
+
+All of the same rules apply, but use `start-client` on the right-hand side
+of the hash character instead. For example:
+
+```
+nix run github:supabase/postgres#start-server 15 &
+sleep 5
+nix run github:supabase/postgres#start-client 15
+```
+
+## Running a server replica
+
+To start a replica you can use the `start-postgres-replica` command.
+
+- first argument: the master version
+- second argument: the master port
+- third argument: the replica server port
+
+First start a server and a couple of replicas:
+
+```
+$ start-postgres-server 15 5435
+
+$ start-postgres-replica 15 5435 5439
+
+$ start-postgres-replica 15 5435 5440
+```
+
+Now check the master server:
+
+```
+$ start-postgres-client 15 5435
+```
+
+```sql
+SELECT client_addr, state
+FROM pg_stat_replication;
+ client_addr |   state
+-------------+-----------
+ ::1         | streaming
+ ::1         | streaming
+(2 rows)
+
+create table items as select x::int from generate_series(1,100) x;
+```
+
+And a replica:
+
+```
+$ start-postgres-client 15 5439
+```
+
+```sql
+select count(*) from items;
+ count
+-------
+   100
+(1 row)
+```
diff --git a/postgres_15.8.1.044/nix/docs/start-here.md b/postgres_15.8.1.044/nix/docs/start-here.md
new file mode 100644
index 0000000..acc3158
--- /dev/null
+++ b/postgres_15.8.1.044/nix/docs/start-here.md
@@ -0,0 +1,70 @@
+Let's go ahead and install Nix. To do that, we'll use the
+**[nix-installer tool]** by Determinate Systems. This works on many platforms,
+but most importantly it works on **aarch64 Linux** and **x86_64 Linux**. Use the
+following command in your shell; **it should work on any Linux distro of your
+choice**:
+
+[nix-installer tool]: https://github.com/DeterminateSystems/nix-installer
+
+```bash
+curl \
+  --proto '=https' --tlsv1.2 \
+  -sSf -L https://install.determinate.systems/nix \
+| sh -s -- install
+```
+
+After you do this, **you must log out and log back in to your desktop
+environment** to get a new login session. This is so that your shell can have
+the Nix tools installed on `$PATH` and so that your user shell can see some
+extra settings.
+
+You should now be able to do something like the following; try running these
+same commands on your machine:
+
+```
+$ nix --version
+nix (Nix) 2.16.1
+```
+
+```
+$ nix run nixpkgs#nix-info -- -m
+ - system: `"x86_64-linux"`
+ - host os: `Linux 5.15.90.1-microsoft-standard-WSL2, Ubuntu, 22.04.2 LTS (Jammy Jellyfish), nobuild`
+ - multi-user?: `yes`
+ - sandbox: `yes`
+ - version: `nix-env (Nix) 2.16.1`
+ - channels(root): `"nixpkgs"`
+ - nixpkgs: `/nix/var/nix/profiles/per-user/root/channels/nixpkgs`
+```
+
+If the above worked, you're now cooking with gas!
+
+> _**NOTE**_: While there is an upstream tool to install Nix, written in Bash,
+> we use the Determinate Systems installer — which will hopefully replace the
+> original — because it's faster, takes care of several extra edge cases that
+> the original one couldn't handle, and makes several changes to the default
+> installed configuration to make things more user friendly. Determinate
+> Systems is staffed by many long-time Nix contributors and the creator of Nix,
+> and is trustworthy.
+
+## Do some fun stuff
+
+One of the best things about Nix that requires _very little_ knowledge of it is
+that it lets you install the latest and greatest versions of many tools _on any
+Linux distribution_. We'll explain more about that later on. But just as a few
+examples:
+
+- **Q**: I want the latest version of Deno. Can we get that?
+- **A**: `nix profile install nixpkgs#deno`, and you're done!
+
+
+
+- **Q**: What about HTTPie? A nice Python application?
+- **A**: Same idea: `nix profile install nixpkgs#httpie`
+
+
+
+- **Q**: What about my favorite Rust applications, like ripgrep and bat?
+- **A.1**: `nix profile install nixpkgs#ripgrep`
+- **A.2**: `nix profile install nixpkgs#bat`
+- **A.3**: And yes, you also have exa, fd, hyperfine, and more!
diff --git a/postgres_15.8.1.044/nix/docs/update-extension.md b/postgres_15.8.1.044/nix/docs/update-extension.md
new file mode 100644
index 0000000..786c8c2
--- /dev/null
+++ b/postgres_15.8.1.044/nix/docs/update-extension.md
@@ -0,0 +1,17 @@
+
+# Update an existing nix extension
+
+
+1. Create a branch off of `develop`
+2. For instance, if we were updating https://github.com/supabase/postgres/blob/develop/nix/ext/supautils.nix we would:
+   1. Change `version = "2.2.1";` to whatever git tag release version we want to update to.
+   2. Temporarily empty the hash, changing `hash = "sha256-wSUEG0at00TPAoHv6+NMzuUE8mfW6fnHH0MNxvBdUiE=";` to `hash = "";`, save `supautils.nix`, and `git add .`
+   3. Run `nix build .#psql_15/exts/supautils` (or the name of the extension you are updating); the failed build will print the calculated sha256 value. Add it back to the `hash` variable, save the file again, and re-run `nix build .#psql_15/exts/supautils`.
+   4. NOTE: This step is only necessary for `buildPgrxExtension` packages, which include supabase-wrappers, pg_jsonschema, and pg_graphql; otherwise you can skip it. For packages that are built with `buildPgrxExtension` you will need to prepend the previous version to the `previousVersions` variable before updating the version in the package (for instance, if you are updating the `supabase-wrappers` extension from `0.4.1` to `0.4.2`, you would prepend `0.4.1` to this line: https://github.com/supabase/postgres/blob/develop/nix/ext/wrappers/default.nix#L18 ).
+   5. Add any needed migrations into the `supabase/postgres` migrations directory.
+   6. Update the version in `ansible/vars.yml` as usual.
+   7. You can then run the `nix flake check -L` tests locally to verify that the update of the package succeeded.
+   8. Now it's ready for PR review.
+   9. Once the PR is approved, if you want the change to go out in a release, update the `common-nix.vars.yml` file with the new version prior to merging.
+
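+In practice, steps 2.1–2.3 look something like this (a sketch against a
+hypothetical new `supautils` release `2.3.0`):
+
+```
+# nix/ext/supautils.nix (excerpt)
+version = "2.3.0";  # the new release version
+hash = "";          # temporarily emptied; the failed build reports the real hash
+```
+
+Running `nix build .#psql_15/exts/supautils` then fails with a hash mismatch
+containing a line like `got: sha256-...`; paste that value back into `hash`
+and rebuild.
+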
But to be polite, we don't do that by default; we keep a file +named `.envrc.recommended` in the repository instead, and encourage people to do +this: + +``` +echo "source_env .envrc.recommended" >> .envrc +``` + +All this says is "Load the code from `.envrc.recommended` directly", just like a +normal bash script using `source`. The idea of this pattern is to allow users to +have their own customized `.envrc` and piggyback on the committed code for +utility — and `.envrc` is `.gitignore`'d, so you can put e.g. secret +tokens inside without fear of committing them. + +Run the above command, and then... + +## What just happened? + +Oops, a big red error appeared? + +``` +$ echo "source_env .envrc.recommended" >> .envrc +direnv: error /home/austin/work/nix-postgres/.envrc is blocked. Run `direnv allow` to approve its content +``` + +What happened? By default, as a security measure, `direnv` _does not_ load or +execute any code from an `.envrc` file, and instead it MUST be allowed +explicitly. + +## `direnv allow` + +Our `.envrc.recommended` file will integrate with Nix directly. So run +`direnv allow`, and you'll suddenly see the following: + +``` +$ direnv allow +direnv: loading ~/work/nix-postgres/.envrc +direnv: loading ~/work/nix-postgres/.envrc.recommended +direnv: loading https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc (sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8=) +direnv: using flake +direnv: nix-direnv: renewed cache +direnv: export +AR +AS +CC +CONFIG_SHELL +CXX +DETERMINISTIC_BUILD +HOST_PATH +IN_NIX_SHELL +LD +NIX_BINTOOLS +NIX_BINTOOLS_WRAPPER_TARGET_HOST_x86_64_unknown_linux_gnu +NIX_BUILD_CORES +NIX_CC +NIX_CC_WRAPPER_TARGET_HOST_x86_64_unknown_linux_gnu +NIX_CFLAGS_COMPILE +NIX_ENFORCE_NO_NATIVE +NIX_HARDENING_ENABLE +NIX_LDFLAGS +NIX_STORE +NM +OBJCOPY +OBJDUMP +PYTHONHASHSEED +PYTHONNOUSERSITE +PYTHONPATH +RANLIB +READELF +SIZE +SOURCE_DATE_EPOCH +STRINGS +STRIP +_PYTHON_HOST_PLATFORM +_PYTHON_SYSCONFIGDATA_NAME +__structuredAttrs +buildInputs +buildPhase +builder +cmakeFlags +configureFlags +depsBuildBuild +depsBuildBuildPropagated +depsBuildTarget +depsBuildTargetPropagated +depsHostHost +depsHostHostPropagated +depsTargetTarget +depsTargetTargetPropagated +doCheck +doInstallCheck +dontAddDisableDepTrack +mesonFlags +name +nativeBuildInputs +out +outputs +patches +phases +preferLocalBuild +propagatedBuildInputs +propagatedNativeBuildInputs +shell +shellHook +stdenv +strictDeps +system ~PATH ~XDG_DATA_DIRS +``` + +What just happened is that we populated the ambient shell environment with tools +specified inside of `flake.nix` — we'll cover Flakes later. But for now, +your tools are provisioned! + + +## The power of `direnv` + +`direnv` with Nix is a frighteningly good development combination for many +purposes. This is its main power: you can use it to create on-demand developer +shells for any language, tool, or environment, and all you need to do is `cd` to +the right directory. + +This is the power of `direnv`: your projects always, on demand, will have the +right tools configured and available, no matter if you last worked on them a day +ago or a year ago, or it was done by your teammate, or you have a brand new +computer that you've never programmed on. 
diff --git a/postgres_15.8.1.044/nix/ext/0001-build-Allow-using-V8-from-system.patch b/postgres_15.8.1.044/nix/ext/0001-build-Allow-using-V8-from-system.patch new file mode 100644 index 0000000..ab2c6f0 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/0001-build-Allow-using-V8-from-system.patch @@ -0,0 +1,46 @@ +diff --git a/Makefile b/Makefile +index 38879cc..6e78eeb 100644 +--- a/Makefile ++++ b/Makefile +@@ -20,6 +20,7 @@ OBJS = $(SRCS:.cc=.o) + MODULE_big = plv8-$(PLV8_VERSION) + EXTENSION = plv8 + PLV8_DATA = plv8.control plv8--$(PLV8_VERSION).sql ++USE_SYSTEM_V8 = 0 + + + # Platform detection +@@ -41,6 +42,7 @@ PGXS := $(shell $(PG_CONFIG) --pgxs) + PG_VERSION_NUM := $(shell cat `$(PG_CONFIG) --includedir-server`/pg_config*.h \ + | perl -ne 'print $$1 and exit if /PG_VERSION_NUM\s+(\d+)/') + ++ifeq ($(USE_SYSTEM_V8),0) + AUTOV8_DIR = build/v8 + AUTOV8_OUT = build/v8/out.gn/obj + AUTOV8_STATIC_LIBS = -lv8_libplatform -lv8_libbase +@@ -66,6 +68,7 @@ v8: + make -f Makefiles/Makefile.macos v8 + endif + endif ++endif + + # enable direct jsonb conversion by default + CCFLAGS += -DJSONB_DIRECT_CONVERSION +@@ -83,6 +86,7 @@ ifdef BIGINT_GRACEFUL + endif + + ++ifeq ($(USE_SYSTEM_V8),0) + # We're gonna build static link. Rip it out after include Makefile + SHLIB_LINK := $(filter-out -lv8, $(SHLIB_LINK)) + +@@ -101,6 +105,7 @@ else + SHLIB_LINK += -lrt -std=c++14 + endif + endif ++endif + + DATA = $(PLV8_DATA) + ifndef DISABLE_DIALECT +-- +2.37.3 diff --git a/postgres_15.8.1.044/nix/ext/gdal.nix b/postgres_15.8.1.044/nix/ext/gdal.nix new file mode 100644 index 0000000..83924d9 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/gdal.nix @@ -0,0 +1,69 @@ +{ lib +, stdenv +, fetchFromGitHub +, cmake +, pkg-config +, curl +, expat +, libgeotiff +, geos +, json_c +, libxml2 +, postgresql +, proj +, sqlite +, libtiff +, zlib +}: + +stdenv.mkDerivation rec { + pname = "gdal"; + version = "3.8.5"; + + src = fetchFromGitHub { + owner = "OSGeo"; + repo = "gdal"; + rev = "v${version}"; + hash = "sha256-Z+mYlyOX9vJ772qwZMQfCbD/V7RL6+9JLHTzoZ55ot0="; + }; + + nativeBuildInputs = [ + cmake + pkg-config + ]; + + buildInputs = [ + curl + expat + libgeotiff + geos + json_c + libxml2 + postgresql + proj + sqlite + libtiff + zlib + ]; + + cmakeFlags = [ + "-DGDAL_USE_INTERNAL_LIBS=OFF" + "-DGEOTIFF_INCLUDE_DIR=${lib.getDev libgeotiff}/include" + "-DGEOTIFF_LIBRARY_RELEASE=${lib.getLib libgeotiff}/lib/libgeotiff${stdenv.hostPlatform.extensions.sharedLibrary}" + "-DBUILD_PYTHON_BINDINGS=OFF" + ] ++ lib.optionals (!stdenv.isDarwin) [ + "-DCMAKE_SKIP_BUILD_RPATH=ON" + ] ++ lib.optionals stdenv.isDarwin [ + "-DCMAKE_BUILD_WITH_INSTALL_NAME_DIR=ON" + ]; + + enableParallelBuilding = true; + + meta = with lib; { + description = "Translator library for raster geospatial data formats (PostGIS-focused build)"; + homepage = "https://www.gdal.org/"; + license = licenses.mit; + maintainers = with maintainers; teams.geospatial.members ++ [ marcweber dotlambda ]; + platforms = platforms.unix; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/hypopg.nix b/postgres_15.8.1.044/nix/ext/hypopg.nix new file mode 100644 index 0000000..300a449 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/hypopg.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "hypopg"; + version = "1.4.1"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "HypoPG"; + repo = pname; + rev = "refs/tags/${version}"; + hash = "sha256-88uKPSnITRZ2VkelI56jZ9GWazG/Rn39QlyHKJKSKMM="; + }; + + 
installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *${postgresql.dlSuffix} $out/lib + cp *.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Hypothetical Indexes for PostgreSQL"; + homepage = "https://github.com/HypoPG/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/index_advisor.nix b/postgres_15.8.1.044/nix/ext/index_advisor.nix new file mode 100644 index 0000000..3ed5a5f --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/index_advisor.nix @@ -0,0 +1,30 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "index_advisor"; + version = "0.2.0"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "olirice"; + repo = pname; + rev = "v${version}"; + hash = "sha256-G0eQk2bY5CNPMeokN/nb05g03CuiplRf902YXFVQFbs="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Recommend indexes to improve query performance in PostgreSQL"; + homepage = "https://github.com/olirice/index_advisor"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/mecab-naist-jdic/default.nix b/postgres_15.8.1.044/nix/ext/mecab-naist-jdic/default.nix new file mode 100644 index 0000000..d7ea6c5 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/mecab-naist-jdic/default.nix @@ -0,0 +1,41 @@ +{ lib, stdenv, fetchurl, mecab }: + +stdenv.mkDerivation rec { + pname = "mecab-naist-jdic"; + version = "0.6.3b-20111013"; + + src = fetchurl { + url = "https://github.com/supabase/mecab-naist-jdic/raw/main/mecab-naist-jdic-${version}.tar.gz"; + sha256 = "sha256-yzdwDcmne5U/K/OxW0nP7NZ4SFMKLPirywm1lMpWKMw="; + }; + + buildInputs = [ mecab ]; + + configureFlags = [ + "--with-charset=utf8" + ]; + + buildPhase = '' + runHook preBuild + make + ${mecab}/libexec/mecab/mecab-dict-index -d . -o . 
-f UTF-8 -t utf-8 + runHook postBuild + ''; + + installPhase = '' + runHook preInstall + + mkdir -p $out/lib/mecab/dic/naist-jdic + cp *.dic *.bin *.def $out/lib/mecab/dic/naist-jdic/ + + runHook postInstall + ''; + + meta = with lib; { + description = "Naist Japanese Dictionary for MeCab"; + homepage = "https://taku910.github.io/mecab/"; + license = licenses.gpl2; + platforms = platforms.unix; + maintainers = with maintainers; [ samrose ]; + }; +} \ No newline at end of file diff --git a/postgres_15.8.1.044/nix/ext/orioledb.nix b/postgres_15.8.1.044/nix/ext/orioledb.nix new file mode 100644 index 0000000..856539d --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/orioledb.nix @@ -0,0 +1,38 @@ +{ lib, stdenv, fetchFromGitHub, curl, libkrb5, postgresql, python3, openssl }: + +stdenv.mkDerivation rec { + pname = "orioledb"; + name = pname; + src = fetchFromGitHub { + owner = "orioledb"; + repo = "orioledb"; + rev = "beta9"; + sha256 = "sha256-z2EHWsY+hhtnYzAxOl2PWjqfyJ+wp9SCau5LKPT2ec0="; + }; + version = "beta9"; + buildInputs = [ curl libkrb5 postgresql python3 openssl ]; + buildPhase = "make USE_PGXS=1 ORIOLEDB_PATCHSET_VERSION=5"; + installPhase = '' + runHook preInstall + + mkdir -p $out/{lib,share/postgresql/extension} + + # Copy the extension library + cp orioledb${postgresql.dlSuffix} $out/lib/ + + # Copy sql files from the sql directory + cp sql/*.sql $out/share/postgresql/extension/ + + # Copy control file + cp orioledb.control $out/share/postgresql/extension/ + + runHook postInstall + ''; + doCheck = true; + meta = with lib; { + description = "orioledb"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/pg-safeupdate.nix b/postgres_15.8.1.044/nix/ext/pg-safeupdate.nix new file mode 100644 index 0000000..d24fab5 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pg-safeupdate.nix @@ -0,0 +1,29 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "pg-safeupdate"; + version = "1.4"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "eradman"; + repo = pname; + rev = version; + hash = "sha256-1cyvVEC9MQGMr7Tg6EUbsVBrMc8ahdFS3+CmDkmAq4Y="; + }; + + installPhase = '' + install -D safeupdate${postgresql.dlSuffix} -t $out/lib + ''; + + meta = with lib; { + description = "A simple extension to PostgreSQL that requires criteria for UPDATE and DELETE"; + homepage = "https://github.com/eradman/pg-safeupdate"; + changelog = "https://github.com/eradman/pg-safeupdate/raw/${src.rev}/NEWS"; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + broken = versionOlder postgresql.version "14"; + maintainers = with maintainers; [ samrose ]; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/pg_backtrace.nix b/postgres_15.8.1.044/nix/ext/pg_backtrace.nix new file mode 100644 index 0000000..47ede88 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pg_backtrace.nix @@ -0,0 +1,33 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "pg_backtrace"; + version = "1.1"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "pashkinelfe"; + repo = pname; + rev = "d100bac815a7365e199263f5b3741baf71b14c70"; + hash = "sha256-IVCL4r4oj1Ams03D8y+XCFkckPFER/W9tQ68GkWQQMY="; + }; + + makeFlags = [ "USE_PGXS=1" ]; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *${postgresql.dlSuffix} $out/lib + cp *.sql $out/share/postgresql/extension + cp *.control 
$out/share/postgresql/extension
+  '';
+
+  meta = with lib; {
+    description = "Updated fork of pg_backtrace";
+    homepage = "https://github.com/pashkinelfe/pg_backtrace";
+    maintainers = with maintainers; [ samrose ];
+    platforms = postgresql.meta.platforms;
+    license = licenses.postgresql;
+  };
+}
diff --git a/postgres_15.8.1.044/nix/ext/pg_cron.nix b/postgres_15.8.1.044/nix/ext/pg_cron.nix
new file mode 100644
index 0000000..d51254a
--- /dev/null
+++ b/postgres_15.8.1.044/nix/ext/pg_cron.nix
@@ -0,0 +1,32 @@
+{ lib, stdenv, fetchFromGitHub, postgresql }:
+
+stdenv.mkDerivation rec {
+  pname = "pg_cron";
+  version = "1.6.4";
+
+  buildInputs = [ postgresql ];
+
+  src = fetchFromGitHub {
+    owner = "citusdata";
+    repo = pname;
+    rev = "v${version}";
+    hash = "sha256-t1DpFkPiSfdoGG2NgNT7g1lkvSooZoRoUrix6cBID40=";
+  };
+
+  installPhase = ''
+    mkdir -p $out/{lib,share/postgresql/extension}
+
+    cp *${postgresql.dlSuffix} $out/lib
+    cp *.sql $out/share/postgresql/extension
+    cp *.control $out/share/postgresql/extension
+  '';
+
+  meta = with lib; {
+    description = "Run Cron jobs through PostgreSQL";
+    homepage = "https://github.com/citusdata/pg_cron";
+    changelog = "https://github.com/citusdata/pg_cron/raw/v${version}/CHANGELOG.md";
+    maintainers = with maintainers; [ samrose ];
+    platforms = postgresql.meta.platforms;
+    license = licenses.postgresql;
+  };
+}
diff --git a/postgres_15.8.1.044/nix/ext/pg_graphql.nix b/postgres_15.8.1.044/nix/ext/pg_graphql.nix
new file mode 100644
index 0000000..54e5009
--- /dev/null
+++ b/postgres_15.8.1.044/nix/ext/pg_graphql.nix
@@ -0,0 +1,48 @@
+{ lib, stdenv, fetchFromGitHub, postgresql, buildPgrxExtension_0_12_9, cargo, rust-bin }:
+
+let
+  cargo = rust-bin.nightly.latest.default;
+in
+buildPgrxExtension_0_12_9 rec {
+  pname = "pg_graphql";
+  version = "1.5.11";
+  inherit postgresql;
+
+  src = fetchFromGitHub {
+    owner = "supabase";
+    repo = pname;
+    rev = "v${version}";
+    hash = "sha256-BMZc9ui+2J3U24HzZZVCU5+KWhz+5qeUsRGeptiqbek=";
+  };
+
+  nativeBuildInputs = [ cargo ];
+  buildInputs = [ postgresql ];
+
+  CARGO = "${cargo}/bin/cargo";
+
+  cargoLock = {
+    lockFile = "${src}/Cargo.lock";
+  };
+
+  # Setting RUSTFLAGS in env to ensure it's available for all phases
+  env = lib.optionalAttrs stdenv.isDarwin {
+    POSTGRES_LIB = "${postgresql}/lib";
+    PGPORT = "5434";
+    RUSTFLAGS = "-C link-arg=-undefined -C link-arg=dynamic_lookup";
+    NIX_BUILD_CORES = "4"; # Limit parallel jobs
+    CARGO_BUILD_JOBS = "4"; # Limit cargo parallelism
+  };
+
+  CARGO_BUILD_RUSTFLAGS = "--cfg tokio_unstable -C debuginfo=0";
+  CARGO_PROFILE_RELEASE_BUILD_OVERRIDE_DEBUG = true;
+
+  doCheck = false;
+
+  meta = with lib; {
+    description = "GraphQL support for PostgreSQL";
+    homepage = "https://github.com/supabase/${pname}";
+    maintainers = with maintainers; [ samrose ];
+    platforms = postgresql.meta.platforms;
+    license = licenses.postgresql;
+  };
+}
\ No newline at end of file
diff --git a/postgres_15.8.1.044/nix/ext/pg_hashids.nix b/postgres_15.8.1.044/nix/ext/pg_hashids.nix
new file mode 100644
index 0000000..50bff95
--- /dev/null
+++ b/postgres_15.8.1.044/nix/ext/pg_hashids.nix
@@ -0,0 +1,31 @@
+{ lib, stdenv, fetchFromGitHub, postgresql }:
+
+stdenv.mkDerivation rec {
+  pname = "pg_hashids";
+  version = "cd0e1b31d52b394a0df64079406a14a4f7387cd6";
+
+  buildInputs = [ postgresql ];
+
+  src = fetchFromGitHub {
+    owner = "iCyberon";
+    repo = pname;
+    rev = "${version}";
+    hash = "sha256-Nmb7XLqQflYZfqj0yrewfb1Hl5YgEB5wfjBunPwIuOU=";
+  };
+
+  installPhase = ''
+    mkdir -p
$out/{lib,share/postgresql/extension}
+
+    cp *${postgresql.dlSuffix} $out/lib
+    cp *.sql $out/share/postgresql/extension
+    cp *.control $out/share/postgresql/extension
+  '';
+
+  meta = with lib; {
+    description = "Generate short unique IDs in PostgreSQL";
+    homepage = "https://github.com/iCyberon/pg_hashids";
+    maintainers = with maintainers; [ samrose ];
+    platforms = postgresql.meta.platforms;
+    license = licenses.postgresql;
+  };
+}
diff --git a/postgres_15.8.1.044/nix/ext/pg_jsonschema.nix b/postgres_15.8.1.044/nix/ext/pg_jsonschema.nix
new file mode 100644
index 0000000..50a2d34
--- /dev/null
+++ b/postgres_15.8.1.044/nix/ext/pg_jsonschema.nix
@@ -0,0 +1,73 @@
+{ lib, stdenv, fetchFromGitHub, postgresql, buildPgrxExtension_0_12_6, cargo, rust-bin }:
+let
+  rustVersion = "1.80.0";
+  cargo = rust-bin.stable.${rustVersion}.default;
+in
+buildPgrxExtension_0_12_6 rec {
+  pname = "pg_jsonschema";
+  version = "0.3.3";
+  inherit postgresql;
+
+  src = fetchFromGitHub {
+    owner = "supabase";
+    repo = pname;
+    rev = "v${version}";
+    hash = "sha256-Au1mqatoFKVq9EzJrpu1FVq5a1kBb510sfC980mDlsU=";
+  };
+
+  nativeBuildInputs = [ cargo ];
+  buildInputs = [ postgresql ];
+
+  # Update the following array when the pg_jsonschema version is updated;
+  # it is required to ensure that extension update scripts from previous
+  # versions are generated.
+  previousVersions = ["0.3.1" "0.3.0" "0.2.0" "0.1.4" "0.1.2" "0.1.1" "0.1.0"];
+  CARGO = "${cargo}/bin/cargo";
+
+  # darwin env needs PGPORT to be unique so the build doesn't clash with other
+  # pgrx extensions
+  env = lib.optionalAttrs stdenv.isDarwin {
+    POSTGRES_LIB = "${postgresql}/lib";
+    RUSTFLAGS = "-C link-arg=-undefined -C link-arg=dynamic_lookup";
+    PGPORT = "5433";
+  };
+
+  cargoLock = {
+    lockFile = "${src}/Cargo.lock";
+    allowBuiltinFetchGit = false;
+  };
+
+  # FIXME (aseipp): testsuite tries to write files into /nix/store; we'll have
+  # to fix this a bit later.
+  doCheck = false;
+
+  preBuild = ''
+    echo "Processing git tags..."
+    echo '${builtins.concatStringsSep "," previousVersions}' | sed 's/,/\n/g' > git_tags.txt
+  '';
+
+  postInstall = ''
+    echo "Creating SQL files for previous versions..."
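+    # Each pg_jsonschema--<previous>--<current>.sql file generated below is the
+    # upgrade script PostgreSQL runs for ALTER EXTENSION pg_jsonschema UPDATE;
+    # for this pgrx extension the upgrade SQL is identical to the current
+    # version's SQL, so it is copied verbatim for every older version.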
+ current_version="${version}" + sql_file="$out/share/postgresql/extension/pg_jsonschema--$current_version.sql" + + if [ -f "$sql_file" ]; then + while read -r previous_version; do + if [ "$(printf '%s\n' "$previous_version" "$current_version" | sort -V | head -n1)" = "$previous_version" ] && [ "$previous_version" != "$current_version" ]; then + new_file="$out/share/postgresql/extension/pg_jsonschema--$previous_version--$current_version.sql" + echo "Creating $new_file" + cp "$sql_file" "$new_file" + fi + done < git_tags.txt + else + echo "Warning: $sql_file not found" + fi + rm git_tags.txt + ''; + + + meta = with lib; { + description = "JSON Schema Validation for PostgreSQL"; + homepage = "https://github.com/supabase/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} \ No newline at end of file diff --git a/postgres_15.8.1.044/nix/ext/pg_net.nix b/postgres_15.8.1.044/nix/ext/pg_net.nix new file mode 100644 index 0000000..22be108 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pg_net.nix @@ -0,0 +1,33 @@ +{ lib, stdenv, fetchFromGitHub, curl, postgresql }: + +stdenv.mkDerivation rec { + pname = "pg_net"; + version = "0.14.0"; + + buildInputs = [ curl postgresql ]; + + src = fetchFromGitHub { + owner = "supabase"; + repo = pname; + rev = "refs/tags/v${version}"; + hash = "sha256-c1pxhTyrE5j6dY+M5eKAboQNofIORS+Dccz+7HKEKQI="; + }; + + env.NIX_CFLAGS_COMPILE = "-Wno-error"; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *${postgresql.dlSuffix} $out/lib + cp sql/*.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Async networking for Postgres"; + homepage = "https://github.com/supabase/pg_net"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/pg_partman.nix b/postgres_15.8.1.044/nix/ext/pg_partman.nix new file mode 100644 index 0000000..1ece633 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pg_partman.nix @@ -0,0 +1,34 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "pg_partman"; + version = "5.1.0"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "pgpartman"; + repo = pname; + rev = "refs/tags/v${version}"; + sha256 = "sha256-GrVOJ5ywZMyqyDroYDLdKkXDdIJSDGhDfveO/ZvrmYs="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp src/*${postgresql.dlSuffix} $out/lib + cp updates/* $out/share/postgresql/extension + cp -r sql/* $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Partition management extension for PostgreSQL"; + homepage = "https://github.com/pgpartman/pg_partman"; + changelog = "https://github.com/pgpartman/pg_partman/blob/v${version}/CHANGELOG.md"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + broken = versionOlder postgresql.version "14"; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/pg_plan_filter.nix b/postgres_15.8.1.044/nix/ext/pg_plan_filter.nix new file mode 100644 index 0000000..0ed5272 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pg_plan_filter.nix @@ -0,0 +1,30 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "pg_plan_filter"; + version = 
"5081a7b5cb890876e67d8e7486b6a64c38c9a492"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "pgexperts"; + repo = pname; + rev = "${version}"; + hash = "sha256-YNeIfmccT/DtOrwDmpYFCuV2/P6k3Zj23VWBDkOh6sw="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *${postgresql.dlSuffix} $out/lib + cp *.sql $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Filter PostgreSQL statements by execution plans"; + homepage = "https://github.com/pgexperts/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/pg_regress.nix b/postgres_15.8.1.044/nix/ext/pg_regress.nix new file mode 100644 index 0000000..6e581c4 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pg_regress.nix @@ -0,0 +1,24 @@ +{ lib +, stdenv +, postgresql +}: + +stdenv.mkDerivation { + pname = "pg_regress"; + version = postgresql.version; + + phases = [ "installPhase" ]; + + installPhase = '' + mkdir -p $out/bin + cp ${postgresql}/lib/pgxs/src/test/regress/pg_regress $out/bin/ + ''; + + meta = with lib; { + description = "Regression testing tool for PostgreSQL"; + homepage = "https://www.postgresql.org/"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} \ No newline at end of file diff --git a/postgres_15.8.1.044/nix/ext/pg_repack.nix b/postgres_15.8.1.044/nix/ext/pg_repack.nix new file mode 100644 index 0000000..7b8a695 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pg_repack.nix @@ -0,0 +1,66 @@ +{ lib +, stdenv +, fetchFromGitHub +, openssl +, postgresql +, postgresqlTestHook +, readline +, testers +, zlib +}: + +stdenv.mkDerivation (finalAttrs: { + pname = "pg_repack"; + version = "1.5.2"; + + buildInputs = postgresql.buildInputs ++ [ postgresql ]; + + src = fetchFromGitHub { + owner = "reorg"; + repo = "pg_repack"; + rev = "ver_${finalAttrs.version}"; + hash = "sha256-wfjiLkx+S3zVrAynisX1GdazueVJ3EOwQEPcgUQt7eA="; + }; + + installPhase = '' + install -D bin/pg_repack -t $out/bin/ + install -D lib/pg_repack${postgresql.dlSuffix} -t $out/lib/ + install -D lib/{pg_repack--${finalAttrs.version}.sql,pg_repack.control} -t $out/share/postgresql/extension + ''; + + passthru.tests = { + version = testers.testVersion { + package = finalAttrs.finalPackage; + }; + extension = stdenv.mkDerivation { + name = "plpgsql-check-test"; + dontUnpack = true; + doCheck = true; + buildInputs = [ postgresqlTestHook ]; + nativeCheckInputs = [ (postgresql.withPackages (ps: [ ps.pg_repack ])) ]; + postgresqlTestUserOptions = "LOGIN SUPERUSER"; + failureHook = "postgresqlStop"; + checkPhase = '' + runHook preCheck + psql -a -v ON_ERROR_STOP=1 -c "CREATE EXTENSION pg_repack;" + runHook postCheck + ''; + installPhase = "touch $out"; + }; + }; + + meta = with lib; { + description = "Reorganize tables in PostgreSQL databases with minimal locks"; + longDescription = '' + pg_repack is a PostgreSQL extension which lets you remove bloat from tables and indexes, and optionally restore + the physical order of clustered indexes. Unlike CLUSTER and VACUUM FULL it works online, without holding an + exclusive lock on the processed tables during processing. pg_repack is efficient to boot, + with performance comparable to using CLUSTER directly. 
+ ''; + homepage = "https://github.com/reorg/pg_repack"; + license = licenses.bsd3; + maintainers = with maintainers; [ samrose ]; + inherit (postgresql.meta) platforms; + mainProgram = "pg_repack"; + }; +}) diff --git a/postgres_15.8.1.044/nix/ext/pg_stat_monitor.nix b/postgres_15.8.1.044/nix/ext/pg_stat_monitor.nix new file mode 100644 index 0000000..c3e91fe --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pg_stat_monitor.nix @@ -0,0 +1,49 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +let + # NOTE (aseipp): the 1.x series of pg_stat_monitor has some non-standard and + # weird build logic (Percona projects in general seem to have their own + # strange build harness) where it will try to pick the right .sql file to + # install into the extension dir based on the postgresql major version. for + # our purposes, we only need to support v13 and v14+, so just replicate this + # logic from the makefile and pick the right file here. + # + # this seems to all be cleaned up in version 2.0 of the extension, so ideally + # we could upgrade to it later on and nuke this. + # DEPRECATED sqlFilename = if lib.versionOlder postgresql.version "14" + # then "pg_stat_monitor--1.0.13.sql.in" + # else "pg_stat_monitor--1.0.14.sql.in"; + +in +stdenv.mkDerivation rec { + pname = "pg_stat_monitor"; + version = "2.1.0"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "percona"; + repo = pname; + rev = "refs/tags/${version}"; + hash = "sha256-STJVvvrLVLe1JevNu6u6EftzAWv+X+J8lu66su7Or2s="; + }; + + makeFlags = [ "USE_PGXS=1" ]; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *${postgresql.dlSuffix} $out/lib + cp *.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Query Performance Monitoring Tool for PostgreSQL"; + homepage = "https://github.com/percona/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + broken = lib.versionOlder postgresql.version "15"; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/pg_tle.nix b/postgres_15.8.1.044/nix/ext/pg_tle.nix new file mode 100644 index 0000000..0221d87 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pg_tle.nix @@ -0,0 +1,36 @@ +{ lib, stdenv, fetchFromGitHub, postgresql, flex, openssl, libkrb5 }: + +stdenv.mkDerivation rec { + pname = "pg_tle"; + version = "1.4.0"; + + nativeBuildInputs = [ flex ]; + buildInputs = [ openssl postgresql libkrb5 ]; + + src = fetchFromGitHub { + owner = "aws"; + repo = pname; + rev = "refs/tags/v${version}"; + hash = "sha256-crxj5R9jblIv0h8lpqddAoYe2UqgUlnvbOajKTzVces="; + }; + + + makeFlags = [ "FLEX=flex" ]; + + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *${postgresql.dlSuffix} $out/lib + cp *.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Framework for 'Trusted Language Extensions' in PostgreSQL"; + homepage = "https://github.com/aws/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/pgaudit.nix b/postgres_15.8.1.044/nix/ext/pgaudit.nix new file mode 100644 index 0000000..84fbf44 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pgaudit.nix @@ -0,0 +1,48 @@ +{ lib, stdenv, fetchFromGitHub, libkrb5, openssl, postgresql }: +#adapted from 
https://github.com/NixOS/nixpkgs/blob/master/pkgs/servers/sql/postgresql/ext/pgaudit.nix +let + source = { + "17" = { + version = "17.0"; + hash = "sha256-3ksq09wiudQPuBQI3dhEQi8IkXKLVIsPFgBnwLiicro="; + }; + "16" = { + version = "16.0"; + hash = "sha256-8+tGOl1U5y9Zgu+9O5UDDE4bec4B0JC/BQ6GLhHzQzc="; + }; + "15" = { + version = "1.7.0"; + hash = "sha256-8pShPr4HJaJQPjW1iPJIpj3CutTx8Tgr+rOqoXtgCcw="; + }; + }.${lib.versions.major postgresql.version} or (throw "Source for pgaudit is not available for ${postgresql.version}"); +in +stdenv.mkDerivation { + pname = "pgaudit"; + inherit (source) version; + + src = fetchFromGitHub { + owner = "pgaudit"; + repo = "pgaudit"; + rev = source.version; + hash = source.hash; + }; + + buildInputs = [ libkrb5 openssl postgresql ]; + + makeFlags = [ "USE_PGXS=1" ]; + + installPhase = '' + install -D -t $out/lib pgaudit${postgresql.dlSuffix} + install -D -t $out/share/postgresql/extension *.sql + install -D -t $out/share/postgresql/extension *.control + ''; + + meta = with lib; { + description = "Open Source PostgreSQL Audit Logging"; + homepage = "https://github.com/pgaudit/pgaudit"; + changelog = "https://github.com/pgaudit/pgaudit/releases/tag/${source.version}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/pgjwt.nix b/postgres_15.8.1.044/nix/ext/pgjwt.nix new file mode 100644 index 0000000..2eb60f7 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pgjwt.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitHub, postgresql, unstableGitUpdater }: + +stdenv.mkDerivation rec { + pname = "pgjwt"; + version = "9742dab1b2f297ad3811120db7b21451bca2d3c9"; + + src = fetchFromGitHub { + owner = "michelp"; + repo = "pgjwt"; + rev = "${version}"; + hash = "sha256-Hw3R9bMGDmh+dMzjmqZSy/rT4mX8cPU969OJiARFg10="; + }; + + dontBuild = true; + installPhase = '' + mkdir -p $out/share/postgresql/extension + cp pg*sql *.control $out/share/postgresql/extension + ''; + + passthru.updateScript = unstableGitUpdater { }; + + meta = with lib; { + description = "PostgreSQL implementation of JSON Web Tokens"; + longDescription = '' + sign() and verify() functions to create and verify JSON Web Tokens. + ''; + license = licenses.mit; + platforms = postgresql.meta.platforms; + maintainers = with maintainers; [samrose]; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/pgmq.nix b/postgres_15.8.1.044/nix/ext/pgmq.nix new file mode 100644 index 0000000..97a3c27 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pgmq.nix @@ -0,0 +1,33 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "pgmq"; + version = "1.4.4"; + buildInputs = [ postgresql ]; + src = fetchFromGitHub { + owner = "tembo-io"; + repo = pname; + rev = "v${version}"; + hash = "sha256-z+8/BqIlHwlMnuIzMz6eylmYbSmhtsNt7TJf/CxbdVw="; + }; + + buildPhase = '' + cd pgmq-extension + ''; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + mv sql/pgmq.sql $out/share/postgresql/extension/pgmq--${version}.sql + cp sql/*.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "A lightweight message queue. 
Like AWS SQS and RSMQ but on Postgres."; + homepage = "https://github.com/tembo-io/pgmq"; + maintainers = with maintainers; [ olirice ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/pgroonga.nix b/postgres_15.8.1.044/nix/ext/pgroonga.nix new file mode 100644 index 0000000..7ebe849 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pgroonga.nix @@ -0,0 +1,79 @@ +{ lib, stdenv, fetchurl, pkg-config, postgresql, msgpack-c, callPackage, mecab, makeWrapper, xxHash }: +let + supabase-groonga = callPackage ../supabase-groonga.nix { }; +in +stdenv.mkDerivation rec { + pname = "pgroonga"; + version = "3.2.5"; + src = fetchurl { + url = "https://packages.groonga.org/source/${pname}/${pname}-${version}.tar.gz"; + sha256 = "sha256-GM9EOQty72hdE4Ecq8jpDudhZLiH3pP9ODLxs8DXcSY="; + }; + nativeBuildInputs = [ pkg-config makeWrapper ]; + + buildInputs = [ postgresql msgpack-c supabase-groonga mecab ] ++ lib.optionals stdenv.isDarwin [ + xxHash + ]; + + propagatedBuildInputs = [ supabase-groonga ]; + configureFlags = [ + "--with-mecab=${mecab}" + "--enable-mecab" + "--with-groonga=${supabase-groonga}" + "--with-groonga-plugin-dir=${supabase-groonga}/lib/groonga/plugins" + ]; + + makeFlags = [ + "HAVE_MSGPACK=1" + "MSGPACK_PACKAGE_NAME=msgpack-c" + "HAVE_MECAB=1" + ]; + + NIX_CFLAGS_COMPILE = lib.optionalString stdenv.isDarwin (builtins.concatStringsSep " " [ + "-Wno-error=incompatible-function-pointer-types" + "-Wno-error=format" + "-Wno-format" + "-I${supabase-groonga}/include/groonga" + "-I${xxHash}/include" + "-DPGRN_VERSION=\"${version}\"" + ]); + + preConfigure = '' + export GROONGA_LIBS="-L${supabase-groonga}/lib -lgroonga" + export GROONGA_CFLAGS="-I${supabase-groonga}/include" + export MECAB_CONFIG="${mecab}/bin/mecab-config" + ${lib.optionalString stdenv.isDarwin '' + export CPPFLAGS="-I${supabase-groonga}/include/groonga -I${xxHash}/include -DPGRN_VERSION=\"${version}\"" + export CFLAGS="-I${supabase-groonga}/include/groonga -I${xxHash}/include -DPGRN_VERSION=\"${version}\"" + export PG_CPPFLAGS="-Wno-error=incompatible-function-pointer-types -Wno-error=format" + ''} + ''; + + installPhase = '' + mkdir -p $out/lib $out/share/postgresql/extension $out/bin + install -D pgroonga${postgresql.dlSuffix} -t $out/lib/ + install -D pgroonga.control -t $out/share/postgresql/extension + install -D data/pgroonga-*.sql -t $out/share/postgresql/extension + install -D pgroonga_database${postgresql.dlSuffix} -t $out/lib/ + install -D pgroonga_database.control -t $out/share/postgresql/extension + install -D data/pgroonga_database-*.sql -t $out/share/postgresql/extension + + echo "Debug: Groonga plugins directory contents:" + ls -l ${supabase-groonga}/lib/groonga/plugins/tokenizers/ + ''; + + meta = with lib; { + description = "A PostgreSQL extension to use Groonga as the index"; + longDescription = '' + PGroonga is a PostgreSQL extension to use Groonga as the index. + PostgreSQL supports full text search against languages that use only alphabet and digit. + It means that PostgreSQL doesn't support full text search against Japanese, Chinese and so on. + You can use super fast full text search feature against all languages by installing PGroonga into your PostgreSQL. 
+ ''; + homepage = "https://pgroonga.github.io/"; + changelog = "https://github.com/pgroonga/pgroonga/releases/tag/${version}"; + license = licenses.postgresql; + platforms = postgresql.meta.platforms; + maintainers = with maintainers; [ samrose ]; + }; +} \ No newline at end of file diff --git a/postgres_15.8.1.044/nix/ext/pgrouting.nix b/postgres_15.8.1.044/nix/ext/pgrouting.nix new file mode 100644 index 0000000..5587566 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pgrouting.nix @@ -0,0 +1,60 @@ +{ lib, stdenv, fetchFromGitHub, postgresql, perl, cmake, boost }: + +stdenv.mkDerivation rec { + pname = "pgrouting"; + version = "3.4.1"; + + nativeBuildInputs = [ cmake perl ]; + buildInputs = [ postgresql boost ]; + + src = fetchFromGitHub { + owner = "pgRouting"; + repo = pname; + rev = "v${version}"; + hash = "sha256-QC77AnPGpPQGEWi6JtJdiNsB2su5+aV2pKg5ImR2B0k="; + }; + + #disable compile time warnings for incompatible pointer types only on macos and pg16 + NIX_CFLAGS_COMPILE = lib.optionalString (stdenv.isDarwin && lib.versionAtLeast postgresql.version "16") + "-Wno-error=int-conversion -Wno-error=incompatible-pointer-types"; + + cmakeFlags = [ + "-DPOSTGRESQL_VERSION=${postgresql.version}" + ] ++ lib.optionals (stdenv.isDarwin && lib.versionAtLeast postgresql.version "16") [ + "-DCMAKE_MACOSX_RPATH=ON" + "-DCMAKE_SHARED_MODULE_SUFFIX=.dylib" + "-DCMAKE_SHARED_LIBRARY_SUFFIX=.dylib" + ]; + + preConfigure = lib.optionalString (stdenv.isDarwin && lib.versionAtLeast postgresql.version "16") '' + export DLSUFFIX=.dylib + export CMAKE_SHARED_LIBRARY_SUFFIX=.dylib + export CMAKE_SHARED_MODULE_SUFFIX=.dylib + export MACOSX_RPATH=ON + ''; + + postBuild = lib.optionalString (stdenv.isDarwin && lib.versionAtLeast postgresql.version "16") '' + shopt -s nullglob + for file in lib/libpgrouting-*.so; do + if [ -f "$file" ]; then + mv "$file" "''${file%.so}.dylib" + fi + done + shopt -u nullglob + ''; + + installPhase = '' + install -D lib/*${postgresql.dlSuffix} -t $out/lib + install -D sql/pgrouting--*.sql -t $out/share/postgresql/extension + install -D sql/common/pgrouting.control -t $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "A PostgreSQL/PostGIS extension that provides geospatial routing functionality"; + homepage = "https://pgrouting.org/"; + changelog = "https://github.com/pgRouting/pgrouting/releases/tag/v${version}"; + maintainers = with maintainers; [ steve-chavez samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.gpl2Plus; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/pgsodium.nix b/postgres_15.8.1.044/nix/ext/pgsodium.nix new file mode 100644 index 0000000..4e184fa --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pgsodium.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitHub, libsodium, postgresql }: + +stdenv.mkDerivation rec { + pname = "pgsodium"; + version = "3.1.8"; + + buildInputs = [ libsodium postgresql ]; + + src = fetchFromGitHub { + owner = "michelp"; + repo = pname; + rev = "refs/tags/v${version}"; + hash = "sha256-j5F1PPdwfQRbV8XJ8Mloi8FvZF0MTl4eyIJcBYQy1E4="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *${postgresql.dlSuffix} $out/lib + cp sql/*.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Modern cryptography for PostgreSQL"; + homepage = "https://github.com/michelp/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + 
}; +} diff --git a/postgres_15.8.1.044/nix/ext/pgsql-http.nix b/postgres_15.8.1.044/nix/ext/pgsql-http.nix new file mode 100644 index 0000000..03fc3e6 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pgsql-http.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitHub, curl, postgresql }: + +stdenv.mkDerivation rec { + pname = "pgsql-http"; + version = "1.6.1"; + + buildInputs = [ curl postgresql ]; + + src = fetchFromGitHub { + owner = "pramsey"; + repo = pname; + rev = "refs/tags/v${version}"; + hash = "sha256-C8eqi0q1dnshUAZjIsZFwa5FTYc7vmATF3vv2CReWPM="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp *${postgresql.dlSuffix} $out/lib + cp *.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "HTTP client for Postgres"; + homepage = "https://github.com/pramsey/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/pgtap.nix b/postgres_15.8.1.044/nix/ext/pgtap.nix new file mode 100644 index 0000000..c5a17c9 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/pgtap.nix @@ -0,0 +1,33 @@ +{ lib, stdenv, fetchFromGitHub, postgresql, perl, perlPackages, which }: + +stdenv.mkDerivation rec { + pname = "pgtap"; + version = "1.2.0"; + + src = fetchFromGitHub { + owner = "theory"; + repo = "pgtap"; + rev = "v${version}"; + hash = "sha256-lb0PRffwo6J5a6Hqw1ggvn0cW7gPZ02OEcLPi9ineI8="; + }; + + nativeBuildInputs = [ postgresql perl perlPackages.TAPParserSourceHandlerpgTAP which ]; + + installPhase = '' + install -D {sql/pgtap--${version}.sql,pgtap.control} -t $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "A unit testing framework for PostgreSQL"; + longDescription = '' + pgTAP is a unit testing framework for PostgreSQL written in PL/pgSQL and PL/SQL. + It includes a comprehensive collection of TAP-emitting assertion functions, + as well as the ability to integrate with other TAP-emitting test frameworks. + It can also be used in the xUnit testing style. 
+ '';
+    maintainers = with maintainers; [ samrose ];
+    homepage = "https://pgtap.org";
+    inherit (postgresql.meta) platforms;
+    license = licenses.mit;
+  };
+}
diff --git a/postgres_15.8.1.044/nix/ext/pgvector.nix b/postgres_15.8.1.044/nix/ext/pgvector.nix
new file mode 100644
index 0000000..a7d58ec
--- /dev/null
+++ b/postgres_15.8.1.044/nix/ext/pgvector.nix
@@ -0,0 +1,31 @@
+{ lib, stdenv, fetchFromGitHub, postgresql }:
+
+stdenv.mkDerivation rec {
+  pname = "pgvector";
+  version = "0.8.0";
+
+  buildInputs = [ postgresql ];
+
+  src = fetchFromGitHub {
+    owner = "pgvector";
+    repo = pname;
+    rev = "refs/tags/v${version}";
+    hash = "sha256-JsZV+I4eRMypXTjGmjCtMBXDVpqTIPHQa28ogXncE/Q=";
+  };
+
+  installPhase = ''
+    mkdir -p $out/{lib,share/postgresql/extension}
+
+    cp *${postgresql.dlSuffix} $out/lib
+    cp sql/*.sql $out/share/postgresql/extension
+    cp *.control $out/share/postgresql/extension
+  '';
+
+  meta = with lib; {
+    description = "Open-source vector similarity search for Postgres";
+    homepage = "https://github.com/${src.owner}/${src.repo}";
+    maintainers = with maintainers; [ olirice ];
+    platforms = postgresql.meta.platforms;
+    license = licenses.postgresql;
+  };
+}
diff --git a/postgres_15.8.1.044/nix/ext/pljava.nix b/postgres_15.8.1.044/nix/ext/pljava.nix
new file mode 100644
index 0000000..16f8a59
--- /dev/null
+++ b/postgres_15.8.1.044/nix/ext/pljava.nix
@@ -0,0 +1,51 @@
+{ stdenv, lib, fetchFromGitHub, openssl, openjdk, maven, postgresql, libkrb5, makeWrapper, gcc, pkg-config, which }:
+
+maven.buildMavenPackage rec {
+  pname = "pljava";
+
+  version = "1.6.7";
+
+  src = fetchFromGitHub {
+    owner = "tada";
+    repo = "pljava";
+    rev = "V1_6_7";
+    sha256 = "sha256-M17adSLsw47KZ2BoUwxyWkXKRD8TcexDAy61Yfw4fNU=";
+  };
+
+  mvnParameters = "clean install -Dmaven.test.skip -DskipTests -Dmaven.javadoc.skip=true";
+  mvnHash = "sha256-lcxRduh/nKcPL6YQIVTsNH0L4ga0LgJpQKgX5IPkRzs=";
+
+  nativeBuildInputs = [ makeWrapper maven openjdk postgresql openssl gcc libkrb5 pkg-config ];
+  buildInputs = [ stdenv.cc.cc.lib which ];
+  buildPhase = ''
+    export PATH=${lib.makeBinPath [ postgresql ]}:$PATH
+  '';
+  buildOffline = true;
+
+  installPhase = ''
+    mkdir -p $out/pljavabuild
+    cp -r * $out/pljavabuild
+    mkdir -p $out/share/postgresql/extension/pljava
+    mkdir -p $out/share/postgresql/pljava
+    mkdir -p $out/lib
+    mkdir -p $out/etc
+    java -Dpgconfig=${postgresql}/bin/pg_config \
+         -Dpgconfig.sharedir=$out/share \
+         -Dpgconfig.sysconfdir=$out/etc/pljava.policy \
+         -Dpgconfig.pkglibdir=$out/lib \
+         -jar $out/pljavabuild/pljava-packaging/target/pljava-pg15.jar
+    cp $out/share/pljava/* $out/share/postgresql/extension/pljava
+    cp $out/share/pljava/* $out/share/postgresql/pljava
+    cp $out/share/extension/*.control $out/share/postgresql/extension
+    rm -r $out/pljavabuild
+  '';
+
+  meta = with lib; {
+    description = "PL/Java extension for PostgreSQL";
+    homepage = "https://github.com/tada/pljava";
+    license = licenses.bsd3;
+    maintainers = [ maintainers.samrose ];
+  };
+}
diff --git a/postgres_15.8.1.044/nix/ext/plpgsql-check.nix b/postgres_15.8.1.044/nix/ext/plpgsql-check.nix
new file mode 100644
index 0000000..7be2aac
--- /dev/null
+++ b/postgres_15.8.1.044/nix/ext/plpgsql-check.nix
@@ -0,0 +1,46 @@
+{ lib, stdenv, fetchFromGitHub, postgresql, postgresqlTestHook }:
+
+stdenv.mkDerivation rec {
+  pname = "plpgsql-check";
+  version = "2.7.11";
+
+  src = fetchFromGitHub {
+    owner = "okbob";
+    repo = "plpgsql_check";
+    rev = "v${version}";
+    hash =
"sha256-vR3MvfmUP2QEAtXFpq0NCCKck3wZPD+H3QleHtyVQJs="; + }; + + buildInputs = [ postgresql ]; + + installPhase = '' + install -D -t $out/lib *${postgresql.dlSuffix} + install -D -t $out/share/postgresql/extension *.sql + install -D -t $out/share/postgresql/extension *.control + ''; + + passthru.tests.extension = stdenv.mkDerivation { + name = "plpgsql-check-test"; + dontUnpack = true; + doCheck = true; + buildInputs = [ postgresqlTestHook ]; + nativeCheckInputs = [ (postgresql.withPackages (ps: [ ps.plpgsql_check ])) ]; + postgresqlTestUserOptions = "LOGIN SUPERUSER"; + failureHook = "postgresqlStop"; + checkPhase = '' + runHook preCheck + psql -a -v ON_ERROR_STOP=1 -c "CREATE EXTENSION plpgsql_check;" + runHook postCheck + ''; + installPhase = "touch $out"; + }; + + meta = with lib; { + description = "Linter tool for language PL/pgSQL"; + homepage = "https://github.com/okbob/plpgsql_check"; + changelog = "https://github.com/okbob/plpgsql_check/releases/tag/v${version}"; + platforms = postgresql.meta.platforms; + license = licenses.mit; + maintainers = [ maintainers.marsam ]; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/plv8.nix b/postgres_15.8.1.044/nix/ext/plv8.nix new file mode 100644 index 0000000..340a073 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/plv8.nix @@ -0,0 +1,146 @@ +{ stdenv +, lib +, fetchFromGitHub +, v8 +, perl +, postgresql +# For passthru test on various systems, and local development on macos +# not we are not currently using passthru tests but retaining for possible contrib +# to nixpkgs +, runCommand +, coreutils +, gnugrep +, clang +, xcbuild +, darwin +, patchelf +}: + +stdenv.mkDerivation (finalAttrs: { + pname = "plv8"; + version = "3.1.10"; + + src = fetchFromGitHub { + owner = "plv8"; + repo = "plv8"; + rev = "v${finalAttrs.version}"; + hash = "sha256-g1A/XPC0dX2360Gzvmo9/FSQnM6Wt2K4eR0pH0p9fz4="; + }; + + patches = [ + # Allow building with system v8. + # https://github.com/plv8/plv8/pull/505 (rejected) + ./0001-build-Allow-using-V8-from-system.patch + ]; + + nativeBuildInputs = [ + perl + ] ++ lib.optionals stdenv.isDarwin [ + clang + xcbuild + ]; + + buildInputs = [ + v8 + postgresql + ] ++ lib.optionals stdenv.isDarwin [ + darwin.apple_sdk.frameworks.CoreFoundation + darwin.apple_sdk.frameworks.Kerberos + ]; + + buildFlags = [ "all" ]; + + makeFlags = [ + # Nixpkgs build a v8 monolith instead of separate v8_libplatform. + "USE_SYSTEM_V8=1" + "V8_OUTDIR=${v8}/lib" + "PG_CONFIG=${postgresql}/bin/pg_config" + ] ++ lib.optionals stdenv.isDarwin [ + "CC=${clang}/bin/clang" + "CXX=${clang}/bin/clang++" + "SHLIB_LINK=-L${v8}/lib -lv8_monolith -Wl,-rpath,${v8}/lib" + ] ++ lib.optionals (!stdenv.isDarwin) [ + "SHLIB_LINK=-lv8" + ]; + + NIX_LDFLAGS = (lib.optionals stdenv.isDarwin [ + "-L${postgresql}/lib" + "-L${v8}/lib" + "-lv8_monolith" + "-lpq" + "-lpgcommon" + "-lpgport" + "-F${darwin.apple_sdk.frameworks.CoreFoundation}/Library/Frameworks" + "-framework" "CoreFoundation" + "-F${darwin.apple_sdk.frameworks.Kerberos}/Library/Frameworks" + "-framework" "Kerberos" + "-undefined" "dynamic_lookup" + "-flat_namespace" + ]); + + installFlags = [ + # PGXS only supports installing to postgresql prefix so we need to redirect this + "DESTDIR=${placeholder "out"}" + ]; + + # No configure script. 
+ dontConfigure = true; + + postPatch = '' + patchShebangs ./generate_upgrade.sh + substituteInPlace generate_upgrade.sh \ + --replace " 2.3.10 " " 2.3.10 2.3.11 2.3.12 2.3.13 2.3.14 2.3.15 " + + ${lib.optionalString stdenv.isDarwin '' + # Replace g++ with clang++ in Makefile + sed -i 's/g++/clang++/g' Makefile + ''} + ''; + + postInstall = '' + # Move the redirected to proper directory. + # There appear to be no references to the install directories + # so changing them does not cause issues. + mv "$out/nix/store"/*/* "$out" + rmdir "$out/nix/store"/* "$out/nix/store" "$out/nix" + + # Handle different PostgreSQL versions + if [ "${lib.versions.major postgresql.version}" = "15" ]; then + mv "$out/lib/plv8-${finalAttrs.version}.so" "$out/lib/plv8.so" + ln -s "$out/lib/plv8.so" "$out/lib/plv8-${finalAttrs.version}.so" + sed -i 's|module_pathname = '"'"'$libdir/plv8-[0-9.]*'"'"'|module_pathname = '"'"'$libdir/plv8'"'"'|' "$out/share/postgresql/extension/plv8.control" + sed -i 's|module_pathname = '"'"'$libdir/plv8-[0-9.]*'"'"'|module_pathname = '"'"'$libdir/plv8'"'"'|' "$out/share/postgresql/extension/plcoffee.control" + sed -i 's|module_pathname = '"'"'$libdir/plv8-[0-9.]*'"'"'|module_pathname = '"'"'$libdir/plv8'"'"'|' "$out/share/postgresql/extension/plls.control" + + ${lib.optionalString stdenv.isDarwin '' + install_name_tool -add_rpath "${v8}/lib" $out/lib/plv8.so + install_name_tool -add_rpath "${postgresql}/lib" $out/lib/plv8.so + install_name_tool -add_rpath "${stdenv.cc.cc.lib}/lib" $out/lib/plv8.so + install_name_tool -change @rpath/libv8_monolith.dylib ${v8}/lib/libv8_monolith.dylib $out/lib/plv8.so + ''} + + ${lib.optionalString (!stdenv.isDarwin) '' + ${patchelf}/bin/patchelf --set-rpath "${v8}/lib:${postgresql}/lib:${stdenv.cc.cc.lib}/lib" $out/lib/plv8.so + ''} + else + ${lib.optionalString stdenv.isDarwin '' + install_name_tool -add_rpath "${v8}/lib" $out/lib/plv8-${finalAttrs.version}${postgresql.dlSuffix} + install_name_tool -add_rpath "${postgresql}/lib" $out/lib/plv8-${finalAttrs.version}${postgresql.dlSuffix} + install_name_tool -add_rpath "${stdenv.cc.cc.lib}/lib" $out/lib/plv8-${finalAttrs.version}${postgresql.dlSuffix} + install_name_tool -change @rpath/libv8_monolith.dylib ${v8}/lib/libv8_monolith.dylib $out/lib/plv8-${finalAttrs.version}${postgresql.dlSuffix} + ''} + + ${lib.optionalString (!stdenv.isDarwin) '' + ${patchelf}/bin/patchelf --set-rpath "${v8}/lib:${postgresql}/lib:${stdenv.cc.cc.lib}/lib" $out/lib/plv8-${finalAttrs.version}${postgresql.dlSuffix} + ''} + fi + ''; + + meta = with lib; { + description = "V8 Engine Javascript Procedural Language add-on for PostgreSQL"; + homepage = "https://plv8.github.io/"; + maintainers = with maintainers; [ samrose ]; + platforms = [ "x86_64-linux" "aarch64-linux" "aarch64-darwin" "x86_64-darwin" ]; + license = licenses.postgresql; + }; +}) diff --git a/postgres_15.8.1.044/nix/ext/postgis.nix b/postgres_15.8.1.044/nix/ext/postgis.nix new file mode 100644 index 0000000..2c8a36c --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/postgis.nix @@ -0,0 +1,87 @@ +{ fetchurl +, lib, stdenv +, perl +, libxml2 +, postgresql +, geos +, proj +, json_c +, pkg-config +, file +, protobufc +, libiconv +, pcre2 +, nixosTests +, callPackage +}: + +let + sfcgal = callPackage ./sfcgal/sfcgal.nix { }; + gdal = callPackage ./gdal.nix { inherit postgresql; }; +in +stdenv.mkDerivation rec { + pname = "postgis"; + version = "3.3.7"; + + outputs = [ "out" "doc" ]; + + src = fetchurl { + url = 
"https://download.osgeo.org/postgis/source/postgis-${version}.tar.gz"; + sha256 = "sha256-UHJKDd5JrcJT5Z4CTYsY/va+ToU0GUPG1eHhuXTkP84="; + }; + + buildInputs = [ libxml2 postgresql geos proj gdal json_c protobufc pcre2.dev sfcgal ] + ++ lib.optional stdenv.isDarwin libiconv; + nativeBuildInputs = [ perl pkg-config ]; + dontDisableStatic = true; + + + env.NIX_LDFLAGS = "-L${lib.getLib json_c}/lib"; + + preConfigure = '' + sed -i 's@/usr/bin/file@${file}/bin/file@' configure + configureFlags="--datadir=$out/share/postgresql --datarootdir=$out/share/postgresql --bindir=$out/bin --docdir=$doc/share/doc/${pname} --with-gdalconfig=${gdal}/bin/gdal-config --with-jsondir=${json_c.dev} --disable-extension-upgrades-install --with-sfcgal" + + makeFlags="PERL=${perl}/bin/perl datadir=$out/share/postgresql pkglibdir=$out/lib bindir=$out/bin docdir=$doc/share/doc/${pname}" + ''; + + postConfigure = '' + sed -i "s|@mkdir -p \$(DESTDIR)\$(PGSQL_BINDIR)||g ; + s|\$(DESTDIR)\$(PGSQL_BINDIR)|$prefix/bin|g + " \ + "raster/loader/Makefile"; + sed -i "s|\$(DESTDIR)\$(PGSQL_BINDIR)|$prefix/bin|g + " \ + "raster/scripts/python/Makefile"; + mkdir -p $out/bin + ln -s ${postgresql}/bin/postgres $out/bin/postgres + ''; + +postInstall = '' + rm $out/bin/postgres + for prog in $out/bin/*; do # */ + ln -s $prog $prog-${version} + done + # Add function definition and usage to tiger geocoder files + for file in $out/share/postgresql/extension/postgis_tiger_geocoder*--${version}.sql; do + sed -i "/SELECT postgis_extension_AddToSearchPath('tiger');/a SELECT postgis_extension_AddToSearchPath('extensions');" "$file" + done + # Original topology patching + for file in $out/share/postgresql/extension/postgis_topology*--${version}.sql; do + sed -i "/SELECT topology.AddToSearchPath('topology');/i SELECT topology.AddToSearchPath('extensions');" "$file" + done + mkdir -p $doc/share/doc/postgis + mv doc/* $doc/share/doc/postgis/ +''; + + passthru.tests.postgis = nixosTests.postgis; + + meta = with lib; { + description = "Geographic Objects for PostgreSQL"; + homepage = "https://postgis.net/"; + changelog = "https://git.osgeo.org/gitea/postgis/postgis/raw/tag/${version}/NEWS"; + license = licenses.gpl2; + maintainers = with maintainers; [ samrose ]; + inherit (postgresql.meta) platforms; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/rum.nix b/postgres_15.8.1.044/nix/ext/rum.nix new file mode 100644 index 0000000..1270f1f --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/rum.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "rum"; + version = "1.3.14"; + + src = fetchFromGitHub { + owner = "postgrespro"; + repo = "rum"; + rev = version; + hash = "sha256-VsfpxQqRBu9bIAP+TfMRXd+B3hSjuhU2NsutocNiCt8="; + }; + + buildInputs = [ postgresql ]; + + makeFlags = [ "USE_PGXS=1" ]; + + installPhase = '' + install -D -t $out/lib *${postgresql.dlSuffix} + install -D -t $out/share/postgresql/extension *.control + install -D -t $out/share/postgresql/extension *.sql + ''; + + meta = with lib; { + description = "Full text search index method for PostgreSQL"; + homepage = "https://github.com/postgrespro/rum"; + license = licenses.postgresql; + platforms = postgresql.meta.platforms; + maintainers = with maintainers; [ samrose ]; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/sfcgal/sfcgal.nix b/postgres_15.8.1.044/nix/ext/sfcgal/sfcgal.nix new file mode 100644 index 0000000..54d7b52 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/sfcgal/sfcgal.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, 
fetchFromGitLab, cgal, cmake, pkg-config, gmp, mpfr, boost }:
+
+stdenv.mkDerivation rec {
+  pname = "sfcgal";
+  version = "61f3b08ade49493b56c6bafa98c7c1f84addbc10";
+
+  src = fetchFromGitLab {
+    owner = "sfcgal";
+    repo = "SFCGAL";
+    rev = "${version}";
+    hash = "sha256-nKSqiFyMkZAYptIeShb1zFg9lYSny3kcGJfxdeTFqxw=";
+  };
+
+  nativeBuildInputs = [ cmake pkg-config cgal gmp mpfr boost ];
+
+  cmakeFlags = [ "-DCGAL_DIR=${cgal}" "-DCMAKE_PREFIX_PATH=${cgal}" ];
+
+  postPatch = ''
+    substituteInPlace sfcgal.pc.in \
+      --replace '$'{prefix}/@CMAKE_INSTALL_LIBDIR@ @CMAKE_INSTALL_FULL_LIBDIR@
+  '';
+
+  meta = with lib; {
+    description = "A wrapper around CGAL that intends to implement 2D and 3D operations on OGC standards models";
+    homepage = "https://sfcgal.gitlab.io/SFCGAL/";
+    license = with licenses; [ gpl3Plus lgpl3Plus ];
+    platforms = platforms.all;
+    maintainers = with maintainers; [ samrose ];
+  };
+}
diff --git a/postgres_15.8.1.044/nix/ext/supautils.nix b/postgres_15.8.1.044/nix/ext/supautils.nix
new file mode 100644
index 0000000..f3f5f14
--- /dev/null
+++ b/postgres_15.8.1.044/nix/ext/supautils.nix
@@ -0,0 +1,29 @@
+{ lib, stdenv, fetchFromGitHub, postgresql }:
+
+stdenv.mkDerivation rec {
+  pname = "supautils";
+  version = "2.6.0";
+
+  buildInputs = [ postgresql ];
+
+  src = fetchFromGitHub {
+    owner = "supabase";
+    repo = pname;
+    rev = "refs/tags/v${version}";
+    hash = "sha256-QNfUpQjqHNzbNqBvjb5a3GtNH9hjbBMDUK19xUU3LpI=";
+  };
+
+  installPhase = ''
+    mkdir -p $out/lib
+
+    install -D *${postgresql.dlSuffix} -t $out/lib
+  '';
+
+  meta = with lib; {
+    description = "PostgreSQL extension for enhanced security";
+    homepage = "https://github.com/supabase/${pname}";
+    maintainers = with maintainers; [ steve-chavez ];
+    platforms = postgresql.meta.platforms;
+    license = licenses.postgresql;
+  };
+}
diff --git a/postgres_15.8.1.044/nix/ext/timescaledb-2.9.1.nix b/postgres_15.8.1.044/nix/ext/timescaledb-2.9.1.nix
new file mode 100644
index 0000000..ad955e8
--- /dev/null
+++ b/postgres_15.8.1.044/nix/ext/timescaledb-2.9.1.nix
@@ -0,0 +1,51 @@
+{ lib, stdenv, fetchFromGitHub, cmake, postgresql, openssl, libkrb5 }:
+
+stdenv.mkDerivation rec {
+  pname = "timescaledb-apache";
+  version = "2.9.1";
+
+  nativeBuildInputs = [ cmake ];
+  buildInputs = [ postgresql openssl libkrb5 ];
+
+  src = fetchFromGitHub {
+    owner = "timescale";
+    repo = "timescaledb";
+    rev = version;
+    hash = "sha256-fvVSxDiGZAewyuQ2vZDb0I6tmlDXl6trjZp8+qDBtb8=";
+  };
+
+  cmakeFlags = [ "-DSEND_TELEMETRY_DEFAULT=OFF" "-DREGRESS_CHECKS=OFF" "-DTAP_CHECKS=OFF" "-DAPACHE_ONLY=1" ]
+    ++ lib.optionals stdenv.isDarwin [ "-DLINTER=OFF" ];
+
+  # Fix the install phase, which tries to install into the pgsql extension dir
+  # and cannot be manually overridden. This is rather fragile but works OK.
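+  # The substitutions below redirect the CMake install DESTINATIONs: SQL files
+  # go to this package's $out/share/postgresql/extension and loadable modules
+  # to $out/lib, instead of the postgresql package's read-only prefix.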
+ postPatch = '' + for x in CMakeLists.txt sql/CMakeLists.txt; do + substituteInPlace "$x" \ + --replace 'DESTINATION "''${PG_SHAREDIR}/extension"' "DESTINATION \"$out/share/postgresql/extension\"" + done + + for x in src/CMakeLists.txt src/loader/CMakeLists.txt tsl/src/CMakeLists.txt; do + substituteInPlace "$x" \ + --replace 'DESTINATION ''${PG_PKGLIBDIR}' "DESTINATION \"$out/lib\"" + done + ''; + + + # timescaledb-2.9.1.so already exists in the lib directory + # we have no need for the timescaledb.so or control file + postInstall = '' + rm $out/lib/timescaledb.so + rm $out/share/postgresql/extension/timescaledb.control + ''; + + meta = with lib; { + description = "Scales PostgreSQL for time-series data via automatic partitioning across time and space"; + homepage = "https://www.timescale.com/"; + changelog = "https://github.com/timescale/timescaledb/blob/${version}/CHANGELOG.md"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.asl20; + broken = versionOlder postgresql.version "13"; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/timescaledb.nix b/postgres_15.8.1.044/nix/ext/timescaledb.nix new file mode 100644 index 0000000..1c87916 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/timescaledb.nix @@ -0,0 +1,43 @@ +{ lib, stdenv, fetchFromGitHub, cmake, postgresql, openssl, libkrb5 }: + +stdenv.mkDerivation rec { + pname = "timescaledb-apache"; + version = "2.16.1"; + + nativeBuildInputs = [ cmake ]; + buildInputs = [ postgresql openssl libkrb5 ]; + + src = fetchFromGitHub { + owner = "timescale"; + repo = "timescaledb"; + rev = version; + hash = "sha256-sLxWdBmih9mgiO51zLLxn9uwJVYc5JVHJjSWoADoJ+w="; + }; + + cmakeFlags = [ "-DSEND_TELEMETRY_DEFAULT=OFF" "-DREGRESS_CHECKS=OFF" "-DTAP_CHECKS=OFF" "-DAPACHE_ONLY=1" ] + ++ lib.optionals stdenv.isDarwin [ "-DLINTER=OFF" ]; + + # Fix the install phase which tries to install into the pgsql extension dir, + # and cannot be manually overridden. This is rather fragile but works OK. 
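+  # Same DESTINATION redirection as in timescaledb-2.9.1.nix: install SQL files
+  # and modules into this package's own $out rather than the postgresql prefix.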
+ postPatch = '' + for x in CMakeLists.txt sql/CMakeLists.txt; do + substituteInPlace "$x" \ + --replace 'DESTINATION "''${PG_SHAREDIR}/extension"' "DESTINATION \"$out/share/postgresql/extension\"" + done + + for x in src/CMakeLists.txt src/loader/CMakeLists.txt tsl/src/CMakeLists.txt; do + substituteInPlace "$x" \ + --replace 'DESTINATION ''${PG_PKGLIBDIR}' "DESTINATION \"$out/lib\"" + done + ''; + + meta = with lib; { + description = "Scales PostgreSQL for time-series data via automatic partitioning across time and space"; + homepage = "https://www.timescale.com/"; + changelog = "https://github.com/timescale/timescaledb/blob/${version}/CHANGELOG.md"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.asl20; + broken = versionOlder postgresql.version "13"; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/use-system-groonga.patch b/postgres_15.8.1.044/nix/ext/use-system-groonga.patch new file mode 100644 index 0000000..6d3042b --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/use-system-groonga.patch @@ -0,0 +1,21 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 33b34477..f4ffefe5 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -12,7 +12,6 @@ if(MSVC_VERSION LESS 1800) + message(FATAL_ERROR "PGroonga supports only MSVC 2013 or later") + endif() + +-add_subdirectory(vendor/groonga) + + set(PGRN_POSTGRESQL_DIR "${CMAKE_INSTALL_PREFIX}" + CACHE PATH "PostgreSQL binary directory") +@@ -52,8 +51,6 @@ string(REGEX REPLACE "([0-9]+)\\.([0-9]+)\\.([0-9]+)" "\\3" + string(REGEX REPLACE ".*comment = '([^']+)'.*" "\\1" + PGRN_DESCRIPTION "${PGRN_CONTROL}") + +-file(READ "${CMAKE_CURRENT_SOURCE_DIR}/vendor/groonga/bundled_message_pack_version" +- PGRN_BUNDLED_MESSAGE_PACK_VERSION) + string(STRIP + "${PGRN_BUNDLED_MESSAGE_PACK_VERSION}" + PGRN_BUNDLED_MESSAGE_PACK_VERSION) \ No newline at end of file diff --git a/postgres_15.8.1.044/nix/ext/vault.nix b/postgres_15.8.1.044/nix/ext/vault.nix new file mode 100644 index 0000000..c822fcd --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/vault.nix @@ -0,0 +1,30 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "vault"; + version = "0.2.9"; + + buildInputs = [ postgresql ]; + + src = fetchFromGitHub { + owner = "supabase"; + repo = pname; + rev = "refs/tags/v${version}"; + hash = "sha256-kXTngBW4K6FkZM8HvJG2Jha6OQqbejhnk7tchxy031I="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp sql/*.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Store encrypted secrets in PostgreSQL"; + homepage = "https://github.com/supabase/${pname}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/wal2json.nix b/postgres_15.8.1.044/nix/ext/wal2json.nix new file mode 100644 index 0000000..ed578c7 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/wal2json.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "wal2json"; + version = "2_6"; + + src = fetchFromGitHub { + owner = "eulerto"; + repo = "wal2json"; + rev = "wal2json_${builtins.replaceStrings ["."] ["_"] version}"; + hash = "sha256-+QoACPCKiFfuT2lJfSUmgfzC5MXf75KpSoc2PzPxKyM="; + }; + + buildInputs = [ postgresql ]; + + makeFlags = [ "USE_PGXS=1" ]; + + installPhase = '' + install -D -t $out/lib *${postgresql.dlSuffix} + install -D -t 
$out/share/postgresql/extension sql/*.sql + ''; + + meta = with lib; { + description = "PostgreSQL JSON output plugin for changeset extraction"; + homepage = "https://github.com/eulerto/wal2json"; + changelog = "https://github.com/eulerto/wal2json/releases/tag/wal2json_${version}"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.bsd3; + }; +} diff --git a/postgres_15.8.1.044/nix/ext/wrappers/default.nix b/postgres_15.8.1.044/nix/ext/wrappers/default.nix new file mode 100644 index 0000000..5dda199 --- /dev/null +++ b/postgres_15.8.1.044/nix/ext/wrappers/default.nix @@ -0,0 +1,164 @@ +{ lib +, stdenv +, fetchFromGitHub +, openssl +, pkg-config +, postgresql +, buildPgrxExtension_0_12_6 +, cargo +, darwin +, jq +, rust-bin +, git +}: +let + rustVersion = "1.80.0"; + cargo = rust-bin.stable.${rustVersion}.default; +in +buildPgrxExtension_0_12_6 rec { + pname = "supabase-wrappers"; + version = "0.4.4"; + # update the following array when the wrappers version is updated + # required to ensure that extensions update scripts from previous versions are generated + previousVersions = ["0.4.3" "0.4.2" "0.4.1" "0.4.0" "0.3.1" "0.3.0" "0.2.0" "0.1.19" "0.1.18" "0.1.17" "0.1.16" "0.1.15" "0.1.14" "0.1.12" "0.1.11" "0.1.10" "0.1.9" "0.1.8" "0.1.7" "0.1.6" "0.1.5" "0.1.4" "0.1.1" "0.1.0"]; + inherit postgresql; + src = fetchFromGitHub { + owner = "supabase"; + repo = "wrappers"; + rev = "v${version}"; + hash = "sha256-QoGFJpq8PuvMM8SS+VZd7MlNl56uFivRjs1tCtwX+oE="; + }; + + nativeBuildInputs = [ pkg-config cargo git ]; + buildInputs = [ openssl postgresql ] ++ lib.optionals (stdenv.isDarwin) [ + darwin.apple_sdk.frameworks.CoreFoundation + darwin.apple_sdk.frameworks.Security + darwin.apple_sdk.frameworks.SystemConfiguration + ]; + + NIX_LDFLAGS = "-L${postgresql}/lib -lpq"; + + # Set necessary environment variables for pgrx + env = lib.optionalAttrs stdenv.isDarwin { + POSTGRES_LIB = "${postgresql}/lib"; + RUSTFLAGS = "-C link-arg=-undefined -C link-arg=dynamic_lookup"; + PGPORT = "5435"; + }; + + OPENSSL_NO_VENDOR = 1; + #need to set this to 2 to avoid cpu starvation + CARGO_BUILD_JOBS = "2"; + CARGO="${cargo}/bin/cargo"; + + #CARGO_NET_GIT_FETCH_WITH_CLI = "true"; + cargoLock = { + lockFile = "${src}/Cargo.lock"; + allowBuiltinFetchGit = false; + outputHashes = { + "clickhouse-rs-1.1.0-alpha.1" = "sha256-G+v4lNP5eK2U45D1fL90Dq24pUSlpIysNCxuZ17eac0="; + }; + }; + + preConfigure = '' + cd wrappers + + # update the clickhouse-rs dependency + # append the branch name to the git URL to help cargo locate the commit + # while maintaining the rev for reproducibility + awk -i inplace ' + /\[dependencies.clickhouse-rs\]/ { + print + getline + if ($0 ~ /git =/) { + print "git = \"https://github.com/suharev7/clickhouse-rs/async-await\"" + } else { + print + } + while ($0 !~ /^\[/ && NF > 0) { + getline + if ($0 ~ /rev =/) print + if ($0 ~ /^\[/) print + } + next + } + { print } + ' Cargo.toml + + # Verify the file is still valid TOML, break build with this error + # if it is not + if ! cargo verify-project 2>/dev/null; then + echo "Failed to maintain valid TOML syntax" + exit 1 + fi + + cd .. + ''; + + buildAndTestSubdir = "wrappers"; + buildFeatures = [ + "helloworld_fdw" + "all_fdws" + ]; + doCheck = false; + + preBuild = '' + echo "Processing git tags..." + echo '${builtins.concatStringsSep "," previousVersions}' | sed 's/,/\n/g' > git_tags.txt + ''; + + postInstall = '' + echo "Modifying main SQL file to use unversioned library name..." 
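+    # pgrx embeds a versioned module path ($libdir/wrappers-<x.y.z>) in the SQL
+    # it generates; the sed calls below rewrite this to the unversioned
+    # $libdir/wrappers so one shared library serves all upgrade scripts.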
+    current_version="${version}"
+    main_sql_file="$out/share/postgresql/extension/wrappers--$current_version.sql"
+    if [ -f "$main_sql_file" ]; then
+      sed -i 's|$libdir/wrappers-[0-9.]*|$libdir/wrappers|g' "$main_sql_file"
+      echo "Modified $main_sql_file"
+    else
+      echo "Warning: $main_sql_file not found"
+    fi
+    echo "Creating and modifying SQL files for previous versions..."
+
+    if [ -f "$main_sql_file" ]; then
+      while read -r previous_version; do
+        if [ "$(printf '%s\n' "$previous_version" "$current_version" | sort -V | head -n1)" = "$previous_version" ] && [ "$previous_version" != "$current_version" ]; then
+          new_file="$out/share/postgresql/extension/wrappers--$previous_version--$current_version.sql"
+          echo "Creating $new_file"
+          cp "$main_sql_file" "$new_file"
+          sed -i 's|$libdir/wrappers-[0-9.]*|$libdir/wrappers|g' "$new_file"
+          echo "Modified $new_file"
+        fi
+      done < git_tags.txt
+    else
+      echo "Warning: $main_sql_file not found"
+    fi
+    mv $out/lib/wrappers-${version}${postgresql.dlSuffix} $out/lib/wrappers${postgresql.dlSuffix}
+    ln -s $out/lib/wrappers${postgresql.dlSuffix} $out/lib/wrappers-${version}${postgresql.dlSuffix}
+
+    echo "Creating wrappers.so symlinks to support pg_upgrade..."
+    if [ -f "$out/lib/wrappers.so" ]; then
+      while read -r previous_version; do
+        if [ "$(printf '%s\n' "$previous_version" "$current_version" | sort -V | head -n1)" = "$previous_version" ] && [ "$previous_version" != "$current_version" ]; then
+          new_file="$out/lib/wrappers-$previous_version.so"
+          echo "Creating $new_file"
+          ln -s "$out/lib/wrappers.so" "$new_file"
+        fi
+      done < git_tags.txt
+    else
+      echo "Warning: $out/lib/wrappers.so not found"
+    fi
+
+    rm git_tags.txt
+    echo "Contents of updated wrappers.control:"
+    cat "$out/share/postgresql/extension/wrappers.control"
+    echo "List of generated SQL files:"
+    ls -l $out/share/postgresql/extension/wrappers--*.sql
+  '';
+
+  meta = with lib; {
+    description = "Various Foreign Data Wrappers (FDWs) for PostgreSQL";
+    homepage = "https://github.com/supabase/wrappers";
+    maintainers = with maintainers; [ samrose ];
+    platforms = postgresql.meta.platforms;
+    license = licenses.postgresql;
+  };
+}
diff --git a/postgres_15.8.1.044/nix/fix-cmake-install-path.patch b/postgres_15.8.1.044/nix/fix-cmake-install-path.patch
new file mode 100644
index 0000000..1fe317b
--- /dev/null
+++ b/postgres_15.8.1.044/nix/fix-cmake-install-path.patch
@@ -0,0 +1,21 @@
+Fix CMake install path
+
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -1141,11 +1141,11 @@
+ 
+ set(prefix "${CMAKE_INSTALL_PREFIX}")
+ set(exec_prefix "\${prefix}")
+-set(bindir "\${exec_prefix}/${CMAKE_INSTALL_BINDIR}")
+-set(sbindir "\${exec_prefix}/${CMAKE_INSTALL_SBINDIR}")
+-set(libdir "\${prefix}/${CMAKE_INSTALL_LIBDIR}")
+-set(includedir "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}")
+-set(datarootdir "\${prefix}/${CMAKE_INSTALL_DATAROOTDIR}")
++set(bindir "${CMAKE_INSTALL_FULL_BINDIR}")
++set(sbindir "${CMAKE_INSTALL_FULL_SBINDIR}")
++set(libdir "${CMAKE_INSTALL_FULL_LIBDIR}")
++set(includedir "${CMAKE_INSTALL_FULL_INCLUDEDIR}")
++set(datarootdir "${CMAKE_INSTALL_FULL_DATAROOTDIR}")
+ set(datadir "\${datarootdir}")
+ set(expanded_pluginsdir "${GRN_PLUGINS_DIR}")
+ set(GRN_EXPANDED_DEFAULT_DOCUMENT_ROOT "${GRN_DEFAULT_DOCUMENT_ROOT}")
\ No newline at end of file
diff --git a/postgres_15.8.1.044/nix/init.sh b/postgres_15.8.1.044/nix/init.sh
new file mode 100644
index 0000000..7a0aadf
--- /dev/null
+++ b/postgres_15.8.1.044/nix/init.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# shellcheck shell=bash
+
+export
PGUSER=supabase_admin +export PGDATA=$PWD/postgres_data +export PGHOST=$PWD/postgres +export PGPORT=5432 +export PGPASS=postgres +export LOG_PATH=$PGHOST/LOG +export PGDATABASE=testdb +export DATABASE_URL="postgresql:///$PGDATABASE?host=$PGHOST&port=$PGPORT" +mkdir -p $PGHOST +if [ ! -d $PGDATA ]; then + echo 'Initializing postgresql database...' + initdb $PGDATA --locale=C --username $PGUSER -A md5 --pwfile=<(echo $PGPASS) --auth=trust + echo "listen_addresses='*'" >> $PGDATA/postgresql.conf + echo "unix_socket_directories='$PGHOST'" >> $PGDATA/postgresql.conf + echo "unix_socket_permissions=0700" >> $PGDATA/postgresql.conf +fi +chmod o-rwx $PGDATA diff --git a/postgres_15.8.1.044/nix/overlays/cargo-pgrx-0-11-3.nix b/postgres_15.8.1.044/nix/overlays/cargo-pgrx-0-11-3.nix new file mode 100644 index 0000000..41ba97d --- /dev/null +++ b/postgres_15.8.1.044/nix/overlays/cargo-pgrx-0-11-3.nix @@ -0,0 +1,7 @@ +final: prev: { + #cargo-pgrx_0_11_3 = cargo-pgrx.cargo-pgrx_0_11_3; + + buildPgrxExtension_0_11_3 = prev.buildPgrxExtension.override { + cargo-pgrx = final.cargo-pgrx_0_11_3; + }; +} diff --git a/postgres_15.8.1.044/nix/overlays/psql_16-oriole.nix b/postgres_15.8.1.044/nix/overlays/psql_16-oriole.nix new file mode 100644 index 0000000..309129f --- /dev/null +++ b/postgres_15.8.1.044/nix/overlays/psql_16-oriole.nix @@ -0,0 +1,21 @@ +final: prev: { + pg_orioledb = prev.postgresql_16.overrideAttrs (old: { + pname = "postgresql_orioledb"; + version = "16_31"; + src = prev.fetchurl { + url = "https://github.com/orioledb/postgres/archive/refs/tags/patches16_31.tar.gz"; + sha256 = "sha256-29uHUACwZKh8e4zJ9tWzEhLNjEuh6P31KbpxnMEhtuI="; + }; + buildInputs = old.buildInputs ++ [ + prev.bison + prev.docbook5 + prev.docbook_xsl + prev.docbook_xsl_ns + prev.docbook_xml_dtd_45 + prev.flex + prev.libxslt + prev.perl + ]; + }); + postgresql_orioledb = final.pg_orioledb; +} diff --git a/postgres_15.8.1.044/nix/postgresql/15.nix b/postgres_15.8.1.044/nix/postgresql/15.nix new file mode 100644 index 0000000..63f4928 --- /dev/null +++ b/postgres_15.8.1.044/nix/postgresql/15.nix @@ -0,0 +1,4 @@ +import ./generic.nix { + version = "15.8"; + hash = "sha256-RANRX5pp7rPv68mPMLjGlhIr/fiV6Ss7I/W452nty2o="; +} diff --git a/postgres_15.8.1.044/nix/postgresql/16.nix b/postgres_15.8.1.044/nix/postgresql/16.nix new file mode 100644 index 0000000..c964fc5 --- /dev/null +++ b/postgres_15.8.1.044/nix/postgresql/16.nix @@ -0,0 +1,4 @@ +import ./generic.nix { + version = "16.3"; + hash = "sha256-Mxlj1dPcTK9CFqBJ+kC2bWvLjHMGFYWUEblRh2TmBYU="; +} diff --git a/postgres_15.8.1.044/nix/postgresql/default.nix b/postgres_15.8.1.044/nix/postgresql/default.nix new file mode 100644 index 0000000..a065d5a --- /dev/null +++ b/postgres_15.8.1.044/nix/postgresql/default.nix @@ -0,0 +1,20 @@ +self: +let + versions = { + postgresql_15 = ./15.nix; + postgresql_16 = ./16.nix; + postgresql_orioledb-16 = ./orioledb-16.nix; + postgresql_orioledb-17 = ./orioledb-17.nix; + }; + mkAttributes = jitSupport: + self.lib.mapAttrs' (version: path: + let + attrName = if jitSupport then "${version}_jit" else version; + in + self.lib.nameValuePair attrName (import path { + inherit jitSupport self; + }) + ) versions; +in +# variations without and with JIT +(mkAttributes false) // (mkAttributes true) diff --git a/postgres_15.8.1.044/nix/postgresql/generic.nix b/postgres_15.8.1.044/nix/postgresql/generic.nix new file mode 100644 index 0000000..19b73c1 --- /dev/null +++ b/postgres_15.8.1.044/nix/postgresql/generic.nix @@ -0,0 +1,320 @@ +let + + generic = + # 
adapted from the nixpkgs postgresql package + # dependencies + { stdenv, lib, fetchurl, fetchpatch, makeWrapper + , glibc, zlib, readline, openssl, icu, lz4, zstd, systemd, libossp_uuid + , pkg-config, libxml2, tzdata, libkrb5, substituteAll, darwin + , linux-pam + #orioledb specific + , perl, bison, flex, docbook_xsl, docbook_xml_dtd_45, docbook_xsl_ns, libxslt + + # This is important to obtain a version of `libpq` that does not depend on systemd. + , systemdSupport ? lib.meta.availableOn stdenv.hostPlatform systemd && !stdenv.hostPlatform.isStatic + , enableSystemd ? null + , gssSupport ? with stdenv.hostPlatform; !isWindows && !isStatic + + # for postgresql.pkgs + , self, newScope, buildEnv + + # source specification + , version, hash, muslPatches ? {} + + # for tests + , testers + + # JIT + , jitSupport + , nukeReferences, patchelf, llvmPackages + + # PL/Python + , pythonSupport ? false + , python3 + + # detection of crypt fails when using llvm stdenv, so we add it manually + # for <13 (where it got removed: https://github.com/postgres/postgres/commit/c45643d618e35ec2fe91438df15abd4f3c0d85ca) + , libxcrypt + } @args: + let + atLeast = lib.versionAtLeast version; + olderThan = lib.versionOlder version; + lz4Enabled = atLeast "14"; + zstdEnabled = atLeast "15"; + + systemdSupport' = if enableSystemd == null then systemdSupport else (lib.warn "postgresql: argument enableSystemd is deprecated, please use systemdSupport instead." enableSystemd); + + pname = "postgresql"; + + stdenv' = if jitSupport then llvmPackages.stdenv else stdenv; + in stdenv'.mkDerivation (finalAttrs: { + inherit version; + pname = pname + lib.optionalString jitSupport "-jit"; + + src = if (builtins.match "[0-9][0-9]_.*" version != null) then + fetchurl { + url = "https://github.com/orioledb/postgres/archive/refs/tags/patches${version}.tar.gz"; + inherit hash; + } + else + fetchurl { + url = "mirror://postgresql/source/v${version}/${pname}-${version}.tar.bz2"; + inherit hash; + }; + + hardeningEnable = lib.optionals (!stdenv'.cc.isClang) [ "pie" ]; + + outputs = [ "out" "lib" ]; + setOutputFlags = false; # $out retains configureFlags :-/ + + buildInputs = [ + zlib + readline + openssl + (libxml2.override {python = python3;}) + icu + ] + ++ lib.optionals (olderThan "13") [ libxcrypt ] + ++ lib.optionals jitSupport [ llvmPackages.llvm ] + ++ lib.optionals lz4Enabled [ lz4 ] + ++ lib.optionals zstdEnabled [ zstd ] + ++ lib.optionals systemdSupport' [ systemd ] + ++ lib.optionals pythonSupport [ python3 ] + ++ lib.optionals gssSupport [ libkrb5 ] + ++ lib.optionals stdenv'.isLinux [ linux-pam ] + ++ lib.optionals (!stdenv'.isDarwin) [ libossp_uuid ] + ++ lib.optionals (builtins.match "[0-9][0-9]_.*" version != null) [ + perl bison flex docbook_xsl docbook_xml_dtd_45 docbook_xsl_ns libxslt + ]; + + nativeBuildInputs = [ + makeWrapper + pkg-config + ] + ++ lib.optionals jitSupport [ llvmPackages.llvm.dev nukeReferences patchelf ]; + + enableParallelBuilding = true; + + separateDebugInfo = true; + + buildFlags = [ "world-bin" ]; + + # Makes cross-compiling work when xml2-config can't be executed on the host. 
+ # Fixed upstream in https://github.com/postgres/postgres/commit/0bc8cebdb889368abdf224aeac8bc197fe4c9ae6 + env.NIX_CFLAGS_COMPILE = lib.optionalString (olderThan "13") "-I${libxml2.dev}/include/libxml2"; + + configureFlags = [ + "--with-openssl" + "--with-libxml" + "--with-icu" + "--sysconfdir=/etc" + "--libdir=$(lib)/lib" + "--with-system-tzdata=${tzdata}/share/zoneinfo" + "--enable-debug" + (lib.optionalString systemdSupport' "--with-systemd") + (if stdenv'.isDarwin then "--with-uuid=e2fs" else "--with-ossp-uuid") + ] ++ lib.optionals lz4Enabled [ "--with-lz4" ] + ++ lib.optionals zstdEnabled [ "--with-zstd" ] + ++ lib.optionals gssSupport [ "--with-gssapi" ] + ++ lib.optionals pythonSupport [ "--with-python" ] + ++ lib.optionals jitSupport [ "--with-llvm" ] + ++ lib.optionals stdenv'.isLinux [ "--with-pam" ]; + + patches = [ + (if atLeast "16" then ./patches/relative-to-symlinks-16+.patch else ./patches/relative-to-symlinks.patch) + ./patches/less-is-more.patch + ./patches/paths-for-split-outputs.patch + ./patches/specify_pkglibdir_at_runtime.patch + ./patches/paths-with-postgresql-suffix.patch + + (substituteAll { + src = ./patches/locale-binary-path.patch; + locale = "${if stdenv.isDarwin then darwin.adv_cmds else lib.getBin stdenv.cc.libc}/bin/locale"; + }) + ] ++ lib.optionals stdenv'.hostPlatform.isMusl ( + # Using fetchurl instead of fetchpatch on purpose: https://github.com/NixOS/nixpkgs/issues/240141 + map fetchurl (lib.attrValues muslPatches) + ) ++ lib.optionals stdenv'.isLinux [ + (if atLeast "13" then ./patches/socketdir-in-run-13+.patch else ./patches/socketdir-in-run.patch) + ]; + + installTargets = [ "install-world-bin" ]; + + postPatch = '' + # Hardcode the path to pgxs so pg_config returns the path in $out + substituteInPlace "src/common/config_info.c" --subst-var out + '' + lib.optionalString jitSupport '' + # Force lookup of jit stuff in $out instead of $lib + substituteInPlace src/backend/jit/jit.c --replace pkglib_path \"$out/lib\" + substituteInPlace src/backend/jit/llvm/llvmjit.c --replace pkglib_path \"$out/lib\" + substituteInPlace src/backend/jit/llvm/llvmjit_inline.cpp --replace pkglib_path \"$out/lib\" + ''; + + postInstall = + '' + moveToOutput "lib/pgxs" "$out" # looks strange, but not deleting it + moveToOutput "lib/libpgcommon*.a" "$out" + moveToOutput "lib/libpgport*.a" "$out" + moveToOutput "lib/libecpg*" "$out" + + # Prevent a retained dependency on gcc-wrapper. + substituteInPlace "$out/lib/pgxs/src/Makefile.global" --replace ${stdenv'.cc}/bin/ld ld + + if [ -z "''${dontDisableStatic:-}" ]; then + # Remove static libraries in case dynamic are available. 
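+        # (an archive is only removed when the matching shared library exists
+        # in $lib or alongside the archive itself, so static-only libraries
+        # are preserved)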
+ for i in $out/lib/*.a $lib/lib/*.a; do + name="$(basename "$i")" + ext="${stdenv'.hostPlatform.extensions.sharedLibrary}" + if [ -e "$lib/lib/''${name%.a}$ext" ] || [ -e "''${i%.a}$ext" ]; then + rm "$i" + fi + done + fi + '' + lib.optionalString jitSupport '' + # Move the bitcode and libllvmjit.so library out of $lib; otherwise, every client that + # depends on libpq.so will also have libLLVM.so in its closure too, bloating it + moveToOutput "lib/bitcode" "$out" + moveToOutput "lib/llvmjit*" "$out" + + # In the case of JIT support, prevent a retained dependency on clang-wrapper + substituteInPlace "$out/lib/pgxs/src/Makefile.global" --replace ${stdenv'.cc}/bin/clang clang + nuke-refs $out/lib/llvmjit_types.bc $(find $out/lib/bitcode -type f) + + # Stop out depending on the default output of llvm + substituteInPlace $out/lib/pgxs/src/Makefile.global \ + --replace ${llvmPackages.llvm.out}/bin "" \ + --replace '$(LLVM_BINPATH)/' "" + + # Stop out depending on the -dev output of llvm + substituteInPlace $out/lib/pgxs/src/Makefile.global \ + --replace ${llvmPackages.llvm.dev}/bin/llvm-config llvm-config \ + --replace -I${llvmPackages.llvm.dev}/include "" + + ${lib.optionalString (!stdenv'.isDarwin) '' + # Stop lib depending on the -dev output of llvm + rpath=$(patchelf --print-rpath $out/lib/llvmjit.so) + nuke-refs -e $out $out/lib/llvmjit.so + # Restore the correct rpath + patchelf $out/lib/llvmjit.so --set-rpath "$rpath" + ''} + ''; + + postFixup = lib.optionalString (!stdenv'.isDarwin && stdenv'.hostPlatform.libc == "glibc") + '' + # initdb needs access to "locale" command from glibc. + wrapProgram $out/bin/initdb --prefix PATH ":" ${glibc.bin}/bin + ''; + + doCheck = !stdenv'.isDarwin; + # autodetection doesn't seem to able to find this, but it's there. + checkTarget = "check"; + + disallowedReferences = [ stdenv'.cc ]; + + passthru = let + this = self.callPackage generic args; + jitToggle = this.override { + jitSupport = !jitSupport; + }; + in + { + psqlSchema = lib.versions.major version; + + withJIT = if jitSupport then this else jitToggle; + withoutJIT = if jitSupport then jitToggle else this; + + dlSuffix = if olderThan "16" then ".so" else stdenv.hostPlatform.extensions.sharedLibrary; + + pkgs = let + scope = { + inherit jitSupport; + inherit (llvmPackages) llvm; + postgresql = this; + stdenv = stdenv'; + }; + newSelf = self // scope; + newSuper = { callPackage = newScope (scope // this.pkgs); }; + in import ./ext newSelf newSuper; + + withPackages = postgresqlWithPackages { + inherit makeWrapper buildEnv; + postgresql = this; + } + this.pkgs; + + tests = { + postgresql-wal-receiver = import ../../../../nixos/tests/postgresql-wal-receiver.nix { + inherit (stdenv) system; + pkgs = self; + package = this; + }; + pkg-config = testers.testMetaPkgConfig finalAttrs.finalPackage; + } // lib.optionalAttrs jitSupport { + postgresql-jit = import ../../../../nixos/tests/postgresql-jit.nix { + inherit (stdenv) system; + pkgs = self; + package = this; + }; + }; + }; + + meta = with lib; { + homepage = "https://www.postgresql.org"; + description = "Powerful, open source object-relational database system"; + license = licenses.postgresql; + changelog = "https://www.postgresql.org/docs/release/${finalAttrs.version}/"; + maintainers = with maintainers; [ thoughtpolice danbst globin ivan ma27 wolfgangwalther ]; + pkgConfigModules = [ "libecpg" "libecpg_compat" "libpgtypes" "libpq" ]; + platforms = platforms.unix; + + # JIT support doesn't work with cross-compilation. 
It is attempted to build LLVM-bytecode
+      # (`%.bc` is the corresponding `make(1)`-rule) for each sub-directory in `backend/` for
+      # the JIT apparently, but with a $(CLANG) that can produce binaries for the build, not the
+      # host-platform.
+      #
+      # I managed to get a cross-build with JIT support working with
+      # `depsBuildBuild = [ llvmPackages.clang ] ++ buildInputs`, but considering that the
+      # resulting LLVM IR isn't platform-independent this doesn't give you much.
+      # In fact, I tried to test the result in a VM-test, but as soon as JIT was used to optimize
+      # a query, postgres would coredump with `Illegal instruction`.
+      broken = (jitSupport && stdenv.hostPlatform != stdenv.buildPlatform)
+        # Almost all tests fail with FATAL errors for v12 and v13
+        || (jitSupport && stdenv.hostPlatform.isMusl && olderThan "14");
+    };
+  });
+
+  postgresqlWithPackages = { postgresql, makeWrapper, buildEnv }: pkgs: f: buildEnv {
+    name = "postgresql-and-plugins-${postgresql.version}";
+    paths = f pkgs ++ [
+        postgresql
+        postgresql.lib
+        #TODO RM postgresql.man # in case user installs this into environment
+    ];
+    nativeBuildInputs = [ makeWrapper ];
+
+    # We include /bin to ensure the $out/bin directory is created, which is
+    # needed because we'll be removing the files from that directory in postBuild
+    # below. See #22653
+    pathsToLink = ["/" "/bin"];
+
+    # Note: the duplication of executables is about 4 MB in size.
+    # So a nicer solution was patching postgresql to allow setting the
+    # libdir explicitly.
+    postBuild = ''
+      mkdir -p $out/bin
+      rm $out/bin/{pg_config,postgres,pg_ctl}
+      cp --target-directory=$out/bin ${postgresql}/bin/{postgres,pg_config,pg_ctl}
+      wrapProgram $out/bin/postgres --set NIX_PGLIBDIR $out/lib
+    '';
+
+    passthru.version = postgresql.version;
+    passthru.psqlSchema = postgresql.psqlSchema;
+  };
+
+in
+# passed by <major>.nix
+versionArgs:
+# passed by default.nix
+{ self, ... } @defaultArgs:
+self.callPackage generic (defaultArgs // versionArgs)
diff --git a/postgres_15.8.1.044/nix/postgresql/orioledb-16.nix b/postgres_15.8.1.044/nix/postgresql/orioledb-16.nix
new file mode 100644
index 0000000..e9a6685
--- /dev/null
+++ b/postgres_15.8.1.044/nix/postgresql/orioledb-16.nix
@@ -0,0 +1,4 @@
+import ./generic.nix {
+  version = "16_31";
+  hash = "sha256-29uHUACwZKh8e4zJ9tWzEhLNjEuh6P31KbpxnMEhtuI=";
+}
diff --git a/postgres_15.8.1.044/nix/postgresql/orioledb-17.nix b/postgres_15.8.1.044/nix/postgresql/orioledb-17.nix
new file mode 100644
index 0000000..b79052e
--- /dev/null
+++ b/postgres_15.8.1.044/nix/postgresql/orioledb-17.nix
@@ -0,0 +1,4 @@
+import ./generic.nix {
+  version = "17_5";
+  hash = "sha256-OgXLpFanNp+ngPFKyCEDUFvIEWQ9nK/1csUO9lVTXaQ=";
+}
diff --git a/postgres_15.8.1.044/nix/postgresql/patches/less-is-more.patch b/postgres_15.8.1.044/nix/postgresql/patches/less-is-more.patch
new file mode 100644
index 0000000..a72d1a2
--- /dev/null
+++ b/postgres_15.8.1.044/nix/postgresql/patches/less-is-more.patch
@@ -0,0 +1,11 @@
+--- a/src/include/fe_utils/print.h
++++ b/src/include/fe_utils/print.h
+@@ -18,7 +18,7 @@
+ 
+ /* This is not a particularly great place for this ...
*/
+ #ifndef __CYGWIN__
+-#define DEFAULT_PAGER "more"
++#define DEFAULT_PAGER "less"
+ #else
+ #define DEFAULT_PAGER "less"
+ #endif
diff --git a/postgres_15.8.1.044/nix/postgresql/patches/locale-binary-path.patch b/postgres_15.8.1.044/nix/postgresql/patches/locale-binary-path.patch
new file mode 100644
index 0000000..8068683
--- /dev/null
+++ b/postgres_15.8.1.044/nix/postgresql/patches/locale-binary-path.patch
@@ -0,0 +1,11 @@
+--- a/src/backend/commands/collationcmds.c
++++ b/src/backend/commands/collationcmds.c
+@@ -611,7 +611,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
+ 	aliases = (CollAliasData *) palloc(maxaliases * sizeof(CollAliasData));
+ 	naliases = 0;
+ 
+-	locale_a_handle = OpenPipeStream("locale -a", "r");
++	locale_a_handle = OpenPipeStream("@locale@ -a", "r");
+ 	if (locale_a_handle == NULL)
+ 		ereport(ERROR,
+ 				(errcode_for_file_access(),
diff --git a/postgres_15.8.1.044/nix/postgresql/patches/paths-for-split-outputs.patch b/postgres_15.8.1.044/nix/postgresql/patches/paths-for-split-outputs.patch
new file mode 100644
index 0000000..2134f7e
--- /dev/null
+++ b/postgres_15.8.1.044/nix/postgresql/patches/paths-for-split-outputs.patch
@@ -0,0 +1,11 @@
+--- a/src/common/config_info.c
++++ b/src/common/config_info.c
+@@ -118,7 +118,7 @@
+ 	i++;
+ 
+ 	configdata[i].name = pstrdup("PGXS");
++	strlcpy(path, "@out@/lib", sizeof(path));
+-	get_pkglib_path(my_exec_path, path);
+ 	strlcat(path, "/pgxs/src/makefiles/pgxs.mk", sizeof(path));
+ 	cleanup_path(path);
+ 	configdata[i].setting = pstrdup(path);
diff --git a/postgres_15.8.1.044/nix/postgresql/patches/paths-with-postgresql-suffix.patch b/postgres_15.8.1.044/nix/postgresql/patches/paths-with-postgresql-suffix.patch
new file mode 100644
index 0000000..04d2f55
--- /dev/null
+++ b/postgres_15.8.1.044/nix/postgresql/patches/paths-with-postgresql-suffix.patch
@@ -0,0 +1,41 @@
+Nix outputs put the `name' in each store path like
+/nix/store/...-<name>. This was confusing the Postgres make script
+because it thought its data directory already had postgresql in its
+directory. This led to Postgres installing all of its files in
+$out/share. To fix this, we just look for postgres or psql in the part
+after the / using make's notdir.
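+For example, with datadir set to /nix/store/<hash>-postgresql-15.8/share, the
+original $(findstring postgres, $(datadir)) matches the store path's name part,
+while $(findstring postgres, $(notdir $(datadir))) only inspects "share", so
+the /postgresql suffix is appended as intended.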
+ +--- +--- a/src/Makefile.global.in ++++ b/src/Makefile.global.in +@@ -102,15 +102,15 @@ datarootdir := @datarootdir@ + bindir := @bindir@ + + datadir := @datadir@ +-ifeq "$(findstring pgsql, $(datadir))" "" +-ifeq "$(findstring postgres, $(datadir))" "" ++ifeq "$(findstring pgsql, $(notdir $(datadir)))" "" ++ifeq "$(findstring postgres, $(notdir $(datadir)))" "" + override datadir := $(datadir)/postgresql + endif + endif + + sysconfdir := @sysconfdir@ +-ifeq "$(findstring pgsql, $(sysconfdir))" "" +-ifeq "$(findstring postgres, $(sysconfdir))" "" ++ifeq "$(findstring pgsql, $(notdir $(sysconfdir)))" "" ++ifeq "$(findstring postgres, $(notdir $(sysconfdir)))" "" + override sysconfdir := $(sysconfdir)/postgresql + endif + endif +@@ -136,8 +136,8 @@ endif + mandir := @mandir@ + + docdir := @docdir@ +-ifeq "$(findstring pgsql, $(docdir))" "" +-ifeq "$(findstring postgres, $(docdir))" "" ++ifeq "$(findstring pgsql, $(notdir $(docdir)))" "" ++ifeq "$(findstring postgres, $(notdir $(docdir)))" "" + override docdir := $(docdir)/postgresql + endif + endif diff --git a/postgres_15.8.1.044/nix/postgresql/patches/relative-to-symlinks-16+.patch b/postgres_15.8.1.044/nix/postgresql/patches/relative-to-symlinks-16+.patch new file mode 100644 index 0000000..996072e --- /dev/null +++ b/postgres_15.8.1.044/nix/postgresql/patches/relative-to-symlinks-16+.patch @@ -0,0 +1,13 @@ +On NixOS we *want* stuff relative to symlinks. +--- +--- a/src/common/exec.c ++++ b/src/common/exec.c +@@ -238,6 +238,8 @@ + static int + normalize_exec_path(char *path) + { ++ return 0; ++ + /* + * We used to do a lot of work ourselves here, but now we just let + * realpath(3) do all the heavy lifting. diff --git a/postgres_15.8.1.044/nix/postgresql/patches/relative-to-symlinks.patch b/postgres_15.8.1.044/nix/postgresql/patches/relative-to-symlinks.patch new file mode 100644 index 0000000..c9b199b --- /dev/null +++ b/postgres_15.8.1.044/nix/postgresql/patches/relative-to-symlinks.patch @@ -0,0 +1,13 @@ +On NixOS we *want* stuff relative to symlinks. +--- +--- a/src/common/exec.c ++++ b/src/common/exec.c +@@ -218,6 +218,8 @@ + static int + resolve_symlinks(char *path) + { ++ return 0; ++ + #ifdef HAVE_READLINK + struct stat buf; + char orig_wd[MAXPGPATH], diff --git a/postgres_15.8.1.044/nix/postgresql/patches/socketdir-in-run-13+.patch b/postgres_15.8.1.044/nix/postgresql/patches/socketdir-in-run-13+.patch new file mode 100644 index 0000000..fd808b6 --- /dev/null +++ b/postgres_15.8.1.044/nix/postgresql/patches/socketdir-in-run-13+.patch @@ -0,0 +1,11 @@ +--- a/src/include/pg_config_manual.h ++++ b/src/include/pg_config_manual.h +@@ -201,7 +201,7 @@ + * support them yet. + */ + #ifndef WIN32 +-#define DEFAULT_PGSOCKET_DIR "/tmp" ++#define DEFAULT_PGSOCKET_DIR "/run/postgresql" + #else + #define DEFAULT_PGSOCKET_DIR "" + #endif diff --git a/postgres_15.8.1.044/nix/postgresql/patches/socketdir-in-run.patch b/postgres_15.8.1.044/nix/postgresql/patches/socketdir-in-run.patch new file mode 100644 index 0000000..4932ef6 --- /dev/null +++ b/postgres_15.8.1.044/nix/postgresql/patches/socketdir-in-run.patch @@ -0,0 +1,11 @@ +--- a/src/include/pg_config_manual.h ++++ b/src/include/pg_config_manual.h +@@ -179,7 +179,7 @@ + * here's where to twiddle it. You can also override this at runtime + * with the postmaster's -k switch. + */ +-#define DEFAULT_PGSOCKET_DIR "/tmp" ++#define DEFAULT_PGSOCKET_DIR "/run/postgresql" + + /* + * This is the default event source for Windows event log. 
diff --git a/postgres_15.8.1.044/nix/postgresql/patches/specify_pkglibdir_at_runtime.patch b/postgres_15.8.1.044/nix/postgresql/patches/specify_pkglibdir_at_runtime.patch
new file mode 100644
index 0000000..b94fc9e
--- /dev/null
+++ b/postgres_15.8.1.044/nix/postgresql/patches/specify_pkglibdir_at_runtime.patch
@@ -0,0 +1,28 @@
+--- a/src/port/path.c
++++ b/src/port/path.c
+@@ -714,7 +714,11 @@
+ void
+ get_lib_path(const char *my_exec_path, char *ret_path)
+ {
+-	make_relative_path(ret_path, LIBDIR, PGBINDIR, my_exec_path);
++	char const * const nix_pglibdir = getenv("NIX_PGLIBDIR");
++	if(nix_pglibdir == NULL)
++		make_relative_path(ret_path, LIBDIR, PGBINDIR, my_exec_path);
++	else
++		make_relative_path(ret_path, nix_pglibdir, PGBINDIR, my_exec_path);
+ }
+ 
+ /*
+@@ -723,7 +727,11 @@
+ void
+ get_pkglib_path(const char *my_exec_path, char *ret_path)
+ {
+-	make_relative_path(ret_path, PKGLIBDIR, PGBINDIR, my_exec_path);
++	char const * const nix_pglibdir = getenv("NIX_PGLIBDIR");
++	if(nix_pglibdir == NULL)
++		make_relative_path(ret_path, PKGLIBDIR, PGBINDIR, my_exec_path);
++	else
++		make_relative_path(ret_path, nix_pglibdir, PGBINDIR, my_exec_path);
+ }
+ 
+ /*
diff --git a/postgres_15.8.1.044/nix/supabase-groonga.nix b/postgres_15.8.1.044/nix/supabase-groonga.nix
new file mode 100644
index 0000000..410bab0
--- /dev/null
+++ b/postgres_15.8.1.044/nix/supabase-groonga.nix
@@ -0,0 +1,75 @@
+{ lib, stdenv, cmake, fetchurl, kytea, msgpack-c, mecab, pkg-config, rapidjson
+, testers, xxHash, zstd, postgresqlPackages, makeWrapper, suggestSupport ? false
+, zeromq, libevent, openssl, lz4Support ? false, lz4, zlibSupport ? true, zlib
+, writeShellScriptBin, callPackage }:
+let mecab-naist-jdic = callPackage ./ext/mecab-naist-jdic { };
+in stdenv.mkDerivation (finalAttrs: {
+  pname = "supabase-groonga";
+  version = "14.0.5";
+  src = fetchurl {
+    url =
+      "https://packages.groonga.org/source/groonga/groonga-${finalAttrs.version}.tar.gz";
+    hash = "sha256-y4UGnv8kK0z+br8wXpPf57NMXkdEJHcLCuTvYiubnIc=";
+  };
+  patches =
+    [ ./fix-cmake-install-path.patch ./do-not-use-vendored-libraries.patch ];
+  nativeBuildInputs = [ cmake pkg-config makeWrapper ];
+  buildInputs = [ rapidjson xxHash zstd mecab kytea msgpack-c ]
+    ++ lib.optionals lz4Support [ lz4 ] ++ lib.optionals zlibSupport [ zlib ]
+    ++ lib.optionals suggestSupport [ zeromq libevent ];
+  cmakeFlags = [
+    "-DWITH_MECAB=ON"
+    "-DMECAB_DICDIR=${mecab-naist-jdic}/lib/mecab/dic/naist-jdic"
+    "-DMECAB_CONFIG=${mecab}/bin/mecab-config"
+    "-DENABLE_MECAB_TOKENIZER=ON"
+    "-DMECAB_INCLUDE_DIR=${mecab}/include"
+    "-DMECAB_LIBRARY=${mecab}/lib/libmecab.so"
+    "-DGROONGA_ENABLE_TOKENIZER_MECAB=YES"
+    "-DGRN_WITH_MECAB=YES"
+  ];
+  preConfigure = ''
+    export MECAB_DICDIR=${mecab-naist-jdic}/lib/mecab/dic/naist-jdic
+    echo "MeCab dictionary directory is: $MECAB_DICDIR"
+  '';
+  buildPhase = ''
+    cmake --build .
-- VERBOSE=1 + grep -i mecab CMakeCache.txt || (echo "MeCab not detected in CMake cache" && exit 1) + echo "CMake cache contents related to MeCab:" + grep -i mecab CMakeCache.txt + ''; + + # installPhase = '' + # mkdir -p $out/bin $out/lib/groonga/plugins + # cp -r lib/groonga/plugins/* $out/lib/groonga/plugins + # cp -r bin/* $out/bin + # echo "Installed Groonga plugins:" + # ls -l $out/lib/groonga/plugins + # ''; + + postInstall = '' + echo "Searching for MeCab-related files:" + find $out -name "*mecab*" + + echo "Checking Groonga plugins directory:" + ls -l $out/lib/groonga/plugins + + echo "Wrapping Groonga binary:" + wrapProgram $out/bin/groonga \ + --set GRN_PLUGINS_DIR $out/lib/groonga/plugins + + ''; + env.NIX_CFLAGS_COMPILE = + lib.optionalString zlibSupport "-I${zlib.dev}/include"; + + meta = with lib; { + homepage = "https://groonga.org/"; + description = "Open-source fulltext search engine and column store"; + license = licenses.lgpl21; + maintainers = [ maintainers.samrose ]; + platforms = platforms.all; + longDescription = '' + Groonga is an open-source fulltext search engine and column store. + It lets you write high-performance applications that requires fulltext search. + ''; + }; +}) diff --git a/postgres_15.8.1.044/nix/tests/expected/hypopg.out b/postgres_15.8.1.044/nix/tests/expected/hypopg.out new file mode 100644 index 0000000..35c8a5b --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/hypopg.out @@ -0,0 +1,14 @@ +create schema v; +create table v.samp( + id int +); +select 1 from hypopg_create_index($$ + create index on v.samp(id) +$$); + ?column? +---------- + 1 +(1 row) + +drop schema v cascade; +NOTICE: drop cascades to table v.samp diff --git a/postgres_15.8.1.044/nix/tests/expected/index_advisor.out b/postgres_15.8.1.044/nix/tests/expected/index_advisor.out new file mode 100644 index 0000000..5a269ba --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/index_advisor.out @@ -0,0 +1,16 @@ +create schema v; +create table v.book( + id int primary key, + title text not null +); +select + index_statements, errors +from + index_advisor('select id from v.book where title = $1'); + index_statements | errors +------------------------------------------------+-------- + {"CREATE INDEX ON v.book USING btree (title)"} | {} +(1 row) + +drop schema v cascade; +NOTICE: drop cascades to table v.book diff --git a/postgres_15.8.1.044/nix/tests/expected/pg-safeupdate.out b/postgres_15.8.1.044/nix/tests/expected/pg-safeupdate.out new file mode 100644 index 0000000..f910011 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/pg-safeupdate.out @@ -0,0 +1,12 @@ +load 'safeupdate'; +set safeupdate.enabled=1; +create schema v; +create table v.foo( + id int, + val text +); +update v.foo + set val = 'bar'; +ERROR: UPDATE requires a WHERE clause +drop schema v cascade; +NOTICE: drop cascades to table v.foo diff --git a/postgres_15.8.1.044/nix/tests/expected/pg_graphql.out b/postgres_15.8.1.044/nix/tests/expected/pg_graphql.out new file mode 100644 index 0000000..63a3520 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/pg_graphql.out @@ -0,0 +1,259 @@ +begin; + comment on schema public is '@graphql({"inflect_names": true})'; + create table account( + id serial primary key, + email varchar(255) not null, + priority int, + status text default 'active' + ); + create table blog( + id serial primary key, + owner_id integer not null references account(id) + ); + comment on table blog is e'@graphql({"totalCount": {"enabled": true}})'; + -- Make sure functions still work 
+ create function _echo_email(account) + returns text + language sql + as $$ select $1.email $$; + /* + Literals + */ + select graphql.resolve($$ + mutation { + insertIntoAccountCollection(objects: [ + { email: "foo@barsley.com", priority: 1 }, + { email: "bar@foosworth.com" } + ]) { + affectedCount + records { + id + status + echoEmail + blogCollection { + totalCount + } + } + } + } + $$); + resolve +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + {"data": {"insertIntoAccountCollection": {"records": [{"id": 1, "status": "active", "echoEmail": "foo@barsley.com", "blogCollection": {"totalCount": 0}}, {"id": 2, "status": "active", "echoEmail": "bar@foosworth.com", "blogCollection": {"totalCount": 0}}], "affectedCount": 2}}} +(1 row) + + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: [{ + ownerId: 1 + }]) { + records { + id + owner { + id + } + } + } + } + $$); + resolve +-------------------------------------------------------------------------------------- + {"data": {"insertIntoBlogCollection": {"records": [{"id": 1, "owner": {"id": 1}}]}}} +(1 row) + + -- Override a default on status with null + select graphql.resolve($$ + mutation { + insertIntoAccountCollection(objects: [ + { email: "baz@baz.com", status: null }, + ]) { + affectedCount + records { + email + status + } + } + } + $$); + resolve +------------------------------------------------------------------------------------------------------------------------ + {"data": {"insertIntoAccountCollection": {"records": [{"email": "baz@baz.com", "status": null}], "affectedCount": 1}}} +(1 row) + + /* + Variables + */ + select graphql.resolve($$ + mutation newAccount($emailAddress: String) { + xyz: insertIntoAccountCollection(objects: [ + { email: $emailAddress }, + { email: "other@email.com" } + ]) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"emailAddress": "foo@bar.com"}'::jsonb + ); + resolve +-------------------------------------------------------------------------------------------------------------------------------- + {"data": {"xyz": {"records": [{"id": 4, "email": "foo@bar.com"}, {"id": 5, "email": "other@email.com"}], "affectedCount": 2}}} +(1 row) + + -- Variable override of default with null results in null + select graphql.resolve($$ + mutation newAccount($status: String) { + xyz: insertIntoAccountCollection(objects: [ + { email: "1@email.com", status: $status} + ]) { + affectedCount + records { + email + status + } + } + } + $$, + variables := '{"status": null}'::jsonb + ); + resolve +------------------------------------------------------------------------------------------------ + {"data": {"xyz": {"records": [{"email": "1@email.com", "status": null}], "affectedCount": 1}}} +(1 row) + + -- Skipping variable override of default results in default + select graphql.resolve($$ + mutation newAccount($status: String) { + xyz: insertIntoAccountCollection(objects: [ + { email: "x@y.com", status: $status}, + ]) { + affectedCount + records { + email + status + } + } + } + $$, + variables := '{}'::jsonb + ); + resolve +------------------------------------------------------------------------------------------------ + {"data": {"xyz": {"records": [{"email": "x@y.com", "status": "active"}], "affectedCount": 1}}} +(1 row) + + select graphql.resolve($$ 
+ mutation newAccount($acc: AccountInsertInput!) { + insertIntoAccountCollection(objects: [$acc]) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"acc": {"email": "bar@foo.com"}}'::jsonb + ); + resolve +----------------------------------------------------------------------------------------------------------------- + {"data": {"insertIntoAccountCollection": {"records": [{"id": 8, "email": "bar@foo.com"}], "affectedCount": 1}}} +(1 row) + + select graphql.resolve($$ + mutation newAccounts($acc: [AccountInsertInput!]!) { + insertIntoAccountCollection(objects: $accs) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"accs": [{"email": "bar@foo.com"}]}'::jsonb + ); + resolve +----------------------------------------------------------------------------------------------------------------- + {"data": {"insertIntoAccountCollection": {"records": [{"id": 9, "email": "bar@foo.com"}], "affectedCount": 1}}} +(1 row) + + -- Single object coerces to a list + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: {ownerId: 1}) { + affectedCount + } + } + $$); + resolve +-------------------------------------------------------------- + {"data": {"insertIntoBlogCollection": {"affectedCount": 1}}} +(1 row) + + /* + Errors + */ + -- Field does not exist + select graphql.resolve($$ + mutation createAccount($acc: AccountInsertInput) { + insertIntoAccountCollection(objects: [$acc]) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"acc": {"doesNotExist": "other"}}'::jsonb + ); + resolve +--------------------------------------------------------------------------------------------------------------------- + {"data": null, "errors": [{"message": "Input for type AccountInsertInput contains extra keys [\"doesNotExist\"]"}]} +(1 row) + + -- Wrong input type (list of string, not list of object) + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: ["not an object"]) { + affectedCount + } + } + $$); + resolve +----------------------------------------------------------------------------------- + {"data": null, "errors": [{"message": "Invalid input for BlogInsertInput type"}]} +(1 row) + + -- objects argument is missing + select graphql.resolve($$ + mutation { + insertIntoBlogCollection { + affectedCount + } + } + $$); + resolve +--------------------------------------------------------------------------- + {"data": null, "errors": [{"message": "Invalid input for NonNull type"}]} +(1 row) + + -- Empty call + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: []) { + affectedCount + } + } + $$); + resolve +-------------------------------------------------------------------------------------------- + {"data": null, "errors": [{"message": "At least one record must be provided to objects"}]} +(1 row) + +rollback; diff --git a/postgres_15.8.1.044/nix/tests/expected/pg_hashids.out b/postgres_15.8.1.044/nix/tests/expected/pg_hashids.out new file mode 100644 index 0000000..393218e --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/pg_hashids.out @@ -0,0 +1,36 @@ +select id_encode(1001); -- Result: jNl + id_encode +----------- + jNl +(1 row) + +select id_encode(1234567, 'This is my salt'); -- Result: Pdzxp + id_encode +----------- + Pdzxp +(1 row) + +select id_encode(1234567, 'This is my salt', 10); -- Result: PlRPdzxpR7 + id_encode +------------ + PlRPdzxpR7 +(1 row) + +select id_encode(1234567, 'This is my salt', 10, 'abcdefghijABCDxFGHIJ1234567890'); -- Result: 
3GJ956J9B9 + id_encode +------------ + 3GJ956J9B9 +(1 row) + +select id_decode('PlRPdzxpR7', 'This is my salt', 10); -- Result: 1234567 + id_decode +----------- + {1234567} +(1 row) + +select id_decode('3GJ956J9B9', 'This is my salt', 10, 'abcdefghijABCDxFGHIJ1234567890'); -- Result: 1234567 + id_decode +----------- + {1234567} +(1 row) + diff --git a/postgres_15.8.1.044/nix/tests/expected/pg_jsonschema.out b/postgres_15.8.1.044/nix/tests/expected/pg_jsonschema.out new file mode 100644 index 0000000..c291141 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/pg_jsonschema.out @@ -0,0 +1,73 @@ +begin; +-- Test json_matches_schema +create table customer( + id serial primary key, + metadata json, + check ( + json_matches_schema( + '{ + "type": "object", + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string", + "maxLength": 16 + } + } + } + }', + metadata + ) + ) +); +insert into customer(metadata) +values ('{"tags": ["vip", "darkmode-ui"]}'); +-- Test jsonb_matches_schema +select + jsonb_matches_schema( + '{ + "type": "object", + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string", + "maxLength": 16 + } + } + } + }', + '{"tags": ["vip", "darkmode-ui"]}'::jsonb +); + jsonb_matches_schema +---------------------- + t +(1 row) + +-- Test jsonschema_is_valid +select + jsonschema_is_valid( + '{ + "type": "object", + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string", + "maxLength": 16 + } + } + } + }'); + jsonschema_is_valid +--------------------- + t +(1 row) + +-- Test invalid payload +insert into customer(metadata) +values ('{"tags": [1, 3]}'); +ERROR: new row for relation "customer" violates check constraint "customer_metadata_check" +DETAIL: Failing row contains (2, {"tags": [1, 3]}). +rollback; diff --git a/postgres_15.8.1.044/nix/tests/expected/pg_net.out b/postgres_15.8.1.044/nix/tests/expected/pg_net.out new file mode 100644 index 0000000..6b3ca5b --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/pg_net.out @@ -0,0 +1,11 @@ +-- This is a very basic test because you can't get the value returned +-- by a pg_net request in the same transaction that created it; +select + net.http_get ( + 'https://postman-echo.com/get?foo1=bar1&foo2=bar2' + ) as request_id; + request_id +------------ + 1 +(1 row) + diff --git a/postgres_15.8.1.044/nix/tests/expected/pg_plan_filter.out b/postgres_15.8.1.044/nix/tests/expected/pg_plan_filter.out new file mode 100644 index 0000000..4bdcd65 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/pg_plan_filter.out @@ -0,0 +1,16 @@ +begin; + load 'plan_filter'; + create schema v; + -- create a sample table + create table v.test_table ( + id serial primary key, + data text + ); + -- insert some test data + insert into v.test_table (data) + values ('sample1'), ('sample2'), ('sample3'); + set local plan_filter.statement_cost_limit = 0.001; + select * from v.test_table; +ERROR: plan cost limit exceeded +HINT: The plan for your query shows that it would probably have an excessive run time. This may be due to a logic error in the SQL, or it maybe just a very costly query. Rewrite your query or increase the configuration parameter "plan_filter.statement_cost_limit". 
+rollback; diff --git a/postgres_15.8.1.044/nix/tests/expected/pg_tle.out b/postgres_15.8.1.044/nix/tests/expected/pg_tle.out new file mode 100644 index 0000000..387e63e --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/pg_tle.out @@ -0,0 +1,92 @@ +set client_min_messages = warning; +select + pgtle.install_extension( + 'pg_distance', + '0.1', + 'Distance functions for two points', + $_pg_tle_$ + CREATE FUNCTION dist(x1 float8, y1 float8, x2 float8, y2 float8, norm int) + RETURNS float8 + AS $$ + SELECT (abs(x2 - x1) ^ norm + abs(y2 - y1) ^ norm) ^ (1::float8 / norm); + $$ LANGUAGE SQL; + + CREATE FUNCTION manhattan_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 1); + $$ LANGUAGE SQL; + + CREATE FUNCTION euclidean_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 2); + $$ LANGUAGE SQL; + $_pg_tle_$ + ); + install_extension +------------------- + t +(1 row) + +create extension pg_distance; +select manhattan_dist(1, 1, 5, 5)::numeric(10,2); + manhattan_dist +---------------- + 8.00 +(1 row) + +select euclidean_dist(1, 1, 5, 5)::numeric(10,2); + euclidean_dist +---------------- + 5.66 +(1 row) + +SELECT pgtle.install_update_path( + 'pg_distance', + '0.1', + '0.2', + $_pg_tle_$ + CREATE OR REPLACE FUNCTION dist(x1 float8, y1 float8, x2 float8, y2 float8, norm int) + RETURNS float8 + AS $$ + SELECT (abs(x2 - x1) ^ norm + abs(y2 - y1) ^ norm) ^ (1::float8 / norm); + $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; + + CREATE OR REPLACE FUNCTION manhattan_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 1); + $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; + + CREATE OR REPLACE FUNCTION euclidean_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 2); + $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; + $_pg_tle_$ + ); + install_update_path +--------------------- + t +(1 row) + +select + pgtle.set_default_version('pg_distance', '0.2'); + set_default_version +--------------------- + t +(1 row) + +alter extension pg_distance update; +drop extension pg_distance; +select + pgtle.uninstall_extension('pg_distance'); + uninstall_extension +--------------------- + t +(1 row) + +-- Restore original state if any of the above fails +drop extension pg_tle cascade; +create extension pg_tle; diff --git a/postgres_15.8.1.044/nix/tests/expected/pgaudit.out b/postgres_15.8.1.044/nix/tests/expected/pgaudit.out new file mode 100644 index 0000000..1937be6 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/pgaudit.out @@ -0,0 +1,24 @@ +-- Note: there is no test that the logs were correctly output. 
Only checking for exceptions +set pgaudit.log = 'write, ddl'; +set pgaudit.log_relation = on; +set pgaudit.log_level = notice; +create schema v; +create table v.account( + id int, + name text, + password text, + description text +); +insert into v.account (id, name, password, description) +values (1, 'user1', 'HASH1', 'blah, blah'); +select + * +from + v.account; + id | name | password | description +----+-------+----------+------------- + 1 | user1 | HASH1 | blah, blah +(1 row) + +drop schema v cascade; +NOTICE: drop cascades to table v.account diff --git a/postgres_15.8.1.044/nix/tests/expected/pgjwt.out b/postgres_15.8.1.044/nix/tests/expected/pgjwt.out new file mode 100644 index 0000000..4e4500f --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/pgjwt.out @@ -0,0 +1,22 @@ +select + sign( + payload := '{"sub":"1234567890","name":"John Doe","iat":1516239022}', + secret := 'secret', + algorithm := 'HS256' + ); + sign +------------------------------------------------------------------------------------------------------------------------------------------------------------- + eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.XbPfbIHMI6arZ3Y922BhjWgQzWXcXNrz0ogtVhfEd2o +(1 row) + +select + verify( + token := 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoiRm9vIn0.Q8hKjuadCEhnCPuqIj9bfLhTh_9QSxshTRsA5Aq4IuM', + secret := 'secret', + algorithm := 'HS256' + ); + verify +---------------------------------------------------------------- + ("{""alg"":""HS256"",""typ"":""JWT""}","{""name"":""Foo""}",t) +(1 row) + diff --git a/postgres_15.8.1.044/nix/tests/expected/pgmq.out b/postgres_15.8.1.044/nix/tests/expected/pgmq.out new file mode 100644 index 0000000..9fb1819 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/pgmq.out @@ -0,0 +1,141 @@ +-- Test the standard flow +select + pgmq.create('Foo'); + create +-------- + +(1 row) + +select + * +from + pgmq.send( + queue_name:='Foo', + msg:='{"foo": "bar1"}' + ); + send +------ + 1 +(1 row) + +-- Test queue is not case sensitive +select + * +from + pgmq.send( + queue_name:='foo', -- note: lowercase useage + msg:='{"foo": "bar2"}', + delay:=5 + ); + send +------ + 2 +(1 row) + +select + msg_id, + read_ct, + message +from + pgmq.read( + queue_name:='Foo', + vt:=30, + qty:=2 + ); + msg_id | read_ct | message +--------+---------+----------------- + 1 | 1 | {"foo": "bar1"} +(1 row) + +select + msg_id, + read_ct, + message +from + pgmq.pop('Foo'); + msg_id | read_ct | message +--------+---------+--------- +(0 rows) + +-- Archive message with msg_id=2. 
+select + pgmq.archive( + queue_name:='Foo', + msg_id:=2 + ); + archive +--------- + t +(1 row) + +select + pgmq.create('my_queue'); + create +-------- + +(1 row) + +select + pgmq.send_batch( + queue_name:='my_queue', + msgs:=array['{"foo": "bar3"}','{"foo": "bar4"}','{"foo": "bar5"}']::jsonb[] +); + send_batch +------------ + 1 + 2 + 3 +(3 rows) + +select + pgmq.archive( + queue_name:='my_queue', + msg_ids:=array[3, 4, 5] + ); + archive +--------- + 3 +(1 row) + +select + pgmq.delete('my_queue', 6); + delete +-------- + f +(1 row) + +select + pgmq.drop_queue('my_queue'); + drop_queue +------------ + t +(1 row) + +/* +-- Disabled until pg_partman goes back into the image +select + pgmq.create_partitioned( + 'my_partitioned_queue', + '5 seconds', + '10 seconds' +); +*/ +-- Make sure SQLI enabling characters are blocked +select pgmq.create('F--oo'); +ERROR: queue name contains invalid characters: $, ;, --, or \' +CONTEXT: PL/pgSQL function pgmq.format_table_name(text,text) line 5 at RAISE +PL/pgSQL function pgmq.create_non_partitioned(text) line 3 during statement block local variable initialization +SQL statement "SELECT pgmq.create_non_partitioned(queue_name)" +PL/pgSQL function pgmq."create"(text) line 3 at PERFORM +select pgmq.create('F$oo'); +ERROR: queue name contains invalid characters: $, ;, --, or \' +CONTEXT: PL/pgSQL function pgmq.format_table_name(text,text) line 5 at RAISE +PL/pgSQL function pgmq.create_non_partitioned(text) line 3 during statement block local variable initialization +SQL statement "SELECT pgmq.create_non_partitioned(queue_name)" +PL/pgSQL function pgmq."create"(text) line 3 at PERFORM +select pgmq.create($$F'oo$$); +ERROR: queue name contains invalid characters: $, ;, --, or \' +CONTEXT: PL/pgSQL function pgmq.format_table_name(text,text) line 5 at RAISE +PL/pgSQL function pgmq.create_non_partitioned(text) line 3 during statement block local variable initialization +SQL statement "SELECT pgmq.create_non_partitioned(queue_name)" +PL/pgSQL function pgmq."create"(text) line 3 at PERFORM diff --git a/postgres_15.8.1.044/nix/tests/expected/pgrouting.out b/postgres_15.8.1.044/nix/tests/expected/pgrouting.out new file mode 100644 index 0000000..2362a72 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/pgrouting.out @@ -0,0 +1,31 @@ +create schema v; +-- create the roads table +create table v.roads ( + id serial primary key, + source integer, + target integer, + cost double precision +); +-- insert sample data into roads table +insert into v.roads (source, target, cost) values +(1, 2, 1.0), +(2, 3, 1.0), +(3, 4, 1.0), +(1, 3, 2.5), +(3, 5, 2.0); +-- create a function to use pgRouting to find the shortest path +select * from pgr_dijkstra( + 'select id, source, target, cost from v.roads', + 1, -- start node + 4 -- end node +); + seq | path_seq | node | edge | cost | agg_cost +-----+----------+------+------+------+---------- + 1 | 1 | 1 | 1 | 1 | 0 + 2 | 2 | 2 | 2 | 1 | 1 + 3 | 3 | 3 | 3 | 1 | 2 + 4 | 4 | 4 | -1 | 0 | 3 +(4 rows) + +drop schema v cascade; +NOTICE: drop cascades to table v.roads diff --git a/postgres_15.8.1.044/nix/tests/expected/pgsodium.out b/postgres_15.8.1.044/nix/tests/expected/pgsodium.out new file mode 100644 index 0000000..418bf2d --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/pgsodium.out @@ -0,0 +1,9 @@ +select + status +from + pgsodium.create_key(); + status +-------- + valid +(1 row) + diff --git a/postgres_15.8.1.044/nix/tests/expected/pgtap.out b/postgres_15.8.1.044/nix/tests/expected/pgtap.out new file mode 100644 index 
0000000..272d838 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/pgtap.out @@ -0,0 +1,21 @@ +begin; +select plan(1); + plan +------ + 1..1 +(1 row) + +-- Run the tests. +select pass( 'My test passed, w00t!' ); + pass +------------------------------ + ok 1 - My test passed, w00t! +(1 row) + +-- Finish the tests and clean up. +select * from finish(); + finish +-------- +(0 rows) + +rollback; diff --git a/postgres_15.8.1.044/nix/tests/expected/plpgsql-check.out b/postgres_15.8.1.044/nix/tests/expected/plpgsql-check.out new file mode 100644 index 0000000..2b5bf82 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/plpgsql-check.out @@ -0,0 +1,35 @@ +create schema v; +create table v.t1( + a int, + b int +); +create or replace function v.f1() + returns void + language plpgsql +as $$ +declare r record; +begin + for r in select * from v.t1 + loop + raise notice '%', r.c; -- there is bug - table t1 missing "c" column + end loop; +end; +$$; +select * from v.f1(); + f1 +---- + +(1 row) + +-- use plpgsql_check_function to check the function for errors +select * from plpgsql_check_function('v.f1()'); + plpgsql_check_function +------------------------------------------------- + error:42703:6:RAISE:record "r" has no field "c" + Context: SQL expression "r.c" +(2 rows) + +drop schema v cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table v.t1 +drop cascades to function v.f1() diff --git a/postgres_15.8.1.044/nix/tests/expected/postgis.out b/postgres_15.8.1.044/nix/tests/expected/postgis.out new file mode 100644 index 0000000..53194d8 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/postgis.out @@ -0,0 +1,59 @@ +create schema v; +-- create a table to store geographic points +create table v.places ( + id serial primary key, + name text, + geom geometry(point, 4326) -- using WGS 84 coordinate system +); +-- insert some sample geographic points into the places table +insert into v.places (name, geom) +values + ('place_a', st_setsrid(st_makepoint(-73.9857, 40.7484), 4326)), -- latitude and longitude for a location + ('place_b', st_setsrid(st_makepoint(-74.0060, 40.7128), 4326)), -- another location + ('place_c', st_setsrid(st_makepoint(-73.9687, 40.7851), 4326)); -- yet another location +-- calculate the distance between two points (in meters) +select + a.name as place_a, + b.name as place_b, + st_distance(a.geom::geography, b.geom::geography) as distance_meters +from + v.places a, + v.places b +where + a.name = 'place_a' + and b.name = 'place_b'; + place_a | place_b | distance_meters +---------+---------+----------------- + place_a | place_b | 4309.25283351 +(1 row) + +-- find all places within a 5km radius of 'place_a' +select + name, + st_distance( + geom::geography, + ( + select + geom + from + v.places + where + name = 'place_a' + )::geography) as distance_meters +from + v.places +where + st_dwithin( + geom::geography, + (select geom from v.places where name = 'place_a')::geography, + 5000 + ) + and name != 'place_a'; + name | distance_meters +---------+----------------- + place_b | 4309.25283351 + place_c | 4320.8765634 +(2 rows) + +drop schema v cascade; +NOTICE: drop cascades to table v.places diff --git a/postgres_15.8.1.044/nix/tests/expected/vault.out b/postgres_15.8.1.044/nix/tests/expected/vault.out new file mode 100644 index 0000000..e4eaff2 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/vault.out @@ -0,0 +1,42 @@ +select + 1 +from + vault.create_secret('my_s3kre3t'); + ?column? 
+---------- + 1 +(1 row) + +select + 1 +from + vault.create_secret( + 'another_s3kre3t', + 'unique_name', + 'This is the description' + ); + ?column? +---------- + 1 +(1 row) + +insert into vault.secrets (secret) +values + ('s3kre3t_k3y'); +select + name, + description +from + vault.decrypted_secrets +order by + created_at desc +limit + 3; + name | description +-------------+------------------------- + | + unique_name | This is the description + | +(3 rows) + + diff --git a/postgres_15.8.1.044/nix/tests/expected/wal2json.out b/postgres_15.8.1.044/nix/tests/expected/wal2json.out new file mode 100644 index 0000000..6edc359 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/wal2json.out @@ -0,0 +1,42 @@ +create schema v; +create table v.foo( + id int primary key +); +select + 1 +from + pg_create_logical_replication_slot('reg_test', 'wal2json', false); + ?column? +---------- + 1 +(1 row) + +insert into v.foo(id) values (1); +select + data +from + pg_logical_slot_get_changes( + 'reg_test', + null, + null, + 'include-pk', '1', + 'include-transaction', 'false', + 'include-timestamp', 'false', + 'include-type-oids', 'false', + 'format-version', '2', + 'actions', 'insert,update,delete' + ) x; + data +-------------------------------------------------------------------------------------------------------------------------------------- + {"action":"I","schema":"v","table":"foo","columns":[{"name":"id","type":"integer","value":1}],"pk":[{"name":"id","type":"integer"}]} +(1 row) + +select + pg_drop_replication_slot('reg_test'); + pg_drop_replication_slot +-------------------------- + +(1 row) + +drop schema v cascade; +NOTICE: drop cascades to table v.foo diff --git a/postgres_15.8.1.044/nix/tests/expected/z_15_ext_interface.out b/postgres_15.8.1.044/nix/tests/expected/z_15_ext_interface.out new file mode 100644 index 0000000..9914fa3 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/z_15_ext_interface.out @@ -0,0 +1,6361 @@ +/* + +The purpose of this test is to monitor the SQL interface exposed +by Postgres extensions so we have to manually review/approve any difference +that emerge as versions change. 
+ +*/ +/* + +List all extensions that are not enabled +If a new entry shows up in this list, that means a new extension has been +added and you should `create extension ...` to enable it in ./nix/tests/prime + +*/ +create extension if not exists adminpack; +create extension if not exists plv8; +create extension if not exists plcoffee; +create extension if not exists plls; +create extension if not exists old_snapshot; +create extension if not exists timescaledb; +create extension if not exists postgis_tiger_geocoder; +select + name +from + pg_available_extensions +where + installed_version is null +order by + name asc; + name +----------------- + pg_cron + tsm_system_time +(2 rows) + +/* + +Monitor relocatability and config of each extension +- lesson learned from pg_cron + +*/ +select + extname as extension_name, + extrelocatable as is_relocatable +from + pg_extension +order by + extname asc; + extension_name | is_relocatable +------------------------------+---------------- + address_standardizer | t + address_standardizer_data_us | t + adminpack | f + amcheck | t + autoinc | t + bloom | t + btree_gin | t + btree_gist | t + citext | t + cube | t + dblink | t + dict_int | t + dict_xsyn | t + earthdistance | t + file_fdw | t + fuzzystrmatch | t + hstore | t + http | f + hypopg | t + index_advisor | t + insert_username | t + intagg | t + intarray | t + isn | t + lo | t + ltree | t + moddatetime | t + old_snapshot | t + pageinspect | t + pg_backtrace | t + pg_buffercache | t + pg_freespacemap | t + pg_graphql | f + pg_hashids | t + pg_jsonschema | f + pg_net | f + pg_prewarm | t + pg_repack | f + pg_stat_monitor | t + pg_stat_statements | t + pg_surgery | t + pg_tle | f + pg_trgm | t + pg_visibility | t + pg_walinspect | t + pgaudit | t + pgcrypto | t + pgjwt | f + pgmq | f + pgroonga | f + pgroonga_database | f + pgrouting | t + pgrowlocks | t + pgsodium | f + pgstattuple | t + pgtap | t + plcoffee | f + plls | f + plpgsql | f + plpgsql_check | f + plv8 | f + postgis | f + postgis_raster | f + postgis_sfcgal | t + postgis_tiger_geocoder | f + postgis_topology | f + postgres_fdw | t + refint | t + rum | t + seg | t + sslinfo | t + supabase_vault | f + tablefunc | t + tcn | t + timescaledb | f + tsm_system_rows | t + unaccent | t + uuid-ossp | t + vector | t + wrappers | f + xml2 | f +(81 rows) + +/* + +Monitor extension public function interface + +*/ +select + e.extname as extension_name, + n.nspname as schema_name, + p.proname as function_name, + pg_catalog.pg_get_function_identity_arguments(p.oid) as argument_types, + pg_catalog.pg_get_function_result(p.oid) as return_type +from + pg_catalog.pg_proc p + join pg_catalog.pg_namespace n + on n.oid = p.pronamespace + join pg_catalog.pg_depend d + on d.objid = p.oid + join pg_catalog.pg_extension e + on e.oid = d.refobjid +where + d.deptype = 'e' + -- Filter out changes between pg15 and pg16 from extensions that ship with postgres + -- new in pg16 + and not (e.extname = 'fuzzystrmatch' and p.proname = 'daitch_mokotoff') + and not (e.extname = 'pageinspect' and p.proname = 'bt_multi_page_stats') + and not (e.extname = 'pg_buffercache' and p.proname = 'pg_buffercache_summary') + and not (e.extname = 'pg_buffercache' and p.proname = 'pg_buffercache_usage_counts') + and not (e.extname = 'pg_walinspect' and p.proname = 'pg_get_wal_block_info') + -- removed in pg16 + and not (e.extname = 'pg_walinspect' and p.proname = 'pg_get_wal_records_info_till_end_of_wal') + and not (e.extname = 'pg_walinspect' and p.proname = 'pg_get_wal_stats_till_end_of_wal') 
+ -- changed in pg16 - output signature added a column + and not (e.extname = 'pageinspect' and p.proname = 'brin_page_items') +order by + e.extname, + n.nspname, + p.proname, + md5(pg_catalog.pg_get_function_identity_arguments(p.oid)); + extension_name | schema_name | function_name | argument_types | return_type +------------------------+--------------------------+--------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + address_standardizer | public | parse_address | text, OUT num text, OUT street text, OUT street2 text, OUT address1 text, OUT city text, OUT state text, OUT zip text, OUT zipplus text, OUT country text | record + address_standardizer | public | standardize_address | lextab text, gaztab text, rultab text, address text | stdaddr + address_standardizer | public | standardize_address | lextab text, gaztab text, rultab text, micro text, macro text | stdaddr + adminpack | pg_catalog | pg_file_rename | text, text | boolean + adminpack | pg_catalog | pg_file_rename | text, text, text | boolean + adminpack | pg_catalog | pg_file_sync | text | void 
+ adminpack | pg_catalog | pg_file_unlink | text | boolean + adminpack | pg_catalog | pg_file_write | text, text, boolean | bigint + adminpack | pg_catalog | pg_logdir_ls | | SETOF record + amcheck | public | bt_index_check | index regclass | void + amcheck | public | bt_index_check | index regclass, heapallindexed boolean | void + amcheck | public | bt_index_parent_check | index regclass | void + amcheck | public | bt_index_parent_check | index regclass, heapallindexed boolean | void + amcheck | public | bt_index_parent_check | index regclass, heapallindexed boolean, rootdescend boolean | void + amcheck | public | verify_heapam | relation regclass, on_error_stop boolean, check_toast boolean, skip text, startblock bigint, endblock bigint, OUT blkno bigint, OUT offnum integer, OUT attnum integer, OUT msg text | SETOF record + autoinc | public | autoinc | | trigger + bloom | public | blhandler | internal | index_am_handler + btree_gin | public | gin_btree_consistent | internal, smallint, anyelement, integer, internal, internal | boolean + btree_gin | public | gin_compare_prefix_anyenum | anyenum, anyenum, smallint, internal | integer + btree_gin | public | gin_compare_prefix_bit | bit, bit, smallint, internal | integer + btree_gin | public | gin_compare_prefix_bool | boolean, boolean, smallint, internal | integer + btree_gin | public | gin_compare_prefix_bpchar | character, character, smallint, internal | integer + btree_gin | public | gin_compare_prefix_bytea | bytea, bytea, smallint, internal | integer + btree_gin | public | gin_compare_prefix_char | "char", "char", smallint, internal | integer + btree_gin | public | gin_compare_prefix_cidr | cidr, cidr, smallint, internal | integer + btree_gin | public | gin_compare_prefix_date | date, date, smallint, internal | integer + btree_gin | public | gin_compare_prefix_float4 | real, real, smallint, internal | integer + btree_gin | public | gin_compare_prefix_float8 | double precision, double precision, smallint, internal | integer + btree_gin | public | gin_compare_prefix_inet | inet, inet, smallint, internal | integer + btree_gin | public | gin_compare_prefix_int2 | smallint, smallint, smallint, internal | integer + btree_gin | public | gin_compare_prefix_int4 | integer, integer, smallint, internal | integer + btree_gin | public | gin_compare_prefix_int8 | bigint, bigint, smallint, internal | integer + btree_gin | public | gin_compare_prefix_interval | interval, interval, smallint, internal | integer + btree_gin | public | gin_compare_prefix_macaddr | macaddr, macaddr, smallint, internal | integer + btree_gin | public | gin_compare_prefix_macaddr8 | macaddr8, macaddr8, smallint, internal | integer + btree_gin | public | gin_compare_prefix_money | money, money, smallint, internal | integer + btree_gin | public | gin_compare_prefix_name | name, name, smallint, internal | integer + btree_gin | public | gin_compare_prefix_numeric | numeric, numeric, smallint, internal | integer + btree_gin | public | gin_compare_prefix_oid | oid, oid, smallint, internal | integer + btree_gin | public | gin_compare_prefix_text | text, text, smallint, internal | integer + btree_gin | public | gin_compare_prefix_time | time without time zone, time without time zone, smallint, internal | integer + btree_gin | public | gin_compare_prefix_timestamp | timestamp without time zone, timestamp without time zone, smallint, internal | integer + btree_gin | public | gin_compare_prefix_timestamptz | timestamp with time zone, timestamp with time zone, smallint, internal | integer 
+ btree_gin | public | gin_compare_prefix_timetz | time with time zone, time with time zone, smallint, internal | integer + btree_gin | public | gin_compare_prefix_uuid | uuid, uuid, smallint, internal | integer + btree_gin | public | gin_compare_prefix_varbit | bit varying, bit varying, smallint, internal | integer + btree_gin | public | gin_enum_cmp | anyenum, anyenum | integer + btree_gin | public | gin_extract_query_anyenum | anyenum, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_bit | bit, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_bool | boolean, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_bpchar | character, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_bytea | bytea, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_char | "char", internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_cidr | cidr, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_date | date, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_float4 | real, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_float8 | double precision, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_inet | inet, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_int2 | smallint, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_int4 | integer, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_int8 | bigint, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_interval | interval, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_macaddr | macaddr, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_macaddr8 | macaddr8, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_money | money, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_name | name, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_numeric | numeric, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_oid | oid, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_text | text, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_time | time without time zone, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_timestamp | timestamp without time zone, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_timestamptz | timestamp with time zone, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_timetz | time with time zone, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_uuid | uuid, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_varbit | bit varying, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_value_anyenum | anyenum, internal | internal + 
btree_gin | public | gin_extract_value_bit | bit, internal | internal + btree_gin | public | gin_extract_value_bool | boolean, internal | internal + btree_gin | public | gin_extract_value_bpchar | character, internal | internal + btree_gin | public | gin_extract_value_bytea | bytea, internal | internal + btree_gin | public | gin_extract_value_char | "char", internal | internal + btree_gin | public | gin_extract_value_cidr | cidr, internal | internal + btree_gin | public | gin_extract_value_date | date, internal | internal + btree_gin | public | gin_extract_value_float4 | real, internal | internal + btree_gin | public | gin_extract_value_float8 | double precision, internal | internal + btree_gin | public | gin_extract_value_inet | inet, internal | internal + btree_gin | public | gin_extract_value_int2 | smallint, internal | internal + btree_gin | public | gin_extract_value_int4 | integer, internal | internal + btree_gin | public | gin_extract_value_int8 | bigint, internal | internal + btree_gin | public | gin_extract_value_interval | interval, internal | internal + btree_gin | public | gin_extract_value_macaddr | macaddr, internal | internal + btree_gin | public | gin_extract_value_macaddr8 | macaddr8, internal | internal + btree_gin | public | gin_extract_value_money | money, internal | internal + btree_gin | public | gin_extract_value_name | name, internal | internal + btree_gin | public | gin_extract_value_numeric | numeric, internal | internal + btree_gin | public | gin_extract_value_oid | oid, internal | internal + btree_gin | public | gin_extract_value_text | text, internal | internal + btree_gin | public | gin_extract_value_time | time without time zone, internal | internal + btree_gin | public | gin_extract_value_timestamp | timestamp without time zone, internal | internal + btree_gin | public | gin_extract_value_timestamptz | timestamp with time zone, internal | internal + btree_gin | public | gin_extract_value_timetz | time with time zone, internal | internal + btree_gin | public | gin_extract_value_uuid | uuid, internal | internal + btree_gin | public | gin_extract_value_varbit | bit varying, internal | internal + btree_gin | public | gin_numeric_cmp | numeric, numeric | integer + btree_gist | public | cash_dist | money, money | money + btree_gist | public | date_dist | date, date | integer + btree_gist | public | float4_dist | real, real | real + btree_gist | public | float8_dist | double precision, double precision | double precision + btree_gist | public | gbt_bit_compress | internal | internal + btree_gist | public | gbt_bit_consistent | internal, bit, smallint, oid, internal | boolean + btree_gist | public | gbt_bit_penalty | internal, internal, internal | internal + btree_gist | public | gbt_bit_picksplit | internal, internal | internal + btree_gist | public | gbt_bit_same | gbtreekey_var, gbtreekey_var, internal | internal + btree_gist | public | gbt_bit_union | internal, internal | gbtreekey_var + btree_gist | public | gbt_bool_compress | internal | internal + btree_gist | public | gbt_bool_consistent | internal, boolean, smallint, oid, internal | boolean + btree_gist | public | gbt_bool_fetch | internal | internal + btree_gist | public | gbt_bool_penalty | internal, internal, internal | internal + btree_gist | public | gbt_bool_picksplit | internal, internal | internal + btree_gist | public | gbt_bool_same | gbtreekey2, gbtreekey2, internal | internal + btree_gist | public | gbt_bool_union | internal, internal | gbtreekey2 + btree_gist | public | gbt_bpchar_compress | 
internal | internal + btree_gist | public | gbt_bpchar_consistent | internal, character, smallint, oid, internal | boolean + btree_gist | public | gbt_bytea_compress | internal | internal + btree_gist | public | gbt_bytea_consistent | internal, bytea, smallint, oid, internal | boolean + btree_gist | public | gbt_bytea_penalty | internal, internal, internal | internal + btree_gist | public | gbt_bytea_picksplit | internal, internal | internal + btree_gist | public | gbt_bytea_same | gbtreekey_var, gbtreekey_var, internal | internal + btree_gist | public | gbt_bytea_union | internal, internal | gbtreekey_var + btree_gist | public | gbt_cash_compress | internal | internal + btree_gist | public | gbt_cash_consistent | internal, money, smallint, oid, internal | boolean + btree_gist | public | gbt_cash_distance | internal, money, smallint, oid, internal | double precision + btree_gist | public | gbt_cash_fetch | internal | internal + btree_gist | public | gbt_cash_penalty | internal, internal, internal | internal + btree_gist | public | gbt_cash_picksplit | internal, internal | internal + btree_gist | public | gbt_cash_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_cash_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_date_compress | internal | internal + btree_gist | public | gbt_date_consistent | internal, date, smallint, oid, internal | boolean + btree_gist | public | gbt_date_distance | internal, date, smallint, oid, internal | double precision + btree_gist | public | gbt_date_fetch | internal | internal + btree_gist | public | gbt_date_penalty | internal, internal, internal | internal + btree_gist | public | gbt_date_picksplit | internal, internal | internal + btree_gist | public | gbt_date_same | gbtreekey8, gbtreekey8, internal | internal + btree_gist | public | gbt_date_union | internal, internal | gbtreekey8 + btree_gist | public | gbt_decompress | internal | internal + btree_gist | public | gbt_enum_compress | internal | internal + btree_gist | public | gbt_enum_consistent | internal, anyenum, smallint, oid, internal | boolean + btree_gist | public | gbt_enum_fetch | internal | internal + btree_gist | public | gbt_enum_penalty | internal, internal, internal | internal + btree_gist | public | gbt_enum_picksplit | internal, internal | internal + btree_gist | public | gbt_enum_same | gbtreekey8, gbtreekey8, internal | internal + btree_gist | public | gbt_enum_union | internal, internal | gbtreekey8 + btree_gist | public | gbt_float4_compress | internal | internal + btree_gist | public | gbt_float4_consistent | internal, real, smallint, oid, internal | boolean + btree_gist | public | gbt_float4_distance | internal, real, smallint, oid, internal | double precision + btree_gist | public | gbt_float4_fetch | internal | internal + btree_gist | public | gbt_float4_penalty | internal, internal, internal | internal + btree_gist | public | gbt_float4_picksplit | internal, internal | internal + btree_gist | public | gbt_float4_same | gbtreekey8, gbtreekey8, internal | internal + btree_gist | public | gbt_float4_union | internal, internal | gbtreekey8 + btree_gist | public | gbt_float8_compress | internal | internal + btree_gist | public | gbt_float8_consistent | internal, double precision, smallint, oid, internal | boolean + btree_gist | public | gbt_float8_distance | internal, double precision, smallint, oid, internal | double precision + btree_gist | public | gbt_float8_fetch | internal | internal + btree_gist | public | gbt_float8_penalty | 
internal, internal, internal | internal + btree_gist | public | gbt_float8_picksplit | internal, internal | internal + btree_gist | public | gbt_float8_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_float8_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_inet_compress | internal | internal + btree_gist | public | gbt_inet_consistent | internal, inet, smallint, oid, internal | boolean + btree_gist | public | gbt_inet_penalty | internal, internal, internal | internal + btree_gist | public | gbt_inet_picksplit | internal, internal | internal + btree_gist | public | gbt_inet_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_inet_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_int2_compress | internal | internal + btree_gist | public | gbt_int2_consistent | internal, smallint, smallint, oid, internal | boolean + btree_gist | public | gbt_int2_distance | internal, smallint, smallint, oid, internal | double precision + btree_gist | public | gbt_int2_fetch | internal | internal + btree_gist | public | gbt_int2_penalty | internal, internal, internal | internal + btree_gist | public | gbt_int2_picksplit | internal, internal | internal + btree_gist | public | gbt_int2_same | gbtreekey4, gbtreekey4, internal | internal + btree_gist | public | gbt_int2_union | internal, internal | gbtreekey4 + btree_gist | public | gbt_int4_compress | internal | internal + btree_gist | public | gbt_int4_consistent | internal, integer, smallint, oid, internal | boolean + btree_gist | public | gbt_int4_distance | internal, integer, smallint, oid, internal | double precision + btree_gist | public | gbt_int4_fetch | internal | internal + btree_gist | public | gbt_int4_penalty | internal, internal, internal | internal + btree_gist | public | gbt_int4_picksplit | internal, internal | internal + btree_gist | public | gbt_int4_same | gbtreekey8, gbtreekey8, internal | internal + btree_gist | public | gbt_int4_union | internal, internal | gbtreekey8 + btree_gist | public | gbt_int8_compress | internal | internal + btree_gist | public | gbt_int8_consistent | internal, bigint, smallint, oid, internal | boolean + btree_gist | public | gbt_int8_distance | internal, bigint, smallint, oid, internal | double precision + btree_gist | public | gbt_int8_fetch | internal | internal + btree_gist | public | gbt_int8_penalty | internal, internal, internal | internal + btree_gist | public | gbt_int8_picksplit | internal, internal | internal + btree_gist | public | gbt_int8_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_int8_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_intv_compress | internal | internal + btree_gist | public | gbt_intv_consistent | internal, interval, smallint, oid, internal | boolean + btree_gist | public | gbt_intv_decompress | internal | internal + btree_gist | public | gbt_intv_distance | internal, interval, smallint, oid, internal | double precision + btree_gist | public | gbt_intv_fetch | internal | internal + btree_gist | public | gbt_intv_penalty | internal, internal, internal | internal + btree_gist | public | gbt_intv_picksplit | internal, internal | internal + btree_gist | public | gbt_intv_same | gbtreekey32, gbtreekey32, internal | internal + btree_gist | public | gbt_intv_union | internal, internal | gbtreekey32 + btree_gist | public | gbt_macad8_compress | internal | internal + btree_gist | public | gbt_macad8_consistent | internal, macaddr8, 
smallint, oid, internal | boolean + btree_gist | public | gbt_macad8_fetch | internal | internal + btree_gist | public | gbt_macad8_penalty | internal, internal, internal | internal + btree_gist | public | gbt_macad8_picksplit | internal, internal | internal + btree_gist | public | gbt_macad8_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_macad8_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_macad_compress | internal | internal + btree_gist | public | gbt_macad_consistent | internal, macaddr, smallint, oid, internal | boolean + btree_gist | public | gbt_macad_fetch | internal | internal + btree_gist | public | gbt_macad_penalty | internal, internal, internal | internal + btree_gist | public | gbt_macad_picksplit | internal, internal | internal + btree_gist | public | gbt_macad_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_macad_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_numeric_compress | internal | internal + btree_gist | public | gbt_numeric_consistent | internal, numeric, smallint, oid, internal | boolean + btree_gist | public | gbt_numeric_penalty | internal, internal, internal | internal + btree_gist | public | gbt_numeric_picksplit | internal, internal | internal + btree_gist | public | gbt_numeric_same | gbtreekey_var, gbtreekey_var, internal | internal + btree_gist | public | gbt_numeric_union | internal, internal | gbtreekey_var + btree_gist | public | gbt_oid_compress | internal | internal + btree_gist | public | gbt_oid_consistent | internal, oid, smallint, oid, internal | boolean + btree_gist | public | gbt_oid_distance | internal, oid, smallint, oid, internal | double precision + btree_gist | public | gbt_oid_fetch | internal | internal + btree_gist | public | gbt_oid_penalty | internal, internal, internal | internal + btree_gist | public | gbt_oid_picksplit | internal, internal | internal + btree_gist | public | gbt_oid_same | gbtreekey8, gbtreekey8, internal | internal + btree_gist | public | gbt_oid_union | internal, internal | gbtreekey8 + btree_gist | public | gbt_text_compress | internal | internal + btree_gist | public | gbt_text_consistent | internal, text, smallint, oid, internal | boolean + btree_gist | public | gbt_text_penalty | internal, internal, internal | internal + btree_gist | public | gbt_text_picksplit | internal, internal | internal + btree_gist | public | gbt_text_same | gbtreekey_var, gbtreekey_var, internal | internal + btree_gist | public | gbt_text_union | internal, internal | gbtreekey_var + btree_gist | public | gbt_time_compress | internal | internal + btree_gist | public | gbt_time_consistent | internal, time without time zone, smallint, oid, internal | boolean + btree_gist | public | gbt_time_distance | internal, time without time zone, smallint, oid, internal | double precision + btree_gist | public | gbt_time_fetch | internal | internal + btree_gist | public | gbt_time_penalty | internal, internal, internal | internal + btree_gist | public | gbt_time_picksplit | internal, internal | internal + btree_gist | public | gbt_time_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_time_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_timetz_compress | internal | internal + btree_gist | public | gbt_timetz_consistent | internal, time with time zone, smallint, oid, internal | boolean + btree_gist | public | gbt_ts_compress | internal | internal + btree_gist | public | gbt_ts_consistent | 
internal, timestamp without time zone, smallint, oid, internal | boolean
+ btree_gist | public | gbt_ts_distance | internal, timestamp without time zone, smallint, oid, internal | double precision
+ btree_gist | public | gbt_ts_fetch | internal | internal
+ btree_gist | public | gbt_ts_penalty | internal, internal, internal | internal
+ btree_gist | public | gbt_ts_picksplit | internal, internal | internal
+ btree_gist | public | gbt_ts_same | gbtreekey16, gbtreekey16, internal | internal
+ btree_gist | public | gbt_ts_union | internal, internal | gbtreekey16
+ btree_gist | public | gbt_tstz_compress | internal | internal
+ btree_gist | public | gbt_tstz_consistent | internal, timestamp with time zone, smallint, oid, internal | boolean
+ btree_gist | public | gbt_tstz_distance | internal, timestamp with time zone, smallint, oid, internal | double precision
+ btree_gist | public | gbt_uuid_compress | internal | internal
+ btree_gist | public | gbt_uuid_consistent | internal, uuid, smallint, oid, internal | boolean
+ btree_gist | public | gbt_uuid_fetch | internal | internal
+ btree_gist | public | gbt_uuid_penalty | internal, internal, internal | internal
+ btree_gist | public | gbt_uuid_picksplit | internal, internal | internal
+ btree_gist | public | gbt_uuid_same | gbtreekey32, gbtreekey32, internal | internal
+ btree_gist | public | gbt_uuid_union | internal, internal | gbtreekey32
+ btree_gist | public | gbt_var_decompress | internal | internal
+ btree_gist | public | gbt_var_fetch | internal | internal
+ btree_gist | public | gbtreekey16_in | cstring | gbtreekey16
+ btree_gist | public | gbtreekey16_out | gbtreekey16 | cstring
+ btree_gist | public | gbtreekey2_in | cstring | gbtreekey2
+ btree_gist | public | gbtreekey2_out | gbtreekey2 | cstring
+ btree_gist | public | gbtreekey32_in | cstring | gbtreekey32
+ btree_gist | public | gbtreekey32_out | gbtreekey32 | cstring
+ btree_gist | public | gbtreekey4_in | cstring | gbtreekey4
+ btree_gist | public | gbtreekey4_out | gbtreekey4 | cstring
+ btree_gist | public | gbtreekey8_in | cstring | gbtreekey8
+ btree_gist | public | gbtreekey8_out | gbtreekey8 | cstring
+ btree_gist | public | gbtreekey_var_in | cstring | gbtreekey_var
+ btree_gist | public | gbtreekey_var_out | gbtreekey_var | cstring
+ btree_gist | public | int2_dist | smallint, smallint | smallint
+ btree_gist | public | int4_dist | integer, integer | integer
+ btree_gist | public | int8_dist | bigint, bigint | bigint
+ btree_gist | public | interval_dist | interval, interval | interval
+ btree_gist | public | oid_dist | oid, oid | oid
+ btree_gist | public | time_dist | time without time zone, time without time zone | interval
+ btree_gist | public | ts_dist | timestamp without time zone, timestamp without time zone | interval
+ btree_gist | public | tstz_dist | timestamp with time zone, timestamp with time zone | interval
+ citext | public | citext | boolean | citext
+ citext | public | citext | inet | citext
+ citext | public | citext | character | citext
+ citext | public | citext_cmp | citext, citext | integer
+ citext | public | citext_eq | citext, citext | boolean
+ citext | public | citext_ge | citext, citext | boolean
+ citext | public | citext_gt | citext, citext | boolean
+ citext | public | citext_hash | citext | integer
+ citext | public | citext_hash_extended | citext, bigint | bigint
+ citext | public | citext_larger | citext, citext | citext
+ citext | public | citext_le | citext, citext | boolean
+ citext | public | citext_lt | citext, citext | boolean
+ citext | public | citext_ne | citext, citext | boolean
+ citext | public | citext_pattern_cmp | citext, citext | integer
+ citext | public | citext_pattern_ge | citext, citext | boolean
+ citext | public | citext_pattern_gt | citext, citext | boolean
+ citext | public | citext_pattern_le | citext, citext | boolean
+ citext | public | citext_pattern_lt | citext, citext | boolean
+ citext | public | citext_smaller | citext, citext | citext
+ citext | public | citextin | cstring | citext
+ citext | public | citextout | citext | cstring
+ citext | public | citextrecv | internal | citext
+ citext | public | citextsend | citext | bytea
+ citext | public | max | citext | citext
+ citext | public | min | citext | citext
+ citext | public | regexp_match | citext, citext | text[]
+ citext | public | regexp_match | citext, citext, text | text[]
+ citext | public | regexp_matches | citext, citext | SETOF text[]
+ citext | public | regexp_matches | citext, citext, text | SETOF text[]
+ citext | public | regexp_replace | citext, citext, text, text | text
+ citext | public | regexp_replace | citext, citext, text | text
+ citext | public | regexp_split_to_array | citext, citext | text[]
+ citext | public | regexp_split_to_array | citext, citext, text | text[]
+ citext | public | regexp_split_to_table | citext, citext | SETOF text
+ citext | public | regexp_split_to_table | citext, citext, text | SETOF text
+ citext | public | replace | citext, citext, citext | text
+ citext | public | split_part | citext, citext, integer | text
+ citext | public | strpos | citext, citext | integer
+ citext | public | texticlike | citext, citext | boolean
+ citext | public | texticlike | citext, text | boolean
+ citext | public | texticnlike | citext, citext | boolean
+ citext | public | texticnlike | citext, text | boolean
+ citext | public | texticregexeq | citext, citext | boolean
+ citext | public | texticregexeq | citext, text | boolean
+ citext | public | texticregexne | citext, citext | boolean
+ citext | public | texticregexne | citext, text | boolean
+ citext | public | translate | citext, citext, text | text
+ cube | public | cube | cube, double precision, double precision | cube
+ cube | public | cube | double precision | cube
+ cube | public | cube | double precision[], double precision[] | cube
+ cube | public | cube | cube, double precision | cube
+ cube | public | cube | double precision, double precision | cube
+ cube | public | cube | double precision[] | cube
+ cube | public | cube_cmp | cube, cube | integer
+ cube | public | cube_contained | cube, cube | boolean
+ cube | public | cube_contains | cube, cube | boolean
+ cube | public | cube_coord | cube, integer | double precision
+ cube | public | cube_coord_llur | cube, integer | double precision
+ cube | public | cube_dim | cube | integer
+ cube | public | cube_distance | cube, cube | double precision
+ cube | public | cube_enlarge | cube, double precision, integer | cube
+ cube | public | cube_eq | cube, cube | boolean
+ cube | public | cube_ge | cube, cube | boolean
+ cube | public | cube_gt | cube, cube | boolean
+ cube | public | cube_in | cstring | cube
+ cube | public | cube_inter | cube, cube | cube
+ cube | public | cube_is_point | cube | boolean
+ cube | public | cube_le | cube, cube | boolean
+ cube | public | cube_ll_coord | cube, integer | double precision
+ cube | public | cube_lt | cube, cube | boolean
+ cube | public | cube_ne | cube, cube | boolean
+ cube | public | cube_out | cube | cstring
+ cube | public | cube_overlap | cube, cube | boolean
+ cube | public | cube_recv | internal | cube
+ cube | public | cube_send | cube | bytea
+ cube | public | cube_size | cube | double precision
+ cube | public | cube_subset | cube, integer[] | cube
+ cube | public | cube_union | cube, cube | cube
+ cube | public | cube_ur_coord | cube, integer | double precision
+ cube | public | distance_chebyshev | cube, cube | double precision
+ cube | public | distance_taxicab | cube, cube | double precision
+ cube | public | g_cube_consistent | internal, cube, smallint, oid, internal | boolean
+ cube | public | g_cube_distance | internal, cube, smallint, oid, internal | double precision
+ cube | public | g_cube_penalty | internal, internal, internal | internal
+ cube | public | g_cube_picksplit | internal, internal | internal
+ cube | public | g_cube_same | cube, cube, internal | internal
+ cube | public | g_cube_union | internal, internal | cube
+ dblink | public | dblink | text, boolean | SETOF record
+ dblink | public | dblink | text | SETOF record
+ dblink | public | dblink | text, text | SETOF record
+ dblink | public | dblink | text, text, boolean | SETOF record
+ dblink | public | dblink_build_sql_delete | text, int2vector, integer, text[] | text
+ dblink | public | dblink_build_sql_insert | text, int2vector, integer, text[], text[] | text
+ dblink | public | dblink_build_sql_update | text, int2vector, integer, text[], text[] | text
+ dblink | public | dblink_cancel_query | text | text
+ dblink | public | dblink_close | text, boolean | text
+ dblink | public | dblink_close | text | text
+ dblink | public | dblink_close | text, text | text
+ dblink | public | dblink_close | text, text, boolean | text
+ dblink | public | dblink_connect | text | text
+ dblink | public | dblink_connect | text, text | text
+ dblink | public | dblink_connect_u | text | text
+ dblink | public | dblink_connect_u | text, text | text
+ dblink | public | dblink_current_query | | text
+ dblink | public | dblink_disconnect | text | text
+ dblink | public | dblink_disconnect | | text
+ dblink | public | dblink_error_message | text | text
+ dblink | public | dblink_exec | text, boolean | text
+ dblink | public | dblink_exec | text | text
+ dblink | public | dblink_exec | text, text | text
+ dblink | public | dblink_exec | text, text, boolean | text
+ dblink | public | dblink_fdw_validator | options text[], catalog oid | void
+ dblink | public | dblink_fetch | text, text, integer, boolean | SETOF record
+ dblink | public | dblink_fetch | text, integer | SETOF record
+ dblink | public | dblink_fetch | text, text, integer | SETOF record
+ dblink | public | dblink_fetch | text, integer, boolean | SETOF record
+ dblink | public | dblink_get_connections | | text[]
+ dblink | public | dblink_get_notify | OUT notify_name text, OUT be_pid integer, OUT extra text | SETOF record
+ dblink | public | dblink_get_notify | conname text, OUT notify_name text, OUT be_pid integer, OUT extra text | SETOF record
+ dblink | public | dblink_get_pkey | text | SETOF dblink_pkey_results
+ dblink | public | dblink_get_result | text, boolean | SETOF record
+ dblink | public | dblink_get_result | text | SETOF record
+ dblink | public | dblink_is_busy | text | integer
+ dblink | public | dblink_open | text, text, text, boolean | text
+ dblink | public | dblink_open | text, text | text
+ dblink | public | dblink_open | text, text, boolean | text
+ dblink | public | dblink_open | text, text, text | text
+ dblink | public | dblink_send_query | text, text | integer
+ dict_int | public | dintdict_init | internal | internal
+ dict_int | public | dintdict_lexize | internal, internal, internal, internal | internal
+ dict_xsyn | public | dxsyn_init | internal | internal
+ dict_xsyn | public | dxsyn_lexize | internal, internal, internal, internal | internal
+ earthdistance | public | earth | | double precision
+ earthdistance | public | earth_box | earth, double precision | cube
+ earthdistance | public | earth_distance | earth, earth | double precision
+ earthdistance | public | gc_to_sec | double precision | double precision
+ earthdistance | public | geo_distance | point, point | double precision
+ earthdistance | public | latitude | earth | double precision
+ earthdistance | public | ll_to_earth | double precision, double precision | earth
+ earthdistance | public | longitude | earth | double precision
+ earthdistance | public | sec_to_gc | double precision | double precision
+ file_fdw | public | file_fdw_handler | | fdw_handler
+ file_fdw | public | file_fdw_validator | text[], oid | void
+ fuzzystrmatch | public | difference | text, text | integer
+ fuzzystrmatch | public | dmetaphone | text | text
+ fuzzystrmatch | public | dmetaphone_alt | text | text
+ fuzzystrmatch | public | levenshtein | text, text | integer
+ fuzzystrmatch | public | levenshtein | text, text, integer, integer, integer | integer
+ fuzzystrmatch | public | levenshtein_less_equal | text, text, integer, integer, integer, integer | integer
+ fuzzystrmatch | public | levenshtein_less_equal | text, text, integer | integer
+ fuzzystrmatch | public | metaphone | text, integer | text
+ fuzzystrmatch | public | soundex | text | text
+ fuzzystrmatch | public | text_soundex | text | text
+ hstore | public | akeys | hstore | text[]
+ hstore | public | avals | hstore | text[]
+ hstore | public | defined | hstore, text | boolean
+ hstore | public | delete | hstore, text | hstore
+ hstore | public | delete | hstore, text[] | hstore
+ hstore | public | delete | hstore, hstore | hstore
+ hstore | public | each | hs hstore, OUT key text, OUT value text | SETOF record
+ hstore | public | exist | hstore, text | boolean
+ hstore | public | exists_all | hstore, text[] | boolean
+ hstore | public | exists_any | hstore, text[] | boolean
+ hstore | public | fetchval | hstore, text | text
+ hstore | public | ghstore_compress | internal | internal
+ hstore | public | ghstore_consistent | internal, hstore, smallint, oid, internal | boolean
+ hstore | public | ghstore_decompress | internal | internal
+ hstore | public | ghstore_in | cstring | ghstore
+ hstore | public | ghstore_options | internal | void
+ hstore | public | ghstore_out | ghstore | cstring
+ hstore | public | ghstore_penalty | internal, internal, internal | internal
+ hstore | public | ghstore_picksplit | internal, internal | internal
+ hstore | public | ghstore_same | ghstore, ghstore, internal | internal
+ hstore | public | ghstore_union | internal, internal | ghstore
+ hstore | public | gin_consistent_hstore | internal, smallint, hstore, integer, internal, internal | boolean
+ hstore | public | gin_extract_hstore | hstore, internal | internal
+ hstore | public | gin_extract_hstore_query | hstore, internal, smallint, internal, internal | internal
+ hstore | public | hs_concat | hstore, hstore | hstore
+ hstore | public | hs_contained | hstore, hstore | boolean
+ hstore | public | hs_contains | hstore, hstore | boolean
+ hstore | public | hstore | text[], text[] | hstore
+ hstore | public | hstore | text, text | hstore
+ hstore | public | hstore | text[] | hstore
+ hstore | public | hstore | record | hstore
+ hstore | public | hstore_cmp | hstore, hstore | integer
+ hstore | public | hstore_eq | hstore, hstore | boolean
+ hstore | public | hstore_ge | hstore, hstore | boolean
+ hstore | public | hstore_gt | hstore, hstore | boolean
+ hstore | public | hstore_hash | hstore | integer
+ hstore | public | hstore_hash_extended | hstore, bigint | bigint
+ hstore | public | hstore_in | cstring | hstore
+ hstore | public | hstore_le | hstore, hstore | boolean
+ hstore | public | hstore_lt | hstore, hstore | boolean
+ hstore | public | hstore_ne | hstore, hstore | boolean
+ hstore | public | hstore_out | hstore | cstring
+ hstore | public | hstore_recv | internal | hstore
+ hstore | public | hstore_send | hstore | bytea
+ hstore | public | hstore_subscript_handler | internal | internal
+ hstore | public | hstore_to_array | hstore | text[]
+ hstore | public | hstore_to_json | hstore | json
+ hstore | public | hstore_to_json_loose | hstore | json
+ hstore | public | hstore_to_jsonb | hstore | jsonb
+ hstore | public | hstore_to_jsonb_loose | hstore | jsonb
+ hstore | public | hstore_to_matrix | hstore | text[]
+ hstore | public | hstore_version_diag | hstore | integer
+ hstore | public | isdefined | hstore, text | boolean
+ hstore | public | isexists | hstore, text | boolean
+ hstore | public | populate_record | anyelement, hstore | anyelement
+ hstore | public | skeys | hstore | SETOF text
+ hstore | public | slice | hstore, text[] | hstore
+ hstore | public | slice_array | hstore, text[] | text[]
+ hstore | public | svals | hstore | SETOF text
+ hstore | public | tconvert | text, text | hstore
+ http | public | bytea_to_text | data bytea | text
+ http | public | http | request http_request | http_response
+ http | public | http_delete | uri character varying, content character varying, content_type character varying | http_response
+ http | public | http_delete | uri character varying | http_response
+ http | public | http_get | uri character varying, data jsonb | http_response
+ http | public | http_get | uri character varying | http_response
+ http | public | http_head | uri character varying | http_response
+ http | public | http_header | field character varying, value character varying | http_header
+ http | public | http_list_curlopt | | TABLE(curlopt text, value text)
+ http | public | http_patch | uri character varying, content character varying, content_type character varying | http_response
+ http | public | http_post | uri character varying, data jsonb | http_response
+ http | public | http_post | uri character varying, content character varying, content_type character varying | http_response
+ http | public | http_put | uri character varying, content character varying, content_type character varying | http_response
+ http | public | http_reset_curlopt | | boolean
+ http | public | http_set_curlopt | curlopt character varying, value character varying | boolean
+ http | public | text_to_bytea | data text | bytea
+ http | public | urlencode | data jsonb | text
+ http | public | urlencode | string character varying | text
+ http | public | urlencode | string bytea | text
+ hypopg | public | hypopg | OUT indexname text, OUT indexrelid oid, OUT indrelid oid, OUT innatts integer, OUT indisunique boolean, OUT indkey int2vector, OUT indcollation oidvector, OUT indclass oidvector, OUT indoption oidvector, OUT indexprs pg_node_tree, OUT indpred pg_node_tree, OUT amid oid | SETOF record
+ hypopg | public | hypopg_create_index | sql_order text, OUT indexrelid oid, OUT indexname text | SETOF record
+ hypopg | public | hypopg_drop_index | indexid oid | boolean
+ hypopg | public | hypopg_get_indexdef | indexid oid | text
+ hypopg | public | hypopg_hidden_indexes | | TABLE(indexid oid)
+ hypopg | public | hypopg_hide_index | indexid oid | boolean
+ hypopg | public | hypopg_relation_size | indexid oid | bigint
+ hypopg | public | hypopg_reset | | void
+ hypopg | public | hypopg_reset_index | | void
+ hypopg | public | hypopg_unhide_all_indexes | | void
+ hypopg | public | hypopg_unhide_index | indexid oid | boolean
+ index_advisor | public | index_advisor | query text | TABLE(startup_cost_before jsonb, startup_cost_after jsonb, total_cost_before jsonb, total_cost_after jsonb, index_statements text[], errors text[])
+ insert_username | public | insert_username | | trigger
+ intagg | public | int_agg_final_array | internal | integer[]
+ intagg | public | int_agg_state | internal, integer | internal
+ intagg | public | int_array_aggregate | integer | integer[]
+ intagg | public | int_array_enum | integer[] | SETOF integer
+ intarray | public | _int_contained | integer[], integer[] | boolean
+ intarray | public | _int_contained_joinsel | internal, oid, internal, smallint, internal | double precision
+ intarray | public | _int_contained_sel | internal, oid, internal, integer | double precision
+ intarray | public | _int_contains | integer[], integer[] | boolean
+ intarray | public | _int_contains_joinsel | internal, oid, internal, smallint, internal | double precision
+ intarray | public | _int_contains_sel | internal, oid, internal, integer | double precision
+ intarray | public | _int_different | integer[], integer[] | boolean
+ intarray | public | _int_inter | integer[], integer[] | integer[]
+ intarray | public | _int_matchsel | internal, oid, internal, integer | double precision
+ intarray | public | _int_overlap | integer[], integer[] | boolean
+ intarray | public | _int_overlap_joinsel | internal, oid, internal, smallint, internal | double precision
+ intarray | public | _int_overlap_sel | internal, oid, internal, integer | double precision
+ intarray | public | _int_same | integer[], integer[] | boolean
+ intarray | public | _int_union | integer[], integer[] | integer[]
+ intarray | public | _intbig_in | cstring | intbig_gkey
+ intarray | public | _intbig_out | intbig_gkey | cstring
+ intarray | public | boolop | integer[], query_int | boolean
+ intarray | public | bqarr_in | cstring | query_int
+ intarray | public | bqarr_out | query_int | cstring
+ intarray | public | g_int_compress | internal | internal
+ intarray | public | g_int_consistent | internal, integer[], smallint, oid, internal | boolean
+ intarray | public | g_int_decompress | internal | internal
+ intarray | public | g_int_options | internal | void
+ intarray | public | g_int_penalty | internal, internal, internal | internal
+ intarray | public | g_int_picksplit | internal, internal | internal
+ intarray | public | g_int_same | integer[], integer[], internal | internal
+ intarray | public | g_int_union | internal, internal | integer[]
+ intarray | public | g_intbig_compress | internal | internal
+ intarray | public | g_intbig_consistent | internal, integer[], smallint, oid, internal | boolean
+ intarray | public | g_intbig_decompress | internal | internal
+ intarray | public | g_intbig_options | internal | void
+ intarray | public | g_intbig_penalty | internal, internal, internal | internal
+ intarray | public | g_intbig_picksplit | internal, internal | internal
+ intarray | public | g_intbig_same | intbig_gkey, intbig_gkey, internal | internal
+ intarray | public | g_intbig_union | internal, internal | intbig_gkey
+ intarray | public | ginint4_consistent | internal, smallint, integer[], integer, internal, internal, internal, internal | boolean
+ intarray | public | ginint4_queryextract | integer[], internal, smallint, internal, internal, internal, internal | internal
+ intarray | public | icount | integer[] | integer
+ intarray | public | idx | integer[], integer | integer
+ intarray | public | intarray_del_elem | integer[], integer | integer[]
+ intarray | public | intarray_push_array | integer[], integer[] | integer[]
+ intarray | public | intarray_push_elem | integer[], integer | integer[]
+ intarray | public | intset | integer | integer[]
+ intarray | public | intset_subtract | integer[], integer[] | integer[]
+ intarray | public | intset_union_elem | integer[], integer | integer[]
+ intarray | public | querytree | query_int | text
+ intarray | public | rboolop | query_int, integer[] | boolean
+ intarray | public | sort | integer[] | integer[]
+ intarray | public | sort | integer[], text | integer[]
+ intarray | public | sort_asc | integer[] | integer[]
+ intarray | public | sort_desc | integer[] | integer[]
+ intarray | public | subarray | integer[], integer | integer[]
+ intarray | public | subarray | integer[], integer, integer | integer[]
+ intarray | public | uniq | integer[] | integer[]
+ isn | public | btean13cmp | ean13, ean13 | integer
+ isn | public | btean13cmp | ean13, issn | integer
+ isn | public | btean13cmp | ean13, upc | integer
+ isn | public | btean13cmp | ean13, issn13 | integer
+ isn | public | btean13cmp | ean13, ismn | integer
+ isn | public | btean13cmp | ean13, ismn13 | integer
+ isn | public | btean13cmp | ean13, isbn13 | integer
+ isn | public | btean13cmp | ean13, isbn | integer
+ isn | public | btisbn13cmp | isbn13, isbn13 | integer
+ isn | public | btisbn13cmp | isbn13, ean13 | integer
+ isn | public | btisbn13cmp | isbn13, isbn | integer
+ isn | public | btisbncmp | isbn, ean13 | integer
+ isn | public | btisbncmp | isbn, isbn | integer
+ isn | public | btisbncmp | isbn, isbn13 | integer
+ isn | public | btismn13cmp | ismn13, ismn13 | integer
+ isn | public | btismn13cmp | ismn13, ean13 | integer
+ isn | public | btismn13cmp | ismn13, ismn | integer
+ isn | public | btismncmp | ismn, ismn | integer
+ isn | public | btismncmp | ismn, ean13 | integer
+ isn | public | btismncmp | ismn, ismn13 | integer
+ isn | public | btissn13cmp | issn13, issn13 | integer
+ isn | public | btissn13cmp | issn13, ean13 | integer
+ isn | public | btissn13cmp | issn13, issn | integer
+ isn | public | btissncmp | issn, ean13 | integer
+ isn | public | btissncmp | issn, issn13 | integer
+ isn | public | btissncmp | issn, issn | integer
+ isn | public | btupccmp | upc, upc | integer
+ isn | public | btupccmp | upc, ean13 | integer
+ isn | public | ean13_in | cstring | ean13
+ isn | public | ean13_out | isbn13 | cstring
+ isn | public | ean13_out | ismn13 | cstring
+ isn | public | ean13_out | ean13 | cstring
+ isn | public | ean13_out | issn13 | cstring
+ isn | public | hashean13 | ean13 | integer
+ isn | public | hashisbn | isbn | integer
+ isn | public | hashisbn13 | isbn13 | integer
+ isn | public | hashismn | ismn | integer
+ isn | public | hashismn13 | ismn13 | integer
+ isn | public | hashissn | issn | integer
+ isn | public | hashissn13 | issn13 | integer
+ isn | public | hashupc | upc | integer
+ isn | public | is_valid | isbn13 | boolean
+ isn | public | is_valid | upc | boolean
+ isn | public | is_valid | ismn | boolean
+ isn | public | is_valid | issn | boolean
+ isn | public | is_valid | isbn | boolean
+ isn | public | is_valid | ismn13 | boolean
+ isn | public | is_valid | ean13 | boolean
+ isn | public | is_valid | issn13 | boolean
+ isn | public | isbn | ean13 | isbn
+ isn | public | isbn13 | ean13 | isbn13
+ isn | public | isbn13_in | cstring | isbn13
+ isn | public | isbn_in | cstring | isbn
+ isn | public | ismn | ean13 | ismn
+ isn | public | ismn13 | ean13 | ismn13
+ isn | public | ismn13_in | cstring | ismn13
+ isn | public | ismn_in | cstring | ismn
+ isn | public | isn_out | upc | cstring
+ isn | public | isn_out | ismn | cstring
+ isn | public | isn_out | issn | cstring
+ isn | public | isn_out | isbn | cstring
+ isn | public | isn_weak | boolean | boolean
+ isn | public | isn_weak | | boolean
+ isn | public | isneq | ismn, ismn | boolean
+ isn | public | isneq | ean13, ean13 | boolean
+ isn | public | isneq | isbn, ean13 | boolean
+ isn | public | isneq | issn, ean13 | boolean
+ isn | public | isneq | isbn13, isbn13 | boolean
+ isn | public | isneq | ean13, issn | boolean
+ isn | public | isneq | isbn, isbn | boolean
+ isn | public | isneq | ean13, upc | boolean
+ isn | public | isneq | ean13, issn13 | boolean
+ isn | public | isneq | ean13, ismn | boolean
+ isn | public | isneq | upc, upc | boolean
+ isn | public | isneq | issn13, issn13 | boolean
+ isn | public | isneq | upc, ean13 | boolean
+ isn | public | isneq | ismn13, ismn13 | boolean
+ isn | public | isneq | issn13, ean13 | boolean
+ isn | public | isneq | ean13, ismn13 | boolean
+ isn | public | isneq | ismn13, ean13 | boolean
+ isn | public | isneq | issn13, issn | boolean
+ isn | public | isneq | ismn, ean13 | boolean
+ isn | public | isneq | ismn13, ismn | boolean
+ isn | public | isneq | ean13, isbn13 | boolean
+ isn | public | isneq | isbn13, ean13 | boolean
+ isn | public | isneq | isbn13, isbn | boolean
+ isn | public | isneq | isbn, isbn13 | boolean
+ isn | public | isneq | ismn, ismn13 | boolean
+ isn | public | isneq | ean13, isbn | boolean
+ isn | public | isneq | issn, issn13 | boolean
+ isn | public | isneq | issn, issn | boolean
+ isn | public | isnge | ismn, ismn | boolean
+ isn | public | isnge | ean13, ean13 | boolean
+ isn | public | isnge | isbn, ean13 | boolean
+ isn | public | isnge | issn, ean13 | boolean
+ isn | public | isnge | isbn13, isbn13 | boolean
+ isn | public | isnge | ean13, issn | boolean
+ isn | public | isnge | isbn, isbn | boolean
+ isn | public | isnge | ean13, upc | boolean
+ isn | public | isnge | ean13, issn13 | boolean
+ isn | public | isnge | ean13, ismn | boolean
+ isn | public | isnge | upc, upc | boolean
+ isn | public | isnge | issn13, issn13 | boolean
+ isn | public | isnge | upc, ean13 | boolean
+ isn | public | isnge | ismn13, ismn13 | boolean
+ isn | public | isnge | issn13, ean13 | boolean
+ isn | public | isnge | ean13, ismn13 | boolean
+ isn | public | isnge | ismn13, ean13 | boolean
+ isn | public | isnge | issn13, issn | boolean
+ isn | public | isnge | ismn, ean13 | boolean
+ isn | public | isnge | ismn13, ismn | boolean
+ isn | public | isnge | ean13, isbn13 | boolean
+ isn | public | isnge | isbn13, ean13 | boolean
+ isn | public | isnge | isbn13, isbn | boolean
+ isn | public | isnge | isbn, isbn13 | boolean
+ isn | public | isnge | ismn, ismn13 | boolean
+ isn | public | isnge | ean13, isbn | boolean
+ isn | public | isnge | issn, issn13 | boolean
+ isn | public | isnge | issn, issn | boolean
+ isn | public | isngt | ismn, ismn | boolean
+ isn | public | isngt | ean13, ean13 | boolean
+ isn | public | isngt | isbn, ean13 | boolean
+ isn | public | isngt | issn, ean13 | boolean
+ isn | public | isngt | isbn13, isbn13 | boolean
+ isn | public | isngt | ean13, issn | boolean
+ isn | public | isngt | isbn, isbn | boolean
+ isn | public | isngt | ean13, upc | boolean
+ isn | public | isngt | ean13, issn13 | boolean
+ isn | public | isngt | ean13, ismn | boolean
+ isn | public | isngt | upc, upc | boolean
+ isn | public | isngt | issn13, issn13 | boolean
+ isn | public | isngt | upc, ean13 | boolean
+ isn | public | isngt | ismn13, ismn13 | boolean
+ isn | public | isngt | issn13, ean13 | boolean
+ isn | public | isngt | ean13, ismn13 | boolean
+ isn | public | isngt | ismn13, ean13 | boolean
+ isn | public | isngt | issn13, issn | boolean
+ isn | public | isngt | ismn, ean13 | boolean
+ isn | public | isngt | ismn13, ismn | boolean
+ isn | public | isngt | ean13, isbn13 | boolean
+ isn | public | isngt | isbn13, ean13 | boolean
+ isn | public | isngt | isbn13, isbn | boolean
+ isn | public | isngt | isbn, isbn13 | boolean
+ isn | public | isngt | ismn, ismn13 | boolean
+ isn | public | isngt | ean13, isbn | boolean
+ isn | public | isngt | issn, issn13 | boolean
+ isn | public | isngt | issn, issn | boolean
+ isn | public | isnle | ismn, ismn | boolean
+ isn | public | isnle | ean13, ean13 | boolean
+ isn | public | isnle | isbn, ean13 | boolean
+ isn | public | isnle | issn, ean13 | boolean
+ isn | public | isnle | isbn13, isbn13 | boolean
+ isn | public | isnle | ean13, issn | boolean
+ isn | public | isnle | isbn, isbn | boolean
+ isn | public | isnle | ean13, upc | boolean
+ isn | public | isnle | ean13, issn13 | boolean
+ isn | public | isnle | ean13, ismn | boolean
+ isn | public | isnle | upc, upc | boolean
+ isn | public | isnle | issn13, issn13 | boolean
+ isn | public | isnle | upc, ean13 | boolean
+ isn | public | isnle | ismn13, ismn13 | boolean
+ isn | public | isnle | issn13, ean13 | boolean
+ isn | public | isnle | ean13, ismn13 | boolean
+ isn | public | isnle | ismn13, ean13 | boolean
+ isn | public | isnle | issn13, issn | boolean
+ isn | public | isnle | ismn, ean13 | boolean
+ isn | public | isnle | ismn13, ismn | boolean
+ isn | public | isnle | ean13, isbn13 | boolean
+ isn | public | isnle | isbn13, ean13 | boolean
+ isn | public | isnle | isbn13, isbn | boolean
+ isn | public | isnle | isbn, isbn13 | boolean
+ isn | public | isnle | ismn, ismn13 | boolean
+ isn | public | isnle | ean13, isbn | boolean
+ isn | public | isnle | issn, issn13 | boolean
+ isn | public | isnle | issn, issn | boolean
+ isn | public | isnlt | ismn, ismn | boolean
+ isn | public | isnlt | ean13, ean13 | boolean
+ isn | public | isnlt | isbn, ean13 | boolean
+ isn | public | isnlt | issn, ean13 | boolean
+ isn | public | isnlt | isbn13, isbn13 | boolean
+ isn | public | isnlt | ean13, issn | boolean
+ isn | public | isnlt | isbn, isbn | boolean
+ isn | public | isnlt | ean13, upc | boolean
+ isn | public | isnlt | ean13, issn13 | boolean
+ isn | public | isnlt | ean13, ismn | boolean
+ isn | public | isnlt | upc, upc | boolean
+ isn | public | isnlt | issn13, issn13 | boolean
+ isn | public | isnlt | upc, ean13 | boolean
+ isn | public | isnlt | ismn13, ismn13 | boolean
+ isn | public | isnlt | issn13, ean13 | boolean
+ isn | public | isnlt | ean13, ismn13 | boolean
+ isn | public | isnlt | ismn13, ean13 | boolean
+ isn | public | isnlt | issn13, issn | boolean
+ isn | public | isnlt | ismn, ean13 | boolean
+ isn | public | isnlt | ismn13, ismn | boolean
+ isn | public | isnlt | ean13, isbn13 | boolean
+ isn | public | isnlt | isbn13, ean13 | boolean
+ isn | public | isnlt | isbn13, isbn | boolean
+ isn | public | isnlt | isbn, isbn13 | boolean
+ isn | public | isnlt | ismn, ismn13 | boolean
+ isn | public | isnlt | ean13, isbn | boolean
+ isn | public | isnlt | issn, issn13 | boolean
+ isn | public | isnlt | issn, issn | boolean
+ isn | public | isnne | ismn, ismn | boolean
+ isn | public | isnne | ean13, ean13 | boolean
+ isn | public | isnne | isbn, ean13 | boolean
+ isn | public | isnne | issn, ean13 | boolean
+ isn | public | isnne | isbn13, isbn13 | boolean
+ isn | public | isnne | ean13, issn | boolean
+ isn | public | isnne | isbn, isbn | boolean
+ isn | public | isnne | ean13, upc | boolean
+ isn | public | isnne | ean13, issn13 | boolean
+ isn | public | isnne | ean13, ismn | boolean
+ isn | public | isnne | upc, upc | boolean
+ isn | public | isnne | issn13, issn13 | boolean
+ isn | public | isnne | upc, ean13 | boolean
+ isn | public | isnne | ismn13, ismn13 | boolean
+ isn | public | isnne | issn13, ean13 | boolean
+ isn | public | isnne | ean13, ismn13 | boolean
+ isn | public | isnne | ismn13, ean13 | boolean
+ isn | public | isnne | issn13, issn | boolean
+ isn | public | isnne | ismn, ean13 | boolean
+ isn | public | isnne | ismn13, ismn | boolean
+ isn | public | isnne | ean13, isbn13 | boolean
+ isn | public | isnne | isbn13, ean13 | boolean
+ isn | public | isnne | isbn13, isbn | boolean
+ isn | public | isnne | isbn, isbn13 | boolean
+ isn | public | isnne | ismn, ismn13 | boolean
+ isn | public | isnne | ean13, isbn | boolean
+ isn | public | isnne | issn, issn13 | boolean
+ isn | public | isnne | issn, issn | boolean
+ isn | public | issn | ean13 | issn
+ isn | public | issn13 | ean13 | issn13
+ isn | public | issn13_in | cstring | issn13
+ isn | public | issn_in | cstring | issn
+ isn | public | make_valid | isbn13 | isbn13
+ isn | public | make_valid | upc | upc
+ isn | public | make_valid | ismn | ismn
+ isn | public | make_valid | issn | issn
+ isn | public | make_valid | isbn | isbn
+ isn | public | make_valid | ismn13 | ismn13
+ isn | public | make_valid | ean13 | ean13
+ isn | public | make_valid | issn13 | issn13
+ isn | public | upc | ean13 | upc
+ isn | public | upc_in | cstring | upc
+ lo | public | lo_manage | | trigger
+ lo | public | lo_oid | lo | oid
+ ltree | public | _lt_q_regex | ltree[], lquery[] | boolean
+ ltree | public | _lt_q_rregex | lquery[], ltree[] | boolean
+ ltree | public | _ltq_extract_regex | ltree[], lquery | ltree
+ ltree | public | _ltq_regex | ltree[], lquery | boolean
+ ltree | public | _ltq_rregex | lquery, ltree[] | boolean
+ ltree | public | _ltree_compress | internal | internal
+ ltree | public | _ltree_consistent | internal, ltree[], smallint, oid, internal | boolean
+ ltree | public | _ltree_extract_isparent | ltree[], ltree | ltree
+ ltree | public | _ltree_extract_risparent | ltree[], ltree | ltree
+ ltree | public | _ltree_gist_options | internal | void
+ ltree | public | _ltree_isparent | ltree[], ltree | boolean
+ ltree | public | _ltree_penalty | internal, internal, internal | internal
+ ltree | public | _ltree_picksplit | internal, internal | internal
+ ltree | public | _ltree_r_isparent | ltree, ltree[] | boolean
+ ltree | public | _ltree_r_risparent | ltree, ltree[] | boolean
+ ltree | public | _ltree_risparent | ltree[], ltree | boolean
+ ltree | public | _ltree_same | ltree_gist, ltree_gist, internal | internal
+ ltree | public | _ltree_union | internal, internal | ltree_gist
+ ltree | public | _ltxtq_exec | ltree[], ltxtquery | boolean
+ ltree | public | _ltxtq_extract_exec | ltree[], ltxtquery | ltree
+ ltree | public | _ltxtq_rexec | ltxtquery, ltree[] | boolean
+ ltree | public | index | ltree, ltree | integer
+ ltree | public | index | ltree, ltree, integer | integer
+ ltree | public | lca | ltree, ltree, ltree, ltree, ltree, ltree | ltree
+ ltree | public | lca | ltree, ltree | ltree
+ ltree | public | lca | ltree, ltree, ltree | ltree
+ ltree | public | lca | ltree, ltree, ltree, ltree, ltree, ltree, ltree, ltree | ltree
+ ltree | public | lca | ltree, ltree, ltree, ltree, ltree | ltree
+ ltree | public | lca | ltree[] | ltree
+ ltree | public | lca | ltree, ltree, ltree, ltree, ltree, ltree, ltree | ltree
+ ltree | public | lca | ltree, ltree, ltree, ltree | ltree
+ ltree | public | lquery_in | cstring | lquery
+ ltree | public | lquery_out | lquery | cstring
+ ltree | public | lquery_recv | internal | lquery
+ ltree | public | lquery_send | lquery | bytea
+ ltree | public | lt_q_regex | ltree, lquery[] | boolean
+ ltree | public | lt_q_rregex | lquery[], ltree | boolean
+ ltree | public | ltq_regex | ltree, lquery | boolean
+ ltree | public | ltq_rregex | lquery, ltree | boolean
+ ltree | public | ltree2text | ltree | text
+ ltree | public | ltree_addltree | ltree, ltree | ltree
+ ltree | public | ltree_addtext | ltree, text | ltree
+ ltree | public | ltree_cmp | ltree, ltree | integer
+ ltree | public | ltree_compress | internal | internal
+ ltree | public | ltree_consistent | internal, ltree, smallint, oid, internal | boolean
+ ltree | public | ltree_decompress | internal | internal
+ ltree | public | ltree_eq | ltree, ltree | boolean
+ ltree | public | ltree_ge | ltree, ltree | boolean
+ ltree | public | ltree_gist_in | cstring | ltree_gist
+ ltree | public | ltree_gist_options | internal | void
+ ltree | public | ltree_gist_out | ltree_gist | cstring
+ ltree | public | ltree_gt | ltree, ltree | boolean
+ ltree | public | ltree_in | cstring | ltree
+ ltree | public | ltree_isparent | ltree, ltree | boolean
+ ltree | public | ltree_le | ltree, ltree | boolean
+ ltree | public | ltree_lt | ltree, ltree | boolean
+ ltree | public | ltree_ne | ltree, ltree | boolean
+ ltree | public | ltree_out | ltree | cstring
+ ltree | public | ltree_penalty | internal, internal, internal | internal
+ ltree | public | ltree_picksplit | internal, internal | internal
+ ltree | public | ltree_recv | internal | ltree
+ ltree | public | ltree_risparent | ltree, ltree | boolean
+ ltree | public | ltree_same | ltree_gist, ltree_gist, internal | internal
+ ltree | public | ltree_send | ltree | bytea
+ ltree | public | ltree_textadd | text, ltree | ltree
+ ltree | public | ltree_union | internal, internal | ltree_gist
+ ltree | public | ltreeparentsel | internal, oid, internal, integer | double precision
+ ltree | public | ltxtq_exec | ltree, ltxtquery | boolean
+ ltree | public | ltxtq_in | cstring | ltxtquery
+ ltree | public | ltxtq_out | ltxtquery | cstring
+ ltree | public | ltxtq_recv | internal | ltxtquery
+ ltree | public | ltxtq_rexec | ltxtquery, ltree | boolean
+ ltree | public | ltxtq_send | ltxtquery | bytea
+ ltree | public | nlevel | ltree | integer
+ ltree | public | subltree | ltree, integer, integer | ltree
+ ltree | public | subpath | ltree, integer | ltree
+ ltree | public | subpath | ltree, integer, integer | ltree
+ ltree | public | text2ltree | text | ltree
+ moddatetime | public | moddatetime | | trigger
+ old_snapshot | public | pg_old_snapshot_time_mapping | OUT array_offset integer, OUT end_timestamp timestamp with time zone, OUT newest_xmin xid | SETOF record
+ pageinspect | public | brin_metapage_info | page bytea, OUT magic text, OUT version integer, OUT pagesperrange integer, OUT lastrevmappage bigint | record
+ pageinspect | public | brin_page_type | page bytea | text
+ pageinspect | public | brin_revmap_data | page bytea, OUT pages tid | SETOF tid
+ pageinspect | public | bt_metap | relname text, OUT magic integer, OUT version integer, OUT root bigint, OUT level bigint, OUT fastroot bigint, OUT fastlevel bigint, OUT last_cleanup_num_delpages bigint, OUT last_cleanup_num_tuples double precision, OUT allequalimage boolean | record
+ pageinspect | public | bt_page_items | page bytea, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT nulls boolean, OUT vars boolean, OUT data text, OUT dead boolean, OUT htid tid, OUT tids tid[] | SETOF record
+ pageinspect | public | bt_page_items | relname text, blkno bigint, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT nulls boolean, OUT vars boolean, OUT data text, OUT dead boolean, OUT htid tid, OUT tids tid[] | SETOF record
+ pageinspect | public | bt_page_stats | relname text, blkno bigint, OUT blkno bigint, OUT type "char", OUT live_items integer, OUT dead_items integer, OUT avg_item_size integer, OUT page_size integer, OUT free_size integer, OUT btpo_prev bigint, OUT btpo_next bigint, OUT btpo_level bigint, OUT btpo_flags integer | record
+ pageinspect | public | fsm_page_contents | page bytea | text
+ pageinspect | public | get_raw_page | text, bigint | bytea
+ pageinspect | public | get_raw_page | text, text, bigint | bytea
+ pageinspect | public | gin_leafpage_items | page bytea, OUT first_tid tid, OUT nbytes smallint, OUT tids tid[] | SETOF record
+ pageinspect | public | gin_metapage_info | page bytea, OUT pending_head bigint, OUT pending_tail bigint, OUT tail_free_size integer, OUT n_pending_pages bigint, OUT n_pending_tuples bigint, OUT n_total_pages bigint, OUT n_entry_pages bigint, OUT n_data_pages bigint, OUT n_entries bigint, OUT version integer | record
+ pageinspect | public | gin_page_opaque_info | page bytea, OUT rightlink bigint, OUT maxoff integer, OUT flags text[] | record
+ pageinspect | public | gist_page_items | page bytea, index_oid regclass, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT dead boolean, OUT keys text | SETOF record
+ pageinspect | public | gist_page_items_bytea | page bytea, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT dead boolean, OUT key_data bytea | SETOF record
+ pageinspect | public | gist_page_opaque_info | page bytea, OUT lsn pg_lsn, OUT nsn pg_lsn, OUT rightlink bigint, OUT flags text[] | record
+ pageinspect | public | hash_bitmap_info | index_oid regclass, blkno bigint, OUT bitmapblkno bigint, OUT bitmapbit integer, OUT bitstatus boolean | SETOF record
+ pageinspect | public | hash_metapage_info | page bytea, OUT magic bigint, OUT version bigint, OUT ntuples double precision, OUT ffactor integer, OUT bsize integer, OUT bmsize integer, OUT bmshift integer, OUT maxbucket bigint, OUT highmask bigint, OUT lowmask bigint, OUT ovflpoint bigint, OUT firstfree bigint, OUT nmaps bigint, OUT procid oid, OUT spares bigint[], OUT mapp bigint[] | record
+ pageinspect | public | hash_page_items | page bytea, OUT itemoffset integer, OUT ctid tid, OUT data bigint | SETOF record
+ pageinspect | public | hash_page_stats | page bytea, OUT live_items integer, OUT dead_items integer, OUT page_size integer, OUT free_size integer, OUT hasho_prevblkno bigint, OUT hasho_nextblkno bigint, OUT hasho_bucket bigint, OUT hasho_flag integer, OUT hasho_page_id integer | record
+ pageinspect | public | hash_page_type | page bytea | text
+ pageinspect | public | heap_page_item_attrs | page bytea, rel_oid regclass, OUT lp smallint, OUT lp_off smallint, OUT lp_flags smallint, OUT lp_len smallint, OUT t_xmin xid, OUT t_xmax xid, OUT t_field3 integer, OUT t_ctid tid, OUT t_infomask2 integer, OUT t_infomask integer, OUT t_hoff smallint, OUT t_bits text, OUT t_oid oid, OUT t_attrs bytea[] | SETOF record
+ pageinspect | public | heap_page_item_attrs | page bytea, rel_oid regclass, do_detoast boolean, OUT lp smallint, OUT lp_off smallint, OUT lp_flags smallint, OUT lp_len smallint, OUT t_xmin xid, OUT t_xmax xid, OUT t_field3 integer, OUT t_ctid tid, OUT t_infomask2 integer, OUT t_infomask integer, OUT t_hoff smallint, OUT t_bits text, OUT t_oid oid, OUT t_attrs bytea[] | SETOF record
+ pageinspect | public | heap_page_items | page bytea, OUT lp smallint, OUT lp_off smallint, OUT lp_flags smallint, OUT lp_len smallint, OUT t_xmin xid, OUT t_xmax xid, OUT t_field3 integer, OUT t_ctid tid, OUT t_infomask2 integer, OUT t_infomask integer, OUT t_hoff smallint, OUT t_bits text, OUT t_oid oid, OUT t_data bytea | SETOF record
+ pageinspect | public | heap_tuple_infomask_flags | t_infomask integer, t_infomask2 integer, OUT raw_flags text[], OUT combined_flags text[] | record
+ pageinspect | public | page_checksum | page bytea, blkno bigint | smallint
+ pageinspect | public | page_header | page bytea, OUT lsn pg_lsn, OUT checksum smallint, OUT flags smallint, OUT lower integer, OUT upper integer, OUT special integer, OUT pagesize integer, OUT version smallint, OUT prune_xid xid | record
+ pageinspect | public | tuple_data_split | rel_oid oid, t_data bytea, t_infomask integer, t_infomask2 integer, t_bits text | bytea[]
+ pageinspect | public | tuple_data_split | rel_oid oid, t_data bytea, t_infomask integer, t_infomask2 integer, t_bits text, do_detoast boolean | bytea[]
+ pg_backtrace | public | pg_backtrace_init | | void
+ pg_buffercache | public | pg_buffercache_pages | | SETOF record
+ pg_freespacemap | public | pg_freespace | rel regclass, OUT blkno bigint, OUT avail smallint | SETOF record
+ pg_freespacemap | public | pg_freespace | regclass, bigint | smallint
+ pg_graphql | graphql | _internal_resolve | query text, variables jsonb, "operationName" text, extensions jsonb | jsonb
+ pg_graphql | graphql | comment_directive | comment_ text | jsonb
+ pg_graphql | graphql | exception | message text | text
+ pg_graphql | graphql | get_schema_version | | integer
+ pg_graphql | graphql | increment_schema_version | | event_trigger
+ pg_graphql | graphql | resolve | query text, variables jsonb, "operationName" text, extensions jsonb | jsonb
+ pg_graphql | graphql_public | graphql | "operationName" text, query text, variables jsonb, extensions jsonb | jsonb
+ pg_hashids | public | hash_decode | text, text, integer | integer
+ pg_hashids | public | hash_encode | bigint | text
+ pg_hashids | public | hash_encode | bigint, text | text
+ pg_hashids | public | hash_encode | bigint, text, integer | text
+ pg_hashids | public | id_decode | text | bigint[]
+ pg_hashids | public | id_decode | text, text | bigint[]
+ pg_hashids | public | id_decode | text, text, integer, text | bigint[]
+ pg_hashids | public | id_decode | text, text, integer | bigint[]
+ pg_hashids | public | id_decode_once | text | bigint
+ pg_hashids | public | id_decode_once | text, text | bigint
+ pg_hashids | public | id_decode_once | text, text, integer, text | bigint
+ pg_hashids | public | id_decode_once | text, text, integer | bigint
+ pg_hashids | public | id_encode | bigint | text
+ pg_hashids | public | id_encode | bigint[] | text
+ pg_hashids | public | id_encode | bigint[], text | text
+ pg_hashids | public | id_encode | bigint[], text, integer | text
+ pg_hashids | public | id_encode | bigint, text | text
+ pg_hashids | public | id_encode | bigint, text, integer | text
+ pg_hashids | public | id_encode | bigint, text, integer, text | text
+ pg_hashids | public | id_encode | bigint[], text, integer, text | text
+ pg_jsonschema | public | json_matches_schema | schema json, instance json | boolean
+ pg_jsonschema | public | jsonb_matches_schema | schema json, instance jsonb | boolean
+ pg_jsonschema | public | jsonschema_is_valid | schema json | boolean
+ pg_jsonschema | public | jsonschema_validation_errors | schema json, instance json | text[]
+ pg_net | net | _await_response | request_id bigint | boolean
+ pg_net | net | _encode_url_with_params_array | url text, params_array text[] | text
+ pg_net | net | _http_collect_response | request_id bigint, async boolean | net.http_response_result
+ pg_net | net | _urlencode_string | string character varying | text
+ pg_net | net | check_worker_is_up | | void
+ pg_net | net | http_collect_response | request_id bigint, async boolean | net.http_response_result
+ pg_net | net | http_delete | url text, params jsonb, headers jsonb, timeout_milliseconds integer | bigint
+ pg_net | net | http_get | url text, params jsonb, headers jsonb, timeout_milliseconds integer | bigint
+ pg_net | net | http_post | url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer | bigint
+ pg_net | net | worker_restart | | boolean
+ pg_prewarm | public | autoprewarm_dump_now | | bigint
+ pg_prewarm | public | autoprewarm_start_worker | | void
+ pg_prewarm | public | pg_prewarm | regclass, mode text, fork text, first_block bigint, last_block bigint | bigint
+ pg_repack | repack | conflicted_triggers | oid | SETOF name
+ pg_repack | repack | create_index_type | oid, oid | void
+ pg_repack | repack | create_log_table | oid | void
+ pg_repack | repack | create_table | oid, name | void
+ pg_repack | repack | disable_autovacuum | regclass | void
+ pg_repack | repack | get_alter_col_storage | oid | text
+ pg_repack | repack | get_assign | oid, text | text
+ pg_repack | repack | get_columns_for_create_as | oid | text
+ pg_repack | repack | get_compare_pkey | oid, text | text
+ pg_repack | repack | get_create_index_type | oid, name | text
+ pg_repack | repack | get_create_trigger | relid oid, pkid oid | text
+ pg_repack | repack | get_drop_columns | oid, text | text
+ pg_repack | repack | get_enable_trigger | relid oid | text
+ pg_repack | repack | get_index_columns | oid | text
+ pg_repack | repack | get_order_by | oid, oid | text
+ pg_repack | repack | get_storage_param | oid | text
+ pg_repack | repack | get_table_and_inheritors | regclass | regclass[]
+ pg_repack | repack | oid2text | oid | text
+ pg_repack | repack | repack_apply | sql_peek cstring, sql_insert cstring, sql_delete cstring, sql_update cstring, sql_pop cstring, count integer | integer
+ pg_repack | repack | repack_drop | oid, integer | void
+ pg_repack | repack | repack_index_swap | oid | void
+ pg_repack | repack | repack_indexdef | oid, oid, name, boolean | text
+ pg_repack | repack | repack_swap | oid | void
+ pg_repack | repack | repack_trigger | | trigger
+ pg_repack | repack | version | | text
+ pg_repack | repack | version_sql | | text
+ pg_stat_monitor | public | decode_error_level | elevel integer | text
+ pg_stat_monitor | public | get_cmd_type | cmd_type integer | text
+ pg_stat_monitor | public | get_histogram_timings | | text
+ pg_stat_monitor | public | histogram | _bucket integer, _quryid bigint | SETOF record
+ pg_stat_monitor | public | pg_stat_monitor_internal | showtext boolean, OUT bucket bigint, OUT userid oid, OUT username text, OUT dbid oid, OUT datname text, OUT client_ip bigint, OUT queryid bigint, OUT planid bigint, OUT query text, OUT query_plan text, OUT pgsm_query_id bigint, OUT top_queryid bigint, OUT top_query text, OUT application_name text, OUT relations text, OUT cmd_type integer, OUT elevel integer, OUT sqlcode text, OUT message text, OUT bucket_start_time timestamp with time zone, OUT calls bigint, OUT total_exec_time double precision, OUT min_exec_time double precision, OUT max_exec_time double precision, OUT mean_exec_time double precision, OUT stddev_exec_time double precision, OUT rows bigint, OUT plans bigint, OUT total_plan_time double precision, OUT min_plan_time double precision, OUT max_plan_time double precision, OUT mean_plan_time double precision, OUT stddev_plan_time double precision, OUT shared_blks_hit bigint, OUT shared_blks_read bigint, OUT shared_blks_dirtied bigint, OUT shared_blks_written bigint, OUT local_blks_hit bigint, OUT local_blks_read bigint, OUT local_blks_dirtied bigint, OUT local_blks_written bigint, OUT temp_blks_read bigint, OUT temp_blks_written bigint, OUT shared_blk_read_time double precision, OUT shared_blk_write_time double precision, OUT local_blk_read_time double precision, OUT local_blk_write_time double precision, OUT temp_blk_read_time double precision, OUT temp_blk_write_time double precision, OUT resp_calls text, OUT cpu_user_time double precision, OUT cpu_sys_time double precision, OUT wal_records bigint, OUT wal_fpi bigint, OUT wal_bytes numeric, OUT comments text, OUT jit_functions bigint, OUT jit_generation_time double precision, OUT jit_inlining_count bigint, OUT jit_inlining_time double precision, OUT jit_optimization_count bigint, OUT jit_optimization_time double precision, OUT jit_emission_count bigint, OUT jit_emission_time double precision, OUT jit_deform_count bigint, OUT jit_deform_time double precision, OUT stats_since timestamp with time zone, OUT minmax_stats_since timestamp with time zone, OUT toplevel boolean, OUT bucket_done boolean | SETOF record
+ pg_stat_monitor | public | pg_stat_monitor_reset | | void
+ pg_stat_monitor | public | pg_stat_monitor_version | | text
+ pg_stat_monitor | public | pgsm_create_11_view | | integer
+ pg_stat_monitor | public | pgsm_create_13_view | | integer
+ pg_stat_monitor | public | pgsm_create_14_view | | integer
+ pg_stat_monitor | public | pgsm_create_15_view | | integer
+ pg_stat_monitor | public | pgsm_create_17_view | | integer
+ pg_stat_monitor | public | pgsm_create_view | | integer
+ pg_stat_monitor | public | range | | text[]
+ pg_stat_statements | extensions | pg_stat_statements | showtext boolean, OUT userid oid, OUT dbid oid, OUT toplevel boolean, OUT queryid bigint, OUT query text, OUT plans bigint, OUT total_plan_time double precision, OUT min_plan_time double precision, OUT max_plan_time double precision, OUT mean_plan_time double precision, OUT stddev_plan_time double precision, OUT calls bigint, OUT total_exec_time double precision, OUT min_exec_time double precision, OUT max_exec_time double precision, OUT mean_exec_time double precision, OUT stddev_exec_time double precision, OUT rows bigint, OUT shared_blks_hit bigint, OUT shared_blks_read bigint, OUT shared_blks_dirtied bigint, OUT shared_blks_written bigint, OUT local_blks_hit bigint, OUT local_blks_read bigint, OUT local_blks_dirtied bigint, OUT local_blks_written bigint, OUT temp_blks_read bigint, OUT temp_blks_written bigint, OUT blk_read_time double precision, OUT blk_write_time double precision, OUT temp_blk_read_time double precision, OUT temp_blk_write_time double precision, OUT wal_records bigint, OUT wal_fpi bigint, OUT wal_bytes numeric, OUT jit_functions bigint, OUT jit_generation_time double precision, OUT jit_inlining_count bigint, OUT jit_inlining_time double precision, OUT jit_optimization_count bigint, OUT jit_optimization_time double precision, OUT jit_emission_count bigint, OUT jit_emission_time double precision | SETOF record
+ pg_stat_statements | extensions | pg_stat_statements_info | OUT dealloc bigint, OUT stats_reset timestamp with time zone | record
+ pg_stat_statements | extensions | pg_stat_statements_reset | userid oid, dbid oid, queryid bigint | void
+ pg_surgery | public | heap_force_freeze | reloid regclass, tids tid[] | void
+ pg_surgery | public | heap_force_kill | reloid regclass, tids tid[] | void
+ pg_tle | pgtle | available_extension_versions | OUT name name, OUT version text, OUT superuser boolean, OUT trusted boolean, OUT relocatable boolean, OUT schema name, OUT requires name[], OUT comment text | SETOF record
+ pg_tle | pgtle | available_extensions | OUT name name, OUT default_version text, OUT comment text | SETOF record
+ pg_tle | pgtle | create_base_type | typenamespace regnamespace, typename name, infunc regprocedure, outfunc regprocedure, internallength integer, alignment text, storage text | void
+ pg_tle | pgtle | create_base_type_if_not_exists | typenamespace regnamespace, typename name, infunc regprocedure, outfunc regprocedure, internallength integer, alignment text, storage text | boolean
+ pg_tle | pgtle | create_operator_func | typenamespace regnamespace, typename name, opfunc regprocedure | void
+ pg_tle | pgtle | create_operator_func_if_not_exists | typenamespace regnamespace, typename name, opfunc regprocedure | boolean
+ pg_tle | pgtle | create_shell_type | typenamespace regnamespace, typename name | void
+ pg_tle | pgtle | create_shell_type_if_not_exists | typenamespace regnamespace, typename name | boolean
+ pg_tle | pgtle | extension_update_paths | name name, OUT source text, OUT target text, OUT path text | SETOF record
+ pg_tle | pgtle | install_extension | name text, version text, description text, ext text, requires text[] | boolean
+ pg_tle | pgtle | install_extension_version_sql | name text, version text, ext text | boolean
+ pg_tle | pgtle | install_update_path | name text, fromvers text, tovers text, ext text | boolean
+ pg_tle | pgtle | pg_tle_feature_info_sql_drop | | event_trigger
+ pg_tle | pgtle | register_feature | proc regproc, feature pgtle.pg_tle_features | void
+ pg_tle | pgtle | register_feature_if_not_exists | proc regproc, feature pgtle.pg_tle_features | boolean
+ pg_tle | pgtle | set_default_version | name text, version text | boolean
+ pg_tle | pgtle | uninstall_extension | extname text | boolean
+ pg_tle | pgtle | uninstall_extension | extname text, version text | boolean
+ pg_tle | pgtle | uninstall_extension_if_exists | extname text | boolean
+ pg_tle | pgtle | uninstall_update_path | extname text, fromvers text, tovers text | boolean
+ pg_tle | pgtle | uninstall_update_path_if_exists | extname text, fromvers text, tovers text | boolean
+ pg_tle | pgtle | unregister_feature | proc regproc, feature pgtle.pg_tle_features | void
+ pg_tle | pgtle | unregister_feature_if_exists | proc regproc, feature pgtle.pg_tle_features | boolean
+ pg_trgm | public | gin_extract_query_trgm | text, internal, smallint, internal, internal, internal, internal | internal
+ pg_trgm | public | gin_extract_value_trgm | text, internal | internal
+ pg_trgm | public | gin_trgm_consistent | internal, smallint, text, integer, internal, internal, internal, internal | boolean
+ pg_trgm | public | gin_trgm_triconsistent | internal, smallint, text, integer, internal, internal, internal | "char"
+ pg_trgm | public | gtrgm_compress | internal | internal
+ pg_trgm | public | gtrgm_consistent | internal, text, smallint, oid, internal | boolean
+ pg_trgm | public | gtrgm_decompress | internal | internal
+ pg_trgm | public | gtrgm_distance | internal, text, smallint, oid, internal | double precision
+ pg_trgm | public | gtrgm_in | cstring | gtrgm
+ pg_trgm | public | gtrgm_options | internal | void
+ pg_trgm | public | gtrgm_out | gtrgm | cstring
+ pg_trgm | public | gtrgm_penalty | internal, internal, internal | internal
+ pg_trgm | public | gtrgm_picksplit | internal, internal | internal
+ pg_trgm | public | gtrgm_same | gtrgm, gtrgm, internal | internal
+ pg_trgm | public | gtrgm_union | internal, internal | gtrgm
+ pg_trgm | public | set_limit | real | real
+ pg_trgm | public | show_limit | | real
+ pg_trgm | public | show_trgm | text | text[]
+ pg_trgm | public | similarity | text, text | real
+ pg_trgm | public | similarity_dist | text, text | real
+ pg_trgm | public | similarity_op | text, text | boolean
+ pg_trgm | public | strict_word_similarity | text, text | real
+ pg_trgm | public | strict_word_similarity_commutator_op | text, text | boolean
+ pg_trgm | public | strict_word_similarity_dist_commutator_op | text, text | real
+ pg_trgm | public | strict_word_similarity_dist_op | text, text | real
+ pg_trgm | public | strict_word_similarity_op | text, text | boolean
+ pg_trgm | public | word_similarity | text, text | real
+ pg_trgm | public | word_similarity_commutator_op | text, text | boolean
+ pg_trgm | public | word_similarity_dist_commutator_op | text, text | real
+ pg_trgm | public | word_similarity_dist_op | text, text | real
+ pg_trgm | public | word_similarity_op | text, text | boolean
+ pg_visibility | public | pg_check_frozen | regclass, OUT t_ctid tid | SETOF tid
+ pg_visibility | public | pg_check_visible | regclass, OUT t_ctid tid | SETOF tid
+ pg_visibility | public | pg_truncate_visibility_map | regclass | void
+ pg_visibility | public | pg_visibility | regclass, OUT blkno bigint, OUT all_visible boolean, OUT all_frozen boolean, OUT pd_all_visible boolean | SETOF record
+ pg_visibility | public | pg_visibility | regclass, blkno bigint, OUT all_visible boolean, OUT all_frozen boolean, OUT pd_all_visible boolean | record
+ pg_visibility | public | pg_visibility_map | regclass, blkno bigint, OUT all_visible boolean, OUT all_frozen boolean | record
+ pg_visibility | public | pg_visibility_map | regclass, OUT blkno bigint, OUT all_visible boolean, OUT all_frozen boolean | SETOF record
+ pg_visibility | public | pg_visibility_map_summary | regclass, OUT all_visible bigint, OUT all_frozen bigint | record
+ pg_walinspect | public | pg_get_wal_record_info | in_lsn pg_lsn, OUT start_lsn pg_lsn, OUT end_lsn pg_lsn, OUT prev_lsn pg_lsn, OUT xid xid, OUT resource_manager text, OUT record_type text, OUT record_length integer, OUT main_data_length integer, OUT fpi_length integer, OUT description text, OUT block_ref text | record
+ pg_walinspect | public | pg_get_wal_records_info | start_lsn pg_lsn, end_lsn pg_lsn, OUT start_lsn pg_lsn, OUT end_lsn pg_lsn, OUT prev_lsn pg_lsn, OUT xid xid, OUT resource_manager text, OUT record_type text, OUT record_length integer, OUT main_data_length integer, OUT fpi_length integer, OUT description text, OUT block_ref text | SETOF record
+ pg_walinspect | public | pg_get_wal_stats | start_lsn pg_lsn, end_lsn pg_lsn, per_record boolean, OUT "resource_manager/record_type" text, OUT count bigint, OUT count_percentage double precision, OUT record_size bigint, OUT record_size_percentage double precision, OUT fpi_size bigint, OUT fpi_size_percentage double precision, OUT combined_size bigint, OUT combined_size_percentage double precision | SETOF record
+ pgaudit | public | pgaudit_ddl_command_end | | event_trigger
+ pgaudit | public | pgaudit_sql_drop | | event_trigger
+ pgcrypto | extensions | armor | bytea, text[], text[] | text
+ pgcrypto | extensions | armor | bytea | text
+ pgcrypto | extensions | crypt | text, text | text
+ pgcrypto | extensions | dearmor | text | bytea
+ pgcrypto | extensions | decrypt | bytea, bytea, text | bytea
+ pgcrypto | extensions | decrypt_iv | bytea, bytea, bytea, text | bytea
+ pgcrypto | extensions | digest | text, text | bytea
+ pgcrypto | extensions | digest | bytea, text | bytea
+ pgcrypto | extensions | encrypt | bytea, bytea, text | bytea
+ pgcrypto | extensions | encrypt_iv | bytea, bytea, bytea, text | bytea
+ pgcrypto | extensions | gen_random_bytes | integer | bytea
+ pgcrypto | extensions | gen_random_uuid | | uuid
+ pgcrypto | extensions | gen_salt | text | text
+ pgcrypto | extensions | gen_salt | text, integer | text
+ pgcrypto | extensions | hmac | text, text, text | bytea
+ pgcrypto | extensions | hmac | bytea, bytea, text | bytea
+ pgcrypto | extensions | pgp_armor_headers | text, OUT key text, OUT value text | SETOF record
+ pgcrypto | extensions | pgp_key_id | bytea | text
+ pgcrypto | extensions | pgp_pub_decrypt | bytea, bytea | text
+ pgcrypto | extensions | pgp_pub_decrypt | bytea, bytea, text, text | text
+ pgcrypto | extensions | pgp_pub_decrypt | bytea, bytea, text | text
+ pgcrypto | extensions | pgp_pub_decrypt_bytea | bytea, bytea | bytea
+ pgcrypto | extensions | pgp_pub_decrypt_bytea | bytea, bytea, text, text | bytea
+ pgcrypto | extensions | pgp_pub_decrypt_bytea | bytea, bytea, text | bytea
+ pgcrypto | extensions | pgp_pub_encrypt | text, bytea, text | bytea
+ pgcrypto | extensions | pgp_pub_encrypt | text, bytea | bytea
+ pgcrypto | extensions | pgp_pub_encrypt_bytea | bytea, bytea | bytea
+ pgcrypto | extensions | pgp_pub_encrypt_bytea | bytea, bytea, text | bytea
+ pgcrypto | extensions | pgp_sym_decrypt | bytea, text | text
+ pgcrypto | extensions | pgp_sym_decrypt | bytea, text, text | text
+ pgcrypto | extensions | pgp_sym_decrypt_bytea | bytea, text | bytea
+ pgcrypto | extensions | pgp_sym_decrypt_bytea | bytea, text, text | bytea
+ pgcrypto | extensions | pgp_sym_encrypt | text, text | bytea
+ pgcrypto | extensions | pgp_sym_encrypt | text, text, text | bytea
+ pgcrypto | extensions | pgp_sym_encrypt_bytea | bytea, text | bytea
+ pgcrypto | extensions | pgp_sym_encrypt_bytea | bytea, text, text | bytea
+ pgjwt | extensions | algorithm_sign | signables text, secret text, algorithm text | text
+ pgjwt | extensions | sign | payload json, secret text, algorithm text | text
+ pgjwt | extensions | try_cast_double | inp text | double precision
+ pgjwt | extensions | url_decode | data text | bytea
+ pgjwt | extensions | url_encode | data bytea | text
+ pgjwt | extensions | verify | token text, secret text, algorithm text | TABLE(header json, payload json, valid boolean)
+ pgmq | pgmq | _belongs_to_pgmq | table_name text | boolean
+ pgmq | pgmq | _ensure_pg_partman_installed | | void
+ pgmq | pgmq | _get_partition_col | partition_interval text | text
+ pgmq | pgmq | _get_pg_partman_major_version | | integer
+ pgmq | pgmq | _get_pg_partman_schema | | text
+ pgmq | pgmq | archive | queue_name text, msg_id bigint | boolean
+ pgmq | pgmq | archive | queue_name text, msg_ids bigint[] | SETOF bigint
+ pgmq | pgmq | convert_archive_partitioned | table_name text, partition_interval text, retention_interval text, leading_partition integer | void
+ pgmq | pgmq | create | queue_name text | void
+ pgmq | pgmq | create_non_partitioned | queue_name text | void
+ pgmq | pgmq | create_partitioned | queue_name text, partition_interval text, retention_interval text | void
+ pgmq | pgmq | create_unlogged | queue_name text | void
+ pgmq | pgmq | delete | queue_name text, msg_id bigint | boolean
+ pgmq | pgmq | delete | queue_name text, msg_ids bigint[] | SETOF bigint
+ pgmq | pgmq | detach_archive | queue_name text | void
+ pgmq | pgmq | drop_queue | queue_name text | boolean
+ pgmq | pgmq | format_table_name | queue_name text, prefix text | text
+ pgmq | pgmq | list_queues | | SETOF pgmq.queue_record
+ pgmq | pgmq | metrics | queue_name text | pgmq.metrics_result
+ pgmq | pgmq | metrics_all | | SETOF pgmq.metrics_result
+ pgmq | pgmq | pop | queue_name text | SETOF pgmq.message_record
+ pgmq | pgmq | purge_queue | queue_name text | bigint
+ pgmq | pgmq | read | queue_name text, vt integer, qty integer | SETOF pgmq.message_record
+ pgmq | pgmq | read_with_poll | queue_name text, vt integer, qty integer, max_poll_seconds integer, poll_interval_ms integer | SETOF pgmq.message_record
+ pgmq | pgmq | send | queue_name text, msg jsonb, delay integer | SETOF bigint
+ pgmq | pgmq | send_batch | queue_name text, msgs jsonb[], delay integer | SETOF bigint
+ pgmq | pgmq | set_vt | queue_name text, msg_id bigint, vt integer | SETOF pgmq.message_record
+ pgmq | pgmq | validate_queue_name | queue_name text | void
+ pgroonga | pgroonga | command | groongacommand text | text
+ pgroonga | pgroonga | command | groongacommand text, arguments text[] | text
+ pgroonga | pgroonga | command_escape_value | value text | text
+ pgroonga | pgroonga | contain_varchar_array | character varying[], character varying | boolean
+ pgroonga | pgroonga | escape | value bigint | text
+ pgroonga | pgroonga | escape | value double precision | text
+ pgroonga | pgroonga | escape | value text, special_characters text | text
+ pgroonga | pgroonga | escape | value timestamp without time zone | text
+ pgroonga | pgroonga | escape | value smallint | text
+ pgroonga | pgroonga | escape | value text | text
+ pgroonga | pgroonga | escape | value timestamp with time zone | text
+ pgroonga | pgroonga | escape | value real | text
+ pgroonga | pgroonga | escape | value boolean | text
+ pgroonga | pgroonga | escape | value integer | text
+ pgroonga | pgroonga | flush | indexname cstring | boolean
+ pgroonga | pgroonga | highlight_html | target text, keywords text[] | text
+ pgroonga | pgroonga | match_in_text | text, text[] | boolean
+ pgroonga | pgroonga | match_in_text_array | text[], text[] | boolean
+ pgroonga | pgroonga | match_in_varchar | character varying, character varying[] | boolean
+ pgroonga | pgroonga | match_jsonb | jsonb, text | boolean
+ pgroonga | pgroonga | match_positions_byte | target text, keywords text[] | integer[]
+ pgroonga | pgroonga | match_positions_character | target text, keywords text[] | integer[]
+ pgroonga | pgroonga | match_query | text, text | boolean
+ pgroonga | pgroonga | match_query | character varying, character varying | boolean
+ pgroonga | pgroonga | match_query | text[], text | boolean
+ pgroonga | pgroonga | match_regexp | text, text | boolean
+ pgroonga | pgroonga | match_regexp | character varying, character varying | boolean
+ pgroonga | pgroonga | match_script_jsonb | jsonb, text | boolean
+ pgroonga | pgroonga | match_term | target text, term text | boolean
+ pgroonga | pgroonga | match_term | target text[], term text | boolean
+ pgroonga | pgroonga | match_term | target character varying[], term character varying | boolean
+ pgroonga | pgroonga | match_term | target character varying, term character varying | boolean
+ pgroonga | pgroonga | match_text | text, text | boolean
+ pgroonga | pgroonga | match_text_array | text[], text | boolean
+ pgroonga | pgroonga | match_varchar | character varying, character varying | boolean
+ pgroonga | pgroonga | prefix_in_text | text, text[] | boolean
+ pgroonga | pgroonga | prefix_in_text_array | text[], text[] | boolean
+ pgroonga | pgroonga | prefix_rk_in_text | text, text[] | boolean
+ pgroonga | pgroonga | prefix_rk_in_text_array | text[], text[] | boolean
+ pgroonga | pgroonga | prefix_rk_text | text, text | boolean
+ pgroonga | pgroonga | prefix_rk_text_array | text[], text | boolean
+ pgroonga | pgroonga | prefix_text | text, text | boolean
+ pgroonga | pgroonga | prefix_text_array | text[], text | boolean
+ pgroonga | pgroonga | query_escape | query text | text
+ pgroonga | pgroonga | query_expand | tablename cstring, termcolumnname text, synonymscolumnname text, query text | text
+ pgroonga | pgroonga | query_extract_keywords | query text | text[]
+ pgroonga | pgroonga | query_in_text | text, text[] | boolean
+ pgroonga | pgroonga | query_in_text_array | text[], text[] | boolean
+ pgroonga | pgroonga | query_in_varchar | character varying, character varying[] | boolean
+ pgroonga | pgroonga | query_jsonb | jsonb, text | boolean
+ pgroonga | pgroonga | query_text | text, text | boolean
+ pgroonga | pgroonga | query_text_array | text[], text | boolean
+ pgroonga | pgroonga | query_varchar | character varying, character varying | boolean
+ pgroonga | pgroonga | regexp_text | text, text | boolean
+ pgroonga | pgroonga | regexp_varchar | character varying, character varying | boolean
+ pgroonga | pgroonga | score | "row" record | double precision
+ pgroonga | pgroonga | script_jsonb | jsonb, text | boolean
+ pgroonga | pgroonga | script_text | text, text | boolean
+ pgroonga | pgroonga | script_text_array | text[], text | boolean
+ pgroonga | pgroonga | script_varchar | character varying, character varying | boolean
+ pgroonga | pgroonga | similar_text | text, text | boolean
+ pgroonga | pgroonga | similar_text_array | text[], text | boolean
+ pgroonga | pgroonga | similar_varchar | character varying, character varying | boolean
+ pgroonga | pgroonga | snippet_html | target text, keywords text[], width integer | text[]
+ pgroonga |
pgroonga | table_name | indexname cstring | text + pgroonga | public | pgroonga_command | groongacommand text | text + pgroonga | public | pgroonga_command | groongacommand text, arguments text[] | text + pgroonga | public | pgroonga_command_escape_value | value text | text + pgroonga | public | pgroonga_condition | query text, weights integer[], scorers text[], schema_name text, index_name text, column_name text, fuzzy_max_distance_ratio real | pgroonga_condition + pgroonga | public | pgroonga_contain_varchar_array | character varying[], character varying | boolean + pgroonga | public | pgroonga_equal_query_text_array | targets text[], query text | boolean + pgroonga | public | pgroonga_equal_query_text_array_condition | targets text[], condition pgroonga_condition | boolean + pgroonga | public | pgroonga_equal_query_text_array_condition | targets text[], condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_equal_query_varchar_array | targets character varying[], query text | boolean + pgroonga | public | pgroonga_equal_query_varchar_array_condition | targets character varying[], condition pgroonga_condition | boolean + pgroonga | public | pgroonga_equal_query_varchar_array_condition | targets character varying[], condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_equal_text | target text, other text | boolean + pgroonga | public | pgroonga_equal_text_condition | target text, condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_equal_text_condition | target text, condition pgroonga_condition | boolean + pgroonga | public | pgroonga_equal_varchar | target character varying, other character varying | boolean + pgroonga | public | pgroonga_equal_varchar_condition | target character varying, condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_equal_varchar_condition | target character varying, condition pgroonga_condition | boolean + pgroonga | public | pgroonga_escape | value bigint | text + pgroonga | public | pgroonga_escape | value double precision | text + pgroonga | public | pgroonga_escape | value text, special_characters text | text + pgroonga | public | pgroonga_escape | value timestamp without time zone | text + pgroonga | public | pgroonga_escape | value smallint | text + pgroonga | public | pgroonga_escape | value text | text + pgroonga | public | pgroonga_escape | value timestamp with time zone | text + pgroonga | public | pgroonga_escape | value real | text + pgroonga | public | pgroonga_escape | value boolean | text + pgroonga | public | pgroonga_escape | value integer | text + pgroonga | public | pgroonga_flush | indexname cstring | boolean + pgroonga | public | pgroonga_handler | internal | index_am_handler + pgroonga | public | pgroonga_highlight_html | target text, keywords text[], indexname cstring | text + pgroonga | public | pgroonga_highlight_html | targets text[], keywords text[], indexname cstring | text[] + pgroonga | public | pgroonga_highlight_html | target text, keywords text[] | text + pgroonga | public | pgroonga_highlight_html | targets text[], keywords text[] | text[] + pgroonga | public | pgroonga_index_column_name | indexname cstring, columnindex integer | text + pgroonga | public | pgroonga_index_column_name | indexname cstring, columnname text | text + pgroonga | public | pgroonga_is_writable | | boolean + pgroonga | public | pgroonga_list_broken_indexes | | SETOF text + pgroonga | public | pgroonga_list_lagged_indexes 
| | SETOF text + pgroonga | public | pgroonga_match_in_text | text, text[] | boolean + pgroonga | public | pgroonga_match_in_text_array | text[], text[] | boolean + pgroonga | public | pgroonga_match_in_varchar | character varying, character varying[] | boolean + pgroonga | public | pgroonga_match_jsonb | jsonb, text | boolean + pgroonga | public | pgroonga_match_positions_byte | target text, keywords text[], indexname cstring | integer[] + pgroonga | public | pgroonga_match_positions_byte | target text, keywords text[] | integer[] + pgroonga | public | pgroonga_match_positions_character | target text, keywords text[], indexname cstring | integer[] + pgroonga | public | pgroonga_match_positions_character | target text, keywords text[] | integer[] + pgroonga | public | pgroonga_match_query | text, text | boolean + pgroonga | public | pgroonga_match_query | character varying, character varying | boolean + pgroonga | public | pgroonga_match_query | text[], text | boolean + pgroonga | public | pgroonga_match_regexp | text, text | boolean + pgroonga | public | pgroonga_match_regexp | character varying, character varying | boolean + pgroonga | public | pgroonga_match_script_jsonb | jsonb, text | boolean + pgroonga | public | pgroonga_match_term | target text, term text | boolean + pgroonga | public | pgroonga_match_term | target text[], term text | boolean + pgroonga | public | pgroonga_match_term | target character varying[], term character varying | boolean + pgroonga | public | pgroonga_match_term | target character varying, term character varying | boolean + pgroonga | public | pgroonga_match_text | text, text | boolean + pgroonga | public | pgroonga_match_text_array | text[], text | boolean + pgroonga | public | pgroonga_match_text_array_condition | target text[], condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_match_text_array_condition | target text[], condition pgroonga_condition | boolean + pgroonga | public | pgroonga_match_text_array_condition_with_scorers | target text[], condition pgroonga_full_text_search_condition_with_scorers | boolean + pgroonga | public | pgroonga_match_text_condition | target text, condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_match_text_condition | target text, condition pgroonga_condition | boolean + pgroonga | public | pgroonga_match_text_condition_with_scorers | target text, condition pgroonga_full_text_search_condition_with_scorers | boolean + pgroonga | public | pgroonga_match_varchar | character varying, character varying | boolean + pgroonga | public | pgroonga_match_varchar_condition | target character varying, condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_match_varchar_condition | target character varying, condition pgroonga_condition | boolean + pgroonga | public | pgroonga_match_varchar_condition_with_scorers | target character varying, condition pgroonga_full_text_search_condition_with_scorers | boolean + pgroonga | public | pgroonga_normalize | target text | text + pgroonga | public | pgroonga_normalize | target text, normalizername text | text + pgroonga | public | pgroonga_not_prefix_in_text | text, text[] | boolean + pgroonga | public | pgroonga_prefix_in_text | text, text[] | boolean + pgroonga | public | pgroonga_prefix_in_text_array | text[], text[] | boolean + pgroonga | public | pgroonga_prefix_in_varchar | character varying, character varying[] | boolean + pgroonga | public | pgroonga_prefix_in_varchar_array | 
character varying[], character varying[] | boolean + pgroonga | public | pgroonga_prefix_rk_in_text | text, text[] | boolean + pgroonga | public | pgroonga_prefix_rk_in_text_array | text[], text[] | boolean + pgroonga | public | pgroonga_prefix_rk_in_varchar | character varying, character varying[] | boolean + pgroonga | public | pgroonga_prefix_rk_in_varchar_array | character varying[], character varying[] | boolean + pgroonga | public | pgroonga_prefix_rk_text | text, text | boolean + pgroonga | public | pgroonga_prefix_rk_text_array | text[], text | boolean + pgroonga | public | pgroonga_prefix_rk_varchar | character varying, character varying | boolean + pgroonga | public | pgroonga_prefix_rk_varchar_array | character varying[], character varying | boolean + pgroonga | public | pgroonga_prefix_text | text, text | boolean + pgroonga | public | pgroonga_prefix_text_array | text[], text | boolean + pgroonga | public | pgroonga_prefix_text_array_condition | text[], pgroonga_condition | boolean + pgroonga | public | pgroonga_prefix_text_condition | text, condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_prefix_text_condition | text, condition pgroonga_condition | boolean + pgroonga | public | pgroonga_prefix_varchar | character varying, character varying | boolean + pgroonga | public | pgroonga_prefix_varchar_array | character varying[], character varying | boolean + pgroonga | public | pgroonga_prefix_varchar_array_condition | character varying[], pgroonga_condition | boolean + pgroonga | public | pgroonga_prefix_varchar_condition | target character varying, conditoin pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_prefix_varchar_condition | target character varying, conditoin pgroonga_condition | boolean + pgroonga | public | pgroonga_query_escape | query text | text + pgroonga | public | pgroonga_query_expand | tablename cstring, termcolumnname text, synonymscolumnname text, query text | text + pgroonga | public | pgroonga_query_extract_keywords | query text, index_name text | text[] + pgroonga | public | pgroonga_query_in_text | text, text[] | boolean + pgroonga | public | pgroonga_query_in_text_array | text[], text[] | boolean + pgroonga | public | pgroonga_query_in_varchar | character varying, character varying[] | boolean + pgroonga | public | pgroonga_query_jsonb | jsonb, text | boolean + pgroonga | public | pgroonga_query_text | text, text | boolean + pgroonga | public | pgroonga_query_text_array | text[], text | boolean + pgroonga | public | pgroonga_query_text_array_condition | targets text[], condition pgroonga_condition | boolean + pgroonga | public | pgroonga_query_text_array_condition | targets text[], condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_query_text_array_condition_with_scorers | targets text[], condition pgroonga_full_text_search_condition_with_scorers | boolean + pgroonga | public | pgroonga_query_text_condition | target text, condition pgroonga_full_text_search_condition | boolean + pgroonga | public | pgroonga_query_text_condition | target text, condition pgroonga_condition | boolean + pgroonga | public | pgroonga_query_text_condition_with_scorers | target text, condition pgroonga_full_text_search_condition_with_scorers | boolean + pgroonga | public | pgroonga_query_varchar | character varying, character varying | boolean + pgroonga | public | pgroonga_query_varchar_condition | target character varying, condition pgroonga_full_text_search_condition | boolean 
+ pgroonga | public | pgroonga_query_varchar_condition | target character varying, condition pgroonga_condition | boolean + pgroonga | public | pgroonga_query_varchar_condition_with_scorers | target character varying, condition pgroonga_full_text_search_condition_with_scorers | boolean + pgroonga | public | pgroonga_regexp_in_text | text, text[] | boolean + pgroonga | public | pgroonga_regexp_in_varchar | character varying, character varying[] | boolean + pgroonga | public | pgroonga_regexp_text | text, text | boolean + pgroonga | public | pgroonga_regexp_text_array | targets text[], pattern text | boolean + pgroonga | public | pgroonga_regexp_text_array_condition | targets text[], pattern pgroonga_condition | boolean + pgroonga | public | pgroonga_regexp_varchar | character varying, character varying | boolean + pgroonga | public | pgroonga_result_to_jsonb_objects | result jsonb | jsonb + pgroonga | public | pgroonga_result_to_recordset | result jsonb | SETOF record + pgroonga | public | pgroonga_score | "row" record | double precision + pgroonga | public | pgroonga_score | tableoid oid, ctid tid | double precision + pgroonga | public | pgroonga_script_jsonb | jsonb, text | boolean + pgroonga | public | pgroonga_script_text | text, text | boolean + pgroonga | public | pgroonga_script_text_array | text[], text | boolean + pgroonga | public | pgroonga_script_varchar | character varying, character varying | boolean + pgroonga | public | pgroonga_set_writable | newwritable boolean | boolean + pgroonga | public | pgroonga_similar_text | text, text | boolean + pgroonga | public | pgroonga_similar_text_array | text[], text | boolean + pgroonga | public | pgroonga_similar_varchar | character varying, character varying | boolean + pgroonga | public | pgroonga_snippet_html | target text, keywords text[], width integer | text[] + pgroonga | public | pgroonga_table_name | indexname cstring | text + pgroonga | public | pgroonga_tokenize | target text, VARIADIC options text[] | json[] + pgroonga | public | pgroonga_vacuum | | boolean + pgroonga | public | pgroonga_wal_apply | indexname cstring | bigint + pgroonga | public | pgroonga_wal_apply | | bigint + pgroonga | public | pgroonga_wal_set_applied_position | block bigint, "offset" bigint | boolean + pgroonga | public | pgroonga_wal_set_applied_position | indexname cstring, block bigint, "offset" bigint | boolean + pgroonga | public | pgroonga_wal_set_applied_position | indexname cstring | boolean + pgroonga | public | pgroonga_wal_set_applied_position | | boolean + pgroonga | public | pgroonga_wal_status | | TABLE(name text, oid oid, current_block bigint, current_offset bigint, current_size bigint, last_block bigint, last_offset bigint, last_size bigint) + pgroonga | public | pgroonga_wal_truncate | indexname cstring | bigint + pgroonga | public | pgroonga_wal_truncate | | bigint + pgroonga_database | public | pgroonga_database_remove | | boolean + pgrouting | public | _pgr_alphashape | text, alpha double precision, OUT seq1 bigint, OUT textgeom text | SETOF record + pgrouting | public | _pgr_array_reverse | anyarray | anyarray + pgrouting | public | _pgr_articulationpoints | edges_sql text, OUT seq integer, OUT node bigint | SETOF record + pgrouting | public | _pgr_astar | edges_sql text, start_vids anyarray, end_vids anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, normal boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT 
edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_astar | edges_sql text, combinations_sql text, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_bdastar | text, text, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_bdastar | text, anyarray, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_bddijkstra | text, anyarray, anyarray, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_bddijkstra | text, text, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_bellmanford | edges_sql text, from_vids anyarray, to_vids anyarray, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_bellmanford | edges_sql text, combinations_sql text, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_biconnectedcomponents | edges_sql text, OUT seq bigint, OUT component bigint, OUT edge bigint | SETOF record + pgrouting | public | _pgr_binarybreadthfirstsearch | edges_sql text, combinations_sql text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_binarybreadthfirstsearch | edges_sql text, from_vids anyarray, to_vids anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_bipartite | edges_sql text, OUT node bigint, OUT color bigint | SETOF record + pgrouting | public | _pgr_boost_version | | text + pgrouting | public | _pgr_breadthfirstsearch | edges_sql text, from_vids anyarray, max_depth bigint, directed boolean, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | 
public | _pgr_bridges | edges_sql text, OUT seq integer, OUT edge bigint | SETOF record + pgrouting | public | _pgr_build_type | | text + pgrouting | public | _pgr_checkcolumn | text, text, text, is_optional boolean, dryrun boolean | boolean + pgrouting | public | _pgr_checkquery | text | text + pgrouting | public | _pgr_checkverttab | vertname text, columnsarr text[], reporterrs integer, fnname text, OUT sname text, OUT vname text | record + pgrouting | public | _pgr_chinesepostman | edges_sql text, only_cost boolean, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_compilation_date | | text + pgrouting | public | _pgr_compiler_version | | text + pgrouting | public | _pgr_connectedcomponents | edges_sql text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record + pgrouting | public | _pgr_contraction | edges_sql text, contraction_order bigint[], max_cycles integer, forbidden_vertices bigint[], directed boolean, OUT type text, OUT id bigint, OUT contracted_vertices bigint[], OUT source bigint, OUT target bigint, OUT cost double precision | SETOF record + pgrouting | public | _pgr_createindex | tabname text, colname text, indext text, reporterrs integer, fnname text | void + pgrouting | public | _pgr_createindex | sname text, tname text, colname text, indext text, reporterrs integer, fnname text | void + pgrouting | public | _pgr_cuthillmckeeordering | text, OUT seq bigint, OUT node bigint | SETOF record + pgrouting | public | _pgr_dagshortestpath | text, anyarray, anyarray, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dagshortestpath | text, text, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_depthfirstsearch | edges_sql text, root_vids anyarray, directed boolean, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstra | edges_sql text, combinations_sql text, directed boolean, only_cost boolean, normal boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstra | edges_sql text, start_vids anyarray, end_vids anyarray, directed boolean, only_cost boolean, normal boolean, n_goals bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstra | edges_sql text, combinations_sql text, directed boolean, only_cost boolean, n_goals bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstra | edges_sql text, start_vids anyarray, end_vids anyarray, directed boolean, only_cost boolean, normal boolean, n_goals bigint, OUT seq integer, OUT path_seq integer, OUT 
start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstranear | text, anyarray, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstranear | text, anyarray, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstranear | text, bigint, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_dijkstravia | edges_sql text, via_vids anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | _pgr_drivingdistance | edges_sql text, start_vids anyarray, distance double precision, directed boolean, equicost boolean, OUT seq integer, OUT from_v bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_edgecoloring | edges_sql text, OUT edge_id bigint, OUT color_id bigint | SETOF record + pgrouting | public | _pgr_edgedisjointpaths | text, text, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_edgedisjointpaths | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_edwardmoore | edges_sql text, combinations_sql text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_edwardmoore | edges_sql text, from_vids anyarray, to_vids anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_endpoint | g geometry | geometry + pgrouting | public | _pgr_floydwarshall | edges_sql text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_get_statement | o_sql text | text + pgrouting | public | _pgr_getcolumnname | tab text, col text, reporterrs integer, fnname text | text + pgrouting | public | _pgr_getcolumnname | sname text, tname text, col text, reporterrs integer, fnname text | text + pgrouting | public | _pgr_getcolumntype | tab text, col text, reporterrs integer, fnname text | text + pgrouting | public | _pgr_getcolumntype | 
sname text, tname text, cname text, reporterrs integer, fnname text | text + pgrouting | public | _pgr_gettablename | tab text, reporterrs integer, fnname text, OUT sname text, OUT tname text | record + pgrouting | public | _pgr_git_hash | | text + pgrouting | public | _pgr_hawickcircuits | text, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_iscolumnindexed | tab text, col text, reporterrs integer, fnname text | boolean + pgrouting | public | _pgr_iscolumnindexed | sname text, tname text, cname text, reporterrs integer, fnname text | boolean + pgrouting | public | _pgr_iscolumnintable | tab text, col text | boolean + pgrouting | public | _pgr_isplanar | text | boolean + pgrouting | public | _pgr_johnson | edges_sql text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_kruskal | text, anyarray, fn_suffix text, max_depth bigint, distance double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_ksp | edges_sql text, start_vid bigint, end_vid bigint, k integer, directed boolean, heap_paths boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_lengauertarjandominatortree | edges_sql text, root_vid bigint, OUT seq integer, OUT vid bigint, OUT idom bigint | SETOF record + pgrouting | public | _pgr_lib_version | | text + pgrouting | public | _pgr_linegraph | text, directed boolean, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT reverse_cost double precision | SETOF record + pgrouting | public | _pgr_linegraphfull | text, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT edge bigint | SETOF record + pgrouting | public | _pgr_makeconnected | text, OUT seq bigint, OUT start_vid bigint, OUT end_vid bigint | SETOF record + pgrouting | public | _pgr_maxcardinalitymatch | edges_sql text, directed boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint | SETOF record + pgrouting | public | _pgr_maxflow | edges_sql text, combinations_sql text, algorithm integer, only_flow boolean, OUT seq integer, OUT edge_id bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | _pgr_maxflow | edges_sql text, sources anyarray, targets anyarray, algorithm integer, only_flow boolean, OUT seq integer, OUT edge_id bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | _pgr_maxflowmincost | edges_sql text, sources anyarray, targets anyarray, only_cost boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_maxflowmincost | edges_sql text, combinations_sql text, only_cost boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double 
precision | SETOF record + pgrouting | public | _pgr_msg | msgkind integer, fnname text, msg text | void + pgrouting | public | _pgr_onerror | errcond boolean, reporterrs integer, fnname text, msgerr text, hinto text, msgok text | void + pgrouting | public | _pgr_operating_system | | text + pgrouting | public | _pgr_parameter_check | fn text, sql text, big boolean | boolean + pgrouting | public | _pgr_pgsql_version | | text + pgrouting | public | _pgr_pickdeliver | text, text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT stop_id bigint, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record + pgrouting | public | _pgr_pickdelivereuclidean | text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record + pgrouting | public | _pgr_pointtoid | point geometry, tolerance double precision, vertname text, srid integer | bigint + pgrouting | public | _pgr_prim | text, anyarray, order_by text, max_depth bigint, distance double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_quote_ident | idname text | text + pgrouting | public | _pgr_sequentialvertexcoloring | edges_sql text, OUT vertex_id bigint, OUT color_id bigint | SETOF record + pgrouting | public | _pgr_startpoint | g geometry | geometry + pgrouting | public | _pgr_stoerwagner | edges_sql text, OUT seq integer, OUT edge bigint, OUT cost double precision, OUT mincut double precision | SETOF record + pgrouting | public | _pgr_strongcomponents | edges_sql text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record + pgrouting | public | _pgr_topologicalsort | edges_sql text, OUT seq integer, OUT sorted_v bigint | SETOF record + pgrouting | public | _pgr_transitiveclosure | edges_sql text, OUT seq integer, OUT vid bigint, OUT target_array bigint[] | SETOF record + pgrouting | public | _pgr_trsp | sql text, source_eid integer, source_pos double precision, target_eid integer, target_pos double precision, directed boolean, has_reverse_cost boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT cost double precision | SETOF record + pgrouting | public | _pgr_trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_trsp | text, text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_trsp | text, text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT 
edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_trsp | text, text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_trsp_withpoints | text, text, text, text, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT departure bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_trsp_withpoints | text, text, text, anyarray, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT departure bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_trspvia | text, text, anyarray, boolean, boolean, boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | _pgr_trspvia_withpoints | text, text, text, anyarray, boolean, boolean, boolean, character, boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | _pgr_trspviavertices | sql text, vids integer[], directed boolean, has_rcost boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT id3 integer, OUT cost double precision | SETOF record + pgrouting | public | _pgr_tsp | matrix_row_sql text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_tspeuclidean | coordinates_sql text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_turnrestrictedpath | text, text, bigint, bigint, integer, directed boolean, heap_paths boolean, stop_on_first boolean, strict boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_versionless | v1 text, v2 text | boolean + pgrouting | public | _pgr_vrponedepot | text, text, text, integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT stop_id bigint, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time 
double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record + pgrouting | public | _pgr_withpoints | edges_sql text, points_sql text, combinations_sql text, directed boolean, driving_side character, details boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_withpoints | edges_sql text, points_sql text, start_pids anyarray, end_pids anyarray, directed boolean, driving_side character, details boolean, only_cost boolean, normal boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_withpointsdd | edges_sql text, points_sql text, start_pid anyarray, distance double precision, directed boolean, driving_side character, details boolean, equicost boolean, OUT seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_withpointsksp | edges_sql text, points_sql text, start_pid bigint, end_pid bigint, k integer, directed boolean, heap_paths boolean, driving_side character, details boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _pgr_withpointsvia | sql text, via_edges bigint[], fraction double precision[], directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | _pgr_withpointsvia | text, text, anyarray, boolean, boolean, boolean, character, boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | _trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _v4trsp | text, text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | _v4trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_alphashape | geometry, alpha double precision | geometry + pgrouting | public | pgr_analyzegraph | text, double precision, the_geom text, id text, source text, target text, rows_where text | character varying + pgrouting | public | pgr_analyzeoneway | text, text[], text[], text[], text[], two_way_if_null boolean, oneway text, source text, target text | text + pgrouting | public | 
pgr_articulationpoints | text, OUT node bigint | SETOF bigint + pgrouting | public | pgr_astar | text, anyarray, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astar | text, bigint, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astar | text, bigint, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astar | text, anyarray, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astar | text, text, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astarcost | text, anyarray, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astarcost | text, bigint, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astarcost | text, text, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astarcost | text, anyarray, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astarcost | text, bigint, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_astarcostmatrix | text, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastar | text, bigint, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastar | text, anyarray, bigint, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, 
OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastar | text, anyarray, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastar | text, bigint, bigint, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastar | text, text, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcost | text, anyarray, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcost | text, bigint, bigint, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcost | text, anyarray, bigint, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcost | text, bigint, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcost | text, text, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcostmatrix | text, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstra | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstra | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstra | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstra | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstra | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF 
record + pgrouting | public | pgr_bddijkstracost | text, text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracost | text, bigint, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracost | text, anyarray, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracost | text, anyarray, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracost | text, bigint, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracostmatrix | text, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bellmanford | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bellmanford | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bellmanford | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bellmanford | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bellmanford | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_biconnectedcomponents | text, OUT seq bigint, OUT component bigint, OUT edge bigint | SETOF record + pgrouting | public | pgr_binarybreadthfirstsearch | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_binarybreadthfirstsearch | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_binarybreadthfirstsearch | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_binarybreadthfirstsearch | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_binarybreadthfirstsearch | text, text, directed boolean, OUT seq 
integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_bipartite | text, OUT vertex_id bigint, OUT color_id bigint | SETOF record
+ pgrouting | public | pgr_boykovkolmogorov | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | pgr_boykovkolmogorov | text, text, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | pgr_boykovkolmogorov | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | pgr_boykovkolmogorov | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | pgr_boykovkolmogorov | text, anyarray, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | pgr_breadthfirstsearch | text, anyarray, max_depth bigint, directed boolean, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_breadthfirstsearch | text, bigint, max_depth bigint, directed boolean, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_bridges | text, OUT edge bigint | SETOF bigint
+ pgrouting | public | pgr_chinesepostman | text, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_chinesepostmancost | text | double precision
+ pgrouting | public | pgr_connectedcomponents | text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record
+ pgrouting | public | pgr_contraction | text, bigint[], max_cycles integer, forbidden_vertices bigint[], directed boolean, OUT type text, OUT id bigint, OUT contracted_vertices bigint[], OUT source bigint, OUT target bigint, OUT cost double precision | SETOF record
+ pgrouting | public | pgr_createtopology | text, double precision, the_geom text, id text, source text, target text, rows_where text, clean boolean | character varying
+ pgrouting | public | pgr_createverticestable | text, the_geom text, source text, target text, rows_where text | text
+ pgrouting | public | pgr_cuthillmckeeordering | text, OUT seq bigint, OUT node bigint | SETOF record
+ pgrouting | public | pgr_dagshortestpath | text, anyarray, anyarray, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dagshortestpath | text, bigint, bigint, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dagshortestpath | text, anyarray, bigint, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dagshortestpath | text, text, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dagshortestpath | text, bigint, anyarray, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_degree | text, text, dryrun boolean, OUT node bigint, OUT degree bigint | SETOF record
+ pgrouting | public | pgr_depthfirstsearch | text, bigint, directed boolean, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_depthfirstsearch | text, anyarray, directed boolean, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstra | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstra | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstra | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstra | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstra | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstracost | text, text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstracost | text, bigint, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstracost | text, anyarray, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstracost | text, anyarray, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstracost | text, bigint, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstracostmatrix | text, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstranear | text, bigint, anyarray, directed boolean, cap bigint, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstranear | text, anyarray, bigint, directed boolean, cap bigint, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstranear | text, anyarray, anyarray, directed boolean, cap bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstranear | text, text, directed boolean, cap bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstranearcost | text, text, directed boolean, cap bigint, global boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstranearcost | text, anyarray, anyarray, directed boolean, cap bigint, global boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstranearcost | text, bigint, anyarray, directed boolean, cap bigint, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstranearcost | text, anyarray, bigint, directed boolean, cap bigint, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_dijkstravia | text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record
+ pgrouting | public | pgr_drivingdistance | text, bigint, double precision, directed boolean, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_drivingdistance | text, anyarray, double precision, directed boolean, equicost boolean, OUT seq integer, OUT from_v bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_edgecoloring | text, OUT edge_id bigint, OUT color_id bigint | SETOF record
+ pgrouting | public | pgr_edgedisjointpaths | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_edgedisjointpaths | text, text, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_edgedisjointpaths | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_edgedisjointpaths | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_edgedisjointpaths | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_edmondskarp | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | pgr_edmondskarp | text, text, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | pgr_edmondskarp | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | pgr_edmondskarp | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | pgr_edmondskarp | text, anyarray, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | pgr_edwardmoore | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_edwardmoore | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_edwardmoore | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_edwardmoore | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_edwardmoore | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_extractvertices | text, dryrun boolean, OUT id bigint, OUT in_edges bigint[], OUT out_edges bigint[], OUT x double precision, OUT y double precision, OUT geom geometry | SETOF record
+ pgrouting | public | pgr_findcloseedges | text, geometry[], double precision, cap integer, partial boolean, dryrun boolean, OUT edge_id bigint, OUT fraction double precision, OUT side character, OUT distance double precision, OUT geom geometry, OUT edge geometry | SETOF record
+ pgrouting | public | pgr_findcloseedges | text, geometry, double precision, cap integer, partial boolean, dryrun boolean, OUT edge_id bigint, OUT fraction double precision, OUT side character, OUT distance double precision, OUT geom geometry, OUT edge geometry | SETOF record
+ pgrouting | public | pgr_floydwarshall | text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_full_version | OUT version text, OUT build_type text, OUT compile_date text, OUT library text, OUT system text, OUT postgresql text, OUT compiler text, OUT boost text, OUT hash text | record
+ pgrouting | public | pgr_hawickcircuits | text, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_isplanar | text | boolean
+ pgrouting | public | pgr_johnson | text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_kruskal | text, OUT edge bigint, OUT cost double precision | SETOF record
+ pgrouting | public | pgr_kruskalbfs | text, anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_kruskalbfs | text, bigint, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_kruskaldd | text, bigint, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_kruskaldd | text, bigint, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_kruskaldd | text, anyarray, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_kruskaldd | text, anyarray, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_kruskaldfs | text, anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_kruskaldfs | text, bigint, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_ksp | text, bigint, bigint, integer, directed boolean, heap_paths boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_lengauertarjandominatortree | text, bigint, OUT seq integer, OUT vertex_id bigint, OUT idom bigint | SETOF record
+ pgrouting | public | pgr_linegraph | text, directed boolean, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT reverse_cost double precision | SETOF record
+ pgrouting | public | pgr_linegraphfull | text, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT edge bigint | SETOF record
+ pgrouting | public | pgr_makeconnected | text, OUT seq bigint, OUT start_vid bigint, OUT end_vid bigint | SETOF record
+ pgrouting | public | pgr_maxcardinalitymatch | text, directed boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint | SETOF record
+ pgrouting | public | pgr_maxcardinalitymatch | text, OUT edge bigint | SETOF bigint
+ pgrouting | public | pgr_maxflow | text, anyarray, anyarray | bigint
+ pgrouting | public | pgr_maxflow | text, text | bigint
+ pgrouting | public | pgr_maxflow | text, bigint, anyarray | bigint
+ pgrouting | public | pgr_maxflow | text, anyarray, bigint | bigint
+ pgrouting | public | pgr_maxflow | text, bigint, bigint | bigint
+ pgrouting | public | pgr_maxflowmincost | text, text, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_maxflowmincost | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_maxflowmincost | text, anyarray, bigint, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_maxflowmincost | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_maxflowmincost | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_maxflowmincost_cost | text, anyarray, anyarray | double precision
+ pgrouting | public | pgr_maxflowmincost_cost | text, text | double precision
+ pgrouting | public | pgr_maxflowmincost_cost | text, bigint, anyarray | double precision
+ pgrouting | public | pgr_maxflowmincost_cost | text, anyarray, bigint | double precision
+ pgrouting | public | pgr_maxflowmincost_cost | text, bigint, bigint | double precision
+ pgrouting | public | pgr_nodenetwork | text, double precision, id text, the_geom text, table_ending text, rows_where text, outall boolean | text
+ pgrouting | public | pgr_pickdeliver | text, text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT stop_id bigint, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record
+ pgrouting | public | pgr_pickdelivereuclidean | text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record
+ pgrouting | public | pgr_prim | text, OUT edge bigint, OUT cost double precision | SETOF record
+ pgrouting | public | pgr_primbfs | text, anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_primbfs | text, bigint, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_primdd | text, bigint, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_primdd | text, bigint, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_primdd | text, anyarray, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_primdd | text, anyarray, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_primdfs | text, anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_primdfs | text, bigint, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_pushrelabel | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | pgr_pushrelabel | text, text, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | pgr_pushrelabel | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | pgr_pushrelabel | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | pgr_pushrelabel | text, anyarray, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | pgr_sequentialvertexcoloring | text, OUT vertex_id bigint, OUT color_id bigint | SETOF record
+ pgrouting | public | pgr_stoerwagner | text, OUT seq integer, OUT edge bigint, OUT cost double precision, OUT mincut double precision | SETOF record
+ pgrouting | public | pgr_strongcomponents | text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record
+ pgrouting | public | pgr_topologicalsort | text, OUT seq integer, OUT sorted_v bigint | SETOF record
+ pgrouting | public | pgr_transitiveclosure | text, OUT seq integer, OUT vid bigint, OUT target_array bigint[] | SETOF record
+ pgrouting | public | pgr_trsp | text, text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_trsp | text, text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_trsp | text, text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_trsp | text, integer, double precision, integer, double precision, boolean, boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT cost double precision | SETOF record
+ pgrouting | public | pgr_trsp | text, integer, integer, boolean, boolean, restrictions_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT cost double precision | SETOF record
+ pgrouting | public | pgr_trsp | text, text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_trsp_withpoints | text, text, text, anyarray, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_trsp_withpoints | text, text, text, bigint, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_trsp_withpoints | text, text, text, anyarray, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_trsp_withpoints | text, text, text, bigint, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_trsp_withpoints | text, text, text, text, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_trspvia | text, text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record
+ pgrouting | public | pgr_trspvia_withpoints | text, text, text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, driving_side character, details boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record
+ pgrouting | public | pgr_trspviaedges | text, integer[], double precision[], boolean, boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT id3 integer, OUT cost double precision | SETOF record
+ pgrouting | public | pgr_trspviavertices | text, anyarray, boolean, boolean, restrictions_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT id3 integer, OUT cost double precision | SETOF record
+ pgrouting | public | pgr_tsp | text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_tspeuclidean | text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_turnrestrictedpath | text, text, bigint, bigint, integer, directed boolean, heap_paths boolean, stop_on_first boolean, strict boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_version | | text
+ pgrouting | public | pgr_vrponedepot | text, text, text, integer, OUT oid integer, OUT opos integer, OUT vid integer, OUT tarrival integer, OUT tdepart integer | SETOF record
+ pgrouting | public | pgr_withpoints | text, text, anyarray, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_withpoints | text, text, bigint, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_withpoints | text, text, text, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_withpoints | text, text, bigint, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_withpoints | text, text, anyarray, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_withpointscost | text, text, text, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_withpointscost | text, text, bigint, bigint, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_withpointscost | text, text, anyarray, anyarray, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_withpointscost | text, text, anyarray, bigint, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_withpointscost | text, text, bigint, anyarray, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_withpointscostmatrix | text, text, anyarray, directed boolean, driving_side character, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_withpointsdd | text, text, bigint, double precision, directed boolean, driving_side character, details boolean, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_withpointsdd | text, text, anyarray, double precision, directed boolean, driving_side character, details boolean, equicost boolean, OUT seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_withpointsksp | text, text, bigint, bigint, integer, directed boolean, heap_paths boolean, driving_side character, details boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_withpointsvia | text, text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, driving_side character, details boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record
+ pgrowlocks | public | pgrowlocks | relname text, OUT locked_row tid, OUT locker xid, OUT multi boolean, OUT xids xid[], OUT modes text[], OUT pids integer[] | SETOF record
+ pgsodium | pgsodium | create_key | key_type pgsodium.key_type, name text, raw_key bytea, raw_key_nonce bytea, parent_key uuid, key_context bytea, expires timestamp with time zone, associated_data text | pgsodium.valid_key
+ pgsodium | pgsodium | create_mask_view | relid oid, debug boolean | void
+ pgsodium | pgsodium | create_mask_view | relid oid, subid integer, debug boolean | void
+ pgsodium | pgsodium | crypto_aead_det_decrypt | message bytea, additional bytea, key_uuid uuid, nonce bytea | bytea
+ pgsodium | pgsodium | crypto_aead_det_decrypt | message bytea, additional bytea, key_id bigint, context bytea, nonce bytea | bytea
+ pgsodium | pgsodium | crypto_aead_det_decrypt | message bytea, additional bytea, key_uuid uuid | bytea
+ pgsodium | pgsodium | crypto_aead_det_decrypt | ciphertext bytea, additional bytea, key bytea, nonce bytea | bytea
+ pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key_uuid uuid, nonce bytea | bytea
+ pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key_id bigint, context bytea, nonce bytea | bytea
+ pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key_uuid uuid | bytea
+ pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key bytea, nonce bytea | bytea
+ pgsodium | pgsodium | crypto_aead_det_keygen | | bytea
+ pgsodium | pgsodium | crypto_aead_det_noncegen | | bytea
+ pgsodium | pgsodium | crypto_aead_ietf_decrypt | message bytea, additional bytea, nonce bytea, key_uuid uuid | bytea
+ pgsodium | pgsodium | crypto_aead_ietf_decrypt | message bytea, additional bytea, nonce bytea, key bytea | bytea
+ pgsodium | pgsodium | crypto_aead_ietf_decrypt | message bytea, additional bytea, nonce bytea, key_id bigint, context bytea | bytea
+ pgsodium | pgsodium | crypto_aead_ietf_encrypt | message bytea, additional bytea, nonce bytea, key_uuid uuid | bytea
+ pgsodium | pgsodium | crypto_aead_ietf_encrypt | message bytea, additional bytea, nonce bytea, key bytea | bytea
+ pgsodium | pgsodium | crypto_aead_ietf_encrypt | message bytea, additional bytea, nonce bytea, key_id bigint, context bytea | bytea
+ pgsodium | pgsodium | crypto_aead_ietf_keygen | | bytea
+ pgsodium | pgsodium | crypto_aead_ietf_noncegen | | bytea
+ pgsodium | pgsodium | crypto_auth | message bytea, key bytea | bytea
+ pgsodium | pgsodium | crypto_auth | message bytea, key_uuid uuid | bytea
+ pgsodium | pgsodium | crypto_auth | message bytea, key_id bigint, context bytea | bytea
+ pgsodium | pgsodium | crypto_auth_hmacsha256 | message bytea, secret bytea | bytea
+ pgsodium | pgsodium | crypto_auth_hmacsha256 | message bytea, key_uuid uuid | bytea
+ pgsodium | pgsodium | crypto_auth_hmacsha256 | message bytea, key_id bigint, context bytea | bytea
+ pgsodium | pgsodium | crypto_auth_hmacsha256_keygen | | bytea
+ pgsodium | pgsodium | crypto_auth_hmacsha256_verify | hash bytea, message bytea, key_id bigint, context bytea | boolean
+ pgsodium | pgsodium | crypto_auth_hmacsha256_verify | signature bytea, message bytea, key_uuid uuid | boolean
+ pgsodium | pgsodium | crypto_auth_hmacsha256_verify | hash bytea, message bytea, secret bytea | boolean
+ pgsodium | pgsodium | crypto_auth_hmacsha512 | message bytea, secret bytea | bytea
+ pgsodium | pgsodium | crypto_auth_hmacsha512 | message bytea, key_uuid uuid | bytea
+ pgsodium | pgsodium | crypto_auth_hmacsha512 | message bytea, key_id bigint, context bytea | bytea
+ pgsodium | pgsodium | crypto_auth_hmacsha512_keygen | | bytea
+ pgsodium | pgsodium | crypto_auth_hmacsha512_verify | hash bytea, message bytea, key_id bigint, context bytea | boolean
+ pgsodium | pgsodium | crypto_auth_hmacsha512_verify | signature bytea, message bytea, key_uuid uuid | boolean
+ pgsodium | pgsodium | crypto_auth_hmacsha512_verify | hash bytea, message bytea, secret bytea | boolean
+ pgsodium | pgsodium | crypto_auth_keygen | | bytea
+ pgsodium | pgsodium | crypto_auth_verify | mac bytea, message bytea, key_uuid uuid | boolean
+ pgsodium | pgsodium | crypto_auth_verify | mac bytea, message bytea, key bytea | boolean
+ pgsodium | pgsodium | crypto_auth_verify | mac bytea, message bytea, key_id bigint, context bytea | boolean
+ pgsodium | pgsodium | crypto_box | message bytea, nonce bytea, public bytea, secret bytea | bytea
+ pgsodium | pgsodium | crypto_box_new_keypair | | pgsodium.crypto_box_keypair
+ pgsodium | pgsodium | crypto_box_new_seed | | bytea
+ pgsodium | pgsodium | crypto_box_noncegen | | bytea
+ pgsodium | pgsodium | crypto_box_open | ciphertext bytea, nonce bytea, public bytea, secret bytea | bytea
+ pgsodium | pgsodium | crypto_box_seal | message bytea, public_key bytea | bytea
+ pgsodium | pgsodium | crypto_box_seal_open | ciphertext bytea, public_key bytea, secret_key bytea | bytea
+ pgsodium | pgsodium | crypto_box_seed_new_keypair | seed bytea | pgsodium.crypto_box_keypair
+ pgsodium | pgsodium | crypto_cmp | text, text | boolean
+ pgsodium | pgsodium | crypto_generichash | message bytea, key bigint, context bytea | bytea
+ pgsodium | pgsodium | crypto_generichash | message bytea, key bytea | bytea
+ pgsodium | pgsodium | crypto_generichash | message bytea, key_uuid uuid | bytea
+ pgsodium | pgsodium | crypto_generichash_keygen | | bytea
+ pgsodium | pgsodium | crypto_hash_sha256 | message bytea | bytea
+ pgsodium | pgsodium | crypto_hash_sha512 | message bytea | bytea
+ pgsodium | pgsodium | crypto_kdf_derive_from_key | subkey_size integer, subkey_id bigint, context bytea, primary_key uuid | bytea
+ pgsodium | pgsodium | crypto_kdf_derive_from_key | subkey_size bigint, subkey_id bigint, context bytea, primary_key bytea | bytea
+ pgsodium | pgsodium | crypto_kdf_keygen | | bytea
+ pgsodium | pgsodium | crypto_kx_client_session_keys | client_pk bytea, client_sk bytea, server_pk bytea | pgsodium.crypto_kx_session
+ pgsodium | pgsodium | crypto_kx_new_keypair | | pgsodium.crypto_kx_keypair
+ pgsodium | pgsodium | crypto_kx_new_seed | | bytea
+ pgsodium | pgsodium | crypto_kx_seed_new_keypair | seed bytea | pgsodium.crypto_kx_keypair
+ pgsodium | pgsodium | crypto_kx_server_session_keys | server_pk bytea, server_sk bytea, client_pk bytea | pgsodium.crypto_kx_session
+ pgsodium | pgsodium | crypto_pwhash | password bytea, salt bytea | bytea
+ pgsodium | pgsodium | crypto_pwhash_saltgen | | bytea
+ pgsodium | pgsodium | crypto_pwhash_str | password bytea | bytea
+ pgsodium | pgsodium | crypto_pwhash_str_verify | hashed_password bytea, password bytea | boolean
+ pgsodium | pgsodium | crypto_secretbox | message bytea, nonce bytea, key_id bigint, context bytea | bytea
+ pgsodium | pgsodium | crypto_secretbox | message bytea, nonce bytea, key_uuid uuid | bytea
+ pgsodium | pgsodium | crypto_secretbox | message bytea, nonce bytea, key bytea | bytea
+ pgsodium | pgsodium | crypto_secretbox_keygen | | bytea
+ pgsodium | pgsodium | crypto_secretbox_noncegen | | bytea
+ pgsodium | pgsodium | crypto_secretbox_open | ciphertext bytea, nonce bytea, key bytea | bytea
+ pgsodium | pgsodium | crypto_secretbox_open | message bytea, nonce bytea, key_id bigint, context bytea | bytea
+ pgsodium | pgsodium | crypto_secretbox_open | message bytea, nonce bytea, key_uuid uuid | bytea
+ pgsodium | pgsodium | crypto_secretstream_keygen | | bytea
+ pgsodium | pgsodium | crypto_shorthash | message bytea, key bigint, context bytea | bytea
+ pgsodium | pgsodium | crypto_shorthash | message bytea, key bytea | bytea
+ pgsodium | pgsodium | crypto_shorthash | message bytea, key_uuid uuid | bytea
+ pgsodium | pgsodium | crypto_shorthash_keygen | | bytea
+ pgsodium | pgsodium | crypto_sign | message bytea, key bytea | bytea
+ pgsodium | pgsodium | crypto_sign_detached | message bytea, key bytea | bytea
+ pgsodium | pgsodium | crypto_sign_final_create | state bytea, key bytea | bytea
+ pgsodium | pgsodium | crypto_sign_final_verify | state bytea, signature bytea, key bytea | boolean
+ pgsodium | pgsodium | crypto_sign_init | | bytea
+ pgsodium | pgsodium | crypto_sign_new_keypair | | pgsodium.crypto_sign_keypair
+ pgsodium | pgsodium | crypto_sign_new_seed | | bytea
+ pgsodium | pgsodium | crypto_sign_open | signed_message bytea, key bytea | bytea
+ pgsodium | pgsodium | crypto_sign_seed_new_keypair | seed bytea | pgsodium.crypto_sign_keypair
+ pgsodium | pgsodium | crypto_sign_update | state bytea, message bytea | bytea
+ pgsodium | pgsodium | crypto_sign_update_agg | message bytea | bytea
+ pgsodium | pgsodium | crypto_sign_update_agg | state bytea, message bytea | bytea
+ pgsodium | pgsodium | crypto_sign_update_agg1 | state bytea, message bytea | bytea
+ pgsodium | pgsodium | crypto_sign_update_agg2 | cur_state bytea, initial_state bytea, message bytea | bytea
+ pgsodium | pgsodium | crypto_sign_verify_detached | sig bytea, message bytea, key bytea | boolean
+ pgsodium | pgsodium | crypto_signcrypt_new_keypair | | pgsodium.crypto_signcrypt_keypair
+ pgsodium | pgsodium | crypto_signcrypt_sign_after | state bytea, sender_sk bytea, ciphertext bytea | bytea
+ pgsodium | pgsodium | crypto_signcrypt_sign_before | sender bytea, recipient bytea, sender_sk bytea, recipient_pk bytea, additional bytea | pgsodium.crypto_signcrypt_state_key
+ pgsodium | pgsodium | crypto_signcrypt_verify_after | state bytea, signature bytea, sender_pk bytea, ciphertext bytea | boolean
+ pgsodium | pgsodium | crypto_signcrypt_verify_before | signature bytea, sender bytea, recipient bytea, additional bytea, sender_pk bytea, recipient_sk bytea | pgsodium.crypto_signcrypt_state_key
+ pgsodium | pgsodium | crypto_signcrypt_verify_public | signature bytea, sender bytea, recipient bytea, additional bytea, sender_pk bytea, ciphertext bytea | boolean
+ pgsodium | pgsodium | crypto_stream_xchacha20 | bigint, bytea, bytea | bytea
+ pgsodium | pgsodium | crypto_stream_xchacha20 | bigint, bytea, bigint, context bytea | bytea
+ pgsodium | pgsodium | crypto_stream_xchacha20_keygen | | bytea
+ pgsodium | pgsodium | crypto_stream_xchacha20_noncegen | | bytea
+ pgsodium | pgsodium | crypto_stream_xchacha20_xor | bytea, bytea, bigint, context bytea | bytea
+ pgsodium | pgsodium | crypto_stream_xchacha20_xor | bytea, bytea, bytea | bytea
+ pgsodium | pgsodium | crypto_stream_xchacha20_xor_ic | bytea, bytea, bigint, bigint, context bytea | bytea
+ pgsodium | pgsodium | crypto_stream_xchacha20_xor_ic | bytea, bytea, bigint, bytea | bytea
+ pgsodium | pgsodium | decrypted_columns | relid oid | text
+ pgsodium | pgsodium | derive_key | key_id bigint, key_len integer, context bytea | bytea
+ pgsodium | pgsodium | disable_security_label_trigger | | void
+ pgsodium | pgsodium | enable_security_label_trigger | | void
+ pgsodium | pgsodium | encrypted_column | relid oid, m record | text
+ pgsodium | pgsodium | encrypted_columns | relid oid | text
+ pgsodium | pgsodium | get_key_by_id | uuid | pgsodium.valid_key
+ pgsodium | pgsodium | get_key_by_name | text | pgsodium.valid_key
+ pgsodium | pgsodium | get_named_keys | filter text | SETOF pgsodium.valid_key
+ pgsodium | pgsodium | has_mask | role regrole, source_name text | boolean
+ pgsodium | pgsodium | key_encrypt_secret_raw_key | | trigger
+ pgsodium | pgsodium | mask_columns | source_relid oid | TABLE(attname name, key_id text, key_id_column text, associated_column text, nonce_column text, format_type text)
+ pgsodium | pgsodium | mask_role | masked_role regrole, source_name text, view_name text | void
+ pgsodium | pgsodium | pgsodium_derive | key_id bigint, key_len integer, context bytea | bytea
+ pgsodium | pgsodium | quote_assoc | text, boolean | text
+ pgsodium | pgsodium | randombytes_buf | size integer | bytea
+ pgsodium | pgsodium | randombytes_buf_deterministic | size integer, seed bytea | bytea
+ pgsodium | pgsodium | randombytes_new_seed | | bytea
+ pgsodium | pgsodium | randombytes_random | | integer
+ pgsodium | pgsodium | randombytes_uniform | upper_bound integer | integer
+ pgsodium | pgsodium | sodium_base642bin | base64 text | bytea
+ pgsodium | pgsodium | sodium_bin2base64 | bin bytea | text
+ pgsodium | pgsodium | trg_mask_update | | event_trigger
+ pgsodium | pgsodium | update_mask | target oid, debug boolean | void
+ pgsodium | pgsodium | update_masks | debug boolean | void
+ pgsodium | pgsodium | version | | text
+ pgstattuple | public | pg_relpages | relname text | bigint
+ pgstattuple | public | pg_relpages | relname regclass | bigint
+ pgstattuple | public | pgstatginindex | relname regclass, OUT version integer, OUT pending_pages integer, OUT pending_tuples bigint | record
+ pgstattuple | public | pgstathashindex | relname regclass, OUT version integer, OUT bucket_pages bigint, OUT overflow_pages bigint, OUT bitmap_pages bigint, OUT unused_pages bigint, OUT live_items bigint, OUT dead_items bigint, OUT free_percent double precision | record
+ pgstattuple | public | pgstatindex | relname regclass, OUT version integer, OUT tree_level integer, OUT index_size bigint, OUT root_block_no bigint, OUT internal_pages bigint, OUT leaf_pages bigint, OUT empty_pages bigint, OUT deleted_pages bigint, OUT avg_leaf_density double precision, OUT leaf_fragmentation double precision | record
+ pgstattuple | public | pgstatindex | relname text, OUT version integer, OUT tree_level integer, OUT index_size bigint, OUT root_block_no bigint, OUT internal_pages bigint, OUT leaf_pages bigint, OUT empty_pages bigint, OUT deleted_pages bigint, OUT avg_leaf_density double precision, OUT leaf_fragmentation double precision | record
+ pgstattuple | public | pgstattuple | reloid regclass, OUT table_len bigint, OUT tuple_count bigint, OUT tuple_len bigint, OUT tuple_percent double precision, OUT dead_tuple_count bigint, OUT dead_tuple_len bigint, OUT dead_tuple_percent double precision, OUT free_space bigint, OUT free_percent double precision | record
+ pgstattuple | public | pgstattuple | relname text, OUT table_len bigint, OUT tuple_count bigint, OUT tuple_len bigint, OUT tuple_percent double precision, OUT dead_tuple_count bigint, OUT dead_tuple_len bigint, OUT dead_tuple_percent double precision, OUT free_space bigint, OUT free_percent double precision | record
+ pgstattuple | public | pgstattuple_approx | reloid regclass, OUT table_len bigint, OUT scanned_percent double precision, OUT approx_tuple_count bigint, OUT approx_tuple_len bigint, OUT approx_tuple_percent double precision, OUT dead_tuple_count bigint, OUT dead_tuple_len bigint, OUT dead_tuple_percent double precision, OUT approx_free_space bigint, OUT approx_free_percent double precision | record
+ pgtap | public | _add | text, integer | integer
+ pgtap | public | _add | text, integer, text | integer
+ pgtap | public | _alike | boolean, anyelement, text, text | text
+ pgtap | public | _ancestor_of | name, name, integer | boolean
+ pgtap | public | _ancestor_of | name, name, name, name, integer | boolean
+ pgtap | public | _are | text, name[], name[], text | text
+ pgtap | public | _areni | text, text[], text[], text | text
+ pgtap | public | _array_to_sorted_string | name[], text | text
+ pgtap | public | _assets_are | text, text[], text[], text | text
+ pgtap | public | _cast_exists | name, name, name, name | boolean
+ pgtap | public | _cast_exists | name, name | boolean
+ pgtap | public | _cast_exists | name, name, name | boolean
+ pgtap | public | _cdi | name, name, anyelement | text
+ pgtap | public | _cdi | name, name, anyelement, text | text
+ pgtap | public | _cdi | name, name, name, anyelement, text | text
+ pgtap | public | _cexists | name, name | boolean
+ pgtap | public | _cexists | name, name, name | boolean
+ pgtap | public | _ckeys | name, character | name[]
+ pgtap | public | _ckeys | name, name, character | name[]
+ pgtap | public | _cleanup | | boolean
+ pgtap | public | _cmp_types | oid, name | boolean
+ pgtap | public | _col_is_null | name, name, name, text, boolean | text
+ pgtap | public | _col_is_null | name, name, text, boolean | text
+ pgtap | public | _constraint | name, character, name[], text, text | text
+ pgtap | public | _constraint | name, name, character, name[], text, text | text
+ pgtap | public | _contract_on | text | "char"
+ pgtap | public | _currtest | | integer
+ pgtap | public | _db_privs | | name[]
+ pgtap | public | _def_is | text, text, anyelement, text | text
+ pgtap | public | _definer | name, name, name[] | boolean
+ pgtap | public | _definer | name, name[] | boolean
+ pgtap | public | _definer | name | boolean
+ pgtap | public | _definer | name, name | boolean
+ pgtap | public | _dexists | name | boolean
+ pgtap | public | _dexists | name, name | boolean
+ pgtap | public | _do_ne | text, text, text, text | text
+ pgtap | public | _docomp | text, text, text, text | text
+ pgtap | public | _error_diag | text, text, text, text, text, text, text, text, text, text | text
+ pgtap | public | _expand_context | character | text
+ pgtap | public | _expand_on | character | text
+ pgtap | public | _expand_vol | character | text
+ pgtap | public | _ext_exists | name | boolean
+ pgtap | public | _ext_exists | name, name | boolean
+ pgtap | public | _extensions | name | SETOF name
+ pgtap | public | _extensions | | SETOF name
+ pgtap | public | _extras | character, name, name[] | name[]
+ pgtap | public | _extras | character[], name[] | name[]
+ pgtap | public | _extras | character, name[] | name[]
+ pgtap | public | _extras | character[], name, name[] | name[]
+ pgtap | public | _finish | integer, integer, integer, boolean | SETOF text
+ pgtap | public | _fkexists | name, name, name[] | boolean
+ pgtap | public | _fkexists | name, name[] | boolean
+ pgtap | public | _fprivs_are | text, name, name[], text | text
+ pgtap | public | _func_compare | name, name, boolean, text | text
+ pgtap | public | _func_compare | name, name, name[], anyelement, anyelement, text | text
+ pgtap | public | _func_compare | name, name, name[], boolean, text | text
+ pgtap | public | _func_compare | name, name, anyelement, anyelement, text | text
+ pgtap | public | _funkargs | name[] | text
+ pgtap | public | _get | text | integer
+ pgtap | public | _get_ac_privs | name, text | text[]
+ pgtap | public | _get_col_ns_type | name, name, name | text
+ pgtap | public | _get_col_privs | name, text, name | text[]
+ pgtap | public | _get_col_type | name, name | text
+ pgtap | public | _get_col_type | name, name, name | text
+ pgtap | public | _get_context | name, name | "char"
+ pgtap | public | _get_db_owner | name | name
+ pgtap | public | _get_db_privs | name, text | text[]
+ pgtap | public | _get_dtype | name, text, boolean | text
+ pgtap | public | _get_dtype | name | text
+ pgtap | public | _get_fdw_privs | name, text | text[]
+ pgtap | public | _get_func_owner | name, name, name[] | name
+ pgtap | public | _get_func_owner | name, name[] | name
+ pgtap | public | _get_func_privs | text, text | text[]
+ pgtap | public | _get_index_owner | name, name | name
+ pgtap | public | _get_index_owner | name, name, name | name
+ pgtap | public | _get_lang_privs | name, text | text[]
+ pgtap | public | _get_language_owner | name | name
+ pgtap | public | _get_latest | text | integer[]
+ pgtap | public | _get_latest | text, integer | integer
+ pgtap | public | _get_note | integer | text
+ pgtap | public | _get_note | text | text
+ pgtap | public | _get_opclass_owner | name | name
+ pgtap | public | _get_opclass_owner | name, name | name
+ pgtap | public | _get_rel_owner | character[], name, name | name
+ pgtap | public | _get_rel_owner | character, name | name
+ pgtap | public | _get_rel_owner | name | name
+ pgtap | public | _get_rel_owner | name, name | name
+ pgtap | public | _get_rel_owner | character[], name | name
+ pgtap | public | _get_rel_owner | character, name, name | name
+ pgtap | public | _get_schema_owner | name | name
+ pgtap | public | _get_schema_privs | name, text | text[]
+ pgtap | public | _get_sequence_privs | name, text | text[]
+ pgtap | public | _get_server_privs | name, text | text[]
+ pgtap | public | _get_table_privs | name, text | text[]
+ pgtap | public | _get_tablespace_owner | name | name
+ pgtap | public | _get_tablespaceprivs | name, text | text[]
+ pgtap | public | _get_type_owner | name | name
+ pgtap | public | _get_type_owner | name, name | name
+ pgtap | public | _got_func | name, name, name[] | boolean
+ pgtap | public | _got_func | name, name[] | boolean
+ pgtap | public | _got_func | name | boolean
+ pgtap | public | _got_func | name, name | boolean
+ pgtap | public | _grolist | name | oid[]
+ pgtap | public | _has_def | name, name | boolean
+ pgtap | public | _has_def | name, name, name | boolean
+ pgtap | public | _has_group | name | boolean
+ pgtap | public | _has_role | name | boolean
+ pgtap | public | _has_type | name, name, character[] | boolean
+ pgtap | public | _has_type | name, character[] | boolean
+ pgtap | public | _has_user | name | boolean
+ pgtap | public | _hasc | name, character | boolean
+ pgtap | public | _hasc | name, name, character | boolean
+ pgtap | public | _have_index | name, name | boolean
+ pgtap | public | _have_index | name, name, name | boolean
+ pgtap | public | _ident_array_to_sorted_string | name[], text | text
+ pgtap | public | _ident_array_to_string | name[], text | text
+ pgtap | public | _ikeys | name, name | text[]
+ pgtap | public | _ikeys | name, name, name | text[]
+ pgtap | public | _inherited | name | boolean
+ pgtap | public | _inherited | name, name | boolean
+ pgtap | public | _is_indexed | name, name, text[] | boolean
+ pgtap | public | _is_instead | name, name | boolean
+ pgtap | public | _is_instead | name, name, name | boolean
+ pgtap | public | _is_schema | name | boolean
+ pgtap | public | _is_super | name | boolean
+ pgtap | public | _is_trusted | name | boolean
+ pgtap | public | _is_verbose | | boolean
+ pgtap | public | _keys | name, character | SETOF name[]
+ pgtap | public | _keys | name, name, character | SETOF name[]
+ pgtap | public | _lang | name, name, name[] | name
+ pgtap | public | _lang | name, name[] | name
+ pgtap | public | _lang | name | name
+ pgtap | public | _lang | name, name | name
+ pgtap | public | _missing | character, name, name[] | name[]
+ pgtap | public | _missing | character[], name[] | name[]
+ pgtap | public | _missing | character, name[] | name[]
+ pgtap | public | _missing | character[], name, name[] | name[]
+ pgtap | public | _nosuch | name, name, name[] | text
+ pgtap | public | _op_exists | name, name, name, name | boolean
+ pgtap | public | _op_exists | name, name, name, name, name | boolean
+ pgtap | public | _op_exists | name, name, name | boolean
+ pgtap | public | _opc_exists | name | boolean
+ pgtap | public | _opc_exists | name, name | boolean
+ pgtap | public | _partof | name, name, name, name | boolean
+ pgtap | public | _partof | name, name | boolean
+ pgtap | public | _parts | name | SETOF name
+ pgtap | public | _parts | name, name | SETOF name
+ pgtap | public | _pg_sv_column_array | oid, smallint[] | name[]
+ pgtap | public | _pg_sv_table_accessible | oid, oid | boolean
+ pgtap | public | _pg_sv_type_array | oid[] | name[]
+ pgtap | public | _prokind | p_oid oid | "char"
+ pgtap | public | _query | text | text
+ pgtap | public | _quote_ident_like | text, text | text
+ pgtap | public | _refine_vol | text | text
+ pgtap | public | _relcomp | text, text, text, text, text | text
+ pgtap | public | _relcomp | text, text, text, text | text
+ pgtap | public | _relcomp | text, anyarray, text, text | text
+ pgtap | public | _relexists | name | boolean
+ pgtap | public | _relexists | name, name | boolean
+ pgtap | public | _relne | text, text, text, text | text
+ pgtap | public | _relne | text, anyarray, text, text | text
+ pgtap | public | _returns | name, name, name[] | text
+ pgtap | public | _returns | name, name[] | text
+ pgtap | public | _returns | name | text
+ pgtap | public | _returns | name, name | text
+ pgtap | public | _rexists | character[], name, name | boolean
+ pgtap | public | _rexists | character, name | boolean
+ pgtap | public | _rexists | character[], name | boolean
+ pgtap | public | _rexists | character, name, name | boolean
+ pgtap | public | _rule_on | name, name | "char"
+ pgtap | public | _rule_on | name, name, name | "char"
+ pgtap | public | _runem | text[], boolean | SETOF text
+ pgtap | public | _runner | text[], text[], text[], text[], text[] | SETOF text
+ pgtap | public | _set | text, integer | integer
+ pgtap | public | _set | integer, integer | integer
+ pgtap | public | _set | text, integer, text | integer
+ pgtap | public | _strict | name, name, name[] | boolean
+ pgtap | public | _strict | name, name[] | boolean
+ pgtap | public | _strict | name | boolean
+ pgtap | public | _strict | name, name | boolean
+ pgtap | public | _table_privs | | name[]
+ pgtap | public | _temptable | text, text | text
+ pgtap | public | _temptable | anyarray, text | text
+ pgtap | public | _temptypes | text | text
+ pgtap | public | _time_trials | text, integer, numeric | SETOF _time_trial_type
+ pgtap | public | _tlike | boolean, text, text, text | text
+ pgtap | public | _todo | | text
+ pgtap | public | _trig | name, name | boolean
+ pgtap | public | _trig | name, name, name | boolean
+ pgtap | public | _type_func | "char", name | boolean
+ pgtap | public | _type_func | "char", name, name, name[] | boolean
+ pgtap | public | _type_func | "char", name, name[] | boolean
+ pgtap | public | _type_func | "char", name, name | boolean
+ pgtap | public | _types_are | name, name[], text, character[] | text
+ pgtap | public | _types_are | name[], text, character[] | text
+ pgtap | public | _unalike | boolean, anyelement, text, text | text
+ pgtap | public | _vol | name, name, name[] | text
+ pgtap | public | _vol | name, name[] | text
+ pgtap | public | _vol | name | text
+ pgtap | public | _vol | name, name | text
+ pgtap | public | add_result | boolean, boolean, text, text, text | integer
+ pgtap | public | alike | anyelement, text | text
+ pgtap | public | alike | anyelement, text, text | text
+ pgtap | public | any_column_privs_are | name, name, name, name[], text | text
+ pgtap | public | any_column_privs_are | name, name, name[] | text
+ pgtap | public | any_column_privs_are | name, name, name, name[] | text
+ pgtap | public | any_column_privs_are | name, name, name[], text | text
+ pgtap | public | bag_eq | text, anyarray, text | text
+ pgtap | public | bag_eq | text, text | text
+ pgtap | public | bag_eq | text, text, text | text
+ pgtap | public | bag_eq | text, anyarray | text
+ pgtap | public | bag_has | text, text | text
+ pgtap | public | bag_has | text, text, text | text
+ pgtap | public | bag_hasnt | text, text | text
+ pgtap | public | bag_hasnt | text, text, text | text
+ pgtap | public | bag_ne | text, anyarray, text | text
+ pgtap | public | bag_ne | text, text | text
+ pgtap | public | bag_ne | text, text, text | text
+ pgtap | public | bag_ne | text, anyarray | text
+ pgtap | public | can | name[] | text
+ pgtap | public | can | name[], text | text
+ pgtap | public | can | name, name[], text | text
+ pgtap | public | can | name, name[] | text
+ pgtap | public | cast_context_is | name, name, text, text | text
+ pgtap | public | cast_context_is | name, name, text | text
+ pgtap | public | casts_are | text[] | text
+ pgtap | public | casts_are | text[], text | text
+ pgtap | public | check_test | text, boolean | SETOF text
+ pgtap | public | check_test | text, boolean, text, text, text, boolean | SETOF text
+ pgtap | public | check_test | text, boolean, text, text | SETOF text
+ pgtap | public | check_test | text, boolean, text | SETOF text
+ pgtap | public | check_test | text, boolean, text, text, text | SETOF text
+ pgtap | public | cmp_ok | anyelement, text, anyelement | text
+ pgtap | public | cmp_ok | anyelement, text, anyelement, text | text
+ pgtap | public | col_default_is | name, name, anyelement | text
+ pgtap | public | col_default_is | name, name, text, text | text
+ pgtap | public | col_default_is | name, name, name, text, text | text
+ pgtap | public | col_default_is | name, name, text | text
+ pgtap | public | col_default_is | name, name, anyelement, text | text
+ pgtap | public | col_default_is | name, name, name, anyelement, text | text
+ pgtap | public | col_has_check | name, name, name, text | text
+ pgtap | public | col_has_check | name, name[], text | text
+ pgtap | public | col_has_check | name, name[] | text
+ pgtap | public | col_has_check | name, name, text | text
+ pgtap | public | col_has_check | name, name, name[], text | text
+ pgtap | public | col_has_check | name, name | text
+ pgtap | public | col_has_default | name, name, name, text | text
+ pgtap | public | col_has_default | name, name, text | text
+ pgtap | public | col_has_default | name, name | text
+ pgtap | public | col_hasnt_default | name, name, name, text | text
+ pgtap | public | col_hasnt_default | name, name, text | text
+ pgtap | public | col_hasnt_default | name, name | text
+ pgtap | public | col_is_fk | name, name, name, text | text
+ pgtap | public | col_is_fk | name, name[], text | text
+ pgtap | public | col_is_fk | name, name[] | text
+ pgtap | public | col_is_fk | name, name, text | text
+ pgtap | public | col_is_fk | name, name, name[], text | text
+ pgtap | public | col_is_fk | name, name | text
+ pgtap | public | col_is_null | table_name name, column_name name, description text | text
+ pgtap | public | col_is_null | schema_name name, table_name name, column_name name, description text | text
+ pgtap | public | col_is_pk | name, name, name, text | text
+ pgtap | public | col_is_pk | name, name[], text | text
+ pgtap | public | col_is_pk | name, name[] | text
+ pgtap | public | col_is_pk | name, name, text | text
+ pgtap | public | col_is_pk | name, name, name[], text | text
+ pgtap | public | col_is_pk | name, name | text
+ pgtap | public | col_is_unique | name, name, name, text | text
+ pgtap | public | col_is_unique | name, name[], text | text
+ pgtap | public | col_is_unique | name, name, name[] | text
+ pgtap | public | col_is_unique | name, name[] | text
+ pgtap | public | col_is_unique | name, name, text | text
+ pgtap | public | col_is_unique | name, name, name[], text | text
+ pgtap | public | col_is_unique | name, name | text
+ pgtap | public | col_is_unique | name, name, name | text
+ pgtap | public | col_isnt_fk | name, name, name, text | text
+ pgtap | public | col_isnt_fk | name, name[], text | text
+ pgtap | public | col_isnt_fk | name, name[] | text
+ pgtap | public | col_isnt_fk | name, name, text | text
+ pgtap | public | col_isnt_fk | name, name, name[], text | text
+ pgtap | public | col_isnt_fk | name, name | text
+ pgtap | public | col_isnt_pk | name, name, name, text | text
+ pgtap | public | col_isnt_pk | name, name[], text | text
+ pgtap | public | col_isnt_pk | name, name[] | text
+ pgtap | public | col_isnt_pk | name, name, text | text
+ pgtap | public | col_isnt_pk | name, name, name[], text | text
+ pgtap | public | col_isnt_pk | name, name | text
+ pgtap | public | col_not_null | table_name name, column_name name, description text | text
+ pgtap | public | col_not_null | schema_name name, table_name name, column_name name, description text | text
+ pgtap | public | col_type_is | name, name, name, text | text
+ pgtap | public | col_type_is | name, name, text, text | text
+ pgtap | public | col_type_is | name, name, name, text, text | text
+ pgtap | public | col_type_is | name, name, name, name, text, text | text
+ pgtap | public | col_type_is | name, name, text | text
+ pgtap | public | col_type_is | name, name, name, name, text | text
+ pgtap | public | collect_tap | VARIADIC text[] | text
+ pgtap | public | collect_tap | character varying[] | text
+ pgtap | public | column_privs_are | name, name, name, name[], text | text
+ pgtap | public | column_privs_are | name, name, name, name, name[] | text
+ pgtap | public | column_privs_are | name, name, name, name[] | text
+ pgtap | public | column_privs_are | name, name, name, name, name[], text | text
+ pgtap | public | columns_are | name, name[], text | text
+ pgtap | public | columns_are | name, name, name[] | text
+ pgtap | public | columns_are | name, name[] | text
+ pgtap | public | columns_are | name, name, name[], text | text
+ pgtap | public | composite_owner_is | name, name, name, text | text
+ pgtap | public | composite_owner_is | name, name, text | text
+ pgtap | public | composite_owner_is | name, name | text
+ pgtap | public | composite_owner_is | name, name, name | text
+ pgtap | public | database_privs_are | name, name, name[] | text
+ pgtap | public | database_privs_are | name, name, name[], text | text
+ pgtap | public | db_owner_is | name, name, text | text
+ pgtap | public | db_owner_is | name, name | text
+ pgtap | public | diag | msg text | text
+ pgtap | public | diag | VARIADIC text[] | text
+ pgtap | public | diag | VARIADIC anyarray | text
+ pgtap | public | diag | msg anyelement | text
+ pgtap | public | diag_test_name | text | text
+ pgtap | public | display_oper | name, oid | text
+ pgtap | public | do_tap | text | SETOF text
+ pgtap | public | do_tap | name, text | SETOF text
+ pgtap | public | do_tap | name | SETOF text
+ pgtap | public | do_tap | | SETOF text
+ pgtap | public | doesnt_imatch | anyelement, text | text
+ pgtap | public | doesnt_imatch | anyelement, text, text | text
+ pgtap | public | doesnt_match | anyelement, text | text
+ pgtap | public | doesnt_match | anyelement, text, text | text
+ pgtap | public | domain_type_is | name, text, name, text, text | text
+ pgtap | public | domain_type_is | text, text | text
+ pgtap | public | domain_type_is | name, text, text, text | text
+ pgtap | public | domain_type_is | name, text, text | text
+ pgtap | public | domain_type_is | text, text, text | text
+ pgtap | public | domain_type_is | name, text, name, text | text
+ pgtap | public | domain_type_isnt | name, text, name, text, text | text
+ pgtap | public | domain_type_isnt | text, text | text
+ pgtap | public | domain_type_isnt | name, text, text, text | text
+ pgtap | public | domain_type_isnt | name, text, text | text
+ pgtap | public | domain_type_isnt | text, text, text | text
+ pgtap | public | domain_type_isnt | name, text, name, text | text
+ pgtap | public | domains_are | name[] | text
+ pgtap | public | domains_are | name[], text | text
+ pgtap | public | domains_are | name, name[], text | text
+ pgtap | public | domains_are | name, name[] | text
+ pgtap | public | enum_has_labels | name, name[], text | text
+ pgtap | public | enum_has_labels | name, name, name[] | text
+ pgtap | public | enum_has_labels | name, name[] | text
+ pgtap | public | enum_has_labels | name, name, name[], text | text
+ pgtap | public | enums_are | name[] | text
+ pgtap | public | enums_are | name[], text | text
+ pgtap | public | enums_are | name, name[], text | text
+ pgtap | public | enums_are | name, name[] | text
+ pgtap | public | extensions_are | name[] | text
+ pgtap | public | extensions_are | name[], text | text
+ pgtap | public | extensions_are | name, name[], text | text
+ pgtap | public | extensions_are | name, name[] | text
+ pgtap | public | fail | text | text
+ pgtap | public | fail | | text
+ pgtap | public | fdw_privs_are | name, name, name[] | text
+ pgtap | public | fdw_privs_are | name, name, name[], text | text
+ pgtap | public | findfuncs | text | text[]
+ pgtap | public | findfuncs | text, text | text[]
+ pgtap | public | findfuncs | name, text, text | text[]
+ pgtap | public | findfuncs | name, text | text[]
+ pgtap | public | finish | exception_on_failure boolean | SETOF text
+ pgtap | public | fk_ok | name, name, name, name, name, text | text
+ pgtap | public | fk_ok | name, name, name, name, name, name, text | text
+ pgtap | public | fk_ok | name, name, name, name | text
+ pgtap | public | fk_ok | name, name, name, name, text | text
+ pgtap | public | fk_ok | name, name, name[], name, name, name[], text | text
+ pgtap | public | fk_ok | name, name, name[], name, name, name[] | text
+ pgtap |
public | fk_ok | name, name[], name, name[] | text + pgtap | public | fk_ok | name, name[], name, name[], text | text + pgtap | public | foreign_table_owner_is | name, name, name, text | text + pgtap | public | foreign_table_owner_is | name, name, text | text + pgtap | public | foreign_table_owner_is | name, name | text + pgtap | public | foreign_table_owner_is | name, name, name | text + pgtap | public | foreign_tables_are | name[] | text + pgtap | public | foreign_tables_are | name[], text | text + pgtap | public | foreign_tables_are | name, name[], text | text + pgtap | public | foreign_tables_are | name, name[] | text + pgtap | public | function_lang_is | name, name, name, text | text + pgtap | public | function_lang_is | name, name, text | text + pgtap | public | function_lang_is | name, name[], name, text | text + pgtap | public | function_lang_is | name, name, name[], name | text + pgtap | public | function_lang_is | name, name | text + pgtap | public | function_lang_is | name, name, name | text + pgtap | public | function_lang_is | name, name[], name | text + pgtap | public | function_lang_is | name, name, name[], name, text | text + pgtap | public | function_owner_is | name, name[], name, text | text + pgtap | public | function_owner_is | name, name, name[], name | text + pgtap | public | function_owner_is | name, name[], name | text + pgtap | public | function_owner_is | name, name, name[], name, text | text + pgtap | public | function_privs_are | name, name, name[], name, name[], text | text + pgtap | public | function_privs_are | name, name[], name, name[] | text + pgtap | public | function_privs_are | name, name[], name, name[], text | text + pgtap | public | function_privs_are | name, name, name[], name, name[] | text + pgtap | public | function_returns | name, name, name[], text, text | text + pgtap | public | function_returns | name, name[], text | text + pgtap | public | function_returns | name, name, text, text | text + pgtap | public | function_returns | name, name, text | text + pgtap | public | function_returns | name, text, text | text + pgtap | public | function_returns | name, text | text + pgtap | public | function_returns | name, name, name[], text | text + pgtap | public | function_returns | name, name[], text, text | text + pgtap | public | functions_are | name[] | text + pgtap | public | functions_are | name[], text | text + pgtap | public | functions_are | name, name[], text | text + pgtap | public | functions_are | name, name[] | text + pgtap | public | groups_are | name[] | text + pgtap | public | groups_are | name[], text | text + pgtap | public | has_cast | name, name, name, text | text + pgtap | public | has_cast | name, name, name, name | text + pgtap | public | has_cast | name, name, text | text + pgtap | public | has_cast | name, name, name, name, text | text + pgtap | public | has_cast | name, name | text + pgtap | public | has_cast | name, name, name | text + pgtap | public | has_check | name, name, text | text + pgtap | public | has_check | name, text | text + pgtap | public | has_check | name | text + pgtap | public | has_column | name, name, name, text | text + pgtap | public | has_column | name, name, text | text + pgtap | public | has_column | name, name | text + pgtap | public | has_composite | name, name, text | text + pgtap | public | has_composite | name, text | text + pgtap | public | has_composite | name | text + pgtap | public | has_domain | name, name, text | text + pgtap | public | has_domain | name, text | text + pgtap | public | 
has_domain | name | text + pgtap | public | has_domain | name, name | text + pgtap | public | has_enum | name, name, text | text + pgtap | public | has_enum | name, text | text + pgtap | public | has_enum | name | text + pgtap | public | has_enum | name, name | text + pgtap | public | has_extension | name, name, text | text + pgtap | public | has_extension | name, text | text + pgtap | public | has_extension | name | text + pgtap | public | has_extension | name, name | text + pgtap | public | has_fk | name, name, text | text + pgtap | public | has_fk | name, text | text + pgtap | public | has_fk | name | text + pgtap | public | has_foreign_table | name, name, text | text + pgtap | public | has_foreign_table | name, text | text + pgtap | public | has_foreign_table | name | text + pgtap | public | has_foreign_table | name, name | text + pgtap | public | has_function | name, name[], text | text + pgtap | public | has_function | name, name, name[] | text + pgtap | public | has_function | name, name[] | text + pgtap | public | has_function | name, name, text | text + pgtap | public | has_function | name, text | text + pgtap | public | has_function | name | text + pgtap | public | has_function | name, name, name[], text | text + pgtap | public | has_function | name, name | text + pgtap | public | has_group | name, text | text + pgtap | public | has_group | name | text + pgtap | public | has_index | name, name, name, text | text + pgtap | public | has_index | name, name, name, name[], text | text + pgtap | public | has_index | name, name, name[] | text + pgtap | public | has_index | name, name, name, name | text + pgtap | public | has_index | name, name, text | text + pgtap | public | has_index | name, name, name, name[] | text + pgtap | public | has_index | name, name, name, name, text | text + pgtap | public | has_index | name, name, name[], text | text + pgtap | public | has_index | name, name | text + pgtap | public | has_index | name, name, name | text + pgtap | public | has_inherited_tables | name, name, text | text + pgtap | public | has_inherited_tables | name, text | text + pgtap | public | has_inherited_tables | name | text + pgtap | public | has_inherited_tables | name, name | text + pgtap | public | has_language | name, text | text + pgtap | public | has_language | name | text + pgtap | public | has_leftop | name, name, name, text | text + pgtap | public | has_leftop | name, name, name, name | text + pgtap | public | has_leftop | name, name, text | text + pgtap | public | has_leftop | name, name, name, name, text | text + pgtap | public | has_leftop | name, name | text + pgtap | public | has_leftop | name, name, name | text + pgtap | public | has_materialized_view | name, name, text | text + pgtap | public | has_materialized_view | name, text | text + pgtap | public | has_materialized_view | name | text + pgtap | public | has_opclass | name, name, text | text + pgtap | public | has_opclass | name, text | text + pgtap | public | has_opclass | name | text + pgtap | public | has_opclass | name, name | text + pgtap | public | has_operator | name, name, name, text | text + pgtap | public | has_operator | name, name, name, name, name, text | text + pgtap | public | has_operator | name, name, name, name | text + pgtap | public | has_operator | name, name, name, name, text | text + pgtap | public | has_operator | name, name, name, name, name | text + pgtap | public | has_operator | name, name, name | text + pgtap | public | has_pk | name, name, text | text + pgtap | public | has_pk | name, 
text | text + pgtap | public | has_pk | name | text + pgtap | public | has_relation | name, name, text | text + pgtap | public | has_relation | name, text | text + pgtap | public | has_relation | name | text + pgtap | public | has_rightop | name, name, name, text | text + pgtap | public | has_rightop | name, name, name, name | text + pgtap | public | has_rightop | name, name, text | text + pgtap | public | has_rightop | name, name, name, name, text | text + pgtap | public | has_rightop | name, name | text + pgtap | public | has_rightop | name, name, name | text + pgtap | public | has_role | name, text | text + pgtap | public | has_role | name | text + pgtap | public | has_rule | name, name, name, text | text + pgtap | public | has_rule | name, name, text | text + pgtap | public | has_rule | name, name | text + pgtap | public | has_rule | name, name, name | text + pgtap | public | has_schema | name, text | text + pgtap | public | has_schema | name | text + pgtap | public | has_sequence | name, name, text | text + pgtap | public | has_sequence | name, text | text + pgtap | public | has_sequence | name | text + pgtap | public | has_sequence | name, name | text + pgtap | public | has_table | name, name, text | text + pgtap | public | has_table | name, text | text + pgtap | public | has_table | name | text + pgtap | public | has_table | name, name | text + pgtap | public | has_tablespace | name, text, text | text + pgtap | public | has_tablespace | name, text | text + pgtap | public | has_tablespace | name | text + pgtap | public | has_trigger | name, name, name, text | text + pgtap | public | has_trigger | name, name, text | text + pgtap | public | has_trigger | name, name | text + pgtap | public | has_trigger | name, name, name | text + pgtap | public | has_type | name, name, text | text + pgtap | public | has_type | name, text | text + pgtap | public | has_type | name | text + pgtap | public | has_type | name, name | text + pgtap | public | has_unique | text | text + pgtap | public | has_unique | text, text | text + pgtap | public | has_unique | text, text, text | text + pgtap | public | has_user | name, text | text + pgtap | public | has_user | name | text + pgtap | public | has_view | name, name, text | text + pgtap | public | has_view | name, text | text + pgtap | public | has_view | name | text + pgtap | public | has_view | name, name | text + pgtap | public | hasnt_cast | name, name, name, text | text + pgtap | public | hasnt_cast | name, name, name, name | text + pgtap | public | hasnt_cast | name, name, text | text + pgtap | public | hasnt_cast | name, name, name, name, text | text + pgtap | public | hasnt_cast | name, name | text + pgtap | public | hasnt_cast | name, name, name | text + pgtap | public | hasnt_column | name, name, name, text | text + pgtap | public | hasnt_column | name, name, text | text + pgtap | public | hasnt_column | name, name | text + pgtap | public | hasnt_composite | name, name, text | text + pgtap | public | hasnt_composite | name, text | text + pgtap | public | hasnt_composite | name | text + pgtap | public | hasnt_domain | name, name, text | text + pgtap | public | hasnt_domain | name, text | text + pgtap | public | hasnt_domain | name | text + pgtap | public | hasnt_domain | name, name | text + pgtap | public | hasnt_enum | name, name, text | text + pgtap | public | hasnt_enum | name, text | text + pgtap | public | hasnt_enum | name | text + pgtap | public | hasnt_enum | name, name | text + pgtap | public | hasnt_extension | name, name, text | text + 
pgtap | public | hasnt_extension | name, text | text + pgtap | public | hasnt_extension | name | text + pgtap | public | hasnt_extension | name, name | text + pgtap | public | hasnt_fk | name, name, text | text + pgtap | public | hasnt_fk | name, text | text + pgtap | public | hasnt_fk | name | text + pgtap | public | hasnt_foreign_table | name, name, text | text + pgtap | public | hasnt_foreign_table | name, text | text + pgtap | public | hasnt_foreign_table | name | text + pgtap | public | hasnt_foreign_table | name, name | text + pgtap | public | hasnt_function | name, name[], text | text + pgtap | public | hasnt_function | name, name, name[] | text + pgtap | public | hasnt_function | name, name[] | text + pgtap | public | hasnt_function | name, name, text | text + pgtap | public | hasnt_function | name, text | text + pgtap | public | hasnt_function | name | text + pgtap | public | hasnt_function | name, name, name[], text | text + pgtap | public | hasnt_function | name, name | text + pgtap | public | hasnt_group | name, text | text + pgtap | public | hasnt_group | name | text + pgtap | public | hasnt_index | name, name, name, text | text + pgtap | public | hasnt_index | name, name, text | text + pgtap | public | hasnt_index | name, name | text + pgtap | public | hasnt_index | name, name, name | text + pgtap | public | hasnt_inherited_tables | name, name, text | text + pgtap | public | hasnt_inherited_tables | name, text | text + pgtap | public | hasnt_inherited_tables | name | text + pgtap | public | hasnt_inherited_tables | name, name | text + pgtap | public | hasnt_language | name, text | text + pgtap | public | hasnt_language | name | text + pgtap | public | hasnt_leftop | name, name, name, text | text + pgtap | public | hasnt_leftop | name, name, name, name | text + pgtap | public | hasnt_leftop | name, name, text | text + pgtap | public | hasnt_leftop | name, name, name, name, text | text + pgtap | public | hasnt_leftop | name, name | text + pgtap | public | hasnt_leftop | name, name, name | text + pgtap | public | hasnt_materialized_view | name, name, text | text + pgtap | public | hasnt_materialized_view | name, text | text + pgtap | public | hasnt_materialized_view | name | text + pgtap | public | hasnt_opclass | name, name, text | text + pgtap | public | hasnt_opclass | name, text | text + pgtap | public | hasnt_opclass | name | text + pgtap | public | hasnt_opclass | name, name | text + pgtap | public | hasnt_operator | name, name, name, text | text + pgtap | public | hasnt_operator | name, name, name, name, name, text | text + pgtap | public | hasnt_operator | name, name, name, name | text + pgtap | public | hasnt_operator | name, name, name, name, text | text + pgtap | public | hasnt_operator | name, name, name, name, name | text + pgtap | public | hasnt_operator | name, name, name | text + pgtap | public | hasnt_pk | name, name, text | text + pgtap | public | hasnt_pk | name, text | text + pgtap | public | hasnt_pk | name | text + pgtap | public | hasnt_relation | name, name, text | text + pgtap | public | hasnt_relation | name, text | text + pgtap | public | hasnt_relation | name | text + pgtap | public | hasnt_rightop | name, name, name, text | text + pgtap | public | hasnt_rightop | name, name, name, name | text + pgtap | public | hasnt_rightop | name, name, text | text + pgtap | public | hasnt_rightop | name, name, name, name, text | text + pgtap | public | hasnt_rightop | name, name | text + pgtap | public | hasnt_rightop | name, name, name | text + pgtap | public | 
hasnt_role | name, text | text + pgtap | public | hasnt_role | name | text + pgtap | public | hasnt_rule | name, name, name, text | text + pgtap | public | hasnt_rule | name, name, text | text + pgtap | public | hasnt_rule | name, name | text + pgtap | public | hasnt_rule | name, name, name | text + pgtap | public | hasnt_schema | name, text | text + pgtap | public | hasnt_schema | name | text + pgtap | public | hasnt_sequence | name, name, text | text + pgtap | public | hasnt_sequence | name, text | text + pgtap | public | hasnt_sequence | name | text + pgtap | public | hasnt_table | name, name, text | text + pgtap | public | hasnt_table | name, text | text + pgtap | public | hasnt_table | name | text + pgtap | public | hasnt_table | name, name | text + pgtap | public | hasnt_tablespace | name, text | text + pgtap | public | hasnt_tablespace | name | text + pgtap | public | hasnt_trigger | name, name, name, text | text + pgtap | public | hasnt_trigger | name, name, text | text + pgtap | public | hasnt_trigger | name, name | text + pgtap | public | hasnt_trigger | name, name, name | text + pgtap | public | hasnt_type | name, name, text | text + pgtap | public | hasnt_type | name, text | text + pgtap | public | hasnt_type | name | text + pgtap | public | hasnt_type | name, name | text + pgtap | public | hasnt_user | name, text | text + pgtap | public | hasnt_user | name | text + pgtap | public | hasnt_view | name, name, text | text + pgtap | public | hasnt_view | name, text | text + pgtap | public | hasnt_view | name | text + pgtap | public | hasnt_view | name, name | text + pgtap | public | ialike | anyelement, text | text + pgtap | public | ialike | anyelement, text, text | text + pgtap | public | imatches | anyelement, text | text + pgtap | public | imatches | anyelement, text, text | text + pgtap | public | in_todo | | boolean + pgtap | public | index_is_primary | name, name, name, text | text + pgtap | public | index_is_primary | name | text + pgtap | public | index_is_primary | name, name | text + pgtap | public | index_is_primary | name, name, name | text + pgtap | public | index_is_type | name, name, name, name | text + pgtap | public | index_is_type | name, name, name, name, text | text + pgtap | public | index_is_type | name, name | text + pgtap | public | index_is_type | name, name, name | text + pgtap | public | index_is_unique | name, name, name, text | text + pgtap | public | index_is_unique | name | text + pgtap | public | index_is_unique | name, name | text + pgtap | public | index_is_unique | name, name, name | text + pgtap | public | index_owner_is | name, name, name, text | text + pgtap | public | index_owner_is | name, name, name, name | text + pgtap | public | index_owner_is | name, name, name, name, text | text + pgtap | public | index_owner_is | name, name, name | text + pgtap | public | indexes_are | name, name[], text | text + pgtap | public | indexes_are | name, name, name[] | text + pgtap | public | indexes_are | name, name[] | text + pgtap | public | indexes_are | name, name, name[], text | text + pgtap | public | is | anyelement, anyelement, text | text + pgtap | public | is | anyelement, anyelement | text + pgtap | public | is_aggregate | name, name[], text | text + pgtap | public | is_aggregate | name, name, name[] | text + pgtap | public | is_aggregate | name, name[] | text + pgtap | public | is_aggregate | name, name, text | text + pgtap | public | is_aggregate | name, text | text + pgtap | public | is_aggregate | name | text + pgtap | public | is_aggregate 
| name, name, name[], text | text + pgtap | public | is_aggregate | name, name | text + pgtap | public | is_ancestor_of | name, name, name, name, integer, text | text + pgtap | public | is_ancestor_of | name, name, integer | text + pgtap | public | is_ancestor_of | name, name, name, name | text + pgtap | public | is_ancestor_of | name, name, text | text + pgtap | public | is_ancestor_of | name, name, name, name, text | text + pgtap | public | is_ancestor_of | name, name, name, name, integer | text + pgtap | public | is_ancestor_of | name, name | text + pgtap | public | is_ancestor_of | name, name, integer, text | text + pgtap | public | is_clustered | name, name, name, text | text + pgtap | public | is_clustered | name | text + pgtap | public | is_clustered | name, name | text + pgtap | public | is_clustered | name, name, name | text + pgtap | public | is_definer | name, name[], text | text + pgtap | public | is_definer | name, name, name[] | text + pgtap | public | is_definer | name, name[] | text + pgtap | public | is_definer | name, name, text | text + pgtap | public | is_definer | name, text | text + pgtap | public | is_definer | name | text + pgtap | public | is_definer | name, name, name[], text | text + pgtap | public | is_definer | name, name | text + pgtap | public | is_descendent_of | name, name, name, name, integer, text | text + pgtap | public | is_descendent_of | name, name, integer | text + pgtap | public | is_descendent_of | name, name, name, name | text + pgtap | public | is_descendent_of | name, name, text | text + pgtap | public | is_descendent_of | name, name, name, name, text | text + pgtap | public | is_descendent_of | name, name, name, name, integer | text + pgtap | public | is_descendent_of | name, name | text + pgtap | public | is_descendent_of | name, name, integer, text | text + pgtap | public | is_empty | text | text + pgtap | public | is_empty | text, text | text + pgtap | public | is_indexed | name, name, name, text | text + pgtap | public | is_indexed | name, name[], text | text + pgtap | public | is_indexed | name, name, name[] | text + pgtap | public | is_indexed | name, name[] | text + pgtap | public | is_indexed | name, name, name[], text | text + pgtap | public | is_indexed | name, name | text + pgtap | public | is_indexed | name, name, name | text + pgtap | public | is_member_of | name, name[], text | text + pgtap | public | is_member_of | name, name[] | text + pgtap | public | is_member_of | name, name, text | text + pgtap | public | is_member_of | name, name | text + pgtap | public | is_normal_function | name, name[], text | text + pgtap | public | is_normal_function | name, name, name[] | text + pgtap | public | is_normal_function | name, name[] | text + pgtap | public | is_normal_function | name, name, text | text + pgtap | public | is_normal_function | name, text | text + pgtap | public | is_normal_function | name | text + pgtap | public | is_normal_function | name, name, name[], text | text + pgtap | public | is_normal_function | name, name | text + pgtap | public | is_partition_of | name, name, name, name | text + pgtap | public | is_partition_of | name, name, text | text + pgtap | public | is_partition_of | name, name, name, name, text | text + pgtap | public | is_partition_of | name, name | text + pgtap | public | is_partitioned | name, name, text | text + pgtap | public | is_partitioned | name, text | text + pgtap | public | is_partitioned | name | text + pgtap | public | is_partitioned | name, name | text + pgtap | public | is_procedure | 
name, name[], text | text + pgtap | public | is_procedure | name, name, name[] | text + pgtap | public | is_procedure | name, name[] | text + pgtap | public | is_procedure | name, name, text | text + pgtap | public | is_procedure | name, text | text + pgtap | public | is_procedure | name | text + pgtap | public | is_procedure | name, name, name[], text | text + pgtap | public | is_procedure | name, name | text + pgtap | public | is_strict | name, name[], text | text + pgtap | public | is_strict | name, name, name[] | text + pgtap | public | is_strict | name, name[] | text + pgtap | public | is_strict | name, name, text | text + pgtap | public | is_strict | name, text | text + pgtap | public | is_strict | name | text + pgtap | public | is_strict | name, name, name[], text | text + pgtap | public | is_strict | name, name | text + pgtap | public | is_superuser | name, text | text + pgtap | public | is_superuser | name | text + pgtap | public | is_window | name, name[], text | text + pgtap | public | is_window | name, name, name[] | text + pgtap | public | is_window | name, name[] | text + pgtap | public | is_window | name, name, text | text + pgtap | public | is_window | name, text | text + pgtap | public | is_window | name | text + pgtap | public | is_window | name, name, name[], text | text + pgtap | public | is_window | name, name | text + pgtap | public | isa_ok | anyelement, regtype | text + pgtap | public | isa_ok | anyelement, regtype, text | text + pgtap | public | isnt | anyelement, anyelement, text | text + pgtap | public | isnt | anyelement, anyelement | text + pgtap | public | isnt_aggregate | name, name[], text | text + pgtap | public | isnt_aggregate | name, name, name[] | text + pgtap | public | isnt_aggregate | name, name[] | text + pgtap | public | isnt_aggregate | name, name, text | text + pgtap | public | isnt_aggregate | name, text | text + pgtap | public | isnt_aggregate | name | text + pgtap | public | isnt_aggregate | name, name, name[], text | text + pgtap | public | isnt_aggregate | name, name | text + pgtap | public | isnt_ancestor_of | name, name, name, name, integer, text | text + pgtap | public | isnt_ancestor_of | name, name, integer | text + pgtap | public | isnt_ancestor_of | name, name, name, name | text + pgtap | public | isnt_ancestor_of | name, name, text | text + pgtap | public | isnt_ancestor_of | name, name, name, name, text | text + pgtap | public | isnt_ancestor_of | name, name, name, name, integer | text + pgtap | public | isnt_ancestor_of | name, name | text + pgtap | public | isnt_ancestor_of | name, name, integer, text | text + pgtap | public | isnt_definer | name, name[], text | text + pgtap | public | isnt_definer | name, name, name[] | text + pgtap | public | isnt_definer | name, name[] | text + pgtap | public | isnt_definer | name, name, text | text + pgtap | public | isnt_definer | name, text | text + pgtap | public | isnt_definer | name | text + pgtap | public | isnt_definer | name, name, name[], text | text + pgtap | public | isnt_definer | name, name | text + pgtap | public | isnt_descendent_of | name, name, name, name, integer, text | text + pgtap | public | isnt_descendent_of | name, name, integer | text + pgtap | public | isnt_descendent_of | name, name, name, name | text + pgtap | public | isnt_descendent_of | name, name, text | text + pgtap | public | isnt_descendent_of | name, name, name, name, text | text + pgtap | public | isnt_descendent_of | name, name, name, name, integer | text + pgtap | public | isnt_descendent_of | name, name 
| text + pgtap | public | isnt_descendent_of | name, name, integer, text | text + pgtap | public | isnt_empty | text | text + pgtap | public | isnt_empty | text, text | text + pgtap | public | isnt_member_of | name, name[], text | text + pgtap | public | isnt_member_of | name, name[] | text + pgtap | public | isnt_member_of | name, name, text | text + pgtap | public | isnt_member_of | name, name | text + pgtap | public | isnt_normal_function | name, name[], text | text + pgtap | public | isnt_normal_function | name, name, name[] | text + pgtap | public | isnt_normal_function | name, name[] | text + pgtap | public | isnt_normal_function | name, name, text | text + pgtap | public | isnt_normal_function | name, text | text + pgtap | public | isnt_normal_function | name | text + pgtap | public | isnt_normal_function | name, name, name[], text | text + pgtap | public | isnt_normal_function | name, name | text + pgtap | public | isnt_partitioned | name, name, text | text + pgtap | public | isnt_partitioned | name, text | text + pgtap | public | isnt_partitioned | name | text + pgtap | public | isnt_partitioned | name, name | text + pgtap | public | isnt_procedure | name, name[], text | text + pgtap | public | isnt_procedure | name, name, name[] | text + pgtap | public | isnt_procedure | name, name[] | text + pgtap | public | isnt_procedure | name, name, text | text + pgtap | public | isnt_procedure | name, text | text + pgtap | public | isnt_procedure | name | text + pgtap | public | isnt_procedure | name, name, name[], text | text + pgtap | public | isnt_procedure | name, name | text + pgtap | public | isnt_strict | name, name[], text | text + pgtap | public | isnt_strict | name, name, name[] | text + pgtap | public | isnt_strict | name, name[] | text + pgtap | public | isnt_strict | name, name, text | text + pgtap | public | isnt_strict | name, text | text + pgtap | public | isnt_strict | name | text + pgtap | public | isnt_strict | name, name, name[], text | text + pgtap | public | isnt_strict | name, name | text + pgtap | public | isnt_superuser | name, text | text + pgtap | public | isnt_superuser | name | text + pgtap | public | isnt_window | name, name[], text | text + pgtap | public | isnt_window | name, name, name[] | text + pgtap | public | isnt_window | name, name[] | text + pgtap | public | isnt_window | name, name, text | text + pgtap | public | isnt_window | name, text | text + pgtap | public | isnt_window | name | text + pgtap | public | isnt_window | name, name, name[], text | text + pgtap | public | isnt_window | name, name | text + pgtap | public | language_is_trusted | name, text | text + pgtap | public | language_is_trusted | name | text + pgtap | public | language_owner_is | name, name, text | text + pgtap | public | language_owner_is | name, name | text + pgtap | public | language_privs_are | name, name, name[] | text + pgtap | public | language_privs_are | name, name, name[], text | text + pgtap | public | languages_are | name[] | text + pgtap | public | languages_are | name[], text | text + pgtap | public | lives_ok | text | text + pgtap | public | lives_ok | text, text | text + pgtap | public | matches | anyelement, text | text + pgtap | public | matches | anyelement, text, text | text + pgtap | public | materialized_view_owner_is | name, name, name, text | text + pgtap | public | materialized_view_owner_is | name, name, text | text + pgtap | public | materialized_view_owner_is | name, name | text + pgtap | public | materialized_view_owner_is | name, name, name | text + 
pgtap | public | materialized_views_are | name[] | text + pgtap | public | materialized_views_are | name[], text | text + pgtap | public | materialized_views_are | name, name[], text | text + pgtap | public | materialized_views_are | name, name[] | text + pgtap | public | no_plan | | SETOF boolean + pgtap | public | num_failed | | integer + pgtap | public | ok | boolean | text + pgtap | public | ok | boolean, text | text + pgtap | public | opclass_owner_is | name, name, name, text | text + pgtap | public | opclass_owner_is | name, name, text | text + pgtap | public | opclass_owner_is | name, name | text + pgtap | public | opclass_owner_is | name, name, name | text + pgtap | public | opclasses_are | name[] | text + pgtap | public | opclasses_are | name[], text | text + pgtap | public | opclasses_are | name, name[], text | text + pgtap | public | opclasses_are | name, name[] | text + pgtap | public | operators_are | name, text[] | text + pgtap | public | operators_are | text[] | text + pgtap | public | operators_are | text[], text | text + pgtap | public | operators_are | name, text[], text | text + pgtap | public | os_name | | text + pgtap | public | partitions_are | name, name[], text | text + pgtap | public | partitions_are | name, name, name[] | text + pgtap | public | partitions_are | name, name[] | text + pgtap | public | partitions_are | name, name, name[], text | text + pgtap | public | pass | text | text + pgtap | public | pass | | text + pgtap | public | performs_ok | text, numeric, text | text + pgtap | public | performs_ok | text, numeric | text + pgtap | public | performs_within | text, numeric, numeric, integer | text + pgtap | public | performs_within | text, numeric, numeric | text + pgtap | public | performs_within | text, numeric, numeric, integer, text | text + pgtap | public | performs_within | text, numeric, numeric, text | text + pgtap | public | pg_version | | text + pgtap | public | pg_version_num | | integer + pgtap | public | pgtap_version | | numeric + pgtap | public | plan | integer | text + pgtap | public | policies_are | name, name[], text | text + pgtap | public | policies_are | name, name, name[] | text + pgtap | public | policies_are | name, name[] | text + pgtap | public | policies_are | name, name, name[], text | text + pgtap | public | policy_cmd_is | name, name, name, text | text + pgtap | public | policy_cmd_is | name, name, text, text | text + pgtap | public | policy_cmd_is | name, name, name, text, text | text + pgtap | public | policy_cmd_is | name, name, text | text + pgtap | public | policy_roles_are | name, name, name, name[], text | text + pgtap | public | policy_roles_are | name, name, name[] | text + pgtap | public | policy_roles_are | name, name, name, name[] | text + pgtap | public | policy_roles_are | name, name, name[], text | text + pgtap | public | relation_owner_is | name, name, name, text | text + pgtap | public | relation_owner_is | name, name, text | text + pgtap | public | relation_owner_is | name, name | text + pgtap | public | relation_owner_is | name, name, name | text + pgtap | public | results_eq | text, refcursor | text + pgtap | public | results_eq | text, anyarray, text | text + pgtap | public | results_eq | text, text | text + pgtap | public | results_eq | text, refcursor, text | text + pgtap | public | results_eq | refcursor, anyarray | text + pgtap | public | results_eq | refcursor, anyarray, text | text + pgtap | public | results_eq | text, text, text | text + pgtap | public | results_eq | text, anyarray | text + pgtap | 
public | results_eq | refcursor, refcursor, text | text + pgtap | public | results_eq | refcursor, text, text | text + pgtap | public | results_eq | refcursor, text | text + pgtap | public | results_eq | refcursor, refcursor | text + pgtap | public | results_ne | text, refcursor | text + pgtap | public | results_ne | text, anyarray, text | text + pgtap | public | results_ne | text, text | text + pgtap | public | results_ne | text, refcursor, text | text + pgtap | public | results_ne | refcursor, anyarray | text + pgtap | public | results_ne | refcursor, anyarray, text | text + pgtap | public | results_ne | text, text, text | text + pgtap | public | results_ne | text, anyarray | text + pgtap | public | results_ne | refcursor, refcursor, text | text + pgtap | public | results_ne | refcursor, text, text | text + pgtap | public | results_ne | refcursor, text | text + pgtap | public | results_ne | refcursor, refcursor | text + pgtap | public | roles_are | name[] | text + pgtap | public | roles_are | name[], text | text + pgtap | public | row_eq | text, anyelement | text + pgtap | public | row_eq | text, anyelement, text | text + pgtap | public | rule_is_instead | name, name, name, text | text + pgtap | public | rule_is_instead | name, name, text | text + pgtap | public | rule_is_instead | name, name | text + pgtap | public | rule_is_instead | name, name, name | text + pgtap | public | rule_is_on | name, name, name, text | text + pgtap | public | rule_is_on | name, name, text, text | text + pgtap | public | rule_is_on | name, name, name, text, text | text + pgtap | public | rule_is_on | name, name, text | text + pgtap | public | rules_are | name, name[], text | text + pgtap | public | rules_are | name, name, name[] | text + pgtap | public | rules_are | name, name[] | text + pgtap | public | rules_are | name, name, name[], text | text + pgtap | public | runtests | text | SETOF text + pgtap | public | runtests | name, text | SETOF text + pgtap | public | runtests | name | SETOF text + pgtap | public | runtests | | SETOF text + pgtap | public | schema_owner_is | name, name, text | text + pgtap | public | schema_owner_is | name, name | text + pgtap | public | schema_privs_are | name, name, name[] | text + pgtap | public | schema_privs_are | name, name, name[], text | text + pgtap | public | schemas_are | name[] | text + pgtap | public | schemas_are | name[], text | text + pgtap | public | sequence_owner_is | name, name, name, text | text + pgtap | public | sequence_owner_is | name, name, text | text + pgtap | public | sequence_owner_is | name, name | text + pgtap | public | sequence_owner_is | name, name, name | text + pgtap | public | sequence_privs_are | name, name, name, name[], text | text + pgtap | public | sequence_privs_are | name, name, name[] | text + pgtap | public | sequence_privs_are | name, name, name, name[] | text + pgtap | public | sequence_privs_are | name, name, name[], text | text + pgtap | public | sequences_are | name[] | text + pgtap | public | sequences_are | name[], text | text + pgtap | public | sequences_are | name, name[], text | text + pgtap | public | sequences_are | name, name[] | text + pgtap | public | server_privs_are | name, name, name[] | text + pgtap | public | server_privs_are | name, name, name[], text | text + pgtap | public | set_eq | text, anyarray, text | text + pgtap | public | set_eq | text, text | text + pgtap | public | set_eq | text, text, text | text + pgtap | public | set_eq | text, anyarray | text + pgtap | public | set_has | text, text | text + 
pgtap | public | set_has | text, text, text | text + pgtap | public | set_hasnt | text, text | text + pgtap | public | set_hasnt | text, text, text | text + pgtap | public | set_ne | text, anyarray, text | text + pgtap | public | set_ne | text, text | text + pgtap | public | set_ne | text, text, text | text + pgtap | public | set_ne | text, anyarray | text + pgtap | public | skip | integer | text + pgtap | public | skip | text | text + pgtap | public | skip | why text, how_many integer | text + pgtap | public | skip | integer, text | text + pgtap | public | table_owner_is | name, name, name, text | text + pgtap | public | table_owner_is | name, name, text | text + pgtap | public | table_owner_is | name, name | text + pgtap | public | table_owner_is | name, name, name | text + pgtap | public | table_privs_are | name, name, name, name[], text | text + pgtap | public | table_privs_are | name, name, name[] | text + pgtap | public | table_privs_are | name, name, name, name[] | text + pgtap | public | table_privs_are | name, name, name[], text | text + pgtap | public | tables_are | name[] | text + pgtap | public | tables_are | name[], text | text + pgtap | public | tables_are | name, name[], text | text + pgtap | public | tables_are | name, name[] | text + pgtap | public | tablespace_owner_is | name, name, text | text + pgtap | public | tablespace_owner_is | name, name | text + pgtap | public | tablespace_privs_are | name, name, name[] | text + pgtap | public | tablespace_privs_are | name, name, name[], text | text + pgtap | public | tablespaces_are | name[] | text + pgtap | public | tablespaces_are | name[], text | text + pgtap | public | throws_ilike | text, text | text + pgtap | public | throws_ilike | text, text, text | text + pgtap | public | throws_imatching | text, text | text + pgtap | public | throws_imatching | text, text, text | text + pgtap | public | throws_like | text, text | text + pgtap | public | throws_like | text, text, text | text + pgtap | public | throws_matching | text, text | text + pgtap | public | throws_matching | text, text, text | text + pgtap | public | throws_ok | text | text + pgtap | public | throws_ok | text, integer | text + pgtap | public | throws_ok | text, text | text + pgtap | public | throws_ok | text, character, text, text | text + pgtap | public | throws_ok | text, text, text | text + pgtap | public | throws_ok | text, integer, text, text | text + pgtap | public | throws_ok | text, integer, text | text + pgtap | public | todo | how_many integer, why text | SETOF boolean + pgtap | public | todo | why text, how_many integer | SETOF boolean + pgtap | public | todo | why text | SETOF boolean + pgtap | public | todo | how_many integer | SETOF boolean + pgtap | public | todo_end | | SETOF boolean + pgtap | public | todo_start | text | SETOF boolean + pgtap | public | todo_start | | SETOF boolean + pgtap | public | trigger_is | name, name, name, text | text + pgtap | public | trigger_is | name, name, name, name, name, text | text + pgtap | public | trigger_is | name, name, name, name, name | text + pgtap | public | trigger_is | name, name, name | text + pgtap | public | triggers_are | name, name[], text | text + pgtap | public | triggers_are | name, name, name[] | text + pgtap | public | triggers_are | name, name[] | text + pgtap | public | triggers_are | name, name, name[], text | text + pgtap | public | type_owner_is | name, name, name, text | text + pgtap | public | type_owner_is | name, name, text | text + pgtap | public | type_owner_is | name, name | text 
+ pgtap | public | type_owner_is | name, name, name | text + pgtap | public | types_are | name[] | text + pgtap | public | types_are | name[], text | text + pgtap | public | types_are | name, name[], text | text + pgtap | public | types_are | name, name[] | text + pgtap | public | unalike | anyelement, text | text + pgtap | public | unalike | anyelement, text, text | text + pgtap | public | unialike | anyelement, text | text + pgtap | public | unialike | anyelement, text, text | text + pgtap | public | users_are | name[] | text + pgtap | public | users_are | name[], text | text + pgtap | public | view_owner_is | name, name, name, text | text + pgtap | public | view_owner_is | name, name, text | text + pgtap | public | view_owner_is | name, name | text + pgtap | public | view_owner_is | name, name, name | text + pgtap | public | views_are | name[] | text + pgtap | public | views_are | name[], text | text + pgtap | public | views_are | name, name[], text | text + pgtap | public | views_are | name, name[] | text + pgtap | public | volatility_is | name, name, name[], text, text | text + pgtap | public | volatility_is | name, name[], text | text + pgtap | public | volatility_is | name, name, text, text | text + pgtap | public | volatility_is | name, name, text | text + pgtap | public | volatility_is | name, text, text | text + pgtap | public | volatility_is | name, text | text + pgtap | public | volatility_is | name, name, name[], text | text + pgtap | public | volatility_is | name, name[], text, text | text + plcoffee | pg_catalog | plcoffee_call_handler | | language_handler + plcoffee | pg_catalog | plcoffee_call_validator | oid | void + plcoffee | pg_catalog | plcoffee_inline_handler | internal | void + plls | pg_catalog | plls_call_handler | | language_handler + plls | pg_catalog | plls_call_validator | oid | void + plls | pg_catalog | plls_inline_handler | internal | void + plpgsql | pg_catalog | plpgsql_call_handler | | language_handler + plpgsql | pg_catalog | plpgsql_inline_handler | internal | void + plpgsql | pg_catalog | plpgsql_validator | oid | void + plpgsql_check | public | __plpgsql_show_dependency_tb | funcoid regprocedure, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) + plpgsql_check | public | __plpgsql_show_dependency_tb | name text, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) + plpgsql_check | public | plpgsql_check_function | funcoid regprocedure, relid regclass, format text, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype, without_warnings boolean, all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, constant_tracing boolean | SETOF text + plpgsql_check | public | plpgsql_check_function | name text, relid regclass, format text, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype 
regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype, without_warnings boolean, all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, constant_tracing boolean | SETOF text + plpgsql_check | public | plpgsql_check_function_tb | funcoid regprocedure, relid regclass, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype, without_warnings boolean, all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, constant_tracing boolean | TABLE(functionid regproc, lineno integer, statement text, sqlstate text, message text, detail text, hint text, level text, "position" integer, query text, context text) + plpgsql_check | public | plpgsql_check_function_tb | name text, relid regclass, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype, without_warnings boolean, all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, constant_tracing boolean | TABLE(functionid regproc, lineno integer, statement text, sqlstate text, message text, detail text, hint text, level text, "position" integer, query text, context text) + plpgsql_check | public | plpgsql_check_pragma | VARIADIC name text[] | integer + plpgsql_check | public | plpgsql_check_profiler | enable boolean | boolean + plpgsql_check | public | plpgsql_check_tracer | enable boolean, verbosity text | boolean + plpgsql_check | public | plpgsql_coverage_branches | funcoid regprocedure | double precision + plpgsql_check | public | plpgsql_coverage_branches | name text | double precision + plpgsql_check | public | plpgsql_coverage_statements | funcoid regprocedure | double precision + plpgsql_check | public | plpgsql_coverage_statements | name text | double precision + plpgsql_check | public | plpgsql_profiler_function_statements_tb | funcoid regprocedure | TABLE(stmtid integer, parent_stmtid integer, parent_note text, block_num integer, lineno integer, queryid bigint, exec_stmts bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, max_time double precision, processed_rows bigint, stmtname text) + plpgsql_check | public | plpgsql_profiler_function_statements_tb | name text | TABLE(stmtid integer, parent_stmtid integer, parent_note text, block_num integer, lineno integer, queryid bigint, exec_stmts bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, max_time double precision, processed_rows bigint, stmtname text) + plpgsql_check | public | plpgsql_profiler_function_tb | funcoid regprocedure | TABLE(lineno integer, stmt_lineno integer, queryids bigint[], cmds_on_row integer, exec_stmts bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, max_time double precision[], processed_rows bigint[], source text) + plpgsql_check | public | plpgsql_profiler_function_tb | name text | TABLE(lineno integer, stmt_lineno integer, queryids bigint[], cmds_on_row integer, exec_stmts bigint, exec_stmts_err bigint, 
total_time double precision, avg_time double precision, max_time double precision[], processed_rows bigint[], source text) + plpgsql_check | public | plpgsql_profiler_functions_all | | TABLE(funcoid regprocedure, exec_count bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, stddev_time double precision, min_time double precision, max_time double precision) + plpgsql_check | public | plpgsql_profiler_install_fake_queryid_hook | | void + plpgsql_check | public | plpgsql_profiler_remove_fake_queryid_hook | | void + plpgsql_check | public | plpgsql_profiler_reset | funcoid regprocedure | void + plpgsql_check | public | plpgsql_profiler_reset_all | | void + plpgsql_check | public | plpgsql_show_dependency_tb | funcoid regprocedure, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) + plpgsql_check | public | plpgsql_show_dependency_tb | fnname text, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) + plv8 | pg_catalog | plv8_call_handler | | language_handler + plv8 | pg_catalog | plv8_call_validator | oid | void + plv8 | pg_catalog | plv8_info | | json + plv8 | pg_catalog | plv8_inline_handler | internal | void + plv8 | pg_catalog | plv8_reset | | void + plv8 | pg_catalog | plv8_version | | text + postgis | public | _postgis_deprecate | oldname text, newname text, version text | void + postgis | public | _postgis_index_extent | tbl regclass, col text | box2d + postgis | public | _postgis_join_selectivity | regclass, text, regclass, text, text | double precision + postgis | public | _postgis_pgsql_version | | text + postgis | public | _postgis_scripts_pgsql_version | | text + postgis | public | _postgis_selectivity | tbl regclass, att_name text, geom geometry, mode text | double precision + postgis | public | _postgis_stats | tbl regclass, att_name text, text | text + postgis | public | _st_3ddfullywithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | _st_3ddwithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | _st_3dintersects | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_asgml | integer, geometry, integer, integer, text, text | text + postgis | public | _st_asx3d | integer, geometry, integer, integer, text | text + postgis | public | _st_bestsrid | geography | integer + postgis | public | _st_bestsrid | geography, geography | integer + postgis | public | _st_contains | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_containsproperly | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_coveredby | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_coveredby | geog1 geography, geog2 geography | boolean + postgis | public | _st_covers | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_covers | geog1 geography, geog2 geography | boolean + postgis | public | _st_crosses | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_dfullywithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | _st_distancetree | geography, geography, double precision, boolean | double precision + postgis | public | _st_distancetree | geography, geography | double precision + postgis | 
public | _st_distanceuncached | geography, geography, double precision, boolean | double precision
+ postgis | public | _st_distanceuncached | geography, geography, boolean | double precision
+ postgis | public | _st_distanceuncached | geography, geography | double precision
+ postgis | public | _st_dwithin | geog1 geography, geog2 geography, tolerance double precision, use_spheroid boolean | boolean
+ postgis | public | _st_dwithin | geom1 geometry, geom2 geometry, double precision | boolean
+ postgis | public | _st_dwithinuncached | geography, geography, double precision, boolean | boolean
+ postgis | public | _st_dwithinuncached | geography, geography, double precision | boolean
+ postgis | public | _st_equals | geom1 geometry, geom2 geometry | boolean
+ postgis | public | _st_expand | geography, double precision | geography
+ postgis | public | _st_geomfromgml | text, integer | geometry
+ postgis | public | _st_intersects | geom1 geometry, geom2 geometry | boolean
+ postgis | public | _st_linecrossingdirection | line1 geometry, line2 geometry | integer
+ postgis | public | _st_longestline | geom1 geometry, geom2 geometry | geometry
+ postgis | public | _st_maxdistance | geom1 geometry, geom2 geometry | double precision
+ postgis | public | _st_orderingequals | geom1 geometry, geom2 geometry | boolean
+ postgis | public | _st_overlaps | geom1 geometry, geom2 geometry | boolean
+ postgis | public | _st_pointoutside | geography | geography
+ postgis | public | _st_sortablehash | geom geometry | bigint
+ postgis | public | _st_touches | geom1 geometry, geom2 geometry | boolean
+ postgis | public | _st_voronoi | g1 geometry, clip geometry, tolerance double precision, return_polygons boolean | geometry
+ postgis | public | _st_within | geom1 geometry, geom2 geometry | boolean
+ postgis | public | addauth | text | boolean
+ postgis | public | addgeometrycolumn | schema_name character varying, table_name character varying, column_name character varying, new_srid integer, new_type character varying, new_dim integer, use_typmod boolean | text
+ postgis | public | addgeometrycolumn | catalog_name character varying, schema_name character varying, table_name character varying, column_name character varying, new_srid_in integer, new_type character varying, new_dim integer, use_typmod boolean | text
+ postgis | public | addgeometrycolumn | table_name character varying, column_name character varying, new_srid integer, new_type character varying, new_dim integer, use_typmod boolean | text
+ postgis | public | box | box3d | box
+ postgis | public | box | geometry | box
+ postgis | public | box2d | box3d | box2d
+ postgis | public | box2d | geometry | box2d
+ postgis | public | box2d_in | cstring | box2d
+ postgis | public | box2d_out | box2d | cstring
+ postgis | public | box2df_in | cstring | box2df
+ postgis | public | box2df_out | box2df | cstring
+ postgis | public | box3d | geometry | box3d
+ postgis | public | box3d | box2d | box3d
+ postgis | public | box3d_in | cstring | box3d
+ postgis | public | box3d_out | box3d | cstring
+ postgis | public | box3dtobox | box3d | box
+ postgis | public | bytea | geography | bytea
+ postgis | public | bytea | geometry | bytea
+ postgis | public | checkauth | text, text | integer
+ postgis | public | checkauth | text, text, text | integer
+ postgis | public | checkauthtrigger | | trigger
+ postgis | public | contains_2d | geometry, box2df | boolean
+ postgis | public | contains_2d | box2df, geometry | boolean
+ postgis | public | contains_2d | box2df, box2df | boolean
+ postgis | public | disablelongtransactions | | text
+ postgis | public | dropgeometrycolumn | schema_name character varying, table_name character varying, column_name character varying | text
+ postgis | public | dropgeometrycolumn | table_name character varying, column_name character varying | text
+ postgis | public | dropgeometrycolumn | catalog_name character varying, schema_name character varying, table_name character varying, column_name character varying | text
+ postgis | public | dropgeometrytable | table_name character varying | text
+ postgis | public | dropgeometrytable | schema_name character varying, table_name character varying | text
+ postgis | public | dropgeometrytable | catalog_name character varying, schema_name character varying, table_name character varying | text
+ postgis | public | enablelongtransactions | | text
+ postgis | public | equals | geom1 geometry, geom2 geometry | boolean
+ postgis | public | find_srid | character varying, character varying, character varying | integer
+ postgis | public | geog_brin_inclusion_add_value | internal, internal, internal, internal | boolean
+ postgis | public | geography | bytea | geography
+ postgis | public | geography | geometry | geography
+ postgis | public | geography | geography, integer, boolean | geography
+ postgis | public | geography_analyze | internal | boolean
+ postgis | public | geography_cmp | geography, geography | integer
+ postgis | public | geography_distance_knn | geography, geography | double precision
+ postgis | public | geography_eq | geography, geography | boolean
+ postgis | public | geography_ge | geography, geography | boolean
+ postgis | public | geography_gist_compress | internal | internal
+ postgis | public | geography_gist_consistent | internal, geography, integer | boolean
+ postgis | public | geography_gist_decompress | internal | internal
+ postgis | public | geography_gist_distance | internal, geography, integer | double precision
+ postgis | public | geography_gist_penalty | internal, internal, internal | internal
+ postgis | public | geography_gist_picksplit | internal, internal | internal
+ postgis | public | geography_gist_same | box2d, box2d, internal | internal
+ postgis | public | geography_gist_union | bytea, internal | internal
+ postgis | public | geography_gt | geography, geography | boolean
+ postgis | public | geography_in | cstring, oid, integer | geography
+ postgis | public | geography_le | geography, geography | boolean
+ postgis | public | geography_lt | geography, geography | boolean
+ postgis | public | geography_out | geography | cstring
+ postgis | public | geography_overlaps | geography, geography | boolean
+ postgis | public | geography_recv | internal, oid, integer | geography
+ postgis | public | geography_send | geography | bytea
+ postgis | public | geography_spgist_choose_nd | internal, internal | void
+ postgis | public | geography_spgist_compress_nd | internal | internal
+ postgis | public | geography_spgist_config_nd | internal, internal | void
+ postgis | public | geography_spgist_inner_consistent_nd | internal, internal | void
+ postgis | public | geography_spgist_leaf_consistent_nd | internal, internal | boolean
+ postgis | public | geography_spgist_picksplit_nd | internal, internal | void
+ postgis | public | geography_typmod_in | cstring[] | integer
+ postgis | public | geography_typmod_out | integer | cstring
+ postgis | public | geom2d_brin_inclusion_add_value | internal, internal, internal, internal | boolean
+ postgis | public | geom3d_brin_inclusion_add_value | internal, internal, internal, internal | boolean
+ postgis | public | geom4d_brin_inclusion_add_value | internal, internal, internal, internal | boolean
+ postgis | public | geometry | geometry, integer, boolean | geometry
+ postgis | public | geometry | box3d | geometry
+ postgis | public | geometry | text | geometry
+ postgis | public | geometry | point | geometry
+ postgis | public | geometry | bytea | geometry
+ postgis | public | geometry | geography | geometry
+ postgis | public | geometry | path | geometry
+ postgis | public | geometry | polygon | geometry
+ postgis | public | geometry | box2d | geometry
+ postgis | public | geometry_above | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_analyze | internal | boolean
+ postgis | public | geometry_below | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_cmp | geom1 geometry, geom2 geometry | integer
+ postgis | public | geometry_contained_3d | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_contains | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_contains_3d | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_contains_nd | geometry, geometry | boolean
+ postgis | public | geometry_distance_box | geom1 geometry, geom2 geometry | double precision
+ postgis | public | geometry_distance_centroid | geom1 geometry, geom2 geometry | double precision
+ postgis | public | geometry_distance_centroid_nd | geometry, geometry | double precision
+ postgis | public | geometry_distance_cpa | geometry, geometry | double precision
+ postgis | public | geometry_eq | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_ge | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_gist_compress_2d | internal | internal
+ postgis | public | geometry_gist_compress_nd | internal | internal
+ postgis | public | geometry_gist_consistent_2d | internal, geometry, integer | boolean
+ postgis | public | geometry_gist_consistent_nd | internal, geometry, integer | boolean
+ postgis | public | geometry_gist_decompress_2d | internal | internal
+ postgis | public | geometry_gist_decompress_nd | internal | internal
+ postgis | public | geometry_gist_distance_2d | internal, geometry, integer | double precision
+ postgis | public | geometry_gist_distance_nd | internal, geometry, integer | double precision
+ postgis | public | geometry_gist_penalty_2d | internal, internal, internal | internal
+ postgis | public | geometry_gist_penalty_nd | internal, internal, internal | internal
+ postgis | public | geometry_gist_picksplit_2d | internal, internal | internal
+ postgis | public | geometry_gist_picksplit_nd | internal, internal | internal
+ postgis | public | geometry_gist_same_2d | geom1 geometry, geom2 geometry, internal | internal
+ postgis | public | geometry_gist_same_nd | geometry, geometry, internal | internal
+ postgis | public | geometry_gist_sortsupport_2d | internal | void
+ postgis | public | geometry_gist_union_2d | bytea, internal | internal
+ postgis | public | geometry_gist_union_nd | bytea, internal | internal
+ postgis | public | geometry_gt | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_hash | geometry | integer
+ postgis | public | geometry_in | cstring | geometry
+ postgis | public | geometry_le | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_left | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_lt | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_out | geometry | cstring
+ postgis | public | geometry_overabove | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_overbelow | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_overlaps | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_overlaps_3d | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_overlaps_nd | geometry, geometry | boolean
+ postgis | public | geometry_overleft | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_overright | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_recv | internal | geometry
+ postgis | public | geometry_right | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_same | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_same_3d | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_same_nd | geometry, geometry | boolean
+ postgis | public | geometry_send | geometry | bytea
+ postgis | public | geometry_sortsupport | internal | void
+ postgis | public | geometry_spgist_choose_2d | internal, internal | void
+ postgis | public | geometry_spgist_choose_3d | internal, internal | void
+ postgis | public | geometry_spgist_choose_nd | internal, internal | void
+ postgis | public | geometry_spgist_compress_2d | internal | internal
+ postgis | public | geometry_spgist_compress_3d | internal | internal
+ postgis | public | geometry_spgist_compress_nd | internal | internal
+ postgis | public | geometry_spgist_config_2d | internal, internal | void
+ postgis | public | geometry_spgist_config_3d | internal, internal | void
+ postgis | public | geometry_spgist_config_nd | internal, internal | void
+ postgis | public | geometry_spgist_inner_consistent_2d | internal, internal | void
+ postgis | public | geometry_spgist_inner_consistent_3d | internal, internal | void
+ postgis | public | geometry_spgist_inner_consistent_nd | internal, internal | void
+ postgis | public | geometry_spgist_leaf_consistent_2d | internal, internal | boolean
+ postgis | public | geometry_spgist_leaf_consistent_3d | internal, internal | boolean
+ postgis | public | geometry_spgist_leaf_consistent_nd | internal, internal | boolean
+ postgis | public | geometry_spgist_picksplit_2d | internal, internal | void
+ postgis | public | geometry_spgist_picksplit_3d | internal, internal | void
+ postgis | public | geometry_spgist_picksplit_nd | internal, internal | void
+ postgis | public | geometry_typmod_in | cstring[] | integer
+ postgis | public | geometry_typmod_out | integer | cstring
+ postgis | public | geometry_within | geom1 geometry, geom2 geometry | boolean
+ postgis | public | geometry_within_nd | geometry, geometry | boolean
+ postgis | public | geometrytype | geography | text
+ postgis | public | geometrytype | geometry | text
+ postgis | public | geomfromewkb | bytea | geometry
+ postgis | public | geomfromewkt | text | geometry
+ postgis | public | get_proj4_from_srid | integer | text
+ postgis | public | gettransactionid | | xid
+ postgis | public | gidx_in | cstring | gidx
+ postgis | public | gidx_out | gidx | cstring
+ postgis | public | gserialized_gist_joinsel_2d | internal, oid, internal, smallint | double precision
+ postgis | public | gserialized_gist_joinsel_nd | internal, oid, internal, smallint | double precision
+ postgis | public | gserialized_gist_sel_2d | internal, oid, internal, integer | double precision
+ postgis | public | gserialized_gist_sel_nd | internal, oid, internal, integer | double precision
+ postgis | public | is_contained_2d | geometry, box2df | boolean
+ postgis | public | is_contained_2d | box2df, geometry | boolean
+ postgis | public | is_contained_2d | box2df, box2df | boolean
+ postgis | public | json | geometry | json
+ postgis | public | jsonb | geometry | jsonb
+ postgis | public | lockrow | text, text, text, timestamp without time zone | integer
+ postgis | public | lockrow | text, text, text, text | integer
+ postgis | public | lockrow | text, text, text | integer
+ postgis | public | lockrow | text, text, text, text, timestamp without time zone | integer
+ postgis | public | longtransactionsenabled | | boolean
+ postgis | public | overlaps_2d | geometry, box2df | boolean
+ postgis | public | overlaps_2d | box2df, geometry | boolean
+ postgis | public | overlaps_2d | box2df, box2df | boolean
+ postgis | public | overlaps_geog | gidx, gidx | boolean
+ postgis | public | overlaps_geog | geography, gidx | boolean
+ postgis | public | overlaps_geog | gidx, geography | boolean
+ postgis | public | overlaps_nd | gidx, gidx | boolean
+ postgis | public | overlaps_nd | geometry, gidx | boolean
+ postgis | public | overlaps_nd | gidx, geometry | boolean
+ postgis | public | path | geometry | path
+ postgis | public | pgis_asflatgeobuf_finalfn | internal | bytea
+ postgis | public | pgis_asflatgeobuf_transfn | internal, anyelement, boolean | internal
+ postgis | public | pgis_asflatgeobuf_transfn | internal, anyelement | internal
+ postgis | public | pgis_asflatgeobuf_transfn | internal, anyelement, boolean, text | internal
+ postgis | public | pgis_asgeobuf_finalfn | internal | bytea
+ postgis | public | pgis_asgeobuf_transfn | internal, anyelement | internal
+ postgis | public | pgis_asgeobuf_transfn | internal, anyelement, text | internal
+ postgis | public | pgis_asmvt_combinefn | internal, internal | internal
+ postgis | public | pgis_asmvt_deserialfn | bytea, internal | internal
+ postgis | public | pgis_asmvt_finalfn | internal | bytea
+ postgis | public | pgis_asmvt_serialfn | internal | bytea
+ postgis | public | pgis_asmvt_transfn | internal, anyelement, text, integer, text | internal
+ postgis | public | pgis_asmvt_transfn | internal, anyelement | internal
+ postgis | public | pgis_asmvt_transfn | internal, anyelement, text, integer | internal
+ postgis | public | pgis_asmvt_transfn | internal, anyelement, text | internal
+ postgis | public | pgis_asmvt_transfn | internal, anyelement, text, integer, text, text | internal
+ postgis | public | pgis_geometry_accum_transfn | internal, geometry, double precision | internal
+ postgis | public | pgis_geometry_accum_transfn | internal, geometry | internal
+ postgis | public | pgis_geometry_accum_transfn | internal, geometry, double precision, integer | internal
+ postgis | public | pgis_geometry_clusterintersecting_finalfn | internal | geometry[]
+ postgis | public | pgis_geometry_clusterwithin_finalfn | internal | geometry[]
+ postgis | public | pgis_geometry_collect_finalfn | internal | geometry
+ postgis | public | pgis_geometry_makeline_finalfn | internal | geometry
+ postgis | public | pgis_geometry_polygonize_finalfn | internal | geometry
+ postgis | public | pgis_geometry_union_parallel_combinefn | internal, internal | internal
+ postgis | public | pgis_geometry_union_parallel_deserialfn | bytea, internal | internal
+ postgis | public | pgis_geometry_union_parallel_finalfn | internal | geometry
+ postgis | public | pgis_geometry_union_parallel_serialfn | internal | bytea
+ postgis | public | pgis_geometry_union_parallel_transfn | internal, geometry, double precision | internal
+ postgis | public | pgis_geometry_union_parallel_transfn | internal, geometry | internal
+ postgis | public | point | geometry | point
+ postgis | public | polygon | geometry | polygon
+ postgis | public | populate_geometry_columns | tbl_oid oid, use_typmod boolean | integer
+ postgis | public | populate_geometry_columns | use_typmod boolean | text
+ postgis | public | postgis_addbbox | geometry | geometry
+ postgis | public | postgis_cache_bbox | | trigger
+ postgis | public | postgis_constraint_dims | geomschema text, geomtable text, geomcolumn text | integer
+ postgis | public | postgis_constraint_srid | geomschema text, geomtable text, geomcolumn text | integer
+ postgis | public | postgis_constraint_type | geomschema text, geomtable text, geomcolumn text | character varying
+ postgis | public | postgis_dropbbox | geometry | geometry
+ postgis | public | postgis_extensions_upgrade | | text
+ postgis | public | postgis_full_version | | text
+ postgis | public | postgis_geos_noop | geometry | geometry
+ postgis | public | postgis_geos_version | | text
+ postgis | public | postgis_getbbox | geometry | box2d
+ postgis | public | postgis_hasbbox | geometry | boolean
+ postgis | public | postgis_index_supportfn | internal | internal
+ postgis | public | postgis_lib_build_date | | text
+ postgis | public | postgis_lib_revision | | text
+ postgis | public | postgis_lib_version | | text
+ postgis | public | postgis_libjson_version | | text
+ postgis | public | postgis_liblwgeom_version | | text
+ postgis | public | postgis_libprotobuf_version | | text
+ postgis | public | postgis_libxml_version | | text
+ postgis | public | postgis_noop | geometry | geometry
+ postgis | public | postgis_proj_version | | text
+ postgis | public | postgis_scripts_build_date | | text
+ postgis | public | postgis_scripts_installed | | text
+ postgis | public | postgis_scripts_released | | text
+ postgis | public | postgis_svn_version | | text
+ postgis | public | postgis_transform_geometry | geom geometry, text, text, integer | geometry
+ postgis | public | postgis_type_name | geomname character varying, coord_dimension integer, use_new_name boolean | character varying
+ postgis | public | postgis_typmod_dims | integer | integer
+ postgis | public | postgis_typmod_srid | integer | integer
+ postgis | public | postgis_typmod_type | integer | text
+ postgis | public | postgis_version | | text
+ postgis | public | postgis_wagyu_version | | text
+ postgis | public | spheroid_in | cstring | spheroid
+ postgis | public | spheroid_out | spheroid | cstring
+ postgis | public | st_3dclosestpoint | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_3ddfullywithin | geom1 geometry, geom2 geometry, double precision | boolean
+ postgis | public | st_3ddistance | geom1 geometry, geom2 geometry | double precision
+ postgis | public | st_3ddwithin | geom1 geometry, geom2 geometry, double precision | boolean
+ postgis | public | st_3dextent | geometry | box3d
+ postgis | public | st_3dintersects | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_3dlength | geometry | double precision
+ postgis | public | st_3dlineinterpolatepoint | geometry, double precision | geometry
+ postgis | public | st_3dlongestline | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_3dmakebox | geom1 geometry, geom2 geometry | box3d
+ postgis | public | st_3dmaxdistance | geom1 geometry, geom2 geometry | double precision
+ postgis | public | st_3dperimeter | geometry | double precision
+ postgis | public | st_3dshortestline | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_addmeasure | geometry, double precision, double precision | geometry
+ postgis | public | st_addpoint | geom1 geometry, geom2 geometry, integer | geometry
+ postgis | public | st_addpoint | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_affine | geometry, double precision, double precision, double precision, double precision, double precision, double precision | geometry
+ postgis | public | st_affine | geometry, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision | geometry
+ postgis | public | st_angle | pt1 geometry, pt2 geometry, pt3 geometry, pt4 geometry | double precision
+ postgis | public | st_angle | line1 geometry, line2 geometry | double precision
+ postgis | public | st_area | geog geography, use_spheroid boolean | double precision
+ postgis | public | st_area | text | double precision
+ postgis | public | st_area | geometry | double precision
+ postgis | public | st_area2d | geometry | double precision
+ postgis | public | st_asbinary | geography | bytea
+ postgis | public | st_asbinary | geometry, text | bytea
+ postgis | public | st_asbinary | geometry | bytea
+ postgis | public | st_asbinary | geography, text | bytea
+ postgis | public | st_asencodedpolyline | geom geometry, nprecision integer | text
+ postgis | public | st_asewkb | geometry, text | bytea
+ postgis | public | st_asewkb | geometry | bytea
+ postgis | public | st_asewkt | text | text
+ postgis | public | st_asewkt | geography, integer | text
+ postgis | public | st_asewkt | geography | text
+ postgis | public | st_asewkt | geometry, integer | text
+ postgis | public | st_asewkt | geometry | text
+ postgis | public | st_asflatgeobuf | anyelement | bytea
+ postgis | public | st_asflatgeobuf | anyelement, boolean | bytea
+ postgis | public | st_asflatgeobuf | anyelement, boolean, text | bytea
+ postgis | public | st_asgeobuf | anyelement | bytea
+ postgis | public | st_asgeobuf | anyelement, text | bytea
+ postgis | public | st_asgeojson | text | text
+ postgis | public | st_asgeojson | geom geometry, maxdecimaldigits integer, options integer | text
+ postgis | public | st_asgeojson | geog geography, maxdecimaldigits integer, options integer | text
+ postgis | public | st_asgeojson | r record, geom_column text, maxdecimaldigits integer, pretty_bool boolean | text
+ postgis | public | st_asgml | text | text
+ postgis | public | st_asgml | version integer, geog geography, maxdecimaldigits integer, options integer, nprefix text, id text | text
+ postgis | public | st_asgml | geog geography, maxdecimaldigits integer, options integer, nprefix text, id text | text
+ postgis | public | st_asgml | geom geometry, maxdecimaldigits integer, options integer | text
+ postgis | public | st_asgml | version integer, geom geometry, maxdecimaldigits integer, options integer, nprefix text, id text | text
+ postgis | public | st_ashexewkb | geometry, text | text
+ postgis | public | st_ashexewkb | geometry | text
+ postgis | public | st_askml | text | text
+ postgis | public | st_askml | geom geometry, maxdecimaldigits integer, nprefix text | text
+ postgis | public | st_askml | geog geography, maxdecimaldigits integer, nprefix text | text
+ postgis | public | st_aslatlontext | geom geometry, tmpl text | text
+ postgis | public | st_asmarc21 | geom geometry, format text | text
+ postgis | public | st_asmvt | anyelement | bytea
+ postgis | public | st_asmvt | anyelement, text, integer, text, text | bytea
+ postgis | public | st_asmvt | anyelement, text, integer | bytea
+ postgis | public | st_asmvt | anyelement, text | bytea
+ postgis | public | st_asmvt | anyelement, text, integer, text | bytea
+ postgis | public | st_asmvtgeom | geom geometry, bounds box2d, extent integer, buffer integer, clip_geom boolean | geometry
+ postgis | public | st_assvg | geog geography, rel integer, maxdecimaldigits integer | text
+ postgis | public | st_assvg | geom geometry, rel integer, maxdecimaldigits integer | text
+ postgis | public | st_assvg | text | text
+ postgis | public | st_astext | text | text
+ postgis | public | st_astext | geography, integer | text
+ postgis | public | st_astext | geography | text
+ postgis | public | st_astext | geometry, integer | text
+ postgis | public | st_astext | geometry | text
+ postgis | public | st_astwkb | geom geometry[], ids bigint[], prec integer, prec_z integer, prec_m integer, with_sizes boolean, with_boxes boolean | bytea
+ postgis | public | st_astwkb | geom geometry, prec integer, prec_z integer, prec_m integer, with_sizes boolean, with_boxes boolean | bytea
+ postgis | public | st_asx3d | geom geometry, maxdecimaldigits integer, options integer | text
+ postgis | public | st_azimuth | geom1 geometry, geom2 geometry | double precision
+ postgis | public | st_azimuth | geog1 geography, geog2 geography | double precision
+ postgis | public | st_bdmpolyfromtext | text, integer | geometry
+ postgis | public | st_bdpolyfromtext | text, integer | geometry
+ postgis | public | st_boundary | geometry | geometry
+ postgis | public | st_boundingdiagonal | geom geometry, fits boolean | geometry
+ postgis | public | st_box2dfromgeohash | text, integer | box2d
+ postgis | public | st_buffer | geom geometry, radius double precision, options text | geometry
+ postgis | public | st_buffer | geography, double precision, text | geography
+ postgis | public | st_buffer | geography, double precision, integer | geography
+ postgis | public | st_buffer | geom geometry, radius double precision, quadsegs integer | geometry
+ postgis | public | st_buffer | text, double precision, text | geometry
+ postgis | public | st_buffer | geography, double precision | geography
+ postgis | public | st_buffer | text, double precision, integer | geometry
+ postgis | public | st_buffer | text, double precision | geometry
+ postgis | public | st_buildarea | geometry | geometry
+ postgis | public | st_centroid | text | geometry
+ postgis | public | st_centroid | geography, use_spheroid boolean | geography
+ postgis | public | st_centroid | geometry | geometry
+ postgis | public | st_chaikinsmoothing | geometry, integer, boolean | geometry
+ postgis | public | st_cleangeometry | geometry | geometry
+ postgis | public | st_clipbybox2d | geom geometry, box box2d | geometry
+ postgis | public | st_closestpoint | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_closestpointofapproach | geometry, geometry | double precision
+ postgis | public | st_clusterdbscan | geometry, eps double precision, minpoints integer | integer
+ postgis | public | st_clusterintersecting | geometry[] | geometry[]
+ postgis | public | st_clusterintersecting | geometry | geometry[]
+ postgis | public | st_clusterkmeans | geom geometry, k integer, max_radius double precision | integer
+ postgis | public | st_clusterwithin | geometry[], double precision | geometry[]
+ postgis | public | st_clusterwithin | geometry, double precision | geometry[]
+ postgis | public | st_collect | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_collect | geometry[] | geometry
+ postgis | public | st_collect | geometry | geometry
+ postgis | public | st_collectionextract | geometry, integer | geometry
+ postgis | public | st_collectionextract | geometry | geometry
+ postgis | public | st_collectionhomogenize | geometry | geometry
+ postgis | public | st_combinebbox | box2d, geometry | box2d
+ postgis | public | st_combinebbox | box3d, geometry | box3d
+ postgis | public | st_combinebbox | box3d, box3d | box3d
+ postgis | public | st_concavehull | param_geom geometry, param_pctconvex double precision, param_allow_holes boolean | geometry
+ postgis | public | st_contains | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_containsproperly | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_convexhull | geometry | geometry
+ postgis | public | st_coorddim | geometry geometry | smallint
+ postgis | public | st_coveredby | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_coveredby | text, text | boolean
+ postgis | public | st_coveredby | geog1 geography, geog2 geography | boolean
+ postgis | public | st_covers | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_covers | text, text | boolean
+ postgis | public | st_covers | geog1 geography, geog2 geography | boolean
+ postgis | public | st_cpawithin | geometry, geometry, double precision | boolean
+ postgis | public | st_crosses | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_curvetoline | geom geometry, tol double precision, toltype integer, flags integer | geometry
+ postgis | public | st_delaunaytriangles | g1 geometry, tolerance double precision, flags integer | geometry
+ postgis | public | st_dfullywithin | geom1 geometry, geom2 geometry, double precision | boolean
+ postgis | public | st_difference | geom1 geometry, geom2 geometry, gridsize double precision | geometry
+ postgis | public | st_dimension | geometry | integer
+ postgis | public | st_disjoint | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_distance | geom1 geometry, geom2 geometry | double precision
+ postgis | public | st_distance | text, text | double precision
+ postgis | public | st_distance | geog1 geography, geog2 geography, use_spheroid boolean | double precision
+ postgis | public | st_distancecpa | geometry, geometry | double precision
+ postgis | public | st_distancesphere | geom1 geometry, geom2 geometry | double precision
+ postgis | public | st_distancesphere | geom1 geometry, geom2 geometry, radius double precision | double precision
+ postgis | public | st_distancespheroid | geom1 geometry, geom2 geometry, spheroid | double precision
+ postgis | public | st_distancespheroid | geom1 geometry, geom2 geometry | double precision
+ postgis | public | st_dump | geometry | SETOF geometry_dump
+ postgis | public | st_dumppoints | geometry | SETOF geometry_dump
+ postgis | public | st_dumprings | geometry | SETOF geometry_dump
+ postgis | public | st_dumpsegments | geometry | SETOF geometry_dump
+ postgis | public | st_dwithin | text, text, double precision | boolean
+ postgis | public | st_dwithin | geog1 geography, geog2 geography, tolerance double precision, use_spheroid boolean | boolean
+ postgis | public | st_dwithin | geom1 geometry, geom2 geometry, double precision | boolean
+ postgis | public | st_endpoint | geometry | geometry
+ postgis | public | st_envelope | geometry | geometry
+ postgis | public | st_equals | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_estimatedextent | text, text, text, boolean | box2d
+ postgis | public | st_estimatedextent | text, text | box2d
+ postgis | public | st_estimatedextent | text, text, text | box2d
+ postgis | public | st_expand | box3d, double precision | box3d
+ postgis | public | st_expand | box box3d, dx double precision, dy double precision, dz double precision | box3d
+ postgis | public | st_expand | box2d, double precision | box2d
+ postgis | public | st_expand | box box2d, dx double precision, dy double precision | box2d
+ postgis | public | st_expand | geometry, double precision | geometry
+ postgis | public | st_expand | geom geometry, dx double precision, dy double precision, dz double precision, dm double precision | geometry
+ postgis | public | st_extent | geometry | box2d
+ postgis | public | st_exteriorring | geometry | geometry
+ postgis | public | st_filterbym | geometry, double precision, double precision, boolean | geometry
+ postgis | public | st_findextent | text, text | box2d
+ postgis | public | st_findextent | text, text, text | box2d
+ postgis | public | st_flipcoordinates | geometry | geometry
+ postgis | public | st_force2d | geometry | geometry
+ postgis | public | st_force3d | geom geometry, zvalue double precision | geometry
+ postgis | public | st_force3dm | geom geometry, mvalue double precision | geometry
+ postgis | public | st_force3dz | geom geometry, zvalue double precision | geometry
+ postgis | public | st_force4d | geom geometry, zvalue double precision, mvalue double precision | geometry
+ postgis | public | st_forcecollection | geometry | geometry
+ postgis | public | st_forcecurve | geometry | geometry
+ postgis | public | st_forcepolygonccw | geometry | geometry
+ postgis | public | st_forcepolygoncw | geometry | geometry
+ postgis | public | st_forcerhr | geometry | geometry
+ postgis | public | st_forcesfs | geometry, version text | geometry
+ postgis | public | st_forcesfs | geometry | geometry
+ postgis | public | st_frechetdistance | geom1 geometry, geom2 geometry, double precision | double precision
+ postgis | public | st_fromflatgeobuf | anyelement, bytea | SETOF anyelement
+ postgis | public | st_fromflatgeobuftotable | text, text, bytea | void
+ postgis | public | st_generatepoints | area geometry, npoints integer, seed integer | geometry
+ postgis | public | st_generatepoints | area geometry, npoints integer | geometry
+ postgis | public | st_geogfromtext | text | geography
+ postgis | public | st_geogfromwkb | bytea | geography
+ postgis | public | st_geographyfromtext | text | geography
+ postgis | public | st_geohash | geog geography, maxchars integer | text
+ postgis | public | st_geohash | geom geometry, maxchars integer | text
+ postgis | public | st_geomcollfromtext | text | geometry
+ postgis | public | st_geomcollfromtext | text, integer | geometry
+ postgis | public | st_geomcollfromwkb | bytea, integer | geometry
+ postgis | public | st_geomcollfromwkb | bytea | geometry
+ postgis | public | st_geometricmedian | g geometry, tolerance double precision, max_iter integer, fail_if_not_converged boolean | geometry
+ postgis | public | st_geometryfromtext | text | geometry
+ postgis | public | st_geometryfromtext | text, integer | geometry
+ postgis | public | st_geometryn | geometry, integer | geometry
+ postgis | public | st_geometrytype | geometry | text
+ postgis | public | st_geomfromewkb | bytea | geometry
+ postgis | public | st_geomfromewkt | text | geometry
+ postgis | public | st_geomfromgeohash | text, integer | geometry
+ postgis | public | st_geomfromgeojson | text | geometry
+ postgis | public | st_geomfromgeojson | json | geometry
+ postgis | public | st_geomfromgeojson | jsonb | geometry
+ postgis | public | st_geomfromgml | text | geometry
+ postgis | public | st_geomfromgml | text, integer | geometry
+ postgis | public | st_geomfromkml | text | geometry
+ postgis | public | st_geomfrommarc21 | marc21xml text | geometry
+ postgis | public | st_geomfromtext | text | geometry
+ postgis | public | st_geomfromtext | text, integer | geometry
+ postgis | public | st_geomfromtwkb | bytea | geometry
+ postgis | public | st_geomfromwkb | bytea, integer | geometry
+ postgis | public | st_geomfromwkb | bytea | geometry
+ postgis | public | st_gmltosql | text | geometry
+ postgis | public | st_gmltosql | text, integer | geometry
+ postgis | public | st_hasarc | geometry geometry | boolean
+ postgis | public | st_hausdorffdistance | geom1 geometry, geom2 geometry | double precision
+ postgis | public | st_hausdorffdistance | geom1 geometry, geom2 geometry, double precision | double precision
+ postgis | public | st_hexagon | size double precision, cell_i integer, cell_j integer, origin geometry | geometry
+ postgis | public | st_hexagongrid | size double precision, bounds geometry, OUT geom geometry, OUT i integer, OUT j integer | SETOF record
+ postgis | public | st_interiorringn | geometry, integer | geometry
+ postgis | public | st_interpolatepoint | line geometry, point geometry | double precision
+ postgis | public | st_intersection | geom1 geometry, geom2 geometry, gridsize double precision | geometry
+ postgis | public | st_intersection | text, text | geometry
+ postgis | public | st_intersection | geography, geography | geography
+ postgis | public | st_intersects | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_intersects | text, text | boolean
+ postgis | public | st_intersects | geog1 geography, geog2 geography | boolean
+ postgis | public | st_isclosed | geometry | boolean
+ postgis | public | st_iscollection | geometry | boolean
+ postgis | public | st_isempty | geometry | boolean
+ postgis | public | st_ispolygonccw | geometry | boolean
+ postgis | public | st_ispolygoncw | geometry | boolean
+ postgis | public | st_isring | geometry | boolean
+ postgis | public | st_issimple | geometry | boolean
+ postgis | public | st_isvalid | geometry, integer | boolean
+ postgis | public | st_isvalid | geometry | boolean
+ postgis | public | st_isvaliddetail | geom geometry, flags integer | valid_detail
+ postgis | public | st_isvalidreason | geometry, integer | text
+ postgis | public | st_isvalidreason | geometry | text
+ postgis | public | st_isvalidtrajectory | geometry | boolean
+ postgis | public | st_length | geog geography, use_spheroid boolean | double precision
+ postgis | public | st_length | text | double precision
+ postgis | public | st_length | geometry | double precision
+ postgis | public | st_length2d | geometry | double precision
+ postgis | public | st_length2dspheroid | geometry, spheroid | double precision
+ postgis | public | st_lengthspheroid | geometry, spheroid | double precision
+ postgis | public | st_letters | letters text, font json | geometry
+ postgis | public | st_linecrossingdirection | line1 geometry, line2 geometry | integer
+ postgis | public | st_linefromencodedpolyline | txtin text, nprecision integer | geometry
+ postgis | public | st_linefrommultipoint | geometry | geometry
+ postgis | public | st_linefromtext | text | geometry
+ postgis | public | st_linefromtext | text, integer | geometry
+ postgis | public | st_linefromwkb | bytea, integer | geometry
+ postgis | public | st_linefromwkb | bytea | geometry
+ postgis | public | st_lineinterpolatepoint | geometry, double precision | geometry
+ postgis | public | st_lineinterpolatepoints | geometry, double precision, repeat boolean | geometry
+ postgis | public | st_linelocatepoint | geom1 geometry, geom2 geometry | double precision
+ postgis | public | st_linemerge | geometry, boolean | geometry
+ postgis | public | st_linemerge | geometry | geometry
+ postgis | public | st_linestringfromwkb | bytea, integer | geometry
+ postgis | public | st_linestringfromwkb | bytea | geometry
+ postgis | public | st_linesubstring | geometry, double precision, double precision | geometry
+ postgis | public | st_linetocurve | geometry geometry | geometry
+ postgis | public | st_locatealong | geometry geometry, measure double precision, leftrightoffset double precision | geometry
+ postgis | public | st_locatebetween | geometry geometry, frommeasure double precision, tomeasure double precision, leftrightoffset double precision | geometry
+ postgis | public | st_locatebetweenelevations | geometry geometry, fromelevation double precision, toelevation double precision | geometry
+ postgis | public | st_longestline | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_m | geometry | double precision
+ postgis | public | st_makebox2d | geom1 geometry, geom2 geometry | box2d
+ postgis | public | st_makeenvelope | double precision, double precision, double precision, double precision, integer | geometry
+ postgis | public | st_makeline | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_makeline | geometry[] | geometry
+ postgis | public | st_makeline | geometry | geometry
+ postgis | public | st_makepoint | double precision, double precision, double precision, double precision | geometry
+ postgis | public | st_makepoint | double precision, double precision | geometry
+ postgis | public | st_makepoint | double precision, double precision, double precision | geometry
+ postgis | public | st_makepointm | double precision, double precision, double precision | geometry
+ postgis | public | st_makepolygon | geometry, geometry[] | geometry
+ postgis | public | st_makepolygon | geometry | geometry
+ postgis | public | st_makevalid | geom geometry, params text | geometry
+ postgis | public | st_makevalid | geometry | geometry
+ postgis | public | st_maxdistance | geom1 geometry, geom2 geometry | double precision
+ postgis | public | st_maximuminscribedcircle | geometry, OUT center geometry, OUT nearest geometry, OUT radius double precision | record
+ postgis | public | st_memcollect | geometry | geometry
+ postgis | public | st_memsize | geometry | integer
+ postgis | public | st_memunion | geometry | geometry
+ postgis | public | st_minimumboundingcircle | inputgeom geometry, segs_per_quarter integer | geometry
+ postgis | public | st_minimumboundingradius | geometry, OUT center geometry, OUT radius double precision | record
+ postgis | public | st_minimumclearance | geometry | double precision
+ postgis | public | st_minimumclearanceline | geometry | geometry
+ postgis | public | st_mlinefromtext | text | geometry
+ postgis | public | st_mlinefromtext | text, integer | geometry
+ postgis | public | st_mlinefromwkb | bytea, integer | geometry
+ postgis | public | st_mlinefromwkb | bytea | geometry
+ postgis | public | st_mpointfromtext | text | geometry
+ postgis | public | st_mpointfromtext | text, integer | geometry
+ postgis | public | st_mpointfromwkb | bytea, integer | geometry
+ postgis | public | st_mpointfromwkb | bytea | geometry
+ postgis | public | st_mpolyfromtext | text | geometry
+ postgis | public | st_mpolyfromtext | text, integer | geometry
+ postgis | public | st_mpolyfromwkb | bytea, integer | geometry
+ postgis | public | st_mpolyfromwkb | bytea | geometry
+ postgis | public | st_multi | geometry | geometry
+ postgis | public | st_multilinefromwkb | bytea | geometry
+ postgis | public | st_multilinestringfromtext | text | geometry
+ postgis | public | st_multilinestringfromtext | text, integer | geometry
+ postgis | public | st_multipointfromtext | text | geometry
+ postgis | public | st_multipointfromwkb | bytea, integer | geometry
+ postgis | public | st_multipointfromwkb | bytea | geometry
+ postgis | public | st_multipolyfromwkb | bytea, integer | geometry
+ postgis | public | st_multipolyfromwkb | bytea | geometry
+ postgis | public | st_multipolygonfromtext | text | geometry
+ postgis | public | st_multipolygonfromtext | text, integer | geometry
+ postgis | public | st_ndims | geometry | smallint
+ postgis | public | st_node | g geometry | geometry
+ postgis | public | st_normalize | geom geometry | geometry
+ postgis | public | st_npoints | geometry | integer
+ postgis | public | st_nrings | geometry | integer
+ postgis | public | st_numgeometries | geometry | integer
+ postgis | public | st_numinteriorring | geometry | integer
+ postgis | public | st_numinteriorrings | geometry | integer
+ postgis | public | st_numpatches | geometry | integer
+ postgis | public | st_numpoints | geometry | integer
+ postgis | public | st_offsetcurve | line geometry, distance double precision, params text | geometry
+ postgis | public | st_orderingequals | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_orientedenvelope | geometry | geometry
+ postgis | public | st_overlaps | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_patchn | geometry, integer | geometry
+ postgis | public | st_perimeter | geog geography, use_spheroid boolean | double precision
+ postgis | public | st_perimeter | geometry | double precision
+ postgis | public | st_perimeter2d | geometry | double precision
+ postgis | public | st_point | double precision, double precision, srid integer | geometry
+ postgis | public | st_point | double precision, double precision | geometry
+ postgis | public | st_pointfromgeohash | text, integer | geometry
+ postgis | public | st_pointfromtext | text | geometry
+ postgis | public | st_pointfromtext | text, integer | geometry
+ postgis | public | st_pointfromwkb | bytea, integer | geometry
+ postgis | public | st_pointfromwkb | bytea | geometry
+ postgis | public | st_pointinsidecircle | geometry, double precision, double precision, double precision | boolean
+ postgis | public | st_pointm | xcoordinate double precision, ycoordinate double precision, mcoordinate double precision, srid integer | geometry
+ postgis | public | st_pointn | geometry, integer | geometry
+ postgis | public | st_pointonsurface | geometry | geometry
+ postgis | public | st_points | geometry | geometry
+ postgis | public | st_pointz | xcoordinate double precision, ycoordinate double precision, zcoordinate double precision, srid integer | geometry
+ postgis | public | st_pointzm | xcoordinate double precision, ycoordinate double precision, zcoordinate double precision, mcoordinate double precision, srid integer | geometry
+ postgis | public | st_polyfromtext | text | geometry
+ postgis | public | st_polyfromtext | text, integer | geometry
+ postgis | public | st_polyfromwkb | bytea, integer | geometry
+ postgis | public | st_polyfromwkb | bytea | geometry
+ postgis | public | st_polygon | geometry, integer | geometry
+ postgis | public | st_polygonfromtext | text | geometry
+ postgis | public | st_polygonfromtext | text, integer | geometry
+ postgis | public | st_polygonfromwkb | bytea, integer | geometry
+ postgis | public | st_polygonfromwkb | bytea | geometry
+ postgis | public | st_polygonize | geometry[] | geometry
+ postgis | public | st_polygonize | geometry | geometry
+ postgis | public | st_project | geog geography, distance double precision, azimuth double precision | geography
+ postgis | public | st_quantizecoordinates | g geometry, prec_x integer, prec_y integer, prec_z integer, prec_m integer | geometry
+ postgis | public | st_reduceprecision | geom geometry, gridsize double precision | geometry
+ postgis | public | st_relate | geom1 geometry, geom2 geometry, integer | text
+ postgis | public | st_relate | geom1 geometry, geom2 geometry | text
+ postgis | public | st_relate | geom1 geometry, geom2 geometry, text | boolean
+ postgis | public | st_relatematch | text, text | boolean
+ postgis | public | st_removepoint | geometry, integer | geometry
+ postgis | public | st_removerepeatedpoints | geom geometry, tolerance double precision | geometry
+ postgis | public | st_reverse | geometry | geometry
+ postgis | public | st_rotate | geometry, double precision, geometry | geometry
+ postgis | public | st_rotate | geometry, double precision, double precision, double precision | geometry
+ postgis | public | st_rotate | geometry, double precision | geometry
+ postgis | public | st_rotatex | geometry, double precision | geometry
+ postgis | public | st_rotatey | geometry, double precision | geometry
+ postgis | public | st_rotatez | geometry, double precision | geometry
+ postgis | public | st_scale | geometry, double precision, double precision | geometry
+ postgis | public | st_scale | geometry, double precision, double precision, double precision | geometry
+ postgis | public | st_scale | geometry, geometry, origin geometry | geometry
+ postgis | public | st_scale | geometry, geometry | geometry
+ postgis | public | st_scroll | geometry, geometry | geometry
+ postgis | public | st_segmentize | geog geography, max_segment_length double precision | geography
+ postgis | public | st_segmentize | geometry, double precision | geometry
+ postgis | public | st_seteffectivearea | geometry, double precision, integer | geometry
+ postgis | public | st_setpoint | geometry, integer, geometry | geometry
+ postgis | public | st_setsrid | geog geography, srid integer | geography
+ postgis | public | st_setsrid | geom geometry, srid integer | geometry
+ postgis | public | st_sharedpaths | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_shiftlongitude | geometry | geometry
+ postgis | public | st_shortestline | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_simplify | geometry, double precision, boolean | geometry
+ postgis | public | st_simplify | geometry, double precision | geometry
+ postgis | public | st_simplifypolygonhull | geom geometry, vertex_fraction double precision, is_outer boolean | geometry
+ postgis | public | st_simplifypreservetopology | geometry, double precision | geometry
+ postgis | public | st_simplifyvw | geometry, double precision | geometry
+ postgis | public | st_snap | geom1 geometry, geom2 geometry, double precision | geometry
+ postgis | public | st_snaptogrid | geometry, double precision, double precision | geometry
+ postgis | public | st_snaptogrid | geom1 geometry, geom2 geometry, double precision, double precision, double precision, double precision | geometry
+ postgis | public | st_snaptogrid | geometry, double precision, double precision, double precision, double precision | geometry
+ postgis | public | st_snaptogrid | geometry, double precision | geometry
+ postgis | public | st_split | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_square | size double precision, cell_i integer, cell_j integer, origin geometry | geometry
+ postgis | public | st_squaregrid | size double precision, bounds geometry, OUT geom geometry, OUT i integer, OUT j integer | SETOF record
+ postgis | public | st_srid | geog geography | integer
+ postgis | public | st_srid | geom geometry | integer
+ postgis | public | st_startpoint | geometry | geometry
+ postgis | public | st_subdivide | geom geometry, maxvertices integer, gridsize double precision | SETOF geometry
+ postgis | public | st_summary | geography | text
+ postgis | public | st_summary | geometry | text
+ postgis | public | st_swapordinates | geom geometry, ords cstring | geometry
+ postgis | public | st_symdifference | geom1 geometry, geom2 geometry, gridsize double precision | geometry
+ postgis | public | st_symmetricdifference | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_tileenvelope | zoom integer, x integer, y integer, bounds geometry, margin double precision | geometry
+ postgis | public | st_touches | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_transform | geom geometry, to_proj text | geometry
+ postgis | public | st_transform | geom geometry, from_proj text, to_srid integer | geometry
+ postgis | public | st_transform | geometry, integer | geometry
+ postgis | public | st_transform | geom geometry, from_proj text, to_proj text | geometry
+ postgis | public | st_translate | geometry, double precision, double precision | geometry
+ postgis | public | st_translate | geometry, double precision, double precision, double precision | geometry
+ postgis | public | st_transscale | geometry, double precision, double precision, double precision, double precision | geometry
+ postgis | public | st_triangulatepolygon | g1 geometry | geometry
+ postgis | public | st_unaryunion | geometry, gridsize double precision | geometry
+ postgis | public | st_union | geom1 geometry, geom2 geometry, gridsize double precision | geometry
+ postgis | public | st_union | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_union | geometry[] | geometry
+ postgis | public | st_union | geometry, gridsize double precision | geometry
+ postgis | public | st_union | geometry | geometry
+ postgis | public | st_voronoilines | g1 geometry, tolerance double precision, extend_to geometry | geometry
+ postgis | public | st_voronoipolygons | g1 geometry, tolerance double precision, extend_to geometry | geometry
+ postgis | public | st_within | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_wkbtosql | wkb bytea | geometry
+ postgis | public | st_wkttosql | text | geometry
+ postgis | public | st_wrapx | geom geometry, wrap double precision, move double precision | geometry
+ postgis | public | st_x | geometry | double precision
+ postgis | public | st_xmax | box3d | double precision
+ postgis | public | st_xmin | box3d | double precision
+ postgis | public | st_y | geometry | double precision
+ postgis | public | st_ymax | box3d | double precision
+ postgis | public | st_ymin | box3d | double precision
+ postgis | public | st_z | geometry | double precision
+ postgis | public | st_zmax | box3d | double precision
+ postgis | public | st_zmflag | geometry | smallint
+ postgis | public | st_zmin | box3d | double precision
+ postgis | public | text | geometry | text
+ postgis | public | unlockrows | text | integer
+ postgis | public | updategeometrysrid | catalogn_name character varying, schema_name character varying, table_name character varying, column_name character varying, new_srid_in integer | text
+ postgis | public | updategeometrysrid | character varying, character varying, character varying, integer | text
+ postgis | public | updategeometrysrid | character varying, character varying, integer | text
+ postgis_raster | public | __st_countagg_transfn | agg agg_count, rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | agg_count
+ postgis_raster | public | _add_overview_constraint | ovschema name, ovtable name, ovcolumn name, refschema name, reftable name, refcolumn name, factor integer | boolean
+ postgis_raster | public | _add_raster_constraint | cn name, sql text | boolean
+ postgis_raster | public | _add_raster_constraint_alignment | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_blocksize | rastschema name, rasttable name, rastcolumn name, axis text | boolean
+ postgis_raster | public | _add_raster_constraint_coverage_tile | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_extent | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_nodata_values | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_num_bands | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_out_db | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_pixel_types | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_scale | rastschema name, rasttable name, rastcolumn name, axis character | boolean
+ postgis_raster | public | _add_raster_constraint_spatially_unique | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_srid | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_overview_constraint | ovschema name, ovtable name, ovcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint | rastschema name, rasttable name, cn name | boolean
+ postgis_raster | public | _drop_raster_constraint_alignment | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_blocksize | rastschema name, rasttable name, rastcolumn name, axis text | boolean
+ postgis_raster | public | _drop_raster_constraint_coverage_tile | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_extent | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_nodata_values | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_num_bands | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_out_db | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_pixel_types | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_regular_blocking | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_scale | rastschema name, rasttable name, rastcolumn name, axis character | boolean
+ postgis_raster | public | _drop_raster_constraint_spatially_unique | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_srid | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _overview_constraint | ov raster, factor integer, refschema name, reftable name, refcolumn name | boolean
+ postgis_raster | public | _overview_constraint_info | ovschema name, ovtable name, ovcolumn name, OUT refschema name, OUT reftable name, OUT refcolumn name, OUT factor integer | record
+ postgis_raster | public | _raster_constraint_info_alignment | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _raster_constraint_info_blocksize | rastschema name, rasttable name, rastcolumn name, axis text | integer
+ postgis_raster | public | _raster_constraint_info_coverage_tile | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _raster_constraint_info_extent | rastschema name, rasttable name, rastcolumn name | geometry
+ postgis_raster | public | _raster_constraint_info_index | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _raster_constraint_info_nodata_values | rastschema name, rasttable name, rastcolumn name | double precision[]
+ postgis_raster | public | _raster_constraint_info_num_bands | rastschema name, rasttable name, rastcolumn name | integer
+ postgis_raster | public | _raster_constraint_info_out_db | rastschema name, rasttable name, rastcolumn name | boolean[]
+ postgis_raster | public | _raster_constraint_info_pixel_types | rastschema name, rasttable name, rastcolumn name | text[]
+ postgis_raster | public | _raster_constraint_info_regular_blocking | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _raster_constraint_info_scale | rastschema name, rasttable name, rastcolumn name, axis character | double precision
+ postgis_raster | public | _raster_constraint_info_spatially_unique | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _raster_constraint_info_srid | rastschema name, rasttable name, rastcolumn name | integer
+ postgis_raster | public | _raster_constraint_nodata_values | rast raster | numeric[]
+ postgis_raster | public | _raster_constraint_out_db | rast raster | boolean[]
+ postgis_raster | public | _raster_constraint_pixel_types | rast raster | text[]
+ postgis_raster | public | _st_aspect4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | _st_asraster | geom geometry, scalex double precision, scaley double precision, width integer, height integer, pixeltype text[], value double precision[], nodataval double precision[], upperleftx double precision, upperlefty double precision, gridx double precision, gridy double precision, skewx double precision, skewy double precision, touched boolean | raster
+ postgis_raster | public | _st_clip | rast raster, nband integer[], geom geometry, nodataval double precision[], crop boolean | raster
+ postgis_raster | public | _st_colormap | rast raster, nband integer, colormap text, method text | raster
+ postgis_raster | public | _st_contains | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | _st_containsproperly | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | _st_convertarray4ma | value double precision[] | double precision[]
+ postgis_raster | public | _st_count | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | bigint
+ postgis_raster | public | _st_countagg_finalfn | agg agg_count | bigint
+ postgis_raster | public | _st_countagg_transfn | agg agg_count, rast raster, nband integer, exclude_nodata_value boolean | agg_count
+ postgis_raster | public | _st_countagg_transfn | agg agg_count, rast raster, exclude_nodata_value boolean | agg_count
+ postgis_raster | public | _st_countagg_transfn | agg agg_count, rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | agg_count
+ postgis_raster | public | _st_coveredby | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | _st_covers | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | _st_dfullywithin | rast1 raster, nband1 integer, rast2 raster, nband2 integer, distance double precision | boolean
+ postgis_raster | public | _st_dwithin | rast1 raster, nband1 integer, rast2 raster, nband2 integer, distance double precision | boolean
+ postgis_raster | public | _st_gdalwarp | rast raster, algorithm text, maxerr double precision, srid integer, scalex double precision, scaley double precision, gridx double precision, gridy double precision, skewx double precision, skewy double precision, width integer, height integer | raster
+ postgis_raster | public | _st_grayscale4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | _st_hillshade4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | _st_histogram | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, bins integer, width double precision[], "right" boolean, min double precision, max double precision, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record
+ postgis_raster | public | _st_intersects | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | _st_intersects | geom geometry, rast raster, nband integer | boolean
+ postgis_raster | public | _st_mapalgebra | rastbandargset rastbandarg[], expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster
+ postgis_raster | public | _st_mapalgebra | rastbandargset rastbandarg[], callbackfunc regprocedure, pixeltype text, distancex integer, distancey integer, extenttype text, customextent raster, mask double precision[], weighted boolean, VARIADIC userargs text[] | raster
+ postgis_raster | public | _st_neighborhood | rast raster, band integer, columnx integer, rowy integer, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[]
+ postgis_raster | public | _st_overlaps | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | _st_pixelascentroids | rast raster, band integer, columnx integer, rowy integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer)
+ postgis_raster | public | _st_pixelaspolygons | rast raster, band integer, columnx integer, rowy integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer)
+ postgis_raster | public | _st_quantile | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record
+ postgis_raster | public | _st_rastertoworldcoord | rast raster, columnx integer, rowy integer, OUT longitude double precision, OUT latitude double precision | record
+ postgis_raster | public | _st_reclass | rast raster, VARIADIC reclassargset reclassarg[] | raster
+ postgis_raster | public | _st_roughness4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | _st_samealignment_finalfn | agg agg_samealignment | boolean
+ postgis_raster | public | _st_samealignment_transfn | agg agg_samealignment, rast raster | agg_samealignment
+ postgis_raster | public | _st_setvalues | rast raster, nband integer, x integer, y integer, newvalueset double precision[], noset boolean[], hasnosetvalue boolean, nosetvalue double precision, keepnodata boolean | raster
+ postgis_raster | public | _st_slope4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | _st_summarystats | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | summarystats
+ postgis_raster | public | _st_summarystats_finalfn | internal | summarystats
+ postgis_raster | public | _st_summarystats_transfn | internal, raster, integer, boolean, double precision | internal
+ postgis_raster | public | _st_summarystats_transfn | internal, raster, boolean, double precision | internal
+ postgis_raster | public | _st_summarystats_transfn | internal, raster, integer, boolean | internal
+ postgis_raster | public | _st_tile | rast raster, width integer, height integer, nband integer[], padwithnodata boolean, nodataval double precision | SETOF raster
+ postgis_raster | public | _st_touches | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | _st_tpi4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | _st_tri4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | _st_union_finalfn | internal | raster
+ postgis_raster | public | _st_union_transfn | internal, raster, integer, text | internal
+ postgis_raster | public | _st_union_transfn | internal, raster | internal
+ postgis_raster | public | _st_union_transfn | internal, raster, text | internal
+ postgis_raster | public | _st_union_transfn | internal, raster, integer | internal
+ postgis_raster | public | _st_union_transfn | internal, raster, unionarg[] |
internal + postgis_raster | public | _st_valuecount | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer, OUT percent double precision | SETOF record + postgis_raster | public | _st_valuecount | rast raster, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer, OUT percent double precision | SETOF record + postgis_raster | public | _st_within | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | _st_worldtorastercoord | rast raster, longitude double precision, latitude double precision, OUT columnx integer, OUT rowy integer | record + postgis_raster | public | _updaterastersrid | schema_name name, table_name name, column_name name, new_srid integer | boolean + postgis_raster | public | addoverviewconstraints | ovschema name, ovtable name, ovcolumn name, refschema name, reftable name, refcolumn name, ovfactor integer | boolean + postgis_raster | public | addoverviewconstraints | ovtable name, ovcolumn name, reftable name, refcolumn name, ovfactor integer | boolean + postgis_raster | public | addrasterconstraints | rastschema name, rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean + postgis_raster | public | addrasterconstraints | rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean + postgis_raster | public | addrasterconstraints | rastschema name, rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean + postgis_raster | public | addrasterconstraints | rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean + postgis_raster | public | box3d | raster | box3d + postgis_raster | public | bytea | raster | bytea + postgis_raster | public | dropoverviewconstraints | ovtable name, ovcolumn name | boolean + postgis_raster | public | dropoverviewconstraints | ovschema name, ovtable name, ovcolumn name | boolean + postgis_raster | public | droprasterconstraints | rastschema name, rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean + postgis_raster | public | droprasterconstraints | rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean + postgis_raster | public | droprasterconstraints | rastschema name, rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean + postgis_raster | public | droprasterconstraints | rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean + postgis_raster | public | geometry_contained_by_raster | geometry, raster | boolean + postgis_raster | public | 
geometry_raster_contain | geometry, raster | boolean + postgis_raster | public | geometry_raster_overlap | geometry, raster | boolean + postgis_raster | public | postgis_gdal_version | | text + postgis_raster | public | postgis_noop | raster | geometry + postgis_raster | public | postgis_raster_lib_build_date | | text + postgis_raster | public | postgis_raster_lib_version | | text + postgis_raster | public | postgis_raster_scripts_installed | | text + postgis_raster | public | raster_above | raster, raster | boolean + postgis_raster | public | raster_below | raster, raster | boolean + postgis_raster | public | raster_contain | raster, raster | boolean + postgis_raster | public | raster_contained | raster, raster | boolean + postgis_raster | public | raster_contained_by_geometry | raster, geometry | boolean + postgis_raster | public | raster_eq | raster, raster | boolean + postgis_raster | public | raster_geometry_contain | raster, geometry | boolean + postgis_raster | public | raster_geometry_overlap | raster, geometry | boolean + postgis_raster | public | raster_hash | raster | integer + postgis_raster | public | raster_in | cstring | raster + postgis_raster | public | raster_left | raster, raster | boolean + postgis_raster | public | raster_out | raster | cstring + postgis_raster | public | raster_overabove | raster, raster | boolean + postgis_raster | public | raster_overbelow | raster, raster | boolean + postgis_raster | public | raster_overlap | raster, raster | boolean + postgis_raster | public | raster_overleft | raster, raster | boolean + postgis_raster | public | raster_overright | raster, raster | boolean + postgis_raster | public | raster_right | raster, raster | boolean + postgis_raster | public | raster_same | raster, raster | boolean + postgis_raster | public | st_addband | rast raster, index integer, outdbfile text, outdbindex integer[], nodataval double precision | raster + postgis_raster | public | st_addband | rast raster, outdbfile text, outdbindex integer[], index integer, nodataval double precision | raster + postgis_raster | public | st_addband | rast raster, addbandargset addbandarg[] | raster + postgis_raster | public | st_addband | rast raster, pixeltype text, initialvalue double precision, nodataval double precision | raster + postgis_raster | public | st_addband | rast raster, index integer, pixeltype text, initialvalue double precision, nodataval double precision | raster + postgis_raster | public | st_addband | torast raster, fromrast raster, fromband integer, torastindex integer | raster + postgis_raster | public | st_addband | torast raster, fromrasts raster[], fromband integer, torastindex integer | raster + postgis_raster | public | st_approxcount | rast raster, exclude_nodata_value boolean, sample_percent double precision | bigint + postgis_raster | public | st_approxcount | rast raster, sample_percent double precision | bigint + postgis_raster | public | st_approxcount | rast raster, nband integer, sample_percent double precision | bigint + postgis_raster | public | st_approxcount | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | bigint + postgis_raster | public | st_approxhistogram | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_approxhistogram | rast raster, sample_percent double precision, 
OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_approxhistogram | rast raster, nband integer, sample_percent double precision, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_approxhistogram | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_approxhistogram | rast raster, nband integer, sample_percent double precision, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_approxhistogram | rast raster, nband integer, sample_percent double precision, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_approxquantile | rast raster, exclude_nodata_value boolean, quantile double precision | double precision + postgis_raster | public | st_approxquantile | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, quantile double precision | double precision + postgis_raster | public | st_approxquantile | rast raster, nband integer, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record + postgis_raster | public | st_approxquantile | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record + postgis_raster | public | st_approxquantile | rast raster, quantile double precision | double precision + postgis_raster | public | st_approxquantile | rast raster, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record + postgis_raster | public | st_approxquantile | rast raster, nband integer, sample_percent double precision, quantile double precision | double precision + postgis_raster | public | st_approxquantile | rast raster, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record + postgis_raster | public | st_approxquantile | rast raster, sample_percent double precision, quantile double precision | double precision + postgis_raster | public | st_approxsummarystats | rast raster, exclude_nodata_value boolean, sample_percent double precision | summarystats + postgis_raster | public | st_approxsummarystats | rast raster, sample_percent double precision | summarystats + postgis_raster | public | st_approxsummarystats | rast raster, nband integer, sample_percent double precision | summarystats + postgis_raster | public | st_approxsummarystats | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | summarystats + postgis_raster | public | st_asbinary | raster, outasin boolean | bytea + postgis_raster | public | st_asgdalraster | rast raster, format text, options text[], srid integer | bytea + postgis_raster | public | st_ashexwkb | raster, outasin boolean | text + postgis_raster | public | st_asjpeg | 
rast raster, options text[] | bytea + postgis_raster | public | st_asjpeg | rast raster, nbands integer[], options text[] | bytea + postgis_raster | public | st_asjpeg | rast raster, nband integer, options text[] | bytea + postgis_raster | public | st_asjpeg | rast raster, nbands integer[], quality integer | bytea + postgis_raster | public | st_asjpeg | rast raster, nband integer, quality integer | bytea + postgis_raster | public | st_aspect | rast raster, nband integer, customextent raster, pixeltype text, units text, interpolate_nodata boolean | raster + postgis_raster | public | st_aspect | rast raster, nband integer, pixeltype text, units text, interpolate_nodata boolean | raster + postgis_raster | public | st_aspng | rast raster, options text[] | bytea + postgis_raster | public | st_aspng | rast raster, nbands integer[], options text[] | bytea + postgis_raster | public | st_aspng | rast raster, nband integer, compression integer | bytea + postgis_raster | public | st_aspng | rast raster, nband integer, options text[] | bytea + postgis_raster | public | st_aspng | rast raster, nbands integer[], compression integer | bytea + postgis_raster | public | st_asraster | geom geometry, ref raster, pixeltype text[], value double precision[], nodataval double precision[], touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, scalex double precision, scaley double precision, pixeltype text, value double precision, nodataval double precision, upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, width integer, height integer, pixeltype text[], value double precision[], nodataval double precision[], upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, scalex double precision, scaley double precision, gridx double precision, gridy double precision, pixeltype text[], value double precision[], nodataval double precision[], skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, width integer, height integer, gridx double precision, gridy double precision, pixeltype text, value double precision, nodataval double precision, skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, ref raster, pixeltype text, value double precision, nodataval double precision, touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, scalex double precision, scaley double precision, gridx double precision, gridy double precision, pixeltype text, value double precision, nodataval double precision, skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, width integer, height integer, gridx double precision, gridy double precision, pixeltype text[], value double precision[], nodataval double precision[], skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | st_asraster | geom geometry, width integer, height integer, pixeltype text, value double precision, nodataval double precision, upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | 
st_asraster | geom geometry, scalex double precision, scaley double precision, pixeltype text[], value double precision[], nodataval double precision[], upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster + postgis_raster | public | st_astiff | rast raster, options text[], srid integer | bytea + postgis_raster | public | st_astiff | rast raster, compression text, srid integer | bytea + postgis_raster | public | st_astiff | rast raster, nbands integer[], compression text, srid integer | bytea + postgis_raster | public | st_astiff | rast raster, nbands integer[], options text[], srid integer | bytea + postgis_raster | public | st_aswkb | raster, outasin boolean | bytea + postgis_raster | public | st_band | rast raster, nbands text, delimiter character | raster + postgis_raster | public | st_band | rast raster, nbands integer[] | raster + postgis_raster | public | st_band | rast raster, nband integer | raster + postgis_raster | public | st_bandfilesize | rast raster, band integer | bigint + postgis_raster | public | st_bandfiletimestamp | rast raster, band integer | bigint + postgis_raster | public | st_bandisnodata | rast raster, forcechecking boolean | boolean + postgis_raster | public | st_bandisnodata | rast raster, band integer, forcechecking boolean | boolean + postgis_raster | public | st_bandmetadata | rast raster, band integer | TABLE(pixeltype text, nodatavalue double precision, isoutdb boolean, path text, outdbbandnum integer, filesize bigint, filetimestamp bigint) + postgis_raster | public | st_bandmetadata | rast raster, band integer[] | TABLE(bandnum integer, pixeltype text, nodatavalue double precision, isoutdb boolean, path text, outdbbandnum integer, filesize bigint, filetimestamp bigint) + postgis_raster | public | st_bandnodatavalue | rast raster, band integer | double precision + postgis_raster | public | st_bandpath | rast raster, band integer | text + postgis_raster | public | st_bandpixeltype | rast raster, band integer | text + postgis_raster | public | st_clip | rast raster, geom geometry, nodataval double precision[], crop boolean | raster + postgis_raster | public | st_clip | rast raster, geom geometry, nodataval double precision, crop boolean | raster + postgis_raster | public | st_clip | rast raster, nband integer, geom geometry, nodataval double precision, crop boolean | raster + postgis_raster | public | st_clip | rast raster, nband integer[], geom geometry, nodataval double precision[], crop boolean | raster + postgis_raster | public | st_clip | rast raster, geom geometry, crop boolean | raster + postgis_raster | public | st_clip | rast raster, nband integer, geom geometry, crop boolean | raster + postgis_raster | public | st_colormap | rast raster, colormap text, method text | raster + postgis_raster | public | st_colormap | rast raster, nband integer, colormap text, method text | raster + postgis_raster | public | st_contains | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_contains | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_containsproperly | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_containsproperly | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_contour | rast raster, bandnumber integer, level_interval double precision, level_base double precision, fixed_levels double precision[], polygonize boolean | TABLE(geom geometry, 
id integer, value double precision) + postgis_raster | public | st_convexhull | raster | geometry + postgis_raster | public | st_count | rast raster, nband integer, exclude_nodata_value boolean | bigint + postgis_raster | public | st_count | rast raster, exclude_nodata_value boolean | bigint + postgis_raster | public | st_countagg | raster, integer, boolean, double precision | bigint + postgis_raster | public | st_countagg | raster, integer, boolean | bigint + postgis_raster | public | st_countagg | raster, boolean | bigint + postgis_raster | public | st_coveredby | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_coveredby | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_covers | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_covers | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_createoverview | tab regclass, col name, factor integer, algo text | regclass + postgis_raster | public | st_dfullywithin | rast1 raster, nband1 integer, rast2 raster, nband2 integer, distance double precision | boolean + postgis_raster | public | st_dfullywithin | rast1 raster, rast2 raster, distance double precision | boolean + postgis_raster | public | st_disjoint | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_disjoint | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_distinct4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_distinct4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision + postgis_raster | public | st_dumpaspolygons | rast raster, band integer, exclude_nodata_value boolean | SETOF geomval + postgis_raster | public | st_dumpvalues | rast raster, nband integer, exclude_nodata_value boolean | double precision[] + postgis_raster | public | st_dumpvalues | rast raster, nband integer[], exclude_nodata_value boolean | TABLE(nband integer, valarray double precision[]) + postgis_raster | public | st_dwithin | rast1 raster, nband1 integer, rast2 raster, nband2 integer, distance double precision | boolean + postgis_raster | public | st_dwithin | rast1 raster, rast2 raster, distance double precision | boolean + postgis_raster | public | st_envelope | raster | geometry + postgis_raster | public | st_fromgdalraster | gdaldata bytea, srid integer | raster + postgis_raster | public | st_gdaldrivers | OUT idx integer, OUT short_name text, OUT long_name text, OUT can_read boolean, OUT can_write boolean, OUT create_options text | SETOF record + postgis_raster | public | st_georeference | rast raster, format text | text + postgis_raster | public | st_geotransform | raster, OUT imag double precision, OUT jmag double precision, OUT theta_i double precision, OUT theta_ij double precision, OUT xoffset double precision, OUT yoffset double precision | record + postgis_raster | public | st_grayscale | rastbandargset rastbandarg[], extenttype text | raster + postgis_raster | public | st_grayscale | rast raster, redband integer, greenband integer, blueband integer, extenttype text | raster + postgis_raster | public | st_hasnoband | rast raster, nband integer | boolean + postgis_raster | public | st_height | raster | integer + postgis_raster | public | st_hillshade | rast raster, nband integer, customextent raster, pixeltype text, azimuth double precision, altitude double precision, max_bright double 
precision, scale double precision, interpolate_nodata boolean | raster + postgis_raster | public | st_hillshade | rast raster, nband integer, pixeltype text, azimuth double precision, altitude double precision, max_bright double precision, scale double precision, interpolate_nodata boolean | raster + postgis_raster | public | st_histogram | rast raster, nband integer, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_histogram | rast raster, nband integer, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_histogram | rast raster, nband integer, exclude_nodata_value boolean, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_histogram | rast raster, nband integer, exclude_nodata_value boolean, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record + postgis_raster | public | st_interpolateraster | geom geometry, options text, rast raster, bandnumber integer | raster + postgis_raster | public | st_intersection | rast1 raster, rast2 raster, returnband text, nodataval double precision[] | raster + postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, returnband text, nodataval double precision | raster + postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, returnband text, nodataval double precision[] | raster + postgis_raster | public | st_intersection | geomin geometry, rast raster, band integer | SETOF geomval + postgis_raster | public | st_intersection | rast1 raster, rast2 raster, nodataval double precision | raster + postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, nodataval double precision | raster + postgis_raster | public | st_intersection | rast1 raster, rast2 raster, returnband text, nodataval double precision | raster + postgis_raster | public | st_intersection | rast raster, band integer, geomin geometry | SETOF geomval + postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, nodataval double precision[] | raster + postgis_raster | public | st_intersection | rast raster, geomin geometry | SETOF geomval + postgis_raster | public | st_intersection | rast1 raster, rast2 raster, nodataval double precision[] | raster + postgis_raster | public | st_intersects | rast raster, geom geometry, nband integer | boolean + postgis_raster | public | st_intersects | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_intersects | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_intersects | rast raster, nband integer, geom geometry | boolean + postgis_raster | public | st_intersects | geom geometry, rast raster, nband integer | boolean + postgis_raster | public | st_invdistweight4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_iscoveragetile | rast raster, coverage raster, tilewidth integer, tileheight integer | boolean + postgis_raster | public | 
st_isempty | rast raster | boolean + postgis_raster | public | st_makeemptycoverage | tilewidth integer, tileheight integer, width integer, height integer, upperleftx double precision, upperlefty double precision, scalex double precision, scaley double precision, skewx double precision, skewy double precision, srid integer | SETOF raster + postgis_raster | public | st_makeemptyraster | width integer, height integer, upperleftx double precision, upperlefty double precision, scalex double precision, scaley double precision, skewx double precision, skewy double precision, srid integer | raster + postgis_raster | public | st_makeemptyraster | width integer, height integer, upperleftx double precision, upperlefty double precision, pixelsize double precision | raster + postgis_raster | public | st_makeemptyraster | rast raster | raster + postgis_raster | public | st_mapalgebra | rast1 raster, rast2 raster, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster + postgis_raster | public | st_mapalgebra | rast raster, pixeltype text, expression text, nodataval double precision | raster + postgis_raster | public | st_mapalgebra | rast1 raster, band1 integer, rast2 raster, band2 integer, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster + postgis_raster | public | st_mapalgebra | rast raster, nband integer[], callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster + postgis_raster | public | st_mapalgebra | rast raster, nband integer, callbackfunc regprocedure, mask double precision[], weighted boolean, pixeltype text, extenttype text, customextent raster, VARIADIC userargs text[] | raster + postgis_raster | public | st_mapalgebra | rastbandargset rastbandarg[], callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster + postgis_raster | public | st_mapalgebra | rast1 raster, nband1 integer, rast2 raster, nband2 integer, callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster + postgis_raster | public | st_mapalgebra | rast raster, nband integer, callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster + postgis_raster | public | st_mapalgebra | rast raster, nband integer, pixeltype text, expression text, nodataval double precision | raster + postgis_raster | public | st_mapalgebraexpr | rast1 raster, rast2 raster, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster + postgis_raster | public | st_mapalgebraexpr | rast raster, pixeltype text, expression text, nodataval double precision | raster + postgis_raster | public | st_mapalgebraexpr | rast1 raster, band1 integer, rast2 raster, band2 integer, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster + postgis_raster | public | st_mapalgebraexpr | rast raster, band integer, pixeltype text, expression text, nodataval double precision | raster + postgis_raster | public | st_mapalgebrafct | rast raster, onerastuserfunc regprocedure | raster + postgis_raster | public | 
st_mapalgebrafct | rast1 raster, band1 integer, rast2 raster, band2 integer, tworastuserfunc regprocedure, pixeltype text, extenttype text, VARIADIC userargs text[] | raster + postgis_raster | public | st_mapalgebrafct | rast raster, pixeltype text, onerastuserfunc regprocedure | raster + postgis_raster | public | st_mapalgebrafct | rast raster, onerastuserfunc regprocedure, VARIADIC args text[] | raster + postgis_raster | public | st_mapalgebrafct | rast raster, pixeltype text, onerastuserfunc regprocedure, VARIADIC args text[] | raster + postgis_raster | public | st_mapalgebrafct | rast raster, band integer, onerastuserfunc regprocedure, VARIADIC args text[] | raster + postgis_raster | public | st_mapalgebrafct | rast raster, band integer, pixeltype text, onerastuserfunc regprocedure | raster + postgis_raster | public | st_mapalgebrafct | rast1 raster, rast2 raster, tworastuserfunc regprocedure, pixeltype text, extenttype text, VARIADIC userargs text[] | raster + postgis_raster | public | st_mapalgebrafct | rast raster, band integer, pixeltype text, onerastuserfunc regprocedure, VARIADIC args text[] | raster + postgis_raster | public | st_mapalgebrafct | rast raster, band integer, onerastuserfunc regprocedure | raster + postgis_raster | public | st_mapalgebrafctngb | rast raster, band integer, pixeltype text, ngbwidth integer, ngbheight integer, onerastngbuserfunc regprocedure, nodatamode text, VARIADIC args text[] | raster + postgis_raster | public | st_max4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_max4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision + postgis_raster | public | st_mean4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_mean4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision + postgis_raster | public | st_memsize | raster | integer + postgis_raster | public | st_metadata | rast raster, OUT upperleftx double precision, OUT upperlefty double precision, OUT width integer, OUT height integer, OUT scalex double precision, OUT scaley double precision, OUT skewx double precision, OUT skewy double precision, OUT srid integer, OUT numbands integer | record + postgis_raster | public | st_min4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_min4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision + postgis_raster | public | st_minconvexhull | rast raster, nband integer | geometry + postgis_raster | public | st_mindist4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_minpossiblevalue | pixeltype text | double precision + postgis_raster | public | st_nearestvalue | rast raster, columnx integer, rowy integer, exclude_nodata_value boolean | double precision + postgis_raster | public | st_nearestvalue | rast raster, band integer, columnx integer, rowy integer, exclude_nodata_value boolean | double precision + postgis_raster | public | st_nearestvalue | rast raster, band integer, pt geometry, exclude_nodata_value boolean | double precision + postgis_raster | public | st_nearestvalue | rast raster, pt geometry, exclude_nodata_value boolean | double precision + postgis_raster | public | st_neighborhood | rast raster, columnx integer, rowy integer, distancex integer, distancey integer, 
exclude_nodata_value boolean | double precision[] + postgis_raster | public | st_neighborhood | rast raster, band integer, pt geometry, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[] + postgis_raster | public | st_neighborhood | rast raster, band integer, columnx integer, rowy integer, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[] + postgis_raster | public | st_neighborhood | rast raster, pt geometry, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[] + postgis_raster | public | st_notsamealignmentreason | rast1 raster, rast2 raster | text + postgis_raster | public | st_numbands | raster | integer + postgis_raster | public | st_overlaps | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_overlaps | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_pixelascentroid | rast raster, x integer, y integer | geometry + postgis_raster | public | st_pixelascentroids | rast raster, band integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer) + postgis_raster | public | st_pixelaspoint | rast raster, x integer, y integer | geometry + postgis_raster | public | st_pixelaspoints | rast raster, band integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer) + postgis_raster | public | st_pixelaspolygon | rast raster, x integer, y integer | geometry + postgis_raster | public | st_pixelaspolygons | rast raster, band integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer) + postgis_raster | public | st_pixelheight | raster | double precision + postgis_raster | public | st_pixelofvalue | rast raster, nband integer, search double precision, exclude_nodata_value boolean | TABLE(x integer, y integer) + postgis_raster | public | st_pixelofvalue | rast raster, search double precision, exclude_nodata_value boolean | TABLE(x integer, y integer) + postgis_raster | public | st_pixelofvalue | rast raster, nband integer, search double precision[], exclude_nodata_value boolean | TABLE(val double precision, x integer, y integer) + postgis_raster | public | st_pixelofvalue | rast raster, search double precision[], exclude_nodata_value boolean | TABLE(val double precision, x integer, y integer) + postgis_raster | public | st_pixelwidth | raster | double precision + postgis_raster | public | st_polygon | rast raster, band integer | geometry + postgis_raster | public | st_quantile | rast raster, exclude_nodata_value boolean, quantile double precision | double precision + postgis_raster | public | st_quantile | rast raster, nband integer, quantile double precision | double precision + postgis_raster | public | st_quantile | rast raster, nband integer, exclude_nodata_value boolean, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record + postgis_raster | public | st_quantile | rast raster, quantile double precision | double precision + postgis_raster | public | st_quantile | rast raster, nband integer, exclude_nodata_value boolean, quantile double precision | double precision + postgis_raster | public | st_quantile | rast raster, nband integer, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record + postgis_raster | public | st_quantile | rast raster, quantiles double precision[], OUT quantile double 
precision, OUT value double precision | SETOF record + postgis_raster | public | st_range4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_range4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision + postgis_raster | public | st_rastertoworldcoord | rast raster, columnx integer, rowy integer, OUT longitude double precision, OUT latitude double precision | record + postgis_raster | public | st_rastertoworldcoordx | rast raster, xr integer | double precision + postgis_raster | public | st_rastertoworldcoordx | rast raster, xr integer, yr integer | double precision + postgis_raster | public | st_rastertoworldcoordy | rast raster, xr integer, yr integer | double precision + postgis_raster | public | st_rastertoworldcoordy | rast raster, yr integer | double precision + postgis_raster | public | st_rastfromhexwkb | text | raster + postgis_raster | public | st_rastfromwkb | bytea | raster + postgis_raster | public | st_reclass | rast raster, nband integer, reclassexpr text, pixeltype text, nodataval double precision | raster + postgis_raster | public | st_reclass | rast raster, VARIADIC reclassargset reclassarg[] | raster + postgis_raster | public | st_reclass | rast raster, reclassexpr text, pixeltype text | raster + postgis_raster | public | st_resample | rast raster, ref raster, usescale boolean, algorithm text, maxerr double precision | raster + postgis_raster | public | st_resample | rast raster, width integer, height integer, gridx double precision, gridy double precision, skewx double precision, skewy double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_resample | rast raster, scalex double precision, scaley double precision, gridx double precision, gridy double precision, skewx double precision, skewy double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_resample | rast raster, ref raster, algorithm text, maxerr double precision, usescale boolean | raster + postgis_raster | public | st_rescale | rast raster, scalexy double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_rescale | rast raster, scalex double precision, scaley double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_resize | rast raster, width integer, height integer, algorithm text, maxerr double precision | raster + postgis_raster | public | st_resize | rast raster, width text, height text, algorithm text, maxerr double precision | raster + postgis_raster | public | st_resize | rast raster, percentwidth double precision, percentheight double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_reskew | rast raster, skewx double precision, skewy double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_reskew | rast raster, skewxy double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_retile | tab regclass, col name, ext geometry, sfx double precision, sfy double precision, tw integer, th integer, algo text | SETOF raster + postgis_raster | public | st_rotation | raster | double precision + postgis_raster | public | st_roughness | rast raster, nband integer, customextent raster, pixeltype text, interpolate_nodata boolean | raster + postgis_raster | public | st_roughness | rast raster, nband integer, pixeltype text, 
interpolate_nodata boolean | raster + postgis_raster | public | st_samealignment | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_samealignment | raster | boolean + postgis_raster | public | st_samealignment | ulx1 double precision, uly1 double precision, scalex1 double precision, scaley1 double precision, skewx1 double precision, skewy1 double precision, ulx2 double precision, uly2 double precision, scalex2 double precision, scaley2 double precision, skewx2 double precision, skewy2 double precision | boolean + postgis_raster | public | st_scalex | raster | double precision + postgis_raster | public | st_scaley | raster | double precision + postgis_raster | public | st_setbandindex | rast raster, band integer, outdbindex integer, force boolean | raster + postgis_raster | public | st_setbandisnodata | rast raster, band integer | raster + postgis_raster | public | st_setbandnodatavalue | rast raster, band integer, nodatavalue double precision, forcechecking boolean | raster + postgis_raster | public | st_setbandnodatavalue | rast raster, nodatavalue double precision | raster + postgis_raster | public | st_setbandpath | rast raster, band integer, outdbpath text, outdbindex integer, force boolean | raster + postgis_raster | public | st_setgeoreference | rast raster, upperleftx double precision, upperlefty double precision, scalex double precision, scaley double precision, skewx double precision, skewy double precision | raster + postgis_raster | public | st_setgeoreference | rast raster, georef text, format text | raster + postgis_raster | public | st_setgeotransform | rast raster, imag double precision, jmag double precision, theta_i double precision, theta_ij double precision, xoffset double precision, yoffset double precision | raster + postgis_raster | public | st_setm | rast raster, geom geometry, resample text, band integer | geometry + postgis_raster | public | st_setrotation | rast raster, rotation double precision | raster + postgis_raster | public | st_setscale | rast raster, scalex double precision, scaley double precision | raster + postgis_raster | public | st_setscale | rast raster, scale double precision | raster + postgis_raster | public | st_setskew | rast raster, skewx double precision, skewy double precision | raster + postgis_raster | public | st_setskew | rast raster, skew double precision | raster + postgis_raster | public | st_setsrid | rast raster, srid integer | raster + postgis_raster | public | st_setupperleft | rast raster, upperleftx double precision, upperlefty double precision | raster + postgis_raster | public | st_setvalue | rast raster, nband integer, geom geometry, newvalue double precision | raster + postgis_raster | public | st_setvalue | rast raster, geom geometry, newvalue double precision | raster + postgis_raster | public | st_setvalue | rast raster, x integer, y integer, newvalue double precision | raster + postgis_raster | public | st_setvalue | rast raster, band integer, x integer, y integer, newvalue double precision | raster + postgis_raster | public | st_setvalues | rast raster, nband integer, x integer, y integer, newvalueset double precision[], nosetvalue double precision, keepnodata boolean | raster + postgis_raster | public | st_setvalues | rast raster, nband integer, x integer, y integer, newvalueset double precision[], noset boolean[], keepnodata boolean | raster + postgis_raster | public | st_setvalues | rast raster, nband integer, x integer, y integer, width integer, height integer, newvalue double precision, 
keepnodata boolean | raster + postgis_raster | public | st_setvalues | rast raster, x integer, y integer, width integer, height integer, newvalue double precision, keepnodata boolean | raster + postgis_raster | public | st_setvalues | rast raster, nband integer, geomvalset geomval[], keepnodata boolean | raster + postgis_raster | public | st_setz | rast raster, geom geometry, resample text, band integer | geometry + postgis_raster | public | st_skewx | raster | double precision + postgis_raster | public | st_skewy | raster | double precision + postgis_raster | public | st_slope | rast raster, nband integer, customextent raster, pixeltype text, units text, scale double precision, interpolate_nodata boolean | raster + postgis_raster | public | st_slope | rast raster, nband integer, pixeltype text, units text, scale double precision, interpolate_nodata boolean | raster + postgis_raster | public | st_snaptogrid | rast raster, gridx double precision, gridy double precision, scalex double precision, scaley double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_snaptogrid | rast raster, gridx double precision, gridy double precision, algorithm text, maxerr double precision, scalex double precision, scaley double precision | raster + postgis_raster | public | st_snaptogrid | rast raster, gridx double precision, gridy double precision, scalexy double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_srid | raster | integer + postgis_raster | public | st_stddev4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_stddev4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision + postgis_raster | public | st_sum4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision + postgis_raster | public | st_sum4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision + postgis_raster | public | st_summary | rast raster | text + postgis_raster | public | st_summarystats | rast raster, nband integer, exclude_nodata_value boolean | summarystats + postgis_raster | public | st_summarystats | rast raster, exclude_nodata_value boolean | summarystats + postgis_raster | public | st_summarystatsagg | raster, integer, boolean, double precision | summarystats + postgis_raster | public | st_summarystatsagg | raster, integer, boolean | summarystats + postgis_raster | public | st_summarystatsagg | raster, boolean, double precision | summarystats + postgis_raster | public | st_tile | rast raster, nband integer, width integer, height integer, padwithnodata boolean, nodataval double precision | SETOF raster + postgis_raster | public | st_tile | rast raster, nband integer[], width integer, height integer, padwithnodata boolean, nodataval double precision | SETOF raster + postgis_raster | public | st_tile | rast raster, width integer, height integer, padwithnodata boolean, nodataval double precision | SETOF raster + postgis_raster | public | st_touches | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_touches | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_tpi | rast raster, nband integer, customextent raster, pixeltype text, interpolate_nodata boolean | raster + postgis_raster | public | st_tpi | rast raster, nband integer, pixeltype text, interpolate_nodata boolean | raster + postgis_raster | public | 
st_transform | rast raster, srid integer, algorithm text, maxerr double precision, scalex double precision, scaley double precision | raster + postgis_raster | public | st_transform | rast raster, srid integer, scalexy double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_transform | rast raster, alignto raster, algorithm text, maxerr double precision | raster + postgis_raster | public | st_transform | rast raster, srid integer, scalex double precision, scaley double precision, algorithm text, maxerr double precision | raster + postgis_raster | public | st_tri | rast raster, nband integer, customextent raster, pixeltype text, interpolate_nodata boolean | raster + postgis_raster | public | st_tri | rast raster, nband integer, pixeltype text, interpolate_nodata boolean | raster + postgis_raster | public | st_union | raster, integer | raster + postgis_raster | public | st_union | raster | raster + postgis_raster | public | st_union | raster, unionarg[] | raster + postgis_raster | public | st_union | raster, integer, text | raster + postgis_raster | public | st_union | raster, text | raster + postgis_raster | public | st_upperleftx | raster | double precision + postgis_raster | public | st_upperlefty | raster | double precision + postgis_raster | public | st_value | rast raster, band integer, x integer, y integer, exclude_nodata_value boolean | double precision + postgis_raster | public | st_value | rast raster, x integer, y integer, exclude_nodata_value boolean | double precision + postgis_raster | public | st_value | rast raster, band integer, pt geometry, exclude_nodata_value boolean, resample text | double precision + postgis_raster | public | st_value | rast raster, pt geometry, exclude_nodata_value boolean | double precision + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rast raster, nband integer, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rast raster, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rast raster, nband integer, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rast raster, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rast raster, searchvalues double precision[], roundto double precision, OUT 
value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rast raster, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuepercent | rast raster, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_valuepercent | rast raster, nband integer, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rast raster, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rast raster, nband integer, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_valuepercent | rast raster, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rast raster, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_width | raster | integer + postgis_raster | public | st_within | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_within | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_worldtorastercoord | rast raster, longitude double precision, latitude double precision, OUT columnx integer, OUT rowy integer | record + postgis_raster | public | st_worldtorastercoord | rast raster, pt geometry, OUT columnx integer, OUT rowy integer | record + postgis_raster | public | st_worldtorastercoordx | rast raster, xw double precision | integer + postgis_raster | public | st_worldtorastercoordx | rast raster, xw double precision, yw double precision | integer + postgis_raster | public | st_worldtorastercoordx | rast 
raster, pt geometry | integer
+ postgis_raster | public | st_worldtorastercoordy | rast raster, xw double precision, yw double precision | integer
+ postgis_raster | public | st_worldtorastercoordy | rast raster, pt geometry | integer
+ postgis_raster | public | st_worldtorastercoordy | rast raster, yw double precision | integer
+ postgis_raster | public | updaterastersrid | table_name name, column_name name, new_srid integer | boolean
+ postgis_raster | public | updaterastersrid | schema_name name, table_name name, column_name name, new_srid integer | boolean
+ postgis_sfcgal | public | postgis_sfcgal_full_version | | text
+ postgis_sfcgal | public | postgis_sfcgal_noop | geometry | geometry
+ postgis_sfcgal | public | postgis_sfcgal_scripts_installed | | text
+ postgis_sfcgal | public | postgis_sfcgal_version | | text
+ postgis_sfcgal | public | st_3darea | geometry | double precision
+ postgis_sfcgal | public | st_3dconvexhull | geometry | geometry
+ postgis_sfcgal | public | st_3ddifference | geom1 geometry, geom2 geometry | geometry
+ postgis_sfcgal | public | st_3dintersection | geom1 geometry, geom2 geometry | geometry
+ postgis_sfcgal | public | st_3dunion | geom1 geometry, geom2 geometry | geometry
+ postgis_sfcgal | public | st_3dunion | geometry | geometry
+ postgis_sfcgal | public | st_alphashape | g1 geometry, alpha double precision, allow_holes boolean | geometry
+ postgis_sfcgal | public | st_approximatemedialaxis | geometry | geometry
+ postgis_sfcgal | public | st_constraineddelaunaytriangles | geometry | geometry
+ postgis_sfcgal | public | st_extrude | geometry, double precision, double precision, double precision | geometry
+ postgis_sfcgal | public | st_forcelhr | geometry | geometry
+ postgis_sfcgal | public | st_isplanar | geometry | boolean
+ postgis_sfcgal | public | st_issolid | geometry | boolean
+ postgis_sfcgal | public | st_makesolid | geometry | geometry
+ postgis_sfcgal | public | st_minkowskisum | geometry, geometry | geometry
+ postgis_sfcgal | public | st_optimalalphashape | g1 geometry, allow_holes boolean, nb_components integer | geometry
+ postgis_sfcgal | public | st_orientation | geometry | integer
+ postgis_sfcgal | public | st_straightskeleton | geometry | geometry
+ postgis_sfcgal | public | st_tesselate | geometry | geometry
+ postgis_sfcgal | public | st_volume | geometry | double precision
+ postgis_tiger_geocoder | tiger | count_words | character varying | integer
+ postgis_tiger_geocoder | tiger | create_census_base_tables | | text
+ postgis_tiger_geocoder | tiger | cull_null | character varying | character varying
+ postgis_tiger_geocoder | tiger | diff_zip | zip1 character varying, zip2 character varying | integer
+ postgis_tiger_geocoder | tiger | drop_dupe_featnames_generate_script | | text
+ postgis_tiger_geocoder | tiger | drop_indexes_generate_script | tiger_data_schema text | text
+ postgis_tiger_geocoder | tiger | drop_nation_tables_generate_script | param_schema text | text
+ postgis_tiger_geocoder | tiger | drop_state_tables_generate_script | param_state text, param_schema text | text
+ postgis_tiger_geocoder | tiger | end_soundex | character varying | character varying
+ postgis_tiger_geocoder | tiger | geocode | input character varying, max_results integer, restrict_geom geometry, OUT addy norm_addy, OUT geomout geometry, OUT rating integer | SETOF record
+ postgis_tiger_geocoder | tiger | geocode | in_addy norm_addy, max_results integer, restrict_geom geometry, OUT addy norm_addy, OUT geomout geometry, OUT rating integer | SETOF record
+ postgis_tiger_geocoder | tiger | geocode_address | parsed norm_addy, max_results integer, restrict_geom geometry, OUT addy norm_addy, OUT geomout geometry, OUT rating integer | SETOF record
+ postgis_tiger_geocoder | tiger | geocode_intersection | roadway1 text, roadway2 text, in_state text, in_city text, in_zip text, num_results integer, OUT addy norm_addy, OUT geomout geometry, OUT rating integer | SETOF record
+ postgis_tiger_geocoder | tiger | geocode_location | parsed norm_addy, restrict_geom geometry, OUT addy norm_addy, OUT geomout geometry, OUT rating integer | SETOF record
+ postgis_tiger_geocoder | tiger | get_geocode_setting | setting_name text | text
+ postgis_tiger_geocoder | tiger | get_last_words | inputstring character varying, count integer | character varying
+ postgis_tiger_geocoder | tiger | get_tract | loc_geom geometry, output_field text | text
+ postgis_tiger_geocoder | tiger | greatest_hn | fromhn character varying, tohn character varying | integer
+ postgis_tiger_geocoder | tiger | includes_address | given_address integer, addr1 integer, addr2 integer, addr3 integer, addr4 integer | boolean
+ postgis_tiger_geocoder | tiger | install_geocode_settings | | void
+ postgis_tiger_geocoder | tiger | install_missing_indexes | | boolean
+ postgis_tiger_geocoder | tiger | install_pagc_tables | | void
+ postgis_tiger_geocoder | tiger | interpolate_from_address | given_address integer, in_addr1 character varying, in_addr2 character varying, in_road geometry, in_side character varying, in_offset_m double precision | geometry
+ postgis_tiger_geocoder | tiger | is_pretype | text | boolean
+ postgis_tiger_geocoder | tiger | least_hn | fromhn character varying, tohn character varying | integer
+ postgis_tiger_geocoder | tiger | levenshtein_ignore_case | character varying, character varying | integer
+ postgis_tiger_geocoder | tiger | loader_generate_census_script | param_states text[], os text | SETOF text
+ postgis_tiger_geocoder | tiger | loader_generate_nation_script | os text | SETOF text
+ postgis_tiger_geocoder | tiger | loader_generate_script | param_states text[], os text | SETOF text
+ postgis_tiger_geocoder | tiger | loader_load_staged_data | param_staging_table text, param_target_table text | integer
+ postgis_tiger_geocoder | tiger | loader_load_staged_data | param_staging_table text, param_target_table text, param_columns_exclude text[] | integer
+ postgis_tiger_geocoder | tiger | loader_macro_replace | param_input text, param_keys text[], param_values text[] | text
+ postgis_tiger_geocoder | tiger | location_extract | fullstreet character varying, stateabbrev character varying | character varying
+ postgis_tiger_geocoder | tiger | location_extract_countysub_exact | fullstreet character varying, stateabbrev character varying | character varying
+ postgis_tiger_geocoder | tiger | location_extract_countysub_fuzzy | fullstreet character varying, stateabbrev character varying | character varying
+ postgis_tiger_geocoder | tiger | location_extract_place_exact | fullstreet character varying, stateabbrev character varying | character varying
+ postgis_tiger_geocoder | tiger | location_extract_place_fuzzy | fullstreet character varying, stateabbrev character varying | character varying
+ postgis_tiger_geocoder | tiger | missing_indexes_generate_script | | text
+ postgis_tiger_geocoder | tiger | normalize_address | in_rawinput character varying | norm_addy
+ postgis_tiger_geocoder | tiger | nullable_levenshtein | character varying, character varying | integer
+ postgis_tiger_geocoder | tiger | numeric_streets_equal | input_street character varying, output_street character varying | boolean
+ postgis_tiger_geocoder | tiger | pagc_normalize_address | in_rawinput character varying | norm_addy
+ postgis_tiger_geocoder | tiger | pprint_addy | input norm_addy | character varying
+ postgis_tiger_geocoder | tiger | rate_attributes | dirpa character varying, dirpb character varying, streetnamea character varying, streetnameb character varying, streettypea character varying, streettypeb character varying, dirsa character varying, dirsb character varying, prequalabr character varying | integer
+ postgis_tiger_geocoder | tiger | rate_attributes | dirpa character varying, dirpb character varying, streetnamea character varying, streetnameb character varying, streettypea character varying, streettypeb character varying, dirsa character varying, dirsb character varying, locationa character varying, locationb character varying, prequalabr character varying | integer
+ postgis_tiger_geocoder | tiger | reverse_geocode | pt geometry, include_strnum_range boolean, OUT intpt geometry[], OUT addy norm_addy[], OUT street character varying[] | record
+ postgis_tiger_geocoder | tiger | set_geocode_setting | setting_name text, setting_value text | text
+ postgis_tiger_geocoder | tiger | setsearchpathforinstall | a_schema_name text | text
+ postgis_tiger_geocoder | tiger | state_extract | rawinput character varying | character varying
+ postgis_tiger_geocoder | tiger | topology_load_tiger | toponame character varying, region_type character varying, region_id character varying | text
+ postgis_tiger_geocoder | tiger | utmzone | geometry | integer
+ postgis_tiger_geocoder | tiger | zip_range | zip text, range_start integer, range_end integer | character varying[]
+ postgis_topology | topology | _asgmledge | edge_id integer, start_node integer, end_node integer, line geometry, visitedtable regclass, nsprefix_in text, prec integer, options integer, idprefix text, gmlver integer | text
+ postgis_topology | topology | _asgmlface | toponame text, face_id integer, visitedtable regclass, nsprefix_in text, prec integer, options integer, idprefix text, gmlver integer | text
+ postgis_topology | topology | _asgmlnode | id integer, point geometry, nsprefix_in text, prec integer, options integer, idprefix text, gmlver integer | text
+ postgis_topology | topology | _checkedgelinking | curedge_edge_id integer, prevedge_edge_id integer, prevedge_next_left_edge integer, prevedge_next_right_edge integer | validatetopology_returntype
+ postgis_topology | topology | _st_adjacentedges | atopology character varying, anode integer, anedge integer | integer[]
+ postgis_topology | topology | _st_mintolerance | ageom geometry | double precision
+ postgis_topology | topology | _st_mintolerance | atopology character varying, ageom geometry | double precision
+ postgis_topology | topology | _validatetopologyedgelinking | bbox geometry | SETOF validatetopology_returntype
+ postgis_topology | topology | _validatetopologygetfaceshellmaximaledgering | atopology character varying, aface integer | geometry
+ postgis_topology | topology | _validatetopologygetringedges | starting_edge integer | integer[]
+ postgis_topology | topology | _validatetopologyrings | bbox geometry | SETOF validatetopology_returntype
+ postgis_topology | topology | addedge | atopology character varying, aline geometry | integer
+ postgis_topology | topology | addface | atopology character varying, apoly geometry, force_new boolean | integer
+ postgis_topology | topology | addnode | atopology character varying, apoint geometry, allowedgesplitting boolean, setcontainingface boolean | integer
+ postgis_topology | topology | addtopogeometrycolumn | toponame character varying, schema character varying, tbl character varying, col character varying, ltype character varying, child integer | integer
+ postgis_topology | topology | addtopogeometrycolumn | character varying, character varying, character varying, character varying, character varying | integer
+ postgis_topology | topology | addtosearchpath | a_schema_name character varying | text
+ postgis_topology | topology | asgml | tg topogeometry, nsprefix text, prec integer, options integer, vis regclass | text
+ postgis_topology | topology | asgml | tg topogeometry, nsprefix text, prec integer, opts integer | text
+ postgis_topology | topology | asgml | tg topogeometry, nsprefix text | text
+ postgis_topology | topology | asgml | tg topogeometry, nsprefix_in text, precision_in integer, options_in integer, visitedtable regclass, idprefix text, gmlver integer | text
+ postgis_topology | topology | asgml | tg topogeometry | text
+ postgis_topology | topology | asgml | tg topogeometry, nsprefix text, prec integer, options integer, visitedtable regclass, idprefix text | text
+ postgis_topology | topology | asgml | tg topogeometry, visitedtable regclass, nsprefix text | text
+ postgis_topology | topology | asgml | tg topogeometry, visitedtable regclass | text
+ postgis_topology | topology | astopojson | tg topogeometry, edgemaptable regclass | text
+ postgis_topology | topology | cleartopogeom | tg topogeometry | topogeometry
+ postgis_topology | topology | copytopology | atopology character varying, newtopo character varying | integer
+ postgis_topology | topology | createtopogeom | toponame character varying, tg_type integer, layer_id integer, tg_objs topoelementarray | topogeometry
+ postgis_topology | topology | createtopogeom | toponame character varying, tg_type integer, layer_id integer | topogeometry
+ postgis_topology | topology | createtopology | atopology character varying, srid integer, prec double precision, hasz boolean | integer
+ postgis_topology | topology | createtopology | character varying, integer | integer
+ postgis_topology | topology | createtopology | toponame character varying, srid integer, prec double precision | integer
+ postgis_topology | topology | createtopology | character varying | integer
+ postgis_topology | topology | droptopogeometrycolumn | schema character varying, tbl character varying, col character varying | text
+ postgis_topology | topology | droptopology | atopology character varying | text
+ postgis_topology | topology | equals | tg1 topogeometry, tg2 topogeometry | boolean
+ postgis_topology | topology | findlayer | schema_name name, table_name name, feature_column name | layer
+ postgis_topology | topology | findlayer | topology_id integer, layer_id integer | layer
+ postgis_topology | topology | findlayer | layer_table regclass, feature_column name | layer
+ postgis_topology | topology | findlayer | tg topogeometry | layer
+ postgis_topology | topology | findtopology | integer | topology
+ postgis_topology | topology | findtopology | text | topology
+ postgis_topology | topology | findtopology | topogeometry | topology
+ postgis_topology | topology | findtopology | name, name, name | topology
+ postgis_topology | topology | findtopology | regclass, name | topology
+ postgis_topology | topology | geometry | topogeom topogeometry | geometry
+ postgis_topology | topology | geometrytype | tg topogeometry | text
+ postgis_topology | topology | getedgebypoint | atopology character varying, apoint geometry, tol1 double precision | integer
+ postgis_topology | topology | getfacebypoint | atopology character varying, apoint geometry, tol1 double precision | integer
+ postgis_topology | topology | getfacecontainingpoint | atopology text, apoint geometry | integer
+ postgis_topology | topology | getnodebypoint | atopology character varying, apoint geometry, tol1 double precision | integer
+ postgis_topology | topology | getnodeedges | atopology character varying, anode integer | SETOF getfaceedges_returntype
+ postgis_topology | topology | getringedges | atopology character varying, anedge integer, maxedges integer | SETOF getfaceedges_returntype
+ postgis_topology | topology | gettopogeomelementarray | toponame character varying, layer_id integer, tgid integer | topoelementarray
+ postgis_topology | topology | gettopogeomelementarray | tg topogeometry | topoelementarray
+ postgis_topology | topology | gettopogeomelements | tg topogeometry | SETOF topoelement
+ postgis_topology | topology | gettopogeomelements | toponame character varying, layerid integer, tgid integer | SETOF topoelement
+ postgis_topology | topology | gettopologyid | toponame character varying | integer
+ postgis_topology | topology | gettopologyname | topoid integer | character varying
+ postgis_topology | topology | gettopologysrid | toponame character varying | integer
+ postgis_topology | topology | intersects | tg1 topogeometry, tg2 topogeometry | boolean
+ postgis_topology | topology | layertrigger | | trigger
+ postgis_topology | topology | polygonize | toponame character varying | text
+ postgis_topology | topology | populate_topology_layer | | TABLE(schema_name text, table_name text, feature_column text)
+ postgis_topology | topology | postgis_topology_scripts_installed | | text
+ postgis_topology | topology | relationtrigger | | trigger
+ postgis_topology | topology | removeunusedprimitives | atopology text, bbox geometry | integer
+ postgis_topology | topology | st_addedgemodface | atopology character varying, anode integer, anothernode integer, acurve geometry | integer
+ postgis_topology | topology | st_addedgenewfaces | atopology character varying, anode integer, anothernode integer, acurve geometry | integer
+ postgis_topology | topology | st_addisoedge | atopology character varying, anode integer, anothernode integer, acurve geometry | integer
+ postgis_topology | topology | st_addisonode | atopology character varying, aface integer, apoint geometry | integer
+ postgis_topology | topology | st_changeedgegeom | atopology character varying, anedge integer, acurve geometry | text
+ postgis_topology | topology | st_createtopogeo | atopology character varying, acollection geometry | text
+ postgis_topology | topology | st_geometrytype | tg topogeometry | text
+ postgis_topology | topology | st_getfaceedges | toponame character varying, face_id integer | SETOF getfaceedges_returntype
+ postgis_topology | topology | st_getfacegeometry | toponame character varying, aface integer | geometry
+ postgis_topology | topology | st_inittopogeo | atopology character varying | text
+ postgis_topology | topology | st_modedgeheal | toponame character varying, e1id integer, e2id integer | integer
+ postgis_topology | topology | st_modedgesplit | atopology character varying, anedge integer, apoint geometry | integer
+ postgis_topology | topology | st_moveisonode | atopology character varying, anode integer, apoint geometry | text
+ postgis_topology | topology | st_newedgeheal | toponame character varying, e1id integer, e2id integer | integer
+ postgis_topology | topology | st_newedgessplit | atopology character varying, anedge integer, apoint geometry | integer
+ postgis_topology | topology | st_remedgemodface | toponame character varying, e1id integer | integer
+ postgis_topology | topology | st_remedgenewface | toponame character varying, e1id integer | integer
+ postgis_topology | topology | st_remisonode | character varying, integer | text
+ postgis_topology | topology | st_removeisoedge | atopology character varying, anedge integer | text
+ postgis_topology | topology | st_removeisonode | atopology character varying, anode integer | text
+ postgis_topology | topology | st_simplify | tg topogeometry, tolerance double precision | geometry
+ postgis_topology | topology | st_srid | tg topogeometry | integer
+ postgis_topology | topology | topoelementarray_agg | topoelement | topoelementarray
+ postgis_topology | topology | topoelementarray_append | topoelementarray, topoelement | topoelementarray
+ postgis_topology | topology | topogeo_addgeometry | atopology character varying, ageom geometry, tolerance double precision | void
+ postgis_topology | topology | topogeo_addlinestring | atopology character varying, aline geometry, tolerance double precision | SETOF integer
+ postgis_topology | topology | topogeo_addpoint | atopology character varying, apoint geometry, tolerance double precision | integer
+ postgis_topology | topology | topogeo_addpolygon | atopology character varying, apoly geometry, tolerance double precision | SETOF integer
+ postgis_topology | topology | topogeom_addelement | tg topogeometry, el topoelement | topogeometry
+ postgis_topology | topology | topogeom_addtopogeom | tgt topogeometry, src topogeometry | topogeometry
+ postgis_topology | topology | topogeom_remelement | tg topogeometry, el topoelement | topogeometry
+ postgis_topology | topology | topologysummary | atopology character varying | text
+ postgis_topology | topology | totopogeom | ageom geometry, atopology character varying, alayer integer, atolerance double precision | topogeometry
+ postgis_topology | topology | totopogeom | ageom geometry, tg topogeometry, atolerance double precision | topogeometry
+ postgis_topology | topology | validatetopology | toponame character varying, bbox geometry | SETOF validatetopology_returntype
+ postgis_topology | topology | validatetopologyrelation | toponame character varying | TABLE(error text, layer_id integer, topogeo_id integer, element_id integer)
+ postgres_fdw | public | postgres_fdw_disconnect | text | boolean
+ postgres_fdw | public | postgres_fdw_disconnect_all | | boolean
+ postgres_fdw | public | postgres_fdw_get_connections | OUT server_name text, OUT valid boolean | SETOF record
+ postgres_fdw | public | postgres_fdw_handler | | fdw_handler
+ postgres_fdw | public | postgres_fdw_validator | text[], oid | void
+ refint | public | check_foreign_key | | trigger
+ refint | public | check_primary_key | | trigger
+ rum | public | rum_anyarray_config | internal | void
+ rum | public | rum_anyarray_consistent | internal, smallint, anyarray, integer, internal, internal, internal, internal | boolean
+ rum | public | rum_anyarray_distance | anyarray, anyarray | double precision
+ rum | public | rum_anyarray_ordering | internal, smallint, anyarray, integer, internal, internal, internal, internal, internal | double precision
+ rum | public | rum_anyarray_similar | anyarray, anyarray | boolean
+ rum | public | rum_bit_compare_prefix | bit, bit, smallint, internal | integer
+ rum | public | rum_bit_extract_query | bit, internal, smallint, internal, internal | internal
+ rum | public | rum_bit_extract_value | bit, internal | internal
+ rum | public | rum_btree_consistent | internal, smallint, internal, integer, internal, internal, internal, internal | boolean
+ rum | public | rum_bytea_compare_prefix | bytea, bytea, smallint, internal | integer
+ rum | public | rum_bytea_extract_query | bytea, internal, smallint, internal, internal | internal
+ rum | public | rum_bytea_extract_value | bytea, internal | internal
+ rum | public | rum_char_compare_prefix | "char", "char", smallint, internal | integer
+ rum | public | rum_char_extract_query | "char", internal, smallint, internal, internal | internal
+ rum | public | rum_char_extract_value | "char", internal | internal
+ rum | public | rum_cidr_compare_prefix | cidr, cidr, smallint, internal | integer
+ rum | public | rum_cidr_extract_query | cidr, internal, smallint, internal, internal | internal
+ rum | public | rum_cidr_extract_value | cidr, internal | internal
+ rum | public | rum_date_compare_prefix | date, date, smallint, internal | integer
+ rum | public | rum_date_extract_query | date, internal, smallint, internal, internal | internal
+ rum | public | rum_date_extract_value | date, internal | internal
+ rum | public | rum_extract_anyarray | anyarray, internal, internal, internal, internal | internal
+ rum | public | rum_extract_anyarray_query | anyarray, internal, smallint, internal, internal, internal, internal | internal
+ rum | public | rum_extract_tsquery | tsquery, internal, smallint, internal, internal, internal, internal | internal
+ rum | public | rum_extract_tsquery_hash | tsquery, internal, smallint, internal, internal, internal, internal | internal
+ rum | public | rum_extract_tsvector | tsvector, internal, internal, internal, internal | internal
+ rum | public | rum_extract_tsvector_hash | tsvector, internal, internal, internal, internal | internal
+ rum | public | rum_float4_compare_prefix | real, real, smallint, internal | integer
+ rum | public | rum_float4_config | internal | void
+ rum | public | rum_float4_distance | real, real | double precision
+ rum | public | rum_float4_extract_query | real, internal, smallint, internal, internal | internal
+ rum | public | rum_float4_extract_value | real, internal | internal
+ rum | public | rum_float4_key_distance | real, real, smallint | double precision
+ rum | public | rum_float4_left_distance | real, real | double precision
+ rum | public | rum_float4_outer_distance | real, real, smallint | double precision
+ rum | public | rum_float4_right_distance | real, real | double precision
+ rum | public | rum_float8_compare_prefix | double precision, double precision, smallint, internal | integer
+ rum | public | rum_float8_config | internal | void
+ rum | public | rum_float8_distance | double precision, double precision | double precision
+ rum | public | rum_float8_extract_query | double precision, internal, smallint, internal, internal | internal
+ rum | public | rum_float8_extract_value | double precision, internal | internal
+ rum | public | rum_float8_key_distance | double precision, double precision, smallint | double precision
+ rum | public | rum_float8_left_distance | double precision, double precision | double precision
+ rum | public | rum_float8_outer_distance | double precision, double precision, smallint | double precision
+ rum | public | rum_float8_right_distance | double precision, double precision | double precision
+ rum | public | rum_inet_compare_prefix | inet, inet, smallint, internal | integer
+ rum | public | rum_inet_extract_query | inet, internal, smallint, internal, internal | internal
+ rum | public | rum_inet_extract_value | inet, internal | internal
+ rum | public | rum_int2_compare_prefix | smallint, smallint, smallint, internal | integer
+ rum | public | rum_int2_config | internal | void
+ rum | public | rum_int2_distance | smallint, smallint | double precision
+ rum | public | rum_int2_extract_query | smallint, internal, smallint, internal, internal | internal
+ rum | public | rum_int2_extract_value | smallint, internal | internal
+ rum | public | rum_int2_key_distance | smallint, smallint, smallint | double precision
+ rum | public | rum_int2_left_distance | smallint, smallint | double precision
+ rum | public | rum_int2_outer_distance | smallint, smallint, smallint | double precision
+ rum | public | rum_int2_right_distance | smallint, smallint | double precision
+ rum | public | rum_int4_compare_prefix | integer, integer, smallint, internal | integer
+ rum | public | rum_int4_config | internal | void
+ rum | public | rum_int4_distance | integer, integer | double precision
+ rum | public | rum_int4_extract_query | integer, internal, smallint, internal, internal | internal
+ rum | public | rum_int4_extract_value | integer, internal | internal
+ rum | public | rum_int4_key_distance | integer, integer, smallint | double precision
+ rum | public | rum_int4_left_distance | integer, integer | double precision
+ rum | public | rum_int4_outer_distance | integer, integer, smallint | double precision
+ rum | public | rum_int4_right_distance | integer, integer | double precision
+ rum | public | rum_int8_compare_prefix | bigint, bigint, smallint, internal | integer
+ rum | public | rum_int8_config | internal | void
+ rum | public | rum_int8_distance | bigint, bigint | double precision
+ rum | public | rum_int8_extract_query | bigint, internal, smallint, internal, internal | internal
+ rum | public | rum_int8_extract_value | bigint, internal | internal
+ rum | public | rum_int8_key_distance | bigint, bigint, smallint | double precision
+ rum | public | rum_int8_left_distance | bigint, bigint | double precision
+ rum | public | rum_int8_outer_distance | bigint, bigint, smallint | double precision
+ rum | public | rum_int8_right_distance | bigint, bigint | double precision
+ rum | public | rum_interval_compare_prefix | interval, interval, smallint, internal | integer
+ rum | public | rum_interval_extract_query | interval, internal, smallint, internal, internal | internal
+ rum | public | rum_interval_extract_value | interval, internal | internal
+ rum | public | rum_macaddr_compare_prefix | macaddr, macaddr, smallint, internal | integer
+ rum | public | rum_macaddr_extract_query | macaddr, internal, smallint, internal, internal | internal
+ rum | public | rum_macaddr_extract_value | macaddr, internal | internal
+ rum | public | rum_money_compare_prefix | money, money, smallint, internal | integer
+ rum | public | rum_money_config | internal | void
+ rum | public | rum_money_distance | money, money | double precision
+ rum | public | rum_money_extract_query | money, internal, smallint, internal, internal | internal
+ rum | public | rum_money_extract_value | money, internal | internal
+ rum | public | rum_money_key_distance | money, money, smallint | double precision
+ rum | public | rum_money_left_distance | money, money | double precision
+ rum | public | rum_money_outer_distance | money, money, smallint | double precision
+ rum | public | rum_money_right_distance | money, money | double precision
+ rum | public | rum_numeric_cmp | numeric, numeric | integer
+ rum | public | rum_numeric_compare_prefix | numeric, numeric, smallint, internal | integer
+ rum | public | rum_numeric_extract_query | numeric, internal, smallint, internal, internal | internal
+ rum | public | rum_numeric_extract_value | numeric, internal | internal
+ rum | public | rum_oid_compare_prefix | oid, oid, smallint, internal | integer
+ rum | public | rum_oid_config | internal | void
+ rum | public | rum_oid_distance | oid, oid | double precision
+ rum | public | rum_oid_extract_query | oid, internal, smallint, internal, internal | internal
+ rum | public | rum_oid_extract_value | oid, internal | internal
+ rum | public | rum_oid_key_distance | oid, oid, smallint | double precision
+ rum | public | rum_oid_left_distance | oid, oid | double precision
+ rum | public | rum_oid_outer_distance | oid, oid, smallint | double precision
+ rum | public | rum_oid_right_distance | oid, oid | double precision
+ rum | public | rum_text_compare_prefix | text, text, smallint, internal | integer
+ rum | public | rum_text_extract_query | text, internal, smallint, internal, internal | internal
+ rum | public | rum_text_extract_value | text, internal | internal
+ rum | public | rum_time_compare_prefix | time without time zone, time without time zone, smallint, internal | integer
+ rum | public | rum_time_extract_query | time without time zone, internal, smallint, internal, internal | internal
+ rum | public | rum_time_extract_value | time without time zone, internal | internal
+ rum | public | rum_timestamp_compare_prefix | timestamp without time zone, timestamp without time zone, smallint, internal | integer
+ rum | public | rum_timestamp_config | internal | void
+ rum | public | rum_timestamp_consistent | internal, smallint, timestamp without time zone, integer, internal, internal, internal, internal | boolean
+ rum | public | rum_timestamp_distance | timestamp without time zone, timestamp without time zone | double precision
+ rum | public | rum_timestamp_extract_query | timestamp without time zone, internal, smallint, internal, internal, internal, internal | internal
+ rum | public | rum_timestamp_extract_value | timestamp without time zone, internal, internal, internal, internal | internal
+ rum | public | rum_timestamp_key_distance | timestamp without time zone, timestamp without time zone, smallint | double precision
+ rum | public | rum_timestamp_left_distance | timestamp without time zone, timestamp without time zone | double precision
+ rum | public | rum_timestamp_outer_distance | timestamp without time zone, timestamp without time zone, smallint | double precision
+ rum | public | rum_timestamp_right_distance | timestamp without time zone, timestamp without time zone | double precision
+ rum | public | rum_timestamptz_distance | timestamp with time zone, timestamp with time zone | double precision
+ rum | public | rum_timestamptz_key_distance | timestamp with time zone, timestamp with time zone, smallint | double precision
+ rum | public | rum_timestamptz_left_distance | timestamp with time zone, timestamp with time zone | double precision
+ rum | public | rum_timestamptz_right_distance | timestamp with time zone, timestamp with time zone | double precision
+ rum | public | rum_timetz_compare_prefix | time with time zone, time with time zone, smallint, internal | integer
+ rum | public | rum_timetz_extract_query | time with time zone, internal, smallint, internal, internal | internal
+ rum | public | rum_timetz_extract_value | time with time zone, internal | internal
+ rum | public | rum_ts_distance | tsvector, tsquery, integer | real
+ rum | public | rum_ts_distance | tsvector, tsquery | real
+ rum | public | rum_ts_distance | tsvector, rum_distance_query | real
+ rum | public | rum_ts_join_pos | internal, internal | bytea
+ rum | public | rum_ts_score | tsvector, tsquery, integer | real
+ rum | public | rum_ts_score | tsvector, tsquery | real
+ rum | public | rum_ts_score | tsvector, rum_distance_query | real
+ rum | public | rum_tsquery_addon_consistent | internal, smallint, tsvector, integer, internal, internal, internal, internal | boolean
+ rum | public | rum_tsquery_consistent | internal, smallint, tsvector, integer, internal, internal, internal, internal | boolean
+ rum | public | rum_tsquery_distance | internal, smallint, tsvector, integer, internal, internal, internal, internal, internal | double precision
+ rum | public | rum_tsquery_pre_consistent | internal, smallint, tsvector, integer, internal, internal, internal, internal | boolean
+ rum | public | rum_tsvector_config | internal | void
+ rum | public | rum_varbit_compare_prefix | bit varying, bit varying, smallint, internal | integer
+ rum | public | rum_varbit_extract_query | bit varying, internal, smallint, internal, internal | internal
+ rum | public | rum_varbit_extract_value | bit varying, internal | internal
+ rum | public | rumhandler | internal | index_am_handler
+ rum | public | ruminv_extract_tsquery | tsquery, internal, internal, internal, internal | internal
+ rum | public | ruminv_extract_tsvector | tsvector, internal, smallint, internal, internal, internal, internal | internal
+ rum | public | ruminv_tsquery_config | internal | void
+ rum | public | ruminv_tsvector_consistent | internal, smallint, tsvector, integer, internal, internal, internal, internal | boolean
+ rum | public | tsquery_to_distance_query | tsquery | rum_distance_query
+ seg | public | gseg_consistent | internal, seg, smallint, oid, internal | boolean
+ seg | public | gseg_penalty | internal, internal, internal | internal
+ seg | public | gseg_picksplit | internal, internal | internal
+ seg | public | gseg_same | seg, seg, internal | internal
+ seg | public | gseg_union | internal, internal | seg
+ seg | public | seg_center | seg | real
+ seg | public | seg_cmp | seg, seg | integer
+ seg | public | seg_contained | seg, seg | boolean
+ seg | public | seg_contains | seg, seg | boolean
+ seg | public | seg_different | seg, seg | boolean
+ seg | public | seg_ge | seg, seg | boolean
+ seg | public | seg_gt | seg, seg | boolean
+ seg | public | seg_in | cstring | seg
+ seg | public | seg_inter | seg, seg | seg
+ seg | public | seg_le | seg, seg | boolean
+ seg | public | seg_left | seg, seg | boolean
+ seg | public | seg_lower | seg | real
+ seg | public | seg_lt | seg, seg | boolean
+ seg | public | seg_out | seg | cstring
+ seg | public | seg_over_left | seg, seg | boolean
+ seg | public | seg_over_right | seg, seg | boolean
+ seg | public | seg_overlap | seg, seg | boolean
+ seg | public | seg_right | seg, seg | boolean
+ seg | public | seg_same | seg, seg | boolean
+ seg | public | seg_size | seg | real
+ seg | public | seg_union | seg, seg | seg
+ seg | public | seg_upper | seg | real
+ sslinfo | public | ssl_cipher | | text
+ sslinfo | public | ssl_client_cert_present | | boolean
+ sslinfo | public | ssl_client_dn | | text
+ sslinfo | public | ssl_client_dn_field | text | text
+ sslinfo | public | ssl_client_serial | | numeric
+ sslinfo | public | ssl_extension_info | OUT name text, OUT value text, OUT critical boolean | SETOF record
+ sslinfo | public | ssl_is_used | | boolean
+ sslinfo | public | ssl_issuer_dn | | text
+ sslinfo | public | ssl_issuer_field | text | text
+ sslinfo | public | ssl_version | | text
+ supabase_vault | vault | create_secret | new_secret text, new_name text, new_description text, new_key_id uuid | uuid
+ supabase_vault | vault | update_secret | secret_id uuid, new_secret text, new_name text, new_description text, new_key_id uuid | void
+ tablefunc | public | connectby | text, text, text, text, integer, text | SETOF record
+ tablefunc | public | connectby | text, text, text, text, text, integer, text | SETOF record
+ tablefunc | public | connectby | text, text, text, text, integer | SETOF record
+ tablefunc | public | connectby | text, text, text, text, text, integer | SETOF record
+ tablefunc | public | crosstab | text | SETOF record
+ tablefunc | public | crosstab | text, integer | SETOF record
+ tablefunc | public | crosstab | text, text | SETOF record
+ tablefunc | public | crosstab2 | text | SETOF tablefunc_crosstab_2
+ tablefunc | public | crosstab3 | text | SETOF tablefunc_crosstab_3
+ tablefunc | public | crosstab4 | text | SETOF tablefunc_crosstab_4
+ tablefunc | public | normal_rand | integer, double precision, double precision | SETOF double precision
+ tcn | public | triggered_change_notification | | trigger
+ timescaledb | _timescaledb_debug | extension_state | | text
+ timescaledb | _timescaledb_functions | alter_job_set_hypertable_id | job_id integer, hypertable regclass | integer
+ timescaledb | _timescaledb_functions | attach_osm_table_chunk | hypertable regclass, chunk regclass | boolean
+ timescaledb | _timescaledb_functions | bookend_deserializefunc | bytea, internal | internal
+ timescaledb | _timescaledb_functions | bookend_finalfunc | internal, anyelement, "any" | anyelement
+ timescaledb | _timescaledb_functions | bookend_serializefunc | internal | bytea
+ timescaledb | _timescaledb_functions | cagg_get_bucket_function_info | mat_hypertable_id integer, OUT bucket_func regprocedure, OUT bucket_width text, OUT bucket_origin text, OUT bucket_offset text, OUT bucket_timezone text, OUT bucket_fixed_width boolean | record
+ timescaledb | _timescaledb_functions | cagg_migrate_create_plan | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _cagg_name_new text, IN _override boolean, IN _drop_old boolean |
+ timescaledb | _timescaledb_functions | cagg_migrate_execute_copy_data | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_functions | cagg_migrate_execute_copy_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_functions | cagg_migrate_execute_create_new_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_functions | cagg_migrate_execute_disable_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_functions | cagg_migrate_execute_drop_old_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_functions | cagg_migrate_execute_enable_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_functions | cagg_migrate_execute_override_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_functions | cagg_migrate_execute_plan | IN _cagg_data _timescaledb_catalog.continuous_agg |
+ timescaledb | _timescaledb_functions | cagg_migrate_execute_refresh_new_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_functions | cagg_migrate_plan_exists | _hypertable_id integer | boolean
+ timescaledb | _timescaledb_functions | cagg_migrate_pre_validation | _cagg_schema text, _cagg_name text, _cagg_name_new text | _timescaledb_catalog.continuous_agg
+ timescaledb | _timescaledb_functions | cagg_migrate_to_time_bucket | IN cagg regclass |
+ timescaledb | _timescaledb_functions | cagg_validate_query | query text, OUT is_valid boolean, OUT error_level text, OUT error_code text, OUT error_message text, OUT error_detail text, OUT error_hint text | record
+ timescaledb | _timescaledb_functions | cagg_watermark | hypertable_id integer | bigint
+ timescaledb | _timescaledb_functions | cagg_watermark_materialized | hypertable_id integer | bigint
+ timescaledb | _timescaledb_functions | calculate_chunk_interval | dimension_id integer, dimension_coord bigint, chunk_target_size bigint | bigint
+ timescaledb | _timescaledb_functions | chunk_constraint_add_table_constraint | chunk_constraint_row _timescaledb_catalog.chunk_constraint | void
+ timescaledb | _timescaledb_functions | chunk_id_from_relid | relid oid | integer
+ timescaledb | _timescaledb_functions | chunk_index_clone | chunk_index_oid oid | oid
+ timescaledb | _timescaledb_functions | chunk_index_replace | chunk_index_oid_old oid, chunk_index_oid_new oid | void
+ timescaledb | _timescaledb_functions | chunk_status | regclass | integer
+ timescaledb | _timescaledb_functions | chunks_local_size | schema_name_in name, table_name_in name | TABLE(chunk_id integer, chunk_schema name, chunk_name name, table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint)
+ timescaledb | _timescaledb_functions | compressed_chunk_local_stats | schema_name_in name, table_name_in name | TABLE(chunk_schema name, chunk_name name, compression_status text, before_compression_table_bytes bigint, before_compression_index_bytes bigint, before_compression_toast_bytes bigint, before_compression_total_bytes bigint, after_compression_table_bytes bigint, after_compression_index_bytes bigint, after_compression_toast_bytes bigint, after_compression_total_bytes bigint)
+ timescaledb | _timescaledb_functions | compressed_data_in | cstring | _timescaledb_internal.compressed_data
+ timescaledb | _timescaledb_functions | compressed_data_out | _timescaledb_internal.compressed_data | cstring
+ timescaledb | _timescaledb_functions | compressed_data_recv | internal | _timescaledb_internal.compressed_data
+ timescaledb | _timescaledb_functions | compressed_data_send | _timescaledb_internal.compressed_data | bytea
+ timescaledb | _timescaledb_functions | constraint_clone | constraint_oid oid, target_oid regclass | void
+ timescaledb | _timescaledb_functions | continuous_agg_invalidation_trigger | | trigger
+ timescaledb | _timescaledb_functions | create_chunk | hypertable regclass, slices jsonb, schema_name name, table_name name, chunk_table regclass | TABLE(chunk_id integer, hypertable_id integer, schema_name name, table_name name, relkind "char", slices jsonb, created boolean)
+ timescaledb | _timescaledb_functions | create_chunk_table | hypertable regclass, slices jsonb, schema_name name, table_name name | boolean
+ timescaledb | _timescaledb_functions | create_compressed_chunk | chunk regclass, chunk_table regclass, uncompressed_heap_size bigint, uncompressed_toast_size bigint, uncompressed_index_size bigint, compressed_heap_size bigint, compressed_toast_size bigint, compressed_index_size bigint, numrows_pre_compression bigint, numrows_post_compression bigint | regclass
+ timescaledb | _timescaledb_functions | dimension_info_in | cstring | _timescaledb_internal.dimension_info
+ timescaledb | _timescaledb_functions | dimension_info_out | _timescaledb_internal.dimension_info | cstring
+ timescaledb | _timescaledb_functions | drop_chunk | chunk regclass | boolean
+ timescaledb | _timescaledb_functions | finalize_agg | agg_name text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | anyelement
+ timescaledb | _timescaledb_functions | finalize_agg_ffunc | tstate internal, aggfn text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | anyelement
+ timescaledb | _timescaledb_functions | finalize_agg_sfunc | tstate internal, aggfn text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | internal
+ timescaledb | _timescaledb_functions | first_combinefunc | internal, internal | internal
+ timescaledb | _timescaledb_functions | first_sfunc | internal, anyelement, "any" | internal
+ timescaledb | _timescaledb_functions | freeze_chunk | chunk regclass | boolean
+ timescaledb | _timescaledb_functions | generate_uuid | | uuid
+ timescaledb | _timescaledb_functions | get_approx_row_count | relation regclass | bigint
+ timescaledb | _timescaledb_functions | get_compressed_chunk_index_for_recompression | uncompressed_chunk regclass | regclass
+ timescaledb | _timescaledb_functions | get_create_command | table_name name | text
+ timescaledb | _timescaledb_functions | get_git_commit | | TABLE(commit_tag text, commit_hash text, commit_time timestamp with time zone)
+ timescaledb | _timescaledb_functions | get_orderby_defaults | relation regclass, segment_by_cols text[] | jsonb
+ timescaledb | _timescaledb_functions | get_os_info | | TABLE(sysname text, version text, release text, version_pretty text)
+ timescaledb | _timescaledb_functions | get_partition_for_key | val anyelement | integer
+ timescaledb | _timescaledb_functions | get_partition_hash | val anyelement | integer
+ timescaledb | _timescaledb_functions | get_segmentby_defaults | relation regclass | jsonb
+ timescaledb | _timescaledb_functions | hist_combinefunc | state1 internal, state2 internal | internal
+ timescaledb | _timescaledb_functions | hist_deserializefunc | bytea, internal | internal
+ timescaledb | _timescaledb_functions | hist_finalfunc | state internal, val double precision, min double precision, max double precision, nbuckets integer | integer[]
+ timescaledb | _timescaledb_functions | hist_serializefunc | internal | bytea
+ timescaledb | _timescaledb_functions | hist_sfunc | state internal, val double precision, min double precision, max double precision, nbuckets integer | internal
+ timescaledb | _timescaledb_functions | hypertable_local_size | schema_name_in name, table_name_in name | TABLE(table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint)
+ timescaledb | _timescaledb_functions | hypertable_osm_range_update | hypertable regclass, range_start anyelement, range_end anyelement, empty boolean | boolean
+ timescaledb | _timescaledb_functions | indexes_local_size | schema_name_in name, index_name_in name | TABLE(hypertable_id integer, total_bytes bigint)
+ timescaledb | _timescaledb_functions | insert_blocker | | trigger
+ timescaledb | _timescaledb_functions | interval_to_usec | chunk_interval interval | bigint
+ timescaledb | _timescaledb_functions | last_combinefunc | internal, internal | internal
+ timescaledb | _timescaledb_functions | last_sfunc | internal, anyelement, "any" | internal
+ timescaledb | _timescaledb_functions | makeaclitem | regrole, regrole, text, boolean | aclitem
+ timescaledb | _timescaledb_functions | metadata_insert_trigger | | trigger
+ timescaledb | _timescaledb_functions | partialize_agg | arg anyelement | bytea
+ timescaledb | _timescaledb_functions | policy_compression | IN job_id integer, IN config jsonb |
+ timescaledb | _timescaledb_functions | policy_compression_check | config jsonb | void
+ timescaledb | _timescaledb_functions | policy_compression_execute | IN job_id integer, IN htid integer, IN lag anyelement, IN maxchunks integer, IN verbose_log boolean, IN recompress_enabled boolean, IN use_creation_time boolean |
+ timescaledb | _timescaledb_functions | policy_job_stat_history_retention | job_id integer, config jsonb | integer
+ timescaledb | _timescaledb_functions | policy_job_stat_history_retention_check | config jsonb | void
+ timescaledb | _timescaledb_functions | policy_recompression | IN job_id integer, IN config jsonb |
+ timescaledb | _timescaledb_functions | policy_refresh_continuous_aggregate | IN job_id integer, IN config jsonb |
+ timescaledb | _timescaledb_functions | policy_refresh_continuous_aggregate_check | config jsonb | void
+ timescaledb | _timescaledb_functions | policy_reorder | IN job_id integer, IN config jsonb |
+ timescaledb | _timescaledb_functions | policy_reorder_check | config jsonb | void
+ timescaledb | _timescaledb_functions | policy_retention | IN job_id integer, IN config jsonb |
+ timescaledb | _timescaledb_functions | policy_retention_check | config jsonb | void
+ timescaledb | _timescaledb_functions | process_ddl_event | | event_trigger
+ timescaledb | _timescaledb_functions | range_value_to_pretty | time_value bigint, column_type regtype | text
+ timescaledb | _timescaledb_functions | recompress_chunk_segmentwise | uncompressed_chunk regclass, if_compressed boolean | regclass
+ timescaledb | _timescaledb_functions | relation_approximate_size | relation regclass | TABLE(total_size bigint, heap_size bigint, index_size bigint, toast_size bigint)
+ timescaledb | _timescaledb_functions | relation_size | relation regclass | TABLE(total_size bigint, heap_size bigint, index_size bigint, toast_size bigint)
+ timescaledb | _timescaledb_functions | remove_dropped_chunk_metadata | _hypertable_id integer | integer
+ timescaledb | _timescaledb_functions | repair_relation_acls | |
+ timescaledb | _timescaledb_functions | restart_background_workers | | boolean
+ timescaledb | _timescaledb_functions | show_chunk | chunk regclass | TABLE(chunk_id integer, hypertable_id integer, schema_name name, table_name name, relkind "char", slices jsonb)
+ timescaledb | _timescaledb_functions | start_background_workers | | boolean
+ timescaledb | _timescaledb_functions | stop_background_workers | | boolean
+ timescaledb | _timescaledb_functions | subtract_integer_from_now | hypertable_relid regclass, lag bigint | bigint
+ timescaledb | _timescaledb_functions | time_to_internal | time_val anyelement | bigint
+ timescaledb | _timescaledb_functions | to_date | unixtime_us bigint | date
+ timescaledb | _timescaledb_functions | to_interval | unixtime_us bigint | interval
+ timescaledb | _timescaledb_functions | to_timestamp | unixtime_us bigint | timestamp with time zone
+ timescaledb | _timescaledb_functions | to_timestamp_without_timezone | unixtime_us bigint | timestamp without time zone
+ timescaledb | _timescaledb_functions | to_unix_microseconds | ts timestamp with time zone | bigint
+ timescaledb | _timescaledb_functions | tsl_loaded | | boolean
+ timescaledb | _timescaledb_functions | unfreeze_chunk | chunk regclass | boolean
+ timescaledb | _timescaledb_internal | alter_job_set_hypertable_id | job_id integer, hypertable regclass | integer
+ timescaledb | _timescaledb_internal | attach_osm_table_chunk | hypertable regclass, chunk regclass | boolean
+ timescaledb | _timescaledb_internal | cagg_migrate_create_plan | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _cagg_name_new text, IN _override boolean, IN _drop_old boolean |
+ timescaledb | _timescaledb_internal | cagg_migrate_execute_copy_data | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_internal | cagg_migrate_execute_copy_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_internal | cagg_migrate_execute_create_new_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_internal | cagg_migrate_execute_disable_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_internal | cagg_migrate_execute_drop_old_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_internal | cagg_migrate_execute_enable_policies | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_internal | cagg_migrate_execute_override_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_internal | cagg_migrate_execute_plan | IN _cagg_data _timescaledb_catalog.continuous_agg |
+ timescaledb | _timescaledb_internal | cagg_migrate_execute_refresh_new_cagg | IN _cagg_data _timescaledb_catalog.continuous_agg, IN _plan_step _timescaledb_catalog.continuous_agg_migrate_plan_step |
+ timescaledb | _timescaledb_internal | cagg_migrate_plan_exists | _hypertable_id integer | boolean
+ timescaledb | _timescaledb_internal | cagg_migrate_pre_validation | _cagg_schema text, _cagg_name text, _cagg_name_new text | _timescaledb_catalog.continuous_agg
+ timescaledb | _timescaledb_internal | cagg_watermark | hypertable_id integer | bigint
+ timescaledb | _timescaledb_internal | cagg_watermark_materialized | hypertable_id integer | bigint
+ timescaledb | _timescaledb_internal | calculate_chunk_interval | dimension_id integer, dimension_coord bigint, chunk_target_size bigint | bigint
+ timescaledb | _timescaledb_internal | chunk_constraint_add_table_constraint | chunk_constraint_row _timescaledb_catalog.chunk_constraint | void
+ timescaledb | _timescaledb_internal | chunk_id_from_relid | relid oid | integer
+ timescaledb | _timescaledb_internal | chunk_index_clone | chunk_index_oid oid | oid
+ timescaledb | _timescaledb_internal | chunk_index_replace | chunk_index_oid_old oid, chunk_index_oid_new oid | void
+ timescaledb | _timescaledb_internal | chunk_status | regclass | integer
+ timescaledb | _timescaledb_internal | chunks_local_size | schema_name_in name, table_name_in name | TABLE(chunk_id integer, chunk_schema name, chunk_name name, table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint)
+ timescaledb | _timescaledb_internal | compressed_chunk_local_stats | schema_name_in name, table_name_in name | TABLE(chunk_schema name, chunk_name name, compression_status text, before_compression_table_bytes bigint, before_compression_index_bytes bigint, before_compression_toast_bytes bigint, before_compression_total_bytes bigint, after_compression_table_bytes bigint, after_compression_index_bytes bigint, after_compression_toast_bytes bigint, after_compression_total_bytes bigint)
+ timescaledb | _timescaledb_internal | compressed_chunk_remote_stats | schema_name_in name, table_name_in name | TABLE(chunk_schema name, chunk_name name, compression_status text, before_compression_table_bytes bigint, before_compression_index_bytes bigint, before_compression_toast_bytes bigint, before_compression_total_bytes bigint, after_compression_table_bytes bigint, after_compression_index_bytes bigint, after_compression_toast_bytes bigint, after_compression_total_bytes bigint, node_name name)
+ timescaledb | _timescaledb_internal | continuous_agg_invalidation_trigger | | trigger
+ timescaledb | _timescaledb_internal | create_chunk | hypertable regclass, _slices jsonb, _schema_name name, _table_name name, chunk_table regclass | TABLE(chunk_id integer, hypertable_id integer, schema_name name, table_name name, relkind "char", slices jsonb, created boolean)
+ timescaledb | _timescaledb_internal | create_chunk_table | hypertable regclass, slices jsonb, schema_name name, table_name name | boolean
+ timescaledb | _timescaledb_internal | create_compressed_chunk | chunk regclass, chunk_table regclass, uncompressed_heap_size bigint, uncompressed_toast_size bigint, uncompressed_index_size bigint, compressed_heap_size bigint, compressed_toast_size bigint, compressed_index_size bigint, numrows_pre_compression bigint, numrows_post_compression bigint | regclass
+ timescaledb | _timescaledb_internal | drop_chunk | chunk regclass | boolean
+ timescaledb | _timescaledb_internal | finalize_agg | agg_name text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | anyelement
+ timescaledb | _timescaledb_internal | finalize_agg_ffunc | tstate internal, aggfn text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | anyelement
+ timescaledb | _timescaledb_internal | finalize_agg_sfunc | tstate internal, aggfn text, inner_agg_collation_schema name, inner_agg_collation_name name, inner_agg_input_types name[], inner_agg_serialized_state bytea, return_type_dummy_val anyelement | internal
+ timescaledb | _timescaledb_internal | freeze_chunk | chunk regclass | boolean
+ timescaledb | _timescaledb_internal | generate_uuid | | uuid
+ timescaledb | _timescaledb_internal | get_approx_row_count | relation regclass | bigint
+ timescaledb | _timescaledb_internal | get_compressed_chunk_index_for_recompression | uncompressed_chunk regclass | regclass
+ timescaledb | _timescaledb_internal | get_create_command | table_name name | text
+ timescaledb | _timescaledb_internal | get_git_commit | | TABLE(commit_tag text, commit_hash text, commit_time timestamp with time zone)
+ timescaledb | _timescaledb_internal | get_os_info | | TABLE(sysname text, version text, release text, version_pretty text)
+ timescaledb | _timescaledb_internal | get_partition_for_key | val anyelement | integer
+ timescaledb | _timescaledb_internal | get_partition_hash | val anyelement | integer
+ timescaledb | _timescaledb_internal | hypertable_local_size | schema_name_in name, table_name_in name | TABLE(table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint)
+ timescaledb | _timescaledb_internal | indexes_local_size | schema_name_in name, table_name_in name | TABLE(hypertable_id integer, total_bytes bigint)
+ timescaledb | _timescaledb_internal | insert_blocker | | trigger
+ timescaledb | _timescaledb_internal | interval_to_usec | chunk_interval interval | bigint
+ timescaledb | _timescaledb_internal | partialize_agg | arg anyelement | bytea
+ timescaledb | _timescaledb_internal | policy_compression | IN job_id integer, IN config jsonb |
+ timescaledb | _timescaledb_internal | policy_compression_check | config jsonb | void
+ timescaledb | _timescaledb_internal | policy_compression_execute | IN job_id integer, IN htid integer, IN lag anyelement, IN maxchunks integer, IN verbose_log boolean, IN recompress_enabled boolean, IN use_creation_time boolean |
+ timescaledb | _timescaledb_internal | policy_job_stat_history_retention | job_id integer, config jsonb | integer
+ timescaledb | _timescaledb_internal | policy_job_stat_history_retention_check | config jsonb | void
+ timescaledb | _timescaledb_internal | policy_recompression | IN job_id integer, IN config jsonb |
+ timescaledb | _timescaledb_internal | policy_refresh_continuous_aggregate | IN job_id integer, IN config jsonb |
+ timescaledb | _timescaledb_internal | policy_refresh_continuous_aggregate_check | config jsonb | void
+ timescaledb | _timescaledb_internal | policy_reorder | IN job_id integer, IN config jsonb |
+ timescaledb | _timescaledb_internal | policy_reorder_check | config jsonb | void
+ timescaledb | _timescaledb_internal | policy_retention | IN job_id integer, IN config jsonb |
+ timescaledb | _timescaledb_internal | policy_retention_check | config jsonb | void
+ timescaledb | _timescaledb_internal | process_ddl_event | | event_trigger
+ timescaledb | _timescaledb_internal | range_value_to_pretty | time_value bigint, column_type regtype | text
+ timescaledb | _timescaledb_internal | recompress_chunk_segmentwise | uncompressed_chunk regclass, if_compressed boolean | regclass
+ timescaledb | _timescaledb_internal | relation_size | relation regclass | TABLE(total_size bigint, heap_size bigint, index_size bigint, toast_size bigint)
+ timescaledb | _timescaledb_internal | restart_background_workers | | boolean
+ timescaledb | _timescaledb_internal | show_chunk | chunk regclass | TABLE(chunk_id integer, hypertable_id integer, schema_name name, table_name name, relkind "char", slices jsonb)
+ timescaledb | _timescaledb_internal | start_background_workers | | boolean
+ timescaledb | _timescaledb_internal | stop_background_workers | | boolean
+ timescaledb | _timescaledb_internal | subtract_integer_from_now | hypertable_relid regclass, lag bigint | bigint
+ timescaledb | _timescaledb_internal | time_to_internal | time_val anyelement | bigint
+ timescaledb | _timescaledb_internal | to_date | unixtime_us bigint | date
+ timescaledb | _timescaledb_internal | to_interval | unixtime_us bigint | interval
+ timescaledb | _timescaledb_internal | to_timestamp | unixtime_us bigint | timestamp with time zone
+ timescaledb | _timescaledb_internal | to_timestamp_without_timezone | unixtime_us bigint | timestamp without time zone
+ timescaledb | _timescaledb_internal | to_unix_microseconds | ts timestamp with time zone | bigint
+ timescaledb | _timescaledb_internal | tsl_loaded | | boolean
+ timescaledb | _timescaledb_internal | unfreeze_chunk | chunk regclass | boolean
+ timescaledb | public | add_compression_policy | hypertable regclass, compress_after "any", if_not_exists boolean, schedule_interval interval, initial_start timestamp with time zone, timezone text, compress_created_before interval | integer
+ timescaledb | public | add_continuous_aggregate_policy | continuous_aggregate regclass, start_offset "any", end_offset "any", schedule_interval interval, if_not_exists boolean, initial_start timestamp with time zone, timezone text | integer
+ timescaledb | public | add_dimension | hypertable regclass, column_name name, number_partitions integer, chunk_time_interval anyelement, partitioning_func regproc, if_not_exists boolean | TABLE(dimension_id integer, schema_name name, table_name name, column_name name, created boolean)
+ timescaledb | public | add_dimension | hypertable regclass, dimension _timescaledb_internal.dimension_info, if_not_exists boolean | TABLE(dimension_id integer, created boolean)
+ timescaledb | public | add_job | proc regproc, schedule_interval interval, config jsonb, initial_start timestamp with time zone, scheduled boolean, check_config regproc, fixed_schedule boolean, timezone text | integer
+ timescaledb | public | add_reorder_policy | hypertable regclass, index_name name, if_not_exists boolean, initial_start timestamp with time zone, timezone text | integer
+ timescaledb | public | add_retention_policy | relation regclass, drop_after "any", if_not_exists boolean, schedule_interval interval, initial_start timestamp with time zone, timezone text, drop_created_before interval | integer
+ timescaledb | public | alter_job | job_id integer, schedule_interval interval, max_runtime interval, max_retries integer, retry_period interval, scheduled boolean, config jsonb, next_start timestamp with time zone, if_exists boolean, check_config regproc, fixed_schedule boolean, initial_start timestamp with time zone, timezone text | TABLE(job_id integer, schedule_interval interval, max_runtime interval, max_retries integer, retry_period interval, scheduled boolean, config jsonb, next_start timestamp with time zone, check_config text, fixed_schedule boolean, initial_start timestamp with time zone, timezone text)
+ timescaledb | public | approximate_row_count | relation regclass | bigint
+ timescaledb | public | attach_tablespace | tablespace name, hypertable regclass, if_not_attached boolean | void
+ timescaledb | public | by_hash | column_name name, number_partitions integer, partition_func regproc | _timescaledb_internal.dimension_info
+ timescaledb | public | by_range | column_name name, partition_interval anyelement, partition_func regproc | _timescaledb_internal.dimension_info
+ timescaledb | public | cagg_migrate | IN cagg regclass, IN override boolean, IN drop_old boolean |
+ timescaledb | public | chunk_compression_stats | hypertable regclass | TABLE(chunk_schema name, chunk_name name, compression_status text, before_compression_table_bytes bigint, before_compression_index_bytes bigint, before_compression_toast_bytes bigint, before_compression_total_bytes bigint, after_compression_table_bytes bigint, after_compression_index_bytes bigint, after_compression_toast_bytes bigint, after_compression_total_bytes bigint, node_name name)
+ timescaledb | public | chunks_detailed_size | hypertable regclass | TABLE(chunk_schema name, chunk_name name, table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint, node_name name)
+ timescaledb | public | compress_chunk | uncompressed_chunk regclass, if_not_compressed boolean, recompress boolean | regclass
+ timescaledb | public | create_hypertable | relation regclass, time_column_name name, partitioning_column name, number_partitions integer, associated_schema_name name, associated_table_prefix name, chunk_time_interval anyelement, create_default_indexes boolean, if_not_exists boolean, partitioning_func regproc, migrate_data boolean, chunk_target_size text, chunk_sizing_func regproc, time_partitioning_func regproc | TABLE(hypertable_id integer, schema_name name, table_name name, created boolean)
+ timescaledb | public | create_hypertable | relation regclass, dimension _timescaledb_internal.dimension_info, create_default_indexes boolean, if_not_exists boolean, migrate_data boolean | TABLE(hypertable_id integer, created boolean)
+ timescaledb | public | decompress_chunk | uncompressed_chunk regclass, if_compressed boolean | regclass
+ timescaledb | public | delete_job | job_id integer | void
+ timescaledb | public | detach_tablespace | tablespace name, hypertable regclass, if_attached boolean | integer
+ timescaledb | public | detach_tablespaces | hypertable regclass | integer
+ timescaledb | public | disable_chunk_skipping | hypertable regclass, column_name name, if_not_exists boolean | TABLE(hypertable_id integer, column_name name, disabled boolean)
+ timescaledb | public | drop_chunks | relation regclass, older_than "any", newer_than "any", "verbose" boolean, created_before "any", created_after "any" | SETOF text
+ timescaledb | public | enable_chunk_skipping | hypertable regclass, column_name name, if_not_exists boolean | TABLE(column_stats_id integer, enabled boolean)
+ timescaledb | public | first | anyelement, "any" | anyelement
+ timescaledb | public | get_telemetry_report | | jsonb
+ timescaledb | public | histogram | double precision, double precision, double precision, integer | integer[]
+ timescaledb | public | hypertable_approximate_detailed_size | relation regclass | TABLE(table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint)
+ timescaledb | public | hypertable_approximate_size | hypertable regclass | bigint
+ timescaledb | public | hypertable_compression_stats | hypertable regclass | TABLE(total_chunks bigint, number_compressed_chunks bigint, before_compression_table_bytes bigint, before_compression_index_bytes bigint, before_compression_toast_bytes bigint, before_compression_total_bytes bigint, after_compression_table_bytes bigint, after_compression_index_bytes bigint, after_compression_toast_bytes bigint, after_compression_total_bytes bigint, node_name name)
+ timescaledb | public | hypertable_detailed_size | hypertable regclass | TABLE(table_bytes bigint, index_bytes bigint, toast_bytes bigint, total_bytes bigint, node_name name)
+ timescaledb | public | hypertable_index_size | index_name regclass | bigint
+ timescaledb | public | hypertable_size | hypertable regclass | bigint
+ timescaledb | public | interpolate | value double precision, prev record, next record | double precision
+ timescaledb | public | interpolate | value real, prev record, next record | real
+ timescaledb | public | interpolate | value bigint, prev record, next record | bigint
+ timescaledb | public | interpolate | value integer, prev record, next record | integer
+ timescaledb | public | interpolate | value smallint, prev record, next record | smallint
+ timescaledb | public | last | anyelement, "any" | anyelement
+ timescaledb | public | locf | value anyelement, prev anyelement, treat_null_as_missing boolean | anyelement
+ timescaledb | public | move_chunk | chunk regclass, destination_tablespace name, index_destination_tablespace name, reorder_index regclass, "verbose" boolean | void
+ timescaledb | public | recompress_chunk | IN chunk regclass, IN if_not_compressed boolean |
+ timescaledb | public | refresh_continuous_aggregate | IN continuous_aggregate regclass, IN window_start "any", IN window_end "any" |
+ timescaledb | public | remove_compression_policy | hypertable regclass, if_exists boolean | boolean
+ timescaledb | public | remove_continuous_aggregate_policy | continuous_aggregate regclass, if_not_exists boolean, if_exists boolean | void
+ timescaledb | public | remove_reorder_policy | hypertable regclass, if_exists boolean | void
+ timescaledb | public | remove_retention_policy | relation regclass, if_exists boolean | void
+ timescaledb | public | reorder_chunk | chunk regclass, index regclass, "verbose" boolean | void
+ timescaledb | public | run_job | IN job_id integer |
+ timescaledb | public | set_adaptive_chunking | hypertable regclass, chunk_target_size text, INOUT chunk_sizing_func regproc, OUT chunk_target_size bigint | record
+ timescaledb | public | set_chunk_time_interval | hypertable regclass, chunk_time_interval anyelement, dimension_name name | void
+ timescaledb | public | set_integer_now_func | hypertable regclass, integer_now_func regproc, replace_if_exists boolean | void
+ timescaledb | public | set_number_partitions | hypertable regclass, number_partitions integer, dimension_name name | void
+ timescaledb | public | set_partitioning_interval | hypertable regclass, partition_interval anyelement, dimension_name name | void
+ timescaledb | public | show_chunks | relation regclass, older_than "any", newer_than "any", created_before "any", created_after "any" | SETOF regclass
+ timescaledb | public | show_tablespaces | hypertable regclass | SETOF name
+ timescaledb | public | time_bucket | bucket_width bigint, ts bigint | bigint
+ timescaledb | public | time_bucket | bucket_width interval, ts timestamp with time zone, "offset" interval | timestamp with time zone
+ timescaledb | public | time_bucket | bucket_width interval, ts timestamp with time zone, origin timestamp with time zone | timestamp
with time zone + timescaledb | public | time_bucket | bucket_width smallint, ts smallint, "offset" smallint | smallint + timescaledb | public | time_bucket | bucket_width interval, ts timestamp without time zone, "offset" interval | timestamp without time zone + timescaledb | public | time_bucket | bucket_width interval, ts timestamp without time zone | timestamp without time zone + timescaledb | public | time_bucket | bucket_width integer, ts integer | integer + timescaledb | public | time_bucket | bucket_width bigint, ts bigint, "offset" bigint | bigint + timescaledb | public | time_bucket | bucket_width smallint, ts smallint | smallint + timescaledb | public | time_bucket | bucket_width interval, ts date | date + timescaledb | public | time_bucket | bucket_width interval, ts timestamp with time zone, timezone text, origin timestamp with time zone, "offset" interval | timestamp with time zone + timescaledb | public | time_bucket | bucket_width interval, ts date, "offset" interval | date + timescaledb | public | time_bucket | bucket_width interval, ts timestamp without time zone, origin timestamp without time zone | timestamp without time zone + timescaledb | public | time_bucket | bucket_width integer, ts integer, "offset" integer | integer + timescaledb | public | time_bucket | bucket_width interval, ts timestamp with time zone | timestamp with time zone + timescaledb | public | time_bucket | bucket_width interval, ts date, origin date | date + timescaledb | public | time_bucket_gapfill | bucket_width interval, ts timestamp with time zone, timezone text, start timestamp with time zone, finish timestamp with time zone | timestamp with time zone + timescaledb | public | time_bucket_gapfill | bucket_width interval, ts date, start date, finish date | date + timescaledb | public | time_bucket_gapfill | bucket_width smallint, ts smallint, start smallint, finish smallint | smallint + timescaledb | public | time_bucket_gapfill | bucket_width interval, ts timestamp with time zone, start timestamp with time zone, finish timestamp with time zone | timestamp with time zone + timescaledb | public | time_bucket_gapfill | bucket_width bigint, ts bigint, start bigint, finish bigint | bigint + timescaledb | public | time_bucket_gapfill | bucket_width interval, ts timestamp without time zone, start timestamp without time zone, finish timestamp without time zone | timestamp without time zone + timescaledb | public | time_bucket_gapfill | bucket_width integer, ts integer, start integer, finish integer | integer + timescaledb | public | timescaledb_post_restore | | boolean + timescaledb | public | timescaledb_pre_restore | | boolean + timescaledb | timescaledb_experimental | add_policies | relation regclass, if_not_exists boolean, refresh_start_offset "any", refresh_end_offset "any", compress_after "any", drop_after "any" | boolean + timescaledb | timescaledb_experimental | alter_policies | relation regclass, if_exists boolean, refresh_start_offset "any", refresh_end_offset "any", compress_after "any", drop_after "any" | boolean + timescaledb | timescaledb_experimental | remove_all_policies | relation regclass, if_exists boolean | boolean + timescaledb | timescaledb_experimental | remove_policies | relation regclass, if_exists boolean, VARIADIC policy_names text[] | boolean + timescaledb | timescaledb_experimental | show_policies | relation regclass | SETOF jsonb + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp with time zone, origin timestamp with time zone, 
timezone text | timestamp with time zone + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp with time zone, origin timestamp with time zone | timestamp with time zone + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp without time zone | timestamp without time zone + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp with time zone, timezone text | timestamp with time zone + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts date | date + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp without time zone, origin timestamp without time zone | timestamp without time zone + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts timestamp with time zone | timestamp with time zone + timescaledb | timescaledb_experimental | time_bucket_ng | bucket_width interval, ts date, origin date | date + tsm_system_rows | public | system_rows | internal | tsm_handler + unaccent | public | unaccent | text | text + unaccent | public | unaccent | regdictionary, text | text + unaccent | public | unaccent_init | internal | internal + unaccent | public | unaccent_lexize | internal, internal, internal, internal | internal + uuid-ossp | extensions | uuid_generate_v1 | | uuid + uuid-ossp | extensions | uuid_generate_v1mc | | uuid + uuid-ossp | extensions | uuid_generate_v3 | namespace uuid, name text | uuid + uuid-ossp | extensions | uuid_generate_v4 | | uuid + uuid-ossp | extensions | uuid_generate_v5 | namespace uuid, name text | uuid + uuid-ossp | extensions | uuid_nil | | uuid + uuid-ossp | extensions | uuid_ns_dns | | uuid + uuid-ossp | extensions | uuid_ns_oid | | uuid + uuid-ossp | extensions | uuid_ns_url | | uuid + uuid-ossp | extensions | uuid_ns_x500 | | uuid + vector | public | array_to_halfvec | real[], integer, boolean | halfvec + vector | public | array_to_halfvec | integer[], integer, boolean | halfvec + vector | public | array_to_halfvec | numeric[], integer, boolean | halfvec + vector | public | array_to_halfvec | double precision[], integer, boolean | halfvec + vector | public | array_to_sparsevec | real[], integer, boolean | sparsevec + vector | public | array_to_sparsevec | integer[], integer, boolean | sparsevec + vector | public | array_to_sparsevec | numeric[], integer, boolean | sparsevec + vector | public | array_to_sparsevec | double precision[], integer, boolean | sparsevec + vector | public | array_to_vector | real[], integer, boolean | vector + vector | public | array_to_vector | integer[], integer, boolean | vector + vector | public | array_to_vector | numeric[], integer, boolean | vector + vector | public | array_to_vector | double precision[], integer, boolean | vector + vector | public | avg | vector | vector + vector | public | avg | halfvec | halfvec + vector | public | binary_quantize | vector | bit + vector | public | binary_quantize | halfvec | bit + vector | public | cosine_distance | vector, vector | double precision + vector | public | cosine_distance | halfvec, halfvec | double precision + vector | public | cosine_distance | sparsevec, sparsevec | double precision + vector | public | halfvec | halfvec, integer, boolean | halfvec + vector | public | halfvec_accum | double precision[], halfvec | double precision[] + vector | public | halfvec_add | halfvec, halfvec | halfvec + vector | public | halfvec_avg | double precision[] | halfvec + 
vector | public | halfvec_cmp | halfvec, halfvec | integer + vector | public | halfvec_combine | double precision[], double precision[] | double precision[] + vector | public | halfvec_concat | halfvec, halfvec | halfvec + vector | public | halfvec_eq | halfvec, halfvec | boolean + vector | public | halfvec_ge | halfvec, halfvec | boolean + vector | public | halfvec_gt | halfvec, halfvec | boolean + vector | public | halfvec_in | cstring, oid, integer | halfvec + vector | public | halfvec_l2_squared_distance | halfvec, halfvec | double precision + vector | public | halfvec_le | halfvec, halfvec | boolean + vector | public | halfvec_lt | halfvec, halfvec | boolean + vector | public | halfvec_mul | halfvec, halfvec | halfvec + vector | public | halfvec_ne | halfvec, halfvec | boolean + vector | public | halfvec_negative_inner_product | halfvec, halfvec | double precision + vector | public | halfvec_out | halfvec | cstring + vector | public | halfvec_recv | internal, oid, integer | halfvec + vector | public | halfvec_send | halfvec | bytea + vector | public | halfvec_spherical_distance | halfvec, halfvec | double precision + vector | public | halfvec_sub | halfvec, halfvec | halfvec + vector | public | halfvec_to_float4 | halfvec, integer, boolean | real[] + vector | public | halfvec_to_sparsevec | halfvec, integer, boolean | sparsevec + vector | public | halfvec_to_vector | halfvec, integer, boolean | vector + vector | public | halfvec_typmod_in | cstring[] | integer + vector | public | hamming_distance | bit, bit | double precision + vector | public | hnsw_bit_support | internal | internal + vector | public | hnsw_halfvec_support | internal | internal + vector | public | hnsw_sparsevec_support | internal | internal + vector | public | hnswhandler | internal | index_am_handler + vector | public | inner_product | vector, vector | double precision + vector | public | inner_product | halfvec, halfvec | double precision + vector | public | inner_product | sparsevec, sparsevec | double precision + vector | public | ivfflat_bit_support | internal | internal + vector | public | ivfflat_halfvec_support | internal | internal + vector | public | ivfflathandler | internal | index_am_handler + vector | public | jaccard_distance | bit, bit | double precision + vector | public | l1_distance | vector, vector | double precision + vector | public | l1_distance | halfvec, halfvec | double precision + vector | public | l1_distance | sparsevec, sparsevec | double precision + vector | public | l2_distance | vector, vector | double precision + vector | public | l2_distance | halfvec, halfvec | double precision + vector | public | l2_distance | sparsevec, sparsevec | double precision + vector | public | l2_norm | halfvec | double precision + vector | public | l2_norm | sparsevec | double precision + vector | public | l2_normalize | vector | vector + vector | public | l2_normalize | halfvec | halfvec + vector | public | l2_normalize | sparsevec | sparsevec + vector | public | sparsevec | sparsevec, integer, boolean | sparsevec + vector | public | sparsevec_cmp | sparsevec, sparsevec | integer + vector | public | sparsevec_eq | sparsevec, sparsevec | boolean + vector | public | sparsevec_ge | sparsevec, sparsevec | boolean + vector | public | sparsevec_gt | sparsevec, sparsevec | boolean + vector | public | sparsevec_in | cstring, oid, integer | sparsevec + vector | public | sparsevec_l2_squared_distance | sparsevec, sparsevec | double precision + vector | public | sparsevec_le | sparsevec, sparsevec | boolean + 
vector | public | sparsevec_lt | sparsevec, sparsevec | boolean + vector | public | sparsevec_ne | sparsevec, sparsevec | boolean + vector | public | sparsevec_negative_inner_product | sparsevec, sparsevec | double precision + vector | public | sparsevec_out | sparsevec | cstring + vector | public | sparsevec_recv | internal, oid, integer | sparsevec + vector | public | sparsevec_send | sparsevec | bytea + vector | public | sparsevec_to_halfvec | sparsevec, integer, boolean | halfvec + vector | public | sparsevec_to_vector | sparsevec, integer, boolean | vector + vector | public | sparsevec_typmod_in | cstring[] | integer + vector | public | subvector | halfvec, integer, integer | halfvec + vector | public | subvector | vector, integer, integer | vector + vector | public | sum | vector | vector + vector | public | sum | halfvec | halfvec + vector | public | vector | vector, integer, boolean | vector + vector | public | vector_accum | double precision[], vector | double precision[] + vector | public | vector_add | vector, vector | vector + vector | public | vector_avg | double precision[] | vector + vector | public | vector_cmp | vector, vector | integer + vector | public | vector_combine | double precision[], double precision[] | double precision[] + vector | public | vector_concat | vector, vector | vector + vector | public | vector_dims | vector | integer + vector | public | vector_dims | halfvec | integer + vector | public | vector_eq | vector, vector | boolean + vector | public | vector_ge | vector, vector | boolean + vector | public | vector_gt | vector, vector | boolean + vector | public | vector_in | cstring, oid, integer | vector + vector | public | vector_l2_squared_distance | vector, vector | double precision + vector | public | vector_le | vector, vector | boolean + vector | public | vector_lt | vector, vector | boolean + vector | public | vector_mul | vector, vector | vector + vector | public | vector_ne | vector, vector | boolean + vector | public | vector_negative_inner_product | vector, vector | double precision + vector | public | vector_norm | vector | double precision + vector | public | vector_out | vector | cstring + vector | public | vector_recv | internal, oid, integer | vector + vector | public | vector_send | vector | bytea + vector | public | vector_spherical_distance | vector, vector | double precision + vector | public | vector_sub | vector, vector | vector + vector | public | vector_to_float4 | vector, integer, boolean | real[] + vector | public | vector_to_halfvec | vector, integer, boolean | halfvec + vector | public | vector_to_sparsevec | vector, integer, boolean | sparsevec + vector | public | vector_typmod_in | cstring[] | integer + wrappers | public | airtable_fdw_handler | | fdw_handler + wrappers | public | airtable_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | airtable_fdw_validator | options text[], catalog oid | void + wrappers | public | auth0_fdw_handler | | fdw_handler + wrappers | public | auth0_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | auth0_fdw_validator | options text[], catalog oid | void + wrappers | public | big_query_fdw_handler | | fdw_handler + wrappers | public | big_query_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | big_query_fdw_validator | options text[], catalog oid | void + wrappers | public | click_house_fdw_handler | | fdw_handler + wrappers | public | click_house_fdw_meta | | 
TABLE(name text, version text, author text, website text)
+ wrappers | public | click_house_fdw_validator | options text[], catalog oid | void
+ wrappers | public | cognito_fdw_handler | | fdw_handler
+ wrappers | public | cognito_fdw_meta | | TABLE(name text, version text, author text, website text)
+ wrappers | public | cognito_fdw_validator | options text[], catalog oid | void
+ wrappers | public | firebase_fdw_handler | | fdw_handler
+ wrappers | public | firebase_fdw_meta | | TABLE(name text, version text, author text, website text)
+ wrappers | public | firebase_fdw_validator | options text[], catalog oid | void
+ wrappers | public | hello_world_fdw_handler | | fdw_handler
+ wrappers | public | hello_world_fdw_meta | | TABLE(name text, version text, author text, website text)
+ wrappers | public | hello_world_fdw_validator | options text[], catalog oid | void
+ wrappers | public | logflare_fdw_handler | | fdw_handler
+ wrappers | public | logflare_fdw_meta | | TABLE(name text, version text, author text, website text)
+ wrappers | public | logflare_fdw_validator | options text[], catalog oid | void
+ wrappers | public | mssql_fdw_handler | | fdw_handler
+ wrappers | public | mssql_fdw_meta | | TABLE(name text, version text, author text, website text)
+ wrappers | public | mssql_fdw_validator | options text[], catalog oid | void
+ wrappers | public | redis_fdw_handler | | fdw_handler
+ wrappers | public | redis_fdw_meta | | TABLE(name text, version text, author text, website text)
+ wrappers | public | redis_fdw_validator | options text[], catalog oid | void
+ wrappers | public | s3_fdw_handler | | fdw_handler
+ wrappers | public | s3_fdw_meta | | TABLE(name text, version text, author text, website text)
+ wrappers | public | s3_fdw_validator | options text[], catalog oid | void
+ wrappers | public | stripe_fdw_handler | | fdw_handler
+ wrappers | public | stripe_fdw_meta | | TABLE(name text, version text, author text, website text)
+ wrappers | public | stripe_fdw_validator | options text[], catalog oid | void
+ wrappers | public | wasm_fdw_handler | | fdw_handler
+ wrappers | public | wasm_fdw_meta | | TABLE(name text, version text, author text, website text)
+ wrappers | public | wasm_fdw_validator | options text[], catalog oid | void
+ xml2 | public | xml_encode_special_chars | text | text
+ xml2 | public | xml_valid | text | boolean
+ xml2 | public | xpath_bool | text, text | boolean
+ xml2 | public | xpath_list | text, text | text
+ xml2 | public | xpath_list | text, text, text | text
+ xml2 | public | xpath_nodeset | text, text, text, text | text
+ xml2 | public | xpath_nodeset | text, text | text
+ xml2 | public | xpath_nodeset | text, text, text | text
+ xml2 | public | xpath_number | text, text | real
+ xml2 | public | xpath_string | text, text | text
+ xml2 | public | xpath_table | text, text, text, text, text | SETOF record
+ xml2 | public | xslt_process | text, text | text
+ xml2 | public | xslt_process | text, text, text | text
+(5055 rows)
+
+/*
+
+Monitor extension public table/view/matview/index interface
+
+*/
+select
+  e.extname as extension_name,
+  n.nspname as schema_name,
+  pc.relname as entity_name,
+  pa.attname
+from
+  pg_catalog.pg_class pc
+  join pg_catalog.pg_namespace n
+    on n.oid = pc.relnamespace
+  join pg_catalog.pg_depend d
+    on d.objid = pc.oid
+  join pg_catalog.pg_extension e
+    on e.oid = d.refobjid
+  left join pg_catalog.pg_attribute pa
+    on pa.attrelid = pc.oid
+    and pa.attnum > 0
+    and not pa.attisdropped
+where
+  d.deptype = 'e'
+  and pc.relkind in ('r', 'v', 'm', 'i')
+order by
+  e.extname,
+  n.nspname,
+  pc.relname,
+  pa.attname;
+ extension_name | schema_name | entity_name | attname
+------------------------------+--------------------------+--------------------------------------------------+-----------------------------------
+ address_standardizer_data_us | public | us_gaz | id
+ address_standardizer_data_us | public | us_gaz | is_custom
+ address_standardizer_data_us | public | us_gaz | seq
+ address_standardizer_data_us | public | us_gaz | stdword
+ address_standardizer_data_us | public | us_gaz | token
+ address_standardizer_data_us | public | us_gaz | word
+ address_standardizer_data_us | public | us_lex | id
+ address_standardizer_data_us | public | us_lex | is_custom
+ address_standardizer_data_us | public | us_lex | seq
+ address_standardizer_data_us | public | us_lex | stdword
+ address_standardizer_data_us | public | us_lex | token
+ address_standardizer_data_us | public | us_lex | word
+ address_standardizer_data_us | public | us_rules | id
+ address_standardizer_data_us | public | us_rules | is_custom
+ address_standardizer_data_us | public | us_rules | rule
+ hypopg | public | hypopg_hidden_indexes | am_name
+ hypopg | public | hypopg_hidden_indexes | index_name
+ hypopg | public | hypopg_hidden_indexes | indexrelid
+ hypopg | public | hypopg_hidden_indexes | is_hypo
+ hypopg | public | hypopg_hidden_indexes | schema_name
+ hypopg | public | hypopg_hidden_indexes | table_name
+ hypopg | public | hypopg_list_indexes | am_name
+ hypopg | public | hypopg_list_indexes | index_name
+ hypopg | public | hypopg_list_indexes | indexrelid
+ hypopg | public | hypopg_list_indexes | schema_name
+ hypopg | public | hypopg_list_indexes | table_name
+ pg_buffercache | public | pg_buffercache | bufferid
+ pg_buffercache | public | pg_buffercache | isdirty
+ pg_buffercache | public | pg_buffercache | pinning_backends
+ pg_buffercache | public | pg_buffercache | relblocknumber
+ pg_buffercache | public | pg_buffercache | reldatabase
+ pg_buffercache | public | pg_buffercache | relfilenode
+ pg_buffercache | public | pg_buffercache | relforknumber
+ pg_buffercache | public | pg_buffercache | reltablespace
+ pg_buffercache | public | pg_buffercache | usagecount
+ pg_net | net | _http_response | content
+ pg_net | net | _http_response | content_type
+ pg_net | net | _http_response | created
+ pg_net | net | _http_response | error_msg
+ pg_net | net | _http_response | headers
+ pg_net | net | _http_response | id
+ pg_net | net | _http_response | status_code
+ pg_net | net | _http_response | timed_out
+ pg_net | net | http_request_queue | body
+ pg_net | net | http_request_queue | headers
+ pg_net | net | http_request_queue | id
+ pg_net | net | http_request_queue | method
+ pg_net | net | http_request_queue | timeout_milliseconds
+ pg_net | net | http_request_queue | url
+ pg_repack | repack | primary_keys | indexrelid
+ pg_repack | repack | primary_keys | indrelid
+ pg_repack | repack | tables | alter_col_storage
+ pg_repack | repack | tables | ckey
+ pg_repack | repack | tables | ckid
+ pg_repack | repack | tables | copy_data
+ pg_repack | repack | tables | create_log
+ pg_repack | repack | tables | create_pktype
+ pg_repack | repack | tables | create_table
+ pg_repack | repack | tables | create_trigger
+ pg_repack | repack | tables | delete_log
+ pg_repack | repack | tables | drop_columns
+ pg_repack | repack | tables | enable_trigger
+ pg_repack | repack | tables | lock_table
+ pg_repack | repack | tables | pkid
+ pg_repack | repack |
tables | relid + pg_repack | repack | tables | relname + pg_repack | repack | tables | reltoastidxid + pg_repack | repack | tables | reltoastrelid + pg_repack | repack | tables | schemaname + pg_repack | repack | tables | sql_delete + pg_repack | repack | tables | sql_insert + pg_repack | repack | tables | sql_peek + pg_repack | repack | tables | sql_pop + pg_repack | repack | tables | sql_update + pg_repack | repack | tables | tablespace_orig + pg_stat_monitor | public | pg_stat_monitor | application_name + pg_stat_monitor | public | pg_stat_monitor | blk_read_time + pg_stat_monitor | public | pg_stat_monitor | blk_write_time + pg_stat_monitor | public | pg_stat_monitor | bucket + pg_stat_monitor | public | pg_stat_monitor | bucket_done + pg_stat_monitor | public | pg_stat_monitor | bucket_start_time + pg_stat_monitor | public | pg_stat_monitor | calls + pg_stat_monitor | public | pg_stat_monitor | client_ip + pg_stat_monitor | public | pg_stat_monitor | cmd_type + pg_stat_monitor | public | pg_stat_monitor | cmd_type_text + pg_stat_monitor | public | pg_stat_monitor | comments + pg_stat_monitor | public | pg_stat_monitor | cpu_sys_time + pg_stat_monitor | public | pg_stat_monitor | cpu_user_time + pg_stat_monitor | public | pg_stat_monitor | datname + pg_stat_monitor | public | pg_stat_monitor | dbid + pg_stat_monitor | public | pg_stat_monitor | elevel + pg_stat_monitor | public | pg_stat_monitor | jit_emission_count + pg_stat_monitor | public | pg_stat_monitor | jit_emission_time + pg_stat_monitor | public | pg_stat_monitor | jit_functions + pg_stat_monitor | public | pg_stat_monitor | jit_generation_time + pg_stat_monitor | public | pg_stat_monitor | jit_inlining_count + pg_stat_monitor | public | pg_stat_monitor | jit_inlining_time + pg_stat_monitor | public | pg_stat_monitor | jit_optimization_count + pg_stat_monitor | public | pg_stat_monitor | jit_optimization_time + pg_stat_monitor | public | pg_stat_monitor | local_blks_dirtied + pg_stat_monitor | public | pg_stat_monitor | local_blks_hit + pg_stat_monitor | public | pg_stat_monitor | local_blks_read + pg_stat_monitor | public | pg_stat_monitor | local_blks_written + pg_stat_monitor | public | pg_stat_monitor | max_exec_time + pg_stat_monitor | public | pg_stat_monitor | max_plan_time + pg_stat_monitor | public | pg_stat_monitor | mean_exec_time + pg_stat_monitor | public | pg_stat_monitor | mean_plan_time + pg_stat_monitor | public | pg_stat_monitor | message + pg_stat_monitor | public | pg_stat_monitor | min_exec_time + pg_stat_monitor | public | pg_stat_monitor | min_plan_time + pg_stat_monitor | public | pg_stat_monitor | pgsm_query_id + pg_stat_monitor | public | pg_stat_monitor | planid + pg_stat_monitor | public | pg_stat_monitor | plans + pg_stat_monitor | public | pg_stat_monitor | query + pg_stat_monitor | public | pg_stat_monitor | query_plan + pg_stat_monitor | public | pg_stat_monitor | queryid + pg_stat_monitor | public | pg_stat_monitor | relations + pg_stat_monitor | public | pg_stat_monitor | resp_calls + pg_stat_monitor | public | pg_stat_monitor | rows + pg_stat_monitor | public | pg_stat_monitor | shared_blks_dirtied + pg_stat_monitor | public | pg_stat_monitor | shared_blks_hit + pg_stat_monitor | public | pg_stat_monitor | shared_blks_read + pg_stat_monitor | public | pg_stat_monitor | shared_blks_written + pg_stat_monitor | public | pg_stat_monitor | sqlcode + pg_stat_monitor | public | pg_stat_monitor | stddev_exec_time + pg_stat_monitor | public | pg_stat_monitor | stddev_plan_time + pg_stat_monitor | 
public | pg_stat_monitor | temp_blk_read_time + pg_stat_monitor | public | pg_stat_monitor | temp_blk_write_time + pg_stat_monitor | public | pg_stat_monitor | temp_blks_read + pg_stat_monitor | public | pg_stat_monitor | temp_blks_written + pg_stat_monitor | public | pg_stat_monitor | top_query + pg_stat_monitor | public | pg_stat_monitor | top_queryid + pg_stat_monitor | public | pg_stat_monitor | toplevel + pg_stat_monitor | public | pg_stat_monitor | total_exec_time + pg_stat_monitor | public | pg_stat_monitor | total_plan_time + pg_stat_monitor | public | pg_stat_monitor | userid + pg_stat_monitor | public | pg_stat_monitor | username + pg_stat_monitor | public | pg_stat_monitor | wal_bytes + pg_stat_monitor | public | pg_stat_monitor | wal_fpi + pg_stat_monitor | public | pg_stat_monitor | wal_records + pg_stat_statements | extensions | pg_stat_statements | blk_read_time + pg_stat_statements | extensions | pg_stat_statements | blk_write_time + pg_stat_statements | extensions | pg_stat_statements | calls + pg_stat_statements | extensions | pg_stat_statements | dbid + pg_stat_statements | extensions | pg_stat_statements | jit_emission_count + pg_stat_statements | extensions | pg_stat_statements | jit_emission_time + pg_stat_statements | extensions | pg_stat_statements | jit_functions + pg_stat_statements | extensions | pg_stat_statements | jit_generation_time + pg_stat_statements | extensions | pg_stat_statements | jit_inlining_count + pg_stat_statements | extensions | pg_stat_statements | jit_inlining_time + pg_stat_statements | extensions | pg_stat_statements | jit_optimization_count + pg_stat_statements | extensions | pg_stat_statements | jit_optimization_time + pg_stat_statements | extensions | pg_stat_statements | local_blks_dirtied + pg_stat_statements | extensions | pg_stat_statements | local_blks_hit + pg_stat_statements | extensions | pg_stat_statements | local_blks_read + pg_stat_statements | extensions | pg_stat_statements | local_blks_written + pg_stat_statements | extensions | pg_stat_statements | max_exec_time + pg_stat_statements | extensions | pg_stat_statements | max_plan_time + pg_stat_statements | extensions | pg_stat_statements | mean_exec_time + pg_stat_statements | extensions | pg_stat_statements | mean_plan_time + pg_stat_statements | extensions | pg_stat_statements | min_exec_time + pg_stat_statements | extensions | pg_stat_statements | min_plan_time + pg_stat_statements | extensions | pg_stat_statements | plans + pg_stat_statements | extensions | pg_stat_statements | query + pg_stat_statements | extensions | pg_stat_statements | queryid + pg_stat_statements | extensions | pg_stat_statements | rows + pg_stat_statements | extensions | pg_stat_statements | shared_blks_dirtied + pg_stat_statements | extensions | pg_stat_statements | shared_blks_hit + pg_stat_statements | extensions | pg_stat_statements | shared_blks_read + pg_stat_statements | extensions | pg_stat_statements | shared_blks_written + pg_stat_statements | extensions | pg_stat_statements | stddev_exec_time + pg_stat_statements | extensions | pg_stat_statements | stddev_plan_time + pg_stat_statements | extensions | pg_stat_statements | temp_blk_read_time + pg_stat_statements | extensions | pg_stat_statements | temp_blk_write_time + pg_stat_statements | extensions | pg_stat_statements | temp_blks_read + pg_stat_statements | extensions | pg_stat_statements | temp_blks_written + pg_stat_statements | extensions | pg_stat_statements | toplevel + pg_stat_statements | extensions | pg_stat_statements | 
total_exec_time + pg_stat_statements | extensions | pg_stat_statements | total_plan_time + pg_stat_statements | extensions | pg_stat_statements | userid + pg_stat_statements | extensions | pg_stat_statements | wal_bytes + pg_stat_statements | extensions | pg_stat_statements | wal_fpi + pg_stat_statements | extensions | pg_stat_statements | wal_records + pg_stat_statements | extensions | pg_stat_statements_info | dealloc + pg_stat_statements | extensions | pg_stat_statements_info | stats_reset + pg_tle | pgtle | feature_info | feature + pg_tle | pgtle | feature_info | obj_identity + pg_tle | pgtle | feature_info | proname + pg_tle | pgtle | feature_info | schema_name + pgmq | pgmq | a_foo | archived_at + pgmq | pgmq | a_foo | enqueued_at + pgmq | pgmq | a_foo | message + pgmq | pgmq | a_foo | msg_id + pgmq | pgmq | a_foo | read_ct + pgmq | pgmq | a_foo | vt + pgmq | pgmq | meta | created_at + pgmq | pgmq | meta | is_partitioned + pgmq | pgmq | meta | is_unlogged + pgmq | pgmq | meta | queue_name + pgmq | pgmq | q_foo | enqueued_at + pgmq | pgmq | q_foo | message + pgmq | pgmq | q_foo | msg_id + pgmq | pgmq | q_foo | read_ct + pgmq | pgmq | q_foo | vt + pgsodium | pgsodium | decrypted_key | associated_data + pgsodium | pgsodium | decrypted_key | comment + pgsodium | pgsodium | decrypted_key | created + pgsodium | pgsodium | decrypted_key | decrypted_raw_key + pgsodium | pgsodium | decrypted_key | expires + pgsodium | pgsodium | decrypted_key | id + pgsodium | pgsodium | decrypted_key | key_context + pgsodium | pgsodium | decrypted_key | key_id + pgsodium | pgsodium | decrypted_key | key_type + pgsodium | pgsodium | decrypted_key | name + pgsodium | pgsodium | decrypted_key | parent_key + pgsodium | pgsodium | decrypted_key | raw_key + pgsodium | pgsodium | decrypted_key | raw_key_nonce + pgsodium | pgsodium | decrypted_key | status + pgsodium | pgsodium | key | associated_data + pgsodium | pgsodium | key | comment + pgsodium | pgsodium | key | created + pgsodium | pgsodium | key | expires + pgsodium | pgsodium | key | id + pgsodium | pgsodium | key | key_context + pgsodium | pgsodium | key | key_id + pgsodium | pgsodium | key | key_type + pgsodium | pgsodium | key | name + pgsodium | pgsodium | key | parent_key + pgsodium | pgsodium | key | raw_key + pgsodium | pgsodium | key | raw_key_nonce + pgsodium | pgsodium | key | status + pgsodium | pgsodium | key | user_data + pgsodium | pgsodium | mask_columns | associated_columns + pgsodium | pgsodium | mask_columns | attname + pgsodium | pgsodium | mask_columns | attrelid + pgsodium | pgsodium | mask_columns | format_type + pgsodium | pgsodium | mask_columns | key_id + pgsodium | pgsodium | mask_columns | key_id_column + pgsodium | pgsodium | mask_columns | nonce_column + pgsodium | pgsodium | masking_rule | associated_columns + pgsodium | pgsodium | masking_rule | attname + pgsodium | pgsodium | masking_rule | attnum + pgsodium | pgsodium | masking_rule | attrelid + pgsodium | pgsodium | masking_rule | col_description + pgsodium | pgsodium | masking_rule | format_type + pgsodium | pgsodium | masking_rule | key_id + pgsodium | pgsodium | masking_rule | key_id_column + pgsodium | pgsodium | masking_rule | nonce_column + pgsodium | pgsodium | masking_rule | priority + pgsodium | pgsodium | masking_rule | relname + pgsodium | pgsodium | masking_rule | relnamespace + pgsodium | pgsodium | masking_rule | security_invoker + pgsodium | pgsodium | masking_rule | view_name + pgsodium | pgsodium | valid_key | associated_data + pgsodium | pgsodium | 
valid_key | created + pgsodium | pgsodium | valid_key | expires + pgsodium | pgsodium | valid_key | id + pgsodium | pgsodium | valid_key | key_context + pgsodium | pgsodium | valid_key | key_id + pgsodium | pgsodium | valid_key | key_type + pgsodium | pgsodium | valid_key | name + pgsodium | pgsodium | valid_key | status + pgtap | public | pg_all_foreign_keys | fk_columns + pgtap | public | pg_all_foreign_keys | fk_constraint_name + pgtap | public | pg_all_foreign_keys | fk_schema_name + pgtap | public | pg_all_foreign_keys | fk_table_name + pgtap | public | pg_all_foreign_keys | fk_table_oid + pgtap | public | pg_all_foreign_keys | is_deferrable + pgtap | public | pg_all_foreign_keys | is_deferred + pgtap | public | pg_all_foreign_keys | match_type + pgtap | public | pg_all_foreign_keys | on_delete + pgtap | public | pg_all_foreign_keys | on_update + pgtap | public | pg_all_foreign_keys | pk_columns + pgtap | public | pg_all_foreign_keys | pk_constraint_name + pgtap | public | pg_all_foreign_keys | pk_index_name + pgtap | public | pg_all_foreign_keys | pk_schema_name + pgtap | public | pg_all_foreign_keys | pk_table_name + pgtap | public | pg_all_foreign_keys | pk_table_oid + pgtap | public | tap_funky | args + pgtap | public | tap_funky | is_definer + pgtap | public | tap_funky | is_strict + pgtap | public | tap_funky | is_visible + pgtap | public | tap_funky | kind + pgtap | public | tap_funky | langoid + pgtap | public | tap_funky | name + pgtap | public | tap_funky | oid + pgtap | public | tap_funky | owner + pgtap | public | tap_funky | returns + pgtap | public | tap_funky | returns_set + pgtap | public | tap_funky | schema + pgtap | public | tap_funky | volatility + postgis | public | geography_columns | coord_dimension + postgis | public | geography_columns | f_geography_column + postgis | public | geography_columns | f_table_catalog + postgis | public | geography_columns | f_table_name + postgis | public | geography_columns | f_table_schema + postgis | public | geography_columns | srid + postgis | public | geography_columns | type + postgis | public | geometry_columns | coord_dimension + postgis | public | geometry_columns | f_geometry_column + postgis | public | geometry_columns | f_table_catalog + postgis | public | geometry_columns | f_table_name + postgis | public | geometry_columns | f_table_schema + postgis | public | geometry_columns | srid + postgis | public | geometry_columns | type + postgis | public | spatial_ref_sys | auth_name + postgis | public | spatial_ref_sys | auth_srid + postgis | public | spatial_ref_sys | proj4text + postgis | public | spatial_ref_sys | srid + postgis | public | spatial_ref_sys | srtext + postgis_raster | public | raster_columns | blocksize_x + postgis_raster | public | raster_columns | blocksize_y + postgis_raster | public | raster_columns | extent + postgis_raster | public | raster_columns | nodata_values + postgis_raster | public | raster_columns | num_bands + postgis_raster | public | raster_columns | out_db + postgis_raster | public | raster_columns | pixel_types + postgis_raster | public | raster_columns | r_raster_column + postgis_raster | public | raster_columns | r_table_catalog + postgis_raster | public | raster_columns | r_table_name + postgis_raster | public | raster_columns | r_table_schema + postgis_raster | public | raster_columns | regular_blocking + postgis_raster | public | raster_columns | same_alignment + postgis_raster | public | raster_columns | scale_x + postgis_raster | public | raster_columns | scale_y + postgis_raster 
| public | raster_columns | spatial_index + postgis_raster | public | raster_columns | srid + postgis_raster | public | raster_overviews | o_raster_column + postgis_raster | public | raster_overviews | o_table_catalog + postgis_raster | public | raster_overviews | o_table_name + postgis_raster | public | raster_overviews | o_table_schema + postgis_raster | public | raster_overviews | overview_factor + postgis_raster | public | raster_overviews | r_raster_column + postgis_raster | public | raster_overviews | r_table_catalog + postgis_raster | public | raster_overviews | r_table_name + postgis_raster | public | raster_overviews | r_table_schema + postgis_tiger_geocoder | tiger | addr | arid + postgis_tiger_geocoder | tiger | addr | fromarmid + postgis_tiger_geocoder | tiger | addr | fromhn + postgis_tiger_geocoder | tiger | addr | fromtyp + postgis_tiger_geocoder | tiger | addr | gid + postgis_tiger_geocoder | tiger | addr | mtfcc + postgis_tiger_geocoder | tiger | addr | plus4 + postgis_tiger_geocoder | tiger | addr | side + postgis_tiger_geocoder | tiger | addr | statefp + postgis_tiger_geocoder | tiger | addr | tlid + postgis_tiger_geocoder | tiger | addr | toarmid + postgis_tiger_geocoder | tiger | addr | tohn + postgis_tiger_geocoder | tiger | addr | totyp + postgis_tiger_geocoder | tiger | addr | zip + postgis_tiger_geocoder | tiger | addrfeat | aridl + postgis_tiger_geocoder | tiger | addrfeat | aridr + postgis_tiger_geocoder | tiger | addrfeat | edge_mtfcc + postgis_tiger_geocoder | tiger | addrfeat | fullname + postgis_tiger_geocoder | tiger | addrfeat | gid + postgis_tiger_geocoder | tiger | addrfeat | lfromhn + postgis_tiger_geocoder | tiger | addrfeat | lfromtyp + postgis_tiger_geocoder | tiger | addrfeat | linearid + postgis_tiger_geocoder | tiger | addrfeat | ltohn + postgis_tiger_geocoder | tiger | addrfeat | ltotyp + postgis_tiger_geocoder | tiger | addrfeat | offsetl + postgis_tiger_geocoder | tiger | addrfeat | offsetr + postgis_tiger_geocoder | tiger | addrfeat | parityl + postgis_tiger_geocoder | tiger | addrfeat | parityr + postgis_tiger_geocoder | tiger | addrfeat | plus4l + postgis_tiger_geocoder | tiger | addrfeat | plus4r + postgis_tiger_geocoder | tiger | addrfeat | rfromhn + postgis_tiger_geocoder | tiger | addrfeat | rfromtyp + postgis_tiger_geocoder | tiger | addrfeat | rtohn + postgis_tiger_geocoder | tiger | addrfeat | rtotyp + postgis_tiger_geocoder | tiger | addrfeat | statefp + postgis_tiger_geocoder | tiger | addrfeat | the_geom + postgis_tiger_geocoder | tiger | addrfeat | tlid + postgis_tiger_geocoder | tiger | addrfeat | zipl + postgis_tiger_geocoder | tiger | addrfeat | zipr + postgis_tiger_geocoder | tiger | bg | aland + postgis_tiger_geocoder | tiger | bg | awater + postgis_tiger_geocoder | tiger | bg | bg_id + postgis_tiger_geocoder | tiger | bg | blkgrpce + postgis_tiger_geocoder | tiger | bg | countyfp + postgis_tiger_geocoder | tiger | bg | funcstat + postgis_tiger_geocoder | tiger | bg | gid + postgis_tiger_geocoder | tiger | bg | intptlat + postgis_tiger_geocoder | tiger | bg | intptlon + postgis_tiger_geocoder | tiger | bg | mtfcc + postgis_tiger_geocoder | tiger | bg | namelsad + postgis_tiger_geocoder | tiger | bg | statefp + postgis_tiger_geocoder | tiger | bg | the_geom + postgis_tiger_geocoder | tiger | bg | tractce + postgis_tiger_geocoder | tiger | county | aland + postgis_tiger_geocoder | tiger | county | awater + postgis_tiger_geocoder | tiger | county | cbsafp + postgis_tiger_geocoder | tiger | county | classfp + 
postgis_tiger_geocoder | tiger | county | cntyidfp + postgis_tiger_geocoder | tiger | county | countyfp + postgis_tiger_geocoder | tiger | county | countyns + postgis_tiger_geocoder | tiger | county | csafp + postgis_tiger_geocoder | tiger | county | funcstat + postgis_tiger_geocoder | tiger | county | gid + postgis_tiger_geocoder | tiger | county | intptlat + postgis_tiger_geocoder | tiger | county | intptlon + postgis_tiger_geocoder | tiger | county | lsad + postgis_tiger_geocoder | tiger | county | metdivfp + postgis_tiger_geocoder | tiger | county | mtfcc + postgis_tiger_geocoder | tiger | county | name + postgis_tiger_geocoder | tiger | county | namelsad + postgis_tiger_geocoder | tiger | county | statefp + postgis_tiger_geocoder | tiger | county | the_geom + postgis_tiger_geocoder | tiger | county_lookup | co_code + postgis_tiger_geocoder | tiger | county_lookup | name + postgis_tiger_geocoder | tiger | county_lookup | st_code + postgis_tiger_geocoder | tiger | county_lookup | state + postgis_tiger_geocoder | tiger | countysub_lookup | co_code + postgis_tiger_geocoder | tiger | countysub_lookup | county + postgis_tiger_geocoder | tiger | countysub_lookup | cs_code + postgis_tiger_geocoder | tiger | countysub_lookup | name + postgis_tiger_geocoder | tiger | countysub_lookup | st_code + postgis_tiger_geocoder | tiger | countysub_lookup | state + postgis_tiger_geocoder | tiger | cousub | aland + postgis_tiger_geocoder | tiger | cousub | awater + postgis_tiger_geocoder | tiger | cousub | classfp + postgis_tiger_geocoder | tiger | cousub | cnectafp + postgis_tiger_geocoder | tiger | cousub | cosbidfp + postgis_tiger_geocoder | tiger | cousub | countyfp + postgis_tiger_geocoder | tiger | cousub | cousubfp + postgis_tiger_geocoder | tiger | cousub | cousubns + postgis_tiger_geocoder | tiger | cousub | funcstat + postgis_tiger_geocoder | tiger | cousub | gid + postgis_tiger_geocoder | tiger | cousub | intptlat + postgis_tiger_geocoder | tiger | cousub | intptlon + postgis_tiger_geocoder | tiger | cousub | lsad + postgis_tiger_geocoder | tiger | cousub | mtfcc + postgis_tiger_geocoder | tiger | cousub | name + postgis_tiger_geocoder | tiger | cousub | namelsad + postgis_tiger_geocoder | tiger | cousub | nctadvfp + postgis_tiger_geocoder | tiger | cousub | nectafp + postgis_tiger_geocoder | tiger | cousub | statefp + postgis_tiger_geocoder | tiger | cousub | the_geom + postgis_tiger_geocoder | tiger | direction_lookup | abbrev + postgis_tiger_geocoder | tiger | direction_lookup | name + postgis_tiger_geocoder | tiger | edges | artpath + postgis_tiger_geocoder | tiger | edges | countyfp + postgis_tiger_geocoder | tiger | edges | deckedroad + postgis_tiger_geocoder | tiger | edges | divroad + postgis_tiger_geocoder | tiger | edges | exttyp + postgis_tiger_geocoder | tiger | edges | featcat + postgis_tiger_geocoder | tiger | edges | fullname + postgis_tiger_geocoder | tiger | edges | gcseflg + postgis_tiger_geocoder | tiger | edges | gid + postgis_tiger_geocoder | tiger | edges | hydroflg + postgis_tiger_geocoder | tiger | edges | lfromadd + postgis_tiger_geocoder | tiger | edges | ltoadd + postgis_tiger_geocoder | tiger | edges | mtfcc + postgis_tiger_geocoder | tiger | edges | offsetl + postgis_tiger_geocoder | tiger | edges | offsetr + postgis_tiger_geocoder | tiger | edges | olfflg + postgis_tiger_geocoder | tiger | edges | passflg + postgis_tiger_geocoder | tiger | edges | persist + postgis_tiger_geocoder | tiger | edges | railflg + postgis_tiger_geocoder | tiger | edges | rfromadd + 
postgis_tiger_geocoder | tiger | edges | roadflg + postgis_tiger_geocoder | tiger | edges | rtoadd + postgis_tiger_geocoder | tiger | edges | smid + postgis_tiger_geocoder | tiger | edges | statefp + postgis_tiger_geocoder | tiger | edges | tfidl + postgis_tiger_geocoder | tiger | edges | tfidr + postgis_tiger_geocoder | tiger | edges | the_geom + postgis_tiger_geocoder | tiger | edges | tlid + postgis_tiger_geocoder | tiger | edges | tnidf + postgis_tiger_geocoder | tiger | edges | tnidt + postgis_tiger_geocoder | tiger | edges | ttyp + postgis_tiger_geocoder | tiger | edges | zipl + postgis_tiger_geocoder | tiger | edges | zipr + postgis_tiger_geocoder | tiger | faces | aiannhce + postgis_tiger_geocoder | tiger | faces | aiannhce00 + postgis_tiger_geocoder | tiger | faces | aiannhfp + postgis_tiger_geocoder | tiger | faces | aiannhfp00 + postgis_tiger_geocoder | tiger | faces | anrcfp + postgis_tiger_geocoder | tiger | faces | anrcfp00 + postgis_tiger_geocoder | tiger | faces | atotal + postgis_tiger_geocoder | tiger | faces | blkgrpce + postgis_tiger_geocoder | tiger | faces | blkgrpce00 + postgis_tiger_geocoder | tiger | faces | blkgrpce20 + postgis_tiger_geocoder | tiger | faces | blockce + postgis_tiger_geocoder | tiger | faces | blockce00 + postgis_tiger_geocoder | tiger | faces | blockce20 + postgis_tiger_geocoder | tiger | faces | cbsafp + postgis_tiger_geocoder | tiger | faces | cd108fp + postgis_tiger_geocoder | tiger | faces | cd111fp + postgis_tiger_geocoder | tiger | faces | cnectafp + postgis_tiger_geocoder | tiger | faces | comptyp + postgis_tiger_geocoder | tiger | faces | comptyp00 + postgis_tiger_geocoder | tiger | faces | conctyfp + postgis_tiger_geocoder | tiger | faces | conctyfp00 + postgis_tiger_geocoder | tiger | faces | countyfp + postgis_tiger_geocoder | tiger | faces | countyfp00 + postgis_tiger_geocoder | tiger | faces | countyfp20 + postgis_tiger_geocoder | tiger | faces | cousubfp + postgis_tiger_geocoder | tiger | faces | cousubfp00 + postgis_tiger_geocoder | tiger | faces | csafp + postgis_tiger_geocoder | tiger | faces | elsdlea + postgis_tiger_geocoder | tiger | faces | elsdlea00 + postgis_tiger_geocoder | tiger | faces | gid + postgis_tiger_geocoder | tiger | faces | intptlat + postgis_tiger_geocoder | tiger | faces | intptlon + postgis_tiger_geocoder | tiger | faces | lwflag + postgis_tiger_geocoder | tiger | faces | metdivfp + postgis_tiger_geocoder | tiger | faces | nctadvfp + postgis_tiger_geocoder | tiger | faces | nectafp + postgis_tiger_geocoder | tiger | faces | offset + postgis_tiger_geocoder | tiger | faces | placefp + postgis_tiger_geocoder | tiger | faces | placefp00 + postgis_tiger_geocoder | tiger | faces | puma5ce + postgis_tiger_geocoder | tiger | faces | puma5ce00 + postgis_tiger_geocoder | tiger | faces | scsdlea + postgis_tiger_geocoder | tiger | faces | scsdlea00 + postgis_tiger_geocoder | tiger | faces | sldlst + postgis_tiger_geocoder | tiger | faces | sldlst00 + postgis_tiger_geocoder | tiger | faces | sldust + postgis_tiger_geocoder | tiger | faces | sldust00 + postgis_tiger_geocoder | tiger | faces | statefp + postgis_tiger_geocoder | tiger | faces | statefp00 + postgis_tiger_geocoder | tiger | faces | statefp20 + postgis_tiger_geocoder | tiger | faces | submcdfp + postgis_tiger_geocoder | tiger | faces | submcdfp00 + postgis_tiger_geocoder | tiger | faces | tazce + postgis_tiger_geocoder | tiger | faces | tazce00 + postgis_tiger_geocoder | tiger | faces | tblkgpce + postgis_tiger_geocoder | tiger | faces | tfid + 
postgis_tiger_geocoder | tiger | faces | the_geom + postgis_tiger_geocoder | tiger | faces | tractce + postgis_tiger_geocoder | tiger | faces | tractce00 + postgis_tiger_geocoder | tiger | faces | tractce20 + postgis_tiger_geocoder | tiger | faces | trsubce + postgis_tiger_geocoder | tiger | faces | trsubce00 + postgis_tiger_geocoder | tiger | faces | trsubfp + postgis_tiger_geocoder | tiger | faces | trsubfp00 + postgis_tiger_geocoder | tiger | faces | ttractce + postgis_tiger_geocoder | tiger | faces | uace + postgis_tiger_geocoder | tiger | faces | uace00 + postgis_tiger_geocoder | tiger | faces | ugace + postgis_tiger_geocoder | tiger | faces | ugace00 + postgis_tiger_geocoder | tiger | faces | unsdlea + postgis_tiger_geocoder | tiger | faces | unsdlea00 + postgis_tiger_geocoder | tiger | faces | vtdst + postgis_tiger_geocoder | tiger | faces | vtdst00 + postgis_tiger_geocoder | tiger | faces | zcta5ce + postgis_tiger_geocoder | tiger | faces | zcta5ce00 + postgis_tiger_geocoder | tiger | featnames | fullname + postgis_tiger_geocoder | tiger | featnames | gid + postgis_tiger_geocoder | tiger | featnames | linearid + postgis_tiger_geocoder | tiger | featnames | mtfcc + postgis_tiger_geocoder | tiger | featnames | name + postgis_tiger_geocoder | tiger | featnames | paflag + postgis_tiger_geocoder | tiger | featnames | predir + postgis_tiger_geocoder | tiger | featnames | predirabrv + postgis_tiger_geocoder | tiger | featnames | prequal + postgis_tiger_geocoder | tiger | featnames | prequalabr + postgis_tiger_geocoder | tiger | featnames | pretyp + postgis_tiger_geocoder | tiger | featnames | pretypabrv + postgis_tiger_geocoder | tiger | featnames | statefp + postgis_tiger_geocoder | tiger | featnames | sufdir + postgis_tiger_geocoder | tiger | featnames | sufdirabrv + postgis_tiger_geocoder | tiger | featnames | sufqual + postgis_tiger_geocoder | tiger | featnames | sufqualabr + postgis_tiger_geocoder | tiger | featnames | suftyp + postgis_tiger_geocoder | tiger | featnames | suftypabrv + postgis_tiger_geocoder | tiger | featnames | tlid + postgis_tiger_geocoder | tiger | geocode_settings | category + postgis_tiger_geocoder | tiger | geocode_settings | name + postgis_tiger_geocoder | tiger | geocode_settings | setting + postgis_tiger_geocoder | tiger | geocode_settings | short_desc + postgis_tiger_geocoder | tiger | geocode_settings | unit + postgis_tiger_geocoder | tiger | geocode_settings_default | category + postgis_tiger_geocoder | tiger | geocode_settings_default | name + postgis_tiger_geocoder | tiger | geocode_settings_default | setting + postgis_tiger_geocoder | tiger | geocode_settings_default | short_desc + postgis_tiger_geocoder | tiger | geocode_settings_default | unit + postgis_tiger_geocoder | tiger | loader_lookuptables | columns_exclude + postgis_tiger_geocoder | tiger | loader_lookuptables | insert_mode + postgis_tiger_geocoder | tiger | loader_lookuptables | level_county + postgis_tiger_geocoder | tiger | loader_lookuptables | level_nation + postgis_tiger_geocoder | tiger | loader_lookuptables | level_state + postgis_tiger_geocoder | tiger | loader_lookuptables | load + postgis_tiger_geocoder | tiger | loader_lookuptables | lookup_name + postgis_tiger_geocoder | tiger | loader_lookuptables | post_load_process + postgis_tiger_geocoder | tiger | loader_lookuptables | pre_load_process + postgis_tiger_geocoder | tiger | loader_lookuptables | process_order + postgis_tiger_geocoder | tiger | loader_lookuptables | single_geom_mode + postgis_tiger_geocoder | tiger | 
loader_lookuptables | single_mode + postgis_tiger_geocoder | tiger | loader_lookuptables | table_name + postgis_tiger_geocoder | tiger | loader_lookuptables | website_root_override + postgis_tiger_geocoder | tiger | loader_platform | county_process_command + postgis_tiger_geocoder | tiger | loader_platform | declare_sect + postgis_tiger_geocoder | tiger | loader_platform | environ_set_command + postgis_tiger_geocoder | tiger | loader_platform | loader + postgis_tiger_geocoder | tiger | loader_platform | os + postgis_tiger_geocoder | tiger | loader_platform | path_sep + postgis_tiger_geocoder | tiger | loader_platform | pgbin + postgis_tiger_geocoder | tiger | loader_platform | psql + postgis_tiger_geocoder | tiger | loader_platform | unzip_command + postgis_tiger_geocoder | tiger | loader_platform | wget + postgis_tiger_geocoder | tiger | loader_variables | data_schema + postgis_tiger_geocoder | tiger | loader_variables | staging_fold + postgis_tiger_geocoder | tiger | loader_variables | staging_schema + postgis_tiger_geocoder | tiger | loader_variables | tiger_year + postgis_tiger_geocoder | tiger | loader_variables | website_root + postgis_tiger_geocoder | tiger | pagc_gaz | id + postgis_tiger_geocoder | tiger | pagc_gaz | is_custom + postgis_tiger_geocoder | tiger | pagc_gaz | seq + postgis_tiger_geocoder | tiger | pagc_gaz | stdword + postgis_tiger_geocoder | tiger | pagc_gaz | token + postgis_tiger_geocoder | tiger | pagc_gaz | word + postgis_tiger_geocoder | tiger | pagc_lex | id + postgis_tiger_geocoder | tiger | pagc_lex | is_custom + postgis_tiger_geocoder | tiger | pagc_lex | seq + postgis_tiger_geocoder | tiger | pagc_lex | stdword + postgis_tiger_geocoder | tiger | pagc_lex | token + postgis_tiger_geocoder | tiger | pagc_lex | word + postgis_tiger_geocoder | tiger | pagc_rules | id + postgis_tiger_geocoder | tiger | pagc_rules | is_custom + postgis_tiger_geocoder | tiger | pagc_rules | rule + postgis_tiger_geocoder | tiger | place | aland + postgis_tiger_geocoder | tiger | place | awater + postgis_tiger_geocoder | tiger | place | classfp + postgis_tiger_geocoder | tiger | place | cpi + postgis_tiger_geocoder | tiger | place | funcstat + postgis_tiger_geocoder | tiger | place | gid + postgis_tiger_geocoder | tiger | place | intptlat + postgis_tiger_geocoder | tiger | place | intptlon + postgis_tiger_geocoder | tiger | place | lsad + postgis_tiger_geocoder | tiger | place | mtfcc + postgis_tiger_geocoder | tiger | place | name + postgis_tiger_geocoder | tiger | place | namelsad + postgis_tiger_geocoder | tiger | place | pcicbsa + postgis_tiger_geocoder | tiger | place | pcinecta + postgis_tiger_geocoder | tiger | place | placefp + postgis_tiger_geocoder | tiger | place | placens + postgis_tiger_geocoder | tiger | place | plcidfp + postgis_tiger_geocoder | tiger | place | statefp + postgis_tiger_geocoder | tiger | place | the_geom + postgis_tiger_geocoder | tiger | place_lookup | name + postgis_tiger_geocoder | tiger | place_lookup | pl_code + postgis_tiger_geocoder | tiger | place_lookup | st_code + postgis_tiger_geocoder | tiger | place_lookup | state + postgis_tiger_geocoder | tiger | secondary_unit_lookup | abbrev + postgis_tiger_geocoder | tiger | secondary_unit_lookup | name + postgis_tiger_geocoder | tiger | state | aland + postgis_tiger_geocoder | tiger | state | awater + postgis_tiger_geocoder | tiger | state | division + postgis_tiger_geocoder | tiger | state | funcstat + postgis_tiger_geocoder | tiger | state | gid + postgis_tiger_geocoder | tiger | state | intptlat + 
postgis_tiger_geocoder | tiger | state | intptlon + postgis_tiger_geocoder | tiger | state | lsad + postgis_tiger_geocoder | tiger | state | mtfcc + postgis_tiger_geocoder | tiger | state | name + postgis_tiger_geocoder | tiger | state | region + postgis_tiger_geocoder | tiger | state | statefp + postgis_tiger_geocoder | tiger | state | statens + postgis_tiger_geocoder | tiger | state | stusps + postgis_tiger_geocoder | tiger | state | the_geom + postgis_tiger_geocoder | tiger | state_lookup | abbrev + postgis_tiger_geocoder | tiger | state_lookup | name + postgis_tiger_geocoder | tiger | state_lookup | st_code + postgis_tiger_geocoder | tiger | state_lookup | statefp + postgis_tiger_geocoder | tiger | street_type_lookup | abbrev + postgis_tiger_geocoder | tiger | street_type_lookup | is_hw + postgis_tiger_geocoder | tiger | street_type_lookup | name + postgis_tiger_geocoder | tiger | tabblock | aland + postgis_tiger_geocoder | tiger | tabblock | awater + postgis_tiger_geocoder | tiger | tabblock | blockce + postgis_tiger_geocoder | tiger | tabblock | countyfp + postgis_tiger_geocoder | tiger | tabblock | funcstat + postgis_tiger_geocoder | tiger | tabblock | gid + postgis_tiger_geocoder | tiger | tabblock | intptlat + postgis_tiger_geocoder | tiger | tabblock | intptlon + postgis_tiger_geocoder | tiger | tabblock | mtfcc + postgis_tiger_geocoder | tiger | tabblock | name + postgis_tiger_geocoder | tiger | tabblock | statefp + postgis_tiger_geocoder | tiger | tabblock | tabblock_id + postgis_tiger_geocoder | tiger | tabblock | the_geom + postgis_tiger_geocoder | tiger | tabblock | tractce + postgis_tiger_geocoder | tiger | tabblock | uace + postgis_tiger_geocoder | tiger | tabblock | ur + postgis_tiger_geocoder | tiger | tabblock20 | aland + postgis_tiger_geocoder | tiger | tabblock20 | awater + postgis_tiger_geocoder | tiger | tabblock20 | blockce + postgis_tiger_geocoder | tiger | tabblock20 | countyfp + postgis_tiger_geocoder | tiger | tabblock20 | funcstat + postgis_tiger_geocoder | tiger | tabblock20 | geoid + postgis_tiger_geocoder | tiger | tabblock20 | housing + postgis_tiger_geocoder | tiger | tabblock20 | intptlat + postgis_tiger_geocoder | tiger | tabblock20 | intptlon + postgis_tiger_geocoder | tiger | tabblock20 | mtfcc + postgis_tiger_geocoder | tiger | tabblock20 | name + postgis_tiger_geocoder | tiger | tabblock20 | pop + postgis_tiger_geocoder | tiger | tabblock20 | statefp + postgis_tiger_geocoder | tiger | tabblock20 | the_geom + postgis_tiger_geocoder | tiger | tabblock20 | tractce + postgis_tiger_geocoder | tiger | tabblock20 | uace + postgis_tiger_geocoder | tiger | tabblock20 | uatype + postgis_tiger_geocoder | tiger | tabblock20 | ur + postgis_tiger_geocoder | tiger | tract | aland + postgis_tiger_geocoder | tiger | tract | awater + postgis_tiger_geocoder | tiger | tract | countyfp + postgis_tiger_geocoder | tiger | tract | funcstat + postgis_tiger_geocoder | tiger | tract | gid + postgis_tiger_geocoder | tiger | tract | intptlat + postgis_tiger_geocoder | tiger | tract | intptlon + postgis_tiger_geocoder | tiger | tract | mtfcc + postgis_tiger_geocoder | tiger | tract | name + postgis_tiger_geocoder | tiger | tract | namelsad + postgis_tiger_geocoder | tiger | tract | statefp + postgis_tiger_geocoder | tiger | tract | the_geom + postgis_tiger_geocoder | tiger | tract | tract_id + postgis_tiger_geocoder | tiger | tract | tractce + postgis_tiger_geocoder | tiger | zcta5 | aland + postgis_tiger_geocoder | tiger | zcta5 | awater + postgis_tiger_geocoder | tiger | zcta5 
| classfp + postgis_tiger_geocoder | tiger | zcta5 | funcstat + postgis_tiger_geocoder | tiger | zcta5 | gid + postgis_tiger_geocoder | tiger | zcta5 | intptlat + postgis_tiger_geocoder | tiger | zcta5 | intptlon + postgis_tiger_geocoder | tiger | zcta5 | mtfcc + postgis_tiger_geocoder | tiger | zcta5 | partflg + postgis_tiger_geocoder | tiger | zcta5 | statefp + postgis_tiger_geocoder | tiger | zcta5 | the_geom + postgis_tiger_geocoder | tiger | zcta5 | zcta5ce + postgis_tiger_geocoder | tiger | zip_lookup | cnt + postgis_tiger_geocoder | tiger | zip_lookup | co_code + postgis_tiger_geocoder | tiger | zip_lookup | county + postgis_tiger_geocoder | tiger | zip_lookup | cousub + postgis_tiger_geocoder | tiger | zip_lookup | cs_code + postgis_tiger_geocoder | tiger | zip_lookup | pl_code + postgis_tiger_geocoder | tiger | zip_lookup | place + postgis_tiger_geocoder | tiger | zip_lookup | st_code + postgis_tiger_geocoder | tiger | zip_lookup | state + postgis_tiger_geocoder | tiger | zip_lookup | zip + postgis_tiger_geocoder | tiger | zip_lookup_all | cnt + postgis_tiger_geocoder | tiger | zip_lookup_all | co_code + postgis_tiger_geocoder | tiger | zip_lookup_all | county + postgis_tiger_geocoder | tiger | zip_lookup_all | cousub + postgis_tiger_geocoder | tiger | zip_lookup_all | cs_code + postgis_tiger_geocoder | tiger | zip_lookup_all | pl_code + postgis_tiger_geocoder | tiger | zip_lookup_all | place + postgis_tiger_geocoder | tiger | zip_lookup_all | st_code + postgis_tiger_geocoder | tiger | zip_lookup_all | state + postgis_tiger_geocoder | tiger | zip_lookup_all | zip + postgis_tiger_geocoder | tiger | zip_lookup_base | city + postgis_tiger_geocoder | tiger | zip_lookup_base | county + postgis_tiger_geocoder | tiger | zip_lookup_base | state + postgis_tiger_geocoder | tiger | zip_lookup_base | statefp + postgis_tiger_geocoder | tiger | zip_lookup_base | zip + postgis_tiger_geocoder | tiger | zip_state | statefp + postgis_tiger_geocoder | tiger | zip_state | stusps + postgis_tiger_geocoder | tiger | zip_state | zip + postgis_tiger_geocoder | tiger | zip_state_loc | place + postgis_tiger_geocoder | tiger | zip_state_loc | statefp + postgis_tiger_geocoder | tiger | zip_state_loc | stusps + postgis_tiger_geocoder | tiger | zip_state_loc | zip + postgis_topology | topology | layer | child_id + postgis_topology | topology | layer | feature_column + postgis_topology | topology | layer | feature_type + postgis_topology | topology | layer | layer_id + postgis_topology | topology | layer | level + postgis_topology | topology | layer | schema_name + postgis_topology | topology | layer | table_name + postgis_topology | topology | layer | topology_id + postgis_topology | topology | topology | hasz + postgis_topology | topology | topology | id + postgis_topology | topology | topology | name + postgis_topology | topology | topology | precision + postgis_topology | topology | topology | srid + supabase_vault | vault | secrets | created_at + supabase_vault | vault | secrets | description + supabase_vault | vault | secrets | id + supabase_vault | vault | secrets | key_id + supabase_vault | vault | secrets | name + supabase_vault | vault | secrets | nonce + supabase_vault | vault | secrets | secret + supabase_vault | vault | secrets | updated_at + timescaledb | _timescaledb_cache | cache_inval_bgw_job | + timescaledb | _timescaledb_cache | cache_inval_extension | + timescaledb | _timescaledb_cache | cache_inval_hypertable | + timescaledb | _timescaledb_catalog | chunk | compressed_chunk_id + timescaledb 
| _timescaledb_catalog | chunk | creation_time + timescaledb | _timescaledb_catalog | chunk | dropped + timescaledb | _timescaledb_catalog | chunk | hypertable_id + timescaledb | _timescaledb_catalog | chunk | id + timescaledb | _timescaledb_catalog | chunk | osm_chunk + timescaledb | _timescaledb_catalog | chunk | schema_name + timescaledb | _timescaledb_catalog | chunk | status + timescaledb | _timescaledb_catalog | chunk | table_name + timescaledb | _timescaledb_catalog | chunk_column_stats | chunk_id + timescaledb | _timescaledb_catalog | chunk_column_stats | column_name + timescaledb | _timescaledb_catalog | chunk_column_stats | hypertable_id + timescaledb | _timescaledb_catalog | chunk_column_stats | id + timescaledb | _timescaledb_catalog | chunk_column_stats | range_end + timescaledb | _timescaledb_catalog | chunk_column_stats | range_start + timescaledb | _timescaledb_catalog | chunk_column_stats | valid + timescaledb | _timescaledb_catalog | chunk_constraint | chunk_id + timescaledb | _timescaledb_catalog | chunk_constraint | constraint_name + timescaledb | _timescaledb_catalog | chunk_constraint | dimension_slice_id + timescaledb | _timescaledb_catalog | chunk_constraint | hypertable_constraint_name + timescaledb | _timescaledb_catalog | chunk_index | chunk_id + timescaledb | _timescaledb_catalog | chunk_index | hypertable_id + timescaledb | _timescaledb_catalog | chunk_index | hypertable_index_name + timescaledb | _timescaledb_catalog | chunk_index | index_name + timescaledb | _timescaledb_catalog | compression_algorithm | description + timescaledb | _timescaledb_catalog | compression_algorithm | id + timescaledb | _timescaledb_catalog | compression_algorithm | name + timescaledb | _timescaledb_catalog | compression_algorithm | version + timescaledb | _timescaledb_catalog | compression_chunk_size | chunk_id + timescaledb | _timescaledb_catalog | compression_chunk_size | compressed_chunk_id + timescaledb | _timescaledb_catalog | compression_chunk_size | compressed_heap_size + timescaledb | _timescaledb_catalog | compression_chunk_size | compressed_index_size + timescaledb | _timescaledb_catalog | compression_chunk_size | compressed_toast_size + timescaledb | _timescaledb_catalog | compression_chunk_size | numrows_frozen_immediately + timescaledb | _timescaledb_catalog | compression_chunk_size | numrows_post_compression + timescaledb | _timescaledb_catalog | compression_chunk_size | numrows_pre_compression + timescaledb | _timescaledb_catalog | compression_chunk_size | uncompressed_heap_size + timescaledb | _timescaledb_catalog | compression_chunk_size | uncompressed_index_size + timescaledb | _timescaledb_catalog | compression_chunk_size | uncompressed_toast_size + timescaledb | _timescaledb_catalog | compression_settings | orderby + timescaledb | _timescaledb_catalog | compression_settings | orderby_desc + timescaledb | _timescaledb_catalog | compression_settings | orderby_nullsfirst + timescaledb | _timescaledb_catalog | compression_settings | relid + timescaledb | _timescaledb_catalog | compression_settings | segmentby + timescaledb | _timescaledb_catalog | continuous_agg | direct_view_name + timescaledb | _timescaledb_catalog | continuous_agg | direct_view_schema + timescaledb | _timescaledb_catalog | continuous_agg | finalized + timescaledb | _timescaledb_catalog | continuous_agg | mat_hypertable_id + timescaledb | _timescaledb_catalog | continuous_agg | materialized_only + timescaledb | _timescaledb_catalog | continuous_agg | parent_mat_hypertable_id + timescaledb | 
_timescaledb_catalog | continuous_agg | partial_view_name + timescaledb | _timescaledb_catalog | continuous_agg | partial_view_schema + timescaledb | _timescaledb_catalog | continuous_agg | raw_hypertable_id + timescaledb | _timescaledb_catalog | continuous_agg | user_view_name + timescaledb | _timescaledb_catalog | continuous_agg | user_view_schema + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan | end_ts + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan | mat_hypertable_id + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan | start_ts + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan | user_view_definition + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | config + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | end_ts + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | mat_hypertable_id + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | start_ts + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | status + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | step_id + timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | type + timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_fixed_width + timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_func + timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_offset + timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_origin + timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_timezone + timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_width + timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | mat_hypertable_id + timescaledb | _timescaledb_catalog | continuous_aggs_hypertable_invalidation_log | greatest_modified_value + timescaledb | _timescaledb_catalog | continuous_aggs_hypertable_invalidation_log | hypertable_id + timescaledb | _timescaledb_catalog | continuous_aggs_hypertable_invalidation_log | lowest_modified_value + timescaledb | _timescaledb_catalog | continuous_aggs_invalidation_threshold | hypertable_id + timescaledb | _timescaledb_catalog | continuous_aggs_invalidation_threshold | watermark + timescaledb | _timescaledb_catalog | continuous_aggs_materialization_invalidation_log | greatest_modified_value + timescaledb | _timescaledb_catalog | continuous_aggs_materialization_invalidation_log | lowest_modified_value + timescaledb | _timescaledb_catalog | continuous_aggs_materialization_invalidation_log | materialization_id + timescaledb | _timescaledb_catalog | continuous_aggs_watermark | mat_hypertable_id + timescaledb | _timescaledb_catalog | continuous_aggs_watermark | watermark + timescaledb | _timescaledb_catalog | dimension | aligned + timescaledb | _timescaledb_catalog | dimension | column_name + timescaledb | _timescaledb_catalog | dimension | column_type + timescaledb | _timescaledb_catalog | dimension | compress_interval_length + timescaledb | _timescaledb_catalog | dimension | hypertable_id + timescaledb | _timescaledb_catalog | dimension | id + timescaledb | _timescaledb_catalog | dimension | integer_now_func + timescaledb | _timescaledb_catalog | dimension | integer_now_func_schema + timescaledb | _timescaledb_catalog | dimension | interval_length + timescaledb | _timescaledb_catalog | dimension | num_slices + timescaledb | 
_timescaledb_catalog | dimension | partitioning_func + timescaledb | _timescaledb_catalog | dimension | partitioning_func_schema + timescaledb | _timescaledb_catalog | dimension_slice | dimension_id + timescaledb | _timescaledb_catalog | dimension_slice | id + timescaledb | _timescaledb_catalog | dimension_slice | range_end + timescaledb | _timescaledb_catalog | dimension_slice | range_start + timescaledb | _timescaledb_catalog | hypertable | associated_schema_name + timescaledb | _timescaledb_catalog | hypertable | associated_table_prefix + timescaledb | _timescaledb_catalog | hypertable | chunk_sizing_func_name + timescaledb | _timescaledb_catalog | hypertable | chunk_sizing_func_schema + timescaledb | _timescaledb_catalog | hypertable | chunk_target_size + timescaledb | _timescaledb_catalog | hypertable | compressed_hypertable_id + timescaledb | _timescaledb_catalog | hypertable | compression_state + timescaledb | _timescaledb_catalog | hypertable | id + timescaledb | _timescaledb_catalog | hypertable | num_dimensions + timescaledb | _timescaledb_catalog | hypertable | schema_name + timescaledb | _timescaledb_catalog | hypertable | status + timescaledb | _timescaledb_catalog | hypertable | table_name + timescaledb | _timescaledb_catalog | metadata | include_in_telemetry + timescaledb | _timescaledb_catalog | metadata | key + timescaledb | _timescaledb_catalog | metadata | value + timescaledb | _timescaledb_catalog | tablespace | hypertable_id + timescaledb | _timescaledb_catalog | tablespace | id + timescaledb | _timescaledb_catalog | tablespace | tablespace_name + timescaledb | _timescaledb_catalog | telemetry_event | body + timescaledb | _timescaledb_catalog | telemetry_event | created + timescaledb | _timescaledb_catalog | telemetry_event | tag + timescaledb | _timescaledb_config | bgw_job | application_name + timescaledb | _timescaledb_config | bgw_job | check_name + timescaledb | _timescaledb_config | bgw_job | check_schema + timescaledb | _timescaledb_config | bgw_job | config + timescaledb | _timescaledb_config | bgw_job | fixed_schedule + timescaledb | _timescaledb_config | bgw_job | hypertable_id + timescaledb | _timescaledb_config | bgw_job | id + timescaledb | _timescaledb_config | bgw_job | initial_start + timescaledb | _timescaledb_config | bgw_job | max_retries + timescaledb | _timescaledb_config | bgw_job | max_runtime + timescaledb | _timescaledb_config | bgw_job | owner + timescaledb | _timescaledb_config | bgw_job | proc_name + timescaledb | _timescaledb_config | bgw_job | proc_schema + timescaledb | _timescaledb_config | bgw_job | retry_period + timescaledb | _timescaledb_config | bgw_job | schedule_interval + timescaledb | _timescaledb_config | bgw_job | scheduled + timescaledb | _timescaledb_config | bgw_job | timezone + timescaledb | _timescaledb_internal | bgw_job_stat | consecutive_crashes + timescaledb | _timescaledb_internal | bgw_job_stat | consecutive_failures + timescaledb | _timescaledb_internal | bgw_job_stat | flags + timescaledb | _timescaledb_internal | bgw_job_stat | job_id + timescaledb | _timescaledb_internal | bgw_job_stat | last_finish + timescaledb | _timescaledb_internal | bgw_job_stat | last_run_success + timescaledb | _timescaledb_internal | bgw_job_stat | last_start + timescaledb | _timescaledb_internal | bgw_job_stat | last_successful_finish + timescaledb | _timescaledb_internal | bgw_job_stat | next_start + timescaledb | _timescaledb_internal | bgw_job_stat | total_crashes + timescaledb | _timescaledb_internal | bgw_job_stat | total_duration 
+ timescaledb | _timescaledb_internal | bgw_job_stat | total_duration_failures + timescaledb | _timescaledb_internal | bgw_job_stat | total_failures + timescaledb | _timescaledb_internal | bgw_job_stat | total_runs + timescaledb | _timescaledb_internal | bgw_job_stat | total_successes + timescaledb | _timescaledb_internal | bgw_job_stat_history | data + timescaledb | _timescaledb_internal | bgw_job_stat_history | execution_finish + timescaledb | _timescaledb_internal | bgw_job_stat_history | execution_start + timescaledb | _timescaledb_internal | bgw_job_stat_history | id + timescaledb | _timescaledb_internal | bgw_job_stat_history | job_id + timescaledb | _timescaledb_internal | bgw_job_stat_history | pid + timescaledb | _timescaledb_internal | bgw_job_stat_history | succeeded + timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | chunk_id + timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | job_id + timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | last_time_job_run + timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | num_times_job_run + timescaledb | _timescaledb_internal | compressed_chunk_stats | chunk_name + timescaledb | _timescaledb_internal | compressed_chunk_stats | chunk_schema + timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_heap_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_index_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_toast_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_total_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | compression_status + timescaledb | _timescaledb_internal | compressed_chunk_stats | hypertable_name + timescaledb | _timescaledb_internal | compressed_chunk_stats | hypertable_schema + timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_heap_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_index_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_toast_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_total_size + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | chunk_id + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | chunk_name + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | chunk_schema + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | compressed_heap_size + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | compressed_index_size + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | compressed_toast_size + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | compressed_total_size + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | heap_bytes + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | hypertable_id + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | hypertable_name + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | hypertable_schema + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | index_bytes + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | toast_bytes + timescaledb | _timescaledb_internal | hypertable_chunk_local_size | total_bytes + timescaledb | timescaledb_experimental | policies | config + timescaledb | timescaledb_experimental | policies | hypertable_name + timescaledb | 
timescaledb_experimental | policies | hypertable_schema + timescaledb | timescaledb_experimental | policies | proc_name + timescaledb | timescaledb_experimental | policies | proc_schema + timescaledb | timescaledb_experimental | policies | relation_name + timescaledb | timescaledb_experimental | policies | relation_schema + timescaledb | timescaledb_experimental | policies | schedule_interval + timescaledb | timescaledb_information | chunk_compression_settings | chunk + timescaledb | timescaledb_information | chunk_compression_settings | hypertable + timescaledb | timescaledb_information | chunk_compression_settings | orderby + timescaledb | timescaledb_information | chunk_compression_settings | segmentby + timescaledb | timescaledb_information | chunks | chunk_creation_time + timescaledb | timescaledb_information | chunks | chunk_name + timescaledb | timescaledb_information | chunks | chunk_schema + timescaledb | timescaledb_information | chunks | chunk_tablespace + timescaledb | timescaledb_information | chunks | hypertable_name + timescaledb | timescaledb_information | chunks | hypertable_schema + timescaledb | timescaledb_information | chunks | is_compressed + timescaledb | timescaledb_information | chunks | primary_dimension + timescaledb | timescaledb_information | chunks | primary_dimension_type + timescaledb | timescaledb_information | chunks | range_end + timescaledb | timescaledb_information | chunks | range_end_integer + timescaledb | timescaledb_information | chunks | range_start + timescaledb | timescaledb_information | chunks | range_start_integer + timescaledb | timescaledb_information | compression_settings | attname + timescaledb | timescaledb_information | compression_settings | hypertable_name + timescaledb | timescaledb_information | compression_settings | hypertable_schema + timescaledb | timescaledb_information | compression_settings | orderby_asc + timescaledb | timescaledb_information | compression_settings | orderby_column_index + timescaledb | timescaledb_information | compression_settings | orderby_nullsfirst + timescaledb | timescaledb_information | compression_settings | segmentby_column_index + timescaledb | timescaledb_information | continuous_aggregates | compression_enabled + timescaledb | timescaledb_information | continuous_aggregates | finalized + timescaledb | timescaledb_information | continuous_aggregates | hypertable_name + timescaledb | timescaledb_information | continuous_aggregates | hypertable_schema + timescaledb | timescaledb_information | continuous_aggregates | materialization_hypertable_name + timescaledb | timescaledb_information | continuous_aggregates | materialization_hypertable_schema + timescaledb | timescaledb_information | continuous_aggregates | materialized_only + timescaledb | timescaledb_information | continuous_aggregates | view_definition + timescaledb | timescaledb_information | continuous_aggregates | view_name + timescaledb | timescaledb_information | continuous_aggregates | view_owner + timescaledb | timescaledb_information | continuous_aggregates | view_schema + timescaledb | timescaledb_information | dimensions | column_name + timescaledb | timescaledb_information | dimensions | column_type + timescaledb | timescaledb_information | dimensions | dimension_number + timescaledb | timescaledb_information | dimensions | dimension_type + timescaledb | timescaledb_information | dimensions | hypertable_name + timescaledb | timescaledb_information | dimensions | hypertable_schema + timescaledb | timescaledb_information | 
dimensions | integer_interval + timescaledb | timescaledb_information | dimensions | integer_now_func + timescaledb | timescaledb_information | dimensions | num_partitions + timescaledb | timescaledb_information | dimensions | time_interval + timescaledb | timescaledb_information | hypertable_compression_settings | compress_interval_length + timescaledb | timescaledb_information | hypertable_compression_settings | hypertable + timescaledb | timescaledb_information | hypertable_compression_settings | orderby + timescaledb | timescaledb_information | hypertable_compression_settings | segmentby + timescaledb | timescaledb_information | hypertables | compression_enabled + timescaledb | timescaledb_information | hypertables | hypertable_name + timescaledb | timescaledb_information | hypertables | hypertable_schema + timescaledb | timescaledb_information | hypertables | num_chunks + timescaledb | timescaledb_information | hypertables | num_dimensions + timescaledb | timescaledb_information | hypertables | owner + timescaledb | timescaledb_information | hypertables | tablespaces + timescaledb | timescaledb_information | job_errors | err_message + timescaledb | timescaledb_information | job_errors | finish_time + timescaledb | timescaledb_information | job_errors | job_id + timescaledb | timescaledb_information | job_errors | pid + timescaledb | timescaledb_information | job_errors | proc_name + timescaledb | timescaledb_information | job_errors | proc_schema + timescaledb | timescaledb_information | job_errors | sqlerrcode + timescaledb | timescaledb_information | job_errors | start_time + timescaledb | timescaledb_information | job_history | config + timescaledb | timescaledb_information | job_history | err_message + timescaledb | timescaledb_information | job_history | finish_time + timescaledb | timescaledb_information | job_history | id + timescaledb | timescaledb_information | job_history | job_id + timescaledb | timescaledb_information | job_history | pid + timescaledb | timescaledb_information | job_history | proc_name + timescaledb | timescaledb_information | job_history | proc_schema + timescaledb | timescaledb_information | job_history | sqlerrcode + timescaledb | timescaledb_information | job_history | start_time + timescaledb | timescaledb_information | job_history | succeeded + timescaledb | timescaledb_information | job_stats | hypertable_name + timescaledb | timescaledb_information | job_stats | hypertable_schema + timescaledb | timescaledb_information | job_stats | job_id + timescaledb | timescaledb_information | job_stats | job_status + timescaledb | timescaledb_information | job_stats | last_run_duration + timescaledb | timescaledb_information | job_stats | last_run_started_at + timescaledb | timescaledb_information | job_stats | last_run_status + timescaledb | timescaledb_information | job_stats | last_successful_finish + timescaledb | timescaledb_information | job_stats | next_start + timescaledb | timescaledb_information | job_stats | total_failures + timescaledb | timescaledb_information | job_stats | total_runs + timescaledb | timescaledb_information | job_stats | total_successes + timescaledb | timescaledb_information | jobs | application_name + timescaledb | timescaledb_information | jobs | check_name + timescaledb | timescaledb_information | jobs | check_schema + timescaledb | timescaledb_information | jobs | config + timescaledb | timescaledb_information | jobs | fixed_schedule + timescaledb | timescaledb_information | jobs | hypertable_name + timescaledb | 
timescaledb_information | jobs | hypertable_schema + timescaledb | timescaledb_information | jobs | initial_start + timescaledb | timescaledb_information | jobs | job_id + timescaledb | timescaledb_information | jobs | max_retries + timescaledb | timescaledb_information | jobs | max_runtime + timescaledb | timescaledb_information | jobs | next_start + timescaledb | timescaledb_information | jobs | owner + timescaledb | timescaledb_information | jobs | proc_name + timescaledb | timescaledb_information | jobs | proc_schema + timescaledb | timescaledb_information | jobs | retry_period + timescaledb | timescaledb_information | jobs | schedule_interval + timescaledb | timescaledb_information | jobs | scheduled + wrappers | public | wrappers_fdw_stats | bytes_in + wrappers | public | wrappers_fdw_stats | bytes_out + wrappers | public | wrappers_fdw_stats | create_times + wrappers | public | wrappers_fdw_stats | created_at + wrappers | public | wrappers_fdw_stats | fdw_name + wrappers | public | wrappers_fdw_stats | metadata + wrappers | public | wrappers_fdw_stats | rows_in + wrappers | public | wrappers_fdw_stats | rows_out + wrappers | public | wrappers_fdw_stats | updated_at +(1097 rows) + diff --git a/postgres_15.8.1.044/nix/tests/expected/z_15_pg_stat_monitor.out b/postgres_15.8.1.044/nix/tests/expected/z_15_pg_stat_monitor.out new file mode 100644 index 0000000..f4d9069 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/z_15_pg_stat_monitor.out @@ -0,0 +1,10 @@ +select + * +from + pg_stat_monitor +where + false; + bucket | bucket_start_time | userid | username | dbid | datname | client_ip | pgsm_query_id | queryid | toplevel | top_queryid | query | comments | planid | query_plan | top_query | application_name | relations | cmd_type | cmd_type_text | elevel | sqlcode | message | calls | total_exec_time | min_exec_time | max_exec_time | mean_exec_time | stddev_exec_time | rows | shared_blks_hit | shared_blks_read | shared_blks_dirtied | shared_blks_written | local_blks_hit | local_blks_read | local_blks_dirtied | local_blks_written | temp_blks_read | temp_blks_written | blk_read_time | blk_write_time | temp_blk_read_time | temp_blk_write_time | resp_calls | cpu_user_time | cpu_sys_time | wal_records | wal_fpi | wal_bytes | bucket_done | plans | total_plan_time | min_plan_time | max_plan_time | mean_plan_time | stddev_plan_time | jit_functions | jit_generation_time | jit_inlining_count | jit_inlining_time | jit_optimization_count | jit_optimization_time | jit_emission_count | jit_emission_time +--------+-------------------+--------+----------+------+---------+-----------+---------------+---------+----------+-------------+-------+----------+--------+------------+-----------+------------------+-----------+----------+---------------+--------+---------+---------+-------+-----------------+---------------+---------------+----------------+------------------+------+-----------------+------------------+---------------------+---------------------+----------------+-----------------+--------------------+--------------------+----------------+-------------------+---------------+----------------+--------------------+---------------------+------------+---------------+--------------+-------------+---------+-----------+-------------+-------+-----------------+---------------+---------------+----------------+------------------+---------------+---------------------+--------------------+-------------------+------------------------+-----------------------+--------------------+------------------- +(0 rows) + 
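The `where false` query above is the interface-pinning idiom these fixtures rely on: it returns no rows, but psql still prints the full column list of pg_stat_monitor, so any column that an extension upgrade adds, drops, or renames surfaces as a diff against this expected file. A minimal sketch of the same idiom (the relation name is illustrative, not from this suite):

select * from some_extension_view where false;
-- psql emits the complete column header followed by "(0 rows)";
-- a regression diff then flags any change to the column set.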
diff --git a/postgres_15.8.1.044/nix/tests/expected/z_15_pgroonga.out b/postgres_15.8.1.044/nix/tests/expected/z_15_pgroonga.out new file mode 100644 index 0000000..5ceeed2 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/z_15_pgroonga.out @@ -0,0 +1,76 @@ +create schema v; +create table v.roon( + id serial primary key, + content text +); +with tokenizers as ( + select + x + from + jsonb_array_elements( + (select pgroonga_command('tokenizer_list'))::jsonb + ) x(val) + limit + 1 + offset + 1 -- first record is unrelated and not stable +) +select + t.x::jsonb ->> 'name' +from + jsonb_array_elements((select * from tokenizers)) t(x) +order by + t.x::jsonb ->> 'name'; + ?column? +--------------------------------------------- + TokenBigram + TokenBigramIgnoreBlank + TokenBigramIgnoreBlankSplitSymbol + TokenBigramIgnoreBlankSplitSymbolAlpha + TokenBigramIgnoreBlankSplitSymbolAlphaDigit + TokenBigramSplitSymbol + TokenBigramSplitSymbolAlpha + TokenBigramSplitSymbolAlphaDigit + TokenDelimit + TokenDelimitNull + TokenDocumentVectorBM25 + TokenDocumentVectorTFIDF + TokenMecab + TokenNgram + TokenPattern + TokenRegexp + TokenTable + TokenTrigram + TokenUnigram +(19 rows) + +insert into v.roon (content) +values + ('Hello World'), + ('PostgreSQL with PGroonga is a thing'), + ('This is a full-text search test'), + ('PGroonga supports various languages'); +-- Create default index +create index pgroonga_index on v.roon using pgroonga (content); +-- Create mecab tokenizer index since we had a bug with this one once +create index pgroonga_index_mecab on v.roon using pgroonga (content) with (tokenizer='TokenMecab'); +-- Run some queries to test the index +select * from v.roon where content &@~ 'Hello'; + id | content +----+------------- + 1 | Hello World +(1 row) + +select * from v.roon where content &@~ 'powerful'; + id | content +----+--------- +(0 rows) + +select * from v.roon where content &@~ 'supports'; + id | content +----+------------------------------------- + 4 | PGroonga supports various languages +(1 row) + +drop schema v cascade; +NOTICE: drop cascades to table v.roon diff --git a/postgres_15.8.1.044/nix/tests/expected/z_15_pgvector.out b/postgres_15.8.1.044/nix/tests/expected/z_15_pgvector.out new file mode 100644 index 0000000..6564be5 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/z_15_pgvector.out @@ -0,0 +1,90 @@ +create schema v; +create table v.items( + id serial primary key, + embedding vector(3), + half_embedding halfvec(3), + bit_embedding bit(3), + sparse_embedding sparsevec(3) +); +-- vector ops +create index on v.items using hnsw (embedding vector_l2_ops); +create index on v.items using hnsw (embedding vector_cosine_ops); +create index on v.items using hnsw (embedding vector_l1_ops); +create index on v.items using ivfflat (embedding vector_l2_ops); +NOTICE: ivfflat index created with little data +DETAIL: This will cause low recall. +HINT: Drop the index until the table has more data. +create index on v.items using ivfflat (embedding vector_cosine_ops); +NOTICE: ivfflat index created with little data +DETAIL: This will cause low recall. +HINT: Drop the index until the table has more data. 
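-- A note on the NOTICEs above: ivfflat derives its list centroids from the rows
-- present at build time, so an index built on an empty table has low recall,
-- while hnsw builds incrementally and raises no such warning. A minimal sketch
-- of the usual order of operations (`lists` is pgvector's real tuning option,
-- the value here is illustrative):
--   -- bulk-load rows first, then build the ivfflat index
--   create index on v.items using ivfflat (embedding vector_l2_ops) with (lists = 100);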
+-- halfvec ops +create index on v.items using hnsw (half_embedding halfvec_l2_ops); +create index on v.items using hnsw (half_embedding halfvec_cosine_ops); +create index on v.items using hnsw (half_embedding halfvec_l1_ops); +create index on v.items using ivfflat (half_embedding halfvec_l2_ops); +NOTICE: ivfflat index created with little data +DETAIL: This will cause low recall. +HINT: Drop the index until the table has more data. +create index on v.items using ivfflat (half_embedding halfvec_cosine_ops); +NOTICE: ivfflat index created with little data +DETAIL: This will cause low recall. +HINT: Drop the index until the table has more data. +-- sparsevec +create index on v.items using hnsw (sparse_embedding sparsevec_l2_ops); +create index on v.items using hnsw (sparse_embedding sparsevec_cosine_ops); +create index on v.items using hnsw (sparse_embedding sparsevec_l1_ops); +-- bit ops +create index on v.items using hnsw (bit_embedding bit_hamming_ops); +create index on v.items using ivfflat (bit_embedding bit_hamming_ops); +NOTICE: ivfflat index created with little data +DETAIL: This will cause low recall. +HINT: Drop the index until the table has more data. +-- Populate some records +insert into v.items( + embedding, + half_embedding, + bit_embedding, + sparse_embedding +) +values + ('[1,2,3]', '[1,2,3]', '101', '{1:4}/3'), + ('[2,3,4]', '[2,3,4]', '010', '{1:7,3:0}/3'); +-- Test op types +select + * +from + v.items +order by + embedding <-> '[2,3,5]', + embedding <=> '[2,3,5]', + embedding <+> '[2,3,5]', + embedding <#> '[2,3,5]', + half_embedding <-> '[2,3,5]', + half_embedding <=> '[2,3,5]', + half_embedding <+> '[2,3,5]', + half_embedding <#> '[2,3,5]', + sparse_embedding <-> '{2:4,3:1}/3', + sparse_embedding <=> '{2:4,3:1}/3', + sparse_embedding <+> '{2:4,3:1}/3', + sparse_embedding <#> '{2:4,3:1}/3', + bit_embedding <~> '011'; + id | embedding | half_embedding | bit_embedding | sparse_embedding +----+-----------+----------------+---------------+------------------ + 2 | [2,3,4] | [2,3,4] | 010 | {1:7}/3 + 1 | [1,2,3] | [1,2,3] | 101 | {1:4}/3 +(2 rows) + +select + avg(embedding), + avg(half_embedding) +from + v.items; + avg | avg +---------------+--------------- + [1.5,2.5,3.5] | [1.5,2.5,3.5] +(1 row) + +-- Cleanup +drop schema v cascade; +NOTICE: drop cascades to table v.items diff --git a/postgres_15.8.1.044/nix/tests/expected/z_15_plv8.out b/postgres_15.8.1.044/nix/tests/expected/z_15_plv8.out new file mode 100644 index 0000000..bf909f0 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/z_15_plv8.out @@ -0,0 +1,23 @@ +/* +This test is excluded from the Postgres 17 suite because it does not ship +with the Supabase PG17 image +*/ +create extension if not exists plv8; +NOTICE: extension "plv8" already exists, skipping +create schema v; +-- create a function to perform some JavaScript operations +create function v.multiply_numbers(a integer, b integer) + returns integer + language plv8 +as $$ + return a * b; +$$; +select + v.multiply_numbers(3, 4); + multiply_numbers +------------------ + 12 +(1 row) + +drop schema v cascade; +NOTICE: drop cascades to function v.multiply_numbers(integer,integer) diff --git a/postgres_15.8.1.044/nix/tests/expected/z_15_rum.out b/postgres_15.8.1.044/nix/tests/expected/z_15_rum.out new file mode 100644 index 0000000..1296bef --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/z_15_rum.out @@ -0,0 +1,41 @@ +/* +This extension is excluded from oriole-17 because it uses an unsupported index type +*/ +create schema v; +create table 
v.test_rum( + t text, + a tsvector +); +create trigger tsvectorupdate + before update or insert on v.test_rum + for each row + execute procedure + tsvector_update_trigger( + 'a', + 'pg_catalog.english', + 't' + ); +insert into v.test_rum(t) +values + ('the situation is most beautiful'), + ('it is a beautiful'), + ('it looks like a beautiful place'); +create index rumidx on v.test_rum using rum (a rum_tsvector_ops); +select + t, + round(a <=> to_tsquery('english', 'beautiful | place')) as rank +from + v.test_rum +where + a @@ to_tsquery('english', 'beautiful | place') +order by + a <=> to_tsquery('english', 'beautiful | place'); + t | rank +---------------------------------+------ + it looks like a beautiful place | 8 + the situation is most beautiful | 16 + it is a beautiful | 16 +(3 rows) + +drop schema v cascade; +NOTICE: drop cascades to table v.test_rum diff --git a/postgres_15.8.1.044/nix/tests/expected/z_15_timescale.out b/postgres_15.8.1.044/nix/tests/expected/z_15_timescale.out new file mode 100644 index 0000000..d0c0f2f --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/z_15_timescale.out @@ -0,0 +1,53 @@ +/* +This test is excluded from the Postgres 17 suite because it does not ship +with the Supabase PG17 image +*/ +create extension if not exists timescaledb; +NOTICE: extension "timescaledb" already exists, skipping +-- Confirm we're running the apache version +show timescaledb.license; + timescaledb.license +--------------------- + apache +(1 row) + +-- Create schema v +create schema v; +-- Create a table in the v schema +create table v.sensor_data ( + time timestamptz not null, + sensor_id int not null, + temperature double precision not null, + humidity double precision not null +); +-- Convert the table to a hypertable +select create_hypertable('v.sensor_data', 'time'); + create_hypertable +--------------------- + (1,v,sensor_data,t) +(1 row) + +-- Insert some data into the hypertable +insert into v.sensor_data (time, sensor_id, temperature, humidity) +values + ('2024-08-09', 1, 22.5, 60.2), + ('2024-08-08', 1, 23.0, 59.1), + ('2024-08-07', 2, 21.7, 63.3); +-- Select data from the hypertable +select + * +from + v.sensor_data; + time | sensor_id | temperature | humidity +------------------------------+-----------+-------------+---------- + Fri Aug 09 00:00:00 2024 PDT | 1 | 22.5 | 60.2 + Thu Aug 08 00:00:00 2024 PDT | 1 | 23 | 59.1 + Wed Aug 07 00:00:00 2024 PDT | 2 | 21.7 | 63.3 +(3 rows) + +-- Drop schema v and all its entities +drop schema v cascade; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to table v.sensor_data +drop cascades to table _timescaledb_internal._hyper_1_1_chunk +drop cascades to table _timescaledb_internal._hyper_1_2_chunk diff --git a/postgres_15.8.1.044/nix/tests/expected/z_17_ext_interface.out b/postgres_15.8.1.044/nix/tests/expected/z_17_ext_interface.out new file mode 100644 index 0000000..37f417f --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/z_17_ext_interface.out @@ -0,0 +1,5342 @@ +/* + +The purpose of this test is to monitor the SQL interface exposed +by Postgres extensions so we have to manually review/approve any differences +that emerge as versions change.
+ +*/ +/* + +List all extensions that are not enabled +If a new entry shows up in this list, that means a new extension has been +added and you should `create extension ...` to enable it in ./nix/tests/prime + +*/ +select + name +from + pg_available_extensions +where + installed_version is null +order by + name asc; + name +------------------------ + pg_cron + postgis_tiger_geocoder + tsm_system_time +(3 rows) + +/* + +Monitor relocatability and config of each extension +- lesson learned from pg_cron + +*/ +select + extname as extension_name, + extrelocatable as is_relocatable +from + pg_extension +order by + extname asc; + extension_name | is_relocatable +------------------------------+---------------- + address_standardizer | t + address_standardizer_data_us | t + amcheck | t + autoinc | t + bloom | t + btree_gin | t + btree_gist | t + citext | t + cube | t + dblink | t + dict_int | t + dict_xsyn | t + earthdistance | t + file_fdw | t + fuzzystrmatch | t + hstore | t + http | f + hypopg | t + index_advisor | t + insert_username | t + intagg | t + intarray | t + isn | t + lo | t + ltree | t + moddatetime | t + orioledb | t + pageinspect | t + pg_backtrace | t + pg_buffercache | t + pg_freespacemap | t + pg_graphql | f + pg_hashids | t + pg_jsonschema | f + pg_net | f + pg_prewarm | t + pg_repack | f + pg_stat_monitor | t + pg_stat_statements | t + pg_surgery | t + pg_tle | f + pg_trgm | t + pg_visibility | t + pg_walinspect | t + pgaudit | t + pgcrypto | t + pgjwt | f + pgmq | f + pgroonga | f + pgroonga_database | f + pgrouting | t + pgrowlocks | t + pgsodium | f + pgstattuple | t + pgtap | t + plpgsql | f + plpgsql_check | f + postgis | f + postgis_raster | f + postgis_sfcgal | t + postgis_topology | f + postgres_fdw | t + refint | t + rum | t + seg | t + sslinfo | t + supabase_vault | f + tablefunc | t + tcn | t + tsm_system_rows | t + unaccent | t + uuid-ossp | t + vector | t + wrappers | f + xml2 | f +(75 rows) + +/* + +Monitor extension public function interface + +*/ +select + e.extname as extension_name, + n.nspname as schema_name, + p.proname as function_name, + pg_catalog.pg_get_function_identity_arguments(p.oid) as argument_types, + pg_catalog.pg_get_function_result(p.oid) as return_type +from + pg_catalog.pg_proc p + join pg_catalog.pg_namespace n + on n.oid = p.pronamespace + join pg_catalog.pg_depend d + on d.objid = p.oid + join pg_catalog.pg_extension e + on e.oid = d.refobjid +where + d.deptype = 'e' + -- Filter out changes between pg15 and pg16 from extensions that ship with postgres + -- new in pg16 + and not (e.extname = 'fuzzystrmatch' and p.proname = 'daitch_mokotoff') + and not (e.extname = 'pageinspect' and p.proname = 'bt_multi_page_stats') + and not (e.extname = 'pg_buffercache' and p.proname = 'pg_buffercache_summary') + and not (e.extname = 'pg_buffercache' and p.proname = 'pg_buffercache_usage_counts') + and not (e.extname = 'pg_walinspect' and p.proname = 'pg_get_wal_block_info') + -- removed in pg16 + and not (e.extname = 'pg_walinspect' and p.proname = 'pg_get_wal_records_info_till_end_of_wal') + and not (e.extname = 'pg_walinspect' and p.proname = 'pg_get_wal_stats_till_end_of_wal') + -- changed in pg16 - output signature added a column + and not (e.extname = 'pageinspect' and p.proname = 'brin_page_items') +order by + e.extname, + n.nspname, + p.proname, + pg_catalog.pg_get_function_identity_arguments(p.oid); + extension_name | schema_name | function_name | argument_types | return_type 
+----------------------+----------------+--------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + address_standardizer | public | parse_address | text, OUT num text, OUT street text, OUT street2 text, OUT address1 text, OUT city text, OUT state text, OUT zip text, OUT zipplus text, OUT country text | record + address_standardizer | public | standardize_address | lextab text, gaztab text, rultab text, address text | stdaddr + address_standardizer | public | standardize_address | lextab text, gaztab text, rultab text, micro text, macro text | stdaddr + amcheck | public | bt_index_check | index regclass | void + amcheck | public | bt_index_check | index regclass, heapallindexed boolean | void + amcheck | public | bt_index_check | index regclass, heapallindexed boolean, checkunique boolean | void + amcheck | public | bt_index_parent_check | index regclass | void + amcheck | public | bt_index_parent_check | index regclass, heapallindexed boolean | void + amcheck | public | bt_index_parent_check | index regclass, heapallindexed boolean, rootdescend boolean | void + amcheck | public | bt_index_parent_check | index regclass, heapallindexed boolean, rootdescend boolean, 
checkunique boolean | void + amcheck | public | verify_heapam | relation regclass, on_error_stop boolean, check_toast boolean, skip text, startblock bigint, endblock bigint, OUT blkno bigint, OUT offnum integer, OUT attnum integer, OUT msg text | SETOF record + autoinc | public | autoinc | | trigger + bloom | public | blhandler | internal | index_am_handler + btree_gin | public | gin_btree_consistent | internal, smallint, anyelement, integer, internal, internal | boolean + btree_gin | public | gin_compare_prefix_anyenum | anyenum, anyenum, smallint, internal | integer + btree_gin | public | gin_compare_prefix_bit | bit, bit, smallint, internal | integer + btree_gin | public | gin_compare_prefix_bool | boolean, boolean, smallint, internal | integer + btree_gin | public | gin_compare_prefix_bpchar | character, character, smallint, internal | integer + btree_gin | public | gin_compare_prefix_bytea | bytea, bytea, smallint, internal | integer + btree_gin | public | gin_compare_prefix_char | "char", "char", smallint, internal | integer + btree_gin | public | gin_compare_prefix_cidr | cidr, cidr, smallint, internal | integer + btree_gin | public | gin_compare_prefix_date | date, date, smallint, internal | integer + btree_gin | public | gin_compare_prefix_float4 | real, real, smallint, internal | integer + btree_gin | public | gin_compare_prefix_float8 | double precision, double precision, smallint, internal | integer + btree_gin | public | gin_compare_prefix_inet | inet, inet, smallint, internal | integer + btree_gin | public | gin_compare_prefix_int2 | smallint, smallint, smallint, internal | integer + btree_gin | public | gin_compare_prefix_int4 | integer, integer, smallint, internal | integer + btree_gin | public | gin_compare_prefix_int8 | bigint, bigint, smallint, internal | integer + btree_gin | public | gin_compare_prefix_interval | interval, interval, smallint, internal | integer + btree_gin | public | gin_compare_prefix_macaddr | macaddr, macaddr, smallint, internal | integer + btree_gin | public | gin_compare_prefix_macaddr8 | macaddr8, macaddr8, smallint, internal | integer + btree_gin | public | gin_compare_prefix_money | money, money, smallint, internal | integer + btree_gin | public | gin_compare_prefix_name | name, name, smallint, internal | integer + btree_gin | public | gin_compare_prefix_numeric | numeric, numeric, smallint, internal | integer + btree_gin | public | gin_compare_prefix_oid | oid, oid, smallint, internal | integer + btree_gin | public | gin_compare_prefix_text | text, text, smallint, internal | integer + btree_gin | public | gin_compare_prefix_time | time without time zone, time without time zone, smallint, internal | integer + btree_gin | public | gin_compare_prefix_timestamp | timestamp without time zone, timestamp without time zone, smallint, internal | integer + btree_gin | public | gin_compare_prefix_timestamptz | timestamp with time zone, timestamp with time zone, smallint, internal | integer + btree_gin | public | gin_compare_prefix_timetz | time with time zone, time with time zone, smallint, internal | integer + btree_gin | public | gin_compare_prefix_uuid | uuid, uuid, smallint, internal | integer + btree_gin | public | gin_compare_prefix_varbit | bit varying, bit varying, smallint, internal | integer + btree_gin | public | gin_enum_cmp | anyenum, anyenum | integer + btree_gin | public | gin_extract_query_anyenum | anyenum, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_bit | bit, internal, smallint, internal, 
internal | internal + btree_gin | public | gin_extract_query_bool | boolean, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_bpchar | character, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_bytea | bytea, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_char | "char", internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_cidr | cidr, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_date | date, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_float4 | real, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_float8 | double precision, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_inet | inet, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_int2 | smallint, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_int4 | integer, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_int8 | bigint, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_interval | interval, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_macaddr | macaddr, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_macaddr8 | macaddr8, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_money | money, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_name | name, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_numeric | numeric, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_oid | oid, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_text | text, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_time | time without time zone, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_timestamp | timestamp without time zone, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_timestamptz | timestamp with time zone, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_timetz | time with time zone, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_uuid | uuid, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_query_varbit | bit varying, internal, smallint, internal, internal | internal + btree_gin | public | gin_extract_value_anyenum | anyenum, internal | internal + btree_gin | public | gin_extract_value_bit | bit, internal | internal + btree_gin | public | gin_extract_value_bool | boolean, internal | internal + btree_gin | public | gin_extract_value_bpchar | character, internal | internal + btree_gin | public | gin_extract_value_bytea | bytea, internal | internal + btree_gin | public | gin_extract_value_char | "char", internal | internal + btree_gin | public | gin_extract_value_cidr | cidr, internal | internal + btree_gin | public | gin_extract_value_date | date, internal | internal + btree_gin | public | gin_extract_value_float4 | 
real, internal | internal + btree_gin | public | gin_extract_value_float8 | double precision, internal | internal + btree_gin | public | gin_extract_value_inet | inet, internal | internal + btree_gin | public | gin_extract_value_int2 | smallint, internal | internal + btree_gin | public | gin_extract_value_int4 | integer, internal | internal + btree_gin | public | gin_extract_value_int8 | bigint, internal | internal + btree_gin | public | gin_extract_value_interval | interval, internal | internal + btree_gin | public | gin_extract_value_macaddr | macaddr, internal | internal + btree_gin | public | gin_extract_value_macaddr8 | macaddr8, internal | internal + btree_gin | public | gin_extract_value_money | money, internal | internal + btree_gin | public | gin_extract_value_name | name, internal | internal + btree_gin | public | gin_extract_value_numeric | numeric, internal | internal + btree_gin | public | gin_extract_value_oid | oid, internal | internal + btree_gin | public | gin_extract_value_text | text, internal | internal + btree_gin | public | gin_extract_value_time | time without time zone, internal | internal + btree_gin | public | gin_extract_value_timestamp | timestamp without time zone, internal | internal + btree_gin | public | gin_extract_value_timestamptz | timestamp with time zone, internal | internal + btree_gin | public | gin_extract_value_timetz | time with time zone, internal | internal + btree_gin | public | gin_extract_value_uuid | uuid, internal | internal + btree_gin | public | gin_extract_value_varbit | bit varying, internal | internal + btree_gin | public | gin_numeric_cmp | numeric, numeric | integer + btree_gist | public | cash_dist | money, money | money + btree_gist | public | date_dist | date, date | integer + btree_gist | public | float4_dist | real, real | real + btree_gist | public | float8_dist | double precision, double precision | double precision + btree_gist | public | gbt_bit_compress | internal | internal + btree_gist | public | gbt_bit_consistent | internal, bit, smallint, oid, internal | boolean + btree_gist | public | gbt_bit_penalty | internal, internal, internal | internal + btree_gist | public | gbt_bit_picksplit | internal, internal | internal + btree_gist | public | gbt_bit_same | gbtreekey_var, gbtreekey_var, internal | internal + btree_gist | public | gbt_bit_union | internal, internal | gbtreekey_var + btree_gist | public | gbt_bool_compress | internal | internal + btree_gist | public | gbt_bool_consistent | internal, boolean, smallint, oid, internal | boolean + btree_gist | public | gbt_bool_fetch | internal | internal + btree_gist | public | gbt_bool_penalty | internal, internal, internal | internal + btree_gist | public | gbt_bool_picksplit | internal, internal | internal + btree_gist | public | gbt_bool_same | gbtreekey2, gbtreekey2, internal | internal + btree_gist | public | gbt_bool_union | internal, internal | gbtreekey2 + btree_gist | public | gbt_bpchar_compress | internal | internal + btree_gist | public | gbt_bpchar_consistent | internal, character, smallint, oid, internal | boolean + btree_gist | public | gbt_bytea_compress | internal | internal + btree_gist | public | gbt_bytea_consistent | internal, bytea, smallint, oid, internal | boolean + btree_gist | public | gbt_bytea_penalty | internal, internal, internal | internal + btree_gist | public | gbt_bytea_picksplit | internal, internal | internal + btree_gist | public | gbt_bytea_same | gbtreekey_var, gbtreekey_var, internal | internal + btree_gist | public | gbt_bytea_union | 
internal, internal | gbtreekey_var + btree_gist | public | gbt_cash_compress | internal | internal + btree_gist | public | gbt_cash_consistent | internal, money, smallint, oid, internal | boolean + btree_gist | public | gbt_cash_distance | internal, money, smallint, oid, internal | double precision + btree_gist | public | gbt_cash_fetch | internal | internal + btree_gist | public | gbt_cash_penalty | internal, internal, internal | internal + btree_gist | public | gbt_cash_picksplit | internal, internal | internal + btree_gist | public | gbt_cash_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_cash_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_date_compress | internal | internal + btree_gist | public | gbt_date_consistent | internal, date, smallint, oid, internal | boolean + btree_gist | public | gbt_date_distance | internal, date, smallint, oid, internal | double precision + btree_gist | public | gbt_date_fetch | internal | internal + btree_gist | public | gbt_date_penalty | internal, internal, internal | internal + btree_gist | public | gbt_date_picksplit | internal, internal | internal + btree_gist | public | gbt_date_same | gbtreekey8, gbtreekey8, internal | internal + btree_gist | public | gbt_date_union | internal, internal | gbtreekey8 + btree_gist | public | gbt_decompress | internal | internal + btree_gist | public | gbt_enum_compress | internal | internal + btree_gist | public | gbt_enum_consistent | internal, anyenum, smallint, oid, internal | boolean + btree_gist | public | gbt_enum_fetch | internal | internal + btree_gist | public | gbt_enum_penalty | internal, internal, internal | internal + btree_gist | public | gbt_enum_picksplit | internal, internal | internal + btree_gist | public | gbt_enum_same | gbtreekey8, gbtreekey8, internal | internal + btree_gist | public | gbt_enum_union | internal, internal | gbtreekey8 + btree_gist | public | gbt_float4_compress | internal | internal + btree_gist | public | gbt_float4_consistent | internal, real, smallint, oid, internal | boolean + btree_gist | public | gbt_float4_distance | internal, real, smallint, oid, internal | double precision + btree_gist | public | gbt_float4_fetch | internal | internal + btree_gist | public | gbt_float4_penalty | internal, internal, internal | internal + btree_gist | public | gbt_float4_picksplit | internal, internal | internal + btree_gist | public | gbt_float4_same | gbtreekey8, gbtreekey8, internal | internal + btree_gist | public | gbt_float4_union | internal, internal | gbtreekey8 + btree_gist | public | gbt_float8_compress | internal | internal + btree_gist | public | gbt_float8_consistent | internal, double precision, smallint, oid, internal | boolean + btree_gist | public | gbt_float8_distance | internal, double precision, smallint, oid, internal | double precision + btree_gist | public | gbt_float8_fetch | internal | internal + btree_gist | public | gbt_float8_penalty | internal, internal, internal | internal + btree_gist | public | gbt_float8_picksplit | internal, internal | internal + btree_gist | public | gbt_float8_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_float8_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_inet_compress | internal | internal + btree_gist | public | gbt_inet_consistent | internal, inet, smallint, oid, internal | boolean + btree_gist | public | gbt_inet_penalty | internal, internal, internal | internal + btree_gist | public | gbt_inet_picksplit | internal, 
internal | internal + btree_gist | public | gbt_inet_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_inet_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_int2_compress | internal | internal + btree_gist | public | gbt_int2_consistent | internal, smallint, smallint, oid, internal | boolean + btree_gist | public | gbt_int2_distance | internal, smallint, smallint, oid, internal | double precision + btree_gist | public | gbt_int2_fetch | internal | internal + btree_gist | public | gbt_int2_penalty | internal, internal, internal | internal + btree_gist | public | gbt_int2_picksplit | internal, internal | internal + btree_gist | public | gbt_int2_same | gbtreekey4, gbtreekey4, internal | internal + btree_gist | public | gbt_int2_union | internal, internal | gbtreekey4 + btree_gist | public | gbt_int4_compress | internal | internal + btree_gist | public | gbt_int4_consistent | internal, integer, smallint, oid, internal | boolean + btree_gist | public | gbt_int4_distance | internal, integer, smallint, oid, internal | double precision + btree_gist | public | gbt_int4_fetch | internal | internal + btree_gist | public | gbt_int4_penalty | internal, internal, internal | internal + btree_gist | public | gbt_int4_picksplit | internal, internal | internal + btree_gist | public | gbt_int4_same | gbtreekey8, gbtreekey8, internal | internal + btree_gist | public | gbt_int4_union | internal, internal | gbtreekey8 + btree_gist | public | gbt_int8_compress | internal | internal + btree_gist | public | gbt_int8_consistent | internal, bigint, smallint, oid, internal | boolean + btree_gist | public | gbt_int8_distance | internal, bigint, smallint, oid, internal | double precision + btree_gist | public | gbt_int8_fetch | internal | internal + btree_gist | public | gbt_int8_penalty | internal, internal, internal | internal + btree_gist | public | gbt_int8_picksplit | internal, internal | internal + btree_gist | public | gbt_int8_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_int8_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_intv_compress | internal | internal + btree_gist | public | gbt_intv_consistent | internal, interval, smallint, oid, internal | boolean + btree_gist | public | gbt_intv_decompress | internal | internal + btree_gist | public | gbt_intv_distance | internal, interval, smallint, oid, internal | double precision + btree_gist | public | gbt_intv_fetch | internal | internal + btree_gist | public | gbt_intv_penalty | internal, internal, internal | internal + btree_gist | public | gbt_intv_picksplit | internal, internal | internal + btree_gist | public | gbt_intv_same | gbtreekey32, gbtreekey32, internal | internal + btree_gist | public | gbt_intv_union | internal, internal | gbtreekey32 + btree_gist | public | gbt_macad8_compress | internal | internal + btree_gist | public | gbt_macad8_consistent | internal, macaddr8, smallint, oid, internal | boolean + btree_gist | public | gbt_macad8_fetch | internal | internal + btree_gist | public | gbt_macad8_penalty | internal, internal, internal | internal + btree_gist | public | gbt_macad8_picksplit | internal, internal | internal + btree_gist | public | gbt_macad8_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_macad8_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_macad_compress | internal | internal + btree_gist | public | gbt_macad_consistent | internal, macaddr, smallint, oid, internal | 
boolean + btree_gist | public | gbt_macad_fetch | internal | internal + btree_gist | public | gbt_macad_penalty | internal, internal, internal | internal + btree_gist | public | gbt_macad_picksplit | internal, internal | internal + btree_gist | public | gbt_macad_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_macad_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_numeric_compress | internal | internal + btree_gist | public | gbt_numeric_consistent | internal, numeric, smallint, oid, internal | boolean + btree_gist | public | gbt_numeric_penalty | internal, internal, internal | internal + btree_gist | public | gbt_numeric_picksplit | internal, internal | internal + btree_gist | public | gbt_numeric_same | gbtreekey_var, gbtreekey_var, internal | internal + btree_gist | public | gbt_numeric_union | internal, internal | gbtreekey_var + btree_gist | public | gbt_oid_compress | internal | internal + btree_gist | public | gbt_oid_consistent | internal, oid, smallint, oid, internal | boolean + btree_gist | public | gbt_oid_distance | internal, oid, smallint, oid, internal | double precision + btree_gist | public | gbt_oid_fetch | internal | internal + btree_gist | public | gbt_oid_penalty | internal, internal, internal | internal + btree_gist | public | gbt_oid_picksplit | internal, internal | internal + btree_gist | public | gbt_oid_same | gbtreekey8, gbtreekey8, internal | internal + btree_gist | public | gbt_oid_union | internal, internal | gbtreekey8 + btree_gist | public | gbt_text_compress | internal | internal + btree_gist | public | gbt_text_consistent | internal, text, smallint, oid, internal | boolean + btree_gist | public | gbt_text_penalty | internal, internal, internal | internal + btree_gist | public | gbt_text_picksplit | internal, internal | internal + btree_gist | public | gbt_text_same | gbtreekey_var, gbtreekey_var, internal | internal + btree_gist | public | gbt_text_union | internal, internal | gbtreekey_var + btree_gist | public | gbt_time_compress | internal | internal + btree_gist | public | gbt_time_consistent | internal, time without time zone, smallint, oid, internal | boolean + btree_gist | public | gbt_time_distance | internal, time without time zone, smallint, oid, internal | double precision + btree_gist | public | gbt_time_fetch | internal | internal + btree_gist | public | gbt_time_penalty | internal, internal, internal | internal + btree_gist | public | gbt_time_picksplit | internal, internal | internal + btree_gist | public | gbt_time_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_time_union | internal, internal | gbtreekey16 + btree_gist | public | gbt_timetz_compress | internal | internal + btree_gist | public | gbt_timetz_consistent | internal, time with time zone, smallint, oid, internal | boolean + btree_gist | public | gbt_ts_compress | internal | internal + btree_gist | public | gbt_ts_consistent | internal, timestamp without time zone, smallint, oid, internal | boolean + btree_gist | public | gbt_ts_distance | internal, timestamp without time zone, smallint, oid, internal | double precision + btree_gist | public | gbt_ts_fetch | internal | internal + btree_gist | public | gbt_ts_penalty | internal, internal, internal | internal + btree_gist | public | gbt_ts_picksplit | internal, internal | internal + btree_gist | public | gbt_ts_same | gbtreekey16, gbtreekey16, internal | internal + btree_gist | public | gbt_ts_union | internal, internal | gbtreekey16 + btree_gist | 
public | gbt_tstz_compress | internal | internal + btree_gist | public | gbt_tstz_consistent | internal, timestamp with time zone, smallint, oid, internal | boolean + btree_gist | public | gbt_tstz_distance | internal, timestamp with time zone, smallint, oid, internal | double precision + btree_gist | public | gbt_uuid_compress | internal | internal + btree_gist | public | gbt_uuid_consistent | internal, uuid, smallint, oid, internal | boolean + btree_gist | public | gbt_uuid_fetch | internal | internal + btree_gist | public | gbt_uuid_penalty | internal, internal, internal | internal + btree_gist | public | gbt_uuid_picksplit | internal, internal | internal + btree_gist | public | gbt_uuid_same | gbtreekey32, gbtreekey32, internal | internal + btree_gist | public | gbt_uuid_union | internal, internal | gbtreekey32 + btree_gist | public | gbt_var_decompress | internal | internal + btree_gist | public | gbt_var_fetch | internal | internal + btree_gist | public | gbtreekey16_in | cstring | gbtreekey16 + btree_gist | public | gbtreekey16_out | gbtreekey16 | cstring + btree_gist | public | gbtreekey2_in | cstring | gbtreekey2 + btree_gist | public | gbtreekey2_out | gbtreekey2 | cstring + btree_gist | public | gbtreekey32_in | cstring | gbtreekey32 + btree_gist | public | gbtreekey32_out | gbtreekey32 | cstring + btree_gist | public | gbtreekey4_in | cstring | gbtreekey4 + btree_gist | public | gbtreekey4_out | gbtreekey4 | cstring + btree_gist | public | gbtreekey8_in | cstring | gbtreekey8 + btree_gist | public | gbtreekey8_out | gbtreekey8 | cstring + btree_gist | public | gbtreekey_var_in | cstring | gbtreekey_var + btree_gist | public | gbtreekey_var_out | gbtreekey_var | cstring + btree_gist | public | int2_dist | smallint, smallint | smallint + btree_gist | public | int4_dist | integer, integer | integer + btree_gist | public | int8_dist | bigint, bigint | bigint + btree_gist | public | interval_dist | interval, interval | interval + btree_gist | public | oid_dist | oid, oid | oid + btree_gist | public | time_dist | time without time zone, time without time zone | interval + btree_gist | public | ts_dist | timestamp without time zone, timestamp without time zone | interval + btree_gist | public | tstz_dist | timestamp with time zone, timestamp with time zone | interval + citext | public | citext | boolean | citext + citext | public | citext | character | citext + citext | public | citext | inet | citext + citext | public | citext_cmp | citext, citext | integer + citext | public | citext_eq | citext, citext | boolean + citext | public | citext_ge | citext, citext | boolean + citext | public | citext_gt | citext, citext | boolean + citext | public | citext_hash | citext | integer + citext | public | citext_hash_extended | citext, bigint | bigint + citext | public | citext_larger | citext, citext | citext + citext | public | citext_le | citext, citext | boolean + citext | public | citext_lt | citext, citext | boolean + citext | public | citext_ne | citext, citext | boolean + citext | public | citext_pattern_cmp | citext, citext | integer + citext | public | citext_pattern_ge | citext, citext | boolean + citext | public | citext_pattern_gt | citext, citext | boolean + citext | public | citext_pattern_le | citext, citext | boolean + citext | public | citext_pattern_lt | citext, citext | boolean + citext | public | citext_smaller | citext, citext | citext + citext | public | citextin | cstring | citext + citext | public | citextout | citext | cstring + citext | public | citextrecv | internal 
| citext + citext | public | citextsend | citext | bytea + citext | public | max | citext | citext + citext | public | min | citext | citext + citext | public | regexp_match | citext, citext | text[] + citext | public | regexp_match | citext, citext, text | text[] + citext | public | regexp_matches | citext, citext | SETOF text[] + citext | public | regexp_matches | citext, citext, text | SETOF text[] + citext | public | regexp_replace | citext, citext, text | text + citext | public | regexp_replace | citext, citext, text, text | text + citext | public | regexp_split_to_array | citext, citext | text[] + citext | public | regexp_split_to_array | citext, citext, text | text[] + citext | public | regexp_split_to_table | citext, citext | SETOF text + citext | public | regexp_split_to_table | citext, citext, text | SETOF text + citext | public | replace | citext, citext, citext | text + citext | public | split_part | citext, citext, integer | text + citext | public | strpos | citext, citext | integer + citext | public | texticlike | citext, citext | boolean + citext | public | texticlike | citext, text | boolean + citext | public | texticnlike | citext, citext | boolean + citext | public | texticnlike | citext, text | boolean + citext | public | texticregexeq | citext, citext | boolean + citext | public | texticregexeq | citext, text | boolean + citext | public | texticregexne | citext, citext | boolean + citext | public | texticregexne | citext, text | boolean + citext | public | translate | citext, citext, text | text + cube | public | cube | cube, double precision | cube + cube | public | cube | cube, double precision, double precision | cube + cube | public | cube | double precision | cube + cube | public | cube | double precision, double precision | cube + cube | public | cube | double precision[] | cube + cube | public | cube | double precision[], double precision[] | cube + cube | public | cube_cmp | cube, cube | integer + cube | public | cube_contained | cube, cube | boolean + cube | public | cube_contains | cube, cube | boolean + cube | public | cube_coord | cube, integer | double precision + cube | public | cube_coord_llur | cube, integer | double precision + cube | public | cube_dim | cube | integer + cube | public | cube_distance | cube, cube | double precision + cube | public | cube_enlarge | cube, double precision, integer | cube + cube | public | cube_eq | cube, cube | boolean + cube | public | cube_ge | cube, cube | boolean + cube | public | cube_gt | cube, cube | boolean + cube | public | cube_in | cstring | cube + cube | public | cube_inter | cube, cube | cube + cube | public | cube_is_point | cube | boolean + cube | public | cube_le | cube, cube | boolean + cube | public | cube_ll_coord | cube, integer | double precision + cube | public | cube_lt | cube, cube | boolean + cube | public | cube_ne | cube, cube | boolean + cube | public | cube_out | cube | cstring + cube | public | cube_overlap | cube, cube | boolean + cube | public | cube_recv | internal | cube + cube | public | cube_send | cube | bytea + cube | public | cube_size | cube | double precision + cube | public | cube_subset | cube, integer[] | cube + cube | public | cube_union | cube, cube | cube + cube | public | cube_ur_coord | cube, integer | double precision + cube | public | distance_chebyshev | cube, cube | double precision + cube | public | distance_taxicab | cube, cube | double precision + cube | public | g_cube_consistent | internal, cube, smallint, oid, internal | boolean + cube | public | g_cube_distance | 
internal, cube, smallint, oid, internal | double precision + cube | public | g_cube_penalty | internal, internal, internal | internal + cube | public | g_cube_picksplit | internal, internal | internal + cube | public | g_cube_same | cube, cube, internal | internal + cube | public | g_cube_union | internal, internal | cube + dblink | public | dblink | text | SETOF record + dblink | public | dblink | text, boolean | SETOF record + dblink | public | dblink | text, text | SETOF record + dblink | public | dblink | text, text, boolean | SETOF record + dblink | public | dblink_build_sql_delete | text, int2vector, integer, text[] | text + dblink | public | dblink_build_sql_insert | text, int2vector, integer, text[], text[] | text + dblink | public | dblink_build_sql_update | text, int2vector, integer, text[], text[] | text + dblink | public | dblink_cancel_query | text | text + dblink | public | dblink_close | text | text + dblink | public | dblink_close | text, boolean | text + dblink | public | dblink_close | text, text | text + dblink | public | dblink_close | text, text, boolean | text + dblink | public | dblink_connect | text | text + dblink | public | dblink_connect | text, text | text + dblink | public | dblink_connect_u | text | text + dblink | public | dblink_connect_u | text, text | text + dblink | public | dblink_current_query | | text + dblink | public | dblink_disconnect | | text + dblink | public | dblink_disconnect | text | text + dblink | public | dblink_error_message | text | text + dblink | public | dblink_exec | text | text + dblink | public | dblink_exec | text, boolean | text + dblink | public | dblink_exec | text, text | text + dblink | public | dblink_exec | text, text, boolean | text + dblink | public | dblink_fdw_validator | options text[], catalog oid | void + dblink | public | dblink_fetch | text, integer | SETOF record + dblink | public | dblink_fetch | text, integer, boolean | SETOF record + dblink | public | dblink_fetch | text, text, integer | SETOF record + dblink | public | dblink_fetch | text, text, integer, boolean | SETOF record + dblink | public | dblink_get_connections | | text[] + dblink | public | dblink_get_notify | conname text, OUT notify_name text, OUT be_pid integer, OUT extra text | SETOF record + dblink | public | dblink_get_notify | OUT notify_name text, OUT be_pid integer, OUT extra text | SETOF record + dblink | public | dblink_get_pkey | text | SETOF dblink_pkey_results + dblink | public | dblink_get_result | text | SETOF record + dblink | public | dblink_get_result | text, boolean | SETOF record + dblink | public | dblink_is_busy | text | integer + dblink | public | dblink_open | text, text | text + dblink | public | dblink_open | text, text, boolean | text + dblink | public | dblink_open | text, text, text | text + dblink | public | dblink_open | text, text, text, boolean | text + dblink | public | dblink_send_query | text, text | integer + dict_int | public | dintdict_init | internal | internal + dict_int | public | dintdict_lexize | internal, internal, internal, internal | internal + dict_xsyn | public | dxsyn_init | internal | internal + dict_xsyn | public | dxsyn_lexize | internal, internal, internal, internal | internal + earthdistance | public | earth | | double precision + earthdistance | public | earth_box | earth, double precision | cube + earthdistance | public | earth_distance | earth, earth | double precision + earthdistance | public | gc_to_sec | double precision | double precision + earthdistance | public | geo_distance | point, 
point | double precision + earthdistance | public | latitude | earth | double precision + earthdistance | public | ll_to_earth | double precision, double precision | earth + earthdistance | public | longitude | earth | double precision + earthdistance | public | sec_to_gc | double precision | double precision + file_fdw | public | file_fdw_handler | | fdw_handler + file_fdw | public | file_fdw_validator | text[], oid | void + fuzzystrmatch | public | difference | text, text | integer + fuzzystrmatch | public | dmetaphone | text | text + fuzzystrmatch | public | dmetaphone_alt | text | text + fuzzystrmatch | public | levenshtein | text, text | integer + fuzzystrmatch | public | levenshtein | text, text, integer, integer, integer | integer + fuzzystrmatch | public | levenshtein_less_equal | text, text, integer | integer + fuzzystrmatch | public | levenshtein_less_equal | text, text, integer, integer, integer, integer | integer + fuzzystrmatch | public | metaphone | text, integer | text + fuzzystrmatch | public | soundex | text | text + fuzzystrmatch | public | text_soundex | text | text + hstore | public | akeys | hstore | text[] + hstore | public | avals | hstore | text[] + hstore | public | defined | hstore, text | boolean + hstore | public | delete | hstore, hstore | hstore + hstore | public | delete | hstore, text | hstore + hstore | public | delete | hstore, text[] | hstore + hstore | public | each | hs hstore, OUT key text, OUT value text | SETOF record + hstore | public | exist | hstore, text | boolean + hstore | public | exists_all | hstore, text[] | boolean + hstore | public | exists_any | hstore, text[] | boolean + hstore | public | fetchval | hstore, text | text + hstore | public | ghstore_compress | internal | internal + hstore | public | ghstore_consistent | internal, hstore, smallint, oid, internal | boolean + hstore | public | ghstore_decompress | internal | internal + hstore | public | ghstore_in | cstring | ghstore + hstore | public | ghstore_options | internal | void + hstore | public | ghstore_out | ghstore | cstring + hstore | public | ghstore_penalty | internal, internal, internal | internal + hstore | public | ghstore_picksplit | internal, internal | internal + hstore | public | ghstore_same | ghstore, ghstore, internal | internal + hstore | public | ghstore_union | internal, internal | ghstore + hstore | public | gin_consistent_hstore | internal, smallint, hstore, integer, internal, internal | boolean + hstore | public | gin_extract_hstore | hstore, internal | internal + hstore | public | gin_extract_hstore_query | hstore, internal, smallint, internal, internal | internal + hstore | public | hs_concat | hstore, hstore | hstore + hstore | public | hs_contained | hstore, hstore | boolean + hstore | public | hs_contains | hstore, hstore | boolean + hstore | public | hstore | record | hstore + hstore | public | hstore | text, text | hstore + hstore | public | hstore | text[] | hstore + hstore | public | hstore | text[], text[] | hstore + hstore | public | hstore_cmp | hstore, hstore | integer + hstore | public | hstore_eq | hstore, hstore | boolean + hstore | public | hstore_ge | hstore, hstore | boolean + hstore | public | hstore_gt | hstore, hstore | boolean + hstore | public | hstore_hash | hstore | integer + hstore | public | hstore_hash_extended | hstore, bigint | bigint + hstore | public | hstore_in | cstring | hstore + hstore | public | hstore_le | hstore, hstore | boolean + hstore | public | hstore_lt | hstore, hstore | boolean + hstore | public | hstore_ne | 
hstore, hstore | boolean + hstore | public | hstore_out | hstore | cstring + hstore | public | hstore_recv | internal | hstore + hstore | public | hstore_send | hstore | bytea + hstore | public | hstore_subscript_handler | internal | internal + hstore | public | hstore_to_array | hstore | text[] + hstore | public | hstore_to_json | hstore | json + hstore | public | hstore_to_json_loose | hstore | json + hstore | public | hstore_to_jsonb | hstore | jsonb + hstore | public | hstore_to_jsonb_loose | hstore | jsonb + hstore | public | hstore_to_matrix | hstore | text[] + hstore | public | hstore_version_diag | hstore | integer + hstore | public | isdefined | hstore, text | boolean + hstore | public | isexists | hstore, text | boolean + hstore | public | populate_record | anyelement, hstore | anyelement + hstore | public | skeys | hstore | SETOF text + hstore | public | slice | hstore, text[] | hstore + hstore | public | slice_array | hstore, text[] | text[] + hstore | public | svals | hstore | SETOF text + hstore | public | tconvert | text, text | hstore + http | public | bytea_to_text | data bytea | text + http | public | http | request http_request | http_response + http | public | http_delete | uri character varying | http_response + http | public | http_delete | uri character varying, content character varying, content_type character varying | http_response + http | public | http_get | uri character varying | http_response + http | public | http_get | uri character varying, data jsonb | http_response + http | public | http_head | uri character varying | http_response + http | public | http_header | field character varying, value character varying | http_header + http | public | http_list_curlopt | | TABLE(curlopt text, value text) + http | public | http_patch | uri character varying, content character varying, content_type character varying | http_response + http | public | http_post | uri character varying, content character varying, content_type character varying | http_response + http | public | http_post | uri character varying, data jsonb | http_response + http | public | http_put | uri character varying, content character varying, content_type character varying | http_response + http | public | http_reset_curlopt | | boolean + http | public | http_set_curlopt | curlopt character varying, value character varying | boolean + http | public | text_to_bytea | data text | bytea + http | public | urlencode | data jsonb | text + http | public | urlencode | string bytea | text + http | public | urlencode | string character varying | text + hypopg | public | hypopg | OUT indexname text, OUT indexrelid oid, OUT indrelid oid, OUT innatts integer, OUT indisunique boolean, OUT indkey int2vector, OUT indcollation oidvector, OUT indclass oidvector, OUT indoption oidvector, OUT indexprs pg_node_tree, OUT indpred pg_node_tree, OUT amid oid | SETOF record + hypopg | public | hypopg_create_index | sql_order text, OUT indexrelid oid, OUT indexname text | SETOF record + hypopg | public | hypopg_drop_index | indexid oid | boolean + hypopg | public | hypopg_get_indexdef | indexid oid | text + hypopg | public | hypopg_hidden_indexes | | TABLE(indexid oid) + hypopg | public | hypopg_hide_index | indexid oid | boolean + hypopg | public | hypopg_relation_size | indexid oid | bigint + hypopg | public | hypopg_reset | | void + hypopg | public | hypopg_reset_index | | void + hypopg | public | hypopg_unhide_all_indexes | | void + hypopg | public | hypopg_unhide_index | indexid oid | boolean + index_advisor | public 
| index_advisor | query text | TABLE(startup_cost_before jsonb, startup_cost_after jsonb, total_cost_before jsonb, total_cost_after jsonb, index_statements text[], errors text[]) + insert_username | public | insert_username | | trigger + intagg | public | int_agg_final_array | internal | integer[] + intagg | public | int_agg_state | internal, integer | internal + intagg | public | int_array_aggregate | integer | integer[] + intagg | public | int_array_enum | integer[] | SETOF integer + intarray | public | _int_contained | integer[], integer[] | boolean + intarray | public | _int_contained_joinsel | internal, oid, internal, smallint, internal | double precision + intarray | public | _int_contained_sel | internal, oid, internal, integer | double precision + intarray | public | _int_contains | integer[], integer[] | boolean + intarray | public | _int_contains_joinsel | internal, oid, internal, smallint, internal | double precision + intarray | public | _int_contains_sel | internal, oid, internal, integer | double precision + intarray | public | _int_different | integer[], integer[] | boolean + intarray | public | _int_inter | integer[], integer[] | integer[] + intarray | public | _int_matchsel | internal, oid, internal, integer | double precision + intarray | public | _int_overlap | integer[], integer[] | boolean + intarray | public | _int_overlap_joinsel | internal, oid, internal, smallint, internal | double precision + intarray | public | _int_overlap_sel | internal, oid, internal, integer | double precision + intarray | public | _int_same | integer[], integer[] | boolean + intarray | public | _int_union | integer[], integer[] | integer[] + intarray | public | _intbig_in | cstring | intbig_gkey + intarray | public | _intbig_out | intbig_gkey | cstring + intarray | public | boolop | integer[], query_int | boolean + intarray | public | bqarr_in | cstring | query_int + intarray | public | bqarr_out | query_int | cstring + intarray | public | g_int_compress | internal | internal + intarray | public | g_int_consistent | internal, integer[], smallint, oid, internal | boolean + intarray | public | g_int_decompress | internal | internal + intarray | public | g_int_options | internal | void + intarray | public | g_int_penalty | internal, internal, internal | internal + intarray | public | g_int_picksplit | internal, internal | internal + intarray | public | g_int_same | integer[], integer[], internal | internal + intarray | public | g_int_union | internal, internal | integer[] + intarray | public | g_intbig_compress | internal | internal + intarray | public | g_intbig_consistent | internal, integer[], smallint, oid, internal | boolean + intarray | public | g_intbig_decompress | internal | internal + intarray | public | g_intbig_options | internal | void + intarray | public | g_intbig_penalty | internal, internal, internal | internal + intarray | public | g_intbig_picksplit | internal, internal | internal + intarray | public | g_intbig_same | intbig_gkey, intbig_gkey, internal | internal + intarray | public | g_intbig_union | internal, internal | intbig_gkey + intarray | public | ginint4_consistent | internal, smallint, integer[], integer, internal, internal, internal, internal | boolean + intarray | public | ginint4_queryextract | integer[], internal, smallint, internal, internal, internal, internal | internal + intarray | public | icount | integer[] | integer + intarray | public | idx | integer[], integer | integer + intarray | public | intarray_del_elem | integer[], integer | integer[] + intarray 
| public | intarray_push_array | integer[], integer[] | integer[] + intarray | public | intarray_push_elem | integer[], integer | integer[] + intarray | public | intset | integer | integer[] + intarray | public | intset_subtract | integer[], integer[] | integer[] + intarray | public | intset_union_elem | integer[], integer | integer[] + intarray | public | querytree | query_int | text + intarray | public | rboolop | query_int, integer[] | boolean + intarray | public | sort | integer[] | integer[] + intarray | public | sort | integer[], text | integer[] + intarray | public | sort_asc | integer[] | integer[] + intarray | public | sort_desc | integer[] | integer[] + intarray | public | subarray | integer[], integer | integer[] + intarray | public | subarray | integer[], integer, integer | integer[] + intarray | public | uniq | integer[] | integer[] + isn | public | btean13cmp | ean13, ean13 | integer + isn | public | btean13cmp | ean13, isbn | integer + isn | public | btean13cmp | ean13, isbn13 | integer + isn | public | btean13cmp | ean13, ismn | integer + isn | public | btean13cmp | ean13, ismn13 | integer + isn | public | btean13cmp | ean13, issn | integer + isn | public | btean13cmp | ean13, issn13 | integer + isn | public | btean13cmp | ean13, upc | integer + isn | public | btisbn13cmp | isbn13, ean13 | integer + isn | public | btisbn13cmp | isbn13, isbn | integer + isn | public | btisbn13cmp | isbn13, isbn13 | integer + isn | public | btisbncmp | isbn, ean13 | integer + isn | public | btisbncmp | isbn, isbn | integer + isn | public | btisbncmp | isbn, isbn13 | integer + isn | public | btismn13cmp | ismn13, ean13 | integer + isn | public | btismn13cmp | ismn13, ismn | integer + isn | public | btismn13cmp | ismn13, ismn13 | integer + isn | public | btismncmp | ismn, ean13 | integer + isn | public | btismncmp | ismn, ismn | integer + isn | public | btismncmp | ismn, ismn13 | integer + isn | public | btissn13cmp | issn13, ean13 | integer + isn | public | btissn13cmp | issn13, issn | integer + isn | public | btissn13cmp | issn13, issn13 | integer + isn | public | btissncmp | issn, ean13 | integer + isn | public | btissncmp | issn, issn | integer + isn | public | btissncmp | issn, issn13 | integer + isn | public | btupccmp | upc, ean13 | integer + isn | public | btupccmp | upc, upc | integer + isn | public | ean13_in | cstring | ean13 + isn | public | ean13_out | ean13 | cstring + isn | public | ean13_out | isbn13 | cstring + isn | public | ean13_out | ismn13 | cstring + isn | public | ean13_out | issn13 | cstring + isn | public | hashean13 | ean13 | integer + isn | public | hashisbn | isbn | integer + isn | public | hashisbn13 | isbn13 | integer + isn | public | hashismn | ismn | integer + isn | public | hashismn13 | ismn13 | integer + isn | public | hashissn | issn | integer + isn | public | hashissn13 | issn13 | integer + isn | public | hashupc | upc | integer + isn | public | is_valid | ean13 | boolean + isn | public | is_valid | isbn | boolean + isn | public | is_valid | isbn13 | boolean + isn | public | is_valid | ismn | boolean + isn | public | is_valid | ismn13 | boolean + isn | public | is_valid | issn | boolean + isn | public | is_valid | issn13 | boolean + isn | public | is_valid | upc | boolean + isn | public | isbn | ean13 | isbn + isn | public | isbn13 | ean13 | isbn13 + isn | public | isbn13_in | cstring | isbn13 + isn | public | isbn_in | cstring | isbn + isn | public | ismn | ean13 | ismn + isn | public | ismn13 | ean13 | ismn13 + isn | public | ismn13_in | cstring | ismn13 + 
isn | public | ismn_in | cstring | ismn + isn | public | isn_out | isbn | cstring + isn | public | isn_out | ismn | cstring + isn | public | isn_out | issn | cstring + isn | public | isn_out | upc | cstring + isn | public | isn_weak | | boolean + isn | public | isn_weak | boolean | boolean + isn | public | isneq | ean13, ean13 | boolean + isn | public | isneq | ean13, isbn | boolean + isn | public | isneq | ean13, isbn13 | boolean + isn | public | isneq | ean13, ismn | boolean + isn | public | isneq | ean13, ismn13 | boolean + isn | public | isneq | ean13, issn | boolean + isn | public | isneq | ean13, issn13 | boolean + isn | public | isneq | ean13, upc | boolean + isn | public | isneq | isbn, ean13 | boolean + isn | public | isneq | isbn, isbn | boolean + isn | public | isneq | isbn, isbn13 | boolean + isn | public | isneq | isbn13, ean13 | boolean + isn | public | isneq | isbn13, isbn | boolean + isn | public | isneq | isbn13, isbn13 | boolean + isn | public | isneq | ismn, ean13 | boolean + isn | public | isneq | ismn, ismn | boolean + isn | public | isneq | ismn, ismn13 | boolean + isn | public | isneq | ismn13, ean13 | boolean + isn | public | isneq | ismn13, ismn | boolean + isn | public | isneq | ismn13, ismn13 | boolean + isn | public | isneq | issn, ean13 | boolean + isn | public | isneq | issn, issn | boolean + isn | public | isneq | issn, issn13 | boolean + isn | public | isneq | issn13, ean13 | boolean + isn | public | isneq | issn13, issn | boolean + isn | public | isneq | issn13, issn13 | boolean + isn | public | isneq | upc, ean13 | boolean + isn | public | isneq | upc, upc | boolean + isn | public | isnge | ean13, ean13 | boolean + isn | public | isnge | ean13, isbn | boolean + isn | public | isnge | ean13, isbn13 | boolean + isn | public | isnge | ean13, ismn | boolean + isn | public | isnge | ean13, ismn13 | boolean + isn | public | isnge | ean13, issn | boolean + isn | public | isnge | ean13, issn13 | boolean + isn | public | isnge | ean13, upc | boolean + isn | public | isnge | isbn, ean13 | boolean + isn | public | isnge | isbn, isbn | boolean + isn | public | isnge | isbn, isbn13 | boolean + isn | public | isnge | isbn13, ean13 | boolean + isn | public | isnge | isbn13, isbn | boolean + isn | public | isnge | isbn13, isbn13 | boolean + isn | public | isnge | ismn, ean13 | boolean + isn | public | isnge | ismn, ismn | boolean + isn | public | isnge | ismn, ismn13 | boolean + isn | public | isnge | ismn13, ean13 | boolean + isn | public | isnge | ismn13, ismn | boolean + isn | public | isnge | ismn13, ismn13 | boolean + isn | public | isnge | issn, ean13 | boolean + isn | public | isnge | issn, issn | boolean + isn | public | isnge | issn, issn13 | boolean + isn | public | isnge | issn13, ean13 | boolean + isn | public | isnge | issn13, issn | boolean + isn | public | isnge | issn13, issn13 | boolean + isn | public | isnge | upc, ean13 | boolean + isn | public | isnge | upc, upc | boolean + isn | public | isngt | ean13, ean13 | boolean + isn | public | isngt | ean13, isbn | boolean + isn | public | isngt | ean13, isbn13 | boolean + isn | public | isngt | ean13, ismn | boolean + isn | public | isngt | ean13, ismn13 | boolean + isn | public | isngt | ean13, issn | boolean + isn | public | isngt | ean13, issn13 | boolean + isn | public | isngt | ean13, upc | boolean + isn | public | isngt | isbn, ean13 | boolean + isn | public | isngt | isbn, isbn | boolean + isn | public | isngt | isbn, isbn13 | boolean + isn | public | isngt | isbn13, ean13 | boolean + isn | public | 
isngt | isbn13, isbn | boolean + isn | public | isngt | isbn13, isbn13 | boolean + isn | public | isngt | ismn, ean13 | boolean + isn | public | isngt | ismn, ismn | boolean + isn | public | isngt | ismn, ismn13 | boolean + isn | public | isngt | ismn13, ean13 | boolean + isn | public | isngt | ismn13, ismn | boolean + isn | public | isngt | ismn13, ismn13 | boolean + isn | public | isngt | issn, ean13 | boolean + isn | public | isngt | issn, issn | boolean + isn | public | isngt | issn, issn13 | boolean + isn | public | isngt | issn13, ean13 | boolean + isn | public | isngt | issn13, issn | boolean + isn | public | isngt | issn13, issn13 | boolean + isn | public | isngt | upc, ean13 | boolean + isn | public | isngt | upc, upc | boolean + isn | public | isnle | ean13, ean13 | boolean + isn | public | isnle | ean13, isbn | boolean + isn | public | isnle | ean13, isbn13 | boolean + isn | public | isnle | ean13, ismn | boolean + isn | public | isnle | ean13, ismn13 | boolean + isn | public | isnle | ean13, issn | boolean + isn | public | isnle | ean13, issn13 | boolean + isn | public | isnle | ean13, upc | boolean + isn | public | isnle | isbn, ean13 | boolean + isn | public | isnle | isbn, isbn | boolean + isn | public | isnle | isbn, isbn13 | boolean + isn | public | isnle | isbn13, ean13 | boolean + isn | public | isnle | isbn13, isbn | boolean + isn | public | isnle | isbn13, isbn13 | boolean + isn | public | isnle | ismn, ean13 | boolean + isn | public | isnle | ismn, ismn | boolean + isn | public | isnle | ismn, ismn13 | boolean + isn | public | isnle | ismn13, ean13 | boolean + isn | public | isnle | ismn13, ismn | boolean + isn | public | isnle | ismn13, ismn13 | boolean + isn | public | isnle | issn, ean13 | boolean + isn | public | isnle | issn, issn | boolean + isn | public | isnle | issn, issn13 | boolean + isn | public | isnle | issn13, ean13 | boolean + isn | public | isnle | issn13, issn | boolean + isn | public | isnle | issn13, issn13 | boolean + isn | public | isnle | upc, ean13 | boolean + isn | public | isnle | upc, upc | boolean + isn | public | isnlt | ean13, ean13 | boolean + isn | public | isnlt | ean13, isbn | boolean + isn | public | isnlt | ean13, isbn13 | boolean + isn | public | isnlt | ean13, ismn | boolean + isn | public | isnlt | ean13, ismn13 | boolean + isn | public | isnlt | ean13, issn | boolean + isn | public | isnlt | ean13, issn13 | boolean + isn | public | isnlt | ean13, upc | boolean + isn | public | isnlt | isbn, ean13 | boolean + isn | public | isnlt | isbn, isbn | boolean + isn | public | isnlt | isbn, isbn13 | boolean + isn | public | isnlt | isbn13, ean13 | boolean + isn | public | isnlt | isbn13, isbn | boolean + isn | public | isnlt | isbn13, isbn13 | boolean + isn | public | isnlt | ismn, ean13 | boolean + isn | public | isnlt | ismn, ismn | boolean + isn | public | isnlt | ismn, ismn13 | boolean + isn | public | isnlt | ismn13, ean13 | boolean + isn | public | isnlt | ismn13, ismn | boolean + isn | public | isnlt | ismn13, ismn13 | boolean + isn | public | isnlt | issn, ean13 | boolean + isn | public | isnlt | issn, issn | boolean + isn | public | isnlt | issn, issn13 | boolean + isn | public | isnlt | issn13, ean13 | boolean + isn | public | isnlt | issn13, issn | boolean + isn | public | isnlt | issn13, issn13 | boolean + isn | public | isnlt | upc, ean13 | boolean + isn | public | isnlt | upc, upc | boolean + isn | public | isnne | ean13, ean13 | boolean + isn | public | isnne | ean13, isbn | boolean + isn | public | isnne | ean13, isbn13 | 
boolean + isn | public | isnne | ean13, ismn | boolean + isn | public | isnne | ean13, ismn13 | boolean + isn | public | isnne | ean13, issn | boolean + isn | public | isnne | ean13, issn13 | boolean + isn | public | isnne | ean13, upc | boolean + isn | public | isnne | isbn, ean13 | boolean + isn | public | isnne | isbn, isbn | boolean + isn | public | isnne | isbn, isbn13 | boolean + isn | public | isnne | isbn13, ean13 | boolean + isn | public | isnne | isbn13, isbn | boolean + isn | public | isnne | isbn13, isbn13 | boolean + isn | public | isnne | ismn, ean13 | boolean + isn | public | isnne | ismn, ismn | boolean + isn | public | isnne | ismn, ismn13 | boolean + isn | public | isnne | ismn13, ean13 | boolean + isn | public | isnne | ismn13, ismn | boolean + isn | public | isnne | ismn13, ismn13 | boolean + isn | public | isnne | issn, ean13 | boolean + isn | public | isnne | issn, issn | boolean + isn | public | isnne | issn, issn13 | boolean + isn | public | isnne | issn13, ean13 | boolean + isn | public | isnne | issn13, issn | boolean + isn | public | isnne | issn13, issn13 | boolean + isn | public | isnne | upc, ean13 | boolean + isn | public | isnne | upc, upc | boolean + isn | public | issn | ean13 | issn + isn | public | issn13 | ean13 | issn13 + isn | public | issn13_in | cstring | issn13 + isn | public | issn_in | cstring | issn + isn | public | make_valid | ean13 | ean13 + isn | public | make_valid | isbn | isbn + isn | public | make_valid | isbn13 | isbn13 + isn | public | make_valid | ismn | ismn + isn | public | make_valid | ismn13 | ismn13 + isn | public | make_valid | issn | issn + isn | public | make_valid | issn13 | issn13 + isn | public | make_valid | upc | upc + isn | public | upc | ean13 | upc + isn | public | upc_in | cstring | upc + lo | public | lo_manage | | trigger + lo | public | lo_oid | lo | oid + ltree | public | _lt_q_regex | ltree[], lquery[] | boolean + ltree | public | _lt_q_rregex | lquery[], ltree[] | boolean + ltree | public | _ltq_extract_regex | ltree[], lquery | ltree + ltree | public | _ltq_regex | ltree[], lquery | boolean + ltree | public | _ltq_rregex | lquery, ltree[] | boolean + ltree | public | _ltree_compress | internal | internal + ltree | public | _ltree_consistent | internal, ltree[], smallint, oid, internal | boolean + ltree | public | _ltree_extract_isparent | ltree[], ltree | ltree + ltree | public | _ltree_extract_risparent | ltree[], ltree | ltree + ltree | public | _ltree_gist_options | internal | void + ltree | public | _ltree_isparent | ltree[], ltree | boolean + ltree | public | _ltree_penalty | internal, internal, internal | internal + ltree | public | _ltree_picksplit | internal, internal | internal + ltree | public | _ltree_r_isparent | ltree, ltree[] | boolean + ltree | public | _ltree_r_risparent | ltree, ltree[] | boolean + ltree | public | _ltree_risparent | ltree[], ltree | boolean + ltree | public | _ltree_same | ltree_gist, ltree_gist, internal | internal + ltree | public | _ltree_union | internal, internal | ltree_gist + ltree | public | _ltxtq_exec | ltree[], ltxtquery | boolean + ltree | public | _ltxtq_extract_exec | ltree[], ltxtquery | ltree + ltree | public | _ltxtq_rexec | ltxtquery, ltree[] | boolean + ltree | public | hash_ltree | ltree | integer + ltree | public | hash_ltree_extended | ltree, bigint | bigint + ltree | public | index | ltree, ltree | integer + ltree | public | index | ltree, ltree, integer | integer + ltree | public | lca | ltree, ltree | ltree + ltree | public | lca | ltree, ltree, ltree 
| ltree + ltree | public | lca | ltree, ltree, ltree, ltree | ltree + ltree | public | lca | ltree, ltree, ltree, ltree, ltree | ltree + ltree | public | lca | ltree, ltree, ltree, ltree, ltree, ltree | ltree + ltree | public | lca | ltree, ltree, ltree, ltree, ltree, ltree, ltree | ltree + ltree | public | lca | ltree, ltree, ltree, ltree, ltree, ltree, ltree, ltree | ltree + ltree | public | lca | ltree[] | ltree + ltree | public | lquery_in | cstring | lquery + ltree | public | lquery_out | lquery | cstring + ltree | public | lquery_recv | internal | lquery + ltree | public | lquery_send | lquery | bytea + ltree | public | lt_q_regex | ltree, lquery[] | boolean + ltree | public | lt_q_rregex | lquery[], ltree | boolean + ltree | public | ltq_regex | ltree, lquery | boolean + ltree | public | ltq_rregex | lquery, ltree | boolean + ltree | public | ltree2text | ltree | text + ltree | public | ltree_addltree | ltree, ltree | ltree + ltree | public | ltree_addtext | ltree, text | ltree + ltree | public | ltree_cmp | ltree, ltree | integer + ltree | public | ltree_compress | internal | internal + ltree | public | ltree_consistent | internal, ltree, smallint, oid, internal | boolean + ltree | public | ltree_decompress | internal | internal + ltree | public | ltree_eq | ltree, ltree | boolean + ltree | public | ltree_ge | ltree, ltree | boolean + ltree | public | ltree_gist_in | cstring | ltree_gist + ltree | public | ltree_gist_options | internal | void + ltree | public | ltree_gist_out | ltree_gist | cstring + ltree | public | ltree_gt | ltree, ltree | boolean + ltree | public | ltree_in | cstring | ltree + ltree | public | ltree_isparent | ltree, ltree | boolean + ltree | public | ltree_le | ltree, ltree | boolean + ltree | public | ltree_lt | ltree, ltree | boolean + ltree | public | ltree_ne | ltree, ltree | boolean + ltree | public | ltree_out | ltree | cstring + ltree | public | ltree_penalty | internal, internal, internal | internal + ltree | public | ltree_picksplit | internal, internal | internal + ltree | public | ltree_recv | internal | ltree + ltree | public | ltree_risparent | ltree, ltree | boolean + ltree | public | ltree_same | ltree_gist, ltree_gist, internal | internal + ltree | public | ltree_send | ltree | bytea + ltree | public | ltree_textadd | text, ltree | ltree + ltree | public | ltree_union | internal, internal | ltree_gist + ltree | public | ltreeparentsel | internal, oid, internal, integer | double precision + ltree | public | ltxtq_exec | ltree, ltxtquery | boolean + ltree | public | ltxtq_in | cstring | ltxtquery + ltree | public | ltxtq_out | ltxtquery | cstring + ltree | public | ltxtq_recv | internal | ltxtquery + ltree | public | ltxtq_rexec | ltxtquery, ltree | boolean + ltree | public | ltxtq_send | ltxtquery | bytea + ltree | public | nlevel | ltree | integer + ltree | public | subltree | ltree, integer, integer | ltree + ltree | public | subpath | ltree, integer | ltree + ltree | public | subpath | ltree, integer, integer | ltree + ltree | public | text2ltree | text | ltree + moddatetime | public | moddatetime | | trigger + orioledb | public | orioledb_commit_hash | | text + orioledb | public | orioledb_compression_max_level | | bigint + orioledb | public | orioledb_evict_pages | relid oid, maxlevel integer | void + orioledb | public | orioledb_get_evicted_trees | OUT datoid oid, OUT relnode oid, OUT root_downlink bigint, OUT file_length bigint | SETOF record + orioledb | public | orioledb_get_index_descrs | OUT datoid oid, OUT reloid oid, OUT relnode 
oid, OUT refcnt oid | SETOF record + orioledb | public | orioledb_get_table_descrs | OUT datoid oid, OUT reloid oid, OUT relnode oid, OUT refcnt oid | SETOF record + orioledb | public | orioledb_has_retained_undo | | boolean + orioledb | public | orioledb_idx_structure | relid oid, tree_name text, options character varying, depth integer | text + orioledb | public | orioledb_index_description | datoid oid, relid oid, relnode oid, index_type text, OUT name text, OUT description text | record + orioledb | public | orioledb_index_oids | OUT datoid oid, OUT table_reloid oid, OUT table_relnode oid, OUT index_reloid oid, OUT index_relnode oid, OUT index_type text | SETOF record + orioledb | public | orioledb_index_rows | relid oid, OUT total integer, OUT dead integer | record + orioledb | public | orioledb_page_stats | OUT pool_name text, OUT busy_pages bigint, OUT free_pages bigint, OUT dirty_pages bigint, OUT all_pages bigint | SETOF record + orioledb | public | orioledb_recovery_synchronized | | boolean + orioledb | public | orioledb_relation_size | relid oid | bigint + orioledb | public | orioledb_sys_tree_check | num integer, force_map_check boolean | boolean + orioledb | public | orioledb_sys_tree_rows | num integer | SETOF jsonb + orioledb | public | orioledb_sys_tree_structure | num integer, options character varying, depth integer | text + orioledb | public | orioledb_table_description | datoid oid, relid oid, relnode oid | text + orioledb | public | orioledb_table_description | relid oid | text + orioledb | public | orioledb_table_oids | OUT datoid oid, OUT reloid oid, OUT relnode oid | SETOF record + orioledb | public | orioledb_table_pages | relid oid, OUT blkno bigint, OUT level integer, OUT rightlink bigint, OUT hikey jsonb | SETOF record + orioledb | public | orioledb_tableam_handler | internal | table_am_handler + orioledb | public | orioledb_tbl_are_indices_equal | idx_oid1 regclass, idx_oid2 regclass | boolean + orioledb | public | orioledb_tbl_bin_structure | relid oid, print_bytes boolean, depth integer | text + orioledb | public | orioledb_tbl_check | relid oid, force_map_check boolean | boolean + orioledb | public | orioledb_tbl_compression_check | level bigint, relid oid, ranges integer[] | text + orioledb | public | orioledb_tbl_indices | relid oid | text + orioledb | public | orioledb_tbl_structure | relid oid, options character varying, depth integer | text + orioledb | public | orioledb_tree_stat | relid regclass, OUT level integer, OUT count bigint, OUT avgoccupied double precision, OUT avgvacated double precision | SETOF record + orioledb | public | orioledb_ucm_check | | boolean + orioledb | public | orioledb_version | | text + orioledb | public | orioledb_write_pages | relid oid | void + orioledb | public | pg_stopevent_reset | eventname text | boolean + orioledb | public | pg_stopevent_set | eventname text, condition jsonpath | void + orioledb | public | pg_stopevents | OUT stopevent text, OUT condition jsonpath, OUT waiter_pids integer[] | SETOF record + pageinspect | public | brin_metapage_info | page bytea, OUT magic text, OUT version integer, OUT pagesperrange integer, OUT lastrevmappage bigint | record + pageinspect | public | brin_page_type | page bytea | text + pageinspect | public | brin_revmap_data | page bytea, OUT pages tid | SETOF tid + pageinspect | public | bt_metap | relname text, OUT magic integer, OUT version integer, OUT root bigint, OUT level bigint, OUT fastroot bigint, OUT fastlevel bigint, OUT last_cleanup_num_delpages bigint, OUT 
last_cleanup_num_tuples double precision, OUT allequalimage boolean | record + pageinspect | public | bt_page_items | page bytea, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT nulls boolean, OUT vars boolean, OUT data text, OUT dead boolean, OUT htid tid, OUT tids tid[] | SETOF record + pageinspect | public | bt_page_items | relname text, blkno bigint, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT nulls boolean, OUT vars boolean, OUT data text, OUT dead boolean, OUT htid tid, OUT tids tid[] | SETOF record + pageinspect | public | bt_page_stats | relname text, blkno bigint, OUT blkno bigint, OUT type "char", OUT live_items integer, OUT dead_items integer, OUT avg_item_size integer, OUT page_size integer, OUT free_size integer, OUT btpo_prev bigint, OUT btpo_next bigint, OUT btpo_level bigint, OUT btpo_flags integer | record + pageinspect | public | fsm_page_contents | page bytea | text + pageinspect | public | get_raw_page | text, bigint | bytea + pageinspect | public | get_raw_page | text, text, bigint | bytea + pageinspect | public | gin_leafpage_items | page bytea, OUT first_tid tid, OUT nbytes smallint, OUT tids tid[] | SETOF record + pageinspect | public | gin_metapage_info | page bytea, OUT pending_head bigint, OUT pending_tail bigint, OUT tail_free_size integer, OUT n_pending_pages bigint, OUT n_pending_tuples bigint, OUT n_total_pages bigint, OUT n_entry_pages bigint, OUT n_data_pages bigint, OUT n_entries bigint, OUT version integer | record + pageinspect | public | gin_page_opaque_info | page bytea, OUT rightlink bigint, OUT maxoff integer, OUT flags text[] | record + pageinspect | public | gist_page_items | page bytea, index_oid regclass, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT dead boolean, OUT keys text | SETOF record + pageinspect | public | gist_page_items_bytea | page bytea, OUT itemoffset smallint, OUT ctid tid, OUT itemlen smallint, OUT dead boolean, OUT key_data bytea | SETOF record + pageinspect | public | gist_page_opaque_info | page bytea, OUT lsn pg_lsn, OUT nsn pg_lsn, OUT rightlink bigint, OUT flags text[] | record + pageinspect | public | hash_bitmap_info | index_oid regclass, blkno bigint, OUT bitmapblkno bigint, OUT bitmapbit integer, OUT bitstatus boolean | SETOF record + pageinspect | public | hash_metapage_info | page bytea, OUT magic bigint, OUT version bigint, OUT ntuples double precision, OUT ffactor integer, OUT bsize integer, OUT bmsize integer, OUT bmshift integer, OUT maxbucket bigint, OUT highmask bigint, OUT lowmask bigint, OUT ovflpoint bigint, OUT firstfree bigint, OUT nmaps bigint, OUT procid oid, OUT spares bigint[], OUT mapp bigint[] | record + pageinspect | public | hash_page_items | page bytea, OUT itemoffset integer, OUT ctid tid, OUT data bigint | SETOF record + pageinspect | public | hash_page_stats | page bytea, OUT live_items integer, OUT dead_items integer, OUT page_size integer, OUT free_size integer, OUT hasho_prevblkno bigint, OUT hasho_nextblkno bigint, OUT hasho_bucket bigint, OUT hasho_flag integer, OUT hasho_page_id integer | record + pageinspect | public | hash_page_type | page bytea | text + pageinspect | public | heap_page_item_attrs | page bytea, rel_oid regclass, do_detoast boolean, OUT lp smallint, OUT lp_off smallint, OUT lp_flags smallint, OUT lp_len smallint, OUT t_xmin xid, OUT t_xmax xid, OUT t_field3 integer, OUT t_ctid tid, OUT t_infomask2 integer, OUT t_infomask integer, OUT t_hoff smallint, OUT t_bits text, OUT t_oid oid, OUT t_attrs bytea[] | SETOF 
record + pageinspect | public | heap_page_item_attrs | page bytea, rel_oid regclass, OUT lp smallint, OUT lp_off smallint, OUT lp_flags smallint, OUT lp_len smallint, OUT t_xmin xid, OUT t_xmax xid, OUT t_field3 integer, OUT t_ctid tid, OUT t_infomask2 integer, OUT t_infomask integer, OUT t_hoff smallint, OUT t_bits text, OUT t_oid oid, OUT t_attrs bytea[] | SETOF record + pageinspect | public | heap_page_items | page bytea, OUT lp smallint, OUT lp_off smallint, OUT lp_flags smallint, OUT lp_len smallint, OUT t_xmin xid, OUT t_xmax xid, OUT t_field3 integer, OUT t_ctid tid, OUT t_infomask2 integer, OUT t_infomask integer, OUT t_hoff smallint, OUT t_bits text, OUT t_oid oid, OUT t_data bytea | SETOF record + pageinspect | public | heap_tuple_infomask_flags | t_infomask integer, t_infomask2 integer, OUT raw_flags text[], OUT combined_flags text[] | record + pageinspect | public | page_checksum | page bytea, blkno bigint | smallint + pageinspect | public | page_header | page bytea, OUT lsn pg_lsn, OUT checksum smallint, OUT flags smallint, OUT lower integer, OUT upper integer, OUT special integer, OUT pagesize integer, OUT version smallint, OUT prune_xid xid | record + pageinspect | public | tuple_data_split | rel_oid oid, t_data bytea, t_infomask integer, t_infomask2 integer, t_bits text | bytea[] + pageinspect | public | tuple_data_split | rel_oid oid, t_data bytea, t_infomask integer, t_infomask2 integer, t_bits text, do_detoast boolean | bytea[] + pg_backtrace | public | pg_backtrace_init | | void + pg_buffercache | public | pg_buffercache_evict | integer | boolean + pg_buffercache | public | pg_buffercache_pages | | SETOF record + pg_freespacemap | public | pg_freespace | regclass, bigint | smallint + pg_freespacemap | public | pg_freespace | rel regclass, OUT blkno bigint, OUT avail smallint | SETOF record + pg_graphql | graphql | _internal_resolve | query text, variables jsonb, "operationName" text, extensions jsonb | jsonb + pg_graphql | graphql | comment_directive | comment_ text | jsonb + pg_graphql | graphql | exception | message text | text + pg_graphql | graphql | get_schema_version | | integer + pg_graphql | graphql | increment_schema_version | | event_trigger + pg_graphql | graphql | resolve | query text, variables jsonb, "operationName" text, extensions jsonb | jsonb + pg_graphql | graphql_public | graphql | "operationName" text, query text, variables jsonb, extensions jsonb | jsonb + pg_hashids | public | hash_decode | text, text, integer | integer + pg_hashids | public | hash_encode | bigint | text + pg_hashids | public | hash_encode | bigint, text | text + pg_hashids | public | hash_encode | bigint, text, integer | text + pg_hashids | public | id_decode | text | bigint[] + pg_hashids | public | id_decode | text, text | bigint[] + pg_hashids | public | id_decode | text, text, integer | bigint[] + pg_hashids | public | id_decode | text, text, integer, text | bigint[] + pg_hashids | public | id_decode_once | text | bigint + pg_hashids | public | id_decode_once | text, text | bigint + pg_hashids | public | id_decode_once | text, text, integer | bigint + pg_hashids | public | id_decode_once | text, text, integer, text | bigint + pg_hashids | public | id_encode | bigint | text + pg_hashids | public | id_encode | bigint, text | text + pg_hashids | public | id_encode | bigint, text, integer | text + pg_hashids | public | id_encode | bigint, text, integer, text | text + pg_hashids | public | id_encode | bigint[] | text + pg_hashids | public | id_encode | bigint[], text | text + 
+ pg_hashids | public | hash_decode | text, text, integer | integer
+ pg_hashids | public | hash_encode | bigint | text
+ pg_hashids | public | hash_encode | bigint, text | text
+ pg_hashids | public | hash_encode | bigint, text, integer | text
+ pg_hashids | public | id_decode | text | bigint[]
+ pg_hashids | public | id_decode | text, text | bigint[]
+ pg_hashids | public | id_decode | text, text, integer | bigint[]
+ pg_hashids | public | id_decode | text, text, integer, text | bigint[]
+ pg_hashids | public | id_decode_once | text | bigint
+ pg_hashids | public | id_decode_once | text, text | bigint
+ pg_hashids | public | id_decode_once | text, text, integer | bigint
+ pg_hashids | public | id_decode_once | text, text, integer, text | bigint
+ pg_hashids | public | id_encode | bigint | text
+ pg_hashids | public | id_encode | bigint, text | text
+ pg_hashids | public | id_encode | bigint, text, integer | text
+ pg_hashids | public | id_encode | bigint, text, integer, text | text
+ pg_hashids | public | id_encode | bigint[] | text
+ pg_hashids | public | id_encode | bigint[], text | text
+ pg_hashids | public | id_encode | bigint[], text, integer | text
+ pg_hashids | public | id_encode | bigint[], text, integer, text | text
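A pg_hashids round trip with the default (unsalted) settings looks like:

    SELECT id_encode(1001);              -- short obfuscated text id
    SELECT id_decode(id_encode(1001));   -- decodes back to {1001}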
+ pg_jsonschema | public | json_matches_schema | schema json, instance json | boolean
+ pg_jsonschema | public | jsonb_matches_schema | schema json, instance jsonb | boolean
+ pg_jsonschema | public | jsonschema_is_valid | schema json | boolean
+ pg_jsonschema | public | jsonschema_validation_errors | schema json, instance json | text[]
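Typical pg_jsonschema validation usage:

    SELECT json_matches_schema(
      '{"type": "object", "properties": {"qty": {"type": "integer"}}}',
      '{"qty": 5}'
    );  -- true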
+ pg_net | net | _await_response | request_id bigint | boolean
+ pg_net | net | _encode_url_with_params_array | url text, params_array text[] | text
+ pg_net | net | _http_collect_response | request_id bigint, async boolean | net.http_response_result
+ pg_net | net | _urlencode_string | string character varying | text
+ pg_net | net | check_worker_is_up | | void
+ pg_net | net | http_collect_response | request_id bigint, async boolean | net.http_response_result
+ pg_net | net | http_delete | url text, params jsonb, headers jsonb, timeout_milliseconds integer | bigint
+ pg_net | net | http_get | url text, params jsonb, headers jsonb, timeout_milliseconds integer | bigint
+ pg_net | net | http_post | url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer | bigint
+ pg_net | net | worker_restart | | boolean
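pg_net requests are asynchronous: the call below only enqueues the request and returns its id (the URL is a placeholder, and the params/headers/timeout arguments are assumed to keep their upstream defaults):

    SELECT net.http_get('https://example.com') AS request_id;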
+ pg_prewarm | public | autoprewarm_dump_now | | bigint
+ pg_prewarm | public | autoprewarm_start_worker | | void
+ pg_prewarm | public | pg_prewarm | regclass, mode text, fork text, first_block bigint, last_block bigint | bigint
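A minimal pg_prewarm call, assuming a table named t and the default 'buffer' mode:

    SELECT pg_prewarm('t');  -- returns the number of blocks loaded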
+ pg_repack | repack | conflicted_triggers | oid | SETOF name
+ pg_repack | repack | create_index_type | oid, oid | void
+ pg_repack | repack | create_log_table | oid | void
+ pg_repack | repack | create_table | oid, name | void
+ pg_repack | repack | disable_autovacuum | regclass | void
+ pg_repack | repack | get_alter_col_storage | oid | text
+ pg_repack | repack | get_assign | oid, text | text
+ pg_repack | repack | get_columns_for_create_as | oid | text
+ pg_repack | repack | get_compare_pkey | oid, text | text
+ pg_repack | repack | get_create_index_type | oid, name | text
+ pg_repack | repack | get_create_trigger | relid oid, pkid oid | text
+ pg_repack | repack | get_drop_columns | oid, text | text
+ pg_repack | repack | get_enable_trigger | relid oid | text
+ pg_repack | repack | get_index_columns | oid | text
+ pg_repack | repack | get_order_by | oid, oid | text
+ pg_repack | repack | get_storage_param | oid | text
+ pg_repack | repack | get_table_and_inheritors | regclass | regclass[]
+ pg_repack | repack | oid2text | oid | text
+ pg_repack | repack | repack_apply | sql_peek cstring, sql_insert cstring, sql_delete cstring, sql_update cstring, sql_pop cstring, count integer | integer
+ pg_repack | repack | repack_drop | oid, integer | void
+ pg_repack | repack | repack_index_swap | oid | void
+ pg_repack | repack | repack_indexdef | oid, oid, name, boolean | text
+ pg_repack | repack | repack_swap | oid | void
+ pg_repack | repack | repack_trigger | | trigger
+ pg_repack | repack | version | | text
+ pg_repack | repack | version_sql | | text
+ pg_stat_monitor | public | decode_error_level | elevel integer | text
+ pg_stat_monitor | public | get_cmd_type | cmd_type integer | text
+ pg_stat_monitor | public | get_histogram_timings | | text
+ pg_stat_monitor | public | histogram | _bucket integer, _quryid bigint | SETOF record
+ pg_stat_monitor | public | pg_stat_monitor_internal | showtext boolean, OUT bucket bigint, OUT userid oid, OUT username text, OUT dbid oid, OUT datname text, OUT client_ip bigint, OUT queryid bigint, OUT planid bigint, OUT query text, OUT query_plan text, OUT pgsm_query_id bigint, OUT top_queryid bigint, OUT top_query text, OUT application_name text, OUT relations text, OUT cmd_type integer, OUT elevel integer, OUT sqlcode text, OUT message text, OUT bucket_start_time timestamp with time zone, OUT calls bigint, OUT total_exec_time double precision, OUT min_exec_time double precision, OUT max_exec_time double precision, OUT mean_exec_time double precision, OUT stddev_exec_time double precision, OUT rows bigint, OUT plans bigint, OUT total_plan_time double precision, OUT min_plan_time double precision, OUT max_plan_time double precision, OUT mean_plan_time double precision, OUT stddev_plan_time double precision, OUT shared_blks_hit bigint, OUT shared_blks_read bigint, OUT shared_blks_dirtied bigint, OUT shared_blks_written bigint, OUT local_blks_hit bigint, OUT local_blks_read bigint, OUT local_blks_dirtied bigint, OUT local_blks_written bigint, OUT temp_blks_read bigint, OUT temp_blks_written bigint, OUT shared_blk_read_time double precision, OUT shared_blk_write_time double precision, OUT local_blk_read_time double precision, OUT local_blk_write_time double precision, OUT temp_blk_read_time double precision, OUT temp_blk_write_time double precision, OUT resp_calls text, OUT cpu_user_time double precision, OUT cpu_sys_time double precision, OUT wal_records bigint, OUT wal_fpi bigint, OUT wal_bytes numeric, OUT comments text, OUT jit_functions bigint, OUT jit_generation_time double precision, OUT jit_inlining_count bigint, OUT jit_inlining_time double precision, OUT jit_optimization_count bigint, OUT jit_optimization_time double precision, OUT jit_emission_count bigint, OUT jit_emission_time double precision, OUT jit_deform_count bigint, OUT jit_deform_time double precision, OUT stats_since timestamp with time zone, OUT minmax_stats_since timestamp with time zone, OUT toplevel boolean, OUT bucket_done boolean | SETOF record
+ pg_stat_monitor | public | pg_stat_monitor_reset | | void
+ pg_stat_monitor | public | pg_stat_monitor_version | | text
+ pg_stat_monitor | public | pgsm_create_11_view | | integer
+ pg_stat_monitor | public | pgsm_create_13_view | | integer
+ pg_stat_monitor | public | pgsm_create_14_view | | integer
+ pg_stat_monitor | public | pgsm_create_15_view | | integer
+ pg_stat_monitor | public | pgsm_create_17_view | | integer
+ pg_stat_monitor | public | pgsm_create_view | | integer
+ pg_stat_monitor | public | range | | text[]
+ pg_stat_statements | extensions | pg_stat_statements | showtext boolean, OUT userid oid, OUT dbid oid, OUT toplevel boolean, OUT queryid bigint, OUT query text, OUT plans bigint, OUT total_plan_time double precision, OUT min_plan_time double precision, OUT max_plan_time double precision, OUT mean_plan_time double precision, OUT stddev_plan_time double precision, OUT calls bigint, OUT total_exec_time double precision, OUT min_exec_time double precision, OUT max_exec_time double precision, OUT mean_exec_time double precision, OUT stddev_exec_time double precision, OUT rows bigint, OUT shared_blks_hit bigint, OUT shared_blks_read bigint, OUT shared_blks_dirtied bigint, OUT shared_blks_written bigint, OUT local_blks_hit bigint, OUT local_blks_read bigint, OUT local_blks_dirtied bigint, OUT local_blks_written bigint, OUT temp_blks_read bigint, OUT temp_blks_written bigint, OUT shared_blk_read_time double precision, OUT shared_blk_write_time double precision, OUT local_blk_read_time double precision, OUT local_blk_write_time double precision, OUT temp_blk_read_time double precision, OUT temp_blk_write_time double precision, OUT wal_records bigint, OUT wal_fpi bigint, OUT wal_bytes numeric, OUT jit_functions bigint, OUT jit_generation_time double precision, OUT jit_inlining_count bigint, OUT jit_inlining_time double precision, OUT jit_optimization_count bigint, OUT jit_optimization_time double precision, OUT jit_emission_count bigint, OUT jit_emission_time double precision, OUT jit_deform_count bigint, OUT jit_deform_time double precision, OUT stats_since timestamp with time zone, OUT minmax_stats_since timestamp with time zone | SETOF record
+ pg_stat_statements | extensions | pg_stat_statements_info | OUT dealloc bigint, OUT stats_reset timestamp with time zone | record
+ pg_stat_statements | extensions | pg_stat_statements_reset | userid oid, dbid oid, queryid bigint, minmax_only boolean | timestamp with time zone
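The usual entry point is the pg_stat_statements view built on the function above (installed here in the extensions schema):

    SELECT query, calls, mean_exec_time
    FROM extensions.pg_stat_statements
    ORDER BY total_exec_time DESC
    LIMIT 5;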
+ pg_surgery | public | heap_force_freeze | reloid regclass, tids tid[] | void
+ pg_surgery | public | heap_force_kill | reloid regclass, tids tid[] | void
+ pg_tle | pgtle | available_extension_versions | OUT name name, OUT version text, OUT superuser boolean, OUT trusted boolean, OUT relocatable boolean, OUT schema name, OUT requires name[], OUT comment text | SETOF record
+ pg_tle | pgtle | available_extensions | OUT name name, OUT default_version text, OUT comment text | SETOF record
+ pg_tle | pgtle | create_base_type | typenamespace regnamespace, typename name, infunc regprocedure, outfunc regprocedure, internallength integer, alignment text, storage text | void
+ pg_tle | pgtle | create_base_type_if_not_exists | typenamespace regnamespace, typename name, infunc regprocedure, outfunc regprocedure, internallength integer, alignment text, storage text | boolean
+ pg_tle | pgtle | create_operator_func | typenamespace regnamespace, typename name, opfunc regprocedure | void
+ pg_tle | pgtle | create_operator_func_if_not_exists | typenamespace regnamespace, typename name, opfunc regprocedure | boolean
+ pg_tle | pgtle | create_shell_type | typenamespace regnamespace, typename name | void
+ pg_tle | pgtle | create_shell_type_if_not_exists | typenamespace regnamespace, typename name | boolean
+ pg_tle | pgtle | extension_update_paths | name name, OUT source text, OUT target text, OUT path text | SETOF record
+ pg_tle | pgtle | install_extension | name text, version text, description text, ext text, requires text[] | boolean
+ pg_tle | pgtle | install_extension_version_sql | name text, version text, ext text | boolean
+ pg_tle | pgtle | install_update_path | name text, fromvers text, tovers text, ext text | boolean
+ pg_tle | pgtle | pg_tle_feature_info_sql_drop | | event_trigger
+ pg_tle | pgtle | register_feature | proc regproc, feature pgtle.pg_tle_features | void
+ pg_tle | pgtle | register_feature_if_not_exists | proc regproc, feature pgtle.pg_tle_features | boolean
+ pg_tle | pgtle | set_default_version | name text, version text | boolean
+ pg_tle | pgtle | uninstall_extension | extname text | boolean
+ pg_tle | pgtle | uninstall_extension | extname text, version text | boolean
+ pg_tle | pgtle | uninstall_extension_if_exists | extname text | boolean
+ pg_tle | pgtle | uninstall_update_path | extname text, fromvers text, tovers text | boolean
+ pg_tle | pgtle | uninstall_update_path_if_exists | extname text, fromvers text, tovers text | boolean
+ pg_tle | pgtle | unregister_feature | proc regproc, feature pgtle.pg_tle_features | void
+ pg_tle | pgtle | unregister_feature_if_exists | proc regproc, feature pgtle.pg_tle_features | boolean
+ pg_trgm | public | gin_extract_query_trgm | text, internal, smallint, internal, internal, internal, internal | internal
+ pg_trgm | public | gin_extract_value_trgm | text, internal | internal
+ pg_trgm | public | gin_trgm_consistent | internal, smallint, text, integer, internal, internal, internal, internal | boolean
+ pg_trgm | public | gin_trgm_triconsistent | internal, smallint, text, integer, internal, internal, internal | "char"
+ pg_trgm | public | gtrgm_compress | internal | internal
+ pg_trgm | public | gtrgm_consistent | internal, text, smallint, oid, internal | boolean
+ pg_trgm | public | gtrgm_decompress | internal | internal
+ pg_trgm | public | gtrgm_distance | internal, text, smallint, oid, internal | double precision
+ pg_trgm | public | gtrgm_in | cstring | gtrgm
+ pg_trgm | public | gtrgm_options | internal | void
+ pg_trgm | public | gtrgm_out | gtrgm | cstring
+ pg_trgm | public | gtrgm_penalty | internal, internal, internal | internal
+ pg_trgm | public | gtrgm_picksplit | internal, internal | internal
+ pg_trgm | public | gtrgm_same | gtrgm, gtrgm, internal | internal
+ pg_trgm | public | gtrgm_union | internal, internal | gtrgm
+ pg_trgm | public | set_limit | real | real
+ pg_trgm | public | show_limit | | real
+ pg_trgm | public | show_trgm | text | text[]
+ pg_trgm | public | similarity | text, text | real
+ pg_trgm | public | similarity_dist | text, text | real
+ pg_trgm | public | similarity_op | text, text | boolean
+ pg_trgm | public | strict_word_similarity | text, text | real
+ pg_trgm | public | strict_word_similarity_commutator_op | text, text | boolean
+ pg_trgm | public | strict_word_similarity_dist_commutator_op | text, text | real
+ pg_trgm | public | strict_word_similarity_dist_op | text, text | real
+ pg_trgm | public | strict_word_similarity_op | text, text | boolean
+ pg_trgm | public | word_similarity | text, text | real
+ pg_trgm | public | word_similarity_commutator_op | text, text | boolean
+ pg_trgm | public | word_similarity_dist_commutator_op | text, text | real
+ pg_trgm | public | word_similarity_dist_op | text, text | real
+ pg_trgm | public | word_similarity_op | text, text | boolean
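Quick pg_trgm similarity check and threshold handling:

    SELECT similarity('postgres', 'postgras');  -- a value in [0, 1]
    SELECT show_limit();                        -- current % operator threshold
    SELECT set_limit(0.4);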
+ pg_visibility | public | pg_check_frozen | regclass, OUT t_ctid tid | SETOF tid
+ pg_visibility | public | pg_check_visible | regclass, OUT t_ctid tid | SETOF tid
+ pg_visibility | public | pg_truncate_visibility_map | regclass | void
+ pg_visibility | public | pg_visibility | regclass, blkno bigint, OUT all_visible boolean, OUT all_frozen boolean, OUT pd_all_visible boolean | record
+ pg_visibility | public | pg_visibility | regclass, OUT blkno bigint, OUT all_visible boolean, OUT all_frozen boolean, OUT pd_all_visible boolean | SETOF record
+ pg_visibility | public | pg_visibility_map | regclass, blkno bigint, OUT all_visible boolean, OUT all_frozen boolean | record
+ pg_visibility | public | pg_visibility_map | regclass, OUT blkno bigint, OUT all_visible boolean, OUT all_frozen boolean | SETOF record
+ pg_visibility | public | pg_visibility_map_summary | regclass, OUT all_visible bigint, OUT all_frozen bigint | record
+ pg_walinspect | public | pg_get_wal_record_info | in_lsn pg_lsn, OUT start_lsn pg_lsn, OUT end_lsn pg_lsn, OUT prev_lsn pg_lsn, OUT xid xid, OUT resource_manager text, OUT record_type text, OUT record_length integer, OUT main_data_length integer, OUT fpi_length integer, OUT description text, OUT block_ref text | record
+ pg_walinspect | public | pg_get_wal_records_info | start_lsn pg_lsn, end_lsn pg_lsn, OUT start_lsn pg_lsn, OUT end_lsn pg_lsn, OUT prev_lsn pg_lsn, OUT xid xid, OUT resource_manager text, OUT record_type text, OUT record_length integer, OUT main_data_length integer, OUT fpi_length integer, OUT description text, OUT block_ref text | SETOF record
+ pg_walinspect | public | pg_get_wal_stats | start_lsn pg_lsn, end_lsn pg_lsn, per_record boolean, OUT "resource_manager/record_type" text, OUT count bigint, OUT count_percentage double precision, OUT record_size bigint, OUT record_size_percentage double precision, OUT fpi_size bigint, OUT fpi_size_percentage double precision, OUT combined_size bigint, OUT combined_size_percentage double precision | SETOF record
+ pgaudit | public | pgaudit_ddl_command_end | | event_trigger
+ pgaudit | public | pgaudit_sql_drop | | event_trigger
+ pgcrypto | extensions | armor | bytea | text
+ pgcrypto | extensions | armor | bytea, text[], text[] | text
+ pgcrypto | extensions | crypt | text, text | text
+ pgcrypto | extensions | dearmor | text | bytea
+ pgcrypto | extensions | decrypt | bytea, bytea, text | bytea
+ pgcrypto | extensions | decrypt_iv | bytea, bytea, bytea, text | bytea
+ pgcrypto | extensions | digest | bytea, text | bytea
+ pgcrypto | extensions | digest | text, text | bytea
+ pgcrypto | extensions | encrypt | bytea, bytea, text | bytea
+ pgcrypto | extensions | encrypt_iv | bytea, bytea, bytea, text | bytea
+ pgcrypto | extensions | gen_random_bytes | integer | bytea
+ pgcrypto | extensions | gen_random_uuid | | uuid
+ pgcrypto | extensions | gen_salt | text | text
+ pgcrypto | extensions | gen_salt | text, integer | text
+ pgcrypto | extensions | hmac | bytea, bytea, text | bytea
+ pgcrypto | extensions | hmac | text, text, text | bytea
+ pgcrypto | extensions | pgp_armor_headers | text, OUT key text, OUT value text | SETOF record
+ pgcrypto | extensions | pgp_key_id | bytea | text
+ pgcrypto | extensions | pgp_pub_decrypt | bytea, bytea | text
+ pgcrypto | extensions | pgp_pub_decrypt | bytea, bytea, text | text
+ pgcrypto | extensions | pgp_pub_decrypt | bytea, bytea, text, text | text
+ pgcrypto | extensions | pgp_pub_decrypt_bytea | bytea, bytea | bytea
+ pgcrypto | extensions | pgp_pub_decrypt_bytea | bytea, bytea, text | bytea
+ pgcrypto | extensions | pgp_pub_decrypt_bytea | bytea, bytea, text, text | bytea
+ pgcrypto | extensions | pgp_pub_encrypt | text, bytea | bytea
+ pgcrypto | extensions | pgp_pub_encrypt | text, bytea, text | bytea
+ pgcrypto | extensions | pgp_pub_encrypt_bytea | bytea, bytea | bytea
+ pgcrypto | extensions | pgp_pub_encrypt_bytea | bytea, bytea, text | bytea
+ pgcrypto | extensions | pgp_sym_decrypt | bytea, text | text
+ pgcrypto | extensions | pgp_sym_decrypt | bytea, text, text | text
+ pgcrypto | extensions | pgp_sym_decrypt_bytea | bytea, text | bytea
+ pgcrypto | extensions | pgp_sym_decrypt_bytea | bytea, text, text | bytea
+ pgcrypto | extensions | pgp_sym_encrypt | text, text | bytea
+ pgcrypto | extensions | pgp_sym_encrypt | text, text, text | bytea
+ pgcrypto | extensions | pgp_sym_encrypt_bytea | bytea, text | bytea
+ pgcrypto | extensions | pgp_sym_encrypt_bytea | bytea, text, text | bytea
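A common pgcrypto password-hashing pattern (schema-qualified because the extension is installed in the extensions schema here):

    SELECT extensions.gen_random_uuid();
    -- store the result of crypt(); verify by re-crypting with the stored hash as salt
    SELECT extensions.crypt('s3cret', extensions.gen_salt('bf'));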
+ pgjwt | extensions | algorithm_sign | signables text, secret text, algorithm text | text
+ pgjwt | extensions | sign | payload json, secret text, algorithm text | text
+ pgjwt | extensions | try_cast_double | inp text | double precision
+ pgjwt | extensions | url_decode | data text | bytea
+ pgjwt | extensions | url_encode | data bytea | text
+ pgjwt | extensions | verify | token text, secret text, algorithm text | TABLE(header json, payload json, valid boolean)
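A pgjwt sign-and-verify round trip, assuming the upstream default HS256 algorithm applies:

    SELECT * FROM extensions.verify(
      extensions.sign('{"sub": "user_1"}'::json, 'jwt-secret'),
      'jwt-secret'
    );  -- valid = true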
+ pgmq | pgmq | _belongs_to_pgmq | table_name text | boolean
+ pgmq | pgmq | _ensure_pg_partman_installed | | void
+ pgmq | pgmq | _get_partition_col | partition_interval text | text
+ pgmq | pgmq | _get_pg_partman_major_version | | integer
+ pgmq | pgmq | _get_pg_partman_schema | | text
+ pgmq | pgmq | archive | queue_name text, msg_id bigint | boolean
+ pgmq | pgmq | archive | queue_name text, msg_ids bigint[] | SETOF bigint
+ pgmq | pgmq | convert_archive_partitioned | table_name text, partition_interval text, retention_interval text, leading_partition integer | void
+ pgmq | pgmq | create | queue_name text | void
+ pgmq | pgmq | create_non_partitioned | queue_name text | void
+ pgmq | pgmq | create_partitioned | queue_name text, partition_interval text, retention_interval text | void
+ pgmq | pgmq | create_unlogged | queue_name text | void
+ pgmq | pgmq | delete | queue_name text, msg_id bigint | boolean
+ pgmq | pgmq | delete | queue_name text, msg_ids bigint[] | SETOF bigint
+ pgmq | pgmq | detach_archive | queue_name text | void
+ pgmq | pgmq | drop_queue | queue_name text | boolean
+ pgmq | pgmq | format_table_name | queue_name text, prefix text | text
+ pgmq | pgmq | list_queues | | SETOF pgmq.queue_record
+ pgmq | pgmq | metrics | queue_name text | pgmq.metrics_result
+ pgmq | pgmq | metrics_all | | SETOF pgmq.metrics_result
+ pgmq | pgmq | pop | queue_name text | SETOF pgmq.message_record
+ pgmq | pgmq | purge_queue | queue_name text | bigint
+ pgmq | pgmq | read | queue_name text, vt integer, qty integer | SETOF pgmq.message_record
+ pgmq | pgmq | read_with_poll | queue_name text, vt integer, qty integer, max_poll_seconds integer, poll_interval_ms integer | SETOF pgmq.message_record
+ pgmq | pgmq | send | queue_name text, msg jsonb, delay integer | SETOF bigint
+ pgmq | pgmq | send_batch | queue_name text, msgs jsonb[], delay integer | SETOF bigint
+ pgmq | pgmq | set_vt | queue_name text, msg_id bigint, vt integer | SETOF pgmq.message_record
+ pgmq | pgmq | validate_queue_name | queue_name text | void
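A basic pgmq queue lifecycle ('jobs' is a hypothetical queue name, and send's delay argument is assumed to keep its upstream default):

    SELECT pgmq.create('jobs');
    SELECT pgmq.send('jobs', '{"task": "resize"}'::jsonb);  -- returns msg_id
    SELECT * FROM pgmq.read('jobs', 30, 1);  -- read 1 message, hide it for 30 s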
+ pgroonga | pgroonga | command | groongacommand text | text
+ pgroonga | pgroonga | command | groongacommand text, arguments text[] | text
+ pgroonga | pgroonga | command_escape_value | value text | text
+ pgroonga | pgroonga | contain_varchar_array | character varying[], character varying | boolean
+ pgroonga | pgroonga | escape | value bigint | text
+ pgroonga | pgroonga | escape | value boolean | text
+ pgroonga | pgroonga | escape | value double precision | text
+ pgroonga | pgroonga | escape | value integer | text
+ pgroonga | pgroonga | escape | value real | text
+ pgroonga | pgroonga | escape | value smallint | text
+ pgroonga | pgroonga | escape | value text | text
+ pgroonga | pgroonga | escape | value text, special_characters text | text
+ pgroonga | pgroonga | escape | value timestamp with time zone | text
+ pgroonga | pgroonga | escape | value timestamp without time zone | text
+ pgroonga | pgroonga | flush | indexname cstring | boolean
+ pgroonga | pgroonga | highlight_html | target text, keywords text[] | text
+ pgroonga | pgroonga | match_in_text | text, text[] | boolean
+ pgroonga | pgroonga | match_in_text_array | text[], text[] | boolean
+ pgroonga | pgroonga | match_in_varchar | character varying, character varying[] | boolean
+ pgroonga | pgroonga | match_jsonb | jsonb, text | boolean
+ pgroonga | pgroonga | match_positions_byte | target text, keywords text[] | integer[]
+ pgroonga | pgroonga | match_positions_character | target text, keywords text[] | integer[]
+ pgroonga | pgroonga | match_query | character varying, character varying | boolean
+ pgroonga | pgroonga | match_query | text, text | boolean
+ pgroonga | pgroonga | match_query | text[], text | boolean
+ pgroonga | pgroonga | match_regexp | character varying, character varying | boolean
+ pgroonga | pgroonga | match_regexp | text, text | boolean
+ pgroonga | pgroonga | match_script_jsonb | jsonb, text | boolean
+ pgroonga | pgroonga | match_term | target character varying, term character varying | boolean
+ pgroonga | pgroonga | match_term | target character varying[], term character varying | boolean
+ pgroonga | pgroonga | match_term | target text, term text | boolean
+ pgroonga | pgroonga | match_term | target text[], term text | boolean
+ pgroonga | pgroonga | match_text | text, text | boolean
+ pgroonga | pgroonga | match_text_array | text[], text | boolean
+ pgroonga | pgroonga | match_varchar | character varying, character varying | boolean
+ pgroonga | pgroonga | prefix_in_text | text, text[] | boolean
+ pgroonga | pgroonga | prefix_in_text_array | text[], text[] | boolean
+ pgroonga | pgroonga | prefix_rk_in_text | text, text[] | boolean
+ pgroonga | pgroonga | prefix_rk_in_text_array | text[], text[] | boolean
+ pgroonga | pgroonga | prefix_rk_text | text, text | boolean
+ pgroonga | pgroonga | prefix_rk_text_array | text[], text | boolean
+ pgroonga | pgroonga | prefix_text | text, text | boolean
+ pgroonga | pgroonga | prefix_text_array | text[], text | boolean
+ pgroonga | pgroonga | query_escape | query text | text
+ pgroonga | pgroonga | query_expand | tablename cstring, termcolumnname text, synonymscolumnname text, query text | text
+ pgroonga | pgroonga | query_extract_keywords | query text | text[]
+ pgroonga | pgroonga | query_in_text | text, text[] | boolean
+ pgroonga | pgroonga | query_in_text_array | text[], text[] | boolean
+ pgroonga | pgroonga | query_in_varchar | character varying, character varying[] | boolean
+ pgroonga | pgroonga | query_jsonb | jsonb, text | boolean
+ pgroonga | pgroonga | query_text | text, text | boolean
+ pgroonga | pgroonga | query_text_array | text[], text | boolean
+ pgroonga | pgroonga | query_varchar | character varying, character varying | boolean
+ pgroonga | pgroonga | regexp_text | text, text | boolean
+ pgroonga | pgroonga | regexp_varchar | character varying, character varying | boolean
+ pgroonga | pgroonga | score | "row" record | double precision
+ pgroonga | pgroonga | script_jsonb | jsonb, text | boolean
+ pgroonga | pgroonga | script_text | text, text | boolean
+ pgroonga | pgroonga | script_text_array | text[], text | boolean
+ pgroonga | pgroonga | script_varchar | character varying, character varying | boolean
+ pgroonga | pgroonga | similar_text | text, text | boolean
+ pgroonga | pgroonga | similar_text_array | text[], text | boolean
+ pgroonga | pgroonga | similar_varchar | character varying, character varying | boolean
+ pgroonga | pgroonga | snippet_html | target text, keywords text[], width integer | text[]
+ pgroonga | pgroonga | table_name | indexname cstring | text
+ pgroonga | public | pgroonga_command | groongacommand text | text
+ pgroonga | public | pgroonga_command | groongacommand text, arguments text[] | text
+ pgroonga | public | pgroonga_command_escape_value | value text | text
+ pgroonga | public | pgroonga_condition | query text, weights integer[], scorers text[], schema_name text, index_name text, column_name text, fuzzy_max_distance_ratio real | pgroonga_condition
+ pgroonga | public | pgroonga_contain_varchar_array | character varying[], character varying | boolean
+ pgroonga | public | pgroonga_equal_query_text_array | targets text[], query text | boolean
+ pgroonga | public | pgroonga_equal_query_text_array_condition | targets text[], condition pgroonga_condition | boolean
+ pgroonga | public | pgroonga_equal_query_text_array_condition | targets text[], condition pgroonga_full_text_search_condition | boolean
+ pgroonga | public | pgroonga_equal_query_varchar_array | targets character varying[], query text | boolean
+ pgroonga | public | pgroonga_equal_query_varchar_array_condition | targets character varying[], condition pgroonga_condition | boolean
+ pgroonga | public | pgroonga_equal_query_varchar_array_condition | targets character varying[], condition pgroonga_full_text_search_condition | boolean
+ pgroonga | public | pgroonga_equal_text | target text, other text | boolean
+ pgroonga | public | pgroonga_equal_text_condition | target text, condition pgroonga_condition | boolean
+ pgroonga | public | pgroonga_equal_text_condition | target text, condition pgroonga_full_text_search_condition | boolean
+ pgroonga | public | pgroonga_equal_varchar | target character varying, other character varying | boolean
+ pgroonga | public | pgroonga_equal_varchar_condition | target character varying, condition pgroonga_condition | boolean
+ pgroonga | public | pgroonga_equal_varchar_condition | target character varying, condition pgroonga_full_text_search_condition | boolean
+ pgroonga | public | pgroonga_escape | value bigint | text
+ pgroonga | public | pgroonga_escape | value boolean | text
+ pgroonga | public | pgroonga_escape | value double precision | text
+ pgroonga | public | pgroonga_escape | value integer | text
+ pgroonga | public | pgroonga_escape | value real | text
+ pgroonga | public | pgroonga_escape | value smallint | text
+ pgroonga | public | pgroonga_escape | value text | text
+ pgroonga | public | pgroonga_escape | value text, special_characters text | text
+ pgroonga | public | pgroonga_escape | value timestamp with time zone | text
+ pgroonga | public | pgroonga_escape | value timestamp without time zone | text
+ pgroonga | public | pgroonga_flush | indexname cstring | boolean
+ pgroonga | public | pgroonga_handler | internal | index_am_handler
+ pgroonga | public | pgroonga_highlight_html | target text, keywords text[] | text
+ pgroonga | public | pgroonga_highlight_html | target text, keywords text[], indexname cstring | text
+ pgroonga | public | pgroonga_highlight_html | targets text[], keywords text[] | text[]
+ pgroonga | public | pgroonga_highlight_html | targets text[], keywords text[], indexname cstring | text[]
+ pgroonga | public | pgroonga_index_column_name | indexname cstring, columnindex integer | text
+ pgroonga | public | pgroonga_index_column_name | indexname cstring, columnname text | text
+ pgroonga | public | pgroonga_is_writable | | boolean
+ pgroonga | public | pgroonga_list_broken_indexes | | SETOF text
+ pgroonga | public | pgroonga_list_lagged_indexes | | SETOF text
+ pgroonga | public | pgroonga_match_in_text | text, text[] | boolean
+ pgroonga | public | pgroonga_match_in_text_array | text[], text[] | boolean
+ pgroonga | public | pgroonga_match_in_varchar | character varying, character varying[] | boolean
+ pgroonga | public | pgroonga_match_jsonb | jsonb, text | boolean
+ pgroonga | public | pgroonga_match_positions_byte | target text, keywords text[] | integer[]
+ pgroonga | public | pgroonga_match_positions_byte | target text, keywords text[], indexname cstring | integer[]
+ pgroonga | public | pgroonga_match_positions_character | target text, keywords text[] | integer[]
+ pgroonga | public | pgroonga_match_positions_character | target text, keywords text[], indexname cstring | integer[]
+ pgroonga | public | pgroonga_match_query | character varying, character varying | boolean
+ pgroonga | public | pgroonga_match_query | text, text | boolean
+ pgroonga | public | pgroonga_match_query | text[], text | boolean
+ pgroonga | public | pgroonga_match_regexp | character varying, character varying | boolean
+ pgroonga | public | pgroonga_match_regexp | text, text | boolean
+ pgroonga | public | pgroonga_match_script_jsonb | jsonb, text | boolean
+ pgroonga | public | pgroonga_match_term | target character varying, term character varying | boolean
+ pgroonga | public | pgroonga_match_term | target character varying[], term character varying | boolean
+ pgroonga | public | pgroonga_match_term | target text, term text | boolean
+ pgroonga | public | pgroonga_match_term | target text[], term text | boolean
+ pgroonga | public | pgroonga_match_text | text, text | boolean
+ pgroonga | public | pgroonga_match_text_array | text[], text | boolean
+ pgroonga | public | pgroonga_match_text_array_condition | target text[], condition pgroonga_condition | boolean
+ pgroonga | public | pgroonga_match_text_array_condition | target text[], condition pgroonga_full_text_search_condition | boolean
+ pgroonga | public | pgroonga_match_text_array_condition_with_scorers | target text[], condition pgroonga_full_text_search_condition_with_scorers | boolean
+ pgroonga | public | pgroonga_match_text_condition | target text, condition pgroonga_condition | boolean
+ pgroonga | public | pgroonga_match_text_condition | target text, condition pgroonga_full_text_search_condition | boolean
+ pgroonga | public | pgroonga_match_text_condition_with_scorers | target text, condition pgroonga_full_text_search_condition_with_scorers | boolean
+ pgroonga | public | pgroonga_match_varchar | character varying, character varying | boolean
+ pgroonga | public | pgroonga_match_varchar_condition | target character varying, condition pgroonga_condition | boolean
+ pgroonga | public | pgroonga_match_varchar_condition | target character varying, condition pgroonga_full_text_search_condition | boolean
+ pgroonga | public | pgroonga_match_varchar_condition_with_scorers | target character varying, condition pgroonga_full_text_search_condition_with_scorers | boolean
+ pgroonga | public | pgroonga_normalize | target text | text
+ pgroonga | public | pgroonga_normalize | target text, normalizername text | text
+ pgroonga | public | pgroonga_not_prefix_in_text | text, text[] | boolean
+ pgroonga | public | pgroonga_prefix_in_text | text, text[] | boolean
+ pgroonga | public | pgroonga_prefix_in_text_array | text[], text[] | boolean
+ pgroonga | public | pgroonga_prefix_in_varchar | character varying, character varying[] | boolean
+ pgroonga | public | pgroonga_prefix_in_varchar_array | character varying[], character varying[] | boolean
+ pgroonga | public | pgroonga_prefix_rk_in_text | text, text[] | boolean
+ pgroonga | public | pgroonga_prefix_rk_in_text_array | text[], text[] | boolean
+ pgroonga | public | pgroonga_prefix_rk_in_varchar | character varying, character varying[] | boolean
+ pgroonga | public | pgroonga_prefix_rk_in_varchar_array | character varying[], character varying[] | boolean
+ pgroonga | public | pgroonga_prefix_rk_text | text, text | boolean
+ pgroonga | public | pgroonga_prefix_rk_text_array | text[], text | boolean
+ pgroonga | public | pgroonga_prefix_rk_varchar | character varying, character varying | boolean
+ pgroonga | public | pgroonga_prefix_rk_varchar_array | character varying[], character varying | boolean
+ pgroonga | public | pgroonga_prefix_text | text, text | boolean
+ pgroonga | public | pgroonga_prefix_text_array | text[], text | boolean
+ pgroonga | public | pgroonga_prefix_text_array_condition | text[], pgroonga_condition | boolean
+ pgroonga | public | pgroonga_prefix_text_condition | text, condition pgroonga_condition | boolean
+ pgroonga | public | pgroonga_prefix_text_condition | text, condition pgroonga_full_text_search_condition | boolean
+ pgroonga | public | pgroonga_prefix_varchar | character varying, character varying | boolean
+ pgroonga | public | pgroonga_prefix_varchar_array | character varying[], character varying | boolean
+ pgroonga | public | pgroonga_prefix_varchar_array_condition | character varying[], pgroonga_condition | boolean
+ pgroonga | public | pgroonga_prefix_varchar_condition | target character varying, conditoin pgroonga_condition | boolean
+ pgroonga | public | pgroonga_prefix_varchar_condition | target character varying, conditoin pgroonga_full_text_search_condition | boolean
+ pgroonga | public | pgroonga_query_escape | query text | text
+ pgroonga | public | pgroonga_query_expand | tablename cstring, termcolumnname text, synonymscolumnname text, query text | text
+ pgroonga | public | pgroonga_query_extract_keywords | query text, index_name text | text[]
+ pgroonga | public | pgroonga_query_in_text | text, text[] | boolean
+ pgroonga | public | pgroonga_query_in_text_array | text[], text[] | boolean
+ pgroonga | public | pgroonga_query_in_varchar | character varying, character varying[] | boolean
+ pgroonga | public | pgroonga_query_jsonb | jsonb, text | boolean
+ pgroonga | public | pgroonga_query_text | text, text | boolean
+ pgroonga | public | pgroonga_query_text_array | text[], text | boolean
+ pgroonga | public | pgroonga_query_text_array_condition | targets text[], condition pgroonga_condition | boolean
+ pgroonga | public | pgroonga_query_text_array_condition | targets text[], condition pgroonga_full_text_search_condition | boolean
+ pgroonga | public | pgroonga_query_text_array_condition_with_scorers | targets text[], condition pgroonga_full_text_search_condition_with_scorers | boolean
+ pgroonga | public | pgroonga_query_text_condition | target text, condition pgroonga_condition | boolean
+ pgroonga | public | pgroonga_query_text_condition | target text, condition pgroonga_full_text_search_condition | boolean
+ pgroonga | public | pgroonga_query_text_condition_with_scorers | target text, condition pgroonga_full_text_search_condition_with_scorers | boolean
+ pgroonga | public | pgroonga_query_varchar | character varying, character varying | boolean
+ pgroonga | public | pgroonga_query_varchar_condition | target character varying, condition pgroonga_condition | boolean
+ pgroonga | public | pgroonga_query_varchar_condition | target character varying, condition pgroonga_full_text_search_condition | boolean
+ pgroonga | public | pgroonga_query_varchar_condition_with_scorers | target character varying, condition pgroonga_full_text_search_condition_with_scorers | boolean
+ pgroonga | public | pgroonga_regexp_in_text | text, text[] | boolean
+ pgroonga | public | pgroonga_regexp_in_varchar | character varying, character varying[] | boolean
+ pgroonga | public | pgroonga_regexp_text | text, text | boolean
+ pgroonga | public | pgroonga_regexp_text_array | targets text[], pattern text | boolean
+ pgroonga | public | pgroonga_regexp_text_array_condition | targets text[], pattern pgroonga_condition | boolean
+ pgroonga | public | pgroonga_regexp_varchar | character varying, character varying | boolean
+ pgroonga | public | pgroonga_result_to_jsonb_objects | result jsonb | jsonb
+ pgroonga | public | pgroonga_result_to_recordset | result jsonb | SETOF record
+ pgroonga | public | pgroonga_score | "row" record | double precision
+ pgroonga | public | pgroonga_score | tableoid oid, ctid tid | double precision
+ pgroonga | public | pgroonga_script_jsonb | jsonb, text | boolean
+ pgroonga | public | pgroonga_script_text | text, text | boolean
+ pgroonga | public | pgroonga_script_text_array | text[], text | boolean
+ pgroonga | public | pgroonga_script_varchar | character varying, character varying | boolean
+ pgroonga | public | pgroonga_set_writable | newwritable boolean | boolean
+ pgroonga | public | pgroonga_similar_text | text, text | boolean
+ pgroonga | public | pgroonga_similar_text_array | text[], text | boolean
+ pgroonga | public | pgroonga_similar_varchar | character varying, character varying | boolean
+ pgroonga | public | pgroonga_snippet_html | target text, keywords text[], width integer | text[]
+ pgroonga | public | pgroonga_table_name | indexname cstring | text
+ pgroonga | public | pgroonga_tokenize | target text, VARIADIC options text[] | json[]
+ pgroonga | public | pgroonga_vacuum | | boolean
+ pgroonga | public | pgroonga_wal_apply | | bigint
+ pgroonga | public | pgroonga_wal_apply | indexname cstring | bigint
+ pgroonga | public | pgroonga_wal_set_applied_position | | boolean
+ pgroonga | public | pgroonga_wal_set_applied_position | block bigint, "offset" bigint | boolean
+ pgroonga | public | pgroonga_wal_set_applied_position | indexname cstring | boolean
+ pgroonga | public | pgroonga_wal_set_applied_position | indexname cstring, block bigint, "offset" bigint | boolean
+ pgroonga | public | pgroonga_wal_status | | TABLE(name text, oid oid, current_block bigint, current_offset bigint, current_size bigint, last_block bigint, last_offset bigint, last_size bigint)
+ pgroonga | public | pgroonga_wal_truncate | | bigint
+ pgroonga | public | pgroonga_wal_truncate | indexname cstring | bigint
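In practice these PGroonga functions are usually driven through the extension's operators on an indexed column; a typical full-text setup (memos is a hypothetical table) looks like:

    CREATE TABLE memos (id integer, content text);
    CREATE INDEX pgrn_content ON memos USING pgroonga (content);
    -- &@~ performs a full-text query-syntax search
    SELECT * FROM memos WHERE content &@~ 'groonga OR postgresql';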
+ pgroonga_database | public | pgroonga_database_remove | | boolean
+ pgrouting | public | _pgr_alphashape | text, alpha double precision, OUT seq1 bigint, OUT textgeom text | SETOF record
+ pgrouting | public | _pgr_array_reverse | anyarray | anyarray
+ pgrouting | public | _pgr_articulationpoints | edges_sql text, OUT seq integer, OUT node bigint | SETOF record
+ pgrouting | public | _pgr_astar | edges_sql text, combinations_sql text, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_astar | edges_sql text, start_vids anyarray, end_vids anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, normal boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_bdastar | text, anyarray, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_bdastar | text, text, directed boolean, heuristic integer, factor double precision, epsilon double precision, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_bddijkstra | text, anyarray, anyarray, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_bddijkstra | text, text, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_bellmanford | edges_sql text, combinations_sql text, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_bellmanford | edges_sql text, from_vids anyarray, to_vids anyarray, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_biconnectedcomponents | edges_sql text, OUT seq bigint, OUT component bigint, OUT edge bigint | SETOF record
+ pgrouting | public | _pgr_binarybreadthfirstsearch | edges_sql text, combinations_sql text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_binarybreadthfirstsearch | edges_sql text, from_vids anyarray, to_vids anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_bipartite | edges_sql text, OUT node bigint, OUT color bigint | SETOF record
+ pgrouting | public | _pgr_boost_version | | text
+ pgrouting | public | _pgr_breadthfirstsearch | edges_sql text, from_vids anyarray, max_depth bigint, directed boolean, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_bridges | edges_sql text, OUT seq integer, OUT edge bigint | SETOF record
+ pgrouting | public | _pgr_build_type | | text
+ pgrouting | public | _pgr_checkcolumn | text, text, text, is_optional boolean, dryrun boolean | boolean
+ pgrouting | public | _pgr_checkquery | text | text
+ pgrouting | public | _pgr_checkverttab | vertname text, columnsarr text[], reporterrs integer, fnname text, OUT sname text, OUT vname text | record
+ pgrouting | public | _pgr_chinesepostman | edges_sql text, only_cost boolean, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_compilation_date | | text
+ pgrouting | public | _pgr_compiler_version | | text
+ pgrouting | public | _pgr_connectedcomponents | edges_sql text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record
+ pgrouting | public | _pgr_contraction | edges_sql text, contraction_order bigint[], max_cycles integer, forbidden_vertices bigint[], directed boolean, OUT type text, OUT id bigint, OUT contracted_vertices bigint[], OUT source bigint, OUT target bigint, OUT cost double precision | SETOF record
+ pgrouting | public | _pgr_createindex | sname text, tname text, colname text, indext text, reporterrs integer, fnname text | void
+ pgrouting | public | _pgr_createindex | tabname text, colname text, indext text, reporterrs integer, fnname text | void
+ pgrouting | public | _pgr_cuthillmckeeordering | text, OUT seq bigint, OUT node bigint | SETOF record
+ pgrouting | public | _pgr_dagshortestpath | text, anyarray, anyarray, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_dagshortestpath | text, text, directed boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_depthfirstsearch | edges_sql text, root_vids anyarray, directed boolean, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_dijkstra | edges_sql text, combinations_sql text, directed boolean, only_cost boolean, n_goals bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_dijkstra | edges_sql text, combinations_sql text, directed boolean, only_cost boolean, normal boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_dijkstra | edges_sql text, start_vids anyarray, end_vids anyarray, directed boolean, only_cost boolean, normal boolean, n_goals bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_dijkstra | edges_sql text, start_vids anyarray, end_vids anyarray, directed boolean, only_cost boolean, normal boolean, n_goals bigint, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_dijkstranear | text, anyarray, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_dijkstranear | text, anyarray, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_dijkstranear | text, bigint, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_dijkstravia | edges_sql text, via_vids anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_drivingdistance | edges_sql text, start_vids anyarray, distance double precision, directed boolean, equicost boolean, OUT seq integer, OUT from_v bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_edgecoloring | edges_sql text, OUT edge_id bigint, OUT color_id bigint | SETOF record
+ pgrouting | public | _pgr_edgedisjointpaths | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_edgedisjointpaths | text, text, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_edwardmoore | edges_sql text, combinations_sql text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_edwardmoore | edges_sql text, from_vids anyarray, to_vids anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_endpoint | g geometry | geometry
+ pgrouting | public | _pgr_floydwarshall | edges_sql text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_get_statement | o_sql text | text
+ pgrouting | public | _pgr_getcolumnname | sname text, tname text, col text, reporterrs integer, fnname text | text
+ pgrouting | public | _pgr_getcolumnname | tab text, col text, reporterrs integer, fnname text | text
+ pgrouting | public | _pgr_getcolumntype | sname text, tname text, cname text, reporterrs integer, fnname text | text
+ pgrouting | public | _pgr_getcolumntype | tab text, col text, reporterrs integer, fnname text | text
+ pgrouting | public | _pgr_gettablename | tab text, reporterrs integer, fnname text, OUT sname text, OUT tname text | record
+ pgrouting | public | _pgr_git_hash | | text
+ pgrouting | public | _pgr_hawickcircuits | text, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_iscolumnindexed | sname text, tname text, cname text, reporterrs integer, fnname text | boolean
+ pgrouting | public | _pgr_iscolumnindexed | tab text, col text, reporterrs integer, fnname text | boolean
+ pgrouting | public | _pgr_iscolumnintable | tab text, col text | boolean
+ pgrouting | public | _pgr_isplanar | text | boolean
+ pgrouting | public | _pgr_johnson | edges_sql text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_kruskal | text, anyarray, fn_suffix text, max_depth bigint, distance double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_ksp | edges_sql text, start_vid bigint, end_vid bigint, k integer, directed boolean, heap_paths boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_lengauertarjandominatortree | edges_sql text, root_vid bigint, OUT seq integer, OUT vid bigint, OUT idom bigint | SETOF record
+ pgrouting | public | _pgr_lib_version | | text
+ pgrouting | public | _pgr_linegraph | text, directed boolean, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT reverse_cost double precision | SETOF record
+ pgrouting | public | _pgr_linegraphfull | text, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT edge bigint | SETOF record
+ pgrouting | public | _pgr_makeconnected | text, OUT seq bigint, OUT start_vid bigint, OUT end_vid bigint | SETOF record
+ pgrouting | public | _pgr_maxcardinalitymatch | edges_sql text, directed boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint | SETOF record
+ pgrouting | public | _pgr_maxflow | edges_sql text, combinations_sql text, algorithm integer, only_flow boolean, OUT seq integer, OUT edge_id bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | _pgr_maxflow | edges_sql text, sources anyarray, targets anyarray, algorithm integer, only_flow boolean, OUT seq integer, OUT edge_id bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record
+ pgrouting | public | _pgr_maxflowmincost | edges_sql text, combinations_sql text, only_cost boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_maxflowmincost | edges_sql text, sources anyarray, targets anyarray, only_cost boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_msg | msgkind integer, fnname text, msg text | void
+ pgrouting | public | _pgr_onerror | errcond boolean, reporterrs integer, fnname text, msgerr text, hinto text, msgok text | void
+ pgrouting | public | _pgr_operating_system | | text
+ pgrouting | public | _pgr_parameter_check | fn text, sql text, big boolean | boolean
+ pgrouting | public | _pgr_pgsql_version | | text
+ pgrouting | public | _pgr_pickdeliver | text, text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT stop_id bigint, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record
+ pgrouting | public | _pgr_pickdelivereuclidean | text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record
+ pgrouting | public | _pgr_pointtoid | point geometry, tolerance double precision, vertname text, srid integer | bigint
+ pgrouting | public | _pgr_prim | text, anyarray, order_by text, max_depth bigint, distance double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_quote_ident | idname text | text
+ pgrouting | public | _pgr_sequentialvertexcoloring | edges_sql text, OUT vertex_id bigint, OUT color_id bigint | SETOF record
+ pgrouting | public | _pgr_startpoint | g geometry | geometry
+ pgrouting | public | _pgr_stoerwagner | edges_sql text, OUT seq integer, OUT edge bigint, OUT cost double precision, OUT mincut double precision | SETOF record
+ pgrouting | public | _pgr_strongcomponents | edges_sql text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record
+ pgrouting | public | _pgr_topologicalsort | edges_sql text, OUT seq integer, OUT sorted_v bigint | SETOF record
+ pgrouting | public | _pgr_transitiveclosure | edges_sql text, OUT seq integer, OUT vid bigint, OUT target_array bigint[] | SETOF record
+ pgrouting | public | _pgr_trsp | sql text, source_eid integer, source_pos double precision, target_eid integer, target_pos double precision, directed boolean, has_reverse_cost boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT cost double precision | SETOF record
+ pgrouting | public | _pgr_trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_trsp | text, text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_trsp | text, text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_trsp | text, text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_trsp_withpoints | text, text, text, anyarray, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT departure bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_trsp_withpoints | text, text, text, text, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT departure bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_trspvia | text, text, anyarray, boolean, boolean, boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_trspvia_withpoints | text, text, text, anyarray, boolean, boolean, boolean, character, boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_trspviavertices | sql text, vids integer[], directed boolean, has_rcost boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT id3 integer, OUT cost double precision | SETOF record
+ pgrouting | public | _pgr_tsp | matrix_row_sql text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_tspeuclidean | coordinates_sql text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_turnrestrictedpath | text, text, bigint, bigint, integer, directed boolean, heap_paths boolean, stop_on_first boolean, strict boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_versionless | v1 text, v2 text | boolean
+ pgrouting | public | _pgr_vrponedepot | text, text, text, integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT stop_id bigint, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record
+ pgrouting | public | _pgr_withpoints | edges_sql text, points_sql text, combinations_sql text, directed boolean, driving_side character, details boolean, only_cost boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_withpoints | edges_sql text, points_sql text, start_pids anyarray, end_pids anyarray, directed boolean, driving_side character, details boolean, only_cost boolean, normal boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_withpointsdd | edges_sql text, points_sql text, start_pid anyarray, distance double precision, directed boolean, driving_side character, details boolean, equicost boolean, OUT seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_withpointsksp | edges_sql text, points_sql text, start_pid bigint, end_pid bigint, k integer, directed boolean, heap_paths boolean, driving_side character, details boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_withpointsvia | sql text, via_edges bigint[], fraction double precision[], directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record
+ pgrouting | public | _pgr_withpointsvia | text, text, anyarray, boolean, boolean, boolean, character, boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record
+ pgrouting | public | _trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _v4trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | _v4trsp | text, text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_alphashape | geometry, alpha double precision | geometry
+ pgrouting | public | pgr_analyzegraph | text, double precision, the_geom text, id text, source text, target text, rows_where text | character varying
+ pgrouting | public | pgr_analyzeoneway | text, text[], text[], text[], text[], two_way_if_null boolean, oneway text, source text, target text | text
+ pgrouting | public | pgr_articulationpoints | text, OUT node bigint | SETOF bigint
+ pgrouting | public | pgr_astar | text, anyarray, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_astar | text, anyarray, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_astar | text, bigint, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_astar | text, bigint, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_astar | text, text, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_astarcost | text, anyarray, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_astarcost | text, anyarray, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_astarcost | text, bigint, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_astarcost | text, bigint, bigint, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_astarcost | text, text, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_astarcostmatrix | text, anyarray, directed boolean, heuristic integer, factor double precision, epsilon double precision, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_bdastar | text, anyarray, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_bdastar | text, anyarray, bigint, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record
+ pgrouting | public | pgr_bdastar | text, bigint, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node
bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastar | text, bigint, bigint, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastar | text, text, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcost | text, anyarray, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcost | text, anyarray, bigint, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcost | text, bigint, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcost | text, bigint, bigint, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcost | text, text, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bdastarcostmatrix | text, anyarray, directed boolean, heuristic integer, factor numeric, epsilon numeric, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstra | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstra | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstra | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstra | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstra | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracost | text, anyarray, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracost | text, anyarray, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT 
agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracost | text, bigint, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracost | text, bigint, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracost | text, text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bddijkstracostmatrix | text, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bellmanford | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bellmanford | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bellmanford | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bellmanford | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bellmanford | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_biconnectedcomponents | text, OUT seq bigint, OUT component bigint, OUT edge bigint | SETOF record + pgrouting | public | pgr_binarybreadthfirstsearch | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_binarybreadthfirstsearch | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_binarybreadthfirstsearch | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_binarybreadthfirstsearch | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_binarybreadthfirstsearch | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bipartite | text, OUT vertex_id bigint, OUT color_id bigint | SETOF record + pgrouting | public | 
pgr_boykovkolmogorov | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_boykovkolmogorov | text, anyarray, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_boykovkolmogorov | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_boykovkolmogorov | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_boykovkolmogorov | text, text, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_breadthfirstsearch | text, anyarray, max_depth bigint, directed boolean, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_breadthfirstsearch | text, bigint, max_depth bigint, directed boolean, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_bridges | text, OUT edge bigint | SETOF bigint + pgrouting | public | pgr_chinesepostman | text, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_chinesepostmancost | text | double precision + pgrouting | public | pgr_connectedcomponents | text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record + pgrouting | public | pgr_contraction | text, bigint[], max_cycles integer, forbidden_vertices bigint[], directed boolean, OUT type text, OUT id bigint, OUT contracted_vertices bigint[], OUT source bigint, OUT target bigint, OUT cost double precision | SETOF record + pgrouting | public | pgr_createtopology | text, double precision, the_geom text, id text, source text, target text, rows_where text, clean boolean | character varying + pgrouting | public | pgr_createverticestable | text, the_geom text, source text, target text, rows_where text | text + pgrouting | public | pgr_cuthillmckeeordering | text, OUT seq bigint, OUT node bigint | SETOF record + pgrouting | public | pgr_dagshortestpath | text, anyarray, anyarray, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dagshortestpath | text, anyarray, bigint, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dagshortestpath | text, bigint, anyarray, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dagshortestpath | text, bigint, bigint, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dagshortestpath | text, text, OUT seq integer, 
OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_degree | text, text, dryrun boolean, OUT node bigint, OUT degree bigint | SETOF record + pgrouting | public | pgr_depthfirstsearch | text, anyarray, directed boolean, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_depthfirstsearch | text, bigint, directed boolean, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstra | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstra | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstra | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstra | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstra | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstracost | text, anyarray, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstracost | text, anyarray, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstracost | text, bigint, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstracost | text, bigint, bigint, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstracost | text, text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstracostmatrix | text, anyarray, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranear | text, anyarray, anyarray, directed boolean, cap bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranear | text, anyarray, bigint, directed boolean, cap bigint, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT 
agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranear | text, bigint, anyarray, directed boolean, cap bigint, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranear | text, text, directed boolean, cap bigint, global boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranearcost | text, anyarray, anyarray, directed boolean, cap bigint, global boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranearcost | text, anyarray, bigint, directed boolean, cap bigint, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranearcost | text, bigint, anyarray, directed boolean, cap bigint, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstranearcost | text, text, directed boolean, cap bigint, global boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_dijkstravia | text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | pgr_drivingdistance | text, anyarray, double precision, directed boolean, equicost boolean, OUT seq integer, OUT from_v bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_drivingdistance | text, bigint, double precision, directed boolean, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edgecoloring | text, OUT edge_id bigint, OUT color_id bigint | SETOF record + pgrouting | public | pgr_edgedisjointpaths | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edgedisjointpaths | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edgedisjointpaths | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edgedisjointpaths | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edgedisjointpaths | text, text, directed boolean, OUT seq integer, OUT path_id integer, OUT path_seq 
integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edmondskarp | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_edmondskarp | text, anyarray, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_edmondskarp | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_edmondskarp | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_edmondskarp | text, text, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_edwardmoore | text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edwardmoore | text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edwardmoore | text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edwardmoore | text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_edwardmoore | text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_extractvertices | text, dryrun boolean, OUT id bigint, OUT in_edges bigint[], OUT out_edges bigint[], OUT x double precision, OUT y double precision, OUT geom geometry | SETOF record + pgrouting | public | pgr_findcloseedges | text, geometry, double precision, cap integer, partial boolean, dryrun boolean, OUT edge_id bigint, OUT fraction double precision, OUT side character, OUT distance double precision, OUT geom geometry, OUT edge geometry | SETOF record + pgrouting | public | pgr_findcloseedges | text, geometry[], double precision, cap integer, partial boolean, dryrun boolean, OUT edge_id bigint, OUT fraction double precision, OUT side character, OUT distance double precision, OUT geom geometry, OUT edge geometry | SETOF record + pgrouting | public | pgr_floydwarshall | text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_full_version | OUT version text, OUT build_type text, OUT compile_date text, OUT library text, OUT system text, OUT postgresql text, OUT compiler text, OUT boost text, OUT hash text | record + 
pgrouting | public | pgr_hawickcircuits | text, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_isplanar | text | boolean + pgrouting | public | pgr_johnson | text, directed boolean, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskal | text, OUT edge bigint, OUT cost double precision | SETOF record + pgrouting | public | pgr_kruskalbfs | text, anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskalbfs | text, bigint, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskaldd | text, anyarray, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskaldd | text, anyarray, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskaldd | text, bigint, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskaldd | text, bigint, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskaldfs | text, anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_kruskaldfs | text, bigint, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_ksp | text, bigint, bigint, integer, directed boolean, heap_paths boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_lengauertarjandominatortree | text, bigint, OUT seq integer, OUT vertex_id bigint, OUT idom bigint | SETOF record + pgrouting | public | pgr_linegraph | text, directed boolean, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT reverse_cost double precision | SETOF record + pgrouting | public | pgr_linegraphfull | text, OUT seq integer, OUT source bigint, OUT target bigint, OUT cost double precision, OUT edge bigint | SETOF record + pgrouting | public | pgr_makeconnected | text, OUT seq bigint, OUT start_vid bigint, OUT end_vid bigint | SETOF record + pgrouting | public | pgr_maxcardinalitymatch | text, directed boolean, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint | SETOF record + pgrouting | public | pgr_maxcardinalitymatch | text, OUT edge bigint | SETOF bigint + 
pgrouting | public | pgr_maxflow | text, anyarray, anyarray | bigint + pgrouting | public | pgr_maxflow | text, anyarray, bigint | bigint + pgrouting | public | pgr_maxflow | text, bigint, anyarray | bigint + pgrouting | public | pgr_maxflow | text, bigint, bigint | bigint + pgrouting | public | pgr_maxflow | text, text | bigint + pgrouting | public | pgr_maxflowmincost | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_maxflowmincost | text, anyarray, bigint, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_maxflowmincost | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_maxflowmincost | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_maxflowmincost | text, text, OUT seq integer, OUT edge bigint, OUT source bigint, OUT target bigint, OUT flow bigint, OUT residual_capacity bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_maxflowmincost_cost | text, anyarray, anyarray | double precision + pgrouting | public | pgr_maxflowmincost_cost | text, anyarray, bigint | double precision + pgrouting | public | pgr_maxflowmincost_cost | text, bigint, anyarray | double precision + pgrouting | public | pgr_maxflowmincost_cost | text, bigint, bigint | double precision + pgrouting | public | pgr_maxflowmincost_cost | text, text | double precision + pgrouting | public | pgr_nodenetwork | text, double precision, id text, the_geom text, table_ending text, rows_where text, outall boolean | text + pgrouting | public | pgr_pickdeliver | text, text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT stop_id bigint, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record + pgrouting | public | pgr_pickdelivereuclidean | text, text, factor double precision, max_cycles integer, initial_sol integer, OUT seq integer, OUT vehicle_seq integer, OUT vehicle_id bigint, OUT stop_seq integer, OUT stop_type integer, OUT order_id bigint, OUT cargo double precision, OUT travel_time double precision, OUT arrival_time double precision, OUT wait_time double precision, OUT service_time double precision, OUT departure_time double precision | SETOF record + pgrouting | public | pgr_prim | text, OUT edge bigint, OUT cost double precision | SETOF record + pgrouting | public | pgr_primbfs | text, anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_primbfs | text, bigint, 
max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_primdd | text, anyarray, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_primdd | text, anyarray, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_primdd | text, bigint, double precision, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_primdd | text, bigint, numeric, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_primdfs | text, anyarray, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_primdfs | text, bigint, max_depth bigint, OUT seq bigint, OUT depth bigint, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_pushrelabel | text, anyarray, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_pushrelabel | text, anyarray, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_pushrelabel | text, bigint, anyarray, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_pushrelabel | text, bigint, bigint, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_pushrelabel | text, text, OUT seq integer, OUT edge bigint, OUT start_vid bigint, OUT end_vid bigint, OUT flow bigint, OUT residual_capacity bigint | SETOF record + pgrouting | public | pgr_sequentialvertexcoloring | text, OUT vertex_id bigint, OUT color_id bigint | SETOF record + pgrouting | public | pgr_stoerwagner | text, OUT seq integer, OUT edge bigint, OUT cost double precision, OUT mincut double precision | SETOF record + pgrouting | public | pgr_strongcomponents | text, OUT seq bigint, OUT component bigint, OUT node bigint | SETOF record + pgrouting | public | pgr_topologicalsort | text, OUT seq integer, OUT sorted_v bigint | SETOF record + pgrouting | public | pgr_transitiveclosure | text, OUT seq integer, OUT vid bigint, OUT target_array bigint[] | SETOF record + pgrouting | public | pgr_trsp | text, integer, double precision, integer, double precision, boolean, boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT cost double precision | SETOF record + pgrouting | public | pgr_trsp | text, integer, integer, boolean, boolean, restrictions_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT cost 
double precision | SETOF record + pgrouting | public | pgr_trsp | text, text, anyarray, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp | text, text, anyarray, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp | text, text, bigint, anyarray, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp | text, text, bigint, bigint, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp | text, text, text, directed boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp_withpoints | text, text, text, anyarray, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp_withpoints | text, text, text, anyarray, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp_withpoints | text, text, text, bigint, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp_withpoints | text, text, text, bigint, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trsp_withpoints | text, text, text, text, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_trspvia | text, text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | pgr_trspvia_withpoints | text, text, text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, driving_side character, details boolean, OUT seq integer, OUT path_id 
integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrouting | public | pgr_trspviaedges | text, integer[], double precision[], boolean, boolean, turn_restrict_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT id3 integer, OUT cost double precision | SETOF record + pgrouting | public | pgr_trspviavertices | text, anyarray, boolean, boolean, restrictions_sql text, OUT seq integer, OUT id1 integer, OUT id2 integer, OUT id3 integer, OUT cost double precision | SETOF record + pgrouting | public | pgr_tsp | text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_tspeuclidean | text, start_id bigint, end_id bigint, max_processing_time double precision, tries_per_temperature integer, max_changes_per_temperature integer, max_consecutive_non_changes integer, initial_temperature double precision, final_temperature double precision, cooling_factor double precision, randomize boolean, OUT seq integer, OUT node bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_turnrestrictedpath | text, text, bigint, bigint, integer, directed boolean, heap_paths boolean, stop_on_first boolean, strict boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_version | | text + pgrouting | public | pgr_vrponedepot | text, text, text, integer, OUT oid integer, OUT opos integer, OUT vid integer, OUT tarrival integer, OUT tdepart integer | SETOF record + pgrouting | public | pgr_withpoints | text, text, anyarray, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpoints | text, text, anyarray, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpoints | text, text, bigint, anyarray, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpoints | text, text, bigint, bigint, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpoints | text, text, text, directed boolean, driving_side character, details boolean, OUT seq integer, OUT path_seq integer, OUT start_pid bigint, OUT end_pid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT 
agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointscost | text, text, anyarray, anyarray, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointscost | text, text, anyarray, bigint, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointscost | text, text, bigint, anyarray, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointscost | text, text, bigint, bigint, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointscost | text, text, text, directed boolean, driving_side character, OUT start_pid bigint, OUT end_pid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointscostmatrix | text, text, anyarray, directed boolean, driving_side character, OUT start_vid bigint, OUT end_vid bigint, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointsdd | text, text, anyarray, double precision, directed boolean, driving_side character, details boolean, equicost boolean, OUT seq integer, OUT start_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointsdd | text, text, bigint, double precision, directed boolean, driving_side character, details boolean, OUT seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointsksp | text, text, bigint, bigint, integer, directed boolean, heap_paths boolean, driving_side character, details boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision | SETOF record + pgrouting | public | pgr_withpointsvia | text, text, anyarray, directed boolean, strict boolean, u_turn_on_edge boolean, driving_side character, details boolean, OUT seq integer, OUT path_id integer, OUT path_seq integer, OUT start_vid bigint, OUT end_vid bigint, OUT node bigint, OUT edge bigint, OUT cost double precision, OUT agg_cost double precision, OUT route_agg_cost double precision | SETOF record + pgrowlocks | public | pgrowlocks | relname text, OUT locked_row tid, OUT locker xid, OUT multi boolean, OUT xids xid[], OUT modes text[], OUT pids integer[] | SETOF record + pgsodium | pgsodium | create_key | key_type pgsodium.key_type, name text, raw_key bytea, raw_key_nonce bytea, parent_key uuid, key_context bytea, expires timestamp with time zone, associated_data text | pgsodium.valid_key + pgsodium | pgsodium | create_mask_view | relid oid, debug boolean | void + pgsodium | pgsodium | create_mask_view | relid oid, subid integer, debug boolean | void + pgsodium | pgsodium | crypto_aead_det_decrypt | ciphertext bytea, additional bytea, key bytea, nonce bytea | bytea + pgsodium | pgsodium | crypto_aead_det_decrypt | message bytea, additional bytea, key_id bigint, context bytea, nonce bytea | bytea + pgsodium | pgsodium | crypto_aead_det_decrypt | message bytea, additional bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_aead_det_decrypt | message bytea, additional bytea, key_uuid 
uuid, nonce bytea | bytea + pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key bytea, nonce bytea | bytea + pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key_id bigint, context bytea, nonce bytea | bytea + pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_aead_det_encrypt | message bytea, additional bytea, key_uuid uuid, nonce bytea | bytea + pgsodium | pgsodium | crypto_aead_det_keygen | | bytea + pgsodium | pgsodium | crypto_aead_det_noncegen | | bytea + pgsodium | pgsodium | crypto_aead_ietf_decrypt | message bytea, additional bytea, nonce bytea, key bytea | bytea + pgsodium | pgsodium | crypto_aead_ietf_decrypt | message bytea, additional bytea, nonce bytea, key_id bigint, context bytea | bytea + pgsodium | pgsodium | crypto_aead_ietf_decrypt | message bytea, additional bytea, nonce bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_aead_ietf_encrypt | message bytea, additional bytea, nonce bytea, key bytea | bytea + pgsodium | pgsodium | crypto_aead_ietf_encrypt | message bytea, additional bytea, nonce bytea, key_id bigint, context bytea | bytea + pgsodium | pgsodium | crypto_aead_ietf_encrypt | message bytea, additional bytea, nonce bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_aead_ietf_keygen | | bytea + pgsodium | pgsodium | crypto_aead_ietf_noncegen | | bytea + pgsodium | pgsodium | crypto_auth | message bytea, key bytea | bytea + pgsodium | pgsodium | crypto_auth | message bytea, key_id bigint, context bytea | bytea + pgsodium | pgsodium | crypto_auth | message bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_auth_hmacsha256 | message bytea, key_id bigint, context bytea | bytea + pgsodium | pgsodium | crypto_auth_hmacsha256 | message bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_auth_hmacsha256 | message bytea, secret bytea | bytea + pgsodium | pgsodium | crypto_auth_hmacsha256_keygen | | bytea + pgsodium | pgsodium | crypto_auth_hmacsha256_verify | hash bytea, message bytea, key_id bigint, context bytea | boolean + pgsodium | pgsodium | crypto_auth_hmacsha256_verify | hash bytea, message bytea, secret bytea | boolean + pgsodium | pgsodium | crypto_auth_hmacsha256_verify | signature bytea, message bytea, key_uuid uuid | boolean + pgsodium | pgsodium | crypto_auth_hmacsha512 | message bytea, key_id bigint, context bytea | bytea + pgsodium | pgsodium | crypto_auth_hmacsha512 | message bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_auth_hmacsha512 | message bytea, secret bytea | bytea + pgsodium | pgsodium | crypto_auth_hmacsha512_keygen | | bytea + pgsodium | pgsodium | crypto_auth_hmacsha512_verify | hash bytea, message bytea, key_id bigint, context bytea | boolean + pgsodium | pgsodium | crypto_auth_hmacsha512_verify | hash bytea, message bytea, secret bytea | boolean + pgsodium | pgsodium | crypto_auth_hmacsha512_verify | signature bytea, message bytea, key_uuid uuid | boolean + pgsodium | pgsodium | crypto_auth_keygen | | bytea + pgsodium | pgsodium | crypto_auth_verify | mac bytea, message bytea, key bytea | boolean + pgsodium | pgsodium | crypto_auth_verify | mac bytea, message bytea, key_id bigint, context bytea | boolean + pgsodium | pgsodium | crypto_auth_verify | mac bytea, message bytea, key_uuid uuid | boolean + pgsodium | pgsodium | crypto_box | message bytea, nonce bytea, public bytea, secret bytea | bytea + pgsodium | pgsodium | crypto_box_new_keypair | | 
pgsodium.crypto_box_keypair + pgsodium | pgsodium | crypto_box_new_seed | | bytea + pgsodium | pgsodium | crypto_box_noncegen | | bytea + pgsodium | pgsodium | crypto_box_open | ciphertext bytea, nonce bytea, public bytea, secret bytea | bytea + pgsodium | pgsodium | crypto_box_seal | message bytea, public_key bytea | bytea + pgsodium | pgsodium | crypto_box_seal_open | ciphertext bytea, public_key bytea, secret_key bytea | bytea + pgsodium | pgsodium | crypto_box_seed_new_keypair | seed bytea | pgsodium.crypto_box_keypair + pgsodium | pgsodium | crypto_cmp | text, text | boolean + pgsodium | pgsodium | crypto_generichash | message bytea, key bigint, context bytea | bytea + pgsodium | pgsodium | crypto_generichash | message bytea, key bytea | bytea + pgsodium | pgsodium | crypto_generichash | message bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_generichash_keygen | | bytea + pgsodium | pgsodium | crypto_hash_sha256 | message bytea | bytea + pgsodium | pgsodium | crypto_hash_sha512 | message bytea | bytea + pgsodium | pgsodium | crypto_kdf_derive_from_key | subkey_size bigint, subkey_id bigint, context bytea, primary_key bytea | bytea + pgsodium | pgsodium | crypto_kdf_derive_from_key | subkey_size integer, subkey_id bigint, context bytea, primary_key uuid | bytea + pgsodium | pgsodium | crypto_kdf_keygen | | bytea + pgsodium | pgsodium | crypto_kx_client_session_keys | client_pk bytea, client_sk bytea, server_pk bytea | pgsodium.crypto_kx_session + pgsodium | pgsodium | crypto_kx_new_keypair | | pgsodium.crypto_kx_keypair + pgsodium | pgsodium | crypto_kx_new_seed | | bytea + pgsodium | pgsodium | crypto_kx_seed_new_keypair | seed bytea | pgsodium.crypto_kx_keypair + pgsodium | pgsodium | crypto_kx_server_session_keys | server_pk bytea, server_sk bytea, client_pk bytea | pgsodium.crypto_kx_session + pgsodium | pgsodium | crypto_pwhash | password bytea, salt bytea | bytea + pgsodium | pgsodium | crypto_pwhash_saltgen | | bytea + pgsodium | pgsodium | crypto_pwhash_str | password bytea | bytea + pgsodium | pgsodium | crypto_pwhash_str_verify | hashed_password bytea, password bytea | boolean + pgsodium | pgsodium | crypto_secretbox | message bytea, nonce bytea, key bytea | bytea + pgsodium | pgsodium | crypto_secretbox | message bytea, nonce bytea, key_id bigint, context bytea | bytea + pgsodium | pgsodium | crypto_secretbox | message bytea, nonce bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_secretbox_keygen | | bytea + pgsodium | pgsodium | crypto_secretbox_noncegen | | bytea + pgsodium | pgsodium | crypto_secretbox_open | ciphertext bytea, nonce bytea, key bytea | bytea + pgsodium | pgsodium | crypto_secretbox_open | message bytea, nonce bytea, key_id bigint, context bytea | bytea + pgsodium | pgsodium | crypto_secretbox_open | message bytea, nonce bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_secretstream_keygen | | bytea + pgsodium | pgsodium | crypto_shorthash | message bytea, key bigint, context bytea | bytea + pgsodium | pgsodium | crypto_shorthash | message bytea, key bytea | bytea + pgsodium | pgsodium | crypto_shorthash | message bytea, key_uuid uuid | bytea + pgsodium | pgsodium | crypto_shorthash_keygen | | bytea + pgsodium | pgsodium | crypto_sign | message bytea, key bytea | bytea + pgsodium | pgsodium | crypto_sign_detached | message bytea, key bytea | bytea + pgsodium | pgsodium | crypto_sign_final_create | state bytea, key bytea | bytea + pgsodium | pgsodium | crypto_sign_final_verify | state bytea, signature bytea, key bytea | 
boolean
+ pgsodium | pgsodium | crypto_sign_init | | bytea
+ pgsodium | pgsodium | crypto_sign_new_keypair | | pgsodium.crypto_sign_keypair
+ pgsodium | pgsodium | crypto_sign_new_seed | | bytea
+ pgsodium | pgsodium | crypto_sign_open | signed_message bytea, key bytea | bytea
+ pgsodium | pgsodium | crypto_sign_seed_new_keypair | seed bytea | pgsodium.crypto_sign_keypair
+ pgsodium | pgsodium | crypto_sign_update | state bytea, message bytea | bytea
+ pgsodium | pgsodium | crypto_sign_update_agg | message bytea | bytea
+ pgsodium | pgsodium | crypto_sign_update_agg | state bytea, message bytea | bytea
+ pgsodium | pgsodium | crypto_sign_update_agg1 | state bytea, message bytea | bytea
+ pgsodium | pgsodium | crypto_sign_update_agg2 | cur_state bytea, initial_state bytea, message bytea | bytea
+ pgsodium | pgsodium | crypto_sign_verify_detached | sig bytea, message bytea, key bytea | boolean
+ pgsodium | pgsodium | crypto_signcrypt_new_keypair | | pgsodium.crypto_signcrypt_keypair
+ pgsodium | pgsodium | crypto_signcrypt_sign_after | state bytea, sender_sk bytea, ciphertext bytea | bytea
+ pgsodium | pgsodium | crypto_signcrypt_sign_before | sender bytea, recipient bytea, sender_sk bytea, recipient_pk bytea, additional bytea | pgsodium.crypto_signcrypt_state_key
+ pgsodium | pgsodium | crypto_signcrypt_verify_after | state bytea, signature bytea, sender_pk bytea, ciphertext bytea | boolean
+ pgsodium | pgsodium | crypto_signcrypt_verify_before | signature bytea, sender bytea, recipient bytea, additional bytea, sender_pk bytea, recipient_sk bytea | pgsodium.crypto_signcrypt_state_key
+ pgsodium | pgsodium | crypto_signcrypt_verify_public | signature bytea, sender bytea, recipient bytea, additional bytea, sender_pk bytea, ciphertext bytea | boolean
+ pgsodium | pgsodium | crypto_stream_xchacha20 | bigint, bytea, bigint, context bytea | bytea
+ pgsodium | pgsodium | crypto_stream_xchacha20 | bigint, bytea, bytea | bytea
+ pgsodium | pgsodium | crypto_stream_xchacha20_keygen | | bytea
+ pgsodium | pgsodium | crypto_stream_xchacha20_noncegen | | bytea
+ pgsodium | pgsodium | crypto_stream_xchacha20_xor | bytea, bytea, bigint, context bytea | bytea
+ pgsodium | pgsodium | crypto_stream_xchacha20_xor | bytea, bytea, bytea | bytea
+ pgsodium | pgsodium | crypto_stream_xchacha20_xor_ic | bytea, bytea, bigint, bigint, context bytea | bytea
+ pgsodium | pgsodium | crypto_stream_xchacha20_xor_ic | bytea, bytea, bigint, bytea | bytea
+ pgsodium | pgsodium | decrypted_columns | relid oid | text
+ pgsodium | pgsodium | derive_key | key_id bigint, key_len integer, context bytea | bytea
+ pgsodium | pgsodium | disable_security_label_trigger | | void
+ pgsodium | pgsodium | enable_security_label_trigger | | void
+ pgsodium | pgsodium | encrypted_column | relid oid, m record | text
+ pgsodium | pgsodium | encrypted_columns | relid oid | text
+ pgsodium | pgsodium | get_key_by_id | uuid | pgsodium.valid_key
+ pgsodium | pgsodium | get_key_by_name | text | pgsodium.valid_key
+ pgsodium | pgsodium | get_named_keys | filter text | SETOF pgsodium.valid_key
+ pgsodium | pgsodium | has_mask | role regrole, source_name text | boolean
+ pgsodium | pgsodium | key_encrypt_secret_raw_key | | trigger
+ pgsodium | pgsodium | mask_columns | source_relid oid | TABLE(attname name, key_id text, key_id_column text, associated_column text, nonce_column text, format_type text)
+ pgsodium | pgsodium | mask_role | masked_role regrole, source_name text, view_name text | void
+ pgsodium | pgsodium | pgsodium_derive | key_id bigint, key_len integer, context bytea | bytea
+ pgsodium | pgsodium | quote_assoc | text, boolean | text
+ pgsodium | pgsodium | randombytes_buf | size integer | bytea
+ pgsodium | pgsodium | randombytes_buf_deterministic | size integer, seed bytea | bytea
+ pgsodium | pgsodium | randombytes_new_seed | | bytea
+ pgsodium | pgsodium | randombytes_random | | integer
+ pgsodium | pgsodium | randombytes_uniform | upper_bound integer | integer
+ pgsodium | pgsodium | sodium_base642bin | base64 text | bytea
+ pgsodium | pgsodium | sodium_bin2base64 | bin bytea | text
+ pgsodium | pgsodium | trg_mask_update | | event_trigger
+ pgsodium | pgsodium | update_mask | target oid, debug boolean | void
+ pgsodium | pgsodium | update_masks | debug boolean | void
+ pgsodium | pgsodium | version | | text
+ pgstattuple | public | pg_relpages | relname regclass | bigint
+ pgstattuple | public | pg_relpages | relname text | bigint
+ pgstattuple | public | pgstatginindex | relname regclass, OUT version integer, OUT pending_pages integer, OUT pending_tuples bigint | record
+ pgstattuple | public | pgstathashindex | relname regclass, OUT version integer, OUT bucket_pages bigint, OUT overflow_pages bigint, OUT bitmap_pages bigint, OUT unused_pages bigint, OUT live_items bigint, OUT dead_items bigint, OUT free_percent double precision | record
+ pgstattuple | public | pgstatindex | relname regclass, OUT version integer, OUT tree_level integer, OUT index_size bigint, OUT root_block_no bigint, OUT internal_pages bigint, OUT leaf_pages bigint, OUT empty_pages bigint, OUT deleted_pages bigint, OUT avg_leaf_density double precision, OUT leaf_fragmentation double precision | record
+ pgstattuple | public | pgstatindex | relname text, OUT version integer, OUT tree_level integer, OUT index_size bigint, OUT root_block_no bigint, OUT internal_pages bigint, OUT leaf_pages bigint, OUT empty_pages bigint, OUT deleted_pages bigint, OUT avg_leaf_density double precision, OUT leaf_fragmentation double precision | record
+ pgstattuple | public | pgstattuple | relname text, OUT table_len bigint, OUT tuple_count bigint, OUT tuple_len bigint, OUT tuple_percent double precision, OUT dead_tuple_count bigint, OUT dead_tuple_len bigint, OUT dead_tuple_percent double precision, OUT free_space bigint, OUT free_percent double precision | record
+ pgstattuple | public | pgstattuple | reloid regclass, OUT table_len bigint, OUT tuple_count bigint, OUT tuple_len bigint, OUT tuple_percent double precision, OUT dead_tuple_count bigint, OUT dead_tuple_len bigint, OUT dead_tuple_percent double precision, OUT free_space bigint, OUT free_percent double precision | record
+ pgstattuple | public | pgstattuple_approx | reloid regclass, OUT table_len bigint, OUT scanned_percent double precision, OUT approx_tuple_count bigint, OUT approx_tuple_len bigint, OUT approx_tuple_percent double precision, OUT dead_tuple_count bigint, OUT dead_tuple_len bigint, OUT dead_tuple_percent double precision, OUT approx_free_space bigint, OUT approx_free_percent double precision | record
+ pgtap | public | _add | text, integer | integer
+ pgtap | public | _add | text, integer, text | integer
+ pgtap | public | _alike | boolean, anyelement, text, text | text
+ pgtap | public | _ancestor_of | name, name, integer | boolean
+ pgtap | public | _ancestor_of | name, name, name, name, integer | boolean
+ pgtap | public | _are | text, name[], name[], text | text
+ pgtap | public | _areni | text, text[], text[], text | text
+ pgtap | public | _array_to_sorted_string | name[], text | text
+ pgtap | public | _assets_are | text, text[], text[], text | text
+ pgtap | public | _cast_exists | name, name | boolean
+ pgtap | public | _cast_exists | name, name, name | boolean
+ pgtap | public | _cast_exists | name, name, name, name | boolean
+ pgtap | public | _cdi | name, name, anyelement | text
+ pgtap | public | _cdi | name, name, anyelement, text | text
+ pgtap | public | _cdi | name, name, name, anyelement, text | text
+ pgtap | public | _cexists | name, name | boolean
+ pgtap | public | _cexists | name, name, name | boolean
+ pgtap | public | _ckeys | name, character | name[]
+ pgtap | public | _ckeys | name, name, character | name[]
+ pgtap | public | _cleanup | | boolean
+ pgtap | public | _cmp_types | oid, name | boolean
+ pgtap | public | _col_is_null | name, name, name, text, boolean | text
+ pgtap | public | _col_is_null | name, name, text, boolean | text
+ pgtap | public | _constraint | name, character, name[], text, text | text
+ pgtap | public | _constraint | name, name, character, name[], text, text | text
+ pgtap | public | _contract_on | text | "char"
+ pgtap | public | _currtest | | integer
+ pgtap | public | _db_privs | | name[]
+ pgtap | public | _def_is | text, text, anyelement, text | text
+ pgtap | public | _definer | name | boolean
+ pgtap | public | _definer | name, name | boolean
+ pgtap | public | _definer | name, name, name[] | boolean
+ pgtap | public | _definer | name, name[] | boolean
+ pgtap | public | _dexists | name | boolean
+ pgtap | public | _dexists | name, name | boolean
+ pgtap | public | _do_ne | text, text, text, text | text
+ pgtap | public | _docomp | text, text, text, text | text
+ pgtap | public | _error_diag | text, text, text, text, text, text, text, text, text, text | text
+ pgtap | public | _expand_context | character | text
+ pgtap | public | _expand_on | character | text
+ pgtap | public | _expand_vol | character | text
+ pgtap | public | _ext_exists | name | boolean
+ pgtap | public | _ext_exists | name, name | boolean
+ pgtap | public | _extensions | | SETOF name
+ pgtap | public | _extensions | name | SETOF name
+ pgtap | public | _extras | character, name, name[] | name[]
+ pgtap | public | _extras | character, name[] | name[]
+ pgtap | public | _extras | character[], name, name[] | name[]
+ pgtap | public | _extras | character[], name[] | name[]
+ pgtap | public | _finish | integer, integer, integer, boolean | SETOF text
+ pgtap | public | _fkexists | name, name, name[] | boolean
+ pgtap | public | _fkexists | name, name[] | boolean
+ pgtap | public | _fprivs_are | text, name, name[], text | text
+ pgtap | public | _func_compare | name, name, anyelement, anyelement, text | text
+ pgtap | public | _func_compare | name, name, boolean, text | text
+ pgtap | public | _func_compare | name, name, name[], anyelement, anyelement, text | text
+ pgtap | public | _func_compare | name, name, name[], boolean, text | text
+ pgtap | public | _funkargs | name[] | text
+ pgtap | public | _get | text | integer
+ pgtap | public | _get_ac_privs | name, text | text[]
+ pgtap | public | _get_col_ns_type | name, name, name | text
+ pgtap | public | _get_col_privs | name, text, name | text[]
+ pgtap | public | _get_col_type | name, name | text
+ pgtap | public | _get_col_type | name, name, name | text
+ pgtap | public | _get_context | name, name | "char"
+ pgtap | public | _get_db_owner | name | name
+ pgtap | public | _get_db_privs | name, text | text[]
+ pgtap | public | _get_dtype | name | text
+ pgtap | public | _get_dtype | name, text, boolean | text
+ pgtap | public | _get_fdw_privs | name, text | text[]
+ pgtap | public | _get_func_owner | name, name, name[] | name
+ pgtap | public | _get_func_owner | name, name[] | name
+ pgtap | public | _get_func_privs | text, text | text[]
+ pgtap | public | _get_index_owner | name, name | name
+ pgtap | public | _get_index_owner | name, name, name | name
+ pgtap | public | _get_lang_privs | name, text | text[]
+ pgtap | public | _get_language_owner | name | name
+ pgtap | public | _get_latest | text | integer[]
+ pgtap | public | _get_latest | text, integer | integer
+ pgtap | public | _get_note | integer | text
+ pgtap | public | _get_note | text | text
+ pgtap | public | _get_opclass_owner | name | name
+ pgtap | public | _get_opclass_owner | name, name | name
+ pgtap | public | _get_rel_owner | character, name | name
+ pgtap | public | _get_rel_owner | character, name, name | name
+ pgtap | public | _get_rel_owner | character[], name | name
+ pgtap | public | _get_rel_owner | character[], name, name | name
+ pgtap | public | _get_rel_owner | name | name
+ pgtap | public | _get_rel_owner | name, name | name
+ pgtap | public | _get_schema_owner | name | name
+ pgtap | public | _get_schema_privs | name, text | text[]
+ pgtap | public | _get_sequence_privs | name, text | text[]
+ pgtap | public | _get_server_privs | name, text | text[]
+ pgtap | public | _get_table_privs | name, text | text[]
+ pgtap | public | _get_tablespace_owner | name | name
+ pgtap | public | _get_tablespaceprivs | name, text | text[]
+ pgtap | public | _get_type_owner | name | name
+ pgtap | public | _get_type_owner | name, name | name
+ pgtap | public | _got_func | name | boolean
+ pgtap | public | _got_func | name, name | boolean
+ pgtap | public | _got_func | name, name, name[] | boolean
+ pgtap | public | _got_func | name, name[] | boolean
+ pgtap | public | _grolist | name | oid[]
+ pgtap | public | _has_def | name, name | boolean
+ pgtap | public | _has_def | name, name, name | boolean
+ pgtap | public | _has_group | name | boolean
+ pgtap | public | _has_role | name | boolean
+ pgtap | public | _has_type | name, character[] | boolean
+ pgtap | public | _has_type | name, name, character[] | boolean
+ pgtap | public | _has_user | name | boolean
+ pgtap | public | _hasc | name, character | boolean
+ pgtap | public | _hasc | name, name, character | boolean
+ pgtap | public | _have_index | name, name | boolean
+ pgtap | public | _have_index | name, name, name | boolean
+ pgtap | public | _ident_array_to_sorted_string | name[], text | text
+ pgtap | public | _ident_array_to_string | name[], text | text
+ pgtap | public | _ikeys | name, name | text[]
+ pgtap | public | _ikeys | name, name, name | text[]
+ pgtap | public | _inherited | name | boolean
+ pgtap | public | _inherited | name, name | boolean
+ pgtap | public | _is_indexed | name, name, text[] | boolean
+ pgtap | public | _is_instead | name, name | boolean
+ pgtap | public | _is_instead | name, name, name | boolean
+ pgtap | public | _is_schema | name | boolean
+ pgtap | public | _is_super | name | boolean
+ pgtap | public | _is_trusted | name | boolean
+ pgtap | public | _is_verbose | | boolean
+ pgtap | public | _keys | name, character | SETOF name[]
+ pgtap | public | _keys | name, name, character | SETOF name[]
+ pgtap | public | _lang | name | name
+ pgtap | public | _lang | name, name | name
+ pgtap | public | _lang | name, name, name[] | name
+ pgtap | public | _lang | name, name[] | name
+ pgtap | public | _missing | character, name, name[] | name[]
+ pgtap | public | _missing | character, name[] | name[]
+ pgtap | public | _missing | character[], name, name[] | name[]
+ pgtap | public | _missing | character[], name[] | name[]
+ pgtap | public | _nosuch | name, name, name[] | text
+ pgtap | public | _op_exists | name, name, name | boolean
+ pgtap | public | _op_exists | name, name, name, name | boolean
+ pgtap | public | _op_exists | name, name, name, name, name | boolean
+ pgtap | public | _opc_exists | name | boolean
+ pgtap | public | _opc_exists | name, name | boolean
+ pgtap | public | _partof | name, name | boolean
+ pgtap | public | _partof | name, name, name, name | boolean
+ pgtap | public | _parts | name | SETOF name
+ pgtap | public | _parts | name, name | SETOF name
+ pgtap | public | _pg_sv_column_array | oid, smallint[] | name[]
+ pgtap | public | _pg_sv_table_accessible | oid, oid | boolean
+ pgtap | public | _pg_sv_type_array | oid[] | name[]
+ pgtap | public | _prokind | p_oid oid | "char"
+ pgtap | public | _query | text | text
+ pgtap | public | _quote_ident_like | text, text | text
+ pgtap | public | _refine_vol | text | text
+ pgtap | public | _relcomp | text, anyarray, text, text | text
+ pgtap | public | _relcomp | text, text, text, text | text
+ pgtap | public | _relcomp | text, text, text, text, text | text
+ pgtap | public | _relexists | name | boolean
+ pgtap | public | _relexists | name, name | boolean
+ pgtap | public | _relne | text, anyarray, text, text | text
+ pgtap | public | _relne | text, text, text, text | text
+ pgtap | public | _returns | name | text
+ pgtap | public | _returns | name, name | text
+ pgtap | public | _returns | name, name, name[] | text
+ pgtap | public | _returns | name, name[] | text
+ pgtap | public | _rexists | character, name | boolean
+ pgtap | public | _rexists | character, name, name | boolean
+ pgtap | public | _rexists | character[], name | boolean
+ pgtap | public | _rexists | character[], name, name | boolean
+ pgtap | public | _rule_on | name, name | "char"
+ pgtap | public | _rule_on | name, name, name | "char"
+ pgtap | public | _runem | text[], boolean | SETOF text
+ pgtap | public | _runner | text[], text[], text[], text[], text[] | SETOF text
+ pgtap | public | _set | integer, integer | integer
+ pgtap | public | _set | text, integer | integer
+ pgtap | public | _set | text, integer, text | integer
+ pgtap | public | _strict | name | boolean
+ pgtap | public | _strict | name, name | boolean
+ pgtap | public | _strict | name, name, name[] | boolean
+ pgtap | public | _strict | name, name[] | boolean
+ pgtap | public | _table_privs | | name[]
+ pgtap | public | _temptable | anyarray, text | text
+ pgtap | public | _temptable | text, text | text
+ pgtap | public | _temptypes | text | text
+ pgtap | public | _time_trials | text, integer, numeric | SETOF _time_trial_type
+ pgtap | public | _tlike | boolean, text, text, text | text
+ pgtap | public | _todo | | text
+ pgtap | public | _trig | name, name | boolean
+ pgtap | public | _trig | name, name, name | boolean
+ pgtap | public | _type_func | "char", name | boolean
+ pgtap | public | _type_func | "char", name, name | boolean
+ pgtap | public | _type_func | "char", name, name, name[] | boolean
+ pgtap | public | _type_func | "char", name, name[] | boolean
+ pgtap | public | _types_are | name, name[], text, character[] | text
+ pgtap | public | _types_are | name[], text, character[] | text
+ pgtap | public | _unalike | boolean, anyelement, text, text | text
+ pgtap | public | _vol | name | text
+ pgtap | public | _vol | name, name | text
+ pgtap | public | _vol | name, name, name[] | text
+ pgtap | public | _vol | name, name[] | text
+ pgtap | public | add_result | boolean, boolean, text, text, text | integer
+ pgtap | public | alike | anyelement, text | text
+ pgtap | public | alike | anyelement, text, text | text
+ pgtap | public | any_column_privs_are | name, name, name, name[] | text
+ pgtap | public | any_column_privs_are | name, name, name, name[], text | text
+ pgtap | public | any_column_privs_are | name, name, name[] | text
+ pgtap | public | any_column_privs_are | name, name, name[], text | text
+ pgtap | public | bag_eq | text, anyarray | text
+ pgtap | public | bag_eq | text, anyarray, text | text
+ pgtap | public | bag_eq | text, text | text
+ pgtap | public | bag_eq | text, text, text | text
+ pgtap | public | bag_has | text, text | text
+ pgtap | public | bag_has | text, text, text | text
+ pgtap | public | bag_hasnt | text, text | text
+ pgtap | public | bag_hasnt | text, text, text | text
+ pgtap | public | bag_ne | text, anyarray | text
+ pgtap | public | bag_ne | text, anyarray, text | text
+ pgtap | public | bag_ne | text, text | text
+ pgtap | public | bag_ne | text, text, text | text
+ pgtap | public | can | name, name[] | text
+ pgtap | public | can | name, name[], text | text
+ pgtap | public | can | name[] | text
+ pgtap | public | can | name[], text | text
+ pgtap | public | cast_context_is | name, name, text | text
+ pgtap | public | cast_context_is | name, name, text, text | text
+ pgtap | public | casts_are | text[] | text
+ pgtap | public | casts_are | text[], text | text
+ pgtap | public | check_test | text, boolean | SETOF text
+ pgtap | public | check_test | text, boolean, text | SETOF text
+ pgtap | public | check_test | text, boolean, text, text | SETOF text
+ pgtap | public | check_test | text, boolean, text, text, text | SETOF text
+ pgtap | public | check_test | text, boolean, text, text, text, boolean | SETOF text
+ pgtap | public | cmp_ok | anyelement, text, anyelement | text
+ pgtap | public | cmp_ok | anyelement, text, anyelement, text | text
+ pgtap | public | col_default_is | name, name, anyelement | text
+ pgtap | public | col_default_is | name, name, anyelement, text | text
+ pgtap | public | col_default_is | name, name, name, anyelement, text | text
+ pgtap | public | col_default_is | name, name, name, text, text | text
+ pgtap | public | col_default_is | name, name, text | text
+ pgtap | public | col_default_is | name, name, text, text | text
+ pgtap | public | col_has_check | name, name | text
+ pgtap | public | col_has_check | name, name, name, text | text
+ pgtap | public | col_has_check | name, name, name[], text | text
+ pgtap | public | col_has_check | name, name, text | text
+ pgtap | public | col_has_check | name, name[] | text
+ pgtap | public | col_has_check | name, name[], text | text
+ pgtap | public | col_has_default | name, name | text
+ pgtap | public | col_has_default | name, name, name, text | text
+ pgtap | public | col_has_default | name, name, text | text
+ pgtap | public | col_hasnt_default | name, name | text
+ pgtap | public | col_hasnt_default | name, name, name, text | text
+ pgtap | public | col_hasnt_default | name, name, text | text
+ pgtap | public | col_is_fk | name, name | text
+ pgtap | public | col_is_fk | name, name, name, text | text
+ pgtap | public | col_is_fk | name, name, name[], text | text
+ pgtap | public | col_is_fk | name, name, text | text
+ pgtap | public | col_is_fk | name, name[] | text
+ pgtap | public | col_is_fk | name, name[], text | text
+ pgtap | public | col_is_null | schema_name name, table_name name, column_name name, description text | text
+ pgtap | public | col_is_null | table_name name, column_name name, description text | text
+ pgtap | public | col_is_pk | name, name | text
+ pgtap | public | col_is_pk | name, name, name, text | text
+ pgtap | public | col_is_pk | name, name, name[], text | text
+ pgtap | public | col_is_pk | name, name, text | text
+ pgtap | public | col_is_pk | name, name[] | text
+ pgtap | public | col_is_pk | name, name[], text | text
+ pgtap | public | col_is_unique | name, name | text
+ pgtap | public | col_is_unique | name, name, name | text
+ pgtap | public | col_is_unique | name, name, name, text | text
+ pgtap | public | col_is_unique | name, name, name[] | text
+ pgtap | public | col_is_unique | name, name, name[], text | text
+ pgtap | public | col_is_unique | name, name, text | text
+ pgtap | public | col_is_unique | name, name[] | text
+ pgtap | public | col_is_unique | name, name[], text | text
+ pgtap | public | col_isnt_fk | name, name | text
+ pgtap | public | col_isnt_fk | name, name, name, text | text
+ pgtap | public | col_isnt_fk | name, name, name[], text | text
+ pgtap | public | col_isnt_fk | name, name, text | text
+ pgtap | public | col_isnt_fk | name, name[] | text
+ pgtap | public | col_isnt_fk | name, name[], text | text
+ pgtap | public | col_isnt_pk | name, name | text
+ pgtap | public | col_isnt_pk | name, name, name, text | text
+ pgtap | public | col_isnt_pk | name, name, name[], text | text
+ pgtap | public | col_isnt_pk | name, name, text | text
+ pgtap | public | col_isnt_pk | name, name[] | text
+ pgtap | public | col_isnt_pk | name, name[], text | text
+ pgtap | public | col_not_null | schema_name name, table_name name, column_name name, description text | text
+ pgtap | public | col_not_null | table_name name, column_name name, description text | text
+ pgtap | public | col_type_is | name, name, name, name, text | text
+ pgtap | public | col_type_is | name, name, name, name, text, text | text
+ pgtap | public | col_type_is | name, name, name, text | text
+ pgtap | public | col_type_is | name, name, name, text, text | text
+ pgtap | public | col_type_is | name, name, text | text
+ pgtap | public | col_type_is | name, name, text, text | text
+ pgtap | public | collect_tap | character varying[] | text
+ pgtap | public | collect_tap | VARIADIC text[] | text
+ pgtap | public | column_privs_are | name, name, name, name, name[] | text
+ pgtap | public | column_privs_are | name, name, name, name, name[], text | text
+ pgtap | public | column_privs_are | name, name, name, name[] | text
+ pgtap | public | column_privs_are | name, name, name, name[], text | text
+ pgtap | public | columns_are | name, name, name[] | text
+ pgtap | public | columns_are | name, name, name[], text | text
+ pgtap | public | columns_are | name, name[] | text
+ pgtap | public | columns_are | name, name[], text | text
+ pgtap | public | composite_owner_is | name, name | text
+ pgtap | public | composite_owner_is | name, name, name | text
+ pgtap | public | composite_owner_is | name, name, name, text | text
+ pgtap | public | composite_owner_is | name, name, text | text
+ pgtap | public | database_privs_are | name, name, name[] | text
+ pgtap | public | database_privs_are | name, name, name[], text | text
+ pgtap | public | db_owner_is | name, name | text
+ pgtap | public | db_owner_is | name, name, text | text
+ pgtap | public | diag | msg anyelement | text
+ pgtap | public | diag | msg text | text
+ pgtap | public | diag | VARIADIC anyarray | text
+ pgtap | public | diag | VARIADIC text[] | text
+ pgtap | public | diag_test_name | text | text
+ pgtap | public | display_oper | name, oid | text
+ pgtap | public | do_tap | | SETOF text
+ pgtap | public | do_tap | name | SETOF text
+ pgtap | public | do_tap | name, text | SETOF text
+ pgtap | public | do_tap | text | SETOF text
+ pgtap | public | doesnt_imatch | anyelement, text | text
+ pgtap | public | doesnt_imatch | anyelement, text, text | text
+ pgtap | public | doesnt_match | anyelement, text | text
+ pgtap | public | doesnt_match | anyelement, text, text | text
+ pgtap | public | domain_type_is | name, text, name, text | text
+ pgtap | public | domain_type_is | name, text, name, text, text | text
+ pgtap | public | domain_type_is | name, text, text | text
+ pgtap | public | domain_type_is | name, text, text, text | text
+ pgtap | public | domain_type_is | text, text | text
+ pgtap | public | domain_type_is | text, text, text | text
+ pgtap | public | domain_type_isnt | name, text, name, text | text
+ pgtap | public | domain_type_isnt | name, text, name, text, text | text
+ pgtap | public | domain_type_isnt | name, text, text | text
+ pgtap | public | domain_type_isnt | name, text, text, text | text
+ pgtap | public | domain_type_isnt | text, text | text
+ pgtap | public | domain_type_isnt | text, text, text | text
+ pgtap | public | domains_are | name, name[] | text
+ pgtap | public | domains_are | name, name[], text | text
+ pgtap | public | domains_are | name[] | text
+ pgtap | public | domains_are | name[], text | text
+ pgtap | public | enum_has_labels | name, name, name[] | text
+ pgtap | public | enum_has_labels | name, name, name[], text | text
+ pgtap | public | enum_has_labels | name, name[] | text
+ pgtap | public | enum_has_labels | name, name[], text | text
+ pgtap | public | enums_are | name, name[] | text
+ pgtap | public | enums_are | name, name[], text | text
+ pgtap | public | enums_are | name[] | text
+ pgtap | public | enums_are | name[], text | text
+ pgtap | public | extensions_are | name, name[] | text
+ pgtap | public | extensions_are | name, name[], text | text
+ pgtap | public | extensions_are | name[] | text
+ pgtap | public | extensions_are | name[], text | text
+ pgtap | public | fail | | text
+ pgtap | public | fail | text | text
+ pgtap | public | fdw_privs_are | name, name, name[] | text
+ pgtap | public | fdw_privs_are | name, name, name[], text | text
+ pgtap | public | findfuncs | name, text | text[]
+ pgtap | public | findfuncs | name, text, text | text[]
+ pgtap | public | findfuncs | text | text[]
+ pgtap | public | findfuncs | text, text | text[]
+ pgtap | public | finish | exception_on_failure boolean | SETOF text
+ pgtap | public | fk_ok | name, name, name, name | text
+ pgtap | public | fk_ok | name, name, name, name, name, name, text | text
+ pgtap | public | fk_ok | name, name, name, name, name, text | text
+ pgtap | public | fk_ok | name, name, name, name, text | text
+ pgtap | public | fk_ok | name, name, name[], name, name, name[] | text
+ pgtap | public | fk_ok | name, name, name[], name, name, name[], text | text
+ pgtap | public | fk_ok | name, name[], name, name[] | text
+ pgtap | public | fk_ok | name, name[], name, name[], text | text
+ pgtap | public | foreign_table_owner_is | name, name | text
+ pgtap | public | foreign_table_owner_is | name, name, name | text
+ pgtap | public | foreign_table_owner_is | name, name, name, text | text
+ pgtap | public | foreign_table_owner_is | name, name, text | text
+ pgtap | public | foreign_tables_are | name, name[] | text
+ pgtap | public | foreign_tables_are | name, name[], text | text
+ pgtap | public | foreign_tables_are | name[] | text
+ pgtap | public | foreign_tables_are | name[], text | text
+ pgtap | public | function_lang_is | name, name | text
+ pgtap | public | function_lang_is | name, name, name | text
+ pgtap | public | function_lang_is | name, name, name, text | text
+ pgtap | public | function_lang_is | name, name, name[], name | text
+ pgtap | public | function_lang_is | name, name, name[], name, text | text
+ pgtap | public | function_lang_is | name, name, text | text
+ pgtap | public | function_lang_is | name, name[], name | text
+ pgtap | public | function_lang_is | name, name[], name, text | text
+ pgtap | public | function_owner_is | name, name, name[], name | text
+ pgtap | public | function_owner_is | name, name, name[], name, text | text
+ pgtap | public | function_owner_is | name, name[], name | text
+ pgtap | public | function_owner_is | name, name[], name, text | text
+ pgtap | public | function_privs_are | name, name, name[], name, name[] | text
+ pgtap | public | function_privs_are | name, name, name[], name, name[], text | text
+ pgtap | public | function_privs_are | name, name[], name, name[] | text
+ pgtap | public | function_privs_are | name, name[], name, name[], text | text
+ pgtap | public | function_returns | name, name, name[], text | text
+ pgtap | public | function_returns | name, name, name[], text, text | text
+ pgtap | public | function_returns | name, name, text | text
+ pgtap | public | function_returns | name, name, text, text | text
+ pgtap | public | function_returns | name, name[], text | text
+ pgtap | public | function_returns | name, name[], text, text | text
+ pgtap | public | function_returns | name, text | text
+ pgtap | public | function_returns | name, text, text | text
+ pgtap | public | functions_are | name, name[] | text
+ pgtap | public | functions_are | name, name[], text | text
+ pgtap | public | functions_are | name[] | text
+ pgtap | public | functions_are | name[], text | text
+ pgtap | public | groups_are | name[] | text
+ pgtap | public | groups_are | name[], text | text
+ pgtap | public | has_cast | name, name | text
+ pgtap | public | has_cast | name, name, name | text
+ pgtap | public | has_cast | name, name, name, name | text
+ pgtap | public | has_cast | name, name, name, name, text | text
+ pgtap | public | has_cast | name, name, name, text | text
+ pgtap | public | has_cast | name, name, text | text
+ pgtap | public | has_check | name | text
+ pgtap | public | has_check | name, name, text | text
+ pgtap | public | has_check | name, text | text
+ pgtap | public | has_column | name, name | text
+ pgtap | public | has_column | name, name, name, text | text
+ pgtap | public | has_column | name, name, text | text
+ pgtap | public | has_composite | name | text
+ pgtap | public | has_composite | name, name, text | text
+ pgtap | public | has_composite | name, text | text
+ pgtap | public | has_domain | name | text
+ pgtap | public | has_domain | name, name | text
+ pgtap | public | has_domain | name, name, text | text
+ pgtap | public | has_domain | name, text | text
+ pgtap | public | has_enum | name | text
+ pgtap | public | has_enum | name, name | text
+ pgtap | public | has_enum | name, name, text | text
+ pgtap | public | has_enum | name, text | text
+ pgtap | public | has_extension | name | text
+ pgtap | public | has_extension | name, name | text
+ pgtap | public | has_extension | name, name, text | text
+ pgtap | public | has_extension | name, text | text
+ pgtap | public | has_fk | name | text
+ pgtap | public | has_fk | name, name, text | text
+ pgtap | public | has_fk | name, text | text
+ pgtap | public | has_foreign_table | name | text
+ pgtap | public | has_foreign_table | name, name | text
+ pgtap | public | has_foreign_table | name, name, text | text
+ pgtap | public | has_foreign_table | name, text | text
+ pgtap | public | has_function | name | text
+ pgtap | public | has_function | name, name | text
+ pgtap | public | has_function | name, name, name[] | text
+ pgtap | public | has_function | name, name, name[], text | text
+ pgtap | public | has_function | name, name, text | text
+ pgtap | public | has_function | name, name[] | text
+ pgtap | public | has_function | name, name[], text | text
+ pgtap | public | has_function | name, text | text
+ pgtap | public | has_group | name | text
+ pgtap | public | has_group | name, text | text
+ pgtap | public | has_index | name, name | text
+ pgtap | public | has_index | name, name, name | text
+ pgtap | public | has_index | name, name, name, name | text
+ pgtap | public | has_index | name, name, name, name, text | text
+ pgtap | public | has_index | name, name, name, name[] | text
+ pgtap | public | has_index | name, name, name, name[], text | text
+ pgtap | public | has_index | name, name, name, text | text
+ pgtap | public | has_index | name, name, name[] | text
+ pgtap | public | has_index | name, name, name[], text | text
+ pgtap | public | has_index | name, name, text | text
+ pgtap | public | has_inherited_tables | name | text
+ pgtap | public | has_inherited_tables | name, name | text
+ pgtap | public | has_inherited_tables | name, name, text | text
+ pgtap | public | has_inherited_tables | name, text | text
+ pgtap | public | has_language | name | text
+ pgtap | public | has_language | name, text | text
+ pgtap | public | has_leftop | name, name | text
+ pgtap | public | has_leftop | name, name, name | text
+ pgtap | public | has_leftop | name, name, name, name | text
+ pgtap | public | has_leftop | name, name, name, name, text | text
+ pgtap | public | has_leftop | name, name, name, text | text
+ pgtap | public | has_leftop | name, name, text | text
+ pgtap | public | has_materialized_view | name | text
+ pgtap | public | has_materialized_view | name, name, text | text
+ pgtap | public | has_materialized_view | name, text | text
+ pgtap | public | has_opclass | name | text
+ pgtap | public | has_opclass | name, name | text
+ pgtap | public | has_opclass | name, name, text | text
+ pgtap | public | has_opclass | name, text | text
+ pgtap | public | has_operator | name, name, name | text
+ pgtap | public | has_operator | name, name, name, name | text
+ pgtap | public | has_operator | name, name, name, name, name | text
+ pgtap | public | has_operator | name, name, name, name, name, text | text
+ pgtap | public | has_operator | name, name, name, name, text | text
+ pgtap | public | has_operator | name, name, name, text | text
+ pgtap | public | has_pk | name | text
+ pgtap | public | has_pk | name, name, text | text
+ pgtap | public | has_pk | name, text | text
+ pgtap | public | has_relation | name | text
+ pgtap | public | has_relation | name, name, text | text
+ pgtap | public | has_relation | name, text | text
+ pgtap | public | has_rightop | name, name | text
+ pgtap | public | has_rightop | name, name, name | text
+ pgtap | public | has_rightop | name, name, name, name | text
+ pgtap | public | has_rightop | name, name, name, name, text | text
+ pgtap | public | has_rightop | name, name, name, text | text
+ pgtap | public | has_rightop | name, name, text | text
+ pgtap | public | has_role | name | text
+ pgtap | public | has_role | name, text | text
+ pgtap | public | has_rule | name, name | text
+ pgtap | public | has_rule | name, name, name | text
+ pgtap | public | has_rule | name, name, name, text | text
+ pgtap | public | has_rule | name, name, text | text
+ pgtap | public | has_schema | name | text
+ pgtap | public | has_schema | name, text | text
+ pgtap | public | has_sequence | name | text
+ pgtap | public | has_sequence | name, name | text
+ pgtap | public | has_sequence | name, name, text | text
+ pgtap | public | has_sequence | name, text | text
+ pgtap | public | has_table | name | text
+ pgtap | public | has_table | name, name | text
+ pgtap | public | has_table | name, name, text | text
+ pgtap | public | has_table | name, text | text
+ pgtap | public | has_tablespace | name | text
+ pgtap | public | has_tablespace | name, text | text
+ pgtap | public | has_tablespace | name, text, text | text
+ pgtap | public | has_trigger | name, name | text
+ pgtap | public | has_trigger | name, name, name | text
+ pgtap | public | has_trigger | name, name, name, text | text
+ pgtap | public | has_trigger | name, name, text | text
+ pgtap | public | has_type | name | text
+ pgtap | public | has_type | name, name | text
+ pgtap | public | has_type | name, name, text | text
+ pgtap | public | has_type | name, text | text
+ pgtap | public | has_unique | text | text
+ pgtap | public | has_unique | text, text | text
+ pgtap | public | has_unique | text, text, text | text
+ pgtap | public | has_user | name | text
+ pgtap | public | has_user | name, text | text
+ pgtap | public | has_view | name | text
+ pgtap | public | has_view | name, name | text
+ pgtap | public | has_view | name, name, text | text
+ pgtap | public | has_view | name, text | text
+ pgtap | public | hasnt_cast | name, name | text
+ pgtap | public | hasnt_cast | name, name, name | text
+ pgtap | public | hasnt_cast | name, name, name, name | text
+ pgtap | public | hasnt_cast | name, name, name, name, text | text
+ pgtap | public | hasnt_cast | name, name, name, text | text
+ pgtap | public | hasnt_cast | name, name, text | text
+ pgtap | public | hasnt_column | name, name | text
+ pgtap | public | hasnt_column | name, name, name, text | text
+ pgtap | public | hasnt_column | name, name, text | text
+ pgtap | public | hasnt_composite | name | text
+ pgtap | public | hasnt_composite | name, name, text | text
+ pgtap | public | hasnt_composite | name, text | text
+ pgtap | public | hasnt_domain | name | text
+ pgtap | public | hasnt_domain | name, name | text
+ pgtap | public | hasnt_domain | name, name, text | text
+ pgtap | public | hasnt_domain | name, text | text
+ pgtap | public | hasnt_enum | name | text
+ pgtap | public | hasnt_enum | name, name | text
+ pgtap | public | hasnt_enum | name, name, text | text
+ pgtap | public | hasnt_enum | name, text | text
+ pgtap | public | hasnt_extension | name | text
+ pgtap | public | hasnt_extension | name, name | text
+ pgtap | public | hasnt_extension | name, name, text | text
+ pgtap | public | hasnt_extension | name, text | text
+ pgtap | public | hasnt_fk | name | text
+ pgtap | public | hasnt_fk | name, name, text | text
+ pgtap | public | hasnt_fk | name, text | text
+ pgtap | public | hasnt_foreign_table | name | text
+ pgtap | public | hasnt_foreign_table | name, name | text
+ pgtap | public | hasnt_foreign_table | name, name, text | text
+ pgtap | public | hasnt_foreign_table | name, text | text
+ pgtap | public | hasnt_function | name | text
+ pgtap | public | hasnt_function | name, name | text
+ pgtap | public | hasnt_function | name, name, name[] | text
+ pgtap | public | hasnt_function | name, name, name[], text | text
+ pgtap | public | hasnt_function | name, name, text | text
+ pgtap | public | hasnt_function | name, name[] | text
+ pgtap | public | hasnt_function | name, name[], text | text
+ pgtap | public | hasnt_function | name, text | text
+ pgtap | public | hasnt_group | name | text
+ pgtap | public | hasnt_group | name, text | text
+ pgtap | public | hasnt_index | name, name | text
+ pgtap | public | hasnt_index | name, name, name | text
+ pgtap | public | hasnt_index | name, name, name, text | text
+ pgtap | public | hasnt_index | name, name, text | text
+ pgtap | public | hasnt_inherited_tables | name | text
+ pgtap | public | hasnt_inherited_tables | name, name | text
+ pgtap | public | hasnt_inherited_tables | name, name, text | text
+ pgtap | public | hasnt_inherited_tables | name, text | text
+ pgtap | public | hasnt_language | name | text
+ pgtap | public | hasnt_language | name, text | text
+ pgtap | public | hasnt_leftop | name, name | text
+ pgtap | public | hasnt_leftop | name, name, name | text
+ pgtap | public | hasnt_leftop | name, name, name, name | text
+ pgtap | public | hasnt_leftop | name, name, name, name, text | text
+ pgtap | public | hasnt_leftop | name, name, name, text | text
+ pgtap | public | hasnt_leftop | name, name, text | text
+ pgtap | public | hasnt_materialized_view | name | text
+ pgtap | public | hasnt_materialized_view | name, name, text | text
+ pgtap | public | hasnt_materialized_view | name, text | text
+ pgtap | public | hasnt_opclass | name | text
+ pgtap | public | hasnt_opclass | name, name | text
+ pgtap | public | hasnt_opclass | name, name, text | text
+ pgtap | public | hasnt_opclass | name, text | text
+ pgtap | public | hasnt_operator | name, name, name | text
+ pgtap | public | hasnt_operator | name, name, name, name | text
+ pgtap | public | hasnt_operator | name, name, name, name, name | text
+ pgtap | public | hasnt_operator | name, name, name, name, name, text | text
+ pgtap | public | hasnt_operator | name, name, name, name, text | text
+ pgtap | public | hasnt_operator | name, name, name, text | text
+ pgtap | public | hasnt_pk | name | text
+ pgtap | public | hasnt_pk | name, name, text | text
+ pgtap | public | hasnt_pk | name, text | text
+ pgtap | public | hasnt_relation | name | text
+ pgtap | public | hasnt_relation | name, name, text | text
+ pgtap | public | hasnt_relation | name, text | text
+ pgtap | public | hasnt_rightop | name, name | text
+ pgtap | public | hasnt_rightop | name, name, name | text
+ pgtap | public | hasnt_rightop | name, name, name, name | text
+ pgtap | public | hasnt_rightop | name, name, name, name, text | text
+ pgtap | public | hasnt_rightop | name, name, name, text | text
+ pgtap | public | hasnt_rightop | name, name, text | text
+ pgtap | public | hasnt_role | name | text
+ pgtap | public | hasnt_role | name, text | text
+ pgtap | public | hasnt_rule | name, name | text
+ pgtap | public | hasnt_rule | name, name, name | text
+ pgtap | public | hasnt_rule | name, name, name, text | text
+ pgtap | public | hasnt_rule | name, name, text | text
+ pgtap | public | hasnt_schema | name | text
+ pgtap | public | hasnt_schema | name, text | text
+ pgtap | public | hasnt_sequence | name | text
+ pgtap | public | hasnt_sequence | name, name, text | text
+ pgtap | public | hasnt_sequence | name, text | text
+ pgtap | public | hasnt_table | name | text
+ pgtap | public | hasnt_table | name, name | text
+ pgtap | public | hasnt_table | name, name, text | text
+ pgtap | public | hasnt_table | name, text | text
+ pgtap | public | hasnt_tablespace | name | text
+ pgtap | public | hasnt_tablespace | name, text | text
+ pgtap | public | hasnt_trigger | name, name | text
+ pgtap | public | hasnt_trigger | name, name, name | text
+ pgtap | public | hasnt_trigger | name, name, name, text | text
+ pgtap | public | hasnt_trigger | name, name, text | text
+ pgtap | public | hasnt_type | name | text
+ pgtap | public | hasnt_type | name, name | text
+ pgtap | public | hasnt_type | name, name, text | text
+ pgtap | public | hasnt_type | name, text | text
+ pgtap | public | hasnt_user | name | text
+ pgtap | public | hasnt_user | name, text | text
+ pgtap | public | hasnt_view | name | text
+ pgtap | public | hasnt_view | name, name | text
+ pgtap | public | hasnt_view | name, name, text | text
+ pgtap | public | hasnt_view | name, text | text
+ pgtap | public | ialike | anyelement, text | text
+ pgtap | public | ialike | anyelement, text, text | text
+ pgtap | public | imatches | anyelement, text | text
+ pgtap | public | imatches | anyelement, text, text | text
+ pgtap | public | in_todo | | boolean
+ pgtap | public | index_is_primary | name | text
+ pgtap | public | index_is_primary | name, name | text
+ pgtap | public | index_is_primary | name, name, name | text
+ pgtap | public | index_is_primary | name, name, name, text | text
+ pgtap | public | index_is_type | name, name | text
+ pgtap | public | index_is_type | name, name, name | text
+ pgtap | public | index_is_type | name, name, name, name | text
+ pgtap | public | index_is_type | name, name, name, name, text | text
+ pgtap | public | index_is_unique | name | text
+ pgtap | public | index_is_unique | name, name | text
+ pgtap | public | index_is_unique | name, name, name | text
+ pgtap | public | index_is_unique | name, name, name, text | text
+ pgtap | public | index_owner_is | name, name, name | text
+ pgtap | public | index_owner_is | name, name, name, name | text
+ pgtap | public | index_owner_is | name, name, name, name, text | text
+ pgtap | public | index_owner_is | name, name, name, text | text
+ pgtap | public | indexes_are | name, name, name[] | text
+ pgtap | public | indexes_are | name, name, name[], text | text
+ pgtap | public | indexes_are | name, name[] | text
+ pgtap | public | indexes_are | name, name[], text | text
+ pgtap | public | is | anyelement, anyelement | text
+ pgtap | public | is | anyelement, anyelement, text | text
+ pgtap | public | is_aggregate | name | text
+ pgtap | public | is_aggregate | name, name | text
+ pgtap | public | is_aggregate | name, name, name[] | text
+ pgtap | public | is_aggregate | name, name, name[], text | text
+ pgtap | public | is_aggregate | name, name, text | text
+ pgtap | public | is_aggregate | name, name[] | text
+ pgtap | public | is_aggregate | name, name[], text | text
+ pgtap | public | is_aggregate | name, text | text
+ pgtap | public | is_ancestor_of | name, name | text
+ pgtap | public | is_ancestor_of | name, name, integer | text
+ pgtap | public | is_ancestor_of | name, name, integer, text | text
+ pgtap | public | is_ancestor_of | name, name, name, name | text
+ pgtap | public | is_ancestor_of | name, name, name, name, integer | text
+ pgtap | public | is_ancestor_of | name, name, name, name, integer, text | text
+ pgtap | public | is_ancestor_of | name, name, name, name, text | text
+ pgtap | public | is_ancestor_of | name, name, text | text
+ pgtap | public | is_clustered | name | text
+ pgtap | public | is_clustered | name, name | text
+ pgtap | public | is_clustered | name, name, name | text
+ pgtap | public | is_clustered | name, name, name, text | text
+ pgtap | public | is_definer | name | text
+ pgtap | public | is_definer | name, name | text
+ pgtap | public | is_definer | name, name, name[] | text
+ pgtap | public | is_definer | name, name, name[], text | text
+ pgtap | public | is_definer | name, name, text | text
+ pgtap | public | is_definer | name, name[] | text
+ pgtap | public | is_definer | name, name[], text | text
+ pgtap | public | is_definer | name, text | text
+ pgtap | public | is_descendent_of | name, name | text
+ pgtap | public | is_descendent_of | name, name, integer | text
+ pgtap | public | is_descendent_of | name, name, integer, text | text
+ pgtap | public | is_descendent_of | name, name, name, name | text
+ pgtap | public | is_descendent_of | name, name, name, name, integer | text
+ pgtap | public | is_descendent_of | name, name, name, name, integer, text | text
+ pgtap | public | is_descendent_of | name, name, name, name, text | text
+ pgtap | public | is_descendent_of | name, name, text | text
+ pgtap | public | is_empty | text | text
+ pgtap | public | is_empty | text, text | text
+ pgtap | public | is_indexed | name, name | text
+ pgtap | public | is_indexed | name, name, name | text
+ pgtap | public | is_indexed | name, name, name, text | text
+ pgtap | public | is_indexed | name, name, name[] | text
+ pgtap | public | is_indexed | name, name, name[], text | text
+ pgtap | public | is_indexed | name, name[] | text
+ pgtap | public | is_indexed | name, name[], text | text
+ pgtap | public | is_member_of | name, name | text
+ pgtap | public | is_member_of | name, name, text | text
+ pgtap | public | is_member_of | name, name[] | text
+ pgtap | public | is_member_of | name, name[], text | text
+ pgtap | public | is_normal_function | name | text
+ pgtap | public | is_normal_function | name, name | text
+ pgtap | public | is_normal_function | name, name, name[] | text
+ pgtap | public | is_normal_function | name, name, name[], text | text
+ pgtap | public | is_normal_function | name, name, text | text
+ pgtap | public | is_normal_function | name, name[] | text
+ pgtap | public | is_normal_function | name, name[], text | text
+ pgtap | public | is_normal_function | name, text | text
+ pgtap | public | is_partition_of | name, name | text
+ pgtap | public | is_partition_of | name, name, name, name | text
+ pgtap | public | is_partition_of | name, name, name, name, text | text
+ pgtap | public | is_partition_of | name, name, text | text
+ pgtap | public | is_partitioned | name | text
+ pgtap | public | is_partitioned | name, name | text
+ pgtap | public | is_partitioned | name, name, text | text
+ pgtap | public | is_partitioned | name, text | text
+ pgtap | public | is_procedure | name | text
+ pgtap | public | is_procedure | name, name | text
+ pgtap | public | is_procedure | name, name, name[] | text
+ pgtap | public | is_procedure | name, name, name[], text | text
+ pgtap | public | is_procedure | name, name, text | text
+ pgtap | public | is_procedure | name, name[] | text
+ pgtap | public | is_procedure | name, name[], text | text
+ pgtap | public | is_procedure | name, text | text
+ pgtap | public | is_strict | name | text
+ pgtap | public | is_strict | name, name | text
+ pgtap | public | is_strict | name, name, name[] | text
+ pgtap | public | is_strict | name, name, name[], text | text
+ pgtap | public | is_strict | name, name, text | text
+ pgtap | public | is_strict | name, name[] | text
+ pgtap | public | is_strict | name, name[], text | text
+ pgtap | public | is_strict | name, text | text
+ pgtap | public | is_superuser | name | text
+ pgtap | public | is_superuser | name, text | text
+ pgtap | public | is_window | name | text
+ pgtap | public | is_window | name, name | text
+ pgtap | public | is_window | name, name, name[] | text
+ pgtap | public | is_window | name, name, name[], text | text
+ pgtap | public | is_window | name, name, text | text
+ pgtap | public | is_window | name, name[] | text
+ pgtap | public | is_window | name, name[], text | text
+ pgtap | public | is_window | name, text | text
+ pgtap | public | isa_ok | anyelement, regtype | text
+ pgtap | public | isa_ok | anyelement, regtype, text | text
+ pgtap | public | isnt | anyelement, anyelement | text
+ pgtap | public | isnt | anyelement, anyelement, text | text
+ pgtap | public | isnt_aggregate | name | text
+ pgtap | public | isnt_aggregate | name, name | text
+ pgtap | public | isnt_aggregate | name, name, name[] | text
+ pgtap | public | isnt_aggregate | name, name, name[], text | text
+ pgtap | public | isnt_aggregate | name, name, text | text
+ pgtap | public | isnt_aggregate | name, name[] | text
+ pgtap | public | isnt_aggregate | name, name[], text | text
+ pgtap | public | isnt_aggregate | name, text | text
+ pgtap | public | isnt_ancestor_of | name, name | text
+ pgtap | public | isnt_ancestor_of | name, name, integer | text
+ pgtap | public | isnt_ancestor_of | name, name, integer, text | text
+ pgtap | public | isnt_ancestor_of | name, name, name, name | text
+ pgtap | public | isnt_ancestor_of | name, name, name, name, integer | text
+ pgtap | public | isnt_ancestor_of | name, name, name, name, integer, text | text
+ pgtap | public | isnt_ancestor_of | name, name, name, name, text | text
+ pgtap | public | isnt_ancestor_of | name, name, text | text
+ pgtap | public | isnt_definer | name | text
+ pgtap | public | isnt_definer | name, name | text
+ pgtap | public | isnt_definer | name, name, name[] | text
+ pgtap | public | isnt_definer | name, name, name[], text | text
+ pgtap | public | isnt_definer | name, name, text | text
+ pgtap | public | isnt_definer | name, name[] | text
+ pgtap | public | isnt_definer | name, name[], text | text
+ pgtap | public | isnt_definer | name, text | text
+ pgtap | public | isnt_descendent_of | name, name | text
+ pgtap | public | isnt_descendent_of | name, name, integer | text
+ pgtap | public | isnt_descendent_of | name, name, integer, text | text
+ pgtap | public | isnt_descendent_of | name, name, name, name | text
+ pgtap | public | isnt_descendent_of | name, name, name, name, integer | text
+ pgtap | public | isnt_descendent_of | name, name, name, name, integer, text | text
+ pgtap | public | isnt_descendent_of | name, name, name, name, text | text
+ pgtap | public | isnt_descendent_of | name, name, text | text
+ pgtap | public | isnt_empty | text | text
+ pgtap | public | isnt_empty | text, text | text
+ pgtap | public | isnt_member_of | name, name | text
+ pgtap | public | isnt_member_of | name, name, text | text
+ pgtap | public | isnt_member_of | name, name[] | text
+ pgtap | public | isnt_member_of | name, name[], text | text
+ pgtap | public | isnt_normal_function | name | text
+ pgtap | public | isnt_normal_function | name, name | text
+ pgtap | public | isnt_normal_function | name, name, name[] | text
+ pgtap | public | isnt_normal_function | name, name, name[], text | text
+ pgtap | public | isnt_normal_function | name, name, text | text
+ pgtap | public | isnt_normal_function | name, name[] | text
+ pgtap | public | isnt_normal_function | name, name[], text | text
+ pgtap | public | isnt_normal_function | name, text | text
+ pgtap | public | isnt_partitioned | name | text
+ pgtap | public | isnt_partitioned | name, name | text
+ pgtap | public | isnt_partitioned | name, name, text | text
+ pgtap | public | isnt_partitioned | name, text | text
+ pgtap | public | isnt_procedure | name | text
+ pgtap | public | isnt_procedure | name, name | text
+ pgtap | public | isnt_procedure | name, name, name[] | text
+ pgtap | public | isnt_procedure | name, name, name[], text | text
+ pgtap | public | isnt_procedure | name, name, text | text
+ pgtap | public | isnt_procedure | name, name[] | text
+ pgtap | public | isnt_procedure | name, name[], text | text
+ pgtap | public | isnt_procedure | name, text | text
+ pgtap | public | isnt_strict | name | text
+ pgtap | public | isnt_strict | name, name | text
+ pgtap | public | isnt_strict | name, name, name[] | text
+ pgtap | public | isnt_strict | name, name, name[], text | text
+ pgtap | public | isnt_strict | name, name, text | text
+ pgtap | public | isnt_strict | name, name[] | text
+ pgtap | public | isnt_strict | name, name[], text | text
+ pgtap | public | isnt_strict | name, text | text
+ pgtap | public | isnt_superuser | name | text
+ pgtap | public | isnt_superuser | name, text | text
+ pgtap | public | isnt_window | name | text
+ pgtap | public | isnt_window | name, name | text
+ pgtap | public | isnt_window | name, name, name[] | text
+ pgtap | public | isnt_window | name, name, name[], text | text
+ pgtap | public | isnt_window | name, name, text | text
+ pgtap | public | isnt_window | name, name[] | text
+ pgtap | public | isnt_window | name, name[], text | text
+ pgtap | public | isnt_window | name, text | text
+ pgtap | public | language_is_trusted | name | text
+ pgtap | public | language_is_trusted | name, text | text
+ pgtap | public | language_owner_is | name, name | text
+ pgtap | public | language_owner_is | name, name, text | text
+ pgtap | public | language_privs_are | name, name, name[] | text
+ pgtap | public | language_privs_are | name, name, name[], text | text
+ pgtap | public | languages_are | name[] | text
+ pgtap | public | languages_are | name[], text | text
+ pgtap | public | lives_ok | text | text
+ pgtap | public | lives_ok | text, text | text
+ pgtap | public | matches | anyelement, text | text
+ pgtap | public | matches | anyelement, text, text | text
+ pgtap | public | materialized_view_owner_is | name, name | text
+ pgtap | public | materialized_view_owner_is | name, name, name | text
+ pgtap | public | materialized_view_owner_is | name, name, name, text | text
+ pgtap | public | materialized_view_owner_is | name, name, text | text
+ pgtap | public | materialized_views_are | name, name[] | text
+ pgtap | public | materialized_views_are | name, name[], text | text
+ pgtap | public | materialized_views_are | name[] | text
+ pgtap | public | materialized_views_are | name[], text | text
+ pgtap | public | no_plan | | SETOF boolean
+ pgtap | public | num_failed | | integer
+ pgtap | public | ok | boolean | text
+ pgtap | public | ok | boolean, text | text
+ pgtap | public | opclass_owner_is | name, name | text
+ pgtap | public | opclass_owner_is | name, name, name | text
+ pgtap | public | opclass_owner_is | name, name, name, text | text
+ pgtap | public | opclass_owner_is | name, name, text | text
+ pgtap | public | opclasses_are | name, name[] | text
+ pgtap | public | opclasses_are | name, name[], text | text
+ pgtap | public | opclasses_are | name[] | text
+ pgtap | public | opclasses_are | name[], text | text
+ pgtap | public | operators_are | name, text[] | text
+ pgtap | public | operators_are | name, text[], text | text
+ pgtap | public | operators_are | text[] | text
+ pgtap | public | operators_are | text[], text | text
+ pgtap | public | os_name | | text
+ pgtap | public | partitions_are | name, name, name[] | text
+ pgtap | public | partitions_are | name, name, name[], text | text
+ pgtap | public | partitions_are | name, name[] | text
+ pgtap | public | partitions_are | name, name[], text | text
+ pgtap | public | pass | | text
+ pgtap | public | pass | text | text
+ pgtap | public | performs_ok | text, numeric | text
+ pgtap | public | performs_ok | text, numeric, text | text
+ pgtap | public | performs_within | text, numeric, numeric | text
+ pgtap | public | performs_within | text, numeric, numeric, integer | text
+ pgtap | public | performs_within | text, numeric, numeric, integer, text | text
+ pgtap | public | performs_within | text, numeric, numeric, text | text
+ pgtap | public | pg_version | | text
+ pgtap | public | pg_version_num | | integer
+ pgtap | public | pgtap_version | | numeric
+ pgtap | public | plan | integer | text
+ pgtap | public | policies_are | name, name, name[] | text
+ pgtap | public | policies_are | name, name, name[], text | text
+ pgtap | public | policies_are | name, name[] | text
+ pgtap | public | policies_are | name, name[], text | text
+ pgtap | public | policy_cmd_is | name, name, name, text | text
+ pgtap | public | policy_cmd_is | name, name, name, text, text | text
+ pgtap | public | policy_cmd_is | name, name, text | text
+ pgtap | public | policy_cmd_is | name, name, text, text | text
+ pgtap | public | policy_roles_are | name, name, name, name[] | text
+ pgtap | public | policy_roles_are | name, name, name, name[], text | text
+ pgtap | public | policy_roles_are | name, name, name[] | text
+ pgtap | public | policy_roles_are | name, name, name[], text | text
+ pgtap | public | relation_owner_is | name, name | text
+ pgtap | public | relation_owner_is | name, name, name | text
+ pgtap | public | relation_owner_is | name, name, name, text | text
+ pgtap | public | relation_owner_is | name, name, text | text
+ pgtap | public | results_eq | refcursor, anyarray | text
+ pgtap | public | results_eq | refcursor, anyarray, text | text
+ pgtap | public | results_eq | refcursor, refcursor | text
+ pgtap | public | results_eq | refcursor, refcursor, text | text
+ pgtap | public | results_eq | refcursor, text | text
+ pgtap | public | results_eq | refcursor, text, text | text
+ pgtap | public | results_eq | text, anyarray | text
+ pgtap | public | results_eq | text, anyarray, text | text
+ pgtap | public | results_eq | text, refcursor | text
+ pgtap | public | results_eq | text, refcursor, text | text
+ pgtap | public | results_eq | text, text | text
+ pgtap | public | results_eq | text, text, text | text
+ pgtap | public | results_ne | refcursor, anyarray | text
+ pgtap | public | results_ne | refcursor, anyarray, text | text
+ pgtap | public | results_ne | refcursor, refcursor | text
+ pgtap | public | results_ne | refcursor, refcursor, text | text
+ pgtap | public | results_ne | refcursor, text | text
+ pgtap | public | results_ne | refcursor, text, text | text
+ pgtap | public | results_ne | text, anyarray | text
+ pgtap | public | results_ne | text, anyarray, text | text
+ pgtap | public | results_ne | text, refcursor | text
+ pgtap | public | results_ne | text, refcursor, text | text
+ pgtap | public | results_ne | text, text | text
+ pgtap | public | results_ne | text, text, text | text
+ pgtap | public | roles_are | name[] | text
+ pgtap | public | roles_are | name[], text | text
+ pgtap | public | row_eq | text, anyelement | text
+ pgtap | public | row_eq | text, anyelement, text | text
+ pgtap | public | rule_is_instead | name, name | text
+ pgtap | public | rule_is_instead | name, name, name | text
+ pgtap | public | rule_is_instead | name, name, name, text | text
+ pgtap | public | rule_is_instead | name, name, text | text
+ pgtap | public | rule_is_on | name, name, name, text | text
+ pgtap | public | rule_is_on | name, name, name, text, text | text
+ pgtap | public | rule_is_on | name, name, text | text
+ pgtap | public | rule_is_on | name, name, text, text | text
+ pgtap | public | rules_are | name, name, name[] | text
+ pgtap | public | rules_are | name, name, name[], text | text
+ pgtap | public | rules_are | name, name[] | text
+ pgtap | public | rules_are | name, name[], text | text
+ pgtap | public | runtests | | SETOF text
+ pgtap | public | runtests | name | SETOF text
+ pgtap | public | runtests | name, text | SETOF text
+ pgtap | public | runtests | text | SETOF text
+ pgtap | public | schema_owner_is | name, name | text
+ pgtap | public | schema_owner_is | name, name, text | text
+ pgtap | public | schema_privs_are | name, name, name[] | text
+ pgtap | public | schema_privs_are | name, name, name[], text | text
+ pgtap | public | schemas_are | name[] | text
+ pgtap | public | schemas_are | name[], text | text
+ pgtap | public | sequence_owner_is | name, name | text
+ pgtap | public | sequence_owner_is | name, name, name | text
+ pgtap | public | sequence_owner_is | name, name, name, text | text
+ pgtap | public | sequence_owner_is | name, name, text | text
+ pgtap | public | sequence_privs_are | name, name, name, name[] | text
+ pgtap | public | sequence_privs_are | name, name, name, name[], text | text
+ pgtap | public | sequence_privs_are | name, name, name[] | text
+ pgtap | public | sequence_privs_are | name, name, name[], text | text
+ pgtap | public | sequences_are | name, name[] | text
+ pgtap | public | sequences_are | name, name[], text | text
+ pgtap | public | sequences_are | name[] | text
+ pgtap | public | sequences_are | name[], text | text
+ pgtap | public | server_privs_are | name, name, name[] | text
+ pgtap | public | server_privs_are | name, name, name[], text | text
+ pgtap | public | set_eq | text, anyarray | text
+ pgtap | public | set_eq | text, anyarray, text | text
+ pgtap | public | set_eq | text, text | text
+ pgtap | public | set_eq | text, text, text | text
+ pgtap | public | set_has | text, text | text
+ pgtap | public | set_has | text, text, text | text
+ pgtap | public | set_hasnt | text, text | text
+ pgtap | public | set_hasnt | text, text, text | text
+ pgtap | public | set_ne | text, anyarray | text
+ pgtap | public | set_ne | text, anyarray, text | text
+ pgtap | public | set_ne | text, text | text
+ pgtap | public | set_ne | text, text, text | text
+ pgtap | public | skip | integer | text
+ pgtap | public | skip | integer, text | text
+ pgtap | public | skip | text | text
+ pgtap | public | skip | why text, how_many integer | text
+ pgtap | public | table_owner_is | name, name | text
+ pgtap | public | table_owner_is | name, name, name | text
+ pgtap | public | table_owner_is | name, name, name, text | text
+ pgtap | public | table_owner_is | name, name, text | text
+ pgtap | public | table_privs_are | name, name, name, name[] | text
+ pgtap | public | table_privs_are | name, name, name, name[], text | text
+ pgtap | public | table_privs_are | name, name, name[] | text
+ pgtap | public | table_privs_are | name, name, name[], text | text
+ pgtap | public | tables_are | name, name[] | text
+ pgtap | public | tables_are | name, name[], text | text
+ pgtap | public | tables_are | name[] | text
+ pgtap | public | tables_are | name[], text | text
+ pgtap | public | tablespace_owner_is | name, name | text
+ pgtap | public | tablespace_owner_is | name, name, text | text
+ pgtap | public | tablespace_privs_are | name, name, name[] | text
+ pgtap | public | tablespace_privs_are | name, name, name[], text | text
+ pgtap | public | tablespaces_are | name[] | text
+ pgtap | public | tablespaces_are | name[], text | text
+ pgtap | public | throws_ilike | text, text | text
+ pgtap | public | throws_ilike | text, text, text | text
+ pgtap | public | throws_imatching | text, text | text
+ pgtap | public | throws_imatching | text, text, text | text
+ pgtap | public | throws_like | text, text | text
+ pgtap | public | throws_like | text, text, text | text
+ pgtap | public | throws_matching | text, text | text
+ pgtap | public | throws_matching | text, text, text | text
+ pgtap | public | throws_ok | text | text
+ pgtap | public | throws_ok | text, character, text, text | text
+ pgtap | public | throws_ok | text, integer | text
+ pgtap | public | throws_ok | text, integer, text | text
+ pgtap | public | throws_ok | text, integer, text, text | text
+ pgtap | public | throws_ok | text, text | text
+ pgtap | public | throws_ok | text, text, text | text
+ pgtap | public | todo | how_many integer | SETOF boolean
+ pgtap | public | todo | how_many integer, why text | SETOF boolean
+ pgtap | public | todo | why text | SETOF boolean
+ pgtap | public | todo | why text, how_many integer | SETOF boolean
+ pgtap | public | todo_end | | SETOF boolean
+ pgtap | public | todo_start | | SETOF boolean
+ pgtap | public | todo_start | text | SETOF boolean
+ pgtap | public | trigger_is | name, name, name | text
+ pgtap | public | trigger_is | name, name, name, name, name | text
+ pgtap | public | trigger_is | name, name, name, name, name, text | text
+ pgtap | public | trigger_is | name, name, name, text | text
+ pgtap | public | triggers_are | name, name, name[] | text
+ pgtap | public | triggers_are | name, name, name[], text | text
+ pgtap | public | triggers_are | name, name[] | text
+ pgtap | public | triggers_are | name, name[], text | text
+ pgtap | public | type_owner_is | name, name | text
+ pgtap | public | type_owner_is | name, name, name | text
+ pgtap | public | type_owner_is | name, name, name, text | text
+ pgtap | public | type_owner_is | name, name, text | text
+ pgtap | public | types_are | name, name[] | text
+ pgtap | public | types_are | name, name[], text | text
+ pgtap | public | types_are | name[] | text
+ pgtap | public | types_are | name[], text | text
+ pgtap | public | unalike | anyelement, text
| text + pgtap | public | unalike | anyelement, text, text | text + pgtap | public | unialike | anyelement, text | text + pgtap | public | unialike | anyelement, text, text | text + pgtap | public | users_are | name[] | text + pgtap | public | users_are | name[], text | text + pgtap | public | view_owner_is | name, name | text + pgtap | public | view_owner_is | name, name, name | text + pgtap | public | view_owner_is | name, name, name, text | text + pgtap | public | view_owner_is | name, name, text | text + pgtap | public | views_are | name, name[] | text + pgtap | public | views_are | name, name[], text | text + pgtap | public | views_are | name[] | text + pgtap | public | views_are | name[], text | text + pgtap | public | volatility_is | name, name, name[], text | text + pgtap | public | volatility_is | name, name, name[], text, text | text + pgtap | public | volatility_is | name, name, text | text + pgtap | public | volatility_is | name, name, text, text | text + pgtap | public | volatility_is | name, name[], text | text + pgtap | public | volatility_is | name, name[], text, text | text + pgtap | public | volatility_is | name, text | text + pgtap | public | volatility_is | name, text, text | text + plpgsql | pg_catalog | plpgsql_call_handler | | language_handler + plpgsql | pg_catalog | plpgsql_inline_handler | internal | void + plpgsql | pg_catalog | plpgsql_validator | oid | void + plpgsql_check | public | __plpgsql_show_dependency_tb | funcoid regprocedure, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) + plpgsql_check | public | __plpgsql_show_dependency_tb | name text, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) + plpgsql_check | public | plpgsql_check_function | funcoid regprocedure, relid regclass, format text, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype, without_warnings boolean, all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, constant_tracing boolean | SETOF text + plpgsql_check | public | plpgsql_check_function | name text, relid regclass, format text, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype, without_warnings boolean, all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, constant_tracing boolean | SETOF text + plpgsql_check | public | plpgsql_check_function_tb | funcoid regprocedure, relid regclass, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype, without_warnings boolean, 
all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, constant_tracing boolean | TABLE(functionid regproc, lineno integer, statement text, sqlstate text, message text, detail text, hint text, level text, "position" integer, query text, context text) + plpgsql_check | public | plpgsql_check_function_tb | name text, relid regclass, fatal_errors boolean, other_warnings boolean, performance_warnings boolean, extra_warnings boolean, security_warnings boolean, compatibility_warnings boolean, oldtable name, newtable name, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype, without_warnings boolean, all_warnings boolean, use_incomment_options boolean, incomment_options_usage_warning boolean, constant_tracing boolean | TABLE(functionid regproc, lineno integer, statement text, sqlstate text, message text, detail text, hint text, level text, "position" integer, query text, context text) + plpgsql_check | public | plpgsql_check_pragma | VARIADIC name text[] | integer + plpgsql_check | public | plpgsql_check_profiler | enable boolean | boolean + plpgsql_check | public | plpgsql_check_tracer | enable boolean, verbosity text | boolean + plpgsql_check | public | plpgsql_coverage_branches | funcoid regprocedure | double precision + plpgsql_check | public | plpgsql_coverage_branches | name text | double precision + plpgsql_check | public | plpgsql_coverage_statements | funcoid regprocedure | double precision + plpgsql_check | public | plpgsql_coverage_statements | name text | double precision + plpgsql_check | public | plpgsql_profiler_function_statements_tb | funcoid regprocedure | TABLE(stmtid integer, parent_stmtid integer, parent_note text, block_num integer, lineno integer, queryid bigint, exec_stmts bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, max_time double precision, processed_rows bigint, stmtname text) + plpgsql_check | public | plpgsql_profiler_function_statements_tb | name text | TABLE(stmtid integer, parent_stmtid integer, parent_note text, block_num integer, lineno integer, queryid bigint, exec_stmts bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, max_time double precision, processed_rows bigint, stmtname text) + plpgsql_check | public | plpgsql_profiler_function_tb | funcoid regprocedure | TABLE(lineno integer, stmt_lineno integer, queryids bigint[], cmds_on_row integer, exec_stmts bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, max_time double precision[], processed_rows bigint[], source text) + plpgsql_check | public | plpgsql_profiler_function_tb | name text | TABLE(lineno integer, stmt_lineno integer, queryids bigint[], cmds_on_row integer, exec_stmts bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, max_time double precision[], processed_rows bigint[], source text) + plpgsql_check | public | plpgsql_profiler_functions_all | | TABLE(funcoid regprocedure, exec_count bigint, exec_stmts_err bigint, total_time double precision, avg_time double precision, stddev_time double precision, min_time double precision, max_time double precision) + plpgsql_check | public | plpgsql_profiler_install_fake_queryid_hook | | void + plpgsql_check | public | plpgsql_profiler_remove_fake_queryid_hook | | void + plpgsql_check | public | plpgsql_profiler_reset | funcoid regprocedure | void + plpgsql_check | public | plpgsql_profiler_reset_all | | 
void + plpgsql_check | public | plpgsql_show_dependency_tb | fnname text, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) + plpgsql_check | public | plpgsql_show_dependency_tb | funcoid regprocedure, relid regclass, anyelememttype regtype, anyenumtype regtype, anyrangetype regtype, anycompatibletype regtype, anycompatiblerangetype regtype | TABLE(type text, oid oid, schema text, name text, params text) + postgis | public | _postgis_deprecate | oldname text, newname text, version text | void + postgis | public | _postgis_index_extent | tbl regclass, col text | box2d + postgis | public | _postgis_join_selectivity | regclass, text, regclass, text, text | double precision + postgis | public | _postgis_pgsql_version | | text + postgis | public | _postgis_scripts_pgsql_version | | text + postgis | public | _postgis_selectivity | tbl regclass, att_name text, geom geometry, mode text | double precision + postgis | public | _postgis_stats | tbl regclass, att_name text, text | text + postgis | public | _st_3ddfullywithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | _st_3ddwithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | _st_3dintersects | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_asgml | integer, geometry, integer, integer, text, text | text + postgis | public | _st_asx3d | integer, geometry, integer, integer, text | text + postgis | public | _st_bestsrid | geography | integer + postgis | public | _st_bestsrid | geography, geography | integer + postgis | public | _st_contains | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_containsproperly | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_coveredby | geog1 geography, geog2 geography | boolean + postgis | public | _st_coveredby | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_covers | geog1 geography, geog2 geography | boolean + postgis | public | _st_covers | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_crosses | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_dfullywithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | _st_distancetree | geography, geography | double precision + postgis | public | _st_distancetree | geography, geography, double precision, boolean | double precision + postgis | public | _st_distanceuncached | geography, geography | double precision + postgis | public | _st_distanceuncached | geography, geography, boolean | double precision + postgis | public | _st_distanceuncached | geography, geography, double precision, boolean | double precision + postgis | public | _st_dwithin | geog1 geography, geog2 geography, tolerance double precision, use_spheroid boolean | boolean + postgis | public | _st_dwithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | _st_dwithinuncached | geography, geography, double precision | boolean + postgis | public | _st_dwithinuncached | geography, geography, double precision, boolean | boolean + postgis | public | _st_equals | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_expand | geography, double precision | geography + postgis | public | _st_geomfromgml | text, integer | geometry + postgis | public | _st_intersects | geom1 geometry, geom2 geometry | boolean + postgis 
| public | _st_linecrossingdirection | line1 geometry, line2 geometry | integer + postgis | public | _st_longestline | geom1 geometry, geom2 geometry | geometry + postgis | public | _st_maxdistance | geom1 geometry, geom2 geometry | double precision + postgis | public | _st_orderingequals | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_overlaps | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_pointoutside | geography | geography + postgis | public | _st_sortablehash | geom geometry | bigint + postgis | public | _st_touches | geom1 geometry, geom2 geometry | boolean + postgis | public | _st_voronoi | g1 geometry, clip geometry, tolerance double precision, return_polygons boolean | geometry + postgis | public | _st_within | geom1 geometry, geom2 geometry | boolean + postgis | public | addauth | text | boolean + postgis | public | addgeometrycolumn | catalog_name character varying, schema_name character varying, table_name character varying, column_name character varying, new_srid_in integer, new_type character varying, new_dim integer, use_typmod boolean | text + postgis | public | addgeometrycolumn | schema_name character varying, table_name character varying, column_name character varying, new_srid integer, new_type character varying, new_dim integer, use_typmod boolean | text + postgis | public | addgeometrycolumn | table_name character varying, column_name character varying, new_srid integer, new_type character varying, new_dim integer, use_typmod boolean | text + postgis | public | box | box3d | box + postgis | public | box | geometry | box + postgis | public | box2d | box3d | box2d + postgis | public | box2d | geometry | box2d + postgis | public | box2d_in | cstring | box2d + postgis | public | box2d_out | box2d | cstring + postgis | public | box2df_in | cstring | box2df + postgis | public | box2df_out | box2df | cstring + postgis | public | box3d | box2d | box3d + postgis | public | box3d | geometry | box3d + postgis | public | box3d_in | cstring | box3d + postgis | public | box3d_out | box3d | cstring + postgis | public | box3dtobox | box3d | box + postgis | public | bytea | geography | bytea + postgis | public | bytea | geometry | bytea + postgis | public | checkauth | text, text | integer + postgis | public | checkauth | text, text, text | integer + postgis | public | checkauthtrigger | | trigger + postgis | public | contains_2d | box2df, box2df | boolean + postgis | public | contains_2d | box2df, geometry | boolean + postgis | public | contains_2d | geometry, box2df | boolean + postgis | public | disablelongtransactions | | text + postgis | public | dropgeometrycolumn | catalog_name character varying, schema_name character varying, table_name character varying, column_name character varying | text + postgis | public | dropgeometrycolumn | schema_name character varying, table_name character varying, column_name character varying | text + postgis | public | dropgeometrycolumn | table_name character varying, column_name character varying | text + postgis | public | dropgeometrytable | catalog_name character varying, schema_name character varying, table_name character varying | text + postgis | public | dropgeometrytable | schema_name character varying, table_name character varying | text + postgis | public | dropgeometrytable | table_name character varying | text + postgis | public | enablelongtransactions | | text + postgis | public | equals | geom1 geometry, geom2 geometry | boolean + postgis | public | find_srid | character varying, character 
varying, character varying | integer + postgis | public | geog_brin_inclusion_add_value | internal, internal, internal, internal | boolean + postgis | public | geography | bytea | geography + postgis | public | geography | geography, integer, boolean | geography + postgis | public | geography | geometry | geography + postgis | public | geography_analyze | internal | boolean + postgis | public | geography_cmp | geography, geography | integer + postgis | public | geography_distance_knn | geography, geography | double precision + postgis | public | geography_eq | geography, geography | boolean + postgis | public | geography_ge | geography, geography | boolean + postgis | public | geography_gist_compress | internal | internal + postgis | public | geography_gist_consistent | internal, geography, integer | boolean + postgis | public | geography_gist_decompress | internal | internal + postgis | public | geography_gist_distance | internal, geography, integer | double precision + postgis | public | geography_gist_penalty | internal, internal, internal | internal + postgis | public | geography_gist_picksplit | internal, internal | internal + postgis | public | geography_gist_same | box2d, box2d, internal | internal + postgis | public | geography_gist_union | bytea, internal | internal + postgis | public | geography_gt | geography, geography | boolean + postgis | public | geography_in | cstring, oid, integer | geography + postgis | public | geography_le | geography, geography | boolean + postgis | public | geography_lt | geography, geography | boolean + postgis | public | geography_out | geography | cstring + postgis | public | geography_overlaps | geography, geography | boolean + postgis | public | geography_recv | internal, oid, integer | geography + postgis | public | geography_send | geography | bytea + postgis | public | geography_spgist_choose_nd | internal, internal | void + postgis | public | geography_spgist_compress_nd | internal | internal + postgis | public | geography_spgist_config_nd | internal, internal | void + postgis | public | geography_spgist_inner_consistent_nd | internal, internal | void + postgis | public | geography_spgist_leaf_consistent_nd | internal, internal | boolean + postgis | public | geography_spgist_picksplit_nd | internal, internal | void + postgis | public | geography_typmod_in | cstring[] | integer + postgis | public | geography_typmod_out | integer | cstring + postgis | public | geom2d_brin_inclusion_add_value | internal, internal, internal, internal | boolean + postgis | public | geom3d_brin_inclusion_add_value | internal, internal, internal, internal | boolean + postgis | public | geom4d_brin_inclusion_add_value | internal, internal, internal, internal | boolean + postgis | public | geometry | box2d | geometry + postgis | public | geometry | box3d | geometry + postgis | public | geometry | bytea | geometry + postgis | public | geometry | geography | geometry + postgis | public | geometry | geometry, integer, boolean | geometry + postgis | public | geometry | path | geometry + postgis | public | geometry | point | geometry + postgis | public | geometry | polygon | geometry + postgis | public | geometry | text | geometry + postgis | public | geometry_above | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_analyze | internal | boolean + postgis | public | geometry_below | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_cmp | geom1 geometry, geom2 geometry | integer + postgis | public | geometry_contained_3d | geom1 
geometry, geom2 geometry | boolean + postgis | public | geometry_contains | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_contains_3d | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_contains_nd | geometry, geometry | boolean + postgis | public | geometry_distance_box | geom1 geometry, geom2 geometry | double precision + postgis | public | geometry_distance_centroid | geom1 geometry, geom2 geometry | double precision + postgis | public | geometry_distance_centroid_nd | geometry, geometry | double precision + postgis | public | geometry_distance_cpa | geometry, geometry | double precision + postgis | public | geometry_eq | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_ge | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_gist_compress_2d | internal | internal + postgis | public | geometry_gist_compress_nd | internal | internal + postgis | public | geometry_gist_consistent_2d | internal, geometry, integer | boolean + postgis | public | geometry_gist_consistent_nd | internal, geometry, integer | boolean + postgis | public | geometry_gist_decompress_2d | internal | internal + postgis | public | geometry_gist_decompress_nd | internal | internal + postgis | public | geometry_gist_distance_2d | internal, geometry, integer | double precision + postgis | public | geometry_gist_distance_nd | internal, geometry, integer | double precision + postgis | public | geometry_gist_penalty_2d | internal, internal, internal | internal + postgis | public | geometry_gist_penalty_nd | internal, internal, internal | internal + postgis | public | geometry_gist_picksplit_2d | internal, internal | internal + postgis | public | geometry_gist_picksplit_nd | internal, internal | internal + postgis | public | geometry_gist_same_2d | geom1 geometry, geom2 geometry, internal | internal + postgis | public | geometry_gist_same_nd | geometry, geometry, internal | internal + postgis | public | geometry_gist_sortsupport_2d | internal | void + postgis | public | geometry_gist_union_2d | bytea, internal | internal + postgis | public | geometry_gist_union_nd | bytea, internal | internal + postgis | public | geometry_gt | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_hash | geometry | integer + postgis | public | geometry_in | cstring | geometry + postgis | public | geometry_le | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_left | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_lt | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_out | geometry | cstring + postgis | public | geometry_overabove | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_overbelow | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_overlaps | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_overlaps_3d | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_overlaps_nd | geometry, geometry | boolean + postgis | public | geometry_overleft | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_overright | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_recv | internal | geometry + postgis | public | geometry_right | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_same | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_same_3d | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_same_nd | 
geometry, geometry | boolean + postgis | public | geometry_send | geometry | bytea + postgis | public | geometry_sortsupport | internal | void + postgis | public | geometry_spgist_choose_2d | internal, internal | void + postgis | public | geometry_spgist_choose_3d | internal, internal | void + postgis | public | geometry_spgist_choose_nd | internal, internal | void + postgis | public | geometry_spgist_compress_2d | internal | internal + postgis | public | geometry_spgist_compress_3d | internal | internal + postgis | public | geometry_spgist_compress_nd | internal | internal + postgis | public | geometry_spgist_config_2d | internal, internal | void + postgis | public | geometry_spgist_config_3d | internal, internal | void + postgis | public | geometry_spgist_config_nd | internal, internal | void + postgis | public | geometry_spgist_inner_consistent_2d | internal, internal | void + postgis | public | geometry_spgist_inner_consistent_3d | internal, internal | void + postgis | public | geometry_spgist_inner_consistent_nd | internal, internal | void + postgis | public | geometry_spgist_leaf_consistent_2d | internal, internal | boolean + postgis | public | geometry_spgist_leaf_consistent_3d | internal, internal | boolean + postgis | public | geometry_spgist_leaf_consistent_nd | internal, internal | boolean + postgis | public | geometry_spgist_picksplit_2d | internal, internal | void + postgis | public | geometry_spgist_picksplit_3d | internal, internal | void + postgis | public | geometry_spgist_picksplit_nd | internal, internal | void + postgis | public | geometry_typmod_in | cstring[] | integer + postgis | public | geometry_typmod_out | integer | cstring + postgis | public | geometry_within | geom1 geometry, geom2 geometry | boolean + postgis | public | geometry_within_nd | geometry, geometry | boolean + postgis | public | geometrytype | geography | text + postgis | public | geometrytype | geometry | text + postgis | public | geomfromewkb | bytea | geometry + postgis | public | geomfromewkt | text | geometry + postgis | public | get_proj4_from_srid | integer | text + postgis | public | gettransactionid | | xid + postgis | public | gidx_in | cstring | gidx + postgis | public | gidx_out | gidx | cstring + postgis | public | gserialized_gist_joinsel_2d | internal, oid, internal, smallint | double precision + postgis | public | gserialized_gist_joinsel_nd | internal, oid, internal, smallint | double precision + postgis | public | gserialized_gist_sel_2d | internal, oid, internal, integer | double precision + postgis | public | gserialized_gist_sel_nd | internal, oid, internal, integer | double precision + postgis | public | is_contained_2d | box2df, box2df | boolean + postgis | public | is_contained_2d | box2df, geometry | boolean + postgis | public | is_contained_2d | geometry, box2df | boolean + postgis | public | json | geometry | json + postgis | public | jsonb | geometry | jsonb + postgis | public | lockrow | text, text, text | integer + postgis | public | lockrow | text, text, text, text | integer + postgis | public | lockrow | text, text, text, text, timestamp without time zone | integer + postgis | public | lockrow | text, text, text, timestamp without time zone | integer + postgis | public | longtransactionsenabled | | boolean + postgis | public | overlaps_2d | box2df, box2df | boolean + postgis | public | overlaps_2d | box2df, geometry | boolean + postgis | public | overlaps_2d | geometry, box2df | boolean + postgis | public | overlaps_geog | geography, gidx | boolean + postgis | public 
| overlaps_geog | gidx, geography | boolean + postgis | public | overlaps_geog | gidx, gidx | boolean + postgis | public | overlaps_nd | geometry, gidx | boolean + postgis | public | overlaps_nd | gidx, geometry | boolean + postgis | public | overlaps_nd | gidx, gidx | boolean + postgis | public | path | geometry | path + postgis | public | pgis_asflatgeobuf_finalfn | internal | bytea + postgis | public | pgis_asflatgeobuf_transfn | internal, anyelement | internal + postgis | public | pgis_asflatgeobuf_transfn | internal, anyelement, boolean | internal + postgis | public | pgis_asflatgeobuf_transfn | internal, anyelement, boolean, text | internal + postgis | public | pgis_asgeobuf_finalfn | internal | bytea + postgis | public | pgis_asgeobuf_transfn | internal, anyelement | internal + postgis | public | pgis_asgeobuf_transfn | internal, anyelement, text | internal + postgis | public | pgis_asmvt_combinefn | internal, internal | internal + postgis | public | pgis_asmvt_deserialfn | bytea, internal | internal + postgis | public | pgis_asmvt_finalfn | internal | bytea + postgis | public | pgis_asmvt_serialfn | internal | bytea + postgis | public | pgis_asmvt_transfn | internal, anyelement | internal + postgis | public | pgis_asmvt_transfn | internal, anyelement, text | internal + postgis | public | pgis_asmvt_transfn | internal, anyelement, text, integer | internal + postgis | public | pgis_asmvt_transfn | internal, anyelement, text, integer, text | internal + postgis | public | pgis_asmvt_transfn | internal, anyelement, text, integer, text, text | internal + postgis | public | pgis_geometry_accum_transfn | internal, geometry | internal + postgis | public | pgis_geometry_accum_transfn | internal, geometry, double precision | internal + postgis | public | pgis_geometry_accum_transfn | internal, geometry, double precision, integer | internal + postgis | public | pgis_geometry_clusterintersecting_finalfn | internal | geometry[] + postgis | public | pgis_geometry_clusterwithin_finalfn | internal | geometry[] + postgis | public | pgis_geometry_collect_finalfn | internal | geometry + postgis | public | pgis_geometry_makeline_finalfn | internal | geometry + postgis | public | pgis_geometry_polygonize_finalfn | internal | geometry + postgis | public | pgis_geometry_union_parallel_combinefn | internal, internal | internal + postgis | public | pgis_geometry_union_parallel_deserialfn | bytea, internal | internal + postgis | public | pgis_geometry_union_parallel_finalfn | internal | geometry + postgis | public | pgis_geometry_union_parallel_serialfn | internal | bytea + postgis | public | pgis_geometry_union_parallel_transfn | internal, geometry | internal + postgis | public | pgis_geometry_union_parallel_transfn | internal, geometry, double precision | internal + postgis | public | point | geometry | point + postgis | public | polygon | geometry | polygon + postgis | public | populate_geometry_columns | tbl_oid oid, use_typmod boolean | integer + postgis | public | populate_geometry_columns | use_typmod boolean | text + postgis | public | postgis_addbbox | geometry | geometry + postgis | public | postgis_cache_bbox | | trigger + postgis | public | postgis_constraint_dims | geomschema text, geomtable text, geomcolumn text | integer + postgis | public | postgis_constraint_srid | geomschema text, geomtable text, geomcolumn text | integer + postgis | public | postgis_constraint_type | geomschema text, geomtable text, geomcolumn text | character varying + postgis | public | postgis_dropbbox | geometry | 
geometry + postgis | public | postgis_extensions_upgrade | | text + postgis | public | postgis_full_version | | text + postgis | public | postgis_geos_noop | geometry | geometry + postgis | public | postgis_geos_version | | text + postgis | public | postgis_getbbox | geometry | box2d + postgis | public | postgis_hasbbox | geometry | boolean + postgis | public | postgis_index_supportfn | internal | internal + postgis | public | postgis_lib_build_date | | text + postgis | public | postgis_lib_revision | | text + postgis | public | postgis_lib_version | | text + postgis | public | postgis_libjson_version | | text + postgis | public | postgis_liblwgeom_version | | text + postgis | public | postgis_libprotobuf_version | | text + postgis | public | postgis_libxml_version | | text + postgis | public | postgis_noop | geometry | geometry + postgis | public | postgis_proj_version | | text + postgis | public | postgis_scripts_build_date | | text + postgis | public | postgis_scripts_installed | | text + postgis | public | postgis_scripts_released | | text + postgis | public | postgis_svn_version | | text + postgis | public | postgis_transform_geometry | geom geometry, text, text, integer | geometry + postgis | public | postgis_type_name | geomname character varying, coord_dimension integer, use_new_name boolean | character varying + postgis | public | postgis_typmod_dims | integer | integer + postgis | public | postgis_typmod_srid | integer | integer + postgis | public | postgis_typmod_type | integer | text + postgis | public | postgis_version | | text + postgis | public | postgis_wagyu_version | | text + postgis | public | spheroid_in | cstring | spheroid + postgis | public | spheroid_out | spheroid | cstring + postgis | public | st_3dclosestpoint | geom1 geometry, geom2 geometry | geometry + postgis | public | st_3ddfullywithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | st_3ddistance | geom1 geometry, geom2 geometry | double precision + postgis | public | st_3ddwithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | st_3dextent | geometry | box3d + postgis | public | st_3dintersects | geom1 geometry, geom2 geometry | boolean + postgis | public | st_3dlength | geometry | double precision + postgis | public | st_3dlineinterpolatepoint | geometry, double precision | geometry + postgis | public | st_3dlongestline | geom1 geometry, geom2 geometry | geometry + postgis | public | st_3dmakebox | geom1 geometry, geom2 geometry | box3d + postgis | public | st_3dmaxdistance | geom1 geometry, geom2 geometry | double precision + postgis | public | st_3dperimeter | geometry | double precision + postgis | public | st_3dshortestline | geom1 geometry, geom2 geometry | geometry + postgis | public | st_addmeasure | geometry, double precision, double precision | geometry + postgis | public | st_addpoint | geom1 geometry, geom2 geometry | geometry + postgis | public | st_addpoint | geom1 geometry, geom2 geometry, integer | geometry + postgis | public | st_affine | geometry, double precision, double precision, double precision, double precision, double precision, double precision | geometry + postgis | public | st_affine | geometry, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision, double precision | geometry + postgis | public | st_angle | line1 geometry, line2 geometry | double precision + postgis | 
public | st_angle | pt1 geometry, pt2 geometry, pt3 geometry, pt4 geometry | double precision + postgis | public | st_area | geog geography, use_spheroid boolean | double precision + postgis | public | st_area | geometry | double precision + postgis | public | st_area | text | double precision + postgis | public | st_area2d | geometry | double precision + postgis | public | st_asbinary | geography | bytea + postgis | public | st_asbinary | geography, text | bytea + postgis | public | st_asbinary | geometry | bytea + postgis | public | st_asbinary | geometry, text | bytea + postgis | public | st_asencodedpolyline | geom geometry, nprecision integer | text + postgis | public | st_asewkb | geometry | bytea + postgis | public | st_asewkb | geometry, text | bytea + postgis | public | st_asewkt | geography | text + postgis | public | st_asewkt | geography, integer | text + postgis | public | st_asewkt | geometry | text + postgis | public | st_asewkt | geometry, integer | text + postgis | public | st_asewkt | text | text + postgis | public | st_asflatgeobuf | anyelement | bytea + postgis | public | st_asflatgeobuf | anyelement, boolean | bytea + postgis | public | st_asflatgeobuf | anyelement, boolean, text | bytea + postgis | public | st_asgeobuf | anyelement | bytea + postgis | public | st_asgeobuf | anyelement, text | bytea + postgis | public | st_asgeojson | geog geography, maxdecimaldigits integer, options integer | text + postgis | public | st_asgeojson | geom geometry, maxdecimaldigits integer, options integer | text + postgis | public | st_asgeojson | r record, geom_column text, maxdecimaldigits integer, pretty_bool boolean | text + postgis | public | st_asgeojson | text | text + postgis | public | st_asgml | geog geography, maxdecimaldigits integer, options integer, nprefix text, id text | text + postgis | public | st_asgml | geom geometry, maxdecimaldigits integer, options integer | text + postgis | public | st_asgml | text | text + postgis | public | st_asgml | version integer, geog geography, maxdecimaldigits integer, options integer, nprefix text, id text | text + postgis | public | st_asgml | version integer, geom geometry, maxdecimaldigits integer, options integer, nprefix text, id text | text + postgis | public | st_ashexewkb | geometry | text + postgis | public | st_ashexewkb | geometry, text | text + postgis | public | st_askml | geog geography, maxdecimaldigits integer, nprefix text | text + postgis | public | st_askml | geom geometry, maxdecimaldigits integer, nprefix text | text + postgis | public | st_askml | text | text + postgis | public | st_aslatlontext | geom geometry, tmpl text | text + postgis | public | st_asmarc21 | geom geometry, format text | text + postgis | public | st_asmvt | anyelement | bytea + postgis | public | st_asmvt | anyelement, text | bytea + postgis | public | st_asmvt | anyelement, text, integer | bytea + postgis | public | st_asmvt | anyelement, text, integer, text | bytea + postgis | public | st_asmvt | anyelement, text, integer, text, text | bytea + postgis | public | st_asmvtgeom | geom geometry, bounds box2d, extent integer, buffer integer, clip_geom boolean | geometry + postgis | public | st_assvg | geog geography, rel integer, maxdecimaldigits integer | text + postgis | public | st_assvg | geom geometry, rel integer, maxdecimaldigits integer | text + postgis | public | st_assvg | text | text + postgis | public | st_astext | geography | text + postgis | public | st_astext | geography, integer | text + postgis | public | st_astext | geometry | 
text + postgis | public | st_astext | geometry, integer | text + postgis | public | st_astext | text | text + postgis | public | st_astwkb | geom geometry, prec integer, prec_z integer, prec_m integer, with_sizes boolean, with_boxes boolean | bytea + postgis | public | st_astwkb | geom geometry[], ids bigint[], prec integer, prec_z integer, prec_m integer, with_sizes boolean, with_boxes boolean | bytea + postgis | public | st_asx3d | geom geometry, maxdecimaldigits integer, options integer | text + postgis | public | st_azimuth | geog1 geography, geog2 geography | double precision + postgis | public | st_azimuth | geom1 geometry, geom2 geometry | double precision + postgis | public | st_bdmpolyfromtext | text, integer | geometry + postgis | public | st_bdpolyfromtext | text, integer | geometry + postgis | public | st_boundary | geometry | geometry + postgis | public | st_boundingdiagonal | geom geometry, fits boolean | geometry + postgis | public | st_box2dfromgeohash | text, integer | box2d + postgis | public | st_buffer | geography, double precision | geography + postgis | public | st_buffer | geography, double precision, integer | geography + postgis | public | st_buffer | geography, double precision, text | geography + postgis | public | st_buffer | geom geometry, radius double precision, options text | geometry + postgis | public | st_buffer | geom geometry, radius double precision, quadsegs integer | geometry + postgis | public | st_buffer | text, double precision | geometry + postgis | public | st_buffer | text, double precision, integer | geometry + postgis | public | st_buffer | text, double precision, text | geometry + postgis | public | st_buildarea | geometry | geometry + postgis | public | st_centroid | geography, use_spheroid boolean | geography + postgis | public | st_centroid | geometry | geometry + postgis | public | st_centroid | text | geometry + postgis | public | st_chaikinsmoothing | geometry, integer, boolean | geometry + postgis | public | st_cleangeometry | geometry | geometry + postgis | public | st_clipbybox2d | geom geometry, box box2d | geometry + postgis | public | st_closestpoint | geom1 geometry, geom2 geometry | geometry + postgis | public | st_closestpointofapproach | geometry, geometry | double precision + postgis | public | st_clusterdbscan | geometry, eps double precision, minpoints integer | integer + postgis | public | st_clusterintersecting | geometry | geometry[] + postgis | public | st_clusterintersecting | geometry[] | geometry[] + postgis | public | st_clusterkmeans | geom geometry, k integer, max_radius double precision | integer + postgis | public | st_clusterwithin | geometry, double precision | geometry[] + postgis | public | st_clusterwithin | geometry[], double precision | geometry[] + postgis | public | st_collect | geom1 geometry, geom2 geometry | geometry + postgis | public | st_collect | geometry | geometry + postgis | public | st_collect | geometry[] | geometry + postgis | public | st_collectionextract | geometry | geometry + postgis | public | st_collectionextract | geometry, integer | geometry + postgis | public | st_collectionhomogenize | geometry | geometry + postgis | public | st_combinebbox | box2d, geometry | box2d + postgis | public | st_combinebbox | box3d, box3d | box3d + postgis | public | st_combinebbox | box3d, geometry | box3d + postgis | public | st_concavehull | param_geom geometry, param_pctconvex double precision, param_allow_holes boolean | geometry + postgis | public | st_contains | geom1 geometry, geom2 geometry | 
boolean + postgis | public | st_containsproperly | geom1 geometry, geom2 geometry | boolean + postgis | public | st_convexhull | geometry | geometry + postgis | public | st_coorddim | geometry geometry | smallint + postgis | public | st_coveredby | geog1 geography, geog2 geography | boolean + postgis | public | st_coveredby | geom1 geometry, geom2 geometry | boolean + postgis | public | st_coveredby | text, text | boolean + postgis | public | st_covers | geog1 geography, geog2 geography | boolean + postgis | public | st_covers | geom1 geometry, geom2 geometry | boolean + postgis | public | st_covers | text, text | boolean + postgis | public | st_cpawithin | geometry, geometry, double precision | boolean + postgis | public | st_crosses | geom1 geometry, geom2 geometry | boolean + postgis | public | st_curvetoline | geom geometry, tol double precision, toltype integer, flags integer | geometry + postgis | public | st_delaunaytriangles | g1 geometry, tolerance double precision, flags integer | geometry + postgis | public | st_dfullywithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | st_difference | geom1 geometry, geom2 geometry, gridsize double precision | geometry + postgis | public | st_dimension | geometry | integer + postgis | public | st_disjoint | geom1 geometry, geom2 geometry | boolean + postgis | public | st_distance | geog1 geography, geog2 geography, use_spheroid boolean | double precision + postgis | public | st_distance | geom1 geometry, geom2 geometry | double precision + postgis | public | st_distance | text, text | double precision + postgis | public | st_distancecpa | geometry, geometry | double precision + postgis | public | st_distancesphere | geom1 geometry, geom2 geometry | double precision + postgis | public | st_distancesphere | geom1 geometry, geom2 geometry, radius double precision | double precision + postgis | public | st_distancespheroid | geom1 geometry, geom2 geometry | double precision + postgis | public | st_distancespheroid | geom1 geometry, geom2 geometry, spheroid | double precision + postgis | public | st_dump | geometry | SETOF geometry_dump + postgis | public | st_dumppoints | geometry | SETOF geometry_dump + postgis | public | st_dumprings | geometry | SETOF geometry_dump + postgis | public | st_dumpsegments | geometry | SETOF geometry_dump + postgis | public | st_dwithin | geog1 geography, geog2 geography, tolerance double precision, use_spheroid boolean | boolean + postgis | public | st_dwithin | geom1 geometry, geom2 geometry, double precision | boolean + postgis | public | st_dwithin | text, text, double precision | boolean + postgis | public | st_endpoint | geometry | geometry + postgis | public | st_envelope | geometry | geometry + postgis | public | st_equals | geom1 geometry, geom2 geometry | boolean + postgis | public | st_estimatedextent | text, text | box2d + postgis | public | st_estimatedextent | text, text, text | box2d + postgis | public | st_estimatedextent | text, text, text, boolean | box2d + postgis | public | st_expand | box box2d, dx double precision, dy double precision | box2d + postgis | public | st_expand | box box3d, dx double precision, dy double precision, dz double precision | box3d + postgis | public | st_expand | box2d, double precision | box2d + postgis | public | st_expand | box3d, double precision | box3d + postgis | public | st_expand | geom geometry, dx double precision, dy double precision, dz double precision, dm double precision | geometry + postgis | public | st_expand | 
geometry, double precision | geometry + postgis | public | st_extent | geometry | box2d + postgis | public | st_exteriorring | geometry | geometry + postgis | public | st_filterbym | geometry, double precision, double precision, boolean | geometry + postgis | public | st_findextent | text, text | box2d + postgis | public | st_findextent | text, text, text | box2d + postgis | public | st_flipcoordinates | geometry | geometry + postgis | public | st_force2d | geometry | geometry + postgis | public | st_force3d | geom geometry, zvalue double precision | geometry + postgis | public | st_force3dm | geom geometry, mvalue double precision | geometry + postgis | public | st_force3dz | geom geometry, zvalue double precision | geometry + postgis | public | st_force4d | geom geometry, zvalue double precision, mvalue double precision | geometry + postgis | public | st_forcecollection | geometry | geometry + postgis | public | st_forcecurve | geometry | geometry + postgis | public | st_forcepolygonccw | geometry | geometry + postgis | public | st_forcepolygoncw | geometry | geometry + postgis | public | st_forcerhr | geometry | geometry + postgis | public | st_forcesfs | geometry | geometry + postgis | public | st_forcesfs | geometry, version text | geometry + postgis | public | st_frechetdistance | geom1 geometry, geom2 geometry, double precision | double precision + postgis | public | st_fromflatgeobuf | anyelement, bytea | SETOF anyelement + postgis | public | st_fromflatgeobuftotable | text, text, bytea | void + postgis | public | st_generatepoints | area geometry, npoints integer | geometry + postgis | public | st_generatepoints | area geometry, npoints integer, seed integer | geometry + postgis | public | st_geogfromtext | text | geography + postgis | public | st_geogfromwkb | bytea | geography + postgis | public | st_geographyfromtext | text | geography + postgis | public | st_geohash | geog geography, maxchars integer | text + postgis | public | st_geohash | geom geometry, maxchars integer | text + postgis | public | st_geomcollfromtext | text | geometry + postgis | public | st_geomcollfromtext | text, integer | geometry + postgis | public | st_geomcollfromwkb | bytea | geometry + postgis | public | st_geomcollfromwkb | bytea, integer | geometry + postgis | public | st_geometricmedian | g geometry, tolerance double precision, max_iter integer, fail_if_not_converged boolean | geometry + postgis | public | st_geometryfromtext | text | geometry + postgis | public | st_geometryfromtext | text, integer | geometry + postgis | public | st_geometryn | geometry, integer | geometry + postgis | public | st_geometrytype | geometry | text + postgis | public | st_geomfromewkb | bytea | geometry + postgis | public | st_geomfromewkt | text | geometry + postgis | public | st_geomfromgeohash | text, integer | geometry + postgis | public | st_geomfromgeojson | json | geometry + postgis | public | st_geomfromgeojson | jsonb | geometry + postgis | public | st_geomfromgeojson | text | geometry + postgis | public | st_geomfromgml | text | geometry + postgis | public | st_geomfromgml | text, integer | geometry + postgis | public | st_geomfromkml | text | geometry + postgis | public | st_geomfrommarc21 | marc21xml text | geometry + postgis | public | st_geomfromtext | text | geometry + postgis | public | st_geomfromtext | text, integer | geometry + postgis | public | st_geomfromtwkb | bytea | geometry + postgis | public | st_geomfromwkb | bytea | geometry + postgis | public | st_geomfromwkb | bytea, integer | geometry 
+ postgis | public | st_gmltosql | text | geometry + postgis | public | st_gmltosql | text, integer | geometry + postgis | public | st_hasarc | geometry geometry | boolean + postgis | public | st_hausdorffdistance | geom1 geometry, geom2 geometry | double precision + postgis | public | st_hausdorffdistance | geom1 geometry, geom2 geometry, double precision | double precision + postgis | public | st_hexagon | size double precision, cell_i integer, cell_j integer, origin geometry | geometry + postgis | public | st_hexagongrid | size double precision, bounds geometry, OUT geom geometry, OUT i integer, OUT j integer | SETOF record + postgis | public | st_interiorringn | geometry, integer | geometry + postgis | public | st_interpolatepoint | line geometry, point geometry | double precision + postgis | public | st_intersection | geography, geography | geography + postgis | public | st_intersection | geom1 geometry, geom2 geometry, gridsize double precision | geometry + postgis | public | st_intersection | text, text | geometry + postgis | public | st_intersects | geog1 geography, geog2 geography | boolean + postgis | public | st_intersects | geom1 geometry, geom2 geometry | boolean + postgis | public | st_intersects | text, text | boolean + postgis | public | st_isclosed | geometry | boolean + postgis | public | st_iscollection | geometry | boolean + postgis | public | st_isempty | geometry | boolean + postgis | public | st_ispolygonccw | geometry | boolean + postgis | public | st_ispolygoncw | geometry | boolean + postgis | public | st_isring | geometry | boolean + postgis | public | st_issimple | geometry | boolean + postgis | public | st_isvalid | geometry | boolean + postgis | public | st_isvalid | geometry, integer | boolean + postgis | public | st_isvaliddetail | geom geometry, flags integer | valid_detail + postgis | public | st_isvalidreason | geometry | text + postgis | public | st_isvalidreason | geometry, integer | text + postgis | public | st_isvalidtrajectory | geometry | boolean + postgis | public | st_length | geog geography, use_spheroid boolean | double precision + postgis | public | st_length | geometry | double precision + postgis | public | st_length | text | double precision + postgis | public | st_length2d | geometry | double precision + postgis | public | st_length2dspheroid | geometry, spheroid | double precision + postgis | public | st_lengthspheroid | geometry, spheroid | double precision + postgis | public | st_letters | letters text, font json | geometry + postgis | public | st_linecrossingdirection | line1 geometry, line2 geometry | integer + postgis | public | st_linefromencodedpolyline | txtin text, nprecision integer | geometry + postgis | public | st_linefrommultipoint | geometry | geometry + postgis | public | st_linefromtext | text | geometry + postgis | public | st_linefromtext | text, integer | geometry + postgis | public | st_linefromwkb | bytea | geometry + postgis | public | st_linefromwkb | bytea, integer | geometry + postgis | public | st_lineinterpolatepoint | geometry, double precision | geometry + postgis | public | st_lineinterpolatepoints | geometry, double precision, repeat boolean | geometry + postgis | public | st_linelocatepoint | geom1 geometry, geom2 geometry | double precision + postgis | public | st_linemerge | geometry | geometry + postgis | public | st_linemerge | geometry, boolean | geometry + postgis | public | st_linestringfromwkb | bytea | geometry + postgis | public | st_linestringfromwkb | bytea, integer | geometry + postgis | public 
| st_linesubstring | geometry, double precision, double precision | geometry + postgis | public | st_linetocurve | geometry geometry | geometry + postgis | public | st_locatealong | geometry geometry, measure double precision, leftrightoffset double precision | geometry + postgis | public | st_locatebetween | geometry geometry, frommeasure double precision, tomeasure double precision, leftrightoffset double precision | geometry + postgis | public | st_locatebetweenelevations | geometry geometry, fromelevation double precision, toelevation double precision | geometry + postgis | public | st_longestline | geom1 geometry, geom2 geometry | geometry + postgis | public | st_m | geometry | double precision + postgis | public | st_makebox2d | geom1 geometry, geom2 geometry | box2d + postgis | public | st_makeenvelope | double precision, double precision, double precision, double precision, integer | geometry + postgis | public | st_makeline | geom1 geometry, geom2 geometry | geometry + postgis | public | st_makeline | geometry | geometry + postgis | public | st_makeline | geometry[] | geometry + postgis | public | st_makepoint | double precision, double precision | geometry + postgis | public | st_makepoint | double precision, double precision, double precision | geometry + postgis | public | st_makepoint | double precision, double precision, double precision, double precision | geometry + postgis | public | st_makepointm | double precision, double precision, double precision | geometry + postgis | public | st_makepolygon | geometry | geometry + postgis | public | st_makepolygon | geometry, geometry[] | geometry + postgis | public | st_makevalid | geom geometry, params text | geometry + postgis | public | st_makevalid | geometry | geometry + postgis | public | st_maxdistance | geom1 geometry, geom2 geometry | double precision + postgis | public | st_maximuminscribedcircle | geometry, OUT center geometry, OUT nearest geometry, OUT radius double precision | record + postgis | public | st_memcollect | geometry | geometry + postgis | public | st_memsize | geometry | integer + postgis | public | st_memunion | geometry | geometry + postgis | public | st_minimumboundingcircle | inputgeom geometry, segs_per_quarter integer | geometry + postgis | public | st_minimumboundingradius | geometry, OUT center geometry, OUT radius double precision | record + postgis | public | st_minimumclearance | geometry | double precision + postgis | public | st_minimumclearanceline | geometry | geometry + postgis | public | st_mlinefromtext | text | geometry + postgis | public | st_mlinefromtext | text, integer | geometry + postgis | public | st_mlinefromwkb | bytea | geometry + postgis | public | st_mlinefromwkb | bytea, integer | geometry + postgis | public | st_mpointfromtext | text | geometry + postgis | public | st_mpointfromtext | text, integer | geometry + postgis | public | st_mpointfromwkb | bytea | geometry + postgis | public | st_mpointfromwkb | bytea, integer | geometry + postgis | public | st_mpolyfromtext | text | geometry + postgis | public | st_mpolyfromtext | text, integer | geometry + postgis | public | st_mpolyfromwkb | bytea | geometry + postgis | public | st_mpolyfromwkb | bytea, integer | geometry + postgis | public | st_multi | geometry | geometry + postgis | public | st_multilinefromwkb | bytea | geometry + postgis | public | st_multilinestringfromtext | text | geometry + postgis | public | st_multilinestringfromtext | text, integer | geometry + postgis | public | st_multipointfromtext | text | 
geometry
+ postgis | public | st_multipointfromwkb | bytea | geometry
+ postgis | public | st_multipointfromwkb | bytea, integer | geometry
+ postgis | public | st_multipolyfromwkb | bytea | geometry
+ postgis | public | st_multipolyfromwkb | bytea, integer | geometry
+ postgis | public | st_multipolygonfromtext | text | geometry
+ postgis | public | st_multipolygonfromtext | text, integer | geometry
+ postgis | public | st_ndims | geometry | smallint
+ postgis | public | st_node | g geometry | geometry
+ postgis | public | st_normalize | geom geometry | geometry
+ postgis | public | st_npoints | geometry | integer
+ postgis | public | st_nrings | geometry | integer
+ postgis | public | st_numgeometries | geometry | integer
+ postgis | public | st_numinteriorring | geometry | integer
+ postgis | public | st_numinteriorrings | geometry | integer
+ postgis | public | st_numpatches | geometry | integer
+ postgis | public | st_numpoints | geometry | integer
+ postgis | public | st_offsetcurve | line geometry, distance double precision, params text | geometry
+ postgis | public | st_orderingequals | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_orientedenvelope | geometry | geometry
+ postgis | public | st_overlaps | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_patchn | geometry, integer | geometry
+ postgis | public | st_perimeter | geog geography, use_spheroid boolean | double precision
+ postgis | public | st_perimeter | geometry | double precision
+ postgis | public | st_perimeter2d | geometry | double precision
+ postgis | public | st_point | double precision, double precision | geometry
+ postgis | public | st_point | double precision, double precision, srid integer | geometry
+ postgis | public | st_pointfromgeohash | text, integer | geometry
+ postgis | public | st_pointfromtext | text | geometry
+ postgis | public | st_pointfromtext | text, integer | geometry
+ postgis | public | st_pointfromwkb | bytea | geometry
+ postgis | public | st_pointfromwkb | bytea, integer | geometry
+ postgis | public | st_pointinsidecircle | geometry, double precision, double precision, double precision | boolean
+ postgis | public | st_pointm | xcoordinate double precision, ycoordinate double precision, mcoordinate double precision, srid integer | geometry
+ postgis | public | st_pointn | geometry, integer | geometry
+ postgis | public | st_pointonsurface | geometry | geometry
+ postgis | public | st_points | geometry | geometry
+ postgis | public | st_pointz | xcoordinate double precision, ycoordinate double precision, zcoordinate double precision, srid integer | geometry
+ postgis | public | st_pointzm | xcoordinate double precision, ycoordinate double precision, zcoordinate double precision, mcoordinate double precision, srid integer | geometry
+ postgis | public | st_polyfromtext | text | geometry
+ postgis | public | st_polyfromtext | text, integer | geometry
+ postgis | public | st_polyfromwkb | bytea | geometry
+ postgis | public | st_polyfromwkb | bytea, integer | geometry
+ postgis | public | st_polygon | geometry, integer | geometry
+ postgis | public | st_polygonfromtext | text | geometry
+ postgis | public | st_polygonfromtext | text, integer | geometry
+ postgis | public | st_polygonfromwkb | bytea | geometry
+ postgis | public | st_polygonfromwkb | bytea, integer | geometry
+ postgis | public | st_polygonize | geometry | geometry
+ postgis | public | st_polygonize | geometry[] | geometry
+ postgis | public | st_project | geog geography, distance double precision, azimuth double precision | geography
+ postgis | public | st_quantizecoordinates | g geometry, prec_x integer, prec_y integer, prec_z integer, prec_m integer | geometry
+ postgis | public | st_reduceprecision | geom geometry, gridsize double precision | geometry
+ postgis | public | st_relate | geom1 geometry, geom2 geometry | text
+ postgis | public | st_relate | geom1 geometry, geom2 geometry, integer | text
+ postgis | public | st_relate | geom1 geometry, geom2 geometry, text | boolean
+ postgis | public | st_relatematch | text, text | boolean
+ postgis | public | st_removepoint | geometry, integer | geometry
+ postgis | public | st_removerepeatedpoints | geom geometry, tolerance double precision | geometry
+ postgis | public | st_reverse | geometry | geometry
+ postgis | public | st_rotate | geometry, double precision | geometry
+ postgis | public | st_rotate | geometry, double precision, double precision, double precision | geometry
+ postgis | public | st_rotate | geometry, double precision, geometry | geometry
+ postgis | public | st_rotatex | geometry, double precision | geometry
+ postgis | public | st_rotatey | geometry, double precision | geometry
+ postgis | public | st_rotatez | geometry, double precision | geometry
+ postgis | public | st_scale | geometry, double precision, double precision | geometry
+ postgis | public | st_scale | geometry, double precision, double precision, double precision | geometry
+ postgis | public | st_scale | geometry, geometry | geometry
+ postgis | public | st_scale | geometry, geometry, origin geometry | geometry
+ postgis | public | st_scroll | geometry, geometry | geometry
+ postgis | public | st_segmentize | geog geography, max_segment_length double precision | geography
+ postgis | public | st_segmentize | geometry, double precision | geometry
+ postgis | public | st_seteffectivearea | geometry, double precision, integer | geometry
+ postgis | public | st_setpoint | geometry, integer, geometry | geometry
+ postgis | public | st_setsrid | geog geography, srid integer | geography
+ postgis | public | st_setsrid | geom geometry, srid integer | geometry
+ postgis | public | st_sharedpaths | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_shiftlongitude | geometry | geometry
+ postgis | public | st_shortestline | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_simplify | geometry, double precision | geometry
+ postgis | public | st_simplify | geometry, double precision, boolean | geometry
+ postgis | public | st_simplifypolygonhull | geom geometry, vertex_fraction double precision, is_outer boolean | geometry
+ postgis | public | st_simplifypreservetopology | geometry, double precision | geometry
+ postgis | public | st_simplifyvw | geometry, double precision | geometry
+ postgis | public | st_snap | geom1 geometry, geom2 geometry, double precision | geometry
+ postgis | public | st_snaptogrid | geom1 geometry, geom2 geometry, double precision, double precision, double precision, double precision | geometry
+ postgis | public | st_snaptogrid | geometry, double precision | geometry
+ postgis | public | st_snaptogrid | geometry, double precision, double precision | geometry
+ postgis | public | st_snaptogrid | geometry, double precision, double precision, double precision, double precision | geometry
+ postgis | public | st_split | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_square | size double precision, cell_i integer, cell_j integer, origin geometry | geometry
+ postgis | public | st_squaregrid | size double precision, bounds geometry, OUT geom geometry, OUT i integer, OUT j integer | SETOF record
+ postgis | public | st_srid | geog geography | integer
+ postgis | public | st_srid | geom geometry | integer
+ postgis | public | st_startpoint | geometry | geometry
+ postgis | public | st_subdivide | geom geometry, maxvertices integer, gridsize double precision | SETOF geometry
+ postgis | public | st_summary | geography | text
+ postgis | public | st_summary | geometry | text
+ postgis | public | st_swapordinates | geom geometry, ords cstring | geometry
+ postgis | public | st_symdifference | geom1 geometry, geom2 geometry, gridsize double precision | geometry
+ postgis | public | st_symmetricdifference | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_tileenvelope | zoom integer, x integer, y integer, bounds geometry, margin double precision | geometry
+ postgis | public | st_touches | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_transform | geom geometry, from_proj text, to_proj text | geometry
+ postgis | public | st_transform | geom geometry, from_proj text, to_srid integer | geometry
+ postgis | public | st_transform | geom geometry, to_proj text | geometry
+ postgis | public | st_transform | geometry, integer | geometry
+ postgis | public | st_translate | geometry, double precision, double precision | geometry
+ postgis | public | st_translate | geometry, double precision, double precision, double precision | geometry
+ postgis | public | st_transscale | geometry, double precision, double precision, double precision, double precision | geometry
+ postgis | public | st_triangulatepolygon | g1 geometry | geometry
+ postgis | public | st_unaryunion | geometry, gridsize double precision | geometry
+ postgis | public | st_union | geom1 geometry, geom2 geometry | geometry
+ postgis | public | st_union | geom1 geometry, geom2 geometry, gridsize double precision | geometry
+ postgis | public | st_union | geometry | geometry
+ postgis | public | st_union | geometry, gridsize double precision | geometry
+ postgis | public | st_union | geometry[] | geometry
+ postgis | public | st_voronoilines | g1 geometry, tolerance double precision, extend_to geometry | geometry
+ postgis | public | st_voronoipolygons | g1 geometry, tolerance double precision, extend_to geometry | geometry
+ postgis | public | st_within | geom1 geometry, geom2 geometry | boolean
+ postgis | public | st_wkbtosql | wkb bytea | geometry
+ postgis | public | st_wkttosql | text | geometry
+ postgis | public | st_wrapx | geom geometry, wrap double precision, move double precision | geometry
+ postgis | public | st_x | geometry | double precision
+ postgis | public | st_xmax | box3d | double precision
+ postgis | public | st_xmin | box3d | double precision
+ postgis | public | st_y | geometry | double precision
+ postgis | public | st_ymax | box3d | double precision
+ postgis | public | st_ymin | box3d | double precision
+ postgis | public | st_z | geometry | double precision
+ postgis | public | st_zmax | box3d | double precision
+ postgis | public | st_zmflag | geometry | smallint
+ postgis | public | st_zmin | box3d | double precision
+ postgis | public | text | geometry | text
+ postgis | public | unlockrows | text | integer
+ postgis | public | updategeometrysrid | catalogn_name character varying, schema_name character varying, table_name character varying, column_name character varying, new_srid_in integer | text
+ postgis | public | updategeometrysrid | character varying, character varying, character varying, integer | text
+ postgis | public | updategeometrysrid | character varying, character varying, integer | text
+ postgis_raster | public | __st_countagg_transfn | agg agg_count, rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | agg_count
+ postgis_raster | public | _add_overview_constraint | ovschema name, ovtable name, ovcolumn name, refschema name, reftable name, refcolumn name, factor integer | boolean
+ postgis_raster | public | _add_raster_constraint | cn name, sql text | boolean
+ postgis_raster | public | _add_raster_constraint_alignment | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_blocksize | rastschema name, rasttable name, rastcolumn name, axis text | boolean
+ postgis_raster | public | _add_raster_constraint_coverage_tile | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_extent | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_nodata_values | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_num_bands | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_out_db | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_pixel_types | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_scale | rastschema name, rasttable name, rastcolumn name, axis character | boolean
+ postgis_raster | public | _add_raster_constraint_spatially_unique | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _add_raster_constraint_srid | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_overview_constraint | ovschema name, ovtable name, ovcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint | rastschema name, rasttable name, cn name | boolean
+ postgis_raster | public | _drop_raster_constraint_alignment | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_blocksize | rastschema name, rasttable name, rastcolumn name, axis text | boolean
+ postgis_raster | public | _drop_raster_constraint_coverage_tile | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_extent | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_nodata_values | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_num_bands | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_out_db | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_pixel_types | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_regular_blocking | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_scale | rastschema name, rasttable name, rastcolumn name, axis character | boolean
+ postgis_raster | public | _drop_raster_constraint_spatially_unique | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _drop_raster_constraint_srid | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _overview_constraint | ov raster, factor integer, refschema name, reftable name, refcolumn name | boolean
+ postgis_raster | public | _overview_constraint_info | ovschema name, ovtable name, ovcolumn name, OUT refschema name, OUT reftable name, OUT refcolumn name, OUT factor integer | record
+ postgis_raster | public | _raster_constraint_info_alignment | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _raster_constraint_info_blocksize | rastschema name, rasttable name, rastcolumn name, axis text | integer
+ postgis_raster | public | _raster_constraint_info_coverage_tile | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _raster_constraint_info_extent | rastschema name, rasttable name, rastcolumn name | geometry
+ postgis_raster | public | _raster_constraint_info_index | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _raster_constraint_info_nodata_values | rastschema name, rasttable name, rastcolumn name | double precision[]
+ postgis_raster | public | _raster_constraint_info_num_bands | rastschema name, rasttable name, rastcolumn name | integer
+ postgis_raster | public | _raster_constraint_info_out_db | rastschema name, rasttable name, rastcolumn name | boolean[]
+ postgis_raster | public | _raster_constraint_info_pixel_types | rastschema name, rasttable name, rastcolumn name | text[]
+ postgis_raster | public | _raster_constraint_info_regular_blocking | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _raster_constraint_info_scale | rastschema name, rasttable name, rastcolumn name, axis character | double precision
+ postgis_raster | public | _raster_constraint_info_spatially_unique | rastschema name, rasttable name, rastcolumn name | boolean
+ postgis_raster | public | _raster_constraint_info_srid | rastschema name, rasttable name, rastcolumn name | integer
+ postgis_raster | public | _raster_constraint_nodata_values | rast raster | numeric[]
+ postgis_raster | public | _raster_constraint_out_db | rast raster | boolean[]
+ postgis_raster | public | _raster_constraint_pixel_types | rast raster | text[]
+ postgis_raster | public | _st_aspect4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | _st_asraster | geom geometry, scalex double precision, scaley double precision, width integer, height integer, pixeltype text[], value double precision[], nodataval double precision[], upperleftx double precision, upperlefty double precision, gridx double precision, gridy double precision, skewx double precision, skewy double precision, touched boolean | raster
+ postgis_raster | public | _st_clip | rast raster, nband integer[], geom geometry, nodataval double precision[], crop boolean | raster
+ postgis_raster | public | _st_colormap | rast raster, nband integer, colormap text, method text | raster
+ postgis_raster | public | _st_contains | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | _st_containsproperly | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | _st_convertarray4ma | value double precision[] | double precision[]
+ postgis_raster | public | _st_count | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | bigint
+ postgis_raster | public | _st_countagg_finalfn | agg agg_count | bigint
+ postgis_raster | public | _st_countagg_transfn | agg agg_count, rast raster, exclude_nodata_value boolean | agg_count
+ postgis_raster | public | _st_countagg_transfn | agg agg_count, rast raster, nband integer, exclude_nodata_value boolean | agg_count
+ postgis_raster | public | _st_countagg_transfn | agg agg_count, rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | agg_count
+ postgis_raster | public | _st_coveredby | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | _st_covers | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | _st_dfullywithin | rast1 raster, nband1 integer, rast2 raster, nband2 integer, distance double precision | boolean
+ postgis_raster | public | _st_dwithin | rast1 raster, nband1 integer, rast2 raster, nband2 integer, distance double precision | boolean
+ postgis_raster | public | _st_gdalwarp | rast raster, algorithm text, maxerr double precision, srid integer, scalex double precision, scaley double precision, gridx double precision, gridy double precision, skewx double precision, skewy double precision, width integer, height integer | raster
+ postgis_raster | public | _st_grayscale4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | _st_hillshade4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | _st_histogram | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, bins integer, width double precision[], "right" boolean, min double precision, max double precision, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record
+ postgis_raster | public | _st_intersects | geom geometry, rast raster, nband integer | boolean
+ postgis_raster | public | _st_intersects | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | _st_mapalgebra | rastbandargset rastbandarg[], callbackfunc regprocedure, pixeltype text, distancex integer, distancey integer, extenttype text, customextent raster, mask double precision[], weighted boolean, VARIADIC userargs text[] | raster
+ postgis_raster | public | _st_mapalgebra | rastbandargset rastbandarg[], expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster
+ postgis_raster | public | _st_neighborhood | rast raster, band integer, columnx integer, rowy integer, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[]
+ postgis_raster | public | _st_overlaps | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | _st_pixelascentroids | rast raster, band integer, columnx integer, rowy integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer)
+ postgis_raster | public | _st_pixelaspolygons | rast raster, band integer, columnx integer, rowy integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer)
+ postgis_raster | public | _st_quantile | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record
+ postgis_raster | public | _st_rastertoworldcoord | rast raster, columnx integer, rowy integer, OUT longitude double precision, OUT latitude double precision | record
+ postgis_raster | public | _st_reclass | rast raster, VARIADIC reclassargset reclassarg[] | raster
+ postgis_raster | public | _st_roughness4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | _st_samealignment_finalfn | agg agg_samealignment | boolean
+ postgis_raster | public | _st_samealignment_transfn | agg agg_samealignment, rast raster | agg_samealignment
+ postgis_raster | public | _st_setvalues | rast raster, nband integer, x integer, y integer, newvalueset double precision[], noset boolean[], hasnosetvalue boolean, nosetvalue double precision, keepnodata boolean | raster
+ postgis_raster | public | _st_slope4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | _st_summarystats | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | summarystats
+ postgis_raster | public | _st_summarystats_finalfn | internal | summarystats
+ postgis_raster | public | _st_summarystats_transfn | internal, raster, boolean, double precision | internal
+ postgis_raster | public | _st_summarystats_transfn | internal, raster, integer, boolean | internal
+ postgis_raster | public | _st_summarystats_transfn | internal, raster, integer, boolean, double precision | internal
+ postgis_raster | public | _st_tile | rast raster, width integer, height integer, nband integer[], padwithnodata boolean, nodataval double precision | SETOF raster
+ postgis_raster | public | _st_touches | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | _st_tpi4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | _st_tri4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | _st_union_finalfn | internal | raster
+ postgis_raster | public | _st_union_transfn | internal, raster | internal
+ postgis_raster | public | _st_union_transfn | internal, raster, integer | internal
+ postgis_raster | public | _st_union_transfn | internal, raster, integer, text | internal
+ postgis_raster | public | _st_union_transfn | internal, raster, text | internal
+ postgis_raster | public | _st_union_transfn | internal, raster, unionarg[] | internal
+ postgis_raster | public | _st_valuecount | rast raster, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer, OUT percent double precision | SETOF record
+ postgis_raster | public | _st_valuecount | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer, OUT percent double precision | SETOF record
+ postgis_raster | public | _st_within | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | _st_worldtorastercoord | rast raster, longitude double precision, latitude double precision, OUT columnx integer, OUT rowy integer | record
+ postgis_raster | public | _updaterastersrid | schema_name name, table_name name, column_name name, new_srid integer | boolean
+ postgis_raster | public | addoverviewconstraints | ovschema name, ovtable name, ovcolumn name, refschema name, reftable name, refcolumn name, ovfactor integer | boolean
+ postgis_raster | public | addoverviewconstraints | ovtable name, ovcolumn name, reftable name, refcolumn name, ovfactor integer | boolean
+ postgis_raster | public | addrasterconstraints | rastschema name, rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean
+ postgis_raster | public | addrasterconstraints | rastschema name, rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean
+ postgis_raster | public | addrasterconstraints | rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean
+ postgis_raster | public | addrasterconstraints | rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean
+ postgis_raster | public | box3d | raster | box3d
+ postgis_raster | public | bytea | raster | bytea
+ postgis_raster | public | dropoverviewconstraints | ovschema name, ovtable name, ovcolumn name | boolean
+ postgis_raster | public | dropoverviewconstraints | ovtable name, ovcolumn name | boolean
+ postgis_raster | public | droprasterconstraints | rastschema name, rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean
+ postgis_raster | public | droprasterconstraints | rastschema name, rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean
+ postgis_raster | public | droprasterconstraints | rasttable name, rastcolumn name, srid boolean, scale_x boolean, scale_y boolean, blocksize_x boolean, blocksize_y boolean, same_alignment boolean, regular_blocking boolean, num_bands boolean, pixel_types boolean, nodata_values boolean, out_db boolean, extent boolean | boolean
+ postgis_raster | public | droprasterconstraints | rasttable name, rastcolumn name, VARIADIC constraints text[] | boolean
+ postgis_raster | public | geometry_contained_by_raster | geometry, raster | boolean
+ postgis_raster | public | geometry_raster_contain | geometry, raster | boolean
+ postgis_raster | public | geometry_raster_overlap | geometry, raster | boolean
+ postgis_raster | public | postgis_gdal_version | | text
+ postgis_raster | public | postgis_noop | raster | geometry
+ postgis_raster | public | postgis_raster_lib_build_date | | text
+ postgis_raster | public | postgis_raster_lib_version | | text
+ postgis_raster | public | postgis_raster_scripts_installed | | text
+ postgis_raster | public | raster_above | raster, raster | boolean
+ postgis_raster | public | raster_below | raster, raster | boolean
+ postgis_raster | public | raster_contain | raster, raster | boolean
+ postgis_raster | public | raster_contained | raster, raster | boolean
+ postgis_raster | public | raster_contained_by_geometry | raster, geometry | boolean
+ postgis_raster | public | raster_eq | raster, raster | boolean
+ postgis_raster | public | raster_geometry_contain | raster, geometry | boolean
+ postgis_raster | public | raster_geometry_overlap | raster, geometry | boolean
+ postgis_raster | public | raster_hash | raster | integer
+ postgis_raster | public | raster_in | cstring | raster
+ postgis_raster | public | raster_left | raster, raster | boolean
+ postgis_raster | public | raster_out | raster | cstring
+ postgis_raster | public | raster_overabove | raster, raster | boolean
+ postgis_raster | public | raster_overbelow | raster, raster | boolean
+ postgis_raster | public | raster_overlap | raster, raster | boolean
+ postgis_raster | public | raster_overleft | raster, raster | boolean
+ postgis_raster | public | raster_overright | raster, raster | boolean
+ postgis_raster | public | raster_right | raster, raster | boolean
+ postgis_raster | public | raster_same | raster, raster | boolean
+ postgis_raster | public | st_addband | rast raster, addbandargset addbandarg[] | raster
+ postgis_raster | public | st_addband | rast raster, index integer, outdbfile text, outdbindex integer[], nodataval double precision | raster
+ postgis_raster | public | st_addband | rast raster, index integer, pixeltype text, initialvalue double precision, nodataval double precision | raster
+ postgis_raster | public | st_addband | rast raster, outdbfile text, outdbindex integer[], index integer, nodataval double precision | raster
+ postgis_raster | public | st_addband | rast raster, pixeltype text, initialvalue double precision, nodataval double precision | raster
+ postgis_raster | public | st_addband | torast raster, fromrast raster, fromband integer, torastindex integer | raster
+ postgis_raster | public | st_addband | torast raster, fromrasts raster[], fromband integer, torastindex integer | raster
+ postgis_raster | public | st_approxcount | rast raster, exclude_nodata_value boolean, sample_percent double precision | bigint
+ postgis_raster | public | st_approxcount | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | bigint
+ postgis_raster | public | st_approxcount | rast raster, nband integer, sample_percent double precision | bigint
+ postgis_raster | public | st_approxcount | rast raster, sample_percent double precision | bigint
+ postgis_raster | public | st_approxhistogram | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record
+ postgis_raster | public | st_approxhistogram | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record
+ postgis_raster | public | st_approxhistogram | rast raster, nband integer, sample_percent double precision, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record
+ postgis_raster | public | st_approxhistogram | rast raster, nband integer, sample_percent double precision, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record
+ postgis_raster | public | st_approxhistogram | rast raster, nband integer, sample_percent double precision, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record
+ postgis_raster | public | st_approxhistogram | rast raster, sample_percent double precision, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record
+ postgis_raster | public | st_approxquantile | rast raster, exclude_nodata_value boolean, quantile double precision | double precision
+ postgis_raster | public | st_approxquantile | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, quantile double precision | double precision
+ postgis_raster | public | st_approxquantile | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record
+ postgis_raster | public | st_approxquantile | rast raster, nband integer, sample_percent double precision, quantile double precision | double precision
+ postgis_raster | public | st_approxquantile | rast raster, nband integer, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record
+ postgis_raster | public | st_approxquantile | rast raster, quantile double precision | double precision
+ postgis_raster | public | st_approxquantile | rast raster, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record
+ postgis_raster | public | st_approxquantile | rast raster, sample_percent double precision, quantile double precision | double precision
+ postgis_raster | public | st_approxquantile | rast raster, sample_percent double precision, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record
+ postgis_raster | public | st_approxsummarystats | rast raster, exclude_nodata_value boolean, sample_percent double precision | summarystats
+ postgis_raster | public | st_approxsummarystats | rast raster, nband integer, exclude_nodata_value boolean, sample_percent double precision | summarystats
+ postgis_raster | public | st_approxsummarystats | rast raster, nband integer, sample_percent double precision | summarystats
+ postgis_raster | public | st_approxsummarystats | rast raster, sample_percent double precision | summarystats
+ postgis_raster | public | st_asbinary | raster, outasin boolean | bytea
+ postgis_raster | public | st_asgdalraster | rast raster, format text, options text[], srid integer | bytea
+ postgis_raster | public | st_ashexwkb | raster, outasin boolean | text
+ postgis_raster | public | st_asjpeg | rast raster, nband integer, options text[] | bytea
+ postgis_raster | public | st_asjpeg | rast raster, nband integer, quality integer | bytea
+ postgis_raster | public | st_asjpeg | rast raster, nbands integer[], options text[] | bytea
+ postgis_raster | public | st_asjpeg | rast raster, nbands integer[], quality integer | bytea
+ postgis_raster | public | st_asjpeg | rast raster, options text[] | bytea
+ postgis_raster | public | st_aspect | rast raster, nband integer, customextent raster, pixeltype text, units text, interpolate_nodata boolean | raster
+ postgis_raster | public | st_aspect | rast raster, nband integer, pixeltype text, units text, interpolate_nodata boolean | raster
+ postgis_raster | public | st_aspng | rast raster, nband integer, compression integer | bytea
+ postgis_raster | public | st_aspng | rast raster, nband integer, options text[] | bytea
+ postgis_raster | public | st_aspng | rast raster, nbands integer[], compression integer | bytea
+ postgis_raster | public | st_aspng | rast raster, nbands integer[], options text[] | bytea
+ postgis_raster | public | st_aspng | rast raster, options text[] | bytea
+ postgis_raster | public | st_asraster | geom geometry, ref raster, pixeltype text, value double precision, nodataval double precision, touched boolean | raster
+ postgis_raster | public | st_asraster | geom geometry, ref raster, pixeltype text[], value double precision[], nodataval double precision[], touched boolean | raster
+ postgis_raster | public | st_asraster | geom geometry, scalex double precision, scaley double precision, gridx double precision, gridy double precision, pixeltype text, value double precision, nodataval double precision, skewx double precision, skewy double precision, touched boolean | raster
+ postgis_raster | public | st_asraster | geom geometry, scalex double precision, scaley double precision, gridx double precision, gridy double precision, pixeltype text[], value double precision[], nodataval double precision[], skewx double precision, skewy double precision, touched boolean | raster
+ postgis_raster | public | st_asraster | geom geometry, scalex double precision, scaley double precision, pixeltype text, value double precision, nodataval double precision, upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster
+ postgis_raster | public | st_asraster | geom geometry, scalex double precision, scaley double precision, pixeltype text[], value double precision[], nodataval double precision[], upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster
+ postgis_raster | public | st_asraster | geom geometry, width integer, height integer, gridx double precision, gridy double precision, pixeltype text, value double precision, nodataval double precision, skewx double precision, skewy double precision, touched boolean | raster
+ postgis_raster | public | st_asraster | geom geometry, width integer, height integer, gridx double precision, gridy double precision, pixeltype text[], value double precision[], nodataval double precision[], skewx double precision, skewy double precision, touched boolean | raster
+ postgis_raster | public | st_asraster | geom geometry, width integer, height integer, pixeltype text, value double precision, nodataval double precision, upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster
+ postgis_raster | public | st_asraster | geom geometry, width integer, height integer, pixeltype text[], value double precision[], nodataval double precision[], upperleftx double precision, upperlefty double precision, skewx double precision, skewy double precision, touched boolean | raster
+ postgis_raster | public | st_astiff | rast raster, compression text, srid integer | bytea
+ postgis_raster | public | st_astiff | rast raster, nbands integer[], compression text, srid integer | bytea
+ postgis_raster | public | st_astiff | rast raster, nbands integer[], options text[], srid integer | bytea
+ postgis_raster | public | st_astiff | rast raster, options text[], srid integer | bytea
+ postgis_raster | public | st_aswkb | raster, outasin boolean | bytea
+ postgis_raster | public | st_band | rast raster, nband integer | raster
+ postgis_raster | public | st_band | rast raster, nbands integer[] | raster
+ postgis_raster | public | st_band | rast raster, nbands text, delimiter character | raster
+ postgis_raster | public | st_bandfilesize | rast raster, band integer | bigint
+ postgis_raster | public | st_bandfiletimestamp | rast raster, band integer | bigint
+ postgis_raster | public | st_bandisnodata | rast raster, band integer, forcechecking boolean | boolean
+ postgis_raster | public | st_bandisnodata | rast raster, forcechecking boolean | boolean
+ postgis_raster | public | st_bandmetadata | rast raster, band integer | TABLE(pixeltype text, nodatavalue double precision, isoutdb boolean, path text, outdbbandnum integer, filesize bigint, filetimestamp bigint)
+ postgis_raster | public | st_bandmetadata | rast raster, band integer[] | TABLE(bandnum integer, pixeltype text, nodatavalue double precision, isoutdb boolean, path text, outdbbandnum integer, filesize bigint, filetimestamp bigint)
+ postgis_raster | public | st_bandnodatavalue | rast raster, band integer | double precision
+ postgis_raster | public | st_bandpath | rast raster, band integer | text
+ postgis_raster | public | st_bandpixeltype | rast raster, band integer | text
+ postgis_raster | public | st_clip | rast raster, geom geometry, crop boolean | raster
+ postgis_raster | public | st_clip | rast raster, geom geometry, nodataval double precision, crop boolean | raster
+ postgis_raster | public | st_clip | rast raster, geom geometry, nodataval double precision[], crop boolean | raster
+ postgis_raster | public | st_clip | rast raster, nband integer, geom geometry, crop boolean | raster
+ postgis_raster | public | st_clip | rast raster, nband integer, geom geometry, nodataval double precision, crop boolean | raster
+ postgis_raster | public | st_clip | rast raster, nband integer[], geom geometry, nodataval double precision[], crop boolean | raster
+ postgis_raster | public | st_colormap | rast raster, colormap text, method text | raster
+ postgis_raster | public | st_colormap | rast raster, nband integer, colormap text, method text | raster
+ postgis_raster | public | st_contains | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | st_contains | rast1 raster, rast2 raster | boolean
+ postgis_raster | public | st_containsproperly | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | st_containsproperly | rast1 raster, rast2 raster | boolean
+ postgis_raster | public | st_contour | rast raster, bandnumber integer, level_interval double precision, level_base double precision, fixed_levels double precision[], polygonize boolean | TABLE(geom geometry, id integer, value double precision)
+ postgis_raster | public | st_convexhull | raster | geometry
+ postgis_raster | public | st_count | rast raster, exclude_nodata_value boolean | bigint
+ postgis_raster | public | st_count | rast raster, nband integer, exclude_nodata_value boolean | bigint
+ postgis_raster | public | st_countagg | raster, boolean | bigint
+ postgis_raster | public | st_countagg | raster, integer, boolean | bigint
+ postgis_raster | public | st_countagg | raster, integer, boolean, double precision | bigint
+ postgis_raster | public | st_coveredby | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | st_coveredby | rast1 raster, rast2 raster | boolean
+ postgis_raster | public | st_covers | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | st_covers | rast1 raster, rast2 raster | boolean
+ postgis_raster | public | st_createoverview | tab regclass, col name, factor integer, algo text | regclass
+ postgis_raster | public | st_dfullywithin | rast1 raster, nband1 integer, rast2 raster, nband2 integer, distance double precision | boolean
+ postgis_raster | public | st_dfullywithin | rast1 raster, rast2 raster, distance double precision | boolean
+ postgis_raster | public | st_disjoint | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | st_disjoint | rast1 raster, rast2 raster | boolean
+ postgis_raster | public | st_distinct4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision
+ postgis_raster | public | st_distinct4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | st_dumpaspolygons | rast raster, band integer, exclude_nodata_value boolean | SETOF geomval
+ postgis_raster | public | st_dumpvalues | rast raster, nband integer, exclude_nodata_value boolean | double precision[]
+ postgis_raster | public | st_dumpvalues | rast raster, nband integer[], exclude_nodata_value boolean | TABLE(nband integer, valarray double precision[])
+ postgis_raster | public | st_dwithin | rast1 raster, nband1 integer, rast2 raster, nband2 integer, distance double precision | boolean
+ postgis_raster | public | st_dwithin | rast1 raster, rast2 raster, distance double precision | boolean
+ postgis_raster | public | st_envelope | raster | geometry
+ postgis_raster | public | st_fromgdalraster | gdaldata bytea, srid integer | raster
+ postgis_raster | public | st_gdaldrivers | OUT idx integer, OUT short_name text, OUT long_name text, OUT can_read boolean, OUT can_write boolean, OUT create_options text | SETOF record
+ postgis_raster | public | st_georeference | rast raster, format text | text
+ postgis_raster | public | st_geotransform | raster, OUT imag double precision, OUT jmag double precision, OUT theta_i double precision, OUT theta_ij double precision, OUT xoffset double precision, OUT yoffset double precision | record
+ postgis_raster | public | st_grayscale | rast raster, redband integer, greenband integer, blueband integer, extenttype text | raster
+ postgis_raster | public | st_grayscale | rastbandargset rastbandarg[], extenttype text | raster
+ postgis_raster | public | st_hasnoband | rast raster, nband integer | boolean
+ postgis_raster | public | st_height | raster | integer
+ postgis_raster | public | st_hillshade | rast raster, nband integer, customextent raster, pixeltype text, azimuth double precision, altitude double precision, max_bright double precision, scale double precision, interpolate_nodata boolean | raster
+ postgis_raster | public | st_hillshade | rast raster, nband integer, pixeltype text, azimuth double precision, altitude double precision, max_bright double precision, scale double precision, interpolate_nodata boolean | raster
+ postgis_raster | public | st_histogram | rast raster, nband integer, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record
+ postgis_raster | public | st_histogram | rast raster, nband integer, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record
+ postgis_raster | public | st_histogram | rast raster, nband integer, exclude_nodata_value boolean, bins integer, "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record
+ postgis_raster | public | st_histogram | rast raster, nband integer, exclude_nodata_value boolean, bins integer, width double precision[], "right" boolean, OUT min double precision, OUT max double precision, OUT count bigint, OUT percent double precision | SETOF record
+ postgis_raster | public | st_interpolateraster | geom geometry, options text, rast raster, bandnumber integer | raster
+ postgis_raster | public | st_intersection | geomin geometry, rast raster, band integer | SETOF geomval
+ postgis_raster | public | st_intersection | rast raster, band integer, geomin geometry | SETOF geomval
+ postgis_raster | public | st_intersection | rast raster, geomin geometry | SETOF geomval
+ postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, nodataval double precision | raster
+ postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, nodataval double precision[] | raster
+ postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, returnband text, nodataval double precision | raster
+ postgis_raster | public | st_intersection | rast1 raster, band1 integer, rast2 raster, band2 integer, returnband text, nodataval double precision[] | raster
+ postgis_raster | public | st_intersection | rast1 raster, rast2 raster, nodataval double precision | raster
+ postgis_raster | public | st_intersection | rast1 raster, rast2 raster, nodataval double precision[] | raster
+ postgis_raster | public | st_intersection | rast1 raster, rast2 raster, returnband text, nodataval double precision | raster
+ postgis_raster | public | st_intersection | rast1 raster, rast2 raster, returnband text, nodataval double precision[] | raster
+ postgis_raster | public | st_intersects | geom geometry, rast raster, nband integer | boolean
+ postgis_raster | public | st_intersects | rast raster, geom geometry, nband integer | boolean
+ postgis_raster | public | st_intersects | rast raster, nband integer, geom geometry | boolean
+ postgis_raster | public | st_intersects | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | st_intersects | rast1 raster, rast2 raster | boolean
+ postgis_raster | public | st_invdistweight4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | st_iscoveragetile | rast raster, coverage raster, tilewidth integer, tileheight integer | boolean
+ postgis_raster | public | st_isempty | rast raster | boolean
+ postgis_raster | public | st_makeemptycoverage | tilewidth integer, tileheight integer, width integer, height integer, upperleftx double precision, upperlefty double precision, scalex double precision, scaley double precision, skewx double precision, skewy double precision, srid integer | SETOF raster
+ postgis_raster | public | st_makeemptyraster | rast raster | raster
+ postgis_raster | public | st_makeemptyraster | width integer, height integer, upperleftx double precision, upperlefty double precision, pixelsize double precision | raster
+ postgis_raster | public | st_makeemptyraster | width integer, height integer, upperleftx double precision, upperlefty double precision, scalex double precision, scaley double precision, skewx double precision, skewy double precision, srid integer | raster
+ postgis_raster | public | st_mapalgebra | rast raster, nband integer, callbackfunc regprocedure, mask double precision[], weighted boolean, pixeltype text, extenttype text, customextent raster, VARIADIC userargs text[] | raster
+ postgis_raster | public | st_mapalgebra | rast raster, nband integer, callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster
+ postgis_raster | public | st_mapalgebra | rast raster, nband integer, pixeltype text, expression text, nodataval double precision | raster
+ postgis_raster | public | st_mapalgebra | rast raster, nband integer[], callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster
+ postgis_raster | public | st_mapalgebra | rast raster, pixeltype text, expression text, nodataval double precision | raster
+ postgis_raster | public | st_mapalgebra | rast1 raster, band1 integer, rast2 raster, band2 integer, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster
+ postgis_raster | public | st_mapalgebra | rast1 raster, nband1 integer, rast2 raster, nband2 integer, callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster
+ postgis_raster | public | st_mapalgebra | rast1 raster, rast2 raster, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster
+ postgis_raster | public | st_mapalgebra | rastbandargset rastbandarg[], callbackfunc regprocedure, pixeltype text, extenttype text, customextent raster, distancex integer, distancey integer, VARIADIC userargs text[] | raster
+ postgis_raster | public | st_mapalgebraexpr | rast raster, band integer, pixeltype text, expression text, nodataval double precision | raster
+ postgis_raster | public | st_mapalgebraexpr | rast raster, pixeltype text, expression text, nodataval double precision | raster
+ postgis_raster | public | st_mapalgebraexpr | rast1 raster, band1 integer, rast2 raster, band2 integer, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster
+ postgis_raster | public | st_mapalgebraexpr | rast1 raster, rast2 raster, expression text, pixeltype text, extenttype text, nodata1expr text, nodata2expr text, nodatanodataval double precision | raster
+ postgis_raster | public | st_mapalgebrafct | rast raster, band integer, onerastuserfunc regprocedure | raster
+ postgis_raster | public | st_mapalgebrafct | rast raster, band integer, onerastuserfunc regprocedure, VARIADIC args text[] | raster
+ postgis_raster | public | st_mapalgebrafct | rast raster, band integer, pixeltype text, onerastuserfunc regprocedure | raster
+ postgis_raster | public | st_mapalgebrafct | rast raster, band integer, pixeltype text, onerastuserfunc regprocedure, VARIADIC args text[] | raster
+ postgis_raster | public | st_mapalgebrafct | rast raster, onerastuserfunc regprocedure | raster
+ postgis_raster | public | st_mapalgebrafct | rast raster, onerastuserfunc regprocedure, VARIADIC args text[] | raster
+ postgis_raster | public | st_mapalgebrafct | rast raster, pixeltype text, onerastuserfunc regprocedure | raster
+ postgis_raster | public | st_mapalgebrafct | rast raster, pixeltype text, onerastuserfunc regprocedure, VARIADIC args text[] | raster
+ postgis_raster | public | st_mapalgebrafct | rast1 raster, band1 integer, rast2 raster, band2 integer, tworastuserfunc regprocedure, pixeltype text, extenttype text, VARIADIC userargs text[] | raster
+ postgis_raster | public | st_mapalgebrafct | rast1 raster, rast2 raster, tworastuserfunc regprocedure, pixeltype text, extenttype text, VARIADIC userargs text[] | raster
+ postgis_raster | public | st_mapalgebrafctngb | rast raster, band integer, pixeltype text, ngbwidth integer, ngbheight integer, onerastngbuserfunc regprocedure, nodatamode text, VARIADIC args text[] | raster
+ postgis_raster | public | st_max4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision
+ postgis_raster | public | st_max4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | st_mean4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision
+ postgis_raster | public | st_mean4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | st_memsize | raster | integer
+ postgis_raster | public | st_metadata | rast raster, OUT upperleftx double precision, OUT upperlefty double precision, OUT width integer, OUT height integer, OUT scalex double precision, OUT scaley double precision, OUT skewx double precision, OUT skewy double precision, OUT srid integer, OUT numbands integer | record
+ postgis_raster | public | st_min4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision
+ postgis_raster | public | st_min4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | st_minconvexhull | rast raster, nband integer | geometry
+ postgis_raster | public | st_mindist4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | st_minpossiblevalue | pixeltype text | double precision
+ postgis_raster | public | st_nearestvalue | rast raster, band integer, columnx integer, rowy integer, exclude_nodata_value boolean | double precision
+ postgis_raster | public | st_nearestvalue | rast raster, band integer, pt geometry, exclude_nodata_value boolean | double precision
+ postgis_raster | public | st_nearestvalue | rast raster, columnx integer, rowy integer, exclude_nodata_value boolean | double precision
+ postgis_raster | public | st_nearestvalue | rast raster, pt geometry, exclude_nodata_value boolean | double precision
+ postgis_raster | public | st_neighborhood | rast raster, band integer, columnx integer, rowy integer, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[]
+ postgis_raster | public | st_neighborhood | rast raster, band integer, pt geometry, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[]
+ postgis_raster | public | st_neighborhood | rast raster, columnx integer, rowy integer, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[]
+ postgis_raster | public | st_neighborhood | rast raster, pt geometry, distancex integer, distancey integer, exclude_nodata_value boolean | double precision[]
+ postgis_raster | public | st_notsamealignmentreason | rast1 raster, rast2 raster | text
+ postgis_raster | public | st_numbands | raster | integer
+ postgis_raster | public | st_overlaps | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | st_overlaps | rast1 raster, rast2 raster | boolean
+ postgis_raster | public | st_pixelascentroid | rast raster, x integer, y integer | geometry
+ postgis_raster | public | st_pixelascentroids | rast raster, band integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer)
+ postgis_raster | public | st_pixelaspoint | rast raster, x integer, y integer | geometry
+ postgis_raster | public | st_pixelaspoints | rast raster, band integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer)
+ postgis_raster | public | st_pixelaspolygon | rast raster, x integer, y integer | geometry
+ postgis_raster | public | st_pixelaspolygons | rast raster, band integer, exclude_nodata_value boolean | TABLE(geom geometry, val double precision, x integer, y integer)
+ postgis_raster | public | st_pixelheight | raster | double precision
+ postgis_raster | public | st_pixelofvalue | rast raster, nband integer, search double precision, exclude_nodata_value boolean | TABLE(x integer, y integer)
+ postgis_raster | public | st_pixelofvalue | rast raster, nband integer, search double precision[], exclude_nodata_value boolean | TABLE(val double precision, x integer, y integer)
+ postgis_raster | public | st_pixelofvalue | rast raster, search double precision, exclude_nodata_value boolean | TABLE(x integer, y integer)
+ postgis_raster | public | st_pixelofvalue | rast raster, search double precision[], exclude_nodata_value boolean | TABLE(val double precision, x integer, y integer)
+ postgis_raster | public | st_pixelwidth | raster | double precision
+ postgis_raster | public | st_polygon | rast raster, band integer | geometry
+ postgis_raster | public | st_quantile | rast raster, exclude_nodata_value boolean, quantile double precision | double precision
+ postgis_raster | public | st_quantile | rast raster, nband integer, exclude_nodata_value boolean, quantile double precision | double precision
+ postgis_raster | public | st_quantile | rast raster, nband integer, exclude_nodata_value boolean, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record
+ postgis_raster | public | st_quantile | rast raster, nband integer, quantile double precision | double precision
+ postgis_raster | public | st_quantile | rast raster, nband integer, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record
+ postgis_raster | public | st_quantile | rast raster, quantile double precision | double precision
+ postgis_raster | public | st_quantile | rast raster, quantiles double precision[], OUT quantile double precision, OUT value double precision | SETOF record
+ postgis_raster | public | st_range4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision
+ postgis_raster | public | st_range4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | st_rastertoworldcoord | rast raster, columnx integer, rowy integer, OUT longitude double precision, OUT latitude double precision | record
+ postgis_raster | public | st_rastertoworldcoordx | rast raster, xr integer | double precision
+ postgis_raster | public | st_rastertoworldcoordx | rast raster, xr integer, yr integer | double precision
+ postgis_raster | public | st_rastertoworldcoordy | rast raster, xr integer, yr integer | double precision
+ postgis_raster | public | st_rastertoworldcoordy | rast raster, yr integer | double precision
+ postgis_raster | public | st_rastfromhexwkb | text | raster
+ postgis_raster | public | st_rastfromwkb | bytea | raster
+ postgis_raster | public | st_reclass | rast raster, nband integer, reclassexpr text, pixeltype text, nodataval double precision | raster
+ postgis_raster | public | st_reclass | rast raster, reclassexpr text, pixeltype text | raster
+ postgis_raster | public | st_reclass | rast raster, VARIADIC reclassargset reclassarg[] | raster
+ postgis_raster | public | st_resample | rast raster, ref raster, algorithm text, maxerr double precision, usescale boolean | raster
+ postgis_raster | public | st_resample | rast raster, ref raster, usescale boolean, algorithm text, maxerr double precision | raster
+ postgis_raster | public | st_resample | rast raster, scalex double precision, scaley double precision, gridx double precision, gridy double precision, skewx double precision, skewy double precision, algorithm text, maxerr double precision | raster
+ postgis_raster | public | st_resample | rast raster, width integer, height integer, gridx double precision, gridy double precision, skewx double precision, skewy double precision, algorithm text, maxerr double precision | raster
+ postgis_raster | public | st_rescale | rast raster, scalex double precision, scaley double precision, algorithm text, maxerr double precision | raster
+ postgis_raster | public | st_rescale | rast raster, scalexy double precision, algorithm text, maxerr double precision | raster
+ postgis_raster | public | st_resize | rast raster, percentwidth double precision, percentheight double precision, algorithm text, maxerr double precision | raster
+ postgis_raster | public | st_resize | rast raster, width integer, height integer, algorithm text, maxerr double precision | raster
+ postgis_raster | public | st_resize | rast raster, width text, height text, algorithm text, maxerr double precision | raster
+ postgis_raster | public | st_reskew | rast raster, skewx double precision, skewy double precision, algorithm text, maxerr double precision | raster
+ postgis_raster | public | st_reskew | rast raster, skewxy double precision, algorithm text, maxerr double precision | raster
+ postgis_raster | public | st_retile | tab regclass, col name, ext geometry, sfx double precision, sfy double precision, tw integer, th integer, algo text | SETOF raster
+ postgis_raster | public | st_rotation | raster | double precision
+ postgis_raster | public | st_roughness | rast raster, nband integer, customextent raster, pixeltype text, interpolate_nodata boolean | raster
+ postgis_raster | public | st_roughness | rast raster, nband integer, pixeltype text, interpolate_nodata boolean | raster
+ postgis_raster | public | st_samealignment | rast1 raster, rast2 raster | boolean
+ postgis_raster | public | st_samealignment | raster | boolean
+ postgis_raster | public | st_samealignment | ulx1 double precision, uly1 double precision, scalex1 double precision, scaley1 double precision, skewx1 double precision, skewy1 double precision, ulx2 double precision, uly2 double precision, scalex2 double precision, scaley2 double precision, skewx2 double precision, skewy2 double precision | boolean
+ postgis_raster | public | st_scalex | raster | double precision
+ postgis_raster | public | st_scaley | raster | double precision
+ postgis_raster | public | st_setbandindex | rast raster, band integer, outdbindex integer, force boolean | raster
+ postgis_raster | public | st_setbandisnodata | rast raster, band integer | raster
+ postgis_raster | public | st_setbandnodatavalue | rast raster, band integer, nodatavalue double precision, forcechecking boolean | raster
+ postgis_raster | public | st_setbandnodatavalue | rast raster, nodatavalue double precision | raster
+ postgis_raster | public | st_setbandpath | rast raster, band integer, outdbpath text, outdbindex integer, force boolean | raster
+ postgis_raster | public | st_setgeoreference | rast raster, georef text, format text | raster
+ postgis_raster | public | st_setgeoreference | rast raster, upperleftx double precision, upperlefty double precision, scalex double precision, scaley double precision, skewx double precision, skewy double precision | raster
+ postgis_raster | public | st_setgeotransform | rast raster, imag double precision, jmag double precision, theta_i double precision, theta_ij double precision, xoffset double precision, yoffset double precision | raster
+ postgis_raster | public | st_setm | rast raster, geom geometry, resample text, band integer | geometry
+ postgis_raster | public | st_setrotation | rast raster, rotation double precision | raster
+ postgis_raster | public | st_setscale | rast raster, scale double precision | raster
+ postgis_raster | public | st_setscale | rast raster, scalex double precision, scaley double precision | raster
+ postgis_raster | public | st_setskew | rast raster, skew double precision | raster
+ postgis_raster | public | st_setskew | rast raster, skewx double precision, skewy double precision | raster
+ postgis_raster | public | st_setsrid | rast raster, srid integer | raster
+ postgis_raster | public | st_setupperleft | rast raster, upperleftx double precision, upperlefty double precision | raster
+ postgis_raster | public | st_setvalue | rast raster, band integer, x integer, y integer, newvalue double precision | raster
+ postgis_raster | public | st_setvalue | rast raster, geom geometry, newvalue double precision | raster
+ postgis_raster | public | st_setvalue | rast raster, nband integer, geom geometry, newvalue double precision | raster
+ postgis_raster | public | st_setvalue | rast raster, x integer, y integer, newvalue double precision | raster
+ postgis_raster | public | st_setvalues | rast raster, nband integer, geomvalset geomval[], keepnodata boolean | raster
+ postgis_raster | public | st_setvalues | rast raster, nband integer, x integer, y integer, newvalueset double precision[], noset boolean[], keepnodata boolean | raster
+ postgis_raster | public | st_setvalues | rast raster, nband integer, x integer, y integer, newvalueset double precision[], nosetvalue double precision, keepnodata boolean | raster
+ postgis_raster | public | st_setvalues | rast raster, nband integer, x integer, y integer, width integer, height integer, newvalue double precision, keepnodata boolean | raster
+ postgis_raster | public | st_setvalues | rast raster, x integer, y integer, width integer, height integer, newvalue double precision, keepnodata boolean | raster
+ postgis_raster | public | st_setz | rast raster, geom geometry, resample text, band integer | geometry
+ postgis_raster | public | st_skewx | raster | double precision
+ postgis_raster | public | st_skewy | raster | double precision
+ postgis_raster | public | st_slope | rast raster, nband integer, customextent raster, pixeltype text, units text, scale double precision, interpolate_nodata boolean | raster
+ postgis_raster | public | st_slope | rast raster, nband integer, pixeltype text, units text, scale double precision, interpolate_nodata boolean | raster
+ postgis_raster | public | st_snaptogrid | rast raster, gridx double precision, gridy double precision, algorithm text, maxerr double precision, scalex double precision, scaley double precision | raster
+ postgis_raster | public | st_snaptogrid | rast raster, gridx double precision, gridy double precision, scalex double precision, scaley double precision, algorithm text, maxerr double precision | raster
+ postgis_raster | public | st_snaptogrid | rast raster, gridx double precision, gridy double precision, scalexy double precision, algorithm text, maxerr double precision | raster
+ postgis_raster | public | st_srid | raster | integer
+ postgis_raster | public | st_stddev4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision
+ postgis_raster | public | st_stddev4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | st_sum4ma | matrix double precision[], nodatamode text, VARIADIC args text[] | double precision
+ postgis_raster | public | st_sum4ma | value double precision[], pos integer[], VARIADIC userargs text[] | double precision
+ postgis_raster | public | st_summary | rast raster | text
+ postgis_raster | public | st_summarystats | rast raster, exclude_nodata_value boolean | summarystats
+ postgis_raster | public | st_summarystats | rast raster, nband integer, exclude_nodata_value boolean | summarystats
+ postgis_raster | public | st_summarystatsagg | raster, boolean, double precision | summarystats
+ postgis_raster | public | st_summarystatsagg | raster, integer, boolean | summarystats
+ postgis_raster | public | st_summarystatsagg | raster, integer, boolean, double precision | summarystats
+ postgis_raster | public | st_tile | rast raster, nband integer, width integer, height integer, padwithnodata boolean, nodataval double precision | SETOF raster
+ postgis_raster | public | st_tile | rast raster, nband integer[], width integer, height integer, padwithnodata boolean, nodataval double precision | SETOF raster
+ postgis_raster | public | st_tile | rast raster, width integer, height integer, padwithnodata boolean, nodataval double precision | SETOF raster
+ postgis_raster | public | st_touches | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean
+ postgis_raster | public | st_touches | rast1 raster, rast2 raster | boolean
+ postgis_raster | public | st_tpi | rast raster, nband integer, customextent raster, pixeltype text, interpolate_nodata boolean | raster
+ postgis_raster | public | st_tpi | rast raster, nband integer, pixeltype text, interpolate_nodata boolean | raster
+ postgis_raster | public | st_transform | rast raster, alignto raster, algorithm text, maxerr double precision | raster
+ postgis_raster | public | st_transform | rast raster, srid integer, algorithm text, maxerr double precision, scalex double precision, scaley double precision | raster
+ postgis_raster | public | st_transform | rast raster, srid integer, scalex double precision, scaley double precision, algorithm text, maxerr double precision | raster
+ postgis_raster | public | st_transform | rast raster, srid integer, scalexy double precision, algorithm text, maxerr double precision | raster
+ postgis_raster | public | st_tri | rast raster, nband integer, customextent raster, pixeltype text, interpolate_nodata boolean | raster
+ postgis_raster | public | st_tri | rast raster, nband integer, pixeltype text, interpolate_nodata boolean | raster
+ postgis_raster | public | st_union | raster | raster
+ postgis_raster | public | st_union | raster, integer | raster
+ postgis_raster | public | st_union | raster, integer, text | raster
+
postgis_raster | public | st_union | raster, text | raster + postgis_raster | public | st_union | raster, unionarg[] | raster + postgis_raster | public | st_upperleftx | raster | double precision + postgis_raster | public | st_upperlefty | raster | double precision + postgis_raster | public | st_value | rast raster, band integer, pt geometry, exclude_nodata_value boolean, resample text | double precision + postgis_raster | public | st_value | rast raster, band integer, x integer, y integer, exclude_nodata_value boolean | double precision + postgis_raster | public | st_value | rast raster, pt geometry, exclude_nodata_value boolean | double precision + postgis_raster | public | st_value | rast raster, x integer, y integer, exclude_nodata_value boolean | double precision + postgis_raster | public | st_valuecount | rast raster, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rast raster, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rast raster, nband integer, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rast raster, nband integer, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rast raster, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rast raster, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, nband integer, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, searchvalue double precision, roundto double precision | integer + postgis_raster | public | st_valuecount | rastertable text, rastercolumn text, searchvalues double precision[], roundto double precision, OUT value double precision, OUT count integer | SETOF record + postgis_raster | public | st_valuepercent | rast raster, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rast raster, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_valuepercent | rast raster, nband integer, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | 
st_valuepercent | rast raster, nband integer, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_valuepercent | rast raster, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rast raster, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, exclude_nodata_value boolean, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, nband integer, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, searchvalue double precision, roundto double precision | double precision + postgis_raster | public | st_valuepercent | rastertable text, rastercolumn text, searchvalues double precision[], roundto double precision, OUT value double precision, OUT percent double precision | SETOF record + postgis_raster | public | st_width | raster | integer + postgis_raster | public | st_within | rast1 raster, nband1 integer, rast2 raster, nband2 integer | boolean + postgis_raster | public | st_within | rast1 raster, rast2 raster | boolean + postgis_raster | public | st_worldtorastercoord | rast raster, longitude double precision, latitude double precision, OUT columnx integer, OUT rowy integer | record + postgis_raster | public | st_worldtorastercoord | rast raster, pt geometry, OUT columnx integer, OUT rowy integer | record + postgis_raster | public | st_worldtorastercoordx | rast raster, pt geometry | integer + postgis_raster | public | st_worldtorastercoordx | rast raster, xw double precision | integer + postgis_raster | public | st_worldtorastercoordx | rast raster, xw double precision, yw double precision | integer + postgis_raster | public | st_worldtorastercoordy | rast raster, pt geometry | integer + postgis_raster | public | st_worldtorastercoordy | rast raster, xw double precision, yw double precision | integer + postgis_raster | public | st_worldtorastercoordy | rast raster, yw double precision | integer + postgis_raster | public | updaterastersrid | schema_name name, table_name name, column_name name, new_srid integer | boolean + postgis_raster | public | updaterastersrid | table_name name, column_name name, new_srid integer | boolean + postgis_sfcgal | public | postgis_sfcgal_full_version | | text + postgis_sfcgal | public | postgis_sfcgal_noop | geometry | geometry + postgis_sfcgal | public | postgis_sfcgal_scripts_installed | | text + postgis_sfcgal | public | postgis_sfcgal_version | | text + postgis_sfcgal | public | st_3darea | geometry | double precision + postgis_sfcgal | public | st_3dconvexhull | geometry | geometry + postgis_sfcgal | public | st_3ddifference | 
geom1 geometry, geom2 geometry | geometry + postgis_sfcgal | public | st_3dintersection | geom1 geometry, geom2 geometry | geometry + postgis_sfcgal | public | st_3dunion | geom1 geometry, geom2 geometry | geometry + postgis_sfcgal | public | st_3dunion | geometry | geometry + postgis_sfcgal | public | st_alphashape | g1 geometry, alpha double precision, allow_holes boolean | geometry + postgis_sfcgal | public | st_approximatemedialaxis | geometry | geometry + postgis_sfcgal | public | st_constraineddelaunaytriangles | geometry | geometry + postgis_sfcgal | public | st_extrude | geometry, double precision, double precision, double precision | geometry + postgis_sfcgal | public | st_forcelhr | geometry | geometry + postgis_sfcgal | public | st_isplanar | geometry | boolean + postgis_sfcgal | public | st_issolid | geometry | boolean + postgis_sfcgal | public | st_makesolid | geometry | geometry + postgis_sfcgal | public | st_minkowskisum | geometry, geometry | geometry + postgis_sfcgal | public | st_optimalalphashape | g1 geometry, allow_holes boolean, nb_components integer | geometry + postgis_sfcgal | public | st_orientation | geometry | integer + postgis_sfcgal | public | st_straightskeleton | geometry | geometry + postgis_sfcgal | public | st_tesselate | geometry | geometry + postgis_sfcgal | public | st_volume | geometry | double precision + postgis_topology | topology | _asgmledge | edge_id integer, start_node integer, end_node integer, line geometry, visitedtable regclass, nsprefix_in text, prec integer, options integer, idprefix text, gmlver integer | text + postgis_topology | topology | _asgmlface | toponame text, face_id integer, visitedtable regclass, nsprefix_in text, prec integer, options integer, idprefix text, gmlver integer | text + postgis_topology | topology | _asgmlnode | id integer, point geometry, nsprefix_in text, prec integer, options integer, idprefix text, gmlver integer | text + postgis_topology | topology | _checkedgelinking | curedge_edge_id integer, prevedge_edge_id integer, prevedge_next_left_edge integer, prevedge_next_right_edge integer | topology.validatetopology_returntype + postgis_topology | topology | _st_adjacentedges | atopology character varying, anode integer, anedge integer | integer[] + postgis_topology | topology | _st_mintolerance | ageom geometry | double precision + postgis_topology | topology | _st_mintolerance | atopology character varying, ageom geometry | double precision + postgis_topology | topology | _validatetopologyedgelinking | bbox geometry | SETOF topology.validatetopology_returntype + postgis_topology | topology | _validatetopologygetfaceshellmaximaledgering | atopology character varying, aface integer | geometry + postgis_topology | topology | _validatetopologygetringedges | starting_edge integer | integer[] + postgis_topology | topology | _validatetopologyrings | bbox geometry | SETOF topology.validatetopology_returntype + postgis_topology | topology | addedge | atopology character varying, aline geometry | integer + postgis_topology | topology | addface | atopology character varying, apoly geometry, force_new boolean | integer + postgis_topology | topology | addnode | atopology character varying, apoint geometry, allowedgesplitting boolean, setcontainingface boolean | integer + postgis_topology | topology | addtopogeometrycolumn | character varying, character varying, character varying, character varying, character varying | integer + postgis_topology | topology | addtopogeometrycolumn | toponame character varying, schema 
character varying, tbl character varying, col character varying, ltype character varying, child integer | integer + postgis_topology | topology | addtosearchpath | a_schema_name character varying | text + postgis_topology | topology | asgml | tg topology.topogeometry | text + postgis_topology | topology | asgml | tg topology.topogeometry, nsprefix text | text + postgis_topology | topology | asgml | tg topology.topogeometry, nsprefix text, prec integer, options integer, vis regclass | text + postgis_topology | topology | asgml | tg topology.topogeometry, nsprefix text, prec integer, options integer, visitedtable regclass, idprefix text | text + postgis_topology | topology | asgml | tg topology.topogeometry, nsprefix text, prec integer, opts integer | text + postgis_topology | topology | asgml | tg topology.topogeometry, nsprefix_in text, precision_in integer, options_in integer, visitedtable regclass, idprefix text, gmlver integer | text + postgis_topology | topology | asgml | tg topology.topogeometry, visitedtable regclass | text + postgis_topology | topology | asgml | tg topology.topogeometry, visitedtable regclass, nsprefix text | text + postgis_topology | topology | astopojson | tg topology.topogeometry, edgemaptable regclass | text + postgis_topology | topology | cleartopogeom | tg topology.topogeometry | topology.topogeometry + postgis_topology | topology | copytopology | atopology character varying, newtopo character varying | integer + postgis_topology | topology | createtopogeom | toponame character varying, tg_type integer, layer_id integer | topology.topogeometry + postgis_topology | topology | createtopogeom | toponame character varying, tg_type integer, layer_id integer, tg_objs topology.topoelementarray | topology.topogeometry + postgis_topology | topology | createtopology | atopology character varying, srid integer, prec double precision, hasz boolean | integer + postgis_topology | topology | createtopology | character varying | integer + postgis_topology | topology | createtopology | character varying, integer | integer + postgis_topology | topology | createtopology | toponame character varying, srid integer, prec double precision | integer + postgis_topology | topology | droptopogeometrycolumn | schema character varying, tbl character varying, col character varying | text + postgis_topology | topology | droptopology | atopology character varying | text + postgis_topology | topology | equals | tg1 topology.topogeometry, tg2 topology.topogeometry | boolean + postgis_topology | topology | findlayer | layer_table regclass, feature_column name | topology.layer + postgis_topology | topology | findlayer | schema_name name, table_name name, feature_column name | topology.layer + postgis_topology | topology | findlayer | tg topology.topogeometry | topology.layer + postgis_topology | topology | findlayer | topology_id integer, layer_id integer | topology.layer + postgis_topology | topology | findtopology | integer | topology.topology + postgis_topology | topology | findtopology | name, name, name | topology.topology + postgis_topology | topology | findtopology | regclass, name | topology.topology + postgis_topology | topology | findtopology | text | topology.topology + postgis_topology | topology | findtopology | topology.topogeometry | topology.topology + postgis_topology | topology | geometry | topogeom topology.topogeometry | geometry + postgis_topology | topology | geometrytype | tg topology.topogeometry | text + postgis_topology | topology | getedgebypoint | atopology character 
varying, apoint geometry, tol1 double precision | integer + postgis_topology | topology | getfacebypoint | atopology character varying, apoint geometry, tol1 double precision | integer + postgis_topology | topology | getfacecontainingpoint | atopology text, apoint geometry | integer + postgis_topology | topology | getnodebypoint | atopology character varying, apoint geometry, tol1 double precision | integer + postgis_topology | topology | getnodeedges | atopology character varying, anode integer | SETOF topology.getfaceedges_returntype + postgis_topology | topology | getringedges | atopology character varying, anedge integer, maxedges integer | SETOF topology.getfaceedges_returntype + postgis_topology | topology | gettopogeomelementarray | tg topology.topogeometry | topology.topoelementarray + postgis_topology | topology | gettopogeomelementarray | toponame character varying, layer_id integer, tgid integer | topology.topoelementarray + postgis_topology | topology | gettopogeomelements | tg topology.topogeometry | SETOF topology.topoelement + postgis_topology | topology | gettopogeomelements | toponame character varying, layerid integer, tgid integer | SETOF topology.topoelement + postgis_topology | topology | gettopologyid | toponame character varying | integer + postgis_topology | topology | gettopologyname | topoid integer | character varying + postgis_topology | topology | gettopologysrid | toponame character varying | integer + postgis_topology | topology | intersects | tg1 topology.topogeometry, tg2 topology.topogeometry | boolean + postgis_topology | topology | layertrigger | | trigger + postgis_topology | topology | polygonize | toponame character varying | text + postgis_topology | topology | populate_topology_layer | | TABLE(schema_name text, table_name text, feature_column text) + postgis_topology | topology | postgis_topology_scripts_installed | | text + postgis_topology | topology | relationtrigger | | trigger + postgis_topology | topology | removeunusedprimitives | atopology text, bbox geometry | integer + postgis_topology | topology | st_addedgemodface | atopology character varying, anode integer, anothernode integer, acurve geometry | integer + postgis_topology | topology | st_addedgenewfaces | atopology character varying, anode integer, anothernode integer, acurve geometry | integer + postgis_topology | topology | st_addisoedge | atopology character varying, anode integer, anothernode integer, acurve geometry | integer + postgis_topology | topology | st_addisonode | atopology character varying, aface integer, apoint geometry | integer + postgis_topology | topology | st_changeedgegeom | atopology character varying, anedge integer, acurve geometry | text + postgis_topology | topology | st_createtopogeo | atopology character varying, acollection geometry | text + postgis_topology | topology | st_geometrytype | tg topology.topogeometry | text + postgis_topology | topology | st_getfaceedges | toponame character varying, face_id integer | SETOF topology.getfaceedges_returntype + postgis_topology | topology | st_getfacegeometry | toponame character varying, aface integer | geometry + postgis_topology | topology | st_inittopogeo | atopology character varying | text + postgis_topology | topology | st_modedgeheal | toponame character varying, e1id integer, e2id integer | integer + postgis_topology | topology | st_modedgesplit | atopology character varying, anedge integer, apoint geometry | integer + postgis_topology | topology | st_moveisonode | atopology character varying, anode 
integer, apoint geometry | text + postgis_topology | topology | st_newedgeheal | toponame character varying, e1id integer, e2id integer | integer + postgis_topology | topology | st_newedgessplit | atopology character varying, anedge integer, apoint geometry | integer + postgis_topology | topology | st_remedgemodface | toponame character varying, e1id integer | integer + postgis_topology | topology | st_remedgenewface | toponame character varying, e1id integer | integer + postgis_topology | topology | st_remisonode | character varying, integer | text + postgis_topology | topology | st_removeisoedge | atopology character varying, anedge integer | text + postgis_topology | topology | st_removeisonode | atopology character varying, anode integer | text + postgis_topology | topology | st_simplify | tg topology.topogeometry, tolerance double precision | geometry + postgis_topology | topology | st_srid | tg topology.topogeometry | integer + postgis_topology | topology | topoelementarray_agg | topology.topoelement | topology.topoelementarray + postgis_topology | topology | topoelementarray_append | topology.topoelementarray, topology.topoelement | topology.topoelementarray + postgis_topology | topology | topogeo_addgeometry | atopology character varying, ageom geometry, tolerance double precision | void + postgis_topology | topology | topogeo_addlinestring | atopology character varying, aline geometry, tolerance double precision | SETOF integer + postgis_topology | topology | topogeo_addpoint | atopology character varying, apoint geometry, tolerance double precision | integer + postgis_topology | topology | topogeo_addpolygon | atopology character varying, apoly geometry, tolerance double precision | SETOF integer + postgis_topology | topology | topogeom_addelement | tg topology.topogeometry, el topology.topoelement | topology.topogeometry + postgis_topology | topology | topogeom_addtopogeom | tgt topology.topogeometry, src topology.topogeometry | topology.topogeometry + postgis_topology | topology | topogeom_remelement | tg topology.topogeometry, el topology.topoelement | topology.topogeometry + postgis_topology | topology | topologysummary | atopology character varying | text + postgis_topology | topology | totopogeom | ageom geometry, atopology character varying, alayer integer, atolerance double precision | topology.topogeometry + postgis_topology | topology | totopogeom | ageom geometry, tg topology.topogeometry, atolerance double precision | topology.topogeometry + postgis_topology | topology | validatetopology | toponame character varying, bbox geometry | SETOF topology.validatetopology_returntype + postgis_topology | topology | validatetopologyrelation | toponame character varying | TABLE(error text, layer_id integer, topogeo_id integer, element_id integer) + postgres_fdw | public | postgres_fdw_disconnect | text | boolean + postgres_fdw | public | postgres_fdw_disconnect_all | | boolean + postgres_fdw | public | postgres_fdw_get_connections | OUT server_name text, OUT valid boolean | SETOF record + postgres_fdw | public | postgres_fdw_handler | | fdw_handler + postgres_fdw | public | postgres_fdw_validator | text[], oid | void + refint | public | check_foreign_key | | trigger + refint | public | check_primary_key | | trigger + rum | public | rum_anyarray_config | internal | void + rum | public | rum_anyarray_consistent | internal, smallint, anyarray, integer, internal, internal, internal, internal | boolean + rum | public | rum_anyarray_distance | anyarray, anyarray | double precision + 
rum | public | rum_anyarray_ordering | internal, smallint, anyarray, integer, internal, internal, internal, internal, internal | double precision + rum | public | rum_anyarray_similar | anyarray, anyarray | boolean + rum | public | rum_bit_compare_prefix | bit, bit, smallint, internal | integer + rum | public | rum_bit_extract_query | bit, internal, smallint, internal, internal | internal + rum | public | rum_bit_extract_value | bit, internal | internal + rum | public | rum_btree_consistent | internal, smallint, internal, integer, internal, internal, internal, internal | boolean + rum | public | rum_bytea_compare_prefix | bytea, bytea, smallint, internal | integer + rum | public | rum_bytea_extract_query | bytea, internal, smallint, internal, internal | internal + rum | public | rum_bytea_extract_value | bytea, internal | internal + rum | public | rum_char_compare_prefix | "char", "char", smallint, internal | integer + rum | public | rum_char_extract_query | "char", internal, smallint, internal, internal | internal + rum | public | rum_char_extract_value | "char", internal | internal + rum | public | rum_cidr_compare_prefix | cidr, cidr, smallint, internal | integer + rum | public | rum_cidr_extract_query | cidr, internal, smallint, internal, internal | internal + rum | public | rum_cidr_extract_value | cidr, internal | internal + rum | public | rum_date_compare_prefix | date, date, smallint, internal | integer + rum | public | rum_date_extract_query | date, internal, smallint, internal, internal | internal + rum | public | rum_date_extract_value | date, internal | internal + rum | public | rum_extract_anyarray | anyarray, internal, internal, internal, internal | internal + rum | public | rum_extract_anyarray_query | anyarray, internal, smallint, internal, internal, internal, internal | internal + rum | public | rum_extract_tsquery | tsquery, internal, smallint, internal, internal, internal, internal | internal + rum | public | rum_extract_tsquery_hash | tsquery, internal, smallint, internal, internal, internal, internal | internal + rum | public | rum_extract_tsvector | tsvector, internal, internal, internal, internal | internal + rum | public | rum_extract_tsvector_hash | tsvector, internal, internal, internal, internal | internal + rum | public | rum_float4_compare_prefix | real, real, smallint, internal | integer + rum | public | rum_float4_config | internal | void + rum | public | rum_float4_distance | real, real | double precision + rum | public | rum_float4_extract_query | real, internal, smallint, internal, internal | internal + rum | public | rum_float4_extract_value | real, internal | internal + rum | public | rum_float4_key_distance | real, real, smallint | double precision + rum | public | rum_float4_left_distance | real, real | double precision + rum | public | rum_float4_outer_distance | real, real, smallint | double precision + rum | public | rum_float4_right_distance | real, real | double precision + rum | public | rum_float8_compare_prefix | double precision, double precision, smallint, internal | integer + rum | public | rum_float8_config | internal | void + rum | public | rum_float8_distance | double precision, double precision | double precision + rum | public | rum_float8_extract_query | double precision, internal, smallint, internal, internal | internal + rum | public | rum_float8_extract_value | double precision, internal | internal + rum | public | rum_float8_key_distance | double precision, double precision, smallint | double precision + rum | public | 
rum_float8_left_distance | double precision, double precision | double precision + rum | public | rum_float8_outer_distance | double precision, double precision, smallint | double precision + rum | public | rum_float8_right_distance | double precision, double precision | double precision + rum | public | rum_inet_compare_prefix | inet, inet, smallint, internal | integer + rum | public | rum_inet_extract_query | inet, internal, smallint, internal, internal | internal + rum | public | rum_inet_extract_value | inet, internal | internal + rum | public | rum_int2_compare_prefix | smallint, smallint, smallint, internal | integer + rum | public | rum_int2_config | internal | void + rum | public | rum_int2_distance | smallint, smallint | double precision + rum | public | rum_int2_extract_query | smallint, internal, smallint, internal, internal | internal + rum | public | rum_int2_extract_value | smallint, internal | internal + rum | public | rum_int2_key_distance | smallint, smallint, smallint | double precision + rum | public | rum_int2_left_distance | smallint, smallint | double precision + rum | public | rum_int2_outer_distance | smallint, smallint, smallint | double precision + rum | public | rum_int2_right_distance | smallint, smallint | double precision + rum | public | rum_int4_compare_prefix | integer, integer, smallint, internal | integer + rum | public | rum_int4_config | internal | void + rum | public | rum_int4_distance | integer, integer | double precision + rum | public | rum_int4_extract_query | integer, internal, smallint, internal, internal | internal + rum | public | rum_int4_extract_value | integer, internal | internal + rum | public | rum_int4_key_distance | integer, integer, smallint | double precision + rum | public | rum_int4_left_distance | integer, integer | double precision + rum | public | rum_int4_outer_distance | integer, integer, smallint | double precision + rum | public | rum_int4_right_distance | integer, integer | double precision + rum | public | rum_int8_compare_prefix | bigint, bigint, smallint, internal | integer + rum | public | rum_int8_config | internal | void + rum | public | rum_int8_distance | bigint, bigint | double precision + rum | public | rum_int8_extract_query | bigint, internal, smallint, internal, internal | internal + rum | public | rum_int8_extract_value | bigint, internal | internal + rum | public | rum_int8_key_distance | bigint, bigint, smallint | double precision + rum | public | rum_int8_left_distance | bigint, bigint | double precision + rum | public | rum_int8_outer_distance | bigint, bigint, smallint | double precision + rum | public | rum_int8_right_distance | bigint, bigint | double precision + rum | public | rum_interval_compare_prefix | interval, interval, smallint, internal | integer + rum | public | rum_interval_extract_query | interval, internal, smallint, internal, internal | internal + rum | public | rum_interval_extract_value | interval, internal | internal + rum | public | rum_macaddr_compare_prefix | macaddr, macaddr, smallint, internal | integer + rum | public | rum_macaddr_extract_query | macaddr, internal, smallint, internal, internal | internal + rum | public | rum_macaddr_extract_value | macaddr, internal | internal + rum | public | rum_money_compare_prefix | money, money, smallint, internal | integer + rum | public | rum_money_config | internal | void + rum | public | rum_money_distance | money, money | double precision + rum | public | rum_money_extract_query | money, internal, smallint, internal, internal | internal 
+ rum | public | rum_money_extract_value | money, internal | internal + rum | public | rum_money_key_distance | money, money, smallint | double precision + rum | public | rum_money_left_distance | money, money | double precision + rum | public | rum_money_outer_distance | money, money, smallint | double precision + rum | public | rum_money_right_distance | money, money | double precision + rum | public | rum_numeric_cmp | numeric, numeric | integer + rum | public | rum_numeric_compare_prefix | numeric, numeric, smallint, internal | integer + rum | public | rum_numeric_extract_query | numeric, internal, smallint, internal, internal | internal + rum | public | rum_numeric_extract_value | numeric, internal | internal + rum | public | rum_oid_compare_prefix | oid, oid, smallint, internal | integer + rum | public | rum_oid_config | internal | void + rum | public | rum_oid_distance | oid, oid | double precision + rum | public | rum_oid_extract_query | oid, internal, smallint, internal, internal | internal + rum | public | rum_oid_extract_value | oid, internal | internal + rum | public | rum_oid_key_distance | oid, oid, smallint | double precision + rum | public | rum_oid_left_distance | oid, oid | double precision + rum | public | rum_oid_outer_distance | oid, oid, smallint | double precision + rum | public | rum_oid_right_distance | oid, oid | double precision + rum | public | rum_text_compare_prefix | text, text, smallint, internal | integer + rum | public | rum_text_extract_query | text, internal, smallint, internal, internal | internal + rum | public | rum_text_extract_value | text, internal | internal + rum | public | rum_time_compare_prefix | time without time zone, time without time zone, smallint, internal | integer + rum | public | rum_time_extract_query | time without time zone, internal, smallint, internal, internal | internal + rum | public | rum_time_extract_value | time without time zone, internal | internal + rum | public | rum_timestamp_compare_prefix | timestamp without time zone, timestamp without time zone, smallint, internal | integer + rum | public | rum_timestamp_config | internal | void + rum | public | rum_timestamp_consistent | internal, smallint, timestamp without time zone, integer, internal, internal, internal, internal | boolean + rum | public | rum_timestamp_distance | timestamp without time zone, timestamp without time zone | double precision + rum | public | rum_timestamp_extract_query | timestamp without time zone, internal, smallint, internal, internal, internal, internal | internal + rum | public | rum_timestamp_extract_value | timestamp without time zone, internal, internal, internal, internal | internal + rum | public | rum_timestamp_key_distance | timestamp without time zone, timestamp without time zone, smallint | double precision + rum | public | rum_timestamp_left_distance | timestamp without time zone, timestamp without time zone | double precision + rum | public | rum_timestamp_outer_distance | timestamp without time zone, timestamp without time zone, smallint | double precision + rum | public | rum_timestamp_right_distance | timestamp without time zone, timestamp without time zone | double precision + rum | public | rum_timestamptz_distance | timestamp with time zone, timestamp with time zone | double precision + rum | public | rum_timestamptz_key_distance | timestamp with time zone, timestamp with time zone, smallint | double precision + rum | public | rum_timestamptz_left_distance | timestamp with time zone, timestamp with time zone | double 
precision + rum | public | rum_timestamptz_right_distance | timestamp with time zone, timestamp with time zone | double precision + rum | public | rum_timetz_compare_prefix | time with time zone, time with time zone, smallint, internal | integer + rum | public | rum_timetz_extract_query | time with time zone, internal, smallint, internal, internal | internal + rum | public | rum_timetz_extract_value | time with time zone, internal | internal + rum | public | rum_ts_distance | tsvector, rum_distance_query | real + rum | public | rum_ts_distance | tsvector, tsquery | real + rum | public | rum_ts_distance | tsvector, tsquery, integer | real + rum | public | rum_ts_join_pos | internal, internal | bytea + rum | public | rum_ts_score | tsvector, rum_distance_query | real + rum | public | rum_ts_score | tsvector, tsquery | real + rum | public | rum_ts_score | tsvector, tsquery, integer | real + rum | public | rum_tsquery_addon_consistent | internal, smallint, tsvector, integer, internal, internal, internal, internal | boolean + rum | public | rum_tsquery_consistent | internal, smallint, tsvector, integer, internal, internal, internal, internal | boolean + rum | public | rum_tsquery_distance | internal, smallint, tsvector, integer, internal, internal, internal, internal, internal | double precision + rum | public | rum_tsquery_pre_consistent | internal, smallint, tsvector, integer, internal, internal, internal, internal | boolean + rum | public | rum_tsvector_config | internal | void + rum | public | rum_varbit_compare_prefix | bit varying, bit varying, smallint, internal | integer + rum | public | rum_varbit_extract_query | bit varying, internal, smallint, internal, internal | internal + rum | public | rum_varbit_extract_value | bit varying, internal | internal + rum | public | rumhandler | internal | index_am_handler + rum | public | ruminv_extract_tsquery | tsquery, internal, internal, internal, internal | internal + rum | public | ruminv_extract_tsvector | tsvector, internal, smallint, internal, internal, internal, internal | internal + rum | public | ruminv_tsquery_config | internal | void + rum | public | ruminv_tsvector_consistent | internal, smallint, tsvector, integer, internal, internal, internal, internal | boolean + rum | public | tsquery_to_distance_query | tsquery | rum_distance_query + seg | public | gseg_consistent | internal, seg, smallint, oid, internal | boolean + seg | public | gseg_penalty | internal, internal, internal | internal + seg | public | gseg_picksplit | internal, internal | internal + seg | public | gseg_same | seg, seg, internal | internal + seg | public | gseg_union | internal, internal | seg + seg | public | seg_center | seg | real + seg | public | seg_cmp | seg, seg | integer + seg | public | seg_contained | seg, seg | boolean + seg | public | seg_contains | seg, seg | boolean + seg | public | seg_different | seg, seg | boolean + seg | public | seg_ge | seg, seg | boolean + seg | public | seg_gt | seg, seg | boolean + seg | public | seg_in | cstring | seg + seg | public | seg_inter | seg, seg | seg + seg | public | seg_le | seg, seg | boolean + seg | public | seg_left | seg, seg | boolean + seg | public | seg_lower | seg | real + seg | public | seg_lt | seg, seg | boolean + seg | public | seg_out | seg | cstring + seg | public | seg_over_left | seg, seg | boolean + seg | public | seg_over_right | seg, seg | boolean + seg | public | seg_overlap | seg, seg | boolean + seg | public | seg_right | seg, seg | boolean + seg | public | seg_same | seg, seg | boolean + 
seg | public | seg_size | seg | real + seg | public | seg_union | seg, seg | seg + seg | public | seg_upper | seg | real + sslinfo | public | ssl_cipher | | text + sslinfo | public | ssl_client_cert_present | | boolean + sslinfo | public | ssl_client_dn | | text + sslinfo | public | ssl_client_dn_field | text | text + sslinfo | public | ssl_client_serial | | numeric + sslinfo | public | ssl_extension_info | OUT name text, OUT value text, OUT critical boolean | SETOF record + sslinfo | public | ssl_is_used | | boolean + sslinfo | public | ssl_issuer_dn | | text + sslinfo | public | ssl_issuer_field | text | text + sslinfo | public | ssl_version | | text + supabase_vault | vault | create_secret | new_secret text, new_name text, new_description text, new_key_id uuid | uuid + supabase_vault | vault | update_secret | secret_id uuid, new_secret text, new_name text, new_description text, new_key_id uuid | void + tablefunc | public | connectby | text, text, text, text, integer | SETOF record + tablefunc | public | connectby | text, text, text, text, integer, text | SETOF record + tablefunc | public | connectby | text, text, text, text, text, integer | SETOF record + tablefunc | public | connectby | text, text, text, text, text, integer, text | SETOF record + tablefunc | public | crosstab | text | SETOF record + tablefunc | public | crosstab | text, integer | SETOF record + tablefunc | public | crosstab | text, text | SETOF record + tablefunc | public | crosstab2 | text | SETOF tablefunc_crosstab_2 + tablefunc | public | crosstab3 | text | SETOF tablefunc_crosstab_3 + tablefunc | public | crosstab4 | text | SETOF tablefunc_crosstab_4 + tablefunc | public | normal_rand | integer, double precision, double precision | SETOF double precision + tcn | public | triggered_change_notification | | trigger + tsm_system_rows | public | system_rows | internal | tsm_handler + unaccent | public | unaccent | regdictionary, text | text + unaccent | public | unaccent | text | text + unaccent | public | unaccent_init | internal | internal + unaccent | public | unaccent_lexize | internal, internal, internal, internal | internal + uuid-ossp | extensions | uuid_generate_v1 | | uuid + uuid-ossp | extensions | uuid_generate_v1mc | | uuid + uuid-ossp | extensions | uuid_generate_v3 | namespace uuid, name text | uuid + uuid-ossp | extensions | uuid_generate_v4 | | uuid + uuid-ossp | extensions | uuid_generate_v5 | namespace uuid, name text | uuid + uuid-ossp | extensions | uuid_nil | | uuid + uuid-ossp | extensions | uuid_ns_dns | | uuid + uuid-ossp | extensions | uuid_ns_oid | | uuid + uuid-ossp | extensions | uuid_ns_url | | uuid + uuid-ossp | extensions | uuid_ns_x500 | | uuid + vector | public | array_to_halfvec | double precision[], integer, boolean | halfvec + vector | public | array_to_halfvec | integer[], integer, boolean | halfvec + vector | public | array_to_halfvec | numeric[], integer, boolean | halfvec + vector | public | array_to_halfvec | real[], integer, boolean | halfvec + vector | public | array_to_sparsevec | double precision[], integer, boolean | sparsevec + vector | public | array_to_sparsevec | integer[], integer, boolean | sparsevec + vector | public | array_to_sparsevec | numeric[], integer, boolean | sparsevec + vector | public | array_to_sparsevec | real[], integer, boolean | sparsevec + vector | public | array_to_vector | double precision[], integer, boolean | vector + vector | public | array_to_vector | integer[], integer, boolean | vector + vector | public | array_to_vector | numeric[], integer, 
boolean | vector + vector | public | array_to_vector | real[], integer, boolean | vector + vector | public | avg | halfvec | halfvec + vector | public | avg | vector | vector + vector | public | binary_quantize | halfvec | bit + vector | public | binary_quantize | vector | bit + vector | public | cosine_distance | halfvec, halfvec | double precision + vector | public | cosine_distance | sparsevec, sparsevec | double precision + vector | public | cosine_distance | vector, vector | double precision + vector | public | halfvec | halfvec, integer, boolean | halfvec + vector | public | halfvec_accum | double precision[], halfvec | double precision[] + vector | public | halfvec_add | halfvec, halfvec | halfvec + vector | public | halfvec_avg | double precision[] | halfvec + vector | public | halfvec_cmp | halfvec, halfvec | integer + vector | public | halfvec_combine | double precision[], double precision[] | double precision[] + vector | public | halfvec_concat | halfvec, halfvec | halfvec + vector | public | halfvec_eq | halfvec, halfvec | boolean + vector | public | halfvec_ge | halfvec, halfvec | boolean + vector | public | halfvec_gt | halfvec, halfvec | boolean + vector | public | halfvec_in | cstring, oid, integer | halfvec + vector | public | halfvec_l2_squared_distance | halfvec, halfvec | double precision + vector | public | halfvec_le | halfvec, halfvec | boolean + vector | public | halfvec_lt | halfvec, halfvec | boolean + vector | public | halfvec_mul | halfvec, halfvec | halfvec + vector | public | halfvec_ne | halfvec, halfvec | boolean + vector | public | halfvec_negative_inner_product | halfvec, halfvec | double precision + vector | public | halfvec_out | halfvec | cstring + vector | public | halfvec_recv | internal, oid, integer | halfvec + vector | public | halfvec_send | halfvec | bytea + vector | public | halfvec_spherical_distance | halfvec, halfvec | double precision + vector | public | halfvec_sub | halfvec, halfvec | halfvec + vector | public | halfvec_to_float4 | halfvec, integer, boolean | real[] + vector | public | halfvec_to_sparsevec | halfvec, integer, boolean | sparsevec + vector | public | halfvec_to_vector | halfvec, integer, boolean | vector + vector | public | halfvec_typmod_in | cstring[] | integer + vector | public | hamming_distance | bit, bit | double precision + vector | public | hnsw_bit_support | internal | internal + vector | public | hnsw_halfvec_support | internal | internal + vector | public | hnsw_sparsevec_support | internal | internal + vector | public | hnswhandler | internal | index_am_handler + vector | public | inner_product | halfvec, halfvec | double precision + vector | public | inner_product | sparsevec, sparsevec | double precision + vector | public | inner_product | vector, vector | double precision + vector | public | ivfflat_bit_support | internal | internal + vector | public | ivfflat_halfvec_support | internal | internal + vector | public | ivfflathandler | internal | index_am_handler + vector | public | jaccard_distance | bit, bit | double precision + vector | public | l1_distance | halfvec, halfvec | double precision + vector | public | l1_distance | sparsevec, sparsevec | double precision + vector | public | l1_distance | vector, vector | double precision + vector | public | l2_distance | halfvec, halfvec | double precision + vector | public | l2_distance | sparsevec, sparsevec | double precision + vector | public | l2_distance | vector, vector | double precision + vector | public | l2_norm | halfvec | double precision + vector | 
public | l2_norm | sparsevec | double precision + vector | public | l2_normalize | halfvec | halfvec + vector | public | l2_normalize | sparsevec | sparsevec + vector | public | l2_normalize | vector | vector + vector | public | sparsevec | sparsevec, integer, boolean | sparsevec + vector | public | sparsevec_cmp | sparsevec, sparsevec | integer + vector | public | sparsevec_eq | sparsevec, sparsevec | boolean + vector | public | sparsevec_ge | sparsevec, sparsevec | boolean + vector | public | sparsevec_gt | sparsevec, sparsevec | boolean + vector | public | sparsevec_in | cstring, oid, integer | sparsevec + vector | public | sparsevec_l2_squared_distance | sparsevec, sparsevec | double precision + vector | public | sparsevec_le | sparsevec, sparsevec | boolean + vector | public | sparsevec_lt | sparsevec, sparsevec | boolean + vector | public | sparsevec_ne | sparsevec, sparsevec | boolean + vector | public | sparsevec_negative_inner_product | sparsevec, sparsevec | double precision + vector | public | sparsevec_out | sparsevec | cstring + vector | public | sparsevec_recv | internal, oid, integer | sparsevec + vector | public | sparsevec_send | sparsevec | bytea + vector | public | sparsevec_to_halfvec | sparsevec, integer, boolean | halfvec + vector | public | sparsevec_to_vector | sparsevec, integer, boolean | vector + vector | public | sparsevec_typmod_in | cstring[] | integer + vector | public | subvector | halfvec, integer, integer | halfvec + vector | public | subvector | vector, integer, integer | vector + vector | public | sum | halfvec | halfvec + vector | public | sum | vector | vector + vector | public | vector | vector, integer, boolean | vector + vector | public | vector_accum | double precision[], vector | double precision[] + vector | public | vector_add | vector, vector | vector + vector | public | vector_avg | double precision[] | vector + vector | public | vector_cmp | vector, vector | integer + vector | public | vector_combine | double precision[], double precision[] | double precision[] + vector | public | vector_concat | vector, vector | vector + vector | public | vector_dims | halfvec | integer + vector | public | vector_dims | vector | integer + vector | public | vector_eq | vector, vector | boolean + vector | public | vector_ge | vector, vector | boolean + vector | public | vector_gt | vector, vector | boolean + vector | public | vector_in | cstring, oid, integer | vector + vector | public | vector_l2_squared_distance | vector, vector | double precision + vector | public | vector_le | vector, vector | boolean + vector | public | vector_lt | vector, vector | boolean + vector | public | vector_mul | vector, vector | vector + vector | public | vector_ne | vector, vector | boolean + vector | public | vector_negative_inner_product | vector, vector | double precision + vector | public | vector_norm | vector | double precision + vector | public | vector_out | vector | cstring + vector | public | vector_recv | internal, oid, integer | vector + vector | public | vector_send | vector | bytea + vector | public | vector_spherical_distance | vector, vector | double precision + vector | public | vector_sub | vector, vector | vector + vector | public | vector_to_float4 | vector, integer, boolean | real[] + vector | public | vector_to_halfvec | vector, integer, boolean | halfvec + vector | public | vector_to_sparsevec | vector, integer, boolean | sparsevec + vector | public | vector_typmod_in | cstring[] | integer + wrappers | public | airtable_fdw_handler | | fdw_handler + 
wrappers | public | airtable_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | airtable_fdw_validator | options text[], catalog oid | void + wrappers | public | auth0_fdw_handler | | fdw_handler + wrappers | public | auth0_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | auth0_fdw_validator | options text[], catalog oid | void + wrappers | public | big_query_fdw_handler | | fdw_handler + wrappers | public | big_query_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | big_query_fdw_validator | options text[], catalog oid | void + wrappers | public | click_house_fdw_handler | | fdw_handler + wrappers | public | click_house_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | click_house_fdw_validator | options text[], catalog oid | void + wrappers | public | cognito_fdw_handler | | fdw_handler + wrappers | public | cognito_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | cognito_fdw_validator | options text[], catalog oid | void + wrappers | public | firebase_fdw_handler | | fdw_handler + wrappers | public | firebase_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | firebase_fdw_validator | options text[], catalog oid | void + wrappers | public | hello_world_fdw_handler | | fdw_handler + wrappers | public | hello_world_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | hello_world_fdw_validator | options text[], catalog oid | void + wrappers | public | logflare_fdw_handler | | fdw_handler + wrappers | public | logflare_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | logflare_fdw_validator | options text[], catalog oid | void + wrappers | public | mssql_fdw_handler | | fdw_handler + wrappers | public | mssql_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | mssql_fdw_validator | options text[], catalog oid | void + wrappers | public | redis_fdw_handler | | fdw_handler + wrappers | public | redis_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | redis_fdw_validator | options text[], catalog oid | void + wrappers | public | s3_fdw_handler | | fdw_handler + wrappers | public | s3_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | s3_fdw_validator | options text[], catalog oid | void + wrappers | public | stripe_fdw_handler | | fdw_handler + wrappers | public | stripe_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | stripe_fdw_validator | options text[], catalog oid | void + wrappers | public | wasm_fdw_handler | | fdw_handler + wrappers | public | wasm_fdw_meta | | TABLE(name text, version text, author text, website text) + wrappers | public | wasm_fdw_validator | options text[], catalog oid | void + xml2 | public | xml_encode_special_chars | text | text + xml2 | public | xml_valid | text | boolean + xml2 | public | xpath_bool | text, text | boolean + xml2 | public | xpath_list | text, text | text + xml2 | public | xpath_list | text, text, text | text + xml2 | public | xpath_nodeset | text, text | text + xml2 | public | xpath_nodeset | text, text, text | text + xml2 | public | xpath_nodeset | text, text, text, text | text + xml2 | public | xpath_number | text, text | real + xml2 | public | 
xpath_string | text, text | text
+ xml2 | public | xpath_table | text, text, text, text, text | SETOF record
+ xml2 | public | xslt_process | text, text | text
+ xml2 | public | xslt_process | text, text, text | text
+(4747 rows)
+
+/*
+
+Monitor extension public table/view/matview/index interface
+
+*/
+select
+  e.extname as extension_name,
+  n.nspname as schema_name,
+  pc.relname as entity_name,
+  pa.attname
+from
+  pg_catalog.pg_class pc
+  join pg_catalog.pg_namespace n
+    on n.oid = pc.relnamespace
+  join pg_catalog.pg_depend d
+    on d.objid = pc.oid
+  join pg_catalog.pg_extension e
+    on e.oid = d.refobjid
+  left join pg_catalog.pg_attribute pa
+    on pa.attrelid = pc.oid
+    and pa.attnum > 0
+    and not pa.attisdropped
+where
+  d.deptype = 'e'
+  and pc.relkind in ('r', 'v', 'm', 'i')
+order by
+  e.extname,
+  n.nspname,
+  pc.relname,
+  pa.attname;
+ extension_name | schema_name | entity_name | attname
+------------------------------+-------------+-------------------------+------------------------
+ address_standardizer_data_us | public | us_gaz | id
+ address_standardizer_data_us | public | us_gaz | is_custom
+ address_standardizer_data_us | public | us_gaz | seq
+ address_standardizer_data_us | public | us_gaz | stdword
+ address_standardizer_data_us | public | us_gaz | token
+ address_standardizer_data_us | public | us_gaz | word
+ address_standardizer_data_us | public | us_lex | id
+ address_standardizer_data_us | public | us_lex | is_custom
+ address_standardizer_data_us | public | us_lex | seq
+ address_standardizer_data_us | public | us_lex | stdword
+ address_standardizer_data_us | public | us_lex | token
+ address_standardizer_data_us | public | us_lex | word
+ address_standardizer_data_us | public | us_rules | id
+ address_standardizer_data_us | public | us_rules | is_custom
+ address_standardizer_data_us | public | us_rules | rule
+ hypopg | public | hypopg_hidden_indexes | am_name
+ hypopg | public | hypopg_hidden_indexes | index_name
+ hypopg | public | hypopg_hidden_indexes | indexrelid
+ hypopg | public | hypopg_hidden_indexes | is_hypo
+ hypopg | public | hypopg_hidden_indexes | schema_name
+ hypopg | public | hypopg_hidden_indexes | table_name
+ hypopg | public | hypopg_list_indexes | am_name
+ hypopg | public | hypopg_list_indexes | index_name
+ hypopg | public | hypopg_list_indexes | indexrelid
+ hypopg | public | hypopg_list_indexes | schema_name
+ hypopg | public | hypopg_list_indexes | table_name
+ orioledb | public | orioledb_index | datoid
+ orioledb | public | orioledb_index | description
+ orioledb | public | orioledb_index | index_relnode
+ orioledb | public | orioledb_index | index_reloid
+ orioledb | public | orioledb_index | index_type
+ orioledb | public | orioledb_index | name
+ orioledb | public | orioledb_index | table_relnode
+ orioledb | public | orioledb_index | table_reloid
+ orioledb | public | orioledb_index_descr | datoid
+ orioledb | public | orioledb_index_descr | refcnt
+ orioledb | public | orioledb_index_descr | relnode
+ orioledb | public | orioledb_index_descr | reloid
+ orioledb | public | orioledb_table | datoid
+ orioledb | public | orioledb_table | description
+ orioledb | public | orioledb_table | relnode
+ orioledb | public | orioledb_table | reloid
+ orioledb | public | orioledb_table_descr | datoid
+ orioledb | public | orioledb_table_descr | refcnt
+ orioledb | public | orioledb_table_descr | relnode
+ orioledb | public | orioledb_table_descr | reloid
+ pg_buffercache | public | pg_buffercache | bufferid
+ pg_buffercache | public | pg_buffercache | isdirty
+ pg_buffercache | public | pg_buffercache | pinning_backends
+ pg_buffercache | public | pg_buffercache | relblocknumber
+ pg_buffercache | public | pg_buffercache | reldatabase
+ pg_buffercache | public | pg_buffercache | relfilenode
+ pg_buffercache | public | pg_buffercache | relforknumber
+ pg_buffercache | public | pg_buffercache | reltablespace
+ pg_buffercache | public | pg_buffercache | usagecount
+ pg_net | net | _http_response | content
+ pg_net | net | _http_response | content_type
+ pg_net | net | _http_response | created
+ pg_net | net | _http_response | error_msg
+ pg_net | net | _http_response | headers
+ pg_net | net | _http_response | id
+ pg_net | net | _http_response | status_code
+ pg_net | net | _http_response | timed_out
+ pg_net | net | http_request_queue | body
+ pg_net | net | http_request_queue | headers
+ pg_net | net | http_request_queue | id
+ pg_net | net | http_request_queue | method
+ pg_net | net | http_request_queue | timeout_milliseconds
+ pg_net | net | http_request_queue | url
+ pg_repack | repack | primary_keys | indexrelid
+ pg_repack | repack | primary_keys | indrelid
+ pg_repack | repack | tables | alter_col_storage
+ pg_repack | repack | tables | ckey
+ pg_repack | repack | tables | ckid
+ pg_repack | repack | tables | copy_data
+ pg_repack | repack | tables | create_log
+ pg_repack | repack | tables | create_pktype
+ pg_repack | repack | tables | create_table
+ pg_repack | repack | tables | create_trigger
+ pg_repack | repack | tables | delete_log
+ pg_repack | repack | tables | drop_columns
+ pg_repack | repack | tables | enable_trigger
+ pg_repack | repack | tables | lock_table
+ pg_repack | repack | tables | pkid
+ pg_repack | repack | tables | relid
+ pg_repack | repack | tables | relname
+ pg_repack | repack | tables | reltoastidxid
+ pg_repack | repack | tables | reltoastrelid
+ pg_repack | repack | tables | schemaname
+ pg_repack | repack | tables | sql_delete
+ pg_repack | repack | tables | sql_insert
+ pg_repack | repack | tables | sql_peek
+ pg_repack | repack | tables | sql_pop
+ pg_repack | repack | tables | sql_update
+ pg_repack | repack | tables | tablespace_orig
+ pg_stat_monitor | public | pg_stat_monitor | application_name
+ pg_stat_monitor | public | pg_stat_monitor | bucket
+ pg_stat_monitor | public | pg_stat_monitor | bucket_done
+ pg_stat_monitor | public | pg_stat_monitor | bucket_start_time
+ pg_stat_monitor | public | pg_stat_monitor | calls
+ pg_stat_monitor | public | pg_stat_monitor | client_ip
+ pg_stat_monitor | public | pg_stat_monitor | cmd_type
+ pg_stat_monitor | public | pg_stat_monitor | cmd_type_text
+ pg_stat_monitor | public | pg_stat_monitor | comments
+ pg_stat_monitor | public | pg_stat_monitor | cpu_sys_time
+ pg_stat_monitor | public | pg_stat_monitor | cpu_user_time
+ pg_stat_monitor | public | pg_stat_monitor | datname
+ pg_stat_monitor | public | pg_stat_monitor | dbid
+ pg_stat_monitor | public | pg_stat_monitor | elevel
+ pg_stat_monitor | public | pg_stat_monitor | jit_deform_count
+ pg_stat_monitor | public | pg_stat_monitor | jit_deform_time
+ pg_stat_monitor | public | pg_stat_monitor | jit_emission_count
+ pg_stat_monitor | public | pg_stat_monitor | jit_emission_time
+ pg_stat_monitor | public | pg_stat_monitor | jit_functions
+ pg_stat_monitor | public | pg_stat_monitor | jit_generation_time
+ pg_stat_monitor | public | pg_stat_monitor | jit_inlining_count
+ pg_stat_monitor | public | pg_stat_monitor | jit_inlining_time
+ pg_stat_monitor | public | pg_stat_monitor |
jit_optimization_count + pg_stat_monitor | public | pg_stat_monitor | jit_optimization_time + pg_stat_monitor | public | pg_stat_monitor | local_blk_read_time + pg_stat_monitor | public | pg_stat_monitor | local_blk_write_time + pg_stat_monitor | public | pg_stat_monitor | local_blks_dirtied + pg_stat_monitor | public | pg_stat_monitor | local_blks_hit + pg_stat_monitor | public | pg_stat_monitor | local_blks_read + pg_stat_monitor | public | pg_stat_monitor | local_blks_written + pg_stat_monitor | public | pg_stat_monitor | max_exec_time + pg_stat_monitor | public | pg_stat_monitor | max_plan_time + pg_stat_monitor | public | pg_stat_monitor | mean_exec_time + pg_stat_monitor | public | pg_stat_monitor | mean_plan_time + pg_stat_monitor | public | pg_stat_monitor | message + pg_stat_monitor | public | pg_stat_monitor | min_exec_time + pg_stat_monitor | public | pg_stat_monitor | min_plan_time + pg_stat_monitor | public | pg_stat_monitor | minmax_stats_since + pg_stat_monitor | public | pg_stat_monitor | pgsm_query_id + pg_stat_monitor | public | pg_stat_monitor | planid + pg_stat_monitor | public | pg_stat_monitor | plans + pg_stat_monitor | public | pg_stat_monitor | query + pg_stat_monitor | public | pg_stat_monitor | query_plan + pg_stat_monitor | public | pg_stat_monitor | queryid + pg_stat_monitor | public | pg_stat_monitor | relations + pg_stat_monitor | public | pg_stat_monitor | resp_calls + pg_stat_monitor | public | pg_stat_monitor | rows + pg_stat_monitor | public | pg_stat_monitor | shared_blk_read_time + pg_stat_monitor | public | pg_stat_monitor | shared_blk_write_time + pg_stat_monitor | public | pg_stat_monitor | shared_blks_dirtied + pg_stat_monitor | public | pg_stat_monitor | shared_blks_hit + pg_stat_monitor | public | pg_stat_monitor | shared_blks_read + pg_stat_monitor | public | pg_stat_monitor | shared_blks_written + pg_stat_monitor | public | pg_stat_monitor | sqlcode + pg_stat_monitor | public | pg_stat_monitor | stats_since + pg_stat_monitor | public | pg_stat_monitor | stddev_exec_time + pg_stat_monitor | public | pg_stat_monitor | stddev_plan_time + pg_stat_monitor | public | pg_stat_monitor | temp_blk_read_time + pg_stat_monitor | public | pg_stat_monitor | temp_blk_write_time + pg_stat_monitor | public | pg_stat_monitor | temp_blks_read + pg_stat_monitor | public | pg_stat_monitor | temp_blks_written + pg_stat_monitor | public | pg_stat_monitor | top_query + pg_stat_monitor | public | pg_stat_monitor | top_queryid + pg_stat_monitor | public | pg_stat_monitor | toplevel + pg_stat_monitor | public | pg_stat_monitor | total_exec_time + pg_stat_monitor | public | pg_stat_monitor | total_plan_time + pg_stat_monitor | public | pg_stat_monitor | userid + pg_stat_monitor | public | pg_stat_monitor | username + pg_stat_monitor | public | pg_stat_monitor | wal_bytes + pg_stat_monitor | public | pg_stat_monitor | wal_fpi + pg_stat_monitor | public | pg_stat_monitor | wal_records + pg_stat_statements | extensions | pg_stat_statements | calls + pg_stat_statements | extensions | pg_stat_statements | dbid + pg_stat_statements | extensions | pg_stat_statements | jit_deform_count + pg_stat_statements | extensions | pg_stat_statements | jit_deform_time + pg_stat_statements | extensions | pg_stat_statements | jit_emission_count + pg_stat_statements | extensions | pg_stat_statements | jit_emission_time + pg_stat_statements | extensions | pg_stat_statements | jit_functions + pg_stat_statements | extensions | pg_stat_statements | jit_generation_time + pg_stat_statements | 
extensions | pg_stat_statements | jit_inlining_count + pg_stat_statements | extensions | pg_stat_statements | jit_inlining_time + pg_stat_statements | extensions | pg_stat_statements | jit_optimization_count + pg_stat_statements | extensions | pg_stat_statements | jit_optimization_time + pg_stat_statements | extensions | pg_stat_statements | local_blk_read_time + pg_stat_statements | extensions | pg_stat_statements | local_blk_write_time + pg_stat_statements | extensions | pg_stat_statements | local_blks_dirtied + pg_stat_statements | extensions | pg_stat_statements | local_blks_hit + pg_stat_statements | extensions | pg_stat_statements | local_blks_read + pg_stat_statements | extensions | pg_stat_statements | local_blks_written + pg_stat_statements | extensions | pg_stat_statements | max_exec_time + pg_stat_statements | extensions | pg_stat_statements | max_plan_time + pg_stat_statements | extensions | pg_stat_statements | mean_exec_time + pg_stat_statements | extensions | pg_stat_statements | mean_plan_time + pg_stat_statements | extensions | pg_stat_statements | min_exec_time + pg_stat_statements | extensions | pg_stat_statements | min_plan_time + pg_stat_statements | extensions | pg_stat_statements | minmax_stats_since + pg_stat_statements | extensions | pg_stat_statements | plans + pg_stat_statements | extensions | pg_stat_statements | query + pg_stat_statements | extensions | pg_stat_statements | queryid + pg_stat_statements | extensions | pg_stat_statements | rows + pg_stat_statements | extensions | pg_stat_statements | shared_blk_read_time + pg_stat_statements | extensions | pg_stat_statements | shared_blk_write_time + pg_stat_statements | extensions | pg_stat_statements | shared_blks_dirtied + pg_stat_statements | extensions | pg_stat_statements | shared_blks_hit + pg_stat_statements | extensions | pg_stat_statements | shared_blks_read + pg_stat_statements | extensions | pg_stat_statements | shared_blks_written + pg_stat_statements | extensions | pg_stat_statements | stats_since + pg_stat_statements | extensions | pg_stat_statements | stddev_exec_time + pg_stat_statements | extensions | pg_stat_statements | stddev_plan_time + pg_stat_statements | extensions | pg_stat_statements | temp_blk_read_time + pg_stat_statements | extensions | pg_stat_statements | temp_blk_write_time + pg_stat_statements | extensions | pg_stat_statements | temp_blks_read + pg_stat_statements | extensions | pg_stat_statements | temp_blks_written + pg_stat_statements | extensions | pg_stat_statements | toplevel + pg_stat_statements | extensions | pg_stat_statements | total_exec_time + pg_stat_statements | extensions | pg_stat_statements | total_plan_time + pg_stat_statements | extensions | pg_stat_statements | userid + pg_stat_statements | extensions | pg_stat_statements | wal_bytes + pg_stat_statements | extensions | pg_stat_statements | wal_fpi + pg_stat_statements | extensions | pg_stat_statements | wal_records + pg_stat_statements | extensions | pg_stat_statements_info | dealloc + pg_stat_statements | extensions | pg_stat_statements_info | stats_reset + pg_tle | pgtle | feature_info | feature + pg_tle | pgtle | feature_info | obj_identity + pg_tle | pgtle | feature_info | proname + pg_tle | pgtle | feature_info | schema_name + pgmq | pgmq | a_foo | archived_at + pgmq | pgmq | a_foo | enqueued_at + pgmq | pgmq | a_foo | message + pgmq | pgmq | a_foo | msg_id + pgmq | pgmq | a_foo | read_ct + pgmq | pgmq | a_foo | vt + pgmq | pgmq | meta | created_at + pgmq | pgmq | meta | is_partitioned + pgmq | pgmq | 
meta | is_unlogged + pgmq | pgmq | meta | queue_name + pgmq | pgmq | q_foo | enqueued_at + pgmq | pgmq | q_foo | message + pgmq | pgmq | q_foo | msg_id + pgmq | pgmq | q_foo | read_ct + pgmq | pgmq | q_foo | vt + pgsodium | pgsodium | decrypted_key | associated_data + pgsodium | pgsodium | decrypted_key | comment + pgsodium | pgsodium | decrypted_key | created + pgsodium | pgsodium | decrypted_key | decrypted_raw_key + pgsodium | pgsodium | decrypted_key | expires + pgsodium | pgsodium | decrypted_key | id + pgsodium | pgsodium | decrypted_key | key_context + pgsodium | pgsodium | decrypted_key | key_id + pgsodium | pgsodium | decrypted_key | key_type + pgsodium | pgsodium | decrypted_key | name + pgsodium | pgsodium | decrypted_key | parent_key + pgsodium | pgsodium | decrypted_key | raw_key + pgsodium | pgsodium | decrypted_key | raw_key_nonce + pgsodium | pgsodium | decrypted_key | status + pgsodium | pgsodium | key | associated_data + pgsodium | pgsodium | key | comment + pgsodium | pgsodium | key | created + pgsodium | pgsodium | key | expires + pgsodium | pgsodium | key | id + pgsodium | pgsodium | key | key_context + pgsodium | pgsodium | key | key_id + pgsodium | pgsodium | key | key_type + pgsodium | pgsodium | key | name + pgsodium | pgsodium | key | parent_key + pgsodium | pgsodium | key | raw_key + pgsodium | pgsodium | key | raw_key_nonce + pgsodium | pgsodium | key | status + pgsodium | pgsodium | key | user_data + pgsodium | pgsodium | mask_columns | associated_columns + pgsodium | pgsodium | mask_columns | attname + pgsodium | pgsodium | mask_columns | attrelid + pgsodium | pgsodium | mask_columns | format_type + pgsodium | pgsodium | mask_columns | key_id + pgsodium | pgsodium | mask_columns | key_id_column + pgsodium | pgsodium | mask_columns | nonce_column + pgsodium | pgsodium | masking_rule | associated_columns + pgsodium | pgsodium | masking_rule | attname + pgsodium | pgsodium | masking_rule | attnum + pgsodium | pgsodium | masking_rule | attrelid + pgsodium | pgsodium | masking_rule | col_description + pgsodium | pgsodium | masking_rule | format_type + pgsodium | pgsodium | masking_rule | key_id + pgsodium | pgsodium | masking_rule | key_id_column + pgsodium | pgsodium | masking_rule | nonce_column + pgsodium | pgsodium | masking_rule | priority + pgsodium | pgsodium | masking_rule | relname + pgsodium | pgsodium | masking_rule | relnamespace + pgsodium | pgsodium | masking_rule | security_invoker + pgsodium | pgsodium | masking_rule | view_name + pgsodium | pgsodium | valid_key | associated_data + pgsodium | pgsodium | valid_key | created + pgsodium | pgsodium | valid_key | expires + pgsodium | pgsodium | valid_key | id + pgsodium | pgsodium | valid_key | key_context + pgsodium | pgsodium | valid_key | key_id + pgsodium | pgsodium | valid_key | key_type + pgsodium | pgsodium | valid_key | name + pgsodium | pgsodium | valid_key | status + pgtap | public | pg_all_foreign_keys | fk_columns + pgtap | public | pg_all_foreign_keys | fk_constraint_name + pgtap | public | pg_all_foreign_keys | fk_schema_name + pgtap | public | pg_all_foreign_keys | fk_table_name + pgtap | public | pg_all_foreign_keys | fk_table_oid + pgtap | public | pg_all_foreign_keys | is_deferrable + pgtap | public | pg_all_foreign_keys | is_deferred + pgtap | public | pg_all_foreign_keys | match_type + pgtap | public | pg_all_foreign_keys | on_delete + pgtap | public | pg_all_foreign_keys | on_update + pgtap | public | pg_all_foreign_keys | pk_columns + pgtap | public | pg_all_foreign_keys | 
pk_constraint_name + pgtap | public | pg_all_foreign_keys | pk_index_name + pgtap | public | pg_all_foreign_keys | pk_schema_name + pgtap | public | pg_all_foreign_keys | pk_table_name + pgtap | public | pg_all_foreign_keys | pk_table_oid + pgtap | public | tap_funky | args + pgtap | public | tap_funky | is_definer + pgtap | public | tap_funky | is_strict + pgtap | public | tap_funky | is_visible + pgtap | public | tap_funky | kind + pgtap | public | tap_funky | langoid + pgtap | public | tap_funky | name + pgtap | public | tap_funky | oid + pgtap | public | tap_funky | owner + pgtap | public | tap_funky | returns + pgtap | public | tap_funky | returns_set + pgtap | public | tap_funky | schema + pgtap | public | tap_funky | volatility + postgis | public | geography_columns | coord_dimension + postgis | public | geography_columns | f_geography_column + postgis | public | geography_columns | f_table_catalog + postgis | public | geography_columns | f_table_name + postgis | public | geography_columns | f_table_schema + postgis | public | geography_columns | srid + postgis | public | geography_columns | type + postgis | public | geometry_columns | coord_dimension + postgis | public | geometry_columns | f_geometry_column + postgis | public | geometry_columns | f_table_catalog + postgis | public | geometry_columns | f_table_name + postgis | public | geometry_columns | f_table_schema + postgis | public | geometry_columns | srid + postgis | public | geometry_columns | type + postgis | public | spatial_ref_sys | auth_name + postgis | public | spatial_ref_sys | auth_srid + postgis | public | spatial_ref_sys | proj4text + postgis | public | spatial_ref_sys | srid + postgis | public | spatial_ref_sys | srtext + postgis_raster | public | raster_columns | blocksize_x + postgis_raster | public | raster_columns | blocksize_y + postgis_raster | public | raster_columns | extent + postgis_raster | public | raster_columns | nodata_values + postgis_raster | public | raster_columns | num_bands + postgis_raster | public | raster_columns | out_db + postgis_raster | public | raster_columns | pixel_types + postgis_raster | public | raster_columns | r_raster_column + postgis_raster | public | raster_columns | r_table_catalog + postgis_raster | public | raster_columns | r_table_name + postgis_raster | public | raster_columns | r_table_schema + postgis_raster | public | raster_columns | regular_blocking + postgis_raster | public | raster_columns | same_alignment + postgis_raster | public | raster_columns | scale_x + postgis_raster | public | raster_columns | scale_y + postgis_raster | public | raster_columns | spatial_index + postgis_raster | public | raster_columns | srid + postgis_raster | public | raster_overviews | o_raster_column + postgis_raster | public | raster_overviews | o_table_catalog + postgis_raster | public | raster_overviews | o_table_name + postgis_raster | public | raster_overviews | o_table_schema + postgis_raster | public | raster_overviews | overview_factor + postgis_raster | public | raster_overviews | r_raster_column + postgis_raster | public | raster_overviews | r_table_catalog + postgis_raster | public | raster_overviews | r_table_name + postgis_raster | public | raster_overviews | r_table_schema + postgis_topology | topology | layer | child_id + postgis_topology | topology | layer | feature_column + postgis_topology | topology | layer | feature_type + postgis_topology | topology | layer | layer_id + postgis_topology | topology | layer | level + postgis_topology | topology | layer | schema_name 
+ postgis_topology | topology | layer | table_name + postgis_topology | topology | layer | topology_id + postgis_topology | topology | topology | hasz + postgis_topology | topology | topology | id + postgis_topology | topology | topology | name + postgis_topology | topology | topology | precision + postgis_topology | topology | topology | srid + supabase_vault | vault | secrets | created_at + supabase_vault | vault | secrets | description + supabase_vault | vault | secrets | id + supabase_vault | vault | secrets | key_id + supabase_vault | vault | secrets | name + supabase_vault | vault | secrets | nonce + supabase_vault | vault | secrets | secret + supabase_vault | vault | secrets | updated_at + wrappers | public | wrappers_fdw_stats | bytes_in + wrappers | public | wrappers_fdw_stats | bytes_out + wrappers | public | wrappers_fdw_stats | create_times + wrappers | public | wrappers_fdw_stats | created_at + wrappers | public | wrappers_fdw_stats | fdw_name + wrappers | public | wrappers_fdw_stats | metadata + wrappers | public | wrappers_fdw_stats | rows_in + wrappers | public | wrappers_fdw_stats | rows_out + wrappers | public | wrappers_fdw_stats | updated_at +(398 rows) + diff --git a/postgres_15.8.1.044/nix/tests/expected/z_17_pg_stat_monitor.out b/postgres_15.8.1.044/nix/tests/expected/z_17_pg_stat_monitor.out new file mode 100644 index 0000000..8b90c12 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/z_17_pg_stat_monitor.out @@ -0,0 +1,10 @@ +select + * +from + pg_stat_monitor +where + false; + bucket | bucket_start_time | userid | username | dbid | datname | client_ip | pgsm_query_id | queryid | toplevel | top_queryid | query | comments | planid | query_plan | top_query | application_name | relations | cmd_type | cmd_type_text | elevel | sqlcode | message | calls | total_exec_time | min_exec_time | max_exec_time | mean_exec_time | stddev_exec_time | rows | shared_blks_hit | shared_blks_read | shared_blks_dirtied | shared_blks_written | local_blks_hit | local_blks_read | local_blks_dirtied | local_blks_written | temp_blks_read | temp_blks_written | shared_blk_read_time | shared_blk_write_time | local_blk_read_time | local_blk_write_time | temp_blk_read_time | temp_blk_write_time | resp_calls | cpu_user_time | cpu_sys_time | wal_records | wal_fpi | wal_bytes | bucket_done | plans | total_plan_time | min_plan_time | max_plan_time | mean_plan_time | stddev_plan_time | jit_functions | jit_generation_time | jit_inlining_count | jit_inlining_time | jit_optimization_count | jit_optimization_time | jit_emission_count | jit_emission_time | jit_deform_count | jit_deform_time | stats_since | minmax_stats_since 
+--------+-------------------+--------+----------+------+---------+-----------+---------------+---------+----------+-------------+-------+----------+--------+------------+-----------+------------------+-----------+----------+---------------+--------+---------+---------+-------+-----------------+---------------+---------------+----------------+------------------+------+-----------------+------------------+---------------------+---------------------+----------------+-----------------+--------------------+--------------------+----------------+-------------------+----------------------+-----------------------+---------------------+----------------------+--------------------+---------------------+------------+---------------+--------------+-------------+---------+-----------+-------------+-------+-----------------+---------------+---------------+----------------+------------------+---------------+---------------------+--------------------+-------------------+------------------------+-----------------------+--------------------+-------------------+------------------+-----------------+-------------+-------------------- +(0 rows) + diff --git a/postgres_15.8.1.044/nix/tests/expected/z_17_pgvector.out b/postgres_15.8.1.044/nix/tests/expected/z_17_pgvector.out new file mode 100644 index 0000000..2c1cb10 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/expected/z_17_pgvector.out @@ -0,0 +1,59 @@ +/* +This test excludes indexes shipped with pgvector because orioledb doesn't support them yet +*/ +create schema v; +create table v.items( + id serial primary key, + embedding vector(3), + half_embedding halfvec(3), + bit_embedding bit(3), + sparse_embedding sparsevec(3) +); +-- Populate some records +insert into v.items( + embedding, + half_embedding, + bit_embedding, + sparse_embedding +) +values + ('[1,2,3]', '[1,2,3]', '101', '{1:4}/3'), + ('[2,3,4]', '[2,3,4]', '010', '{1:7,3:0}/3'); +-- Test op types +select + * +from + v.items +order by + embedding <-> '[2,3,5]', + embedding <=> '[2,3,5]', + embedding <+> '[2,3,5]', + embedding <#> '[2,3,5]', + half_embedding <-> '[2,3,5]', + half_embedding <=> '[2,3,5]', + half_embedding <+> '[2,3,5]', + half_embedding <#> '[2,3,5]', + sparse_embedding <-> '{2:4,3:1}/3', + sparse_embedding <=> '{2:4,3:1}/3', + sparse_embedding <+> '{2:4,3:1}/3', + sparse_embedding <#> '{2:4,3:1}/3', + bit_embedding <~> '011'; + id | embedding | half_embedding | bit_embedding | sparse_embedding +----+-----------+----------------+---------------+------------------ + 2 | [2,3,4] | [2,3,4] | 010 | {1:7}/3 + 1 | [1,2,3] | [1,2,3] | 101 | {1:4}/3 +(2 rows) + +select + avg(embedding), + avg(half_embedding) +from + v.items; + avg | avg +---------------+--------------- + [1.5,2.5,3.5] | [1.5,2.5,3.5] +(1 row) + +-- Cleanup +drop schema v cascade; +NOTICE: drop cascades to table v.items diff --git a/postgres_15.8.1.044/nix/tests/migrations/data.sql b/postgres_15.8.1.044/nix/tests/migrations/data.sql new file mode 100644 index 0000000..36396e6 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/migrations/data.sql @@ -0,0 +1,21 @@ +create table account( + id int primary key, + is_verified bool, + name text, + phone text +); + +insert into public.account(id, is_verified, name, phone) +values + (1, true, 'foo', '1111111111'), + (2, true, 'bar', null), + (3, false, 'baz', '33333333333'); + +select id as test_new_key_id from pgsodium.create_key(name:='test_new_key') \gset + +select vault.create_secret ( + 's3kr3t_k3y', 'a_name', 'this is the foo secret key') test_secret_id \gset + +select 
vault.create_secret ( + 's3kr3t_k3y_2', 'another_name', 'this is another foo key', + (select id from pgsodium.key where name = 'test_new_key')) test_secret_id_2 \gset diff --git a/postgres_15.8.1.044/nix/tests/migrations/pgmq.sql b/postgres_15.8.1.044/nix/tests/migrations/pgmq.sql new file mode 100644 index 0000000..d0121a2 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/migrations/pgmq.sql @@ -0,0 +1,12 @@ +select + pgmq.create('Foo'); + +select + * +from + pgmq.send( + queue_name:='Foo', + msg:='{"foo": "bar1"}' + ); + + diff --git a/postgres_15.8.1.044/nix/tests/postgresql.conf.in b/postgres_15.8.1.044/nix/tests/postgresql.conf.in new file mode 100644 index 0000000..ef860af --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/postgresql.conf.in @@ -0,0 +1,800 @@ +# ----------------------------- +# PostgreSQL configuration file + +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, run "pg_ctl reload", or execute +# "SELECT pg_reload_conf()". Some parameters, which are marked below, +# require a server shutdown and restart to take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". Some parameters can be changed at run time +# with the "SET" SQL command. +# +# Memory units: B = bytes Time units: us = microseconds +# kB = kilobytes ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. + +#data_directory = 'ConfigDir' # use data in another directory + # (change requires restart) +#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file + # (change requires restart) +#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. 
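+# (Illustrative aside, not part of the stock file.) For the reloadable
+# settings in this file, an edit on a running cluster can be applied with
+# either of the following; the data directory path is a placeholder:
+#   pg_ctl reload -D "$PGDATA"
+#   psql -c "SELECT pg_reload_conf();"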
+#external_pid_file = '' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' # what IP address(es) to listen on; +#port = @PGSQL_DEFAULT_PORT@ # (change requires restart) +max_connections = 100 # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +unix_socket_directories = '/tmp' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - TCP settings - +# see "man tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default +#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; + # 0 selects the system default + +#client_connection_check_interval = 0 # time between checks for client + # disconnection while running queries; + # 0 for never + +# - Authentication - + +#authentication_timeout = 1min # 1s-600s +#password_encryption = scram-sha-256 # scram-sha-256 or md5 +#db_user_namespace = off + +# GSSAPI using Kerberos +#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' +#krb_caseins_users = off + +# - SSL - + +#ssl = off +#ssl_ca_file = '' +#ssl_cert_file = 'server.crt' +#ssl_crl_file = '' +#ssl_crl_dir = '' +#ssl_key_file = 'server.key' +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers +#ssl_prefer_server_ciphers = on +#ssl_ecdh_curve = 'prime256v1' +#ssl_min_protocol_version = 'TLSv1.2' +#ssl_max_protocol_version = '' +#ssl_dh_params_file = '' +#ssl_passphrase_command = '' +#ssl_passphrase_command_supports_reload = off + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 128MB # min 128kB + # (change requires restart) +#huge_pages = try # on, off, or try + # (change requires restart) +#huge_page_size = 0 # zero for system default + # (change requires restart) +#temp_buffers = 8MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. 
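+# (Illustrative sketch, not part of the stock file; the table and ids are
+# made up.) Prepared transactions back two-phase commit. If the setting above
+# were raised (e.g. max_prepared_transactions = 10, restart required), a
+# transaction could be staged and resolved like:
+#   BEGIN;
+#   UPDATE accounts SET balance = balance - 10 WHERE id = 1;
+#   PREPARE TRANSACTION 'tx_1';
+#   COMMIT PREPARED 'tx_1';    -- or ROLLBACK PREPARED 'tx_1'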
+#work_mem = 4MB # min 64kB +#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem +#maintenance_work_mem = 64MB # min 1MB +#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +#logical_decoding_work_mem = 64MB # min 64kB +#max_stack_depth = 2MB # min 100kB +#shared_memory_type = mmap # the default is the first option + # supported by the operating system: + # mmap + # sysv + # windows + # (change requires restart) +dynamic_shared_memory_type = posix # the default is the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # (change requires restart) +#min_dynamic_shared_memory = 0MB # (change requires restart) + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kilobytes, or -1 for no limit + +# - Kernel Resources - + +#max_files_per_process = 1000 # min 64 + # (change requires restart) + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables) +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 2 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 512kB # measured in pages, 0 disables + +# - Asynchronous Behavior - + +#backend_flush_after = 0 # measured in pages, 0 disables +#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching +#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching +#max_worker_processes = 8 # (change requires restart) +#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers +#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers +#max_parallel_workers = 8 # maximum number of max_worker_processes that + # can be used in parallel operations +#parallel_leader_participation = on +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) + + +#------------------------------------------------------------------------------ +# WRITE-AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +wal_level = logical # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux and FreeBSD) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +wal_log_hints = on # also do full page writes of non-critical updates + # (change requires restart) +#wal_compression = off # enable compression of full-page writes +#wal_init_zero = on # zero-fill new WAL files +#wal_recycle = on # recycle WAL files +#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables +#wal_skip_threshold = 2MB + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +#checkpoint_timeout = 5min # 
range 30s-1d +#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_flush_after = 256kB # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables +max_wal_size = 1GB +min_wal_size = 80MB + +# - Archiving - + +#archive_mode = off # enables archiving; off, on, or always + # (change requires restart) +#archive_command = '' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a logfile segment switch after this + # number of seconds; 0 disables + +# - Archive Recovery - + +# These are only used in recovery mode. + +#restore_command = '' # command to use to restore an archived logfile segment + # placeholders: %p = path of file to restore + # %f = file name only + # e.g. 'cp /mnt/server/archivedir/%f %p' +#archive_cleanup_command = '' # command to execute at every restartpoint +#recovery_end_command = '' # command to execute at completion of recovery + +# - Recovery Target - + +# Set these only when performing a targeted recovery. + +#recovery_target = '' # 'immediate' to end recovery as soon as a + # consistent state is reached + # (change requires restart) +#recovery_target_name = '' # the named restore point to which recovery will proceed + # (change requires restart) +#recovery_target_time = '' # the time stamp up to which recovery will proceed + # (change requires restart) +#recovery_target_xid = '' # the transaction ID up to which recovery will proceed + # (change requires restart) +#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed + # (change requires restart) +#recovery_target_inclusive = on # Specifies whether to stop: + # just after the specified recovery target (on) + # just before the recovery target (off) + # (change requires restart) +#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID + # (change requires restart) +#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' + # (change requires restart) + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Servers - + +# Set these on the primary and on any standby that will send replication data. + +#max_wal_senders = 10 # max number of walsender processes + # (change requires restart) +#max_replication_slots = 10 # max number of replication slots + # (change requires restart) +#wal_keep_size = 0 # in megabytes; 0 disables +#max_slot_wal_keep_size = -1 # in megabytes; -1 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Primary Server - + +# These settings are ignored on a standby server. + +#synchronous_standby_names = '' # standby servers that provide sync rep + # method to choose sync standbys, number of sync standbys, + # and comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a primary server. 
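+# (Illustrative sketch, not part of the stock file; host, user, and slot name
+# are placeholders.) A streaming standby would typically uncomment the two
+# settings below with values along these lines:
+#   primary_conninfo = 'host=primary.example.com port=5432 user=replicator'
+#   primary_slot_name = 'standby_1'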
+ +#primary_conninfo = '' # connection string to sending server +#primary_slot_name = '' # replication slot on sending server +#promote_trigger_file = '' # file name whose presence ends recovery +#hot_standby = on # "off" disallows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name + # is not set +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from primary + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt +#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery + +# - Subscribers - + +# These settings are ignored on a publisher. + +#max_logical_replication_workers = 4 # taken from max_worker_processes + # (change requires restart) +#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_async_append = on +#enable_bitmapscan = on +#enable_gathermerge = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_incremental_sort = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_memoize = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_parallel_append = on +#enable_parallel_hash = on +#enable_partition_pruning = on +#enable_partitionwise_join = off +#enable_partitionwise_aggregate = off +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +#random_page_cost = 4.0 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#min_parallel_table_scan_size = 8MB +#min_parallel_index_scan_size = 512kB +#effective_cache_size = 4GB + +#jit_above_cost = 100000 # perform JIT compilation if available + # and query more expensive than this; + # -1 disables +#jit_inline_above_cost = 500000 # inline small functions if query is + # more expensive than this; -1 disables +#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if + # query is more expensive than this; + # -1 disables + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +#default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#jit = on # allow JIT compilation 
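+# (Illustrative aside, not part of the stock file; the query is arbitrary.)
+# Whether the JIT cost thresholds above fire for a given statement can be
+# checked with:
+#   EXPLAIN (ANALYZE) SELECT sum(i) FROM generate_series(1, 10000000) s(i);
+# The plan output ends with a "JIT:" block when compilation was used.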
+#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses +#plan_cache_mode = auto # auto, force_generic_plan or + # force_custom_plan + + +#------------------------------------------------------------------------------ +# REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +#log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, syslog, and eventlog, + # depending on platform. csvlog + # requires logging_collector to be on. + +# This is used when logging to stderr: +#logging_collector = off # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +#log_directory = 'log' # directory where log files are written, + # can be absolute or relative to PGDATA +#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, + # can include strftime() escapes +#log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +#log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 0 disables. +#log_rotation_size = 10MB # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. + +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (Windows): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + +#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements + # and their durations, > 0 logs only a sample of + # statements running at least this number + # of milliseconds; + # sample fraction is determined by log_statement_sample_rate + +#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding + # log_min_duration_sample to be logged; + # 1.0 logs all such statements, 0.0 never logs + + +#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements + # are logged regardless of their duration; 1.0 logs all + # statements from all transactions, 0.0 never logs + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_autovacuum_min_duration = -1 # log autovacuum activity; + # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. 
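+# (Illustrative aside, not part of the stock file; the threshold is an
+# arbitrary example.) To log every statement running at least 250 ms together
+# with its duration, log_min_duration_statement above would be set to:
+#   log_min_duration_statement = 250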
+#log_checkpoints = off +#log_connections = off +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +#log_line_prefix = '%m [%p] ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %b = backend type + # %p = process ID + # %P = process ID of parallel group leader + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %Q = query ID (0 if none or not computed) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. '<%u%%%d> ' +#log_lock_waits = off # log lock waits >= deadlock_timeout +#log_recovery_conflict_waits = off # log standby recovery conflict waits + # >= deadlock_timeout +#log_parameter_max_length = -1 # when logging statements, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_parameter_max_length_on_error = 0 # when logging an error, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_statement = 'none' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'America/Chicago' + + +#------------------------------------------------------------------------------ +# PROCESS TITLE +#------------------------------------------------------------------------------ + +#cluster_name = '' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# STATISTICS +#------------------------------------------------------------------------------ + +# - Query and Index Statistics Collector - + +#track_activities = on +#track_activity_query_size = 1024 # (change requires restart) +#track_counts = on +#track_io_timing = off +#track_wal_io_timing = off +#track_functions = none # none, pl, all +#stats_temp_directory = 'pg_stat_tmp' + + +# - Monitoring - + +#compute_query_id = auto +#log_statement_stats = off +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM +#------------------------------------------------------------------------------ + +#autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. 
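+# (Illustrative aside, not part of the stock file.) The per-table activity
+# these autovacuum settings produce can be inspected at run time with:
+#   SELECT relname, last_autovacuum, autovacuum_count FROM pg_stat_user_tables;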
+#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts + # before vacuum; -1 disables insert + # vacuums +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table + # size before insert vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error +#search_path = '"$user", public' # schema names +#row_security = on +#default_table_access_method = 'heap' +#default_tablespace = '' # a tablespace name, '' uses the default +#default_toast_compression = 'pglz' # 'pglz' or 'lz4' +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#idle_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_table_age = 150000000 +#vacuum_freeze_min_age = 50000000 +#vacuum_failsafe_age = 1600000000 +#vacuum_multixact_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_failsafe_age = 1600000000 +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_pending_list_limit = 4MB + +# - Locale and Formatting - + +datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +timezone = 'America/Chicago' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +#extra_float_digits = 1 # min -15, max 3; any value >0 actually + # selects precise output mode +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. 
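+# (Illustrative aside, not part of the stock file.) The locale values initdb
+# chose can be inspected at run time with:
+#   SHOW lc_messages;
+#   SELECT name, setting FROM pg_settings WHERE name LIKE 'lc_%';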
+lc_messages = 'C' # locale for system error message + # strings +lc_monetary = 'C' # locale for monetary formatting +lc_numeric = 'C' # locale for number formatting +lc_time = 'C' # locale for time formatting + +# default configuration for text search +default_text_search_config = 'pg_catalog.english' + +# - Shared Library Preloading - + +#local_preload_libraries = '' +#session_preload_libraries = '' +shared_preload_libraries = 'pg_stat_statements, pgaudit, plpgsql, plpgsql_check, pg_cron, pg_net, pgsodium, timescaledb, auto_explain, pg_tle, plan_filter, pg_backtrace' # (change requires restart) +jit_provider = 'llvmjit' # JIT library to use + + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#gin_fuzzy_search_limit = 0 + + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +#max_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_relation = -2 # negative values mean + # (max_pred_locks_per_transaction + # / -max_pred_locks_per_relation) - 1 +#max_pred_locks_per_page = 2 # min 0 + + +#------------------------------------------------------------------------------ +# VERSION AND PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#escape_string_warning = on +#lo_compat_privileges = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? +#data_sync_retry = off # retry or panic on failure to fsync + # data? + # (change requires restart) +#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+) + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. Note that these are directives, not variable +# assignments, so they can usefully be given more than once. + +#include_dir = '...' # include files ending in '.conf' from + # a directory, e.g., 'conf.d' +#include_if_exists = '...' # include file only if it exists +#include = '...' 
# include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here + +pgsodium.getkey_script = '@PGSODIUM_GETKEY_SCRIPT@' + +auto_explain.log_min_duration = 10s +cron.database_name = 'postgres' diff --git a/postgres_15.8.1.044/nix/tests/prime.sql b/postgres_15.8.1.044/nix/tests/prime.sql new file mode 100644 index 0000000..5116c41 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/prime.sql @@ -0,0 +1,85 @@ +-- disable notice messages because they differ between 15 and 17 +set client_min_messages = warning; + +create extension if not exists address_standardizer; +create extension if not exists address_standardizer_data_us; +create extension if not exists amcheck; +create extension if not exists autoinc; +create extension if not exists bloom; +create extension if not exists btree_gin; +create extension if not exists btree_gist; +create extension if not exists citext; +create extension if not exists cube; +create extension if not exists dblink; +create extension if not exists dict_int; +create extension if not exists dict_xsyn; +create extension if not exists earthdistance; +create extension if not exists file_fdw; +create extension if not exists fuzzystrmatch; +create extension if not exists http; +create extension if not exists hstore; +create extension if not exists hypopg; +create extension if not exists index_advisor; +create extension if not exists insert_username; +create extension if not exists intagg; +create extension if not exists intarray; +create extension if not exists isn; +create extension if not exists lo; +create extension if not exists ltree; +create extension if not exists moddatetime; +create extension if not exists pageinspect; +create extension if not exists pg_backtrace; +create extension if not exists pg_buffercache; + +/* +TODO: pg_cron is not enabled locally; enabling it +requires a change to postgresql.conf to set +cron.database_name = 'testing' +*/ +-- create extension if not exists pg_cron; + +create extension if not exists pg_net; +create extension if not exists pg_graphql; +create extension if not exists pg_freespacemap; +create extension if not exists pg_hashids; +create extension if not exists pg_prewarm; +create extension if not exists pgmq; +create extension if not exists pg_jsonschema; +create extension if not exists pg_repack; +create extension if not exists pg_stat_monitor; +create extension if not exists pg_stat_statements; +create extension if not exists pg_surgery; +create extension if not exists pg_tle; +create extension if not exists pg_trgm; +create extension if not exists pg_visibility; +create extension if not exists pg_walinspect; +create extension if not exists pgaudit; +create extension if not exists pgcrypto; +create extension if not exists pgtap; +create extension if not exists pgjwt; +create extension if not exists pgroonga; +create extension if not exists pgroonga_database; +create extension if not exists pgsodium; +create extension if not exists pgrowlocks; +create extension if not exists pgstattuple; +create extension if not exists plpgsql_check; +create extension if not exists postgis; +create extension if not exists postgis_raster; +create extension if not exists postgis_sfcgal; +create extension if not exists postgis_topology; +create extension if not exists pgrouting; -- requires postgis +create extension if not exists postgres_fdw; +create extension if not exists rum; +create
extension if not exists refint; +create extension if not exists seg; +create extension if not exists sslinfo; +create extension if not exists supabase_vault; +create extension if not exists tablefunc; +create extension if not exists tcn; +create extension if not exists tsm_system_rows; +-- create extension if not exists tsm_system_time; not supported in apache license +create extension if not exists unaccent; +create extension if not exists "uuid-ossp"; +create extension if not exists vector; +create extension if not exists wrappers; +create extension if not exists xml2; diff --git a/postgres_15.8.1.044/nix/tests/smoke/0000-hello-world.sql b/postgres_15.8.1.044/nix/tests/smoke/0000-hello-world.sql new file mode 100644 index 0000000..d6f002d --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/smoke/0000-hello-world.sql @@ -0,0 +1,10 @@ +-- Start transaction and plan the tests. +BEGIN; +SELECT plan(1); + +-- Run the tests. +SELECT pass( 'My test passed, w00t!' ); + +-- Finish the tests and clean up. +SELECT * FROM finish(); +ROLLBACK; diff --git a/postgres_15.8.1.044/nix/tests/smoke/0001-pg_graphql.sql b/postgres_15.8.1.044/nix/tests/smoke/0001-pg_graphql.sql new file mode 100644 index 0000000..80e3cb2 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/smoke/0001-pg_graphql.sql @@ -0,0 +1,59 @@ +-- Start transaction and plan the tests. +begin; + select plan(1); + + create extension if not exists pg_graphql; + + create table account( + id int primary key, + is_verified bool, + name text, + phone text + ); + + insert into public.account(id, is_verified, name, phone) + values + (1, true, 'foo', '1111111111'), + (2, true, 'bar', null), + (3, false, 'baz', '33333333333'); + + select is( + graphql.resolve($$ + { + accountCollection { + edges { + node { + id + } + } + } + } + $$), + '{ + "data": { + "accountCollection": { + "edges": [ + { + "node": { + "id": 1 + } + }, + { + "node": { + "id": 2 + } + }, + { + "node": { + "id": 3 + } + } + ] + } + } + }'::jsonb + ); + + + select * from finish(); +rollback; diff --git a/postgres_15.8.1.044/nix/tests/smoke/0002-supautils.sql b/postgres_15.8.1.044/nix/tests/smoke/0002-supautils.sql new file mode 100644 index 0000000..7a21606 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/smoke/0002-supautils.sql @@ -0,0 +1,17 @@ +BEGIN; +SELECT plan(2); + +-- the setting doesn't exist when supautils is not loaded +SELECT throws_ok($$ + select current_setting('supautils.privileged_extensions', false) +$$); + +LOAD 'supautils'; + +-- now it does +SELECT ok( + current_setting('supautils.privileged_extensions', false) = '' +); + +SELECT * FROM finish(); +ROLLBACK; diff --git a/postgres_15.8.1.044/nix/tests/smoke/0003-pgsodium-vault.sql b/postgres_15.8.1.044/nix/tests/smoke/0003-pgsodium-vault.sql new file mode 100644 index 0000000..1c9cedf --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/smoke/0003-pgsodium-vault.sql @@ -0,0 +1,40 @@ +BEGIN; + +select plan(3); + +select id as test_new_key_id from pgsodium.create_key(name:='test_new_key') \gset + +select vault.create_secret ( + 's3kr3t_k3y', 'a_name', 'this is the foo secret key') test_secret_id \gset + +select vault.create_secret ( + 's3kr3t_k3y_2', 'another_name', 'this is another foo key', + (select id from pgsodium.key where name = 'test_new_key')) test_secret_id_2 \gset + +SELECT results_eq( + $$ + SELECT decrypted_secret = 's3kr3t_k3y', description = 'this is the foo secret key' + FROM vault.decrypted_secrets WHERE name = 'a_name'; + $$, + $$VALUES (true, true)$$, + 'can select from masking view with custom key'); + 
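+-- (Illustrative aside, not part of the original test plan.)
+-- vault.decrypted_secrets is a view over vault.secrets: the secret column
+-- holds only ciphertext, and decrypted_secret is computed on read using the
+-- key referenced by key_id. A spot check along these lines would show the
+-- two columns never match:
+--   select secret is distinct from decrypted_secret
+--   from vault.decrypted_secrets where name = 'a_name';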
+SELECT results_eq( + $$ + SELECT decrypted_secret = 's3kr3t_k3y_2', description = 'this is another foo key' + FROM vault.decrypted_secrets WHERE key_id = (select id from pgsodium.key where name = 'test_new_key'); + $$, + $$VALUES (true, true)$$, + 'can select from masking view'); + +SELECT lives_ok( + format($test$ + select vault.update_secret( + %L::uuid, new_name:='a_new_name', + new_secret:='new_s3kr3t_k3y', new_description:='this is the bar key') + $test$, :'test_secret_id'), + 'can update name, secret and description' + ); + +SELECT * FROM finish(); +ROLLBACK; diff --git a/postgres_15.8.1.044/nix/tests/smoke/0004-index_advisor.sql b/postgres_15.8.1.044/nix/tests/smoke/0004-index_advisor.sql new file mode 100644 index 0000000..53170f6 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/smoke/0004-index_advisor.sql @@ -0,0 +1,19 @@ +-- Start transaction and plan the tests. +begin; + select plan(1); + + create extension if not exists index_advisor; + + create table account( + id int primary key, + is_verified bool + ); + + select is( + (select count(1) from index_advisor('select id from public.account where is_verified;'))::int, + 1, + 'index_advisor returns 1 row' + ); + + select * from finish(); +rollback; diff --git a/postgres_15.8.1.044/nix/tests/smoke/0005-test_pgroonga_mecab.sql b/postgres_15.8.1.044/nix/tests/smoke/0005-test_pgroonga_mecab.sql new file mode 100644 index 0000000..7341d5f --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/smoke/0005-test_pgroonga_mecab.sql @@ -0,0 +1,36 @@ +-- File: 0005-test_pgroonga_mecab.sql + +begin; + -- Plan for 3 tests: extension, table, and index + select plan(3); + + -- Create the PGroonga extension + create extension if not exists pgroonga; + + -- Test 1: Check if PGroonga extension exists + select has_extension('pgroonga', 'The pgroonga extension should exist.'); + + -- Create the table + create table notes( + id integer primary key, + content text + ); + + -- Test 2: Check if the table was created + SELECT has_table('public', 'notes', 'The notes table should exist.'); + -- Create the PGroonga index + CREATE INDEX pgroonga_content_index + ON notes + USING pgroonga (content) + WITH (tokenizer='TokenMecab'); + + -- Test 3: Check if the index was created + SELECT has_index('public', 'notes', 'pgroonga_content_index', 'The pgroonga_content_index should exist.'); + + -- Cleanup (this won't affect the test results as they've already been checked) + DROP INDEX IF EXISTS pgroonga_content_index; + DROP TABLE IF EXISTS notes; + + -- Finish the test plan + select * from finish(); +rollback; \ No newline at end of file diff --git a/postgres_15.8.1.044/nix/tests/sql/hypopg.sql b/postgres_15.8.1.044/nix/tests/sql/hypopg.sql new file mode 100644 index 0000000..6aabb69 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/hypopg.sql @@ -0,0 +1,13 @@ +create schema v; + +create table v.samp( + id int +); + +select 1 from hypopg_create_index($$ + create index on v.samp(id) +$$); + +drop schema v cascade; + + diff --git a/postgres_15.8.1.044/nix/tests/sql/index_advisor.sql b/postgres_15.8.1.044/nix/tests/sql/index_advisor.sql new file mode 100644 index 0000000..3911d6e --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/index_advisor.sql @@ -0,0 +1,13 @@ +create schema v; + +create table v.book( + id int primary key, + title text not null +); + +select + index_statements, errors +from + index_advisor('select id from v.book where title = $1'); + +drop schema v cascade; diff --git a/postgres_15.8.1.044/nix/tests/sql/pg-safeupdate.sql
b/postgres_15.8.1.044/nix/tests/sql/pg-safeupdate.sql new file mode 100644 index 0000000..790ec79 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/pg-safeupdate.sql @@ -0,0 +1,15 @@ +load 'safeupdate'; + +set safeupdate.enabled=1; + +create schema v; + +create table v.foo( + id int, + val text +); + +update v.foo + set val = 'bar'; + +drop schema v cascade; diff --git a/postgres_15.8.1.044/nix/tests/sql/pg_graphql.sql b/postgres_15.8.1.044/nix/tests/sql/pg_graphql.sql new file mode 100644 index 0000000..03f844d --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/pg_graphql.sql @@ -0,0 +1,219 @@ +begin; + comment on schema public is '@graphql({"inflect_names": true})'; + + create table account( + id serial primary key, + email varchar(255) not null, + priority int, + status text default 'active' + ); + + create table blog( + id serial primary key, + owner_id integer not null references account(id) + ); + comment on table blog is e'@graphql({"totalCount": {"enabled": true}})'; + + -- Make sure functions still work + create function _echo_email(account) + returns text + language sql + as $$ select $1.email $$; + + /* + Literals + */ + + select graphql.resolve($$ + mutation { + insertIntoAccountCollection(objects: [ + { email: "foo@barsley.com", priority: 1 }, + { email: "bar@foosworth.com" } + ]) { + affectedCount + records { + id + status + echoEmail + blogCollection { + totalCount + } + } + } + } + $$); + + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: [{ + ownerId: 1 + }]) { + records { + id + owner { + id + } + } + } + } + $$); + + + -- Override a default on status with null + select graphql.resolve($$ + mutation { + insertIntoAccountCollection(objects: [ + { email: "baz@baz.com", status: null }, + ]) { + affectedCount + records { + email + status + } + } + } + $$); + + + /* + Variables + */ + + select graphql.resolve($$ + mutation newAccount($emailAddress: String) { + xyz: insertIntoAccountCollection(objects: [ + { email: $emailAddress }, + { email: "other@email.com" } + ]) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"emailAddress": "foo@bar.com"}'::jsonb + ); + + + -- Variable override of default with null results in null + select graphql.resolve($$ + mutation newAccount($status: String) { + xyz: insertIntoAccountCollection(objects: [ + { email: "1@email.com", status: $status} + ]) { + affectedCount + records { + email + status + } + } + } + $$, + variables := '{"status": null}'::jsonb + ); + + -- Skipping variable override of default results in default + select graphql.resolve($$ + mutation newAccount($status: String) { + xyz: insertIntoAccountCollection(objects: [ + { email: "x@y.com", status: $status}, + ]) { + affectedCount + records { + email + status + } + } + } + $$, + variables := '{}'::jsonb + ); + + + select graphql.resolve($$ + mutation newAccount($acc: AccountInsertInput!) { + insertIntoAccountCollection(objects: [$acc]) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"acc": {"email": "bar@foo.com"}}'::jsonb + ); + + select graphql.resolve($$ + mutation newAccounts($acc: [AccountInsertInput!]!) 
{ + insertIntoAccountCollection(objects: $acc) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"acc": [{"email": "bar@foo.com"}]}'::jsonb + ); + + -- Single object coerces to a list + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: {ownerId: 1}) { + affectedCount + } + } + $$); + + + /* + Errors + */ + + -- Field does not exist + select graphql.resolve($$ + mutation createAccount($acc: AccountInsertInput) { + insertIntoAccountCollection(objects: [$acc]) { + affectedCount + records { + id + email + } + } + } + $$, + variables := '{"acc": {"doesNotExist": "other"}}'::jsonb + ); + + -- Wrong input type (list of string, not list of object) + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: ["not an object"]) { + affectedCount + } + } + $$); + + -- objects argument is missing + select graphql.resolve($$ + mutation { + insertIntoBlogCollection { + affectedCount + } + } + $$); + + -- Empty call + select graphql.resolve($$ + mutation { + insertIntoBlogCollection(objects: []) { + affectedCount + } + } + $$); + +rollback; diff --git a/postgres_15.8.1.044/nix/tests/sql/pg_hashids.sql b/postgres_15.8.1.044/nix/tests/sql/pg_hashids.sql new file mode 100644 index 0000000..1b82eee --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/pg_hashids.sql @@ -0,0 +1,6 @@ +select id_encode(1001); -- Result: jNl +select id_encode(1234567, 'This is my salt'); -- Result: Pdzxp +select id_encode(1234567, 'This is my salt', 10); -- Result: PlRPdzxpR7 +select id_encode(1234567, 'This is my salt', 10, 'abcdefghijABCDxFGHIJ1234567890'); -- Result: 3GJ956J9B9 +select id_decode('PlRPdzxpR7', 'This is my salt', 10); -- Result: 1234567 +select id_decode('3GJ956J9B9', 'This is my salt', 10, 'abcdefghijABCDxFGHIJ1234567890'); -- Result: 1234567 diff --git a/postgres_15.8.1.044/nix/tests/sql/pg_jsonschema.sql b/postgres_15.8.1.044/nix/tests/sql/pg_jsonschema.sql new file mode 100644 index 0000000..f5d7c8c --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/pg_jsonschema.sql @@ -0,0 +1,68 @@ +begin; + +-- Test json_matches_schema +create table customer( + id serial primary key, + metadata json, + + check ( + json_matches_schema( + '{ + "type": "object", + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string", + "maxLength": 16 + } + } + } + }', + metadata + ) + ) +); + +insert into customer(metadata) +values ('{"tags": ["vip", "darkmode-ui"]}'); + +-- Test jsonb_matches_schema +select + jsonb_matches_schema( + '{ + "type": "object", + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string", + "maxLength": 16 + } + } + } + }', + '{"tags": ["vip", "darkmode-ui"]}'::jsonb +); + +-- Test jsonschema_is_valid +select + jsonschema_is_valid( + '{ + "type": "object", + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string", + "maxLength": 16 + } + } + } + }'); + +-- Test invalid payload +insert into customer(metadata) +values ('{"tags": [1, 3]}'); + +rollback; diff --git a/postgres_15.8.1.044/nix/tests/sql/pg_net.sql b/postgres_15.8.1.044/nix/tests/sql/pg_net.sql new file mode 100644 index 0000000..bf44db5 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/pg_net.sql @@ -0,0 +1,7 @@ +-- This is a very basic test because you can't get the value returned +-- by a pg_net request in the same transaction that created it. + +select + net.http_get ( + 'https://postman-echo.com/get?foo1=bar1&foo2=bar2' + ) as request_id; diff --git a/postgres_15.8.1.044/nix/tests/sql/pg_plan_filter.sql
b/postgres_15.8.1.044/nix/tests/sql/pg_plan_filter.sql new file mode 100644 index 0000000..b49834d --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/pg_plan_filter.sql @@ -0,0 +1,22 @@ +begin; + load 'plan_filter'; + + create schema v; + + -- create a sample table + create table v.test_table ( + id serial primary key, + data text + ); + + -- insert some test data + insert into v.test_table (data) + values ('sample1'), ('sample2'), ('sample3'); + + set local plan_filter.statement_cost_limit = 0.001; + + select * from v.test_table; + +rollback; + + diff --git a/postgres_15.8.1.044/nix/tests/sql/pg_tle.sql b/postgres_15.8.1.044/nix/tests/sql/pg_tle.sql new file mode 100644 index 0000000..2f6d71a --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/pg_tle.sql @@ -0,0 +1,73 @@ +set client_min_messages = warning; + +select + pgtle.install_extension( + 'pg_distance', + '0.1', + 'Distance functions for two points', + $_pg_tle_$ + CREATE FUNCTION dist(x1 float8, y1 float8, x2 float8, y2 float8, norm int) + RETURNS float8 + AS $$ + SELECT (abs(x2 - x1) ^ norm + abs(y2 - y1) ^ norm) ^ (1::float8 / norm); + $$ LANGUAGE SQL; + + CREATE FUNCTION manhattan_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 1); + $$ LANGUAGE SQL; + + CREATE FUNCTION euclidean_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 2); + $$ LANGUAGE SQL; + $_pg_tle_$ + ); + +create extension pg_distance; + +select manhattan_dist(1, 1, 5, 5)::numeric(10,2); +select euclidean_dist(1, 1, 5, 5)::numeric(10,2); + +SELECT pgtle.install_update_path( + 'pg_distance', + '0.1', + '0.2', + $_pg_tle_$ + CREATE OR REPLACE FUNCTION dist(x1 float8, y1 float8, x2 float8, y2 float8, norm int) + RETURNS float8 + AS $$ + SELECT (abs(x2 - x1) ^ norm + abs(y2 - y1) ^ norm) ^ (1::float8 / norm); + $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; + + CREATE OR REPLACE FUNCTION manhattan_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 1); + $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; + + CREATE OR REPLACE FUNCTION euclidean_dist(x1 float8, y1 float8, x2 float8, y2 float8) + RETURNS float8 + AS $$ + SELECT dist(x1, y1, x2, y2, 2); + $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE; + $_pg_tle_$ + ); + + +select + pgtle.set_default_version('pg_distance', '0.2'); + +alter extension pg_distance update; + +drop extension pg_distance; + +select + pgtle.uninstall_extension('pg_distance'); + +-- Restore original state if any of the above fails +drop extension pg_tle cascade; + +create extension pg_tle; diff --git a/postgres_15.8.1.044/nix/tests/sql/pgaudit.sql b/postgres_15.8.1.044/nix/tests/sql/pgaudit.sql new file mode 100644 index 0000000..c071c6e --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/pgaudit.sql @@ -0,0 +1,23 @@ +-- Note: there is no test that the logs were correctly output. 
Only checking for exceptions. +set pgaudit.log = 'write, ddl'; +set pgaudit.log_relation = on; +set pgaudit.log_level = notice; + +create schema v; + +create table v.account( + id int, + name text, + password text, + description text +); + +insert into v.account (id, name, password, description) +values (1, 'user1', 'HASH1', 'blah, blah'); + +select + * +from + v.account; + +drop schema v cascade; diff --git a/postgres_15.8.1.044/nix/tests/sql/pgjwt.sql b/postgres_15.8.1.044/nix/tests/sql/pgjwt.sql new file mode 100644 index 0000000..24179e7 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/pgjwt.sql @@ -0,0 +1,13 @@ +select + sign( + payload := '{"sub":"1234567890","name":"John Doe","iat":1516239022}', + secret := 'secret', + algorithm := 'HS256' + ); + +select + verify( + token := 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoiRm9vIn0.Q8hKjuadCEhnCPuqIj9bfLhTh_9QSxshTRsA5Aq4IuM', + secret := 'secret', + algorithm := 'HS256' + ); diff --git a/postgres_15.8.1.044/nix/tests/sql/pgmq.sql b/postgres_15.8.1.044/nix/tests/sql/pgmq.sql new file mode 100644 index 0000000..cd47cc3 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/pgmq.sql @@ -0,0 +1,90 @@ +-- Test the standard flow +select + pgmq.create('Foo'); + +select + * +from + pgmq.send( + queue_name:='Foo', + msg:='{"foo": "bar1"}' + ); + +-- Test queue is not case sensitive +select + * +from + pgmq.send( + queue_name:='foo', -- note: lowercase usage + msg:='{"foo": "bar2"}', + delay:=5 + ); + +select + msg_id, + read_ct, + message +from + pgmq.read( + queue_name:='Foo', + vt:=30, + qty:=2 + ); + +select + msg_id, + read_ct, + message +from + pgmq.pop('Foo'); + + +-- Archive message with msg_id=2. +select + pgmq.archive( + queue_name:='Foo', + msg_id:=2 + ); + + +select + pgmq.create('my_queue'); + +select + pgmq.send_batch( + queue_name:='my_queue', + msgs:=array['{"foo": "bar3"}','{"foo": "bar4"}','{"foo": "bar5"}']::jsonb[] +); + +select + pgmq.archive( + queue_name:='my_queue', + msg_ids:=array[3, 4, 5] + ); + +select + pgmq.delete('my_queue', 6); + + +select + pgmq.drop_queue('my_queue'); + +/* +-- Disabled until pg_partman goes back into the image +select + pgmq.create_partitioned( + 'my_partitioned_queue', + '5 seconds', + '10 seconds' +); +*/ + + +-- Make sure SQLI enabling characters are blocked +select pgmq.create('F--oo'); +select pgmq.create('F$oo'); +select pgmq.create($$F'oo$$); + + + + diff --git a/postgres_15.8.1.044/nix/tests/sql/pgrouting.sql b/postgres_15.8.1.044/nix/tests/sql/pgrouting.sql new file mode 100644 index 0000000..e3af562 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/pgrouting.sql @@ -0,0 +1,27 @@ +create schema v; + +-- create the roads table +create table v.roads ( + id serial primary key, + source integer, + target integer, + cost double precision +); + +-- insert sample data into roads table +insert into v.roads (source, target, cost) values +(1, 2, 1.0), +(2, 3, 1.0), +(3, 4, 1.0), +(1, 3, 2.5), +(3, 5, 2.0); + +-- use pgRouting to find the shortest path +select * from pgr_dijkstra( + 'select id, source, target, cost from v.roads', + 1, -- start node + 4 -- end node +); + +drop schema v cascade; + diff --git a/postgres_15.8.1.044/nix/tests/sql/pgsodium.sql b/postgres_15.8.1.044/nix/tests/sql/pgsodium.sql new file mode 100644 index 0000000..cd3c382 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/pgsodium.sql @@ -0,0 +1,4 @@ +select + status +from + pgsodium.create_key(); diff --git a/postgres_15.8.1.044/nix/tests/sql/pgtap.sql
b/postgres_15.8.1.044/nix/tests/sql/pgtap.sql new file mode 100644 index 0000000..b99976a --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/pgtap.sql @@ -0,0 +1,11 @@ +begin; + +select plan(1); + +-- Run the tests. +select pass( 'My test passed, w00t!' ); + +-- Finish the tests and clean up. +select * from finish(); + +rollback; diff --git a/postgres_15.8.1.044/nix/tests/sql/plpgsql-check.sql b/postgres_15.8.1.044/nix/tests/sql/plpgsql-check.sql new file mode 100644 index 0000000..d54d2c4 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/plpgsql-check.sql @@ -0,0 +1,26 @@ +create schema v; + +create table v.t1( + a int, + b int +); + +create or replace function v.f1() + returns void + language plpgsql +as $$ +declare r record; +begin + for r in select * from v.t1 + loop + raise notice '%', r.c; -- there is a bug here: table t1 has no column "c" + end loop; +end; +$$; + +select * from v.f1(); + +-- use plpgsql_check_function to check the function for errors +select * from plpgsql_check_function('v.f1()'); + +drop schema v cascade; diff --git a/postgres_15.8.1.044/nix/tests/sql/postgis.sql b/postgres_15.8.1.044/nix/tests/sql/postgis.sql new file mode 100644 index 0000000..766844b --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/postgis.sql @@ -0,0 +1,52 @@ +create schema v; + +-- create a table to store geographic points +create table v.places ( + id serial primary key, + name text, + geom geometry(point, 4326) -- using WGS 84 coordinate system +); + +-- insert some sample geographic points into the places table +insert into v.places (name, geom) +values + ('place_a', st_setsrid(st_makepoint(-73.9857, 40.7484), 4326)), -- latitude and longitude for a location + ('place_b', st_setsrid(st_makepoint(-74.0060, 40.7128), 4326)), -- another location + ('place_c', st_setsrid(st_makepoint(-73.9687, 40.7851), 4326)); -- yet another location + +-- calculate the distance between two points (in meters) +select + a.name as place_a, + b.name as place_b, + st_distance(a.geom::geography, b.geom::geography) as distance_meters +from + v.places a, + v.places b +where + a.name = 'place_a' + and b.name = 'place_b'; + +-- find all places within a 5km radius of 'place_a' +select + name, + st_distance( + geom::geography, + ( + select + geom + from + v.places + where + name = 'place_a' + )::geography) as distance_meters +from + v.places +where + st_dwithin( + geom::geography, + (select geom from v.places where name = 'place_a')::geography, + 5000 + ) + and name != 'place_a'; + +drop schema v cascade; diff --git a/postgres_15.8.1.044/nix/tests/sql/vault.sql b/postgres_15.8.1.044/nix/tests/sql/vault.sql new file mode 100644 index 0000000..bafcb4d --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/vault.sql @@ -0,0 +1,30 @@ +select + 1 +from + vault.create_secret('my_s3kre3t'); + +select + 1 +from + vault.create_secret( + 'another_s3kre3t', + 'unique_name', + 'This is the description' + ); + +insert into vault.secrets (secret) +values + ('s3kre3t_k3y'); + +select + name, + description +from + vault.decrypted_secrets +order by + created_at desc +limit + 3; + + + diff --git a/postgres_15.8.1.044/nix/tests/sql/wal2json.sql b/postgres_15.8.1.044/nix/tests/sql/wal2json.sql new file mode 100644 index 0000000..6ec4a6d --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/wal2json.sql @@ -0,0 +1,32 @@ +create schema v; + +create table v.foo( + id int primary key +); + +select + 1 +from + pg_create_logical_replication_slot('reg_test', 'wal2json', false); + +insert into v.foo(id) values (1); + +select + data +from + 
pg_logical_slot_get_changes( + 'reg_test', + null, + null, + 'include-pk', '1', + 'include-transaction', 'false', + 'include-timestamp', 'false', + 'include-type-oids', 'false', + 'format-version', '2', + 'actions', 'insert,update,delete' + ) x; + +select + pg_drop_replication_slot('reg_test'); + +drop schema v cascade; diff --git a/postgres_15.8.1.044/nix/tests/sql/z_15_ext_interface.sql b/postgres_15.8.1.044/nix/tests/sql/z_15_ext_interface.sql new file mode 100644 index 0000000..24ee03e --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/z_15_ext_interface.sql @@ -0,0 +1,122 @@ +/* + +The purpose of this test is to monitor the SQL interface exposed +by Postgres extensions so we have to manually review/approve any differences +that emerge as versions change. + +*/ + + +/* + +List all extensions that are not enabled +If a new entry shows up in this list, that means a new extension has been +added and you should `create extension ...` to enable it in ./nix/tests/prime + +*/ +create extension if not exists adminpack; +create extension if not exists plv8; +create extension if not exists plcoffee; +create extension if not exists plls; +create extension if not exists old_snapshot; +create extension if not exists timescaledb; +create extension if not exists postgis_tiger_geocoder; + + +select + name +from + pg_available_extensions +where + installed_version is null +order by + name asc; + + +/* + +Monitor relocatability and config of each extension +- lesson learned from pg_cron + +*/ + +select + extname as extension_name, + extrelocatable as is_relocatable +from + pg_extension +order by + extname asc; + + +/* + +Monitor extension public function interface + +*/ + +select + e.extname as extension_name, + n.nspname as schema_name, + p.proname as function_name, + pg_catalog.pg_get_function_identity_arguments(p.oid) as argument_types, + pg_catalog.pg_get_function_result(p.oid) as return_type +from + pg_catalog.pg_proc p + join pg_catalog.pg_namespace n + on n.oid = p.pronamespace + join pg_catalog.pg_depend d + on d.objid = p.oid + join pg_catalog.pg_extension e + on e.oid = d.refobjid +where + d.deptype = 'e' + -- Filter out changes between pg15 and pg16 from extensions that ship with postgres + -- new in pg16 + and not (e.extname = 'fuzzystrmatch' and p.proname = 'daitch_mokotoff') + and not (e.extname = 'pageinspect' and p.proname = 'bt_multi_page_stats') + and not (e.extname = 'pg_buffercache' and p.proname = 'pg_buffercache_summary') + and not (e.extname = 'pg_buffercache' and p.proname = 'pg_buffercache_usage_counts') + and not (e.extname = 'pg_walinspect' and p.proname = 'pg_get_wal_block_info') + -- removed in pg16 + and not (e.extname = 'pg_walinspect' and p.proname = 'pg_get_wal_records_info_till_end_of_wal') + and not (e.extname = 'pg_walinspect' and p.proname = 'pg_get_wal_stats_till_end_of_wal') + -- changed in pg16 - output signature added a column + and not (e.extname = 'pageinspect' and p.proname = 'brin_page_items') +order by + e.extname, + n.nspname, + p.proname, + md5(pg_catalog.pg_get_function_identity_arguments(p.oid)); + +/* + +Monitor extension public table/view/matview/index interface + +*/ + +select + e.extname as extension_name, + n.nspname as schema_name, + pc.relname as entity_name, + pa.attname +from + pg_catalog.pg_class pc + join pg_catalog.pg_namespace n + on n.oid = pc.relnamespace + join pg_catalog.pg_depend d + on d.objid = pc.oid + join pg_catalog.pg_extension e + on e.oid = d.refobjid + left join pg_catalog.pg_attribute pa + on pa.attrelid = pc.oid + and 
pa.attnum > 0 + and not pa.attisdropped +where + d.deptype = 'e' + and pc.relkind in ('r', 'v', 'm', 'i') +order by + e.extname, + n.nspname, + pc.relname, + pa.attname; diff --git a/postgres_15.8.1.044/nix/tests/sql/z_15_pg_stat_monitor.sql b/postgres_15.8.1.044/nix/tests/sql/z_15_pg_stat_monitor.sql new file mode 100644 index 0000000..69d996b --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/z_15_pg_stat_monitor.sql @@ -0,0 +1,6 @@ +select + * +from + pg_stat_monitor +where + false; diff --git a/postgres_15.8.1.044/nix/tests/sql/z_15_pgroonga.sql b/postgres_15.8.1.044/nix/tests/sql/z_15_pgroonga.sql new file mode 100644 index 0000000..503f266 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/z_15_pgroonga.sql @@ -0,0 +1,48 @@ +create schema v; + +create table v.roon( + id serial primary key, + content text +); + + +with tokenizers as ( + select + x + from + jsonb_array_elements( + (select pgroonga_command('tokenizer_list'))::jsonb + ) x(val) + limit + 1 + offset + 1 -- first record is unrelated and not stable +) +select + t.x::jsonb ->> 'name' +from + jsonb_array_elements((select * from tokenizers)) t(x) +order by + t.x::jsonb ->> 'name'; + + +insert into v.roon (content) +values + ('Hello World'), + ('PostgreSQL with PGroonga is a thing'), + ('This is a full-text search test'), + ('PGroonga supports various languages'); + +-- Create default index +create index pgroonga_index on v.roon using pgroonga (content); + +-- Create mecab tokenizer index since we had a bug with this one once +create index pgroonga_index_mecab on v.roon using pgroonga (content) with (tokenizer='TokenMecab'); + +-- Run some queries to test the index +select * from v.roon where content &@~ 'Hello'; +select * from v.roon where content &@~ 'powerful'; +select * from v.roon where content &@~ 'supports'; + + +drop schema v cascade; diff --git a/postgres_15.8.1.044/nix/tests/sql/z_15_pgvector.sql b/postgres_15.8.1.044/nix/tests/sql/z_15_pgvector.sql new file mode 100644 index 0000000..f2de305 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/z_15_pgvector.sql @@ -0,0 +1,72 @@ +create schema v; + +create table v.items( + id serial primary key, + embedding vector(3), + half_embedding halfvec(3), + bit_embedding bit(3), + sparse_embedding sparsevec(3) +); + +-- vector ops +create index on v.items using hnsw (embedding vector_l2_ops); +create index on v.items using hnsw (embedding vector_cosine_ops); +create index on v.items using hnsw (embedding vector_l1_ops); +create index on v.items using ivfflat (embedding vector_l2_ops); +create index on v.items using ivfflat (embedding vector_cosine_ops); + +-- halfvec ops +create index on v.items using hnsw (half_embedding halfvec_l2_ops); +create index on v.items using hnsw (half_embedding halfvec_cosine_ops); +create index on v.items using hnsw (half_embedding halfvec_l1_ops); +create index on v.items using ivfflat (half_embedding halfvec_l2_ops); +create index on v.items using ivfflat (half_embedding halfvec_cosine_ops); + +-- sparsevec +create index on v.items using hnsw (sparse_embedding sparsevec_l2_ops); +create index on v.items using hnsw (sparse_embedding sparsevec_cosine_ops); +create index on v.items using hnsw (sparse_embedding sparsevec_l1_ops); + +-- bit ops +create index on v.items using hnsw (bit_embedding bit_hamming_ops); +create index on v.items using ivfflat (bit_embedding bit_hamming_ops); + +-- Populate some records +insert into v.items( + embedding, + half_embedding, + bit_embedding, + sparse_embedding +) +values + ('[1,2,3]', '[1,2,3]', '101', 
'{1:4}/3'), + ('[2,3,4]', '[2,3,4]', '010', '{1:7,3:0}/3'); + +-- Test op types +select + * +from + v.items +order by + embedding <-> '[2,3,5]', + embedding <=> '[2,3,5]', + embedding <+> '[2,3,5]', + embedding <#> '[2,3,5]', + half_embedding <-> '[2,3,5]', + half_embedding <=> '[2,3,5]', + half_embedding <+> '[2,3,5]', + half_embedding <#> '[2,3,5]', + sparse_embedding <-> '{2:4,3:1}/3', + sparse_embedding <=> '{2:4,3:1}/3', + sparse_embedding <+> '{2:4,3:1}/3', + sparse_embedding <#> '{2:4,3:1}/3', + bit_embedding <~> '011'; + +select + avg(embedding), + avg(half_embedding) +from + v.items; + +-- Cleanup +drop schema v cascade; diff --git a/postgres_15.8.1.044/nix/tests/sql/z_15_plv8.sql b/postgres_15.8.1.044/nix/tests/sql/z_15_plv8.sql new file mode 100644 index 0000000..044d69c --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/z_15_plv8.sql @@ -0,0 +1,20 @@ +/* +This test is excluded from the Postgres 17 suite because it does not ship +with the Supabase PG17 image +*/ +create extension if not exists plv8; + +create schema v; + +-- create a function to perform some JavaScript operations +create function v.multiply_numbers(a integer, b integer) + returns integer + language plv8 +as $$ + return a * b; +$$; + +select + v.multiply_numbers(3, 4); + +drop schema v cascade; diff --git a/postgres_15.8.1.044/nix/tests/sql/z_15_rum.sql b/postgres_15.8.1.044/nix/tests/sql/z_15_rum.sql new file mode 100644 index 0000000..6ae9459 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/z_15_rum.sql @@ -0,0 +1,40 @@ +/* +This extension is excluded from oriole-17 because it uses an unsupported index type +*/ +create schema v; + +create table v.test_rum( + t text, + a tsvector +); + +create trigger tsvectorupdate + before update or insert on v.test_rum + for each row + execute procedure + tsvector_update_trigger( + 'a', + 'pg_catalog.english', + 't' + ); + +insert into v.test_rum(t) +values + ('the situation is most beautiful'), + ('it is a beautiful'), + ('it looks like a beautiful place'); + +create index rumidx on v.test_rum using rum (a rum_tsvector_ops); + +select + t, + round(a <=> to_tsquery('english', 'beautiful | place')) as rank +from + v.test_rum +where + a @@ to_tsquery('english', 'beautiful | place') +order by + a <=> to_tsquery('english', 'beautiful | place'); + + +drop schema v cascade; diff --git a/postgres_15.8.1.044/nix/tests/sql/z_15_timescale.sql b/postgres_15.8.1.044/nix/tests/sql/z_15_timescale.sql new file mode 100644 index 0000000..fd29bb1 --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/z_15_timescale.sql @@ -0,0 +1,39 @@ +/* +This test is excluded from the Postgres 17 suite because it does not ship +with the Supabase PG17 image +*/ +create extension if not exists timescaledb; + +-- Confirm we're running the apache version +show timescaledb.license; + +-- Create schema v +create schema v; + +-- Create a table in the v schema +create table v.sensor_data ( + time timestamptz not null, + sensor_id int not null, + temperature double precision not null, + humidity double precision not null +); + +-- Convert the table to a hypertable +select create_hypertable('v.sensor_data', 'time'); + +-- Insert some data into the hypertable +insert into v.sensor_data (time, sensor_id, temperature, humidity) +values + ('2024-08-09', 1, 22.5, 60.2), + ('2024-08-08', 1, 23.0, 59.1), + ('2024-08-07', 2, 21.7, 63.3); + +-- Select data from the hypertable +select + * +from + v.sensor_data; + +-- Drop schema v and all its entities +drop schema v cascade; + diff --git 
a/postgres_15.8.1.044/nix/tests/sql/z_17_ext_interface.sql b/postgres_15.8.1.044/nix/tests/sql/z_17_ext_interface.sql new file mode 100644 index 0000000..ad0f63e --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/z_17_ext_interface.sql @@ -0,0 +1,114 @@ +/* + +The purpose of this test is to monitor the SQL interface exposed +by Postgres extensions so we have to manually review/approve any differences +that emerge as versions change. + +*/ + + +/* + +List all extensions that are not enabled +If a new entry shows up in this list, that means a new extension has been +added and you should `create extension ...` to enable it in ./nix/tests/prime + +*/ + +select + name +from + pg_available_extensions +where + installed_version is null +order by + name asc; + + +/* + +Monitor relocatability and config of each extension +- lesson learned from pg_cron + +*/ + +select + extname as extension_name, + extrelocatable as is_relocatable +from + pg_extension +order by + extname asc; + + +/* + +Monitor extension public function interface + +*/ + +select + e.extname as extension_name, + n.nspname as schema_name, + p.proname as function_name, + pg_catalog.pg_get_function_identity_arguments(p.oid) as argument_types, + pg_catalog.pg_get_function_result(p.oid) as return_type +from + pg_catalog.pg_proc p + join pg_catalog.pg_namespace n + on n.oid = p.pronamespace + join pg_catalog.pg_depend d + on d.objid = p.oid + join pg_catalog.pg_extension e + on e.oid = d.refobjid +where + d.deptype = 'e' + -- Filter out changes between pg15 and pg16 from extensions that ship with postgres + -- new in pg16 + and not (e.extname = 'fuzzystrmatch' and p.proname = 'daitch_mokotoff') + and not (e.extname = 'pageinspect' and p.proname = 'bt_multi_page_stats') + and not (e.extname = 'pg_buffercache' and p.proname = 'pg_buffercache_summary') + and not (e.extname = 'pg_buffercache' and p.proname = 'pg_buffercache_usage_counts') + and not (e.extname = 'pg_walinspect' and p.proname = 'pg_get_wal_block_info') + -- removed in pg16 + and not (e.extname = 'pg_walinspect' and p.proname = 'pg_get_wal_records_info_till_end_of_wal') + and not (e.extname = 'pg_walinspect' and p.proname = 'pg_get_wal_stats_till_end_of_wal') + -- changed in pg16 - output signature added a column + and not (e.extname = 'pageinspect' and p.proname = 'brin_page_items') +order by + e.extname, + n.nspname, + p.proname, + pg_catalog.pg_get_function_identity_arguments(p.oid); + +/* + +Monitor extension public table/view/matview/index interface + +*/ + +select + e.extname as extension_name, + n.nspname as schema_name, + pc.relname as entity_name, + pa.attname +from + pg_catalog.pg_class pc + join pg_catalog.pg_namespace n + on n.oid = pc.relnamespace + join pg_catalog.pg_depend d + on d.objid = pc.oid + join pg_catalog.pg_extension e + on e.oid = d.refobjid + left join pg_catalog.pg_attribute pa + on pa.attrelid = pc.oid + and pa.attnum > 0 + and not pa.attisdropped +where + d.deptype = 'e' + and pc.relkind in ('r', 'v', 'm', 'i') +order by + e.extname, + n.nspname, + pc.relname, + pa.attname; diff --git a/postgres_15.8.1.044/nix/tests/sql/z_17_pg_stat_monitor.sql b/postgres_15.8.1.044/nix/tests/sql/z_17_pg_stat_monitor.sql new file mode 100644 index 0000000..69d996b --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/z_17_pg_stat_monitor.sql @@ -0,0 +1,6 @@ +select + * +from + pg_stat_monitor +where + false; diff --git a/postgres_15.8.1.044/nix/tests/sql/z_17_pgvector.sql b/postgres_15.8.1.044/nix/tests/sql/z_17_pgvector.sql new file mode 100644 index 
0000000..c90219c --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/sql/z_17_pgvector.sql @@ -0,0 +1,52 @@ +/* +This test excludes indexes shipped with pgvector because orioledb doesn't support them yet +*/ +create schema v; + +create table v.items( + id serial primary key, + embedding vector(3), + half_embedding halfvec(3), + bit_embedding bit(3), + sparse_embedding sparsevec(3) +); + +-- Populate some records +insert into v.items( + embedding, + half_embedding, + bit_embedding, + sparse_embedding +) +values + ('[1,2,3]', '[1,2,3]', '101', '{1:4}/3'), + ('[2,3,4]', '[2,3,4]', '010', '{1:7,3:0}/3'); + +-- Test op types +select + * +from + v.items +order by + embedding <-> '[2,3,5]', + embedding <=> '[2,3,5]', + embedding <+> '[2,3,5]', + embedding <#> '[2,3,5]', + half_embedding <-> '[2,3,5]', + half_embedding <=> '[2,3,5]', + half_embedding <+> '[2,3,5]', + half_embedding <#> '[2,3,5]', + sparse_embedding <-> '{2:4,3:1}/3', + sparse_embedding <=> '{2:4,3:1}/3', + sparse_embedding <+> '{2:4,3:1}/3', + sparse_embedding <#> '{2:4,3:1}/3', + bit_embedding <~> '011'; + +select + avg(embedding), + avg(half_embedding) +from + v.items; + +-- Cleanup +drop schema v cascade; diff --git a/postgres_15.8.1.044/nix/tests/util/pgsodium_getkey.sh b/postgres_15.8.1.044/nix/tests/util/pgsodium_getkey.sh new file mode 100644 index 0000000..7044d0f --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/util/pgsodium_getkey.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -euo pipefail + +KEY_FILE="${1:-/tmp/pgsodium.key}" + +if [[ ! -f "$KEY_FILE" ]]; then + head -c 32 /dev/urandom | od -A n -t x1 | tr -d ' \n' > "$KEY_FILE" +fi +cat "$KEY_FILE" diff --git a/postgres_15.8.1.044/nix/tests/util/pgsodium_getkey_arb.sh b/postgres_15.8.1.044/nix/tests/util/pgsodium_getkey_arb.sh new file mode 100644 index 0000000..446dbba --- /dev/null +++ b/postgres_15.8.1.044/nix/tests/util/pgsodium_getkey_arb.sh @@ -0,0 +1 @@ +echo -n 8359dafbba5c05568799c1c24eb6c2fbff497654bc6aa5e9a791c666768875a1 \ No newline at end of file diff --git a/postgres_15.8.1.044/nix/tools/README.md b/postgres_15.8.1.044/nix/tools/README.md new file mode 100644 index 0000000..2606a57 --- /dev/null +++ b/postgres_15.8.1.044/nix/tools/README.md @@ -0,0 +1,2 @@ +This directory just contains tools, but you can't run them directly. For the +sake of robustness, you should use `nix run` on this repository to do so. diff --git a/postgres_15.8.1.044/nix/tools/dbmate-tool.sh.in b/postgres_15.8.1.044/nix/tools/dbmate-tool.sh.in new file mode 100644 index 0000000..1197228 --- /dev/null +++ b/postgres_15.8.1.044/nix/tools/dbmate-tool.sh.in @@ -0,0 +1,304 @@ +#!/usr/bin/env bash +# shellcheck shell=bash + +[ ! -z "$DEBUG" ] && set -x + +# Default values +PSQL_VERSION="all" +PORTNO="@PGSQL_DEFAULT_PORT@" +PGSQL_SUPERUSER="@PGSQL_SUPERUSER@" +PGPASSWORD="${PGPASSWORD:-postgres}" +PGSQL_USER="postgres" +FLAKE_URL="github:supabase/postgres" +MIGRATIONS_DIR="@MIGRATIONS_DIR@" +CURRENT_SYSTEM="@CURRENT_SYSTEM@" +ANSIBLE_VARS="@ANSIBLE_VARS@" +PGBOUNCER_AUTH_SCHEMA_SQL=@PGBOUNCER_AUTH_SCHEMA_SQL@ +STAT_EXTENSION_SQL=@STAT_EXTENSION_SQL@ + +# Start PostgreSQL using nix +start_postgres() { + DATDIR=$(mktemp -d) + echo "Starting PostgreSQL in directory: $DATDIR" + nix run "$FLAKE_URL#start-server" -- "$PSQL_VERSION" --skip-migrations --daemonize --datdir "$DATDIR" + echo "PostgreSQL started." +} + +# Cleanup function +cleanup() { + echo "Cleaning up..." 
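+  # Best-effort shutdown: ask pg_ctl for a graceful stop, then use pgrep to report anything still running. Note that pgrep -f "postgres" is a broad pattern and may also match unrelated processes.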
+ + # Check if PostgreSQL processes exist + if pgrep -f "postgres" >/dev/null; then + echo "Stopping PostgreSQL gracefully..." + + # Use pg_ctl to stop PostgreSQL + pg_ctl -D "$DATDIR" stop + + # Wait a bit for graceful shutdown + sleep 5 + + # Check if processes are still running + if pgrep -f "postgres" >/dev/null; then + echo "Warning: Some PostgreSQL processes could not be stopped gracefully." + fi + else + echo "PostgreSQL is not running, skipping stop." + fi + + # Always exit successfully, log any remaining processes + if pgrep -f "postgres" >/dev/null; then + echo "Warning: Some PostgreSQL processes could not be cleaned up:" + pgrep -f "postgres" + else + echo "Cleanup completed successfully" + fi +} + + +# Function to display help +print_help() { + echo "Usage: nix run .#dbmate-tool -- [options]" + echo + echo "Options:" + echo " -v, --version [15|16|orioledb-17|all] Specify the PostgreSQL version to use (default: all)" + echo " -p, --port PORT Specify the port number to use (default: 5435)" + echo " -u, --user USER Specify the database user to connect as (default: postgres)" + echo " -h, --help Show this help message" + echo " -f, --flake-url URL Specify the flake URL to use (default: github:supabase/postgres)" + echo "Description:" + echo " Runs 'dbmate up' against a locally running instance of the database version you specify, or against every version when 'all' is given." + echo " NOTE: To create a migration, you must run 'nix develop' and then 'dbmate new ' to create a new migration file." + echo + echo "Examples:" + echo " nix run .#dbmate-tool" + echo " nix run .#dbmate-tool -- --version 15" + echo " nix run .#dbmate-tool -- --version 16 --port 5433" + echo " nix run .#dbmate-tool -- --version 16 --port 5433 --flake-url github:supabase/postgres/" +} + +# Parse arguments +while [[ "$#" -gt 0 ]]; do + case "$1" in + -v|--version) + if [[ -n "$2" && ! "$2" =~ ^- ]]; then + PSQL_VERSION="$2" + shift 2 + else + echo "Error: --version requires an argument (15, 16, or orioledb-17)" + exit 1 + fi + ;; + -u|--user) + if [[ -n "$2" && ! "$2" =~ ^- ]]; then + PGSQL_USER="$2" + shift 2 + else + echo "Error: --user requires an argument" + exit 1 + fi + ;; + -f|--flake-url) + if [[ -n "$2" && ! "$2" =~ ^- ]]; then + FLAKE_URL="$2" + shift 2 + else + echo "Error: --flake-url requires an argument" + exit 1 + fi + ;; + -p|--port) + if [[ -n "$2" && ! "$2" =~ ^- ]]; then + PORTNO="$2" + shift 2 + else + echo "Error: --port requires an argument" + exit 1 + fi + ;; + -h|--help) + print_help + exit 0 + ;; + *) + echo "Unknown option: $1" + print_help + exit 1 + ;; + esac +done + +# Function to wait for PostgreSQL to be ready +wait_for_postgres() { + local max_attempts=30 # Increased significantly + local attempt=1 + + # Give PostgreSQL a moment to actually start the process + sleep 2 + + while [ $attempt -le $max_attempts ]; do + "${PSQLBIN}/pg_isready" -h localhost -p "$PORTNO" -U "$PGSQL_SUPERUSER" -d postgres + local status=$? + + if [ $status -eq 0 ]; then + echo "PostgreSQL is ready!" + return 0 + fi + echo "Waiting for PostgreSQL to start (attempt $attempt/$max_attempts)..." + sleep 2 + attempt=$((attempt + 1)) + done + + echo "PostgreSQL failed to start after $max_attempts attempts" + return 1 +} + +check_orioledb_ready() { + local max_attempts=30 + local attempt=1 + + while [ $attempt -le $max_attempts ]; do + if "${PSQLBIN}/psql" -v ON_ERROR_STOP=1 -U "$PGSQL_SUPERUSER" -p "$PORTNO" -h localhost -d postgres -c "SELECT * FROM pg_am WHERE amname = 'orioledb'" | grep -q orioledb; then + echo "Orioledb extension is ready!" 
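+      # Note: the grep above only confirms the orioledb access method is registered in pg_am; it does not exercise orioledb tables themselves.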
+ return 0 + fi + echo "Waiting for orioledb to be ready (attempt $attempt/$max_attempts)..." + sleep 2 + attempt=$((attempt + 1)) + done + + echo "Orioledb failed to initialize after $max_attempts attempts" + return 1 +} + +trim_schema() { + case "$CURRENT_SYSTEM" in + "x86_64-darwin"|"aarch64-darwin") + sed -i '' '/INSERT INTO public.schema_migrations/,$d' "./db/schema.sql" + echo "Matched: $CURRENT_SYSTEM" + ;; + *) + sed -i '/INSERT INTO public.schema_migrations/,$d' "./db/schema.sql" + ;; + esac +} + +perform_dump() { + local max_attempts=3 + local attempt=1 + + while [ $attempt -le $max_attempts ]; do + echo "Attempting dbmate dump (attempt $attempt/$max_attempts)" + + if dbmate dump; then + return 0 + fi + + echo "Dump attempt $attempt failed, waiting before retry..." + sleep 5 + attempt=$((attempt + 1)) + done + + echo "All dump attempts failed" + return 1 +} + +migrate_version() { + echo "PSQL_VERSION: $PSQL_VERSION" + #pkill -f "postgres" || true # Ensure PostgreSQL is stopped before starting + PSQLBIN=$(nix build --no-link "$FLAKE_URL#psql_$PSQL_VERSION/bin" --json | jq -r '.[].outputs.out + "/bin"') + echo "Using PostgreSQL version $PSQL_VERSION from $PSQLBIN" + + # Start PostgreSQL + start_postgres + echo "Waiting for PostgreSQL to be ready..." + + # Wait for PostgreSQL to be ready to accept connections + if ! wait_for_postgres; then + echo "Failed to connect to PostgreSQL server" + exit 1 + fi + + if [ "$PSQL_VERSION" = "orioledb-17" ]; then + if ! check_orioledb_ready; then + echo "Failed to initialize orioledb extension" + exit 1 + fi + fi + + echo "PostgreSQL server is ready" + + # Configure PostgreSQL roles and permissions + if ! "${PSQLBIN}/psql" -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U "$PGSQL_SUPERUSER" -p "$PORTNO" -h localhost -d postgres <<-EOSQL +create role postgres superuser login password '$PGPASSWORD'; +alter database postgres owner to postgres; +EOSQL + then + echo "Failed to configure PostgreSQL roles and permissions" + exit 1 + fi + "${PSQLBIN}/psql" -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -d postgres -f "$PGBOUNCER_AUTH_SCHEMA_SQL" + "${PSQLBIN}/psql" -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -d postgres -f "$STAT_EXTENSION_SQL" + + # Set db url to run dbmate + export DATABASE_URL="postgres://$PGSQL_USER:$PGPASSWORD@localhost:$PORTNO/postgres?sslmode=disable" + # Export path so dbmate can find correct psql and pg_dump + export PATH="$PSQLBIN:$PATH" + # Run init scripts + if ! dbmate --migrations-dir "$MIGRATIONS_DIR/init-scripts" up; then + echo "Error: Initial migration failed" + exit 1 + fi + + # Password update command + if ! "${PSQLBIN}/psql" -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -c "ALTER USER supabase_admin WITH PASSWORD '$PGPASSWORD'"; then + echo "Error: Failed to update supabase_admin password" + exit 1 + fi + + # Set up database URL + export DATABASE_URL="postgres://$PGSQL_SUPERUSER:$PGPASSWORD@localhost:$PORTNO/postgres?sslmode=disable" + # Run migrations + if ! 
dbmate --migrations-dir "$MIGRATIONS_DIR/migrations" up; then + echo "Error: Final migration failed" + exit 1 + fi + + echo "Running dbmate dump with $PSQLBIN" + perform_dump + + echo "CURRENT_SYSTEM: $CURRENT_SYSTEM" + if [ -f "./db/schema.sql" ]; then + trim_schema + cp "./db/schema.sql" "./migrations/schema-$PSQL_VERSION.sql" + echo "Schema file moved to ./migrations/schema-$PSQL_VERSION.sql" + echo "PSQLBIN is $PSQLBIN" + else + echo "Warning: schema.sql file not found in ./db directory" + exit 1 + fi + + # If we get here, all commands succeeded + echo "PostgreSQL migration completed successfully" + echo "Check migrations are idempotent" + for sql in ./migrations/db/migrations/*.sql; do + echo "$0: running $sql" + "${PSQLBIN}/psql" -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U "$PGSQL_SUPERUSER" -p "$PORTNO" -h localhost -d postgres -f "$sql" || { + echo "Failed to execute $sql" + exit 1 + } + done +} + +if [ "$PSQL_VERSION" == "all" ]; then + VERSIONS=$(yq '.postgres_major[]' "$ANSIBLE_VARS" | tr -d '"') + echo "$VERSIONS" | while read -r version; do + PSQL_VERSION="$version" + echo "Migrating to PostgreSQL version $PSQL_VERSION" + migrate_version + cleanup + done +else + echo "Migrating to PostgreSQL version $PSQL_VERSION" + migrate_version + cleanup +fi diff --git a/postgres_15.8.1.044/nix/tools/local-infra-bootstrap.sh.in b/postgres_15.8.1.044/nix/tools/local-infra-bootstrap.sh.in new file mode 100644 index 0000000..f1f8b75 --- /dev/null +++ b/postgres_15.8.1.044/nix/tools/local-infra-bootstrap.sh.in @@ -0,0 +1,407 @@ +#!/usr/bin/env bash +# shellcheck shell=bash + +[ ! -z "$DEBUG" ] && set -x + +# Colors +GREEN='\033[0;32m' +RED='\033[0;31m' +NC='\033[0m' # No Color +BOLD='\033[1m' + +INFRA_REPO_DIR="" +SUPABASE_REPO="" +SETUP_FLAG=false +NODE_VERSION="20" # Default Node.js version + +print_help() { + echo "Usage: nix run .#local-infra-bootstrap -- [options]" + echo + echo "Options:" + echo " -h, --help Show this help message" + echo " -s, --setup Setup the local infrastructure for development NOTE: Requires --infrastructure-repo and --supabase-repo" + echo " --infrastructure-repo Full path to infrastructure repository directory" + echo " --supabase-repo Full path to Supabase repository directory" + echo " --aws-yubikey-setup Install AWS CLI tools with YubiKey support" + echo " --aws-yubikey-setup-no-key Install AWS CLI tools without YubiKey" + echo " --node-version Specify Node.js version to install/use (default: $NODE_VERSION)" + echo + echo "Description:" + echo " Bootstrap the local infrastructure for development." + echo " This tool wraps homebrew and other tools to install the necessary dependencies." + echo + echo "Examples:" + echo " nix run .#local-infra-bootstrap -- --setup --infrastructure-repo /path/to/infrastructure --supabase-repo /path/to/supabase" + echo " nix run .#local-infra-bootstrap -- --aws-yubikey-setup" + echo " nix run .#local-infra-bootstrap -- --setup --node-version 18" +} + +check_brew() { + if command -v brew >/dev/null 2>&1; then + echo "Homebrew is installed." + echo "Version: $(brew --version)" + else + echo "Homebrew is not installed." 
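+        # The installer command below is printed rather than executed, so the user can review it and run it in their own shell.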
+ echo "To install Homebrew, run the following command:" + echo + echo '/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"' + echo + echo "After installation, you may need to add Homebrew to your PATH:" + echo + echo "For Intel Macs:" + echo 'echo '\''eval "$(/usr/local/bin/brew shellenv)"'\'' >> ~/.zprofile' + echo 'eval "$(/usr/local/bin/brew shellenv)"' + echo + echo "For Apple Silicon Macs (M1/M2/M3):" + echo 'echo '\''eval "$(/opt/homebrew/bin/brew shellenv)"'\'' >> ~/.zprofile' + echo 'eval "$(/opt/homebrew/bin/brew shellenv)"' + exit 1 + fi +} + +check_and_setup_node() { + echo -e "\n${BOLD}Checking Node.js installation...${NC}" + + # Check if the specified node version is installed + if ! brew list "node@$NODE_VERSION" &>/dev/null; then + echo "Node.js $NODE_VERSION is not installed. Installing..." + brew install "node@$NODE_VERSION" + fi + + # Unlink any existing node version + brew unlink node@* 2>/dev/null || true + + # Link the desired version with overwrite + echo "Linking Node.js $NODE_VERSION..." + brew link --overwrite --force "node@$NODE_VERSION" + + # Verify installation + if ! command -v node &>/dev/null; then + echo -e "${RED}❌ Failed to install Node.js $NODE_VERSION${NC}" + return 1 + fi + + current_version=$(node -v | cut -d 'v' -f2 | cut -d '.' -f1) + if [ "$current_version" = "$NODE_VERSION" ]; then + echo -e "${GREEN}✅ Node.js $NODE_VERSION is now active${NC}" + return 0 + else + echo -e "${RED}❌ Failed to switch to Node.js $NODE_VERSION${NC}" + return 1 + fi +} + +configure_ngrok() { + echo -e "\n${BOLD}Configuring ngrok settings...${NC}" + + if [ -z "$INFRA_REPO_DIR" ]; then + echo -e "${RED}Error: Infrastructure repository directory not specified${NC}" + return 1 + fi + + local env_file="$INFRA_REPO_DIR/.local.env" + mkdir -p "$INFRA_REPO_DIR" + + read -p "Enter your ngrok static domain (example.ngrok-free.app): " static_domain + read -p "Enter your ngrok auth token: " auth_token + + if [[ -z "$static_domain" || -z "$auth_token" ]]; then + echo -e "${RED}Error: Both static domain and auth token are required${NC}" + return 1 + fi + + cat > "$env_file" << EOF +EXTERNAL_SUPABASE_API_URL=http://${static_domain} +NGROK_AUTHTOKEN=${auth_token} +NGROK_STATIC_DOMAIN=${static_domain} +WARP_ALWAYS_ENABLED=true +SUPABASE_PATH=${SUPABASE_REPO} +EOF + + echo -e "${GREEN}✅ ngrok configuration saved to ${env_file}${NC}" +} + +check_app() { + local brew_name=$1 + local check_command=$2 + + echo "Checking $brew_name..." + + # Special case for OrbStack + if [ "$brew_name" = "orbstack" ]; then + if [ -d "/Applications/OrbStack.app" ]; then + echo "✅ $brew_name is installed" + return 0 + else + echo "❌ $brew_name is not installed" + return 1 + fi + fi + + # Standard command check + if command -v "$check_command" >/dev/null 2>&1; then + echo "✅ $brew_name is installed" + return 0 + else + echo "❌ $brew_name is not installed" + return 1 + fi +} + +install_app() { + local app=$1 + echo "Installing $app..." 
+ + case "$app" in + "orbstack") + brew install --cask "$app" + if [ -d "/Applications/OrbStack.app" ]; then + echo "✅ OrbStack installed successfully" + echo "⚠️ Important: Please open OrbStack.app to complete the setup" + return 0 + fi + ;; + "aws-vault") + brew install --cask "$app" + # Give the system a moment to complete the linking + sleep 1 + if [ -f "/opt/homebrew/bin/aws-vault" ] || [ -f "/usr/local/bin/aws-vault" ]; then + echo "✅ aws-vault installed successfully" + return 0 + fi + ;; + "awscli") + brew install "$app" + # Reload shell environment to ensure AWS CLI is in PATH + eval "$(/opt/homebrew/bin/brew shellenv)" + if command -v aws >/dev/null 2>&1; then + echo "✅ $app installed successfully" + return 0 + fi + ;; + "dbmate"|*) + brew install "$app" + if command -v "$app" >/dev/null 2>&1; then + echo "✅ $app installed successfully" + return 0 + fi + ;; + esac + + echo "❌ Failed to install $app" + return 1 +} + +check_corepack_pnpm() { + echo -e "\nChecking Corepack PNPM setup..." + + # First check if pnpm binary exists in common locations + if [ -f "$(which pnpm 2>/dev/null)" ]; then + # Try to get version without executing pnpm + echo -e "${GREEN}✅ PNPM is enabled${NC}" + return 0 + else + echo -e "${RED}❌ PNPM is not installed${NC}" + return 1 + fi +} + +enable_corepack_pnpm() { + local pnpm_checked=false + + if [ "$pnpm_checked" = false ]; then + if ! check_corepack_pnpm; then + read -p "Would you like to enable PNPM through Corepack? (y/n) " -n 1 -r + echo + if [[ $REPLY =~ ^[Yy]$ ]]; then + echo "Running corepack enable pnpm..." + # Remove existing symlinks if present + sudo rm -f /opt/homebrew/bin/pnpm /opt/homebrew/bin/pnpx + if NODE_OPTIONS="" corepack enable pnpm; then + echo -e "${GREEN}✅ Successfully enabled PNPM through Corepack${NC}" + pnpm_checked=true + return 0 + else + echo -e "${RED}❌ Failed to enable PNPM through Corepack${NC}" + pnpm_checked=true + return 1 + fi + else + echo -e "\n${BOLD}Skipping PNPM setup...${NC}" + pnpm_checked=true + return 0 + fi + else + pnpm_checked=true + return 0 + fi + fi + return 0 +} + +install_prerequisites() { + echo -e "\n${BOLD}Checking Prerequisites ...${NC}" + echo + + # Define apps and their check commands + local apps=("awscli" "dbmate" "orbstack" "corepack" "aws-vault" "tmux" "tmuxp" "ngrok") + local commands=("aws" "dbmate" "orbstack" "corepack" "aws-vault" "tmux" "tmuxp" "ngrok") + local pnpm_checked=false + + # Check each app and prompt for installation if missing + for i in "${!apps[@]}"; do + local brew_name="${apps[$i]}" + local check_command="${commands[$i]}" + + check_app "$brew_name" "$check_command" + if [ $? -eq 1 ]; then + read -p "Would you like to install $brew_name? (y/n) " -n 1 -r + echo + if [[ $REPLY =~ ^[Yy]$ ]]; then + case "$brew_name" in + "tmux"|"tmuxp") + echo "Installing $brew_name..." 
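+                        # tmux and tmuxp are plain formulae, so a simple command lookup is enough to verify the install.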
+ brew install "$brew_name" + if command -v "$brew_name" >/dev/null 2>&1; then + echo -e "${GREEN}✅ $brew_name installed successfully${NC}" + else + echo -e "${RED}❌ Failed to install $brew_name${NC}" + fi + ;; + *) + install_app "$brew_name" + ;; + esac + + # If we just installed corepack, check and enable pnpm + if [ "$brew_name" = "corepack" ] && [ "$pnpm_checked" = false ]; then + NODE_OPTIONS="" enable_corepack_pnpm + pnpm_checked=true + fi + else + echo -e "\n${BOLD}Skipping installation of $brew_name ...${NC}" + fi + elif [ "$brew_name" = "corepack" ] && [ "$pnpm_checked" = false ]; then + # If corepack is already installed, check pnpm once + NODE_OPTIONS="" enable_corepack_pnpm + pnpm_checked=true + fi + echo + done + if command -v ngrok >/dev/null 2>&1; then + configure_ngrok + fi + echo -e "\n${BOLD}Prerequisites Check Complete ${NC}" +} + +# AWS YubiKey Setup Function - Only installs required tools +install_aws_tools() { + echo -e "\n${BOLD}Installing required AWS CLI tools...${NC}" + + # Check and install AWS CLI + if ! command -v aws >/dev/null 2>&1; then + brew install awscli + echo -e "✅ AWS CLI installed" + else + echo -e "✅ AWS CLI already installed" + fi + + # Check and install AWS Vault + if ! command -v aws-vault >/dev/null 2>&1; then + brew install homebrew/cask/aws-vault + echo -e "✅ AWS Vault installed" + else + echo -e "✅ AWS Vault already installed" + fi + + if [[ "$1" != "--no-yubikey" ]]; then + # Check and install YubiKey Manager + if ! command -v ykman >/dev/null 2>&1; then + brew install ykman + echo -e "✅ YubiKey Manager installed" + else + echo -e "✅ YubiKey Manager already installed" + fi + fi + + echo -e "\n${BOLD}✅ AWS CLI tools installation complete${NC}" + echo -e "Please follow the AWS CLI MFA+YubiKey setup documentation for next steps." +} + +while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + print_help + exit 0 + ;; + -s|--setup) + SETUP_FLAG=true + shift + ;; + --node-version) + if [ -n "$2" ]; then + NODE_VERSION="$2" + shift 2 + else + echo "Error: --node-version requires a version number" + exit 1 + fi + ;; + --infrastructure-repo) + if [ -n "$2" ]; then + INFRA_REPO_DIR="$2" + shift 2 + else + echo "Error: --infrastructure-repo requires a path argument" + exit 1 + fi + ;; + --supabase-repo) + if [ -n "$2" ]; then + SUPABASE_REPO="$2" + shift 2 + else + echo "Error: --supabase-repo requires a path argument" + exit 1 + fi + ;; + --aws-yubikey-setup) + check_brew + install_aws_tools + shift + ;; + --aws-yubikey-setup-no-key) + check_brew + install_aws_tools "--no-yubikey" + shift + ;; + *) + echo "Unknown argument: $1" + print_help + exit 1 + ;; + esac +done + +# Validate setup requirements +if [ "$SETUP_FLAG" = true ]; then + if [ -z "$INFRA_REPO_DIR" ]; then + echo -e "${RED}Error: --infrastructure-repo is required when using --setup${NC}" + print_help + exit 1 + fi + if [ -z "$SUPABASE_REPO" ]; then + echo -e "${RED}Error: --supabase-repo is required when using --setup${NC}" + print_help + exit 1 + fi + check_brew + check_and_setup_node + install_prerequisites +fi + +# If no arguments provided, show help +if [ "$SETUP_FLAG" = false ] && [ -z "$INFRA_REPO_DIR" ]; then + print_help + exit 0 +fi \ No newline at end of file diff --git a/postgres_15.8.1.044/nix/tools/migrate-tool.sh.in b/postgres_15.8.1.044/nix/tools/migrate-tool.sh.in new file mode 100644 index 0000000..10239ed --- /dev/null +++ b/postgres_15.8.1.044/nix/tools/migrate-tool.sh.in @@ -0,0 +1,123 @@ +#!/usr/bin/env bash + +[ ! 
-z "$DEBUG" ] && set -x
+
+# first argument is the old version; a /nix/store path, or 15 or 16
+if [[ $1 == /nix/store* ]]; then
+  if [ ! -L "$1/receipt.json" ] || [ ! -e "$1/receipt.json" ]; then
+    echo "ERROR: $1 does not look like a valid Postgres install"
+    exit 1
+  fi
+  OLDVER="$1"
+elif [ "$1" == "15" ]; then
+  PSQL15=@PSQL15_BINDIR@
+  OLDVER="$PSQL15"
+elif [ "$1" == "16" ]; then
+  PSQL16=@PSQL16_BINDIR@
+  OLDVER="$PSQL16"
+else
+  echo "Please provide a valid Postgres version (15 or 16), or a /nix/store path"
+  exit 1
+fi
+
+# second argument is the new version; a /nix/store path, or 15 or 16
+if [[ $2 == /nix/store* ]]; then
+  if [ ! -L "$2/receipt.json" ] || [ ! -e "$2/receipt.json" ]; then
+    echo "ERROR: $2 does not look like a valid Postgres install"
+    exit 1
+  fi
+  NEWVER="$2"
+elif [ "$2" == "15" ]; then
+  PSQL15=@PSQL15_BINDIR@
+  NEWVER="$PSQL15"
+elif [ "$2" == "16" ]; then
+  PSQL16=@PSQL16_BINDIR@
+  NEWVER="$PSQL16"
+  echo "NEWVER IS $NEWVER"
+else
+  echo "Please provide a valid Postgres version (15 or 16), or a /nix/store path"
+  exit 1
+fi
+
+# third argument is the upgrade method: either pg_dumpall or pg_upgrade
+if [ "$3" != "pg_dumpall" ] && [ "$3" != "pg_upgrade" ]; then
+  echo "Please provide a valid upgrade method (pg_dumpall or pg_upgrade)"
+  exit 1
+fi
+UPGRADE_METHOD="$3"
+
+echo "Old server build: PSQL $1"
+echo "New server build: PSQL $2"
+echo "Upgrade method: $UPGRADE_METHOD"
+
+PORTNO="${2:-@PGSQL_DEFAULT_PORT@}"
+DATDIR=$(mktemp -d)
+NEWDAT=$(mktemp -d)
+mkdir -p "$DATDIR" "$NEWDAT"
+
+echo "NOTE: using temporary directory $DATDIR for PSQL $1 data, which will not be removed"
+echo "NOTE: you are free to re-use this data directory at will"
+echo
+
+$OLDVER/bin/initdb -D "$DATDIR" --locale=C --username=supabase_admin
+$NEWVER/bin/initdb -D "$NEWDAT" --locale=C --username=supabase_admin
+
+# NOTE (aseipp): we need to patch postgresql.conf to have the right pgsodium_getkey script
+PSQL_CONF_FILE=@PSQL_CONF_FILE@
+PGSODIUM_GETKEY_SCRIPT=@PGSODIUM_GETKEY@
+echo "NOTE: patching postgresql.conf files"
+for x in "$DATDIR" "$NEWDAT"; do
+  sed \
+    "s#@PGSODIUM_GETKEY_SCRIPT@#$PGSODIUM_GETKEY_SCRIPT#g" \
+    $PSQL_CONF_FILE > "$x/postgresql.conf"
+done
+
+echo "NOTE: Starting first server (v${1}) to load data into the system"
+$OLDVER/bin/pg_ctl start -D "$DATDIR"
+
+PRIMING_SCRIPT=@PRIMING_SCRIPT@
+MIGRATION_DATA=@MIGRATION_DATA@
+
+$OLDVER/bin/psql -h localhost -d postgres -Xf "$PRIMING_SCRIPT"
+$OLDVER/bin/psql -h localhost -d postgres -Xf "$MIGRATION_DATA"
+
+if [ "$UPGRADE_METHOD" == "pg_upgrade" ]; then
+  echo "NOTE: Stopping old server (v${1}) to prepare for migration"
+  $OLDVER/bin/pg_ctl stop -D "$DATDIR"
+
+  echo "NOTE: Migrating old data $DATDIR to $NEWDAT using pg_upgrade"
+
+  export PGDATAOLD="$DATDIR"
+  export PGDATANEW="$NEWDAT"
+  export PGBINOLD="$OLDVER/bin"
+  export PGBINNEW="$NEWVER/bin"
+
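+  # pg_upgrade picks up PGDATAOLD/PGDATANEW/PGBINOLD/PGBINNEW from the environment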
+  if ! $NEWVER/bin/pg_upgrade --check; then
+    echo "ERROR: pg_upgrade check failed"
+    exit 1
+  fi
+
+  echo "NOTE: pg_upgrade check passed, proceeding with migration"
+  $NEWVER/bin/pg_upgrade
+  rm -f delete_old_cluster.sh # we don't need this
+  exit 0
+fi
+
+if [ "$UPGRADE_METHOD" == "pg_dumpall" ]; then
+  SQLDAT="$DATDIR/dump.sql"
+  echo "NOTE: Exporting data via pg_dumpall ($SQLDAT)"
+  $NEWVER/bin/pg_dumpall -h localhost > "$SQLDAT"
+
+  echo "NOTE: Stopping old server (v${1}) to prepare for migration"
+  $OLDVER/bin/pg_ctl stop -D "$DATDIR"
+
+  echo "NOTE: Starting second server (v${2}) to load data into the system"
+  $NEWVER/bin/pg_ctl start -D "$NEWDAT"
+
+  echo "NOTE: Loading data into new server (v${2}) via 'cat | psql'"
+  cat "$SQLDAT" | $NEWVER/bin/psql -h localhost -d postgres
+
+  printf "\n\n\n\n"
+  echo "NOTE: Done, check logs. Stopping the server; new database is located at $NEWDAT"
+  $NEWVER/bin/pg_ctl stop -D "$NEWDAT"
+fi
diff --git a/postgres_15.8.1.044/nix/tools/postgresql_schema.sql b/postgres_15.8.1.044/nix/tools/postgresql_schema.sql
new file mode 100644
index 0000000..4547ab2
--- /dev/null
+++ b/postgres_15.8.1.044/nix/tools/postgresql_schema.sql
@@ -0,0 +1,11 @@
+ALTER DATABASE postgres SET "app.settings.jwt_secret" TO 'my_jwt_secret_which_is_not_so_secret';
+ALTER DATABASE postgres SET "app.settings.jwt_exp" TO 3600;
+ALTER USER supabase_admin WITH PASSWORD 'postgres';
+ALTER USER postgres WITH PASSWORD 'postgres';
+ALTER USER authenticator WITH PASSWORD 'postgres';
+ALTER USER pgbouncer WITH PASSWORD 'postgres';
+ALTER USER supabase_auth_admin WITH PASSWORD 'postgres';
+ALTER USER supabase_storage_admin WITH PASSWORD 'postgres';
+ALTER USER supabase_replication_admin WITH PASSWORD 'postgres';
+ALTER ROLE supabase_read_only_user WITH PASSWORD 'postgres';
+ALTER ROLE supabase_admin SET search_path TO "$user",public,auth,extensions;
diff --git a/postgres_15.8.1.044/nix/tools/run-client.sh.in b/postgres_15.8.1.044/nix/tools/run-client.sh.in
new file mode 100644
index 0000000..ef81d58
--- /dev/null
+++ b/postgres_15.8.1.044/nix/tools/run-client.sh.in
@@ -0,0 +1,125 @@
+#!/usr/bin/env bash
+# shellcheck shell=bash
+
+[ ! -z "$DEBUG" ] && set -x
+
+# Default values
+PSQL_VERSION="15"
+MIGRATION_FILE=""
+PORTNO="@PGSQL_DEFAULT_PORT@"
+PSQL_USER="postgres"
+
+# Function to display help
+print_help() {
+  echo "Usage: nix run .#start-client -- [options]"
+  echo
+  echo "Options:"
+  echo "  -v, --version [15|16|orioledb-17]  Specify the PostgreSQL version to use (default: 15)"
+  echo "  -f, --file FILE                    Provide a custom migration script"
+  echo "  -u, --user USER                    Specify the user/role to use (default: postgres)"
+  echo "  -p, --port PORT                    Specify the port to connect to (default: @PGSQL_DEFAULT_PORT@)"
+  echo "  -h, --help                         Show this help message"
+  echo
+  echo "Description:"
+  echo "  Starts an interactive 'psql' session connecting to a Postgres database started with the"
+  echo "  'nix run .#start-server' command. If a migration file is provided, it is used and the"
+  echo "  default migrations are skipped; otherwise the client initializes the database with the"
+  echo "  default migrations for a new Supabase project."
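+  # (keep in mind: the --port value must match the port the running server was started with)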
+  echo
+  echo "Examples:"
+  echo "  nix run .#start-client"
+  echo "  nix run .#start-client -- --version 15"
+  echo "  nix run .#start-client -- --version 16 --file custom_migration.sql"
+  echo "  nix run .#start-client -- --version 16 --port 5433"
+  echo "  nix run .#start-client -- --version 16 --user supabase_admin"
+}
+
+# Parse arguments
+while [[ "$#" -gt 0 ]]; do
+  case "$1" in
+    -v|--version)
+      if [[ -n "$2" && ! "$2" =~ ^- ]]; then
+        PSQL_VERSION="$2"
+        shift 2
+      else
+        echo "Error: --version requires an argument (15, 16, or orioledb-17)"
+        exit 1
+      fi
+      ;;
+    -f|--file)
+      if [[ -n "$2" && ! "$2" =~ ^- ]]; then
+        MIGRATION_FILE="$2"
+        shift 2
+      else
+        echo "Error: --file requires a filename"
+        exit 1
+      fi
+      ;;
+    -u|--user)
+      if [[ -n "$2" && ! "$2" =~ ^- ]]; then
+        PSQL_USER="$2"
+        shift 2
+      else
+        echo "Error: --user requires an argument"
+        exit 1
+      fi
+      ;;
+    -p|--port)
+      if [[ -n "$2" && ! "$2" =~ ^- ]]; then
+        PORTNO="$2"
+        shift 2
+      else
+        echo "Error: --port requires an argument"
+        exit 1
+      fi
+      ;;
+    -h|--help)
+      print_help
+      exit 0
+      ;;
+    *)
+      echo "Unknown option: $1"
+      print_help
+      exit 1
+      ;;
+  esac
+done
+
+# Check if version is provided
+if [[ -z "$PSQL_VERSION" ]]; then
+  echo "Error: PostgreSQL version is required."
+  print_help
+  exit 1
+fi
+
+# Determine PostgreSQL version
+if [ "$PSQL_VERSION" == "15" ]; then
+  echo "Starting client for PSQL 15"
+  PSQL15=@PSQL15_BINDIR@
+  BINDIR="$PSQL15"
+elif [ "$PSQL_VERSION" == "16" ]; then
+  echo "Starting client for PSQL 16"
+  PSQL16=@PSQL16_BINDIR@
+  BINDIR="$PSQL16"
+elif [ "$PSQL_VERSION" == "orioledb-17" ]; then
+  echo "Starting client for PSQL ORIOLEDB 17"
+  PSQLORIOLEDB17=@PSQLORIOLEDB17_BINDIR@
+  BINDIR="$PSQLORIOLEDB17"
+else
+  echo "Please provide a valid Postgres version (15, 16, or orioledb-17)"
+  exit 1
+fi
+
+#vars for migration.sh
+export PATH=$BINDIR/bin:$PATH
+export POSTGRES_DB=postgres
+export POSTGRES_HOST=localhost
+
+PGSQL_SUPERUSER=@PGSQL_SUPERUSER@
+MIGRATIONS_DIR=@MIGRATIONS_DIR@
+POSTGRESQL_SCHEMA_SQL=@POSTGRESQL_SCHEMA_SQL@
+PGBOUNCER_AUTH_SCHEMA_SQL=@PGBOUNCER_AUTH_SCHEMA_SQL@
+STAT_EXTENSION_SQL=@STAT_EXTENSION_SQL@
+
+# Start interactive psql session
+exec psql -U "$PSQL_USER" -p "$PORTNO" -h localhost postgres
diff --git a/postgres_15.8.1.044/nix/tools/run-replica.sh.in b/postgres_15.8.1.044/nix/tools/run-replica.sh.in
new file mode 100644
index 0000000..e2096b1
--- /dev/null
+++ b/postgres_15.8.1.044/nix/tools/run-replica.sh.in
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+# shellcheck shell=bash
+
+[ ! -z "$DEBUG" ] && set -x
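+# Usage: run-replica.sh <15|16|orioledb-16> MASTER_PORT REPLICA_PORT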
+
+# first argument should be '15' or '16' for the version
+if [ "$1" == "15" ]; then
+  echo "Starting server for PSQL 15"
+  PSQL15=@PSQL15_BINDIR@
+  BINDIR="$PSQL15"
+elif [ "$1" == "16" ]; then
+  echo "Starting server for PSQL 16"
+  PSQL16=@PSQL16_BINDIR@
+  BINDIR="$PSQL16"
+elif [ "$1" == "orioledb-16" ]; then
+  echo "Starting server for PSQL ORIOLEDB 16"
+  PSQLORIOLEDB16=@PSQLORIOLEDB16_BINDIR@
+  BINDIR="$PSQLORIOLEDB16"
+else
+  echo "Please provide a valid Postgres version (15, 16 or orioledb-16)"
+  exit 1
+fi
+
+export PATH=$BINDIR/bin:$PATH
+
+PGSQL_SUPERUSER=@PGSQL_SUPERUSER@
+MASTER_PORTNO="$2"
+REPLICA_PORTNO="$3"
+REPLICA_SLOT="replica_$RANDOM"
+DATDIR=$(mktemp -d)
+mkdir -p "$DATDIR"
+
+echo "NOTE: running pg_basebackup for server on port $MASTER_PORTNO"
+echo "NOTE: using replica slot $REPLICA_SLOT"
+
+pg_basebackup -p "$MASTER_PORTNO" -h localhost -U "${PGSQL_SUPERUSER}" -X stream -C -S "$REPLICA_SLOT" -v -R -D "$DATDIR"
+
+echo "NOTE: using port $REPLICA_PORTNO for replica"
+echo "NOTE: using temporary directory $DATDIR for data, which will not be removed"
+echo "NOTE: you are free to re-use this data directory at will"
+echo
+
+exec postgres -p "$REPLICA_PORTNO" -D "$DATDIR" -k /tmp
diff --git a/postgres_15.8.1.044/nix/tools/run-restore.sh.in b/postgres_15.8.1.044/nix/tools/run-restore.sh.in
new file mode 100644
index 0000000..33fa70c
--- /dev/null
+++ b/postgres_15.8.1.044/nix/tools/run-restore.sh.in
@@ -0,0 +1,121 @@
+#!/usr/bin/env bash
+# shellcheck shell=bash
+
+set -euo pipefail
+
+# Function to display help message
+show_help() {
+  echo "Usage: nix run .#pg-restore -- [OPTIONS]"
+  echo
+  echo "Run pg_restore with the specified parameters."
+  echo
+  echo "Options:"
+  echo "  --version   PostgreSQL version (currently only 15 is supported)"
+  echo "  --dbname    Name of the database to restore to"
+  echo "  --host      Host of the database server"
+  echo "  --user      Database user to connect as"
+  echo "  --file      Path to the file to restore from (absolute or relative to current directory)"
+  echo "  --port      Port number (default: 5432)"
+  echo "  -h, --help  Show this help message and exit"
+  echo "Example:"
+  echo "nix run .#pg-restore -- --version 15 --dbname postgres --host localhost --user postgres --port 5435 --file my.dump"
+}
+
+# Initialize variables
+PG_VERSION=""
+DBNAME=""
+DBHOST=""
+DBUSER=""
+RESTORE_FILE=""
+PORT="5432"
+
+# Parse command line arguments
+while [[ $# -gt 0 ]]; do
+  case $1 in
+    --version)
+      PG_VERSION="$2"
+      shift 2
+      ;;
+    --dbname)
+      DBNAME="$2"
+      shift 2
+      ;;
+    --host)
+      DBHOST="$2"
+      shift 2
+      ;;
+    --user)
+      DBUSER="$2"
+      shift 2
+      ;;
+    --file)
+      RESTORE_FILE="$2"
+      shift 2
+      ;;
+    --port)
+      PORT="$2"
+      shift 2
+      ;;
+    -h|--help)
+      show_help
+      exit 0
+      ;;
+    *)
+      echo "Unknown option: $1"
+      show_help
+      exit 1
+      ;;
+  esac
+done
+
+# Check if all required arguments are provided
+if [ -z "$PG_VERSION" ] || [ -z "$DBNAME" ] || [ -z "$DBHOST" ] || [ -z "$DBUSER" ] || [ -z "$RESTORE_FILE" ]; then
+  echo "Error: Missing required arguments."
+  show_help
+  exit 1
+fi
+
+if [ "$PG_VERSION" == "15" ]; then
+  echo "Starting restore for PSQL 15"
+  PSQL15=@PSQL15_BINDIR@
+  PSQL_BINDIR="$PSQL15"
+else
+  echo "Error: Please provide a valid Postgres version (currently only 15 is supported)"
+  show_help
+  exit 1
+fi
+
+# Convert RESTORE_FILE to an absolute path if it's relative
+if [[ "$RESTORE_FILE" != /* ]]; then
+  RESTORE_FILE="$(pwd)/$RESTORE_FILE"
+fi
+
+# Check if the file exists
+if [ ! -f "$RESTORE_FILE" ]; then
+  echo "Error: Restore file '$RESTORE_FILE' does not exist."
+  exit 1
+fi
+
+echo "Using restore file: $RESTORE_FILE"
+
+# Run pg_restore and capture its exit status
+# (suspend 'set -e' so a failed restore reaches the status check below instead of aborting)
+set +e
+"$PSQL_BINDIR/bin/pg_restore" \
+  -h "$DBHOST" \
+  -p "$PORT" \
+  -U "$DBUSER" \
+  -d "$DBNAME" \
+  -v \
+  --no-owner \
+  --no-acl \
+  "$RESTORE_FILE"
+
+RESTORE_STATUS=$?
+set -e
+
+# Check the exit status of pg_restore
+if [ $RESTORE_STATUS -eq 0 ]; then
+  echo "Restore completed successfully."
+  exit 0
+else
+  echo "Restore failed with exit code $RESTORE_STATUS."
+  exit $RESTORE_STATUS
+fi
\ No newline at end of file
diff --git a/postgres_15.8.1.044/nix/tools/run-server.sh.in b/postgres_15.8.1.044/nix/tools/run-server.sh.in
new file mode 100644
index 0000000..0586e01
--- /dev/null
+++ b/postgres_15.8.1.044/nix/tools/run-server.sh.in
@@ -0,0 +1,356 @@
+#!@SHELL_PATH@
+# shellcheck shell=bash
+[ ! -z "$DEBUG" ] && set -x
+
+# Default values
+SKIP_MIGRATIONS=false
+PSQL_USER="postgres"
+MIGRATION_FILE=""
+DAEMONIZE=false
+GETKEY_SCRIPT=""
+
+# Function to display help
+print_help() {
+  echo "Usage: start-postgres-server [options] VERSION [PORT]"
+  echo
+  echo "Options:"
+  echo "  --skip-migrations        Skip running migrations and SQL statements"
+  echo "  --migration-file FILE    Provide a custom migration script"
+  echo "  --user USER              Specify the user/role to use (default: postgres)"
+  echo "  --getkey-script SCRIPT   Provide a custom path to the PGSODIUM_GETKEY_SCRIPT"
+  echo "  --daemonize              Start the server in the background via pg_ctl"
+  echo "  --datdir DIR             Use DIR as the data directory (default: a fresh mktemp dir)"
+  echo "  -h, --help               Show this help message"
+  echo
+  echo "VERSION must be one of: 15, orioledb-17"
+  echo "PORT is optional (default: @PGSQL_DEFAULT_PORT@)"
+}
+
+start_postgres() {
+  local mode=$1
+  local LOG_DIR="${DATDIR}_logs"
+  mkdir -p "$LOG_DIR"
+  local LOG_FILE="$LOG_DIR/postgres.log"
+  touch "$LOG_FILE"
+  if [ "$mode" = "daemon" ]; then
+    # Start the server
+    pg_ctl start -D "$DATDIR" -l "$LOG_FILE" \
+      -o "--config-file=$DATDIR/postgresql.conf -p $PORTNO -k $DATDIR/tmp"
+
+    # Give it a moment to write logs
+    sleep 1
+
+    # Check server status and logs
+    if ! pg_ctl status -D "$DATDIR"; then
+      echo "PostgreSQL failed to start. Full logs:"
+      cat "$LOG_FILE"
+      # You might also want to see the postmaster.pid if it exists
+      if [ -f "$DATDIR/postmaster.pid" ]; then
+        echo "postmaster.pid contents:"
+        cat "$DATDIR/postmaster.pid"
+      fi
+      return 1
+    fi
+  else
+    # Foreground mode
+    exec postgres --config-file="$DATDIR/postgresql.conf" -p "$PORTNO" -D "$DATDIR" -k "/tmp" -F
+  fi
+}
+
+stop_postgres() {
+  if [ "$DAEMONIZE" = true ]; then
+    echo "PostgreSQL is running in daemon mode. Please stop it using pg_ctl."
+  else
+    pg_ctl stop -D "$DATDIR" -m fast
+  fi
+}
+
+trap 'stop_postgres' SIGINT SIGTERM
+
+# Parse arguments
+while [[ "$#" -gt 0 ]]; do
+  case "$1" in
+    --skip-migrations)
+      SKIP_MIGRATIONS=true
+      shift
+      ;;
+    --migration-file)
+      if [[ -n "$2" && ! "$2" =~ ^- ]]; then
+        MIGRATION_FILE="$2"
+        shift 2
+      else
+        echo "Error: --migration-file requires a filename"
+        exit 1
+      fi
+      ;;
+    --user)
+      if [[ -n "$2" && ! "$2" =~ ^- ]]; then
+        PSQL_USER="$2"
+        shift 2
+      else
+        echo "Error: --user requires an argument"
+        exit 1
+      fi
+      ;;
+    --getkey-script)
+      if [[ -n "$2" && ! "$2" =~ ^- ]]; then
+        GETKEY_SCRIPT="$2"
+        shift 2
+      else
+        echo "Error: --getkey-script requires a script path"
+        exit 1
+      fi
+      ;;
+    --daemonize)
+      DAEMONIZE=true
+      shift
+      ;;
+    -h|--help)
+      print_help
+      exit 0
+      ;;
+    --datdir)
+      # --datdir lets callers reuse an existing data directory instead of the default fresh mktemp dir
+      if [[ -n "$2" && ! "$2" =~ ^- ]]; then
+        DATDIR="$2"
+        shift 2
+      else
+        echo "Error: --datdir requires a directory path"
+        exit 1
+      fi
+      ;;
+    *)
+      if [[ "$1" =~ ^- ]]; then
+        echo "Unknown option: $1"
+        print_help
+        exit 1
+      elif [[ -z "$VERSION" ]]; then
+        VERSION="$1"
+        shift
+      elif [[ -z "$PORTNO" ]]; then
+        PORTNO="$1"
+        shift
+      else
+        echo "Error: Unexpected argument: $1"
+        print_help
+        exit 1
+      fi
+      ;;
+  esac
+done
+if [[ -n "${GETKEY_SCRIPT:-}" ]]; then
+  export PGSODIUM_GETKEY_SCRIPT="$GETKEY_SCRIPT"
+else
+  PGSODIUM_GETKEY_SCRIPT="${PGSODIUM_GETKEY_SCRIPT:-@PGSODIUM_GETKEY@}"
+fi
+# Verify version and set binary directory
+if [ "$VERSION" == "15" ]; then
+  echo "Starting server for PSQL 15"
+  PSQL15=@PSQL15_BINDIR@
+  BINDIR="$PSQL15"
+elif [ "$VERSION" == "orioledb-17" ]; then
+  echo "Starting server for PSQL ORIOLEDB 17"
+  PSQLORIOLEDB17=@PSQLORIOLEDB17_BINDIR@
+  BINDIR="$PSQLORIOLEDB17"
+else
+  echo "Please provide a valid Postgres version (15, orioledb-17)"
+  exit 1
+fi
+
+# Set environment variables and paths
+export PATH=$BINDIR/bin:$PATH
+PGSQL_SUPERUSER=@PGSQL_SUPERUSER@
+PSQL_CONF_FILE=@PSQL_CONF_FILE@
+PORTNO="${PORTNO:-@PGSQL_DEFAULT_PORT@}"
+SUPAUTILS_CONFIG_FILE=@SUPAUTILS_CONF_FILE@
+LOGGING_CONFIG_FILE=@LOGGING_CONF_FILE@
+READREPL_CONFIG_FILE=@READREPL_CONF_FILE@
+PG_HBA_FILE=@PG_HBA@
+PG_IDENT_FILE=@PG_IDENT@
+EXTENSION_CUSTOM_SCRIPTS=@EXTENSION_CUSTOM_SCRIPTS_DIR@
+GROONGA=@GROONGA_DIR@
+MIGRATIONS_DIR=@MIGRATIONS_DIR@
+POSTGRESQL_SCHEMA_SQL=@POSTGRESQL_SCHEMA_SQL@
+PGBOUNCER_AUTH_SCHEMA_SQL=@PGBOUNCER_AUTH_SCHEMA_SQL@
+STAT_EXTENSION_SQL=@STAT_EXTENSION_SQL@
+MECAB_LIB=@MECAB_LIB@
+
+# Setup directories and locale settings
+if [[ -z "$DATDIR" ]]; then
+  DATDIR=$(mktemp -d)
+fi
+LOCALE_ARCHIVE=@LOCALES@
+CURRENT_SYSTEM=@CURRENT_SYSTEM@
+
+# Set locale environment
+export LOCALE_ARCHIVE
+export LANG=en_US.UTF-8
+export LANGUAGE=en_US.UTF-8
+export LC_ALL=en_US.UTF-8
+export LC_CTYPE=en_US.UTF-8
+export KEY_FILE="$DATDIR/pgsodium.key"
+echo "KEY_FILE: $KEY_FILE"
+# Only dump the key file if it already exists (a reused --datdir may have one; a fresh mktemp dir will not)
+if [ -f "$KEY_FILE" ]; then
+  echo "KEY_FILE contents:"
+  cat "$KEY_FILE"
+fi
+
+echo "PGSODIUM_GETKEY_SCRIPT: $PGSODIUM_GETKEY_SCRIPT"
+echo "NOTE: using port $PORTNO for server"
+echo "NOTE: using temporary directory $DATDIR for data"
+echo "NOTE: you are free to re-use this data directory at will"
+
+# Initialize database
+if [ "$VERSION" = "orioledb-17" ]; then
+  initdb -D "$DATDIR" \
+    --allow-group-access \
+    --username="$PGSQL_SUPERUSER" \
+    --locale-provider=icu \
+    --encoding=UTF-8 \
+    --icu-locale=en_US.UTF-8
+else
+  initdb -U "$PGSQL_SUPERUSER" -D "$DATDIR"
+fi
+
+# Copy configuration files
+echo "NOTE: patching postgresql.conf files"
+cp "$PG_HBA_FILE" "$DATDIR/pg_hba.conf"
+cp "$PG_IDENT_FILE" "$DATDIR/pg_ident.conf"
+cp "$READREPL_CONFIG_FILE" "$DATDIR/read-replica.conf"
+mkdir -p "$DATDIR/extension-custom-scripts"
+cp -r "$EXTENSION_CUSTOM_SCRIPTS"/* "$DATDIR/extension-custom-scripts"
+
+# Configure supautils
+sed "s|supautils.privileged_extensions_custom_scripts_path = '/etc/postgresql-custom/extension-custom-scripts'|supautils.privileged_extensions_custom_scripts_path = '$DATDIR/extension-custom-scripts'|" "$SUPAUTILS_CONFIG_FILE" > "$DATDIR/supautils.conf"
+
+# Configure PostgreSQL
+sed -e "1i\\
+include = '$DATDIR/supautils.conf'" \
+-e "\$a\\
+pgsodium.getkey_script = '$PGSODIUM_GETKEY_SCRIPT'" \
+-e "\$a\\
+vault.getkey_script = '$PGSODIUM_GETKEY_SCRIPT'" \
+-e "s|data_directory = '/var/lib/postgresql/data'|data_directory = '$DATDIR'|" \
+-e "s|hba_file = '/etc/postgresql/pg_hba.conf'|hba_file = '$DATDIR/pg_hba.conf'|" \
+-e "s|ident_file = '/etc/postgresql/pg_ident.conf'|ident_file = '$DATDIR/pg_ident.conf'|" \
+-e "s|include = '/etc/postgresql/logging.conf'|#&|" \
+-e "s|include = '/etc/postgresql-custom/read-replica.conf'|include = '$DATDIR/read-replica.conf'|" \
+-e "\$a\\
+session_preload_libraries = 'supautils'" \
+"$PSQL_CONF_FILE" > "$DATDIR/postgresql.conf"
+
+# Function to configure OrioleDB specific settings
+orioledb_config_items() {
+  if [[ "$1" = "orioledb-17" && "$CURRENT_SYSTEM" != "aarch64-darwin" ]]; then
+    # Remove items from postgresql.conf
+    echo "non-macos oriole conf"
+    sed -i 's/ timescaledb,//g;' "$DATDIR/postgresql.conf"
+    sed -i 's/db_user_namespace = off/#db_user_namespace = off/g;' "$DATDIR/postgresql.conf"
+    sed -i 's/ timescaledb,//g; s/ plv8,//g; s/ postgis,//g; s/ pgrouting,//g' "$DATDIR/supautils.conf"
+    sed -i 's/\(shared_preload_libraries.*\)'\''\(.*\)$/\1, orioledb'\''\2/' "$DATDIR/postgresql.conf"
+    echo "default_table_access_method = 'orioledb'" >> "$DATDIR/postgresql.conf"
+  elif [[ "$1" = "orioledb-17" && "$CURRENT_SYSTEM" = "aarch64-darwin" ]]; then
+    # macOS specific configuration
+    echo "macOS detected, applying macOS specific configuration"
+    ls -la "$DATDIR"
+
+    # Use perl instead of sed for macOS
+    perl -pi -e 's/ timescaledb,//g' "$DATDIR/postgresql.conf"
+    perl -pi -e 's/db_user_namespace = off/#db_user_namespace = off/g' "$DATDIR/postgresql.conf"
+
+    perl -pi -e 's/ timescaledb,//g' "$DATDIR/supautils.conf"
+    perl -pi -e 's/ plv8,//g' "$DATDIR/supautils.conf"
+    perl -pi -e 's/ postgis,//g' "$DATDIR/supautils.conf"
+    perl -pi -e 's/ pgrouting,//g' "$DATDIR/supautils.conf"
+
+    perl -pi -e 's/(shared_preload_libraries\s*=\s*'\''.*?)'\''/\1, orioledb'\''/' "$DATDIR/postgresql.conf"
+
+    echo "default_table_access_method = 'orioledb'" >> "$DATDIR/postgresql.conf"
+  fi
+}
+
+# Apply OrioleDB configuration if needed
+orioledb_config_items "$VERSION"
+# Configure Groonga
+export GRN_PLUGINS_DIR=$GROONGA/lib/groonga/plugins
+
+# Start postgres
+mkdir -p "$DATDIR/tmp"
+chmod 1777 "$DATDIR/tmp"
+start_postgres "daemon"
+
+# Wait for PostgreSQL to start
+for i in {1..60}; do
+  if pg_isready -h localhost -p "$PORTNO" -q; then
+    echo "PostgreSQL is ready"
+    break
+  fi
+  sleep 1
+  if [ $i -eq 60 ]; then
+    echo "PostgreSQL failed to start"
+    stop_postgres
+    exit 1
+  fi
+done
+
+# Create orioledb extension if needed
+if [ "$VERSION" = "orioledb-17" ]; then
+  psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U "$PGSQL_SUPERUSER" -p "$PORTNO" -h localhost -d postgres -c "CREATE EXTENSION IF NOT EXISTS orioledb;"
+fi
+
+# Skip migrations if requested
+if [ "$SKIP_MIGRATIONS" = false ]; then
+  # Create postgres role and set ownership
+  if ! psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U "$PGSQL_SUPERUSER" -p "$PORTNO" -h localhost -d postgres <<-EOSQL
+    create role postgres superuser login password '$PGPASSWORD';
+    alter database postgres owner to postgres;
+EOSQL
+  then
+    stop_postgres
+    exit 1
+  fi
+
+  if [ -n "$MIGRATION_FILE" ]; then
+    echo "Running user-provided migration file $MIGRATION_FILE"
+    if ! psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U "$PGSQL_SUPERUSER" -p "$PORTNO" -h localhost -f "$MIGRATION_FILE" postgres; then
+      stop_postgres
+      exit 1
+    fi
+  else
+    # Run default init scripts
+    for sql in "$MIGRATIONS_DIR"/init-scripts/*.sql; do
+      echo "Running $sql"
+      if ! psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U "$PSQL_USER" -p "$PORTNO" -h localhost -f "$sql" postgres; then
+        stop_postgres
+        exit 1
+      fi
+    done
+
+    # Set superuser password
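+    # PGPASSWORD is expected to be exported by the caller; it is used for the postgres role above and for supabase_admin below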
+    if ! psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U "$PSQL_USER" -p "$PORTNO" -h localhost -c "ALTER USER supabase_admin WITH PASSWORD '$PGPASSWORD'"; then
+      stop_postgres
+      exit 1
+    fi
+
+    # Run additional schema files
+    if ! psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U "$PSQL_USER" -p "$PORTNO" -h localhost -d postgres -f "$PGBOUNCER_AUTH_SCHEMA_SQL"; then
+      stop_postgres
+      exit 1
+    fi
+    if ! psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U "$PSQL_USER" -p "$PORTNO" -h localhost -d postgres -f "$STAT_EXTENSION_SQL"; then
+      stop_postgres
+      exit 1
+    fi
+
+    # Run migrations as superuser
+    for sql in "$MIGRATIONS_DIR"/migrations/*.sql; do
+      echo "Running $sql"
+      if ! psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U "$PGSQL_SUPERUSER" -p "$PORTNO" -h localhost -f "$sql" postgres; then
+        stop_postgres
+        exit 1
+      fi
+    done
+
+    # Run PostgreSQL schema
+    if ! psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U "$PGSQL_SUPERUSER" -p "$PORTNO" -h localhost -f "$POSTGRESQL_SCHEMA_SQL" postgres; then
+      stop_postgres
+      exit 1
+    fi
+  fi
+fi
+echo "Shutting down PostgreSQL..."
+
+stop_postgres
+
+# Restart PostgreSQL in the foreground (with log output visible) or as a daemon
+if [ "$DAEMONIZE" = true ]; then
+  start_postgres "daemon"
+else
+  start_postgres "foreground"
+fi
diff --git a/postgres_15.8.1.044/nix/tools/sync-exts-versions.sh.in b/postgres_15.8.1.044/nix/tools/sync-exts-versions.sh.in
new file mode 100644
index 0000000..1b120e9
--- /dev/null
+++ b/postgres_15.8.1.044/nix/tools/sync-exts-versions.sh.in
@@ -0,0 +1,282 @@
+#!/usr/bin/env bash
+# shellcheck shell=bash
+
+[ ! -z "$DEBUG" ] && set -x
+
+# pass in env vars supplied by nix
+yq=@YQ@
+jq=@JQ@
+editor=@NIX_EDITOR@
+ansible_vars=$($yq '.' $PWD/ansible/vars.yml)
+prefetchurl=@NIXPREFETCHURL@
+_nix=@NIX@
+fetch_source_url() {
+  local source_url=${1//\"/} # Remove double quotes
+  source_url=${source_url//\'/} # Remove single quotes
+
+  # Check if the source URL is provided
+  if [ -z "$source_url" ]; then
+    echo "Usage: fetch_nix_url <source_url>"
+    return 1
+  fi
+
+  echo "$source_url"
+
+  # Run nix-prefetch-url command
+  local initial_hash=$($prefetchurl --type sha256 "$source_url" --unpack | cut -d ' ' -f 2)
+  # once we can bump up the nix version, we can use nix hash convert --hash-algo sha256
+  local final_hash=$($_nix hash to-sri --type sha256 $initial_hash)
+  echo "$final_hash"
+}
+
+sync_version() {
+  local package_name=$1
+  local version="\"$2\""
+  local hash="\"$3\""
+
+  # Update the version and hash in the Nix expression
+  $editor $PWD/nix/ext/$package_name.nix version --inplace -v "$version"
+  $editor $PWD/nix/ext/$package_name.nix src.hash --inplace -v $hash
+}
+
+run_sync() {
+  local varname=$1
+  local package_name=$2
+
+  version=$(echo $ansible_vars | $jq -r '.'$varname'')
+  echo "$varname: $version"
+  url=$($_nix eval .#psql_15/exts/$package_name.src.url)
+  hash=$(fetch_source_url $url | tail -n 1)
+  sync_version "$package_name" "$version" "$hash"
+  echo "synced $package_name to version $version with hash $hash"
+}
+
+# for use where nix uses fetchurl instead of fetchFromGithub
+fetchurl_source_url() {
+  local source_url=${1//\"/} # Remove double quotes
+  source_url=${source_url//\'/} # Remove single quotes
+
+  # Check if the source URL is provided
+  if [ -z "$source_url" ]; then
+    echo "Usage: fetch_nix_url <source_url>"
+    return 1
+  fi
+
+  echo "$source_url"
+
+  # Run nix-prefetch-url command
+  local initial_hash=$($prefetchurl --type sha256 "$source_url" | cut -d ' ' -f 2)
+  # once we can bump up the nix version, we can use nix hash convert --hash-algo sha256
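+  # 'nix hash to-sri' rewrites the bare sha256 from nix-prefetch-url into SRI form (sha256-...)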
+  local final_hash=$($_nix hash to-sri --type sha256 $initial_hash)
+  echo "$final_hash"
+}
+
+sync_version_fetchurl() {
+  local package_name=$1
+  local version="\"$2\""
+  local hash="\"$3\""
+
+  # Update the version and hash in the Nix expression
+  $editor $PWD/nix/ext/$package_name.nix version --inplace -v "$version"
+  $editor $PWD/nix/ext/$package_name.nix src.sha256 --inplace -v $hash
+}
+
+run_sync_fetchurl() {
+  local varname=$1
+  local package_name=$2
+
+  version=$(echo $ansible_vars | $jq -r '.'$varname'')
+  echo "$varname: $version"
+  url=$($_nix eval .#psql_15/exts/$package_name.src.url)
+  hash=$(fetchurl_source_url $url | tail -n 1)
+  sync_version_fetchurl "$package_name" "$version" "$hash"
+  echo "synced $package_name to version $version with hash $hash"
+}
+
+# for use on derivations that use cargoHash
+update_cargo_vendor_hash() {
+  local package_name=$1
+  $editor $PWD/nix/ext/$package_name.nix cargoHash --inplace -v ""
+  output=$($_nix build .#psql_15/exts/$package_name 2>&1)
+
+  # Check if the command exited with an error
+  if [ $? -ne 0 ]; then
+    # Extract the hash value after "got: "
+    hash_value_scraped=$(echo "$output" | grep "got:" | awk '{for (i=1; i<=NF; i++) if ($i ~ /^sha/) print $i}')
+    hash_value="\"$hash_value_scraped\""
+    # Continue using the captured hash value
+    $editor $PWD/nix/ext/$package_name.nix cargoHash --inplace -v $hash_value
+    echo "Updated cargoHash for $package_name to $hash_value"
+  else
+    echo "$package_name builds successfully, moving on..."
+  fi
+}
+
+# iterate over the keys in the ansible vars; the case statement
+# maps each ansible var to its package name
+keys=$(echo "$ansible_vars" | $jq -r 'keys[]')
+
+for key in $keys; do
+  case $key in
+    "pg_hashids_release")
+      varname="pg_hashids_release"
+      package_name="pg_hashids"
+      run_sync $varname $package_name
+      ;;
+    "hypopg_release")
+      varname="hypopg_release"
+      package_name="hypopg"
+      run_sync $varname $package_name
+      ;;
+    "pg_graphql_release")
+      varname="pg_graphql_release"
+      package_name="pg_graphql"
+      run_sync $varname $package_name
+      update_cargo_vendor_hash $package_name
+      ;;
+    "pg_cron_release")
+      varname="pg_cron_release"
+      package_name="pg_cron"
+      run_sync $varname $package_name
+      ;;
+    "pgsql_http_release")
+      varname="pgsql_http_release"
+      package_name="pgsql-http"
+      run_sync $varname $package_name
+      ;;
+    "pg_jsonschema_release")
+      varname="pg_jsonschema_release"
+      package_name="pg_jsonschema"
+      run_sync $varname $package_name
+      update_cargo_vendor_hash $package_name
+      ;;
+    "pg_net_release")
+      varname="pg_net_release"
+      package_name="pg_net"
+      run_sync $varname $package_name
+      ;;
+    "pg_plan_filter_release")
+      varname="pg_plan_filter_release"
+      package_name="pg_plan_filter"
+      run_sync $varname $package_name
+      ;;
+    "pg_safeupdate_release")
+      varname="pg_safeupdate_release"
+      package_name="pg-safeupdate"
+      run_sync $varname $package_name
+      ;;
+    "pgsodium_release")
+      varname="pgsodium_release"
+      package_name="pgsodium"
+      run_sync $varname $package_name
+      ;;
+    "pg_repack_release")
+      varname="pg_repack_release"
+      package_name="pg_repack"
+      run_sync $varname $package_name
+      ;;
+    "pgrouting_release")
+      varname="pgrouting_release"
+      package_name="pgrouting"
+      run_sync $varname $package_name
+      ;;
+    "pgtap_release")
+      varname="pgtap_release"
+      package_name="pgtap"
+      run_sync $varname $package_name
+      ;;
+    "pg_stat_monitor_release")
+      varname="pg_stat_monitor_release"
+      package_name="pg_stat_monitor"
+      run_sync $varname $package_name
+      ;;
+    "pg_tle_release")
+      varname="pg_tle_release"
+      package_name="pg_tle"
+      
run_sync $varname $package_name + ;; + "pgaudit_release") + varname="pgaudit_release" + package_name="pgaudit" + run_sync $varname $package_name + ;; + "plpgsql_check_release") + varname="plpgsql_check_release" + package_name="plpgsql-check" + run_sync $varname $package_name + ;; + "pgvector_release") + varname="pgvector_release" + package_name="pgvector" + run_sync $varname $package_name + ;; + "pgjwt_release") + varname="pgjwt_release" + package_name="pgjwt" + run_sync $varname $package_name + ;; + "plv8_release") + varname="plv8_release" + package_name="plv8" + run_sync $varname $package_name + ;; + "postgis_release") + varname="postgis_release" + package_name="postgis" + run_sync_fetchurl $varname $package_name + ;; + "pgroonga_release") + varname="pgroonga_release" + package_name="pgroonga" + run_sync_fetchurl $varname $package_name + ;; + "rum_release") + varname="rum_release" + package_name="rum" + run_sync $varname $package_name + ;; + "timescaledb_release") + varname="timescaledb_release" + package_name="timescaledb" + run_sync $varname $package_name + ;; + "supautils_release") + varname="supautils_release" + package_name="supautils" + run_sync $varname $package_name + ;; + "vault_release") + varname="vault_release" + package_name="vault" + run_sync $varname $package_name + ;; + "wal2json_release") + varname="wal2json_release" + package_name="wal2json" + run_sync $varname $package_name + ;; + *) + ;; + esac +done + +# url=$($_nix eval .#psql_16/exts/pgvector.src.url) + +# fetch_nix_url "$url" + +#res=$editor /home/sam/postgres/nix/ext/pgvector.nix src +#echo $res +# url=$($_nix eval .#psql_16/exts/pgvector.src.url) +# #echo $url +# hash=$(fetch_source_url $url | tail -n 1) +# echo "$hash" diff --git a/postgres_15.8.1.044/nix/tools/update_readme.nu b/postgres_15.8.1.044/nix/tools/update_readme.nu new file mode 100644 index 0000000..bfb46ab --- /dev/null +++ b/postgres_15.8.1.044/nix/tools/update_readme.nu @@ -0,0 +1,212 @@ +#!/usr/bin/env nu + +# Load required data +def load_flake [] { + nix flake show --json --all-systems | from json +} + +def find_index [list: list, value: any] { + let enumerated = ($list | enumerate) + let found = ($enumerated | where item == $value | first) + if ($found | is-empty) { + -1 + } else { + $found.index + } +} + +def get_systems [flake_json] { + $flake_json | get packages | columns +} + +def get_postgres_versions [flake_json] { + let packages = ($flake_json | get packages | get aarch64-linux) + + # Get available versions from postgresql packages + let available_versions = ($packages + | columns + | where {|col| + # Match exact postgresql_ or postgresql_orioledb- + $col =~ "^postgresql_\\d+$" or $col =~ "^postgresql_orioledb-\\d+$" + } + | each {|pkg_name| + let is_orioledb = ($pkg_name =~ "orioledb") + let pkg_info = ($packages | get $pkg_name) + let version = if $is_orioledb { + $pkg_info.name | str replace "postgresql-" "" | split row "_" | first # Get "17" from "postgresql-17_5" + } else { + $pkg_info.name | str replace "postgresql-" "" | split row "." 
| first # Get "15" from "postgresql-15.8" + } + { + version: $version, + is_orioledb: $is_orioledb, + name: $pkg_info.name + } + } + ) + + $available_versions | uniq | sort-by version +} + +def get_src_url [pkg_attr] { + let result = (do { nix eval $".#($pkg_attr).src.url" } | complete) + if $result.exit_code == 0 { + $result.stdout | str trim | str replace -a '"' '' # Remove all quotes + } else { + null + } +} + +def get_extension_info [flake_json, pg_info] { + let major_version = ($pg_info.version | split row "." | first) + let version_prefix = if $pg_info.is_orioledb { + "psql_orioledb-" + $major_version + "/exts/" + } else { + "psql_" + $major_version + "/exts/" + } + + print $"Looking for extensions with prefix: ($version_prefix)" + + let sys_packages = ($flake_json | get packages | get aarch64-linux) + let ext_names = ($sys_packages + | columns + | where {|col| $col =~ $"^($version_prefix)"} + ) + print $"Found extensions: ($ext_names | str join ', ')" + + let all_exts = ($ext_names | each {|ext_name| + let ext_info = ($sys_packages | get $ext_name) + let name = ($ext_name | str replace $version_prefix "") + let version = if $name == "orioledb" { + $ext_info.name # Use name directly for orioledb + } else if ($ext_info.name | str contains "-") { + $ext_info.name | split row "-" | last + } else { + $ext_info.name + } + let src_url = (get_src_url $ext_name) + { + name: $name, + version: $version, + description: $ext_info.description, + url: $src_url + } + }) + + $all_exts | sort-by name +} + +def create_version_link [pg_info] { + if $pg_info.is_orioledb { + let display = $"orioledb-($pg_info.name)" + let url = "https://github.com/orioledb/orioledb" + $"- ✅ Postgres [($display)]\(($url)\)" + } else { + let major_version = ($pg_info.version | split row "." | first) + let url = $"https://www.postgresql.org/docs/($major_version)/index.html" + $"- ✅ Postgres [($pg_info.name)]\(($url)\)" # Use full version number + } +} + +def create_ext_table [extensions, pg_info] { + let header_version = if $pg_info.is_orioledb { + $"orioledb-($pg_info.version)" # Add orioledb prefix for orioledb versions + } else { + $pg_info.version + } + + let header = [ + "", # blank line for spacing + $"### PostgreSQL ($header_version) Extensions", + "| Extension | Version | Description |", + "| ------------- | :-------------: | ------------- |" + ] + + let rows = ($extensions | each {|ext| + let name = $ext.name + let version = $ext.version + let desc = $ext.description + let url = $ext.url # Get URL from extension info + + $"| [($name)]\(($url)\) | [($version)]\(($url)\) | ($desc) |" + }) + + $header | append $rows +} + +def update_readme [] { + let flake_json = (load_flake) + let readme_path = ([$env.PWD "README.md"] | path join) + let readme = (open $readme_path | lines) + let pg_versions = (get_postgres_versions $flake_json) + + # Find section indices + let features_start = ($readme | where $it =~ "^## Primary Features" | first) + let features_end = ($readme | where $it =~ "^## Extensions" | first) + let features_start_idx = (find_index $readme $features_start) + let features_end_idx = (find_index $readme $features_end) + + if $features_start_idx == -1 or $features_end_idx == -1 { + error make {msg: "Could not find Features sections"} + } + + # Update Primary Features section + let features_content = [ + ($pg_versions | each {|version| create_version_link $version} | str join "\n") + "- ✅ Ubuntu 20.04 (Focal Fossa)." 
+ "- ✅ [wal_level](https://www.postgresql.org/docs/current/runtime-config-wal.html) = logical and [max_replication_slots](https://www.postgresql.org/docs/current/runtime-config-replication.html) = 5. Ready for replication." + "- ✅ [Large Systems Extensions](https://github.com/aws/aws-graviton-getting-started#building-for-graviton-and-graviton2). Enabled for ARM images." + ] + + # Find extension section indices + let ext_start = ($readme | where $it =~ "^## Extensions" | first) + let ext_start_idx = (find_index $readme $ext_start) + + # Find next section after Extensions or use end of file + let next_section_idx = ($readme + | enumerate + | where {|it| $it.index > $ext_start_idx and ($it.item =~ "^## ")} + | first + | get index + | default ($readme | length) + ) + + if $ext_start_idx == -1 { + error make {msg: "Could not find Extensions section"} + } + + # Create extension sections content + let ext_sections_content = ($pg_versions | each {|version| + let extensions = (get_extension_info $flake_json $version) + create_ext_table $extensions $version + } | flatten) + + # Combine sections, removing duplicate headers + let before_features = ($readme + | range (0)..($features_start_idx) + | where {|line| not ($line =~ "^## Primary Features")} + ) + let features_header = ($readme | get $features_start_idx) + let between_sections = ($readme + | range ($features_end_idx)..($ext_start_idx) + | where {|line| + not ($line =~ "^## Primary Features" or $line =~ "^## Extensions") + } + ) + let ext_header = ($readme | get $ext_start_idx) + let after_ext = ($readme | range ($next_section_idx)..($readme | length)) + + let output = ($before_features + | append $features_header + | append $features_content + | append $between_sections + | append $ext_header + | append $ext_sections_content + | append $after_ext + | str join "\n") + + $output | save --force $readme_path +} + +# Main execution +update_readme diff --git a/postgres_15.8.1.044/postgresql.gpg.key b/postgres_15.8.1.044/postgresql.gpg.key new file mode 100644 index 0000000..443bf58 --- /dev/null +++ b/postgres_15.8.1.044/postgresql.gpg.key @@ -0,0 +1,64 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBE6XR8IBEACVdDKT2HEH1IyHzXkb4nIWAY7echjRxo7MTcj4vbXAyBKOfjja +UrBEJWHN6fjKJXOYWXHLIYg0hOGeW9qcSiaa1/rYIbOzjfGfhE4x0Y+NJHS1db0V +G6GUj3qXaeyqIJGS2z7m0Thy4Lgr/LpZlZ78Nf1fliSzBlMo1sV7PpP/7zUO+aA4 +bKa8Rio3weMXQOZgclzgeSdqtwKnyKTQdXY5MkH1QXyFIk1nTfWwyqpJjHlgtwMi +c2cxjqG5nnV9rIYlTTjYG6RBglq0SmzF/raBnF4Lwjxq4qRqvRllBXdFu5+2pMfC +IZ10HPRdqDCTN60DUix+BTzBUT30NzaLhZbOMT5RvQtvTVgWpeIn20i2NrPWNCUh +hj490dKDLpK/v+A5/i8zPvN4c6MkDHi1FZfaoz3863dylUBR3Ip26oM0hHXf4/2U +A/oA4pCl2W0hc4aNtozjKHkVjRx5Q8/hVYu+39csFWxo6YSB/KgIEw+0W8DiTII3 +RQj/OlD68ZDmGLyQPiJvaEtY9fDrcSpI0Esm0i4sjkNbuuh0Cvwwwqo5EF1zfkVj +Tqz2REYQGMJGc5LUbIpk5sMHo1HWV038TWxlDRwtOdzw08zQA6BeWe9FOokRPeR2 +AqhyaJJwOZJodKZ76S+LDwFkTLzEKnYPCzkoRwLrEdNt1M7wQBThnC5z6wARAQAB +tBxQb3N0Z3JlU1FMIERlYmlhbiBSZXBvc2l0b3J5iQJOBBMBCAA4AhsDBQsJCAcD +BRUKCQgLBRYCAwEAAh4BAheAFiEEuXsK/KoaR/BE8kSgf8x9RqzMTPgFAlhtCD8A +CgkQf8x9RqzMTPgECxAAk8uL+dwveTv6eH21tIHcltt8U3Ofajdo+D/ayO53LiYO +xi27kdHD0zvFMUWXLGxQtWyeqqDRvDagfWglHucIcaLxoxNwL8+e+9hVFIEskQAY +kVToBCKMXTQDLarz8/J030Pmcv3ihbwB+jhnykMuyyNmht4kq0CNgnlcMCdVz0d3 +z/09puryIHJrD+A8y3TD4RM74snQuwc9u5bsckvRtRJKbP3GX5JaFZAqUyZNRJRJ +Tn2OQRBhCpxhlZ2afkAPFIq2aVnEt/Ie6tmeRCzsW3lOxEH2K7MQSfSu/kRz7ELf +Cz3NJHj7rMzC+76Rhsas60t9CjmvMuGONEpctijDWONLCuch3Pdj6XpC+MVxpgBy +2VUdkunb48YhXNW0jgFGM/BFRj+dMQOUbY8PjJjsmVV0joDruWATQG/M4C7O8iU0 
+B7o6yVv4m8LDEN9CiR6r7H17m4xZseT3f+0QpMe7iQjz6XxTUFRQxXqzmNnloA1T +7VjwPqIIzkj/u0V8nICG/ktLzp1OsCFatWXh7LbU+hwYl6gsFH/mFDqVxJ3+DKQi +vyf1NatzEwl62foVjGUSpvh3ymtmtUQ4JUkNDsXiRBWczaiGSuzD9Qi0ONdkAX3b +ewqmN4TfE+XIpCPxxHXwGq9Rv1IFjOdCX0iG436GHyTLC1tTUIKF5xV4Y0+cXIOJ +Aj0EEwEIACcCGwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlLpFRkFCQ6EJy0A +CgkQf8x9RqzMTPjOZA//Zp0e25pcvle7cLc0YuFr9pBv2JIkLzPm83nkcwKmxaWa +yUIG4Sv6pH6hm8+S/CHQij/yFCX+o3ngMw2J9HBUvafZ4bnbI0RGJ70GsAwraQ0V +lkIfg7GUw3TzvoGYO42rZTru9S0K/6nFP6D1HUu+U+AsJONLeb6oypQgInfXQExP +ZyliUnHdipei4WR1YFW6sjSkZT/5C3J1wkAvPl5lvOVthI9Zs6bZlJLZwusKxU0U +M4Btgu1Sf3nnJcHmzisixwS9PMHE+AgPWIGSec/N27a0KmTTvImV6K6nEjXJey0K +2+EYJuIBsYUNorOGBwDFIhfRk9qGlpgt0KRyguV+AP5qvgry95IrYtrOuE7307Si +dEbSnvO5ezNemE7gT9Z1tM7IMPfmoKph4BfpNoH7aXiQh1Wo+ChdP92hZUtQrY2N +m13cmkxYjQ4ZgMWfYMC+DA/GooSgZM5i6hYqyyfAuUD9kwRN6BqTbuAUAp+hCWYe +N4D88sLYpFh3paDYNKJ+Gf7Yyi6gThcV956RUFDH3ys5Dk0vDL9NiWwdebWfRFbz +oRM3dyGP889aOyLzS3mh6nHzZrNGhW73kslSQek8tjKrB+56hXOnb4HaElTZGDvD +5wmrrhN94kbyGtz3cydIohvNO9d90+29h0eGEDYti7j7maHkBKUAwlcPvMg5m3aJ +Aj0EEwEIACcCGwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlEqbZUFCQg2wEEA +CgkQf8x9RqzMTPhFMQ//WxAfKMdpSIA9oIC/yPD/dJpY/+DyouOljpE6MucMy/Ar +BECjFTBwi/j9NYM4ynAk34IkhuNexc1i9/05f5RM6+riLCLgAOsADDbHD4miZzoS +xiVr6GQ3YXMbOGld9kV9Sy6mGNjcUov7iFcf5Hy5w3AjPfKuR9zXswyfzIU1YXOb +iiZT38l55pp/BSgvGVQsvbNjsff5CbEKXS7q3xW+WzN0QWF6YsfNVhFjRGj8hKtH +vwKcA02wwjLeLXVTm6915ZUKhZXUFc0vM4Pj4EgNswH8Ojw9AJaKWJIZmLyW+aP+ +wpu6YwVCicxBY59CzBO2pPJDfKFQzUtrErk9irXeuCCLesDyirxJhv8o0JAvmnMA +KOLhNFUrSQ2m+3EnF7zhfz70gHW+EG8X8mL/EN3/dUM09j6TVrjtw43RLxBzwMDe +ariFF9yC+5bLtnGgxjsB9Ik6GV5v34/NEEGf1qBiAzFmDVFRZlrNDkq6gmpvGnA5 +hUWNr+y0i01LjGyaLSWHYjgw2UEQOqcUtTFK9MNzbZze4mVaHMEz9/aMfX25R6qb +iNqCChveIm8mYr5Ds2zdZx+G5bAKdzX7nx2IUAxFQJEE94VLSp3npAaTWv3sHr7d +R8tSyUJ9poDwgw4W9BIcnAM7zvFYbLF5FNggg/26njHCCN70sHt8zGxKQINMc6SJ +Aj0EEwEIACcCGwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlB5KywFCQPDFt8A +CgkQf8x9RqzMTPhuCQ//QAjRSAOCQ02qmUAikT+mTB6baOAakkYq6uHbEO7qPZkv +4E/M+HPIJ4wdnBNeSQjfvdNcZBA/x0hr5EMcBneKKPDj4hJ0panOIRQmNSTThQw9 +OU351gm3YQctAMPRUu1fTJAL/AuZUQf9ESmhyVtWNlH/56HBfYjE4iVeaRkkNLJy +X3vkWdJSMwC/LO3Lw/0M3R8itDsm74F8w4xOdSQ52nSRFRh7PunFtREl+QzQ3EA/ +WB4AIj3VohIGkWDfPFCzV3cyZQiEnjAe9gG5pHsXHUWQsDFZ12t784JgkGyO5wT2 +6pzTiuApWM3k/9V+o3HJSgH5hn7wuTi3TelEFwP1fNzI5iUUtZdtxbFOfWMnZAyp +EhaLmXNkg4zDkH44r0ss9fR0DAgUav1a25UnbOn4PgIEQy2fgHKHwRpCy20d6oCS +lmgyWsR40EPPYvtGq49A2aK6ibXmdvvFT+Ts8Z+q2SkFpoYFX20mR2nsF0fbt1lf +H65P64dukxeRGteWIeNakDD40bAAOH8+OaoTGVBJ2ACJfLVNM53PEoftavAwUYMr +R910qvwYfd/46rh46g1Frr9SFMKYE9uvIJIgDsQB3QBp71houU4H55M5GD8XURYs ++bfiQpJG1p7eB8e5jZx1SagNWc4XwL2FzQ9svrkbg1Y+359buUiP7T6QXX2zY+8= +=XSRU +-----END PGP PUBLIC KEY BLOCK----- diff --git a/postgres_15.8.1.044/qemu-arm64-nix.pkr.hcl b/postgres_15.8.1.044/qemu-arm64-nix.pkr.hcl new file mode 100644 index 0000000..a9843d1 --- /dev/null +++ b/postgres_15.8.1.044/qemu-arm64-nix.pkr.hcl @@ -0,0 +1,142 @@ +variable "ansible_arguments" { + type = string + default = "--skip-tags install-postgrest,install-pgbouncer,install-supabase-internal" +} + +variable "environment" { + type = string + default = "prod" +} + +variable "git_sha" { + type = string +} + +locals { + creator = "packer" +} + +variable "postgres-version" { + type = string + default = "" +} + +variable "postgres-major-version" { + type = string + default = "" +} + +variable "git-head-version" { + type = string + default = "unknown" +} + +variable "packer-execution-id" { + type = string + default = "unknown" +} + +packer { + required_plugins { + amazon = { + source = 
"github.com/hashicorp/amazon"
      version = "~> 1"
    }
    qemu = {
      version = "~> 1.0"
      source  = "github.com/hashicorp/qemu"
    }
  }
}

source "null" "dependencies" {
  communicator = "none"
}

build {
  name    = "cloudimg.deps"
  sources = ["source.null.dependencies"]

  provisioner "shell-local" {
    inline = [
      "cp /usr/share/AAVMF/AAVMF_VARS.fd AAVMF_VARS.fd",
      "cloud-localds seeds-cloudimg.iso user-data-cloudimg meta-data"
    ]
    inline_shebang = "/bin/bash -e"
  }
}

source "qemu" "cloudimg" {
  boot_wait      = "2s"
  cpus           = 8
  disk_image     = true
  disk_size      = "15G"
  format         = "qcow2"
  headless       = true
  http_directory = "http"
  iso_checksum   = "file:https://cloud-images.ubuntu.com/focal/current/SHA256SUMS"
  iso_url        = "https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-arm64.img"
  memory         = 40000
  qemu_binary    = "qemu-system-aarch64"
  qemuargs = [
    ["-machine", "virt,gic-version=3"],
    ["-cpu", "host"],
    ["-device", "virtio-gpu-pci"],
    ["-drive", "if=pflash,format=raw,id=ovmf_code,readonly=on,file=/usr/share/AAVMF/AAVMF_CODE.fd"],
    ["-drive", "if=pflash,format=raw,id=ovmf_vars,file=AAVMF_VARS.fd"],
    ["-drive", "file=output-cloudimg/packer-cloudimg,format=qcow2"],
    ["-drive", "file=seeds-cloudimg.iso,format=raw"],
    ["--enable-kvm"]
  ]
  shutdown_command       = "sudo -S shutdown -P now"
  ssh_handshake_attempts = 500
  ssh_password           = "ubuntu"
  ssh_timeout            = "1h"
  ssh_username           = "ubuntu"
  ssh_wait_timeout       = "1h"
  use_backing_file       = false
  accelerator            = "kvm"
}

build {
  name    = "cloudimg.image"
  sources = ["source.qemu.cloudimg"]

  # Copy ansible playbook
  provisioner "shell" {
    inline = ["mkdir /tmp/ansible-playbook"]
  }

  provisioner "file" {
    source      = "ansible"
    destination = "/tmp/ansible-playbook"
  }

  provisioner "file" {
    source      = "scripts"
    destination = "/tmp/ansible-playbook"
  }

  provisioner "file" {
    source      = "migrations"
    destination = "/tmp"
  }

  provisioner "file" {
    source      = "ebssurrogate/files/unit-tests"
    destination = "/tmp"
  }

  provisioner "shell" {
    environment_vars = [
      "POSTGRES_MAJOR_VERSION=${var.postgres-major-version}",
      "POSTGRES_SUPABASE_VERSION=${var.postgres-version}",
      "GIT_SHA=${var.git_sha}"
    ]
    use_env_var_file    = true
    script              = "ebssurrogate/scripts/qemu-bootstrap-nix.sh"
    execute_command     = "sudo -S sh -c '. {{.EnvVarFile}} && cd /tmp/ansible-playbook && {{.Path}}'"
    start_retry_timeout = "5m"
    skip_clean          = true
  }
}
diff --git a/postgres_15.8.1.044/qemu_artifact.md b/postgres_15.8.1.044/qemu_artifact.md
new file mode 100644
index 0000000..c26b63b
--- /dev/null
+++ b/postgres_15.8.1.044/qemu_artifact.md
@@ -0,0 +1,50 @@
+# QEMU artifact
+
+We build a container image that contains a QEMU qcow2 disk image. Container images are a convenient mechanism to ship the disk image to the nodes where it's needed.
+
+Given the size of the image, the first VM using it on a node might take a while to come up while the image is being pulled down. The image can be pre-fetched to avoid this; we might also switch to other deployment mechanisms in the future.
+
+### Build process
+
+The current AMI process involves a few steps:
+
+1. nix package is built and published using GHA (`.github/workflows/nix-build.yml`)
+   - this builds Postgres along with the PG extensions we use.
+2. "stage1" build (`amazon-arm64-nix.pkr.hcl`, invoked via `.github/workflows/ami-release-nix.yml`)
+   - uses an upstream Ubuntu image to initialize the AMI
+   - installs and configures the majority of the software that gets shipped as part of the AMI (e.g. gotrue, postgrest, ...)
+3. "stage2" build (`stage2-nix-psql.pkr.hcl`, invoked via `.github/workflows/ami-release-nix.yml`)
+   - uses the image published from (2)
+   - installs and configures the software that is built and published using nix in (1)
+   - cleans up build dependencies etc.
+
+The QEMU artifact process collapses (2) and (3):
+
+a. nix package is built and published using GHA (`.github/workflows/nix-build.yml`)
+b. packer build (`qemu-arm64-nix.pkr.hcl`)
+   - uses an upstream Ubuntu live image as the base
+   - performs the work that was performed as part of the "stage1" and "stage2" builds
+   - this work is executed using `ebssurrogate/scripts/qemu-bootstrap-nix.sh`
+
+## Publish image for later use
+
+Following `make init alpine-image`, the generated VM image should be bundled as a container image with the name `supabase-postgres-test`. Publish the built docker image to a registry of your choosing, and use the published image with e.g. KubeVirt.
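+
+As a sketch, the bundling step can follow KubeVirt's containerDisk layout. Everything below (the Dockerfile name, the disk path inside the image, and the registry host) is illustrative, not tooling that ships in this repo:
+
+```bash
+# Wrap the generated qcow2 in a scratch image; KubeVirt's containerDisk
+# convention expects the disk to live under /disk.
+cat > Dockerfile.qcow2 <<'EOF'
+FROM scratch
+COPY output-cloudimg/packer-cloudimg /disk/supabase-postgres.qcow2
+EOF
+
+# Build in the k8s.io namespace so the kubelet on this node can use the image immediately
+nerdctl --namespace k8s.io build -f Dockerfile.qcow2 -t supabase-postgres-test:latest .
+
+# Publish to a registry of your choosing
+nerdctl --namespace k8s.io tag supabase-postgres-test:latest registry.example.com/supabase-postgres-test:latest
+nerdctl --namespace k8s.io push registry.example.com/supabase-postgres-test:latest
+```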
+
+## Iterating on image
+
+For faster iteration, it's more convenient to build the image on an Ubuntu bare-metal node that's part of the EKS cluster you're using. Build the image in the `k8s.io` namespace in order for it to be available for immediate use on that node.
+
+### Dependencies note
+
+Installing `docker.io` on an EKS node might interfere with the k8s setup of the node. You can instead install `nerdctl` and `buildkit`:
+
+```bash
+curl -L -O https://github.com/containerd/nerdctl/releases/download/v2.0.0/nerdctl-2.0.0-linux-arm64.tar.gz
+tar -xzf nerdctl-2.0.0-linux-arm64.tar.gz
+mv ./nerdctl /usr/local/bin/
+curl -O -L https://github.com/moby/buildkit/releases/download/v0.17.1/buildkit-v0.17.1.linux-arm64.tar.gz
+tar -xzf buildkit-v0.17.1.linux-arm64.tar.gz
+mv bin/* /usr/local/bin/
+```
+
+You'll need to run buildkit: `buildkitd`
diff --git a/postgres_15.8.1.044/rfcs/0001-connection-pooling.md b/postgres_15.8.1.044/rfcs/0001-connection-pooling.md
new file mode 100644
index 0000000..e66a19c
--- /dev/null
+++ b/postgres_15.8.1.044/rfcs/0001-connection-pooling.md
@@ -0,0 +1,71 @@
+---
+feature: Connection Pooling
+start-date: 2021-02-04
+author: kiwicopple
+co-authors: steve-chavez, dragarcia
+related-issues: (will contain links to implementation PRs)
+---
+
+# Summary
+[summary]: #summary
+
+We would like to explore connection pooling on Supabase. This RFC is intended to decide:
+
+- Whether we should provide a pooler
+- Which connection pooler we should use
+- Where in the stack it would be installed - i.e. whether we should bundle it with the Postgres build
+
+
+# Motivation
+[motivation]: #motivation
+
+In Postgres, every connection is a process. Because of this, a large number of connections to the database can be very expensive in terms of memory.
+
+When connecting to a Postgres database from serverless functions, there is no connection pooling, and so the server needs to maintain hundreds/thousands of connections.
+
+
+# Detailed design
+[design]: #detailed-design
+
+This is still in the "Gather Feedback" stage. To start the discussion:
+
+
+### 1. Decide on a PG Pooler
+
+- `pg_bouncer` - https://www.pgbouncer.org/
+- `PG Pool II` - https://www.pgpool.net/mediawiki/index.php/Main_Page
+- `odyssey` - https://github.com/yandex/odyssey
+- others?
+
+### 2. Decide on configuration
+
+Most poolers allow different configurations. We would need to decide on how we would configure the pooler by default; a sketch of what that might look like follows below.
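+
+For illustration only - these values are assumptions to anchor the discussion, not a decided default - a transaction-mode `pgbouncer.ini` could look like:
+
+```ini
+[databases]
+; route the default database through the pooler
+postgres = host=127.0.0.1 port=5432 dbname=postgres
+
+[pgbouncer]
+listen_addr = 0.0.0.0
+listen_port = 6543
+auth_type = md5
+auth_file = /etc/pgbouncer/userlist.txt
+; transaction pooling releases a server connection at the end of each
+; transaction, which suits short-lived serverless clients
+pool_mode = transaction
+default_pool_size = 15
+max_client_conn = 1000
+```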
+
+### 3. Decide if the user should be able to re-configure the pooler
+
+Should a user be able to change the configuration? If so, how would they do it?
+
+
+# Drawbacks
+[drawbacks]: #drawbacks
+
+- Security
+- Not directly relevant to the "supabase" stack, so it's additional non-core support
+
+# Alternatives
+[alternatives]: #alternatives
+
+1. Since we already offer [PostgREST](https://github.com/postgrest/postgrest) and [postgres-meta](https://github.com/supabase/pg-api), this isn't entirely necessary for the Supabase stack. Bundling this is only beneficial for connecting external tools.
+2. We could hold back on this implementation until we move to a full Postgres Operator, which would include a pooler. It would be nice to have something for local development though.
+
+
+# Unresolved questions
+[unresolved]: #unresolved-questions
+
+- Add any unresolved questions here
+
+
+# Future work
+[future]: #future-work
+
+- Add any future work here
\ No newline at end of file
diff --git a/postgres_15.8.1.044/scripts/00-python_install.sh b/postgres_15.8.1.044/scripts/00-python_install.sh
new file mode 100644
index 0000000..3a7bb75
--- /dev/null
+++ b/postgres_15.8.1.044/scripts/00-python_install.sh
@@ -0,0 +1,3 @@
+sudo apt-get update
+sudo apt-get install python -y
+sudo apt-get install python-pip -y
\ No newline at end of file
diff --git a/postgres_15.8.1.044/scripts/01-postgres_check.sh b/postgres_15.8.1.044/scripts/01-postgres_check.sh
new file mode 100644
index 0000000..d131528
--- /dev/null
+++ b/postgres_15.8.1.044/scripts/01-postgres_check.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+#
+# Scripts in this directory are run during the build process.
+# Each script will be uploaded to /tmp on your build droplet,
+# given execute permissions and run. The cleanup process will
+# remove the scripts from your build system after they have run
+# if you use the build_image task.
+#
+echo "Commencing Checks"
+
+function check_database_is_ready {
+  echo -e "\nChecking if database is ready and accepting connections:"
+  if [ "$(pg_isready)" = "/tmp:5432 - accepting connections" ]; then
+    echo "Database is ready"
+  else
+    echo "Error: Database is not ready. Exiting"
+    exit 1
+  fi
+}
+
+function check_postgres_owned_dir_exists {
+  DIR=$1
+  USER="postgres"
+
+  echo -e "\nChecking if $DIR exists and is owned by the postgres user:"
+
+  if [ -d "$DIR" ]; then
+    echo "$DIR exists"
+    if [ $(stat -c '%U' $DIR) = "$USER" ]; then
+      echo "$DIR is owned by $USER"
+    else
+      echo "Error: $DIR is not owned by $USER"
+      exit 1
+    fi
+  else
+    echo "Error: ${DIR} not found. Exiting."
+    exit 1
+  fi
+}
+
+function check_lse_enabled {
+  ARCH=$(uname -m)
+  if [ $ARCH = "aarch64" ]; then
+    echo -e "\nArchitecture is $ARCH. Checking for LSE:"
+
+    LSE_COUNT=$(objdump -d /usr/lib/postgresql/bin/postgres | grep -i 'ldxr\|ldaxr\|stxr\|stlxr' | wc -l)
+    MOUTLINE_ATOMICS_COUNT=$(nm /usr/lib/postgresql/bin/postgres | grep __aarch64_have_lse_atomics | wc -l)
+
+    # Checking for load and store exclusives
+    if [ $LSE_COUNT -gt 0 ]; then
+      echo "Postgres has LSE enabled"
+    else
+      echo "Error: Postgres failed to be compiled with LSE. Exiting"
+      exit 1
+    fi
+
+    # Checking if successfully compiled with -moutline-atomics
+    if [ $MOUTLINE_ATOMICS_COUNT -gt 0 ]; then
+      echo "Postgres has been compiled with -moutline-atomics"
+    else
+      echo "Error: Postgres failed to be compiled with -moutline-atomics. 
Exiting" + exit 1 + fi + else + echo "Architecture is $ARCH. Not checking for LSE." + fi +} + +check_database_is_ready +check_postgres_owned_dir_exists "/var/lib/postgresql" +check_postgres_owned_dir_exists "/etc/postgresql" +check_lse_enabled \ No newline at end of file diff --git a/postgres_15.8.1.044/scripts/02-credentials_cleanup.sh b/postgres_15.8.1.044/scripts/02-credentials_cleanup.sh new file mode 100644 index 0000000..a7b966f --- /dev/null +++ b/postgres_15.8.1.044/scripts/02-credentials_cleanup.sh @@ -0,0 +1 @@ +sudo rm /home/ubuntu/.ssh/authorized_keys diff --git a/postgres_15.8.1.044/scripts/11-lemp.sh b/postgres_15.8.1.044/scripts/11-lemp.sh new file mode 100644 index 0000000..c340f5e --- /dev/null +++ b/postgres_15.8.1.044/scripts/11-lemp.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# DigitalOcean Marketplace Image Validation Tool +# © 2021 DigitalOcean LLC. +# This code is licensed under Apache 2.0 license (see LICENSE.md for details) + +rm -rvf /etc/nginx/sites-enabled/default + +ln -s /etc/nginx/sites-available/digitalocean \ + /etc/nginx/sites-enabled/digitalocean + +rm -rf /var/www/html/index*debian.html + +chown -R www-data: /var/www \ No newline at end of file diff --git a/postgres_15.8.1.044/scripts/12-ufw-nginx.sh b/postgres_15.8.1.044/scripts/12-ufw-nginx.sh new file mode 100644 index 0000000..7c47366 --- /dev/null +++ b/postgres_15.8.1.044/scripts/12-ufw-nginx.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +# DigitalOcean Marketplace Image Validation Tool +# © 2021 DigitalOcean LLC. +# This code is licensed under Apache 2.0 license (see LICENSE.md for details) + +ufw limit ssh +ufw allow 'Nginx Full' + +ufw --force enable \ No newline at end of file diff --git a/postgres_15.8.1.044/scripts/13-force-ssh-logout.sh b/postgres_15.8.1.044/scripts/13-force-ssh-logout.sh new file mode 100644 index 0000000..99e28c1 --- /dev/null +++ b/postgres_15.8.1.044/scripts/13-force-ssh-logout.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +# DigitalOcean Marketplace Image Validation Tool +# © 2021 DigitalOcean LLC. +# This code is licensed under Apache 2.0 license (see LICENSE.md for details) + +cat >> /etc/ssh/sshd_config < /root/.bash_history +unset HISTFILE +find /var/log -mtime -1 -type f -exec truncate -s 0 {} \; +rm -rf /var/log/*.gz /var/log/*.[0-9] /var/log/*-???????? +rm -rf /var/lib/cloud/instances/* +rm -f /root/.ssh/authorized_keys /etc/ssh/*key* +touch /etc/ssh/revoked_keys +chmod 600 /etc/ssh/revoked_keys + +cat /dev/null > /var/log/lastlog +cat /dev/null > /var/log/wtmp diff --git a/postgres_15.8.1.044/scripts/90-cleanup.sh b/postgres_15.8.1.044/scripts/90-cleanup.sh new file mode 100644 index 0000000..f2e1968 --- /dev/null +++ b/postgres_15.8.1.044/scripts/90-cleanup.sh @@ -0,0 +1,81 @@ +#!/bin/bash + +# DigitalOcean Marketplace Image Validation Tool +# © 2021 DigitalOcean LLC. +# This code is licensed under Apache 2.0 license (see LICENSE.md for details) + +set -o errexit + +# Ensure /tmp exists and has the proper permissions before +# checking for security updates +# https://github.com/digitalocean/marketplace-partners/issues/94 +if [[ ! 
-d /tmp ]]; then + mkdir /tmp +fi +chmod 1777 /tmp + +if [ -n "$(command -v yum)" ]; then + yum update -y + yum clean all +elif [ -n "$(command -v apt-get)" ]; then + # Cleanup more packages + apt-get -y remove --purge \ + automake \ + autoconf \ + autotools-dev \ + cmake-data \ + cpp-8 \ + cpp-9 \ + cpp-10 \ + gcc-8 \ + gcc-9 \ + gcc-10 \ + git \ + git-man \ + ansible \ + libicu-dev \ + libcgal-dev \ + libgcc-9-dev \ + libgcc-8-dev \ + ansible + + add-apt-repository --yes --remove ppa:ansible/ansible + + source /etc/os-release + apt-get -y remove --purge linux-headers-5.11.0-1021-aws + + apt-get -y update + apt-get -y upgrade + apt-get -y autoremove + apt-get -y autoclean +fi +rm -rf /tmp/* /var/tmp/* +history -c +cat /dev/null > /root/.bash_history +unset HISTFILE +find /var/log -mtime -1 -type f -exec truncate -s 0 {} \; +rm -rf /var/log/*.gz /var/log/*.[0-9] /var/log/*-???????? +rm -rf /var/lib/cloud/instances/* +rm -f /root/.ssh/authorized_keys /etc/ssh/*key* +touch /etc/ssh/revoked_keys +chmod 600 /etc/ssh/revoked_keys + +# Securely erase the unused portion of the filesystem +GREEN='\033[0;32m' +NC='\033[0m' +printf "\n${GREEN}Writing zeros to the remaining disk space to securely +erase the unused portion of the file system. +Depending on your disk size this may take several minutes. +The secure erase will complete successfully when you see:${NC} + dd: writing to '/zerofile': No space left on device\n +Beginning secure erase now\n" + +dd if=/dev/zero of=/zerofile & + PID=$! + while [ -d /proc/$PID ] + do + printf "." + sleep 5 + done +sync; rm /zerofile; sync +cat /dev/null > /var/log/lastlog; cat /dev/null > /var/log/wtmp diff --git a/postgres_15.8.1.044/scripts/91-log_cleanup.sh b/postgres_15.8.1.044/scripts/91-log_cleanup.sh new file mode 100644 index 0000000..24073af --- /dev/null +++ b/postgres_15.8.1.044/scripts/91-log_cleanup.sh @@ -0,0 +1,24 @@ +#!/bin/bash +#Erasing all logs +# +echo "Clearing all log files" +rm -rf /var/log/* + +# creating system stats directory +mkdir /var/log/sysstat + +# https://github.com/fail2ban/fail2ban/issues/1593 +touch /var/log/auth.log + +touch /var/log/pgbouncer.log +chown pgbouncer:postgres /var/log/pgbouncer.log + +mkdir /var/log/postgresql +chown postgres:postgres /var/log/postgresql + +mkdir /var/log/wal-g +cd /var/log/wal-g +touch backup-push.log backup-fetch.log wal-push.log wal-fetch.log pitr.log +chown -R postgres:postgres /var/log/wal-g +chmod -R 0300 /var/log/wal-g + diff --git a/postgres_15.8.1.044/scripts/99-img_check.sh b/postgres_15.8.1.044/scripts/99-img_check.sh new file mode 100644 index 0000000..00b5476 --- /dev/null +++ b/postgres_15.8.1.044/scripts/99-img_check.sh @@ -0,0 +1,682 @@ +#!/bin/bash + +# DigitalOcean Marketplace Image Validation Tool +# © 2021 DigitalOcean LLC. +# This code is licensed under Apache 2.0 license (see LICENSE.md for details) + +VERSION="v. 1.6" +RUNDATE=$( date ) + +# Script should be run with SUDO +if [ "$EUID" -ne 0 ] + then echo "[Error] - This script must be run with sudo or as the root user." + exit 1 +fi + +STATUS=0 +PASS=0 +WARN=0 +FAIL=0 + +# $1 == command to check for +# returns: 0 == true, 1 == false +cmdExists() { + if command -v "$1" > /dev/null 2>&1; then + return 0 + else + return 1 + fi +} + +function getDistro { + if [ -f /etc/os-release ]; then + # freedesktop.org and systemd + . 
/etc/os-release + OS=$NAME + VER=$VERSION_ID +elif type lsb_release >/dev/null 2>&1; then + # linuxbase.org + OS=$(lsb_release -si) + VER=$(lsb_release -sr) +elif [ -f /etc/lsb-release ]; then + # For some versions of Debian/Ubuntu without lsb_release command + . /etc/lsb-release + OS=$DISTRIB_ID + VER=$DISTRIB_RELEASE +elif [ -f /etc/debian_version ]; then + # Older Debian/Ubuntu/etc. + OS=Debian + VER=$(cat /etc/debian_version) +elif [ -f /etc/SuSe-release ]; then + # Older SuSE/etc. + : +elif [ -f /etc/redhat-release ]; then + # Older Red Hat, CentOS, etc. + VER=$( cat /etc/redhat-release | cut -d" " -f3 | cut -d "." -f1) + d=$( cat /etc/redhat-release | cut -d" " -f1 | cut -d "." -f1) + if [[ $d == "CentOS" ]]; then + OS="CentOS Linux" + fi +else + # Fall back to uname, e.g. "Linux ", also works for BSD, etc. + OS=$(uname -s) + VER=$(uname -r) +fi +} +function loadPasswords { +SHADOW=$(cat /etc/shadow) +} + +function checkAgent { + # Check for the presence of the do-agent in the filesystem + if [ -d /var/opt/digitalocean/do-agent ];then + echo -en "\e[41m[FAIL]\e[0m DigitalOcean Monitoring Agent detected.\n" + ((FAIL++)) + STATUS=2 + if [[ $OS == "CentOS Linux" ]]; then + echo "The agent can be removed with 'sudo yum remove do-agent' " + elif [[ $OS == "Ubuntu" ]]; then + echo "The agent can be removed with 'sudo apt-get purge do-agent' " + fi + else + echo -en "\e[32m[PASS]\e[0m DigitalOcean Monitoring agent was not found\n" + ((PASS++)) + fi +} + +function checkLogs { + cp_ignore="/var/log/cpanel-install.log" + echo -en "\nChecking for log files in /var/log\n\n" + # Check if there are log archives or log files that have not been recently cleared. + for f in /var/log/*-????????; do + [[ -e $f ]] || break + if [ $f != $cp_ignore ]; then + echo -en "\e[93m[WARN]\e[0m Log archive ${f} found\n" + ((WARN++)) + if [[ $STATUS != 2 ]]; then + STATUS=1 + fi + fi + done + for f in /var/log/*.[0-9];do + [[ -e $f ]] || break + echo -en "\e[93m[WARN]\e[0m Log archive ${f} found\n" + ((WARN++)) + if [[ $STATUS != 2 ]]; then + STATUS=1 + fi + done + for f in /var/log/*.log; do + [[ -e $f ]] || break + if [[ "${f}" = '/var/log/lfd.log' && "$( cat "${f}" | egrep -v '/var/log/messages has been reset| Watching /var/log/messages' | wc -c)" -gt 50 ]]; then + if [ $f != $cp_ignore ]; then + echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found\n" + ((WARN++)) + if [[ $STATUS != 2 ]]; then + STATUS=1 + fi + fi + elif [[ "${f}" != '/var/log/lfd.log' && "$( cat "${f}" | wc -c)" -gt 50 ]]; then + if [ $f != $cp_ignore ]; then + echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found\n" + ((WARN++)) + if [[ $STATUS != 2 ]]; then + STATUS=1 + fi + fi + fi + done +} +function checkTMP { + # Check the /tmp directory to ensure it is empty. Warn on any files found. + return 1 +} +function checkRoot { + user="root" + uhome="/root" + for usr in $SHADOW + do + IFS=':' read -r -a u <<< "$usr" + if [[ "${u[0]}" == "${user}" ]]; then + if [[ ${u[1]} == "!" ]] || [[ ${u[1]} == "!!" 
]] || [[ ${u[1]} == "*" ]]; then + echo -en "\e[32m[PASS]\e[0m User ${user} has no password set.\n" + ((PASS++)) + else + echo -en "\e[41m[FAIL]\e[0m User ${user} has a password set on their account.\n" + ((FAIL++)) + STATUS=2 + fi + fi + done + if [ -d ${uhome}/ ]; then + if [ -d ${uhome}/.ssh/ ]; then + if ls ${uhome}/.ssh/*> /dev/null 2>&1; then + for key in ${uhome}/.ssh/* + do + if [ "${key}" == "${uhome}/.ssh/authorized_keys" ]; then + + if [ "$( cat "${key}" | wc -c)" -gt 50 ]; then + echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a populated authorized_keys file in \e[93m${key}\e[0m\n" + akey=$(cat ${key}) + echo "File Contents:" + echo $akey + echo "--------------" + ((FAIL++)) + STATUS=2 + fi + elif [ "${key}" == "${uhome}/.ssh/id_rsa" ]; then + if [ "$( cat "${key}" | wc -c)" -gt 0 ]; then + echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a private key file in \e[93m${key}\e[0m\n" + akey=$(cat ${key}) + echo "File Contents:" + echo $akey + echo "--------------" + ((FAIL++)) + STATUS=2 + else + echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has empty private key file in \e[93m${key}\e[0m\n" + ((WARN++)) + if [[ $STATUS != 2 ]]; then + STATUS=1 + fi + fi + elif [ "${key}" != "${uhome}/.ssh/known_hosts" ]; then + echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a file in their .ssh directory at \e[93m${key}\e[0m\n" + ((WARN++)) + if [[ $STATUS != 2 ]]; then + STATUS=1 + fi + else + if [ "$( cat "${key}" | wc -c)" -gt 50 ]; then + echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a populated known_hosts file in \e[93m${key}\e[0m\n" + ((WARN++)) + if [[ $STATUS != 2 ]]; then + STATUS=1 + fi + fi + fi + done + else + echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m has no SSH keys present\n" + fi + else + echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have an .ssh directory\n" + fi + if [ -f /root/.bash_history ];then + + BH_S=$( cat /root/.bash_history | wc -c) + + if [[ $BH_S -lt 200 ]]; then + echo -en "\e[32m[PASS]\e[0m ${user}'s Bash History appears to have been cleared\n" + ((PASS++)) + else + echo -en "\e[41m[FAIL]\e[0m ${user}'s Bash History should be cleared to prevent sensitive information from leaking\n" + ((FAIL++)) + STATUS=2 + fi + + return 1; + else + echo -en "\e[32m[PASS]\e[0m The Root User's Bash History is not present\n" + ((PASS++)) + fi + else + echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have a directory in /home\n" + fi + echo -en "\n\n" + return 1 +} + +function checkUsers { + # Check each user-created account + for user in $(awk -F: '$3 >= 1000 && $1 != "nobody" {print $1}' /etc/passwd;) + do + # Skip some other non-user system accounts + if [[ $user == "centos" ]]; then + : + elif [[ $user == "nfsnobody" ]]; then + : + else + echo -en "\nChecking user: ${user}...\n" + for usr in $SHADOW + do + IFS=':' read -r -a u <<< "$usr" + if [[ "${u[0]}" == "${user}" ]]; then + if [[ ${u[1]} == "!" ]] || [[ ${u[1]} == "!!" ]] || [[ ${u[1]} == "*" ]]; then + echo -en "\e[32m[PASS]\e[0m User ${user} has no password set.\n" + ((PASS++)) + else + echo -en "\e[41m[FAIL]\e[0m User ${user} has a password set on their account. 
Only system users are allowed on the image.\n"
+            ((FAIL++))
+            STATUS=2
+          fi
+        fi
+      done
+      #echo "User Found: ${user}"
+      uhome="/home/${user}"
+      if [ -d "${uhome}/" ]; then
+        if [ -d "${uhome}/.ssh/" ]; then
+          # the glob must stay unquoted; a quoted glob is passed to ls literally
+          # and the keys would never be inspected
+          if ls ${uhome}/.ssh/* > /dev/null 2>&1; then
+            for key in ${uhome}/.ssh/*
+            do
+              if [ "${key}" == "${uhome}/.ssh/authorized_keys" ]; then
+                if [ "$( cat "${key}" | wc -c)" -gt 50 ]; then
+                  echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a populated authorized_keys file in \e[93m${key}\e[0m\n"
+                  akey=$(cat ${key})
+                  echo "File Contents:"
+                  echo $akey
+                  echo "--------------"
+                  ((FAIL++))
+                  STATUS=2
+                fi
+              elif [ "${key}" == "${uhome}/.ssh/id_rsa" ]; then
+                if [ "$( cat "${key}" | wc -c)" -gt 0 ]; then
+                  echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a private key file in \e[93m${key}\e[0m\n"
+                  akey=$(cat ${key})
+                  echo "File Contents:"
+                  echo $akey
+                  echo "--------------"
+                  ((FAIL++))
+                  STATUS=2
+                else
+                  echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has an empty private key file in \e[93m${key}\e[0m\n"
+                  ((WARN++))
+                  if [[ $STATUS != 2 ]]; then
+                    STATUS=1
+                  fi
+                fi
+              elif [ "${key}" != "${uhome}/.ssh/known_hosts" ]; then
+                echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a file in their .ssh directory named \e[93m${key}\e[0m\n"
+                ((WARN++))
+                if [[ $STATUS != 2 ]]; then
+                  STATUS=1
+                fi
+              else
+                if [ "$( cat "${key}" | wc -c)" -gt 50 ]; then
+                  echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a populated known_hosts file in \e[93m${key}\e[0m\n"
+                  ((WARN++))
+                  if [[ $STATUS != 2 ]]; then
+                    STATUS=1
+                  fi
+                fi
+              fi
+            done
+          else
+            echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m has no SSH keys present\n"
+          fi
+        else
+          echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have an .ssh directory\n"
+        fi
+      else
+        echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have a directory in /home\n"
+      fi
+
+      # Check for an uncleared .bash_history for this user
+      if [ -f "${uhome}/.bash_history" ]; then
+        BH_S=$( cat "${uhome}/.bash_history" | wc -c )
+
+        if [[ $BH_S -lt 200 ]]; then
+          echo -en "\e[32m[PASS]\e[0m ${user}'s Bash History appears to have been cleared\n"
+          ((PASS++))
+        else
+          echo -en "\e[41m[FAIL]\e[0m ${user}'s Bash History should be cleared to prevent sensitive information from leaking\n"
+          ((FAIL++))
+          STATUS=2
+        fi
+        echo -en "\n\n"
+      fi
+    fi
+  done
+}
+function checkFirewall {
+
+  if [[ $OS == "Ubuntu" ]]; then
+    fw="ufw"
+    ufwa=$(ufw status |head -1| sed -e "s/^Status:\ //")
+    if [[ $ufwa == "active" ]]; then
+      FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
+      ((PASS++))
+    else
+      FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
+      ((WARN++))
+    fi
+  elif [[ $OS == "CentOS Linux" ]]; then
+    if [ -f /usr/lib/systemd/system/csf.service ]; then
+      fw="csf"
+      # test the exit code directly; wrapping a silenced "systemctl status" in a
+      # command substitution always yields an empty string and never passes
+      if systemctl is-active --quiet "$fw"; then
+        FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
+        ((PASS++))
+      elif cmdExists "firewall-cmd"; then
+        if systemctl is-active --quiet firewalld; then
+          FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
+          ((PASS++))
+        else
+          FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
+          ((WARN++))
+        fi
+      else
+        FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
+        ((WARN++))
+      fi
+    else
+      fw="firewalld"
+      if systemctl is-active --quiet firewalld; then
+        FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
+        ((PASS++))
+      else
+        FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
+        ((WARN++))
+      fi
+    fi
+  elif [[ "$OS" =~ Debian.* ]]; then
+    # user could be using a number of different services for managing their firewall
+    # we will check some of the most common
+    if cmdExists 'ufw'; then
+      fw="ufw"
+      ufwa=$(ufw status |head -1| sed -e "s/^Status:\ //")
+      if [[ $ufwa == "active" ]]; then
+        FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
+        ((PASS++))
+      else
+        FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
+        ((WARN++))
+      fi
+    elif cmdExists "firewall-cmd"; then
+      fw="firewalld"
+      if systemctl is-active --quiet "$fw"; then
+        FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
+        ((PASS++))
+      else
+        FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
+        ((WARN++))
+      fi
+    else
+      # user could be using vanilla iptables, check if kernel module is loaded
+      fw="iptables"
+      if lsmod 2>/dev/null | grep -q '^ip_tables'; then
+        FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
+        ((PASS++))
+      else
+        FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
+        ((WARN++))
+      fi
+    fi
+  fi
+
+}
+function checkUpdates {
+  if [[ $OS == "Ubuntu" ]] || [[ "$OS" =~ Debian.* ]]; then
+    # Ensure /tmp exists and has the proper permissions before
+    # checking for security updates
+    # https://github.com/digitalocean/marketplace-partners/issues/94
+    if [[ ! -d /tmp ]]; then
+      mkdir /tmp
+    fi
+    chmod 1777 /tmp
+
+    echo -en "\nUpdating apt package database to check for security updates, this may take a minute...\n\n"
+    apt-get -y update > /dev/null
+
+    uc=$(apt-get --just-print upgrade | grep -i "security" | wc -l)
+    if [[ $uc -gt 0 ]]; then
+      update_count=$(( ${uc} / 2 ))
+    else
+      update_count=0
+    fi
+
+    if [[ $update_count -gt 0 ]]; then
+      echo -en "\e[41m[FAIL]\e[0m There are ${update_count} security updates available for this image that have not been installed.\n"
+      echo -en
+      echo -en "Here is a list of the security updates that are not installed:\n"
+      sleep 2
+      apt-get --just-print upgrade | grep -i security | awk '{print $2}' | awk '!seen[$0]++'
+      echo -en
+      ((FAIL++))
+      STATUS=2
+    else
+      echo -en "\e[32m[PASS]\e[0m There are no pending security updates for this image.\n\n"
+      ((PASS++))
+    fi
+  elif [[ $OS == "CentOS Linux" ]]; then
+    echo -en "\nChecking for available security updates, this may take a minute...\n\n"
+
+    update_count=$(yum check-update --security --quiet | wc -l)
+    if [[ $update_count -gt 0 ]]; then
+      echo -en "\e[41m[FAIL]\e[0m There are ${update_count} security updates available for this image that have not been installed.\n"
+      ((FAIL++))
+      STATUS=2
+    else
+      echo -en "\e[32m[PASS]\e[0m There are no pending security updates for this image.\n"
+      ((PASS++))
+    fi
+  else
+    echo "Error encountered"
+    exit 1
+  fi
+
+  return 1;
+}
+function checkCloudInit {
+
+  if hash cloud-init 2>/dev/null; then
+    CI="\e[32m[PASS]\e[0m Cloud-init is installed.\n"
+    ((PASS++))
+  else
+    CI="\e[41m[FAIL]\e[0m No valid version of cloud-init was found.\n"
+    ((FAIL++))
+    STATUS=2
+  fi
+  return 1
+}
+function checkMongoDB {
+  # Check if MongoDB is installed
+  # If it is, verify the version is allowed (non-SSPL)
+
+  if [[ $OS == "Ubuntu" ]] || [[ "$OS" =~ Debian.* ]]; then
+
+    if [[ -f "/usr/bin/mongod" ]]; then
+      version=$(/usr/bin/mongod --version --quiet | grep "db version" | sed -e "s/^db\ version\ v//")
+
+      if version_gt $version 4.0.0; then
+        if version_gt $version 4.0.3; then
+          echo -en "\e[41m[FAIL]\e[0m An SSPL version of MongoDB is present, ${version}"
+          ((FAIL++))
+          STATUS=2
+        else
+          echo -en "\e[32m[PASS]\e[0m The version of MongoDB installed, ${version} is not under the SSPL"
+          ((PASS++))
+        fi
+      else
+        if version_gt $version 3.6.8; then
+          echo -en "\e[41m[FAIL]\e[0m An SSPL version of MongoDB is present, ${version}"
+          ((FAIL++))
+          STATUS=2
+        else
+          echo -en "\e[32m[PASS]\e[0m The version of MongoDB installed, ${version} is not under the SSPL"
+          ((PASS++))
+        fi
+      fi
+
+    else
+      echo -en "\e[32m[PASS]\e[0m MongoDB is not installed"
+      ((PASS++))
+    fi
+
+  elif [[ $OS == "CentOS Linux" ]]; then
+
+    if [[ -f "/usr/bin/mongod" ]]; then
+      version=$(/usr/bin/mongod --version --quiet | grep "db version" | sed -e "s/^db\ version\ v//")
+
+      if version_gt $version 4.0.0; then
+        if version_gt $version 4.0.3; then
+          echo -en "\e[41m[FAIL]\e[0m An SSPL version of MongoDB is present"
+          ((FAIL++))
+          STATUS=2
+        else
+          echo -en "\e[32m[PASS]\e[0m The version of MongoDB installed is not under the SSPL"
+          ((PASS++))
+        fi
+      else
+        if version_gt $version 3.6.8; then
+          echo -en "\e[41m[FAIL]\e[0m An SSPL version of MongoDB is present"
+          ((FAIL++))
+          STATUS=2
+        else
+          echo -en "\e[32m[PASS]\e[0m The version of MongoDB installed is not under the SSPL"
+          ((PASS++))
+        fi
+      fi
+
+    else
+      echo -en "\e[32m[PASS]\e[0m MongoDB is not installed"
+      ((PASS++))
+    fi
+
+  else
+    echo "ERROR: Unable to identify distribution"
+    ((FAIL++))
+    STATUS=2
+    return 1
+  fi
+
+}
+
+function 
version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; } + + +clear +echo "DigitalOcean Marketplace Image Validation Tool ${VERSION}" +echo "Executed on: ${RUNDATE}" +echo "Checking local system for Marketplace compatibility..." + +getDistro + +echo -en "\n\e[1mDistribution:\e[0m ${OS}\n" +echo -en "\e[1mVersion:\e[0m ${VER}\n\n" + +ost=0 +osv=0 + +if [[ $OS == "Ubuntu" ]]; then + ost=1 + if [[ $VER == "20.04" ]]; then + osv=1 + elif [[ $VER == "18.04" ]]; then + osv=1 + elif [[ $VER == "16.04" ]]; then + osv=1 + else + osv=0 + fi + +elif [[ "$OS" =~ Debian.* ]]; then + ost=1 + case "$VER" in + 9) + osv=1 + ;; + 10) + osv=1 + ;; + *) + osv=2 + ;; + esac + +elif [[ $OS == "CentOS Linux" ]]; then + ost=1 + if [[ $VER == "8" ]]; then + osv=1 + elif [[ $VER == "7" ]]; then + osv=1 + elif [[ $VER == "6" ]]; then + osv=1 + else + osv=2 + fi +else + ost=0 +fi + +if [[ $ost == 1 ]]; then + echo -en "\e[32m[PASS]\e[0m Supported Operating System Detected: ${OS}\n" + ((PASS++)) +else + echo -en "\e[41m[FAIL]\e[0m ${OS} is not a supported Operating System\n" + ((FAIL++)) + STATUS=2 +fi + +if [[ $osv == 1 ]]; then + echo -en "\e[32m[PASS]\e[0m Supported Release Detected: ${VER}\n" + ((PASS++)) +elif [[ $ost == 1 ]]; then + echo -en "\e[41m[FAIL]\e[0m ${OS} ${VER} is not a supported Operating System Version\n" + ((FAIL++)) + STATUS=2 +else + echo "Exiting..." + exit 1 +fi + +checkCloudInit + +echo -en "${CI}" + +checkFirewall + +echo -en "${FW_VER}" + +checkUpdates + +loadPasswords + +checkLogs + +echo -en "\n\nChecking all user-created accounts...\n" +checkUsers + +echo -en "\n\nChecking the root account...\n" +checkRoot + +checkAgent + +checkMongoDB + + +# Summary +echo -en "\n\n---------------------------------------------------------------------------------------------------\n" + +if [[ $STATUS == 0 ]]; then + echo -en "Scan Complete.\n\e[32mAll Tests Passed!\e[0m\n" +elif [[ $STATUS == 1 ]]; then + echo -en "Scan Complete. \n\e[93mSome non-critical tests failed. Please review these items.\e[0m\e[0m\n" +else + echo -en "Scan Complete. \n\e[41mOne or more tests failed. Please review these items and re-test.\e[0m\n" +fi +echo "---------------------------------------------------------------------------------------------------" +echo -en "\e[1m${PASS} Tests PASSED\e[0m\n" +echo -en "\e[1m${WARN} WARNINGS\e[0m\n" +echo -en "\e[1m${FAIL} Tests FAILED\e[0m\n" +echo -en "---------------------------------------------------------------------------------------------------\n" + +if [[ $STATUS == 0 ]]; then + echo -en "We did not detect any issues with this image. Please be sure to manually ensure that all software installed on the base system is functional, secure and properly configured (or facilities for configuration on first-boot have been created).\n\n" + exit 0 +elif [[ $STATUS == 1 ]]; then + echo -en "Please review all [WARN] items above and ensure they are intended or resolved. If you do not have a specific requirement, we recommend resolving these items before image submission\n\n" + exit 0 +else + echo -en "Some critical tests failed. 
These items must be resolved and this scan re-run before you submit your image to the DigitalOcean Marketplace.\n\n" + exit 1 +fi \ No newline at end of file diff --git a/postgres_15.8.1.044/scripts/nix-provision.sh b/postgres_15.8.1.044/scripts/nix-provision.sh new file mode 100644 index 0000000..a2cda7f --- /dev/null +++ b/postgres_15.8.1.044/scripts/nix-provision.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +# shellcheck shell=bash + +set -o errexit +set -o pipefail +set -o xtrace + +function install_packages { + # Setup Ansible on host VM + sudo apt-get update && sudo apt-get install software-properties-common -y + sudo add-apt-repository --yes --update ppa:ansible/ansible && sudo apt-get install ansible -y + ansible-galaxy collection install community.general + +} + + + +function install_nix() { + sudo su -c "curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install --no-confirm \ + --extra-conf \"substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com\" \ + --extra-conf \"trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=\" " -s /bin/bash root + . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh + +} + + +function execute_stage2_playbook { + echo "POSTGRES_MAJOR_VERSION: ${POSTGRES_MAJOR_VERSION}" + echo "GIT_SHA: ${GIT_SHA}" + sudo tee /etc/ansible/ansible.cfg < str: + return base64.b64encode(gzip.compress(s.encode())).decode() + + instance = list( + ec2.create_instances( + BlockDeviceMappings=[ + { + "DeviceName": "/dev/sda1", + "Ebs": { + "VolumeSize": 8, # gb + "Encrypted": True, + "DeleteOnTermination": True, + "VolumeType": "gp3", + }, + }, + ], + MetadataOptions={ + "HttpTokens": "required", + "HttpEndpoint": "enabled", + }, + IamInstanceProfile={"Name": "pg-ap-southeast-1"}, + InstanceType="t4g.micro", + MinCount=1, + MaxCount=1, + ImageId=image.id, + NetworkInterfaces=[ + { + "DeviceIndex": 0, + "AssociatePublicIpAddress": True, + "Groups": ["sg-0a883ca614ebfbae0", "sg-014d326be5a1627dc"], + } + ], + UserData=f"""#cloud-config +hostname: db-aaaaaaaaaaaaaaaaaaaa +write_files: + - {{path: /etc/postgresql.schema.sql, content: {gzip_then_base64_encode(postgresql_schema_sql_content)}, permissions: '0600', encoding: gz+b64}} + - {{path: /etc/realtime.env, content: {gzip_then_base64_encode(realtime_env_content)}, permissions: '0664', encoding: gz+b64}} + - {{path: /etc/adminapi/adminapi.yaml, content: {gzip_then_base64_encode(adminapi_yaml_content)}, permissions: '0600', owner: 'adminapi:root', encoding: gz+b64}} + - {{path: /etc/postgresql-custom/pgsodium_root.key, content: {gzip_then_base64_encode(pgsodium_root_key_content)}, permissions: '0600', owner: 'postgres:postgres', encoding: gz+b64}} + - {{path: /etc/postgrest/base.conf, content: {gzip_then_base64_encode(postgrest_base_conf_content)}, permissions: '0664', encoding: gz+b64}} + - {{path: /etc/gotrue.env, content: {gzip_then_base64_encode(gotrue_env_content)}, permissions: '0664', encoding: gz+b64}} + - {{path: /etc/wal-g/config.json, content: {gzip_then_base64_encode(walg_config_json_content)}, permissions: '0664', owner: 'wal-g:wal-g', encoding: gz+b64}} + - {{path: /tmp/init.json, content: {gzip_then_base64_encode(init_json_content)}, permissions: '0600', encoding: gz+b64}} +runcmd: + - 'sudo echo \"pgbouncer\" \"postgres\" >> /etc/pgbouncer/userlist.txt' + - 'cd /tmp && aws s3 cp --region ap-southeast-1 
s3://init-scripts-staging/project/init.sh .' + - 'bash init.sh "staging"' + - 'rm -rf /tmp/*' +""", + TagSpecifications=[ + { + "ResourceType": "instance", + "Tags": [ + {"Key": "Name", "Value": "ci-ami-test-nix"}, + {"Key": "creator", "Value": "testinfra-ci"}, + {"Key": "testinfra-run-id", "Value": RUN_ID}, + ], + } + ], + ) + )[0] + instance.wait_until_running() + + ec2logger = EC2InstanceConnectLogger(debug=False) + temp_key = EC2InstanceConnectKey(ec2logger.get_logger()) + ec2ic = boto3.client("ec2-instance-connect", region_name="ap-southeast-1") + response = ec2ic.send_ssh_public_key( + InstanceId=instance.id, + InstanceOSUser="ubuntu", + SSHPublicKey=temp_key.get_pub_key(), + ) + assert response["Success"] + + # instance doesn't have public ip yet + while not instance.public_ip_address: + logger.warning("waiting for ip to be available") + sleep(5) + instance.reload() + + while True: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if sock.connect_ex((instance.public_ip_address, 22)) == 0: + break + else: + logger.warning("waiting for ssh to be available") + sleep(10) + + def get_ssh_connection(instance_ip, ssh_identity_file, max_retries=10): + for attempt in range(max_retries): + try: + return testinfra.get_host( + f"paramiko://ubuntu@{instance_ip}?timeout=60", + ssh_identity_file=ssh_identity_file, + ) + except Exception as e: + if attempt == max_retries - 1: + raise + logger.warning( + f"Ssh connection failed, retrying: {attempt + 1}/{max_retries} failed, retrying ..." + ) + sleep(5) + + host = get_ssh_connection( + # paramiko is an ssh backend + instance.public_ip_address, + temp_key.get_priv_key_file(), + ) + + def is_healthy(host, instance_ip, ssh_identity_file) -> bool: + health_checks = [ + ( + "postgres", + lambda h: h.run("sudo -u postgres /usr/bin/pg_isready -U postgres"), + ), + ( + "adminapi", + lambda h: h.run( + f"curl -sf -k --connect-timeout 30 --max-time 60 https://localhost:8085/health -H 'apikey: {supabase_admin_key}'" + ), + ), + ( + "postgrest", + lambda h: h.run( + "curl -sf --connect-timeout 30 --max-time 60 http://localhost:3001/ready" + ), + ), + ( + "gotrue", + lambda h: h.run( + "curl -sf --connect-timeout 30 --max-time 60 http://localhost:8081/health" + ), + ), + ("kong", lambda h: h.run("sudo kong health")), + ("fail2ban", lambda h: h.run("sudo fail2ban-client status")), + ] + + for service, check in health_checks: + try: + cmd = check(host) + if cmd.failed is True: + logger.warning(f"{service} not ready") + return False + except Exception: + logger.warning( + f"Connection failed during {service} check, attempting reconnect..." 
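+                    # (the rebinding of `host` below is local to is_healthy(); the
+                    # caller's connection object is unchanged, so after `return False`
+                    # the outer loop keeps polling with its original connection)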
+ ) + host = get_ssh_connection(instance_ip, ssh_identity_file) + return False + + return True + + while True: + if is_healthy( + host=host, + instance_ip=instance.public_ip_address, + ssh_identity_file=temp_key.get_priv_key_file(), + ): + break + sleep(1) + + # return a testinfra connection to the instance + yield host + + # at the end of the test suite, destroy the instance + instance.terminate() + + +def test_postgrest_is_running(host): + postgrest = host.service("postgrest") + assert postgrest.is_running + + +def test_postgrest_responds_to_requests(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/", + headers={ + "apikey": anon_key, + "authorization": f"Bearer {anon_key}", + }, + ) + assert res.ok + + +def test_postgrest_can_connect_to_db(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "apikey": service_role_key, + "authorization": f"Bearer {service_role_key}", + "accept-profile": "storage", + }, + ) + assert res.ok + + +# There would be an error if the `apikey` query parameter isn't removed, +# since PostgREST treats query parameters as conditions. +# +# Worth testing since remove_apikey_query_parameters uses regexp instead +# of parsed query parameters. +def test_postgrest_starting_apikey_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "apikey": service_role_key, + "id": "eq.absent", + "name": "eq.absent", + }, + ) + assert res.ok + + +def test_postgrest_middle_apikey_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "id": "eq.absent", + "apikey": service_role_key, + "name": "eq.absent", + }, + ) + assert res.ok + + +def test_postgrest_ending_apikey_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "id": "eq.absent", + "name": "eq.absent", + "apikey": service_role_key, + }, + ) + assert res.ok + + +# There would be an error if the empty key query parameter isn't removed, +# since PostgREST treats empty key query parameters as malformed input. +# +# Worth testing since remove_apikey_and_empty_key_query_parameters uses regexp instead +# of parsed query parameters. 
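+# As an illustration (hypothetical key value), a request line such as
+#   GET /rest/v1/buckets?=empty_key&id=eq.absent&apikey=secret
+# is expected to reach PostgREST as just `?id=eq.absent`, the empty-key and
+# `apikey` parameters having been stripped upstream (presumably by the proxy
+# rule that defines remove_apikey_and_empty_key_query_parameters).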
+def test_postgrest_starting_empty_key_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "": "empty_key", + "id": "eq.absent", + "apikey": service_role_key, + }, + ) + assert res.ok + + +def test_postgrest_middle_empty_key_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "apikey": service_role_key, + "": "empty_key", + "id": "eq.absent", + }, + ) + assert res.ok + + +def test_postgrest_ending_empty_key_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "id": "eq.absent", + "apikey": service_role_key, + "": "empty_key", + }, + ) + assert res.ok diff --git a/postgres_15.8.1.044/tests/pg_upgrade/.env b/postgres_15.8.1.044/tests/pg_upgrade/.env new file mode 100644 index 0000000..505503f --- /dev/null +++ b/postgres_15.8.1.044/tests/pg_upgrade/.env @@ -0,0 +1,6 @@ +POSTGRES_PASSWORD=postgres +POSTGRES_HOST=/var/run/postgresql +POSTGRES_INITDB_ARGS=--lc-ctype=C.UTF-8 +PG_MAJOR_VERSION=15 +IS_CI=true +SCRIPT_DIR=/tmp/upgrade diff --git a/postgres_15.8.1.044/tests/pg_upgrade/.gitignore b/postgres_15.8.1.044/tests/pg_upgrade/.gitignore new file mode 100644 index 0000000..c8ff8c3 --- /dev/null +++ b/postgres_15.8.1.044/tests/pg_upgrade/.gitignore @@ -0,0 +1,4 @@ +# excluding these since running debug.sh will download the files locally +pg_upgrade_bin*.tar.gz +pg_upgrade_scripts*.tar.gz +pg_upgrade_scripts/ diff --git a/postgres_15.8.1.044/tests/pg_upgrade/debug.sh b/postgres_15.8.1.044/tests/pg_upgrade/debug.sh new file mode 100644 index 0000000..b05d894 --- /dev/null +++ b/postgres_15.8.1.044/tests/pg_upgrade/debug.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +set -eEuo pipefail + +export PGPASSWORD=postgres +export PGUSER=supabase_admin +export PGHOST=localhost +export PGDATABASE=postgres + +ARTIFACTS_BUCKET_NAME=${1:-} +if [ -z "$ARTIFACTS_BUCKET_NAME" ]; then + echo "Usage: $0 [INITIAL_PG_VERSION]" + exit 1 +fi + +INITIAL_PG_VERSION=${2:-15.1.1.60} +LATEST_PG_VERSION=$(sed -e 's/postgres-version = "\(.*\)"/\1/g' ../../common.vars.pkr.hcl) + +LATEST_VERSION_SCRIPTS="scripts/pg_upgrade_scripts-${LATEST_PG_VERSION}.tar.gz" +LATEST_VERSION_BIN="scripts/pg_upgrade_bin-${LATEST_PG_VERSION}.tar.gz" + +if [ ! -f "$LATEST_VERSION_SCRIPTS" ]; then + aws s3 cp "s3://${ARTIFACTS_BUCKET_NAME}/upgrades/postgres/supabase-postgres-${LATEST_PG_VERSION}/pg_upgrade_scripts.tar.gz" "$LATEST_VERSION_SCRIPTS" +fi + +if [ ! -f "$LATEST_VERSION_BIN" ]; then + aws s3 cp "s3://${ARTIFACTS_BUCKET_NAME}/upgrades/postgres/supabase-postgres-${LATEST_PG_VERSION}/20.04.tar.gz" "$LATEST_VERSION_BIN" +fi + +rm -rf scripts/pg_upgrade_scripts +cp "$LATEST_VERSION_SCRIPTS" scripts/pg_upgrade_scripts.tar.gz +cp "$LATEST_VERSION_BIN" scripts/pg_upgrade_bin.tar.gz + +docker rm -f pg_upgrade_test || true + +docker run -t --name pg_upgrade_test --env-file .env \ + -v "$(pwd)/scripts:/tmp/upgrade" \ + --entrypoint /tmp/upgrade/entrypoint.sh -d \ + -p 5432:5432 \ + "supabase/postgres:${INITIAL_PG_VERSION}" + +sleep 3 +while ! docker exec -it pg_upgrade_test bash -c "pg_isready"; do + echo "Waiting for postgres to start..." 
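+  # polls pg_isready inside the container once per second until PostgreSQL
+  # accepts connections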
+  sleep 1
+done
+
+echo "Running migrations"
+docker cp ../../migrations/db/migrations "pg_upgrade_test:/docker-entrypoint-initdb.d/"
+if ! docker exec -it pg_upgrade_test bash -c '/docker-entrypoint-initdb.d/migrate.sh > /tmp/migrate.log 2>&1; exit $?'
+then
+  echo "Running migrations failed. Exiting."
+  exit 1
+fi
+
+echo "Running tests"
+pg_prove "../../migrations/tests/test.sql"
+psql -f "./tests/97-enable-extensions.sql"
+psql -f "./tests/98-data-fixtures.sql"
+psql -f "./tests/99-fixtures.sql"
+
+echo "Initiating pg_upgrade"
+if ! docker exec -it pg_upgrade_test bash -c '/tmp/upgrade/pg_upgrade_scripts/initiate.sh "$PG_MAJOR_VERSION"; exit $?'
+then
+  echo "Initiating pg_upgrade failed. Exiting."
+  exit 1
+fi
+
+sleep 3
+echo "Completing pg_upgrade"
+if ! docker exec -it pg_upgrade_test bash -c 'rm -f /tmp/pg-upgrade-status; /tmp/upgrade/pg_upgrade_scripts/complete.sh; exit $?'
+then
+  echo "Completing pg_upgrade failed. Exiting."
+  exit 1
+fi
+
+pg_prove tests/01-schema.sql
+pg_prove tests/02-data.sql
+pg_prove tests/03-settings.sql
+
diff --git a/postgres_15.8.1.044/tests/pg_upgrade/scripts/entrypoint.sh b/postgres_15.8.1.044/tests/pg_upgrade/scripts/entrypoint.sh
new file mode 100644
index 0000000..d9d80ac
--- /dev/null
+++ b/postgres_15.8.1.044/tests/pg_upgrade/scripts/entrypoint.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+set -e
+
+SCRIPT_DIR=$(dirname -- "$0";)
+
+ls -la "$SCRIPT_DIR"
+
+tar -xzf "${SCRIPT_DIR}/pg_upgrade_scripts.tar.gz" -C "${SCRIPT_DIR}"
+
+mkdir -p /tmp/persistent
+cp "$SCRIPT_DIR/pg_upgrade_bin.tar.gz" /tmp/persistent
+
+export PATH="$(pg_config --bindir):$PATH"
+
+sed -i "s/|--version//g" /usr/local/bin/docker-entrypoint.sh
+/usr/local/bin/docker-entrypoint.sh postgres --version || true
+
+su postgres -c "$(pg_config --bindir)/pg_ctl start -o '-c config_file=/etc/postgresql/postgresql.conf' -l /tmp/postgres.log"
+
+RECEIVED_EXIT_SIGNAL=false
+trap 'RECEIVED_EXIT_SIGNAL=true' SIGINT SIGTERM SIGUSR1
+while [ "$RECEIVED_EXIT_SIGNAL" = false ]; do
+  sleep 5
+done
diff --git a/postgres_15.8.1.044/tests/pg_upgrade/tests/01-schema.sql b/postgres_15.8.1.044/tests/pg_upgrade/tests/01-schema.sql
new file mode 100644
index 0000000..3cf3a83
--- /dev/null
+++ b/postgres_15.8.1.044/tests/pg_upgrade/tests/01-schema.sql
@@ -0,0 +1,26 @@
+CREATE EXTENSION IF NOT EXISTS pgtap;
+
+BEGIN;
+SELECT plan(15);
+
+select has_schema('public');
+select has_schema('auth');
+select has_schema('storage');
+select has_schema('realtime');
+select has_schema('pgsodium');
+select has_schema('vault');
+select has_schema('extensions');
+
+SELECT has_enum('public', 'continents', 'Enum continents should exist');
+
+SELECT has_table('public', 'countries', 'Table countries should exist');
+SELECT has_column('public', 'countries', 'id', 'Column id should exist');
+SELECT has_column('public', 'countries', 'name', 'Column name should exist');
+SELECT has_column('public', 'countries', 'iso2', 'Column iso2 should exist');
+SELECT has_column('public', 'countries', 'iso3', 'Column iso3 should exist');
+SELECT has_column('public', 'countries', 'continent', 'Column continent should exist');
+
+SELECT has_materialized_view('public', 'european_countries', 'Materialized view european_countries should exist');
+
+SELECT * FROM finish();
+ROLLBACK;
diff --git a/postgres_15.8.1.044/tests/pg_upgrade/tests/02-data.sql b/postgres_15.8.1.044/tests/pg_upgrade/tests/02-data.sql
new file mode 100644
index 0000000..d83e346
--- /dev/null
+++ b/postgres_15.8.1.044/tests/pg_upgrade/tests/02-data.sql
@@ -0,0 +1,27 @@
+CREATE EXTENSION IF NOT EXISTS pgtap;
+
+BEGIN;
+SELECT plan(4);
+
+SELECT results_eq(
+  'SELECT count(*)::int FROM public.countries',
+  ARRAY[ 249 ]
+);
+
+SELECT results_eq(
+  'SELECT count(*)::int FROM public.countries where continent = ''Europe''',
+  ARRAY[ 45 ]
+);
+
+SELECT results_eq(
+  'SELECT count(*)::int FROM public.european_countries',
+  ARRAY[ 45 ]
+);
+
+SELECT results_eq(
+  'SELECT count(*) FROM public.countries where continent = ''Europe''',
+  'SELECT count(*) FROM public.european_countries'
+);
+
+SELECT * FROM finish();
+ROLLBACK;
diff --git a/postgres_15.8.1.044/tests/pg_upgrade/tests/03-settings.sql b/postgres_15.8.1.044/tests/pg_upgrade/tests/03-settings.sql
new file mode 100644
index 0000000..32fc71a
--- /dev/null
+++ b/postgres_15.8.1.044/tests/pg_upgrade/tests/03-settings.sql
@@ -0,0 +1,17 @@
+CREATE EXTENSION IF NOT EXISTS pgtap;
+
+BEGIN;
+SELECT plan(2);
+
+SELECT results_eq(
+  'SELECT setting FROM pg_settings where name = ''jit''',
+  ARRAY[ 'off' ]
+);
+
+select results_eq(
+  'SELECT setting FROM pg_settings WHERE name = ''password_encryption''',
+  ARRAY[ 'scram-sha-256' ]
+);
+
+SELECT * FROM finish();
+ROLLBACK;
diff --git a/postgres_15.8.1.044/tests/pg_upgrade/tests/97-enable-extensions.sql b/postgres_15.8.1.044/tests/pg_upgrade/tests/97-enable-extensions.sql
new file mode 100644
index 0000000..34c730b
--- /dev/null
+++ b/postgres_15.8.1.044/tests/pg_upgrade/tests/97-enable-extensions.sql
@@ -0,0 +1,10 @@
+do $$
+declare
+  ext record;
+begin
+  for ext in (select * from pg_available_extensions where name not in (select extname from pg_extension) order by name)
+  loop
+    execute 'create extension if not exists ' || ext.name || ' cascade';
+  end loop;
+end;
+$$;
diff --git a/postgres_15.8.1.044/tests/pg_upgrade/tests/98-data-fixtures.sql b/postgres_15.8.1.044/tests/pg_upgrade/tests/98-data-fixtures.sql
new file mode 100644
index 0000000..1a675e2
--- /dev/null
+++ 
b/postgres_15.8.1.044/tests/pg_upgrade/tests/98-data-fixtures.sql @@ -0,0 +1,273 @@ +create type public.continents as enum ( + 'Africa', + 'Antarctica', + 'Asia', + 'Europe', + 'Oceania', + 'North America', + 'South America' +); +create table public.countries ( + id bigint generated by default as identity primary key, + name text, + iso2 text not null, + iso3 text, + local_name text, + continent continents +); +comment on table countries is 'Full list of countries.'; +comment on column countries.name is 'Full country name.'; +comment on column countries.iso2 is 'ISO 3166-1 alpha-2 code.'; +comment on column countries.iso3 is 'ISO 3166-1 alpha-3 code.'; +comment on column countries.local_name is 'Local variation of the name.'; +insert into public.countries (name,iso2,iso3,local_name,continent) values + ('Bonaire, Sint Eustatius and Saba','BQ','BES',null,null), + ('Curaçao','CW','CUW',null,null), + ('Guernsey','GG','GGY',null,null), + ('Isle of Man','IM','IMN',null,null), + ('Jersey','JE','JEY',null,null), + ('Åland Islands','AX','ALA',null,null), + ('Montenegro','ME','MNE',null,null), + ('Saint Barthélemy','BL','BLM',null,null), + ('Saint Martin (French part)','MF','MAF',null,null), + ('Serbia','RS','SRB',null,null), + ('Sint Maarten (Dutch part)','SX','SXM',null,null), + ('South Sudan','SS','SSD',null,null), + ('Timor-Leste','TL','TLS',null,null), + ('American Samoa','as','ASM','Amerika Samoa','Oceania'), + ('Andorra','AD','AND','Andorra','Europe'), + ('Angola','AO','AGO','Angola','Africa'), + ('Anguilla','AI','AIA','Anguilla','North America'), + ('Antarctica','AQ','ATA','','Antarctica'), + ('Antigua and Barbuda','AG','ATG','Antigua and Barbuda','North America'), + ('Argentina','AR','ARG','Argentina','South America'), + ('Armenia','AM','ARM','Hajastan','Asia'), + ('Aruba','AW','ABW','Aruba','North America'), + ('Australia','AU','AUS','Australia','Oceania'), + ('Austria','AT','AUT','Österreich','Europe'), + ('Azerbaijan','AZ','AZE','Azerbaijan','Asia'), + ('Bahamas','BS','BHS','The Bahamas','North America'), + ('Bahrain','BH','BHR','Al-Bahrayn','Asia'), + ('Bangladesh','BD','BGD','Bangladesh','Asia'), + ('Barbados','BB','BRB','Barbados','North America'), + ('Belarus','BY','BLR','Belarus','Europe'), + ('Belgium','BE','BEL','Belgium/Belgique','Europe'), + ('Belize','BZ','BLZ','Belize','North America'), + ('Benin','BJ','BEN','Benin','Africa'), + ('Bermuda','BM','BMU','Bermuda','North America'), + ('Bhutan','BT','BTN','Druk-Yul','Asia'), + ('Bolivia','BO','BOL','Bolivia','South America'), + ('Bosnia and Herzegovina','BA','BIH','Bosna i Hercegovina','Europe'), + ('Botswana','BW','BWA','Botswana','Africa'), + ('Bouvet Island','BV','BVT','Bouvet Island','Antarctica'), + ('Brazil','BR','BRA','Brasil','South America'), + ('British Indian Ocean Territory','IO','IOT','British Indian Ocean Territory','Africa'), + ('Brunei Darussalam','BN','BRN','Brunei Darussalam','Asia'), + ('Bulgaria','BG','BGR','Balgarija','Europe'), + ('Burkina Faso','BF','BFA','Burkina Faso','Africa'), + ('Burundi','BI','BDI','Burundi/Uburundi','Africa'), + ('Cambodia','KH','KHM','Cambodia','Asia'), + ('Cameroon','CM','CMR','Cameroun/Cameroon','Africa'), + ('Canada','CA','CAN','Canada','North America'), + ('Cape Verde','CV','CPV','Cabo Verde','Africa'), + ('Cayman Islands','KY','CYM','Cayman Islands','North America'), + ('Central African Republic','CF','CAF','Centrafrique','Africa'), + ('Chad','TD','TCD','Tchad/Tshad','Africa'), + ('Chile','CL','CHL','Chile','South America'), + ('China','CN','CHN','Zhongquo','Asia'), + ('Christmas 
Island','CX','CXR','Christmas Island','Oceania'), + ('Cocos (Keeling) Islands','CC','CCK','Cocos (Keeling) Islands','Oceania'), + ('Colombia','CO','COL','Colombia','South America'), + ('Comoros','KM','COM','Komori/Comores','Africa'), + ('Congo','CG','COG','Congo','Africa'), + ('Congo, the Democratic Republic of the','CD','COD','Republique Democratique du Congo','Africa'), + ('Cook Islands','CK','COK','The Cook Islands','Oceania'), + ('Costa Rica','CR','CRI','Costa Rica','North America'), + ('Cote DIvoire','CI','CIV','Côte dIvoire','Africa'), + ('Croatia','HR','HRV','Hrvatska','Europe'), + ('Cuba','CU','CUB','Cuba','North America'), + ('Cyprus','CY','CYP','Cyprus','Asia'), + ('Czech Republic','CZ','CZE','Czech','Europe'), + ('Denmark','DK','DNK','Danmark','Europe'), + ('Djibouti','DJ','DJI','Djibouti/Jibuti','Africa'), + ('Dominica','DM','DMA','Dominica','North America'), + ('Dominican Republic','DO','DOM','Republica Dominicana','North America'), + ('Ecuador','EC','ECU','Ecuador','South America'), + ('Egypt','EG','EGY','Misr','Africa'), + ('El Salvador','SV','SLV','El Salvador','North America'), + ('Equatorial Guinea','GQ','GNQ','Guinea Ecuatorial','Africa'), + ('Eritrea','ER','ERI','Ertra','Africa'), + ('Estonia','EE','EST','Eesti','Europe'), + ('Ethiopia','ET','ETH','Yeityopiya','Africa'), + ('Falkland Islands (Malvinas)','FK','FLK','Falkland Islands','South America'), + ('Faroe Islands','FO','FRO','Faroe Islands','Europe'), + ('Fiji','FJ','FJI','Fiji Islands','Oceania'), + ('Finland','FI','FIN','Suomi','Europe'), + ('France','FR','FRA','France','Europe'), + ('French Guiana','GF','GUF','Guyane francaise','South America'), + ('French Polynesia','PF','PYF','Polynésie française','Oceania'), + ('French Southern Territories','TF','ATF','Terres australes françaises','Antarctica'), + ('Gabon','GA','GAB','Le Gabon','Africa'), + ('Gambia','GM','GMB','The Gambia','Africa'), + ('Georgia','GE','GEO','Sakartvelo','Asia'), + ('Germany','DE','DEU','Deutschland','Europe'), + ('Ghana','GH','GHA','Ghana','Africa'), + ('Gibraltar','GI','GIB','Gibraltar','Europe'), + ('Greece','GR','GRC','Greece','Europe'), + ('Greenland','GL','GRL','Kalaallit Nunaat','North America'), + ('Grenada','GD','GRD','Grenada','North America'), + ('Guadeloupe','GP','GLP','Guadeloupe','North America'), + ('Guam','GU','GUM','Guam','Oceania'), + ('Guatemala','GT','GTM','Guatemala','North America'), + ('Guinea','GN','GIN','Guinea','Africa'), + ('Guinea-Bissau','GW','GNB','Guinea-Bissau','Africa'), + ('Guyana','GY','GUY','Guyana','South America'), + ('Haiti','HT','HTI','Haiti/Dayti','North America'), + ('Heard Island and Mcdonald Islands','HM','HMD','Heard and McDonald Islands','Antarctica'), + ('Holy See (Vatican City State)','VA','VAT','Santa Sede/Città del Vaticano','Europe'), + ('Honduras','HN','HND','Honduras','North America'), + ('Hong Kong','HK','HKG','Xianggang/Hong Kong','Asia'), + ('Hungary','HU','HUN','Hungary','Europe'), + ('Iceland','IS','ISL','Iceland','Europe'), + ('India','IN','IND','Bharat/India','Asia'), + ('Indonesia','ID','IDN','Indonesia','Asia'), + ('Iran, Islamic Republic of','IR','IRN','Iran','Asia'), + ('Iraq','IQ','IRQ','Al-Irāq','Asia'), + ('Ireland','IE','IRL','Ireland','Europe'), + ('Israel','IL','ISR','Yisrael','Asia'), + ('Italy','IT','ITA','Italia','Europe'), + ('Jamaica','JM','JAM','Jamaica','North America'), + ('Japan','JP','JPN','Nihon/Nippon','Asia'), + ('Jordan','JO','JOR','Al-Urdunn','Asia'), + ('Kazakhstan','KZ','KAZ','Qazaqstan','Asia'), + ('Kenya','KE','KEN','Kenya','Africa'), + 
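+  -- (fixture continues through the full country list; 02-data.sql asserts
+  -- 249 rows in total, 45 of them in Europe)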
('Kiribati','KI','KIR','Kiribati','Oceania'), + ('Korea, Democratic People''s Republic of','KP','PRK','Choson Minjujuui Inmin Konghwaguk (Bukhan)','Asia'), + ('Korea, Republic of','KR','KOR','Taehan-minguk (Namhan)','Asia'), + ('Kuwait','KW','KWT','Al-Kuwayt','Asia'), + ('Kyrgyzstan','KG','KGZ','Kyrgyzstan','Asia'), + ('Lao People''s Democratic Republic','LA','LAO','Lao','Asia'), + ('Latvia','LV','LVA','Latvija','Europe'), + ('Lebanon','LB','LBN','Lubnan','Asia'), + ('Lesotho','LS','LSO','Lesotho','Africa'), + ('Liberia','LR','LBR','Liberia','Africa'), + ('Libya','LY','LBY','Libiya','Africa'), + ('Liechtenstein','LI','LIE','Liechtenstein','Europe'), + ('Lithuania','LT','LTU','Lietuva','Europe'), + ('Luxembourg','LU','LUX','Luxembourg','Europe'), + ('Macao','MO','MAC','Macau/Aomen','Asia'), + ('Macedonia, the Former Yugoslav Republic of','MK','MKD','Makedonija','Europe'), + ('Madagascar','MG','MDG','Madagasikara/Madagascar','Africa'), + ('Malawi','MW','MWI','Malawi','Africa'), + ('Malaysia','MY','MYS','Malaysia','Asia'), + ('Maldives','MV','MDV','Dhivehi Raajje/Maldives','Asia'), + ('Mali','ML','MLI','Mali','Africa'), + ('Malta','MT','MLT','Malta','Europe'), + ('Marshall Islands','MH','MHL','Marshall Islands/Majol','Oceania'), + ('Martinique','MQ','MTQ','Martinique','North America'), + ('Mauritania','MR','MRT','Muritaniya/Mauritanie','Africa'), + ('Mauritius','MU','MUS','Mauritius','Africa'), + ('Mayotte','YT','MYT','Mayotte','Africa'), + ('Mexico','MX','MEX','Mexico','North America'), + ('Micronesia, Federated States of','FM','FSM','Micronesia','Oceania'), + ('Moldova, Republic of','MD','MDA','Moldova','Europe'), + ('Monaco','MC','MCO','Monaco','Europe'), + ('Mongolia','MN','MNG','Mongol Uls','Asia'), + ('Albania','AL','ALB','Republika e Shqipërisë','Europe'), + ('Montserrat','MS','MSR','Montserrat','North America'), + ('Morocco','MA','MAR','Al-Maghrib','Africa'), + ('Mozambique','MZ','MOZ','Mozambique','Africa'), + ('Myanmar','MM','MMR','Myanma Pye','Asia'), + ('Namibia','NA','NAM','Namibia','Africa'), + ('Nauru','NR','NRU','Naoero/Nauru','Oceania'), + ('Nepal','NP','NPL','Nepal','Asia'), + ('Netherlands','NL','NLD','Nederland','Europe'), + ('New Caledonia','NC','NCL','Nouvelle-Calédonie','Oceania'), + ('New Zealand','NZ','NZL','New Zealand/Aotearoa','Oceania'), + ('Nicaragua','NI','NIC','Nicaragua','North America'), + ('Niger','NE','NER','Niger','Africa'), + ('Nigeria','NG','NGA','Nigeria','Africa'), + ('Niue','NU','NIU','Niue','Oceania'), + ('Norfolk Island','NF','NFK','Norfolk Island','Oceania'), + ('Northern Mariana Islands','MP','MNP','Northern Mariana Islands','Oceania'), + ('Norway','NO','NOR','Norge','Europe'), + ('Oman','OM','OMN','Oman','Asia'), + ('Pakistan','PK','PAK','Pakistan','Asia'), + ('Palau','PW','PLW','Belau/Palau','Oceania'), + ('Palestine, State of','PS','PSE','Filastin','Asia'), + ('Panama','PA','PAN','República de Panamá','North America'), + ('Papua New Guinea','PG','PNG','Papua New Guinea/Papua Niugini','Oceania'), + ('Paraguay','PY','PRY','Paraguay','South America'), + ('Peru','PE','PER','Perú/Piruw','South America'), + ('Philippines','PH','PHL','Pilipinas','Asia'), + ('Pitcairn','PN','PCN','Pitcairn','Oceania'), + ('Poland','PL','POL','Polska','Europe'), + ('Portugal','PT','PRT','Portugal','Europe'), + ('Puerto Rico','PR','PRI','Puerto Rico','North America'), + ('Qatar','QA','QAT','Qatar','Asia'), + ('Reunion','RE','REU','Reunion','Africa'), + ('Romania','RO','ROM','Romania','Europe'), + ('Russian Federation','RU','RUS','Rossija','Europe'), + 
('Rwanda','RW','RWA','Rwanda/Urwanda','Africa'), + ('Saint Helena, Ascension and Tristan da Cunha','SH','SHN','Saint Helena','Africa'), + ('Saint Kitts and Nevis','KN','KNA','Saint Kitts and Nevis','North America'), + ('Saint Lucia','LC','LCA','Saint Lucia','North America'), + ('Saint Pierre and Miquelon','PM','SPM','Saint-Pierre-et-Miquelon','North America'), + ('Saint Vincent and the Grenadines','VC','VCT','Saint Vincent and the Grenadines','North America'), + ('Samoa','WS','WSM','Samoa','Oceania'), + ('San Marino','SM','SMR','San Marino','Europe'), + ('Sao Tome and Principe','ST','STP','São Tomé e Príncipe','Africa'), + ('Saudi Arabia','SA','SAU','Al-Mamlaka al-Arabiya as-Saudiya','Asia'), + ('Senegal','SN','SEN','Sénégal/Sounougal','Africa'), + ('Seychelles','SC','SYC','Sesel/Seychelles','Africa'), + ('Sierra Leone','SL','SLE','Sierra Leone','Africa'), + ('Singapore','SG','SGP','Singapore/Singapura/Xinjiapo/Singapur','Asia'), + ('Slovakia','SK','SVK','Slovensko','Europe'), + ('Slovenia','SI','SVN','Slovenija','Europe'), + ('Solomon Islands','SB','SLB','Solomon Islands','Oceania'), + ('Somalia','SO','SOM','Soomaaliya','Africa'), + ('South Africa','ZA','ZAF','South Africa','Africa'), + ('South Georgia and the South Sandwich Islands','GS','SGS','South Georgia and the South Sandwich Islands','Antarctica'), + ('Spain','ES','ESP','España','Europe'), + ('Sri Lanka','LK','LKA','Sri Lanka/Ilankai','Asia'), + ('Sudan','SD','SDN','As-Sudan','Africa'), + ('Suriname','SR','SUR','Suriname','South America'), + ('Svalbard and Jan Mayen','SJ','SJM','Svalbard og Jan Mayen','Europe'), + ('Swaziland','SZ','SWZ','kaNgwane','Africa'), + ('Sweden','SE','SWE','Sverige','Europe'), + ('Switzerland','CH','CHE','Schweiz/Suisse/Svizzera/Svizra','Europe'), + ('Syrian Arab Republic','SY','SYR','Suriya','Asia'), + ('Taiwan (Province of China)','TW','TWN','Tai-wan','Asia'), + ('Tajikistan','TJ','TJK','Tajikistan','Asia'), + ('Tanzania, United Republic of','TZ','TZA','Tanzania','Africa'), + ('Thailand','TH','THA','Prathet Thai','Asia'), + ('Togo','TG','TGO','Togo','Africa'), + ('Tokelau','TK','TKL','Tokelau','Oceania'), + ('Tonga','TO','TON','Tonga','Oceania'), + ('Trinidad and Tobago','TT','TTO','Trinidad and Tobago','North America'), + ('Tunisia','TN','TUN','Tunis/Tunisie','Africa'), + ('Turkey','TR','TUR','Türkiye','Asia'), + ('Turkmenistan','TM','TKM','Türkmenistan','Asia'), + ('Turks and Caicos Islands','TC','TCA','The Turks and Caicos Islands','North America'), + ('Tuvalu','TV','TUV','Tuvalu','Oceania'), + ('Uganda','UG','UGA','Uganda','Africa'), + ('Ukraine','UA','UKR','Ukrajina','Europe'), + ('United Arab Emirates','AE','ARE','Al-Amirat al-Arabiya al-Muttahida','Asia'), + ('United Kingdom','GB','GBR','United Kingdom','Europe'), + ('United States','US','USA','United States','North America'), + ('United States Minor Outlying Islands','UM','UMI','United States Minor Outlying Islands','Oceania'), + ('Uruguay','UY','URY','Uruguay','South America'), + ('Uzbekistan','UZ','UZB','Uzbekiston','Asia'), + ('Vanuatu','VU','VUT','Vanuatu','Oceania'), + ('Venezuela','VE','VEN','Venezuela','South America'), + ('Viet Nam','VN','VNM','Viet Nam','Asia'), + ('Virgin Islands (British)','VG','VGB','British Virgin Islands','North America'), + ('Virgin Islands (U.S.)','VI','VIR','Virgin Islands of the United States','North America'), + ('Wallis and Futuna','WF','WLF','Wallis-et-Futuna','Oceania'), + ('Western Sahara','EH','ESH','As-Sahrawiya','Africa'), + ('Yemen','YE','YEM','Al-Yaman','Asia'), + 
('Zambia','ZM','ZMB','Zambia','Africa'),
+  ('Zimbabwe','ZW','ZWE','Zimbabwe','Africa'),
+  ('Afghanistan','AF','AFG','Afganistan/Afqanestan','Asia'),
+  ('Algeria','DZ','DZA','Al-Jazair/Algerie','Africa');
+
\ No newline at end of file
diff --git a/postgres_15.8.1.044/tests/pg_upgrade/tests/99-fixtures.sql b/postgres_15.8.1.044/tests/pg_upgrade/tests/99-fixtures.sql
new file mode 100644
index 0000000..2b93d45
--- /dev/null
+++ b/postgres_15.8.1.044/tests/pg_upgrade/tests/99-fixtures.sql
@@ -0,0 +1,12 @@
+-- enable JIT to ensure the upgrade process disables it
+alter system set jit = on;
+alter system set password_encryption = 'md5';
+select pg_reload_conf();
+
+-- create materialized view
+create materialized view public.european_countries as
+  select * from public.countries where continent = 'Europe'
+with no data;
+refresh materialized view public.european_countries;
+
+select count(*) from public.european_countries;
diff --git a/postgres_15.8.1.044/user-data-cloudimg b/postgres_15.8.1.044/user-data-cloudimg
new file mode 100644
index 0000000..9a74d23
--- /dev/null
+++ b/postgres_15.8.1.044/user-data-cloudimg
@@ -0,0 +1,16 @@
+#cloud-config
+users:
+  - name: root
+    lock_passwd: false
+    ssh_redirect_user: true
+    hashed_passwd: "$6$canonical.$0zWaW71A9ke9ASsaOcFTdQ2tx1gSmLxMPrsH0rF0Yb.2AEKNPV1lrF94n6YuPJmnUy2K2/JSDtxuiBDey6Lpa/"
+  - name: ubuntu
+    lock_passwd: false
+    hashed_passwd: "$6$canonical.$0zWaW71A9ke9ASsaOcFTdQ2tx1gSmLxMPrsH0rF0Yb.2AEKNPV1lrF94n6YuPJmnUy2K2/JSDtxuiBDey6Lpa/"
+    ssh_redirect_user: false
+    sudo: "ALL=(ALL) NOPASSWD:ALL"
+    shell: /usr/bin/bash
+    groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video]
+ssh_pwauth: True
+disable_root: false
+preserve_hostname: true
diff --git a/postgrest_v12.2.8/.cirrus.yml b/postgrest_v12.2.8/.cirrus.yml
new file mode 100644
index 0000000..854a39f
--- /dev/null
+++ b/postgrest_v12.2.8/.cirrus.yml
@@ -0,0 +1,42 @@
+freebsd_instance:
+  image_family: freebsd-14-1
+
+build_task:
+  # Don't change this name without adjusting .github/workflows/build.yaml
+  name: Build FreeBSD (Stack)
+  install_script: pkg install -y postgresql16-client hs-stack git
+
+  only_if: |
+    $CIRRUS_TAG != '' || $CIRRUS_BRANCH == 'main' || $CIRRUS_BRANCH =~ 'v*' ||
+    changesInclude(
+      '.github/workflows/build.yaml',
+      '.github/actions/artifact-from-cirrus/**',
+      '.cirrus.yml',
+      'postgrest.cabal',
+      'stack.yaml*',
+      '**.hs'
+    )
+
+  stack_cache:
+    folders: /.stack
+    fingerprint_script:
+      - echo $CIRRUS_OS
+      - stack --version
+      - md5sum postgrest.cabal
+      - md5sum stack.yaml.lock
+
+  stack_work_cache:
+    folders: .stack-work
+    fingerprint_script:
+      - echo $CIRRUS_OS
+      - stack --version
+      - md5sum postgrest.cabal
+      - md5sum stack.yaml.lock
+      - find main src -type f -iname '*.hs' -exec md5sum "{}" +
+
+  build_script: |
+    stack build -j 1 --local-bin-path . 
--copy-bins --stack-yaml stack-21.7.yaml + strip postgrest + + bin_artifacts: + path: postgrest diff --git a/postgrest_v12.2.8/.gitattributes b/postgrest_v12.2.8/.gitattributes new file mode 100644 index 0000000..3d432e0 --- /dev/null +++ b/postgrest_v12.2.8/.gitattributes @@ -0,0 +1 @@ +/CHANGELOG.md merge=union diff --git a/postgrest_v12.2.8/.gitignore b/postgrest_v12.2.8/.gitignore new file mode 100644 index 0000000..49b3be5 --- /dev/null +++ b/postgrest_v12.2.8/.gitignore @@ -0,0 +1,26 @@ +.DS_Store +db +dist +.cabal-sandbox +cabal.sandbox.config +hscope.out +codex.tags +.anvil +.stack-work* +tags +site +*~ +*#* +.#* +*.swp +result* +dist-newstyle +postgrest.hp +postgrest.prof +__pycache__ +*.tix +coverage +.hpc +loadtest +.history +.docs-build diff --git a/postgrest_v12.2.8/.readthedocs.yaml b/postgrest_v12.2.8/.readthedocs.yaml new file mode 100644 index 0000000..0d7162c --- /dev/null +++ b/postgrest_v12.2.8/.readthedocs.yaml @@ -0,0 +1,10 @@ +version: 2 +sphinx: + configuration: docs/conf.py +python: + install: + - requirements: docs/requirements.txt +build: + os: ubuntu-22.04 + tools: + python: "3.11" diff --git a/postgrest_v12.2.8/.stylish-haskell.yaml b/postgrest_v12.2.8/.stylish-haskell.yaml new file mode 100644 index 0000000..3bdbfc8 --- /dev/null +++ b/postgrest_v12.2.8/.stylish-haskell.yaml @@ -0,0 +1,225 @@ +# stylish-haskell configuration file +# ================================== + +# The stylish-haskell tool is mainly configured by specifying steps. These steps +# are a list, so they have an order, and one specific step may appear more than +# once (if needed). Each file is processed by these steps in the given order. +steps: + # Convert some ASCII sequences to their Unicode equivalents. This is disabled + # by default. + # - unicode_syntax: + # # In order to make this work, we also need to insert the UnicodeSyntax + # # language pragma. If this flag is set to true, we insert it when it's + # # not already present. You may want to disable it if you configure + # # language extensions using some other method than pragmas. Default: + # # true. + # add_language_pragma: true + + # Align the right hand side of some elements. This is quite conservative + # and only applies to statements where each element occupies a single + # line. + - simple_align: + cases: true + top_level_patterns: true + records: true + + # Import cleanup + - imports: + # There are different ways we can align names and lists. + # + # - global: Align the import names and import list throughout the entire + # file. + # + # - file: Like global, but don't add padding when there are no qualified + # imports in the file. + # + # - group: Only align the imports per group (a group is formed by adjacent + # import lines). + # + # - none: Do not perform any alignment. + # + # Default: global. + align: group + + # The following options affect only import list alignment. + # + # List align has following options: + # + # - after_alias: Import list is aligned with end of import including + # 'as' and 'hiding' keywords. + # + # > import qualified Data.List as List (concat, foldl, foldr, head, + # > init, last, length) + # + # - with_alias: Import list is aligned with start of alias or hiding. + # + # > import qualified Data.List as List (concat, foldl, foldr, head, + # > init, last, length) + # + # - new_line: Import list starts always on new line. 
+ # + # > import qualified Data.List as List + # > (concat, foldl, foldr, head, init, last, length) + # + # Default: after_alias + list_align: after_alias + + # Right-pad the module names to align imports in a group: + # + # - true: a little more readable + # + # > import qualified Data.List as List (concat, foldl, foldr, + # > init, last, length) + # > import qualified Data.List.Extra as List (concat, foldl, foldr, + # > init, last, length) + # + # - false: diff-safe + # + # > import qualified Data.List as List (concat, foldl, foldr, init, + # > last, length) + # > import qualified Data.List.Extra as List (concat, foldl, foldr, + # > init, last, length) + # + # Default: true + pad_module_names: true + + # Long list align style takes effect when import is too long. This is + # determined by 'columns' setting. + # + # - inline: This option will put as much specs on same line as possible. + # + # - new_line: Import list will start on new line. + # + # - new_line_multiline: Import list will start on new line when it's + # short enough to fit to single line. Otherwise it'll be multiline. + # + # - multiline: One line per import list entry. + # Type with constructor list acts like single import. + # + # > import qualified Data.Map as M + # > ( empty + # > , singleton + # > , ... + # > , delete + # > ) + # + # Default: inline + long_list_align: inline + + # Align empty list (importing instances) + # + # Empty list align has following options + # + # - inherit: inherit list_align setting + # + # - right_after: () is right after the module name: + # + # > import Vector.Instances () + # + # Default: inherit + empty_list_align: inherit + + # List padding determines indentation of import list on lines after import. + # This option affects 'long_list_align'. + # + # - : constant value + # + # - module_name: align under start of module name. + # Useful for 'file' and 'group' align settings. + list_padding: 4 + + # Separate lists option affects formatting of import list for type + # or class. The only difference is single space between type and list + # of constructors, selectors and class functions. + # + # - true: There is single space between Foldable type and list of it's + # functions. + # + # > import Data.Foldable (Foldable (fold, foldl, foldMap)) + # + # - false: There is no space between Foldable type and list of it's + # functions. + # + # > import Data.Foldable (Foldable(fold, foldl, foldMap)) + # + # Default: true + separate_lists: true + + # Space surround option affects formatting of import lists on a single + # line. The only difference is single space after the initial + # parenthesis and a single space before the terminal parenthesis. + # + # - true: There is single space associated with the enclosing + # parenthesis. + # + # > import Data.Foo ( foo ) + # + # - false: There is no space associated with the enclosing parenthesis + # + # > import Data.Foo (foo) + # + # Default: false + space_surround: false + + # Language pragmas + - language_pragmas: + # We can generate different styles of language pragma lists. + # + # - vertical: Vertical-spaced language pragmas, one per line. + # + # - compact: A more compact style. + # + # - compact_line: Similar to compact, but wrap each line with + # `{-#LANGUAGE #-}'. + # + # Default: vertical. + style: vertical + + # Align affects alignment of closing pragma brackets. + # + # - true: Brackets are aligned in same column. + # + # - false: Brackets are not aligned together. There is only one space + # between actual import and closing bracket. 
+ # + # Default: true + align: true + + # stylish-haskell can detect redundancy of some language pragmas. If this + # is set to true, it will remove those redundant pragmas. Default: true. + remove_redundant: true + + # Replace tabs by spaces. This is disabled by default. + # - tabs: + # # Number of spaces to use for each tab. Default: 8, as specified by the + # # Haskell report. + # spaces: 8 + + # Remove trailing whitespace + - trailing_whitespace: {} + +# A common setting is the number of columns (parts of) code will be wrapped +# to. Different steps take this into account. Default: 80. +columns: 70 + +# By default, line endings are converted according to the OS. You can override +# preferred format here. +# +# - native: Native newline format. CRLF on Windows, LF on other OSes. +# +# - lf: Convert to LF ("\n"). +# +# - crlf: Convert to CRLF ("\r\n"). +# +# Default: native. +newline: native + +# Sometimes, language extensions are specified in a cabal file or from the +# command line instead of using language pragmas in the file. stylish-haskell +# needs to be aware of these, so it can parse the file correctly. +# +# No language extensions are enabled by default. +language_extensions: + - TemplateHaskell + - QuasiQuotes + - CPP diff --git a/postgrest_v12.2.8/BACKERS.md b/postgrest_v12.2.8/BACKERS.md new file mode 100644 index 0000000..af227a0 --- /dev/null +++ b/postgrest_v12.2.8/BACKERS.md @@ -0,0 +1,102 @@ +# Sponsors & Backers + +PostgREST ongoing development is only possible thanks to our Sponsors and Backers, listed below. If you'd like to join them, you can do so by supporting the PostgREST organization on [Patreon](https://www.patreon.com/postgrest). + +## Sponsors + +(sponsor logo tables omitted)
+## Lead Backers + +- [Roboflow](https://github.com/roboflow) +- Evans Fernandes +- [Jan Sommer](https://github.com/nerfpops) +- [Franz Gusenbauer](https://www.igutech.at/) + +## Backers + +- Zac Miller +- Tsingson Qin +- Michel Pelletier +- Jay Hannah +- Robert Stolarz +- Nicholas DiBiase +- Christopher Reid +- Nathan Bouscal +- Daniel Rafaj +- David Fenko +- Remo Rechkemmer +- Severin Ibarluzea +- Tom Saleeba +- Pawel Tyll + +## Former Backers + +(former-backer logo tables omitted)
+ +- [Christiaan Westerbeek](https://devotis.nl) +- [Daniel Babiak](https://github.com/dbabiak) +- Kofi Gumbs diff --git a/postgrest_v12.2.8/CHANGELOG.md b/postgrest_v12.2.8/CHANGELOG.md new file mode 100644 index 0000000..8b388b2 --- /dev/null +++ b/postgrest_v12.2.8/CHANGELOG.md @@ -0,0 +1,1100 @@ +# Change Log + +All notable changes to this project will be documented in this file. +This project adheres to [Semantic Versioning](http://semver.org/). + +## Unreleased + +## [12.2.8] - 2025-02-10 + +### Fixed + + - #3841, Log `503` client error to stderr - @taimoorzaeem + +## [12.2.7] - 2025-02-03 + +### Fixed + + - #2524, Fix schema reloading notice on windows - @diogob + +## [12.2.6] - 2025-01-29 + +### Fixed + + - #3788, Fix jwt cache does not remove expired entries - @taimoorzaeem + +## [12.2.5] - 2025-01-20 + +### Fixed + + - #3867, Fix startup for arm64 docker image - @wolfgangwalther + +## [12.2.4] - 2025-01-18 + +### Fixed + + - #3779, Always log the schema cache load time - @steve-chavez + - #3706, Fix insert with `missing=default` uses default value of domain instead of column - @taimoorzaeem + +## [12.2.3] - 2024-08-01 + +### Fixed + + - #3091, Broken link in OpenAPI description `externalDocs` - @salim-b + - #3659, Embed One-to-One relationship with different column order properly - @wolfgangwalther + - #3504, Remove `format` from `rowFilter` parameters in OpenAPI - @dantheman2865 + - #3660, Fix regression that loaded the schema cache before the in-database configuration - @steve-chavez, @laurenceisla + +## [12.2.2] - 2024-07-10 + +### Fixed + + - #3093, Nested empty embeds no longer show empty values and are correctly omitted - @laurenceisla + - #3644, Make --dump-schema work with in-database pgrst.db_schemas setting - @wolfgangwalther + - #3644, Show number of timezones in schema cache load report - @wolfgangwalther + - #3644, List correct enum options in OpenApi output when multiple types with same name are present - @wolfgangwalther + - #3523, Fix schema cache loading retry without backoff - @steve-chavez + +## [12.2.1] - 2024-06-27 + +### Fixed + + - #3147, Don't reload schema cache on every listener failure - @steve-chavez + +### Documentation + + - #3592, Architecture diagram now supports dark mode and has links - @laurenceisla + - #3616, The schema isolation diagram now supports dark mode and uses well-known schemas - @laurenceisla + +## [12.2.0] - 2024-06-11 + +### Added + + - #2887, Add Preference `max-affected` to limit affected resources - @taimoorzaeem + - #3171, Add an ability to dump config via admin API - @skywriter + - #3171, #3046, Log schema cache stats to stderr - @steve-chavez + - #3210, Dump schema cache through admin API - @taimoorzaeem + - #2676, Performance improvement on bulk json inserts, around 10% increase on requests per second by removing `json_typeof` from write queries - @steve-chavez + - #3435, Add log-level=debug, for development purposes - @steve-chavez + - #1526, Add `/metrics` endpoint on admin server - @steve-chavez + + Exposes connection pool metrics, schema cache metrics + - #3404, Show extra information in the `PGRST121` (could not parse RAISE 'PGRST') error - @laurenceisla + + Shows the failed MESSAGE or DETAIL in the `details` field + + Shows the correct JSON format in the `hints` field + - #3340, Log when the LISTEN channel gets a notification - @steve-chavez + - #3184, Log full pg version to stderr
on connection - @steve-chavez + - #3242, Add config `db-hoisted-tx-settings` to apply only hoisted function settings - @taimoorzaeem + - #3214, #3229, Log connection pool events on log-level=debug - @steve-chavez, @laurenceisla + +### Fixed + + - #3237, Dump media handlers and timezones with --dump-schema - @wolfgangwalther + - #3323, #3324, Don't hide error on LISTEN channel failure - @steve-chavez + - #3330, Incorrect admin server `/ready` response on slow schema cache loads - @steve-chavez + - #3345, Fix in-database configuration values not loading for `pgrst.server_trace_header` and `pgrst.server_cors_allowed_origins` - @laurenceisla + - #3404, Clarify the `PGRST121` (could not parse RAISE 'PGRST') error message - @laurenceisla + - #3267, Fix wrong `503 Service Unavailable` on pg error `53400` - @taimoorzaeem + - #2985, Fix not adding `application_name` on all connection strings - @steve-chavez + - #3424, Admin `/live` and `/ready` now differentiate a failure as 500 status - @steve-chavez + + 503 status is still given when PostgREST is in a recovering state + - #3478, Media Types are parsed case insensitively - @develop7 + - #3533, #3536, Fix listener silently failing on read replica - @steve-chavez + + If the LISTEN connection fails, it's retried with exponential backoff + - #3414, Force listener to connect to read-write instances using `target_session_attrs` - @steve-chavez + - #3255, Fix incorrect `413 Request Entity Too Large` on pg errors `54*` - @taimoorzaeem + - #3549, Remove verbosity from error logs starting with "An error occurred..." and replacing it with "Failed to..." - @laurenceisla + +### Deprecated + + - Support for PostgreSQL versions 9.6, 10 and 11 is deprecated. From this version onwards, PostgREST will only support non-end-of-life PostgreSQL versions. See https://www.postgresql.org/support/versioning/. + - `Prefer: params=single-object` is deprecated. Use [a function with a single unnamed JSON parameter](https://postgrest.org/en/latest/references/api/functions.html#function-single-json) instead. - @steve-chavez + +### Documentation + + - #3289, Add dark mode. Can be toggled by a button in the bottom right corner. - @laurenceisla + - #3384, Add architecture diagram and documentation - @steve-chavez + +## [12.0.3] - 2024-05-09 + +### Fixed + + - #3149, Misleading "Starting PostgREST.."
logs on schema cache reloading - @steve-chavez + - #3205, Fix wrong subquery error returning a status of 400 Bad Request - @steve-chavez + - #3224, Return status code 406 for non-accepted media type instead of code 415 - @wolfgangwalther + - #3160, Fix using select= query parameter for custom media type handlers - @wolfgangwalther + - #3361, Clarify PGRST204(column not found) error message - @steve-chavez + - #3373, Remove rejected mediatype `application/vnd.pgrst.object+json` from response - @taimoorzaeem + - #3418, Fix OpenAPI not tagging a FK column correctly on O2O relationships - @laurenceisla + - #3256, Fix wrong http status for pg error `42P17 infinite recursion` - @taimoorzaeem + +## [12.0.2] - 2023-12-20 + +### Fixed + + - #3124, Fix table's media type handlers not working for all schemas - @steve-chavez + - #3126, Fix empty row on media type handler function - @steve-chavez + +## [12.0.1] - 2023-12-12 + +### Fixed + + - #3054, Fix not allowing special characters in JSON keys - @laurenceisla + - #2344, Replace JSON parser error with a clearer generic message - @develop7 + - #3100, Add missing in-database configuration option for `jwt-cache-max-lifetime` - @laurenceisla + - #3089, The any media type handler now sets `Content-Type: application/octet-stream` by default instead of `Content-Type: application/json` - @steve-chavez + +## [12.0.0] - 2023-12-01 + +### Added + + - #1614, Add `db-pool-automatic-recovery` configuration to disable connection retrying - @taimoorzaeem + - #2492, Allow full response control when raising exceptions - @taimoorzaeem, @laurenceisla + - #2771, #2983, #3062, #3055 Add `Server-Timing` response header - @taimoorzaeem, @develop7, @laurenceisla + - #2698, Add config `jwt-cache-max-lifetime` and implement JWT caching - @taimoorzaeem + - #2943, Add `handling=strict/lenient` for Prefer header - @taimoorzaeem + - #2441, Add config `server-cors-allowed-origins` to specify CORS origins - @taimoorzaeem + - #2825, SQL handlers for custom media types - @steve-chavez + + Solves #1548, #2699, #2763, #2170, #1462, #1102, #1374, #2901 + - #2799, Add timezone in Prefer header - @taimoorzaeem + - #3001, Add `statement_timeout` set on functions - @taimoorzaeem + - #3045, Apply superuser settings on impersonated roles if they have PostgreSQL 15 `GRANT SET ON PARAMETER` privilege - @steve-chavez + - #915, Add support for aggregate functions - @timabdulla + + The aggregate functions SUM(), MAX(), MIN(), AVG(), and COUNT() are now supported. + + It's disabled by default, you can enable it with `db-aggregates-enabled`. 
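As a quick illustration of the aggregate support from #915 above: a minimal sketch using Python's `requests` library, where the server URL and the `orders` table with its `customer_id` and `amount` columns are hypothetical, and `db-aggregates-enabled = true` is assumed.

```
import requests

BASE_URL = "http://localhost:3000"  # hypothetical PostgREST instance

# Aggregates are applied per column in `select`; columns listed without
# an aggregate act as the grouping keys (assumes db-aggregates-enabled).
resp = requests.get(
    f"{BASE_URL}/orders",  # hypothetical table
    params={"select": "customer_id,amount.sum(),amount.count()"},
)
resp.raise_for_status()
print(resp.json())  # e.g. [{"customer_id": 1, "sum": 99.5, "count": 3}, ...]
```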
+ - #3057, Log all internal database errors to stderr - @laurenceisla + +### Fixed + + - #3015, Fix unnecessary count() on RPC returning single - @steve-chavez + - #1070, Fix HTTP status responses for upserts - @taimoorzaeem + + `PUT` returns `201` instead of `200` when rows are inserted + + `POST` with `Prefer: resolution=merge-duplicates` returns `200` instead of `201` when no rows are inserted + - #3019, Transaction-Scoped Settings are now shown clearly in the Postgres logs - @laurenceisla + + Shows `set_config('pgrst.setting_name', $1)` instead of `setconfig($1, $2)` + + Does not apply to role settings and `app.settings.*` + - #2420, Fix bogus message when listening on port 0 - @develop7 + - #3067, Fix Acquisition Timeout errors logging to stderr when `log-level=crit` - @laurenceisla + +### Changed + + - Removed [raw-media-types config](https://postgrest.org/en/v11.1/references/configuration.html#raw-media-types) - @steve-chavez + - Removed `application/octet-stream`, `text/plain`, `text/xml` [builtin support for scalar results](https://postgrest.org/en/v11.1/references/api/resource_representation.html#scalar-function-response-format) - @steve-chavez + - Removed default `application/openapi+json` media type for [db-root-spec](https://postgrest.org/en/v11.1/references/configuration.html#db-root-spec) - @steve-chavez + - Removed [db-use-legacy-gucs](https://postgrest.org/en/v11.2/references/configuration.html#db-use-legacy-gucs) - @laurenceisla + + All PostgreSQL versions now use GUCs in JSON format for [Headers, Cookies and JWT claims](https://postgrest.org/en/v12/references/transactions.html#request-headers-cookies-and-jwt-claims). + +## [11.2.2] - 2023-10-25 + +### Fixed + + - #2824, Fix regression by reverting fix that returned 206 when first position = length in a `Range` header - @laurenceisla, @strengthless + +## [11.2.1] - 2023-10-03 + +### Fixed + + - #2899, Fix `application/vnd.pgrst.array` not accepted as a valid mediatype - @taimoorzaeem + - #2524, Fix schema cache and configuration reloading with `NOTIFY` not working on Windows - @diogob, @laurenceisla + - #2915, Fix duplicate headers in response - @taimoorzaeem + - #2824, Fix range request with first position same as length return status 206 - @taimoorzaeem + - #2939, Fix wrong `Preference-Applied` with `Prefer: tx=commit` when transaction is rolled back - @steve-chavez + - #2939, Fix `count=exact` not being included in `Preference-Applied` - @steve-chavez + - #2800, Fix not including to-one embed resources that had a `NULL` value in any of the selected fields when doing null filtering on them - @laurenceisla + - #2846, Fix error when requesting `Prefer: count=` and doing null filtering on embedded resources - @laurenceisla + - #2959, Fix setting `default_transaction_isolation` unnecessarily - @steve-chavez + - #2929, Fix arrow filtering on RPC returning dynamic TABLE with composite type - @steve-chavez + - #2963, Fix RPCs not embedding correctly when using overloaded functions for computed relationships - @laurenceisla + - #2970, Fix regression that rejects URI connection strings with certain unescaped characters in the password - @laurenceisla, @steve-chavez + +## [11.2.0] - 2023-08-10 + +### Added + + - #2523, Data representations - @aljungberg + + Allows for flexible API output formatting and input parsing on a per-column type basis using regular SQL functions configured in the database + + Enables greater flexibility in the form and shape of your APIs, both for output and input, making PostgREST a more versatile
general-purpose API server + + Examples include base64 encode/decode your binary data (like a `bytea` column containing an image), choose whether to present a timestamp column as seconds since the Unix epoch or as an ISO 8601 string, or represent fixed precision decimals as strings, not doubles, to preserve precision + + ...and accept the same in `POST/PUT/PATCH` by configuring the reverse transformation(s) + + Other use-cases include custom representation of enums, arrays, nested objects, CSS hex colour strings, gzip compressed fields, metric to imperial conversions, and much more + + Works when using the `select` parameter to select only a subset of columns, embedding through complex joins, renaming fields, with views and computed columns + + Works when filtering on a formatted column without extra indexes by parsing to the canonical representation + + Works for data `RETURNING` operations, such as requesting the full body in a POST/PUT/PATCH with `Prefer: return=representation` + + Works for batch updates and inserts + + Completely optional, define the functions in the database and they will be used automatically everywhere + + Data representations preserve the ability to write to the original column and require no extra storage or complex triggers (compared to using `GENERATED ALWAYS` columns) + + Note: data representations require Postgres 10 (Postgres 11 if using `IN` predicates); data representations are not implemented for RPC + - #2647, Allow to verify the PostgREST version in SQL: `select distinct application_name from pg_stat_activity`. - @laurenceisla + - #2856, Add the `--version` CLI option that prints the version information - @laurenceisla + - #1655, Improve `details` field of the singular error response - @taimoorzaeem + - #740, Add `Preference-Applied` in response for `Prefer: return=representation/headers-only/minimal` - @taimoorzaeem + - #1601, Add optional `nulls=stripped` parameter for mediatypes `application/vnd.pgrst.array+json` and `application/vnd.pgrst.object+json` - @taimoorzaeem + +### Fixed + + - #2821, Fix OPTIONS not accepting all available media types - @steve-chavez + - #2834, Fix compilation on Ubuntu by being compatible with GHC 9.0.2 - @steve-chavez + - #2840, Fix `Prefer: missing=default` with DOMAIN default values - @steve-chavez + - #2849, Fix HEAD unnecessarily executing aggregates - @steve-chavez + - #2594, Fix unused index on jsonb/jsonb arrow filter and order (``/bets?data->>contractId=eq.1`` and ``/bets?order=data->>contractId``) - @steve-chavez + - #2861, Fix character and bit columns with fixed length not inserting/updating properly - @laurenceisla + + Fixes the error "value too long for type character(1)" when the char length of the column was bigger than one. + - #2862, Fix null filtering on embedded resource when using a column name equal to the relation name - @steve-chavez + - #1586, Fix function parameters of type character and bit not ignoring length - @laurenceisla + + Fixes the error "value too long for type character(1)" when the char length of the parameter was bigger than one. + - #2881, Fix error when a function returns `RECORD` or `SET OF RECORD` - @laurenceisla + - #2896, Fix applying superuser settings for impersonated role - @steve-chavez + +### Deprecated + + - #2863, Deprecate resource embedding target disambiguation - @steve-chavez + + The `/table?select=*,other!fk(*)` must be used to disambiguate + + The server aids in choosing the `!fk` by sending a `hint` on the error whenever an ambiguous request happens. 
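To make the `!fk` disambiguation just described concrete, here is a rough sketch with Python's `requests`; the `projects` and `users` tables and the `projects_owner_fkey` constraint name are made-up placeholders.

```
import requests

BASE_URL = "http://localhost:3000"  # hypothetical PostgREST instance

# When two tables are linked by more than one foreign key, the embed
# must name the wanted constraint: /table?select=*,other!fk(*).
resp = requests.get(
    f"{BASE_URL}/projects",
    params={"select": "*,users!projects_owner_fkey(*)"},
)
# If the embed is left ambiguous, the error body carries a `hint`
# listing the candidate !fk names to choose from.
print(resp.status_code, resp.json())
```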
+ +## [11.1.0] - 2023-06-07 + +### Added + + - #2786, Limit idle postgresql connection lifetime - @robx + + New option `db-pool-max-idletime` (default 30s). + + This is equivalent to the old option `db-pool-timeout` of PostgREST 10.0.0. + + A config alias for `db-pool-timeout` is included. + - #2703, Add pre-config function - @steve-chavez + + New config option `db-pre-config`(empty by default) + + Allows using the in-database configuration without SUPERUSER + - #2781, When `db-channel-enabled` is false, start automatic connection recovery on a new request when pool connections are closed with `pg_terminate_backend` - @steve-chavez + + Mitigates the lack of LISTEN/NOTIFY for schema cache reloading on read replicas. + +### Fixed + + - #2791, Fix dropping schema cache reload notifications - @steve-chavez + - #2801, Stop retrying connection when "no password supplied" - @steve-chavez + +## [11.0.1] - 2023-04-27 + +### Fixed + + - #2762, Fixes "permission denied for schema" error during schema cache load - @steve-chavez + - #2756, Fix bad error message on generated columns when using `Prefer: missing=default` - @steve-chavez + - #1139, Allow a 30 second skew for JWT validation - @steve-chavez + + It used to be 1 second, which was too strict + +## [11.0.0] - 2023-04-16 + +### Added + + - #1414, Add related orders - @steve-chavez + + On a many-to-one or one-to-one relationship, you can order a parent by a child column `/projects?select=*,clients(*)&order=clients(name).desc.nullsfirst` + - #1233, #1907, #2566, Allow spreading embedded resources - @steve-chavez + + On a many-to-one or one-to-one relationship, you can unnest a json object with `/projects?select=*,...clients(client_name:name)` + + Allows including the join table columns when resource embedding + + Allows disambiguating a recursive m2m embed + + Allows disambiguating an embed that has a many-to-many relationship using two foreign keys on a junction + - #2340, Allow embedding without selecting any column - @steve-chavez + - #2563, Allow `is.null` or `not.is.null` on an embedded resource - @steve-chavez + + Offers a more flexible replacement for `!inner`, e.g. `/projects?select=*,clients(*)&clients=not.is.null` + + Allows doing an anti join, e.g. `/projects?select=*,clients(*)&clients=is.null` + + Allows using or across related tables conditions + - #1100, Customizable OpenAPI title - @AnthonyFisi + - #2506, Add `server-trace-header` for tracing HTTP requests. - @steve-chavez + + When the client sends the request header specified in the config it will be included in the response headers. + - #2694, Make `db-root-spec` stable. - @steve-chavez + + This can be used to override the OpenAPI spec with a custom database function + - #1567, On bulk inserts, missing values can get the column DEFAULT by using the `Prefer: missing=default` header - @steve-chavez + - #2501, Allow filtering by`IS DISTINCT FROM` using the `isdistinct` operator, e.g. `/people?alias=isdistinct.foo` + - #1569, Allow `any/all` modifiers on the `eq,like,ilike,gt,gte,lt,lte,match,imatch` operators, e.g. `/tbl?id=eq(any).{1,2,3}` - @steve-chavez + - This converts the input into an array type + - #2561, Configurable role settings - @steve-chavez + - Database roles that are members of the connection role get their settings applied, e.g. doing + `ALTER ROLE anon SET statement_timeout TO '5s'` will result in that `statement_timeout` getting applied for that role. 
+ - Works when switching roles when a JWT is sent + - Settings can be reloaded with `NOTIFY pgrst, 'reload config'`. + - #2468, Configurable transaction isolation level with `default_transaction_isolation` - @steve-chavez + - Can be set per function `create function .. set default_transaction_isolation = 'repeatable read'` + - Or per role `alter role .. set default_transaction_isolation = 'serializable'` + +### Fixed + + - #2651, Add the missing `get` path item for RPCs to the OpenAPI output - @laurenceisla + - #2648, Fix inaccurate error codes with new ones - @laurenceisla + + `PGRST204`: Column is not found + + `PGRST003`: Timed out when acquiring connection to db + - #1652, Fix function call with arguments not inlining - @steve-chavez + - #2705, Fix bug when using the `Range` header on `PATCH/DELETE` - @laurenceisla + + Fix the`"message": "syntax error at or near \"RETURNING\""` error + + Fix doing a limited update/delete when an `order` query parameter was present + - #2742, Fix db settings and pg version queries not getting prepared - @steve-chavez + - #2618, Fix `PATCH` requests not recognizing embedded filters and using the top-level resource instead - @steve-chavez + +### Changed + + - #2705, The `Range` header is now only considered on `GET` requests and is ignored for any other method - @laurenceisla + + Other methods should use the `limit/offset` query parameters for sub-ranges + + `PUT` requests no longer return an error when this header is present (using `limit/offset` still triggers the error) + - #2733, Remove bulk RPC call with the `Prefer: params=multiple-objects` header. A function with a JSON array or object parameter should be used instead. + +## [10.2.0] - 2023-04-12 + +### Added + + - #2663, Limit maximal postgresql connection lifetime - @robx + + New option `db-pool-max-lifetime` (default 30m) + + `db-pool-acquisition-timeout` is no longer optional and defaults to 10s + + Fixes postgresql resource leak with long-lived connections (#2638) + +### Fixed + + - #2667, Fix `db-pool-acquisition-timeout` not logging to stderr when the timeout is reached - @steve-chavez + +## [10.1.2] - 2023-02-01 + +### Fixed + + - #2565, Fix bad M2M embedding on RPC - @steve-chavez + - #2575, Replace misleading error message when no function is found with a hint containing functions/parameters names suggestions - @laurenceisla + - #2582, Move explanation about "single parameters" from the `message` to the `details` in the error output - @laurenceisla + - #2569, Replace misleading error message when no relationship is found with a hint containing parent/child names suggestions - @laurenceisla + - #1405, Add the required OpenAPI items object when the parameter is an array - @laurenceisla + - #2592, Add upsert headers for POST requests to the OpenAPI output - @laurenceisla + - #2623, Fix FK pointing to VIEW instead of TABLE in OpenAPI output - @laurenceisla + - #2622, Consider any PostgreSQL authentication failure as fatal and exit immediately - @michivi + - #2620, Fix `NOTIFY pgrst` not reloading the db connections catalog cache - @steve-chavez + +## [10.1.1] - 2022-11-08 + +### Fixed + + - #2548, Fix regression when embedding views with partial references to multi column FKs - @wolfgangwalther + - #2558, Fix regression when requesting limit=0 and `db-max-row` is set - @laurenceisla + - #2542, Return a clear error without hitting the database when trying to update or insert an unknown column with `?columns` - @aljungberg + +## [10.1.0] - 2022-10-28 + +### Added + + - #2348, Add 
`db-pool-acquisition-timeout` configuration option, time in seconds to wait to acquire a connection. - @robx + +### Fixed + + - #2261, #2349, #2467, Reduce allocations in communication with PostgreSQL, particularly for request bodies. - @robx + - #2401, #2444, Fix SIGUSR1 to fully flush connections pool. - @robx + - #2428, Fix opening an empty transaction on failed resource embedding - @steve-chavez + - #2455, Fix embedding the same table multiple times - @steve-chavez + - #2518, Fix a regression when embedding views where base tables have a different column order for FK columns - @wolfgangwalther + - #2458, Fix a regression with the location header when inserting into views with PKs from multiple tables - @wolfgangwalther + - #2356, Fix a regression in openapi output with mode follow-privileges - @wolfgangwalther + - #2283, Fix infinite recursion when loading schema cache with self-referencing view - @wolfgangwalther + - #2343, Return status code 200 for PATCH requests which don't affect any rows - @wolfgangwalther + - #2481, Treat computed relationships not marked SETOF as M2O/O2O relationship - @wolfgangwalther + - #2534, Fix embedding a computed relationship with a normal relationship - @steve-chavez + - #2362, Fix error message when [] is used inside select - @wolfgangwalther + - #2475, Disallow !inner on computed columns - @wolfgangwalther + - #2285, Ignore leading and trailing spaces in column names when parsing the query string - @wolfgangwalther + - #2545, Fix UPSERT with PostgreSQL 15 - @wolfgangwalther + - #2459, Fix embedding views with multiple references to the same base column - @wolfgangwalther + +### Changed + + - #2444, Removed `db-pool-timeout` option, because this was removed upstream in hasql-pool. - @robx + - #2343, PATCH requests that don't affect any rows no longer return 404 - @wolfgangwalther + - #2537, Stricter parsing of query string. Instead of silently ignoring, the parser now throws on invalid syntax like json paths for embeddings, hints for regular columns, empty casts or fts languages, etc. - @wolfgangwalther + +### Deprecated + + - #1385, Deprecate bulk-calls when including the `Prefer: params=multiple-objects` in the request. A function with a JSON array or object parameter should be used instead for better performance. + +## [10.0.0] - 2022-08-18 + +### Added + + - #1933, #2109, Add a minimal health check endpoint - @steve-chavez + + For enabling this, the `admin-server-port` config must be set explicitly + + A `:/live` endpoint is available for checking if postgrest is running on its port/socket. 200 OK = alive, 503 = dead. + + A `:/ready` endpoint is available for checking a correct internal state(the database connection plus the schema cache). 200 OK = ready, 503 = not ready. + - #1988, Add the current user to the request log on stdout - @DavidLindbom, @wolfgangwalther + - #1823, Add the ability to run postgrest without any configuration. - @wolfgangwalther + + #1991, Add the ability to run without `db-uri` using libpq's PG environment variables to connect. - @wolfgangwalther + + #1769, Add the ability to run without `db-schemas`, defaulting to `db-schemas=public`. - @wolfgangwalther + + #1689, Add the ability to run without `db-anon-role` disabling anonymous access. - @wolfgangwalther + - #1543, Allow access to fields of composite types in select=, order= and filters through JSON operators -> and ->>. - @wolfgangwalther + - #2075, Allow access to array items in ?select=, ?order= and filters through JSON operators -> and ->>.
- @wolfgangwalther + - #2156, #2211, Allow applying `limit/offset` to UPDATE/DELETE to only affect a subset of rows - @steve-chavez + + It requires an explicit `order` on a unique column(s) + - #1917, Add error codes with the `"PGRST"` prefix to the error response body to differentiate PostgREST errors from PostgreSQL errors - @laurenceisla + - #1917, Normalize the error response body by always having the `detail` and `hint` error fields with a `null` value if they are empty - @laurenceisla + - #2176, Errors raised with `SQLSTATE` now include the message and the code in the response body - @laurenceisla + - #2236, Support POSIX regular expression operators for row filtering - @enote-kane + - #2202, Allow returning XML from RPCs - @fjf2002 + - #2268, Allow returning XML from single-column queries - @fjf2002 + - #2300, RPC POST for function w/single unnamed XML param #2300 - @fjf2002 + - #1564, Allow geojson output by specifying the `Accept: application/geo+json` media type - @steve-chavez + + Requires postgis >= 3.0 + + Works for GET, RPC, POST/PATCH/DELETE with `Prefer: return=representation`. + + Resource embedding works and the embedded rows will go into the `properties` key + + In case of multiple geometries in the same table, you can choose which one will go into the `geometry` key with the usual `?select` query parameter. + - #1082, Add security definitions to the OpenAPI output - @laurenceisla + - #2378, Support http OPTIONS method on RPC and root path - @steve-chavez + - #2354, Allow getting the EXPLAIN plan of a request by using the `Accept: application/vnd.pgrst.plan` header (see the sketch below) - @steve-chavez + + Only allowed if the `db-plan-enabled` config is set to true + + Can generate the plan for different media types using the `for` parameter: `Accept: application/vnd.pgrst.plan; for="application/vnd.pgrst.object"` + + Different options for the plan can be used with the `options` parameter: `Accept: application/vnd.pgrst.plan; options=analyze|verbose|settings|buffers|wal` + + The plan can be obtained in text or json by using different media type suffixes: `Accept: application/vnd.pgrst.plan+text` and `Accept: application/vnd.pgrst.plan+json`. + - #2144, Support computed relationships which allow extending and overriding relationships for resource embedding - @steve-chavez, @wolfgangwalther + - #1984, Detect one-to-one relationships for resource embedding - @steve-chavez + + Detected when there's a foreign key with a unique constraint or when a foreign key is also a primary key + +### Fixed + + - #2058, Return 204 No Content without Content-Type for PUT - @wolfgangwalther + - #2107, Clarify error for failed schema cache load. - @steve-chavez + + From `Database connection lost. Retrying the connection` to `Could not query the database for the schema cache. Retrying.` + - #1771, Fix silently ignoring filter on a non-existent embedded resource - @steve-chavez + - #2152, Remove functions, which are uncallable because of unnamed arguments, from schema cache and OpenAPI output. - @wolfgangwalther + - #2145, Fix accessing json array fields with -> and ->> in ?select= and ?order=. - @wolfgangwalther + - #2155, Ignore `max-rows` on POST, PATCH, PUT and DELETE - @steve-chavez + - #2254, Fix inferring a foreign key column as a primary key column on views - @steve-chavez + - #2070, Restrict generated many-to-many relationships - @steve-chavez + + Only adds many-to-many relationships when: a table has FKs to two other tables and these FK columns are part of the table's PK columns.
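The usage sketch promised at the #2354 entry above, again with Python's `requests`; the instance URL and `projects` table are hypothetical and `db-plan-enabled = true` is assumed.

```
import requests

BASE_URL = "http://localhost:3000"  # hypothetical instance, db-plan-enabled = true

# Ask for the EXPLAIN plan of the request instead of its result,
# using the media type suffix and options from the entry above.
resp = requests.get(
    f"{BASE_URL}/projects",  # hypothetical table
    params={"id": "eq.1"},
    headers={"Accept": "application/vnd.pgrst.plan+json; options=analyze|verbose"},
)
print(resp.json())
```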
+ - #2278, Allow casting to types with underscores and numbers(e.g. `select=oid_array::_int4`) - @steve-chavez + - #2277, #2238, #1643, Prevent views from breaking one-to-many/many-to-one embeds when using column or FK as target - @steve-chavez + + When using a column or FK as target for embedding(`/tbl?select=*,col-or-fk(*)`), only tables are now detected and views are not. + + You can still use a column or an inferred FK on a view to embed a table(`/view?select=*,col-or-fk(*)`) + - #2317, Increase the `db-pool-timeout` to 1 hour to prevent frequent high connection latency - @steve-chavez + - #2341, The search path now correctly identifies schemas with uppercase and special characters in their names (regression) - @laurenceisla + - #2364, "404 Not Found" on nested routes and "405 Method Not Allowed" errors no longer start an empty database transaction - @steve-chavez + - #2342, Fix inaccurate result count when an inner embed was selected after a normal embed in the query string - @laurenceisla + - #2376, OPTIONS requests no longer start an empty database transaction - @steve-chavez + - #2395, Allow using columns with dollar sign($) without double quoting in filters and `select` - @steve-chavez + - #2410, Fix loop crash error on startup in Postgres 15 beta 3. Log: "UNION types \"char\" and text cannot be matched". - @yevon + - #2397, Fix race conditions managing database connection helper - @robx + - #2269, Allow `limit=0` in the request query to return an empty array - @gautam1168, @laurenceisla + - #2401, Ensure database connections can't outlive SIGUSR1 - @robx + +### Changed + + - #2001, Return 204 No Content without Content-Type for RPCs returning VOID - @wolfgangwalther + + Previously, those RPCs would return "null" as a body with Content-Type: application/json. + - #2156, `limit/offset` now limits the affected rows on UPDATE/DELETE - @steve-chavez + + Previously, `limit/offset` only limited the returned rows but not the actual updated rows + - #2155, `max-rows` is no longer applied on POST/PATCH/PUT/DELETE returned rows - @steve-chavez + + This was misleading because the affected rows were not really affected by `max-rows`, only the returned rows were limited + - #2070, Restrict generated many-to-many relationships - @steve-chavez + + A primary key that contains the foreign key columns is now needed for generating many-to-many relationships. + - #2277, Views now are not detected when embedding using the column or FK as target (`/view?select=*,column(*)`) - @steve-chavez + + This embedding form was easily made ambiguous whenever a new view was added. + + You can use computed relationships to keep this embedding form working + - #2312, Using `Prefer: return=representation` no longer returns a `Location` header - @laurenceisla + - #1984, For the cases where one to one relationships are detected, json objects will be returned instead of json arrays of length 1 + + If you wish to override this behavior, you can use computed relationships to return arrays again + + You can get the newly detected one-to-one relationships by using the `--dump-schema` option and filtering with [jq](https://github.com/jqlang/jq). 
+ ``` + ./postgrest --dump-schema \ + | jq '[.dbRelationships | .[] | .[1] | .[] | select(.relCardinality.tag == "O2O" and .relFTableIsView == false and .relTableIsView == false) | del(.relFTableIsView,.relTableIsView,.tag,.relIsSelf)]' + ``` + +## [9.0.1] - 2022-06-03 + +### Fixed + +- #2165, Fix json/jsonb columns should not have type in OpenAPI spec - @clrnd +- #2020, Execute deferred constraint triggers when using `Prefer: tx=rollback` - @wolfgangwalther +- #2077, Fix `is` not working with upper or mixed case values like `NULL, TrUe, FaLsE` - @steve-chavez +- #2024, Fix schema cache loading when views with XMLTABLE and DEFAULT are present - @wolfgangwalther +- #1724, Fix wrong CORS header Authentication -> Authorization - @wolfgangwalther +- #2120, Fix reading database configuration properly when `=` is present in value - @wolfgangwalther +- #2135, Remove trigger functions from schema cache and OpenAPI output, because they can't be called directly anyway. - @wolfgangwalther +- #2101, Remove aggregates, procedures and window functions from the schema cache and OpenAPI output. - @wolfgangwalther +- #2153, Fix --dump-schema running with a wrong PG version. - @wolfgangwalther +- #2042, Keep working when EMFILE(Too many open files) is reached. - @steve-chavez +- #2147, Ignore `Content-Type` headers for `GET` requests when calling RPCs. - @laurenceisla + + Previously, `GET` without parameters, but with `Content-Type: text/plain` or `Content-Type: application/octet-stream` would fail with `404 Not Found`, even if a function without arguments was available. +- #2239, Fix misleading disambiguation error where the content of the `relationship` key looks like valid syntax - @laurenceisla +- #2294, Disable parallel GC for better performance on higher core CPUs - @steve-chavez +- #1076, Fix using CPU while idle - @steve-chavez + +## [9.0.0] - 2021-11-25 + +### Added + + - #1783, Include partitioned tables into the schema cache. Allows embedding, UPSERT, INSERT with Location response, OPTIONS request and OpenAPI support for partitioned tables - @laurenceisla + - #1878, Add Retry-After hint header when in recovery mode - @gautam1168 + - #1735, Allow calling function with single unnamed param through RPC POST. - @steve-chavez + + Enables calling a function with a single json parameter without using `Prefer: params=single-object` + + Enables uploading bytea to a function with `Content-Type: application/octet-stream` + + Enables uploading raw text to a function with `Content-Type: text/plain` + - #1938, Allow escaping inside double quotes with a backslash, e.g. `?col=in.("Double\"Quote")`, `?col=in.("Back\\slash")` - @steve-chavez + - #1075, Allow filtering top-level resource based on embedded resources filters. This is enabled by adding `!inner` to the embedded resource, e.g. 
`/projects?select=*,clients!inner(*)&clients.id=eq.12`- @steve-chavez, @Iced-Sun + - #1857, Make GUC names for headers, cookies and jwt claims compatible with PostgreSQL v14 - @laurenceisla, @robertsosinski + + Getting the value for a header GUC on PostgreSQL 14 is done using `current_setting('request.headers')::json->>'name-of-header'` and in a similar way for `request.cookies` and `request.jwt.claims` + + PostgreSQL versions below 14 can opt in to the new JSON GUCs by setting the `db-use-legacy-gucs` config option to false (true by default) + - #1988, Allow specifying `unknown` for the `is` operator - @steve-chavez + - #2031, Improve error message for ambiguous embedding and add a relevant hint that includes unambiguous embedding suggestions - @laurenceisla + +### Fixed + + - #1871, Fix OpenAPI missing default values for String types and identify Array types as "array" instead of "string" - @laurenceisla + - #1930, Fix RPC return type handling for `RETURNS TABLE` with a single column. Regression of #1615. - @wolfgangwalther + - #1938, Fix using single double quotes(`"`) and backslashes(`/`) as values on the "in" operator - @steve-chavez + - #1992, Fix schema cache query failing with standard_conforming_strings = off - @wolfgangwalther + +### Changed + + - #1949, Drop support for embedding hints used with '.'(`select=projects.client_id(*)`), '!' should be used instead(`select=projects!client_id(*)`) - @steve-chavez + - #1783, Partitions (created using `PARTITION OF`) are no longer included in the schema cache. - @laurenceisla + - #2038, Dropped support for PostgreSQL 9.5 - @wolfgangwalther + +## [8.0.0] - 2021-07-25 + +### Added + + - #1525, Allow http status override through response.status guc - @steve-chavez + - #1512, Allow schema cache reloading with NOTIFY - @steve-chavez + - #1119, Allow config file reloading with SIGUSR2 - @steve-chavez + - #1558, Allow 'Bearer' with and without capitalization as authentication schema - @wolfgangwalther + - #1470, Allow calling RPC with variadic argument by passing repeated params - @wolfgangwalther + - #1559, No downtime when reloading the schema cache with SIGUSR1 - @steve-chavez + - #504, Add `log-level` config option. The admitted levels are: crit, error, warn and info - @steve-chavez + - #1607, Enable embedding through multiple views recursively - @wolfgangwalther + - #1598, Allow rollback of the transaction with Prefer tx=rollback - @wolfgangwalther + - #1633, Enable prepared statements for GET filters. When behind a connection pooler, you can disable preparing with `db-prepared-statements=false` + + This increases throughput by around 30% for simple GET queries(no embedding, with filters applied). 
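The `Prefer: tx=rollback` feature from #1598 above lends itself to dry-running writes; a minimal sketch with Python's `requests`, against a hypothetical `items` table.

```
import requests

BASE_URL = "http://localhost:3000"  # hypothetical PostgREST instance

# Run an insert but have the server roll the transaction back, so the
# row is returned (return=representation) yet never committed.
resp = requests.post(
    f"{BASE_URL}/items",  # hypothetical table
    json={"name": "scratch row"},
    headers={"Prefer": "tx=rollback, return=representation"},
)
print(resp.status_code, resp.json())
```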
+ - #1729, #1760, Get configuration parameters from the db and allow reloading config with NOTIFY - @steve-chavez + - #1824, Allow OPTIONS to generate certain HTTP methods for a DB view - @laurenceisla + - #1872, Show timestamps in startup/worker logs - @steve-chavez + - #1881, Add `openapi-mode` config option that allows ignoring roles privileges when showing the OpenAPI output - @steve-chavez + - CLI options(for debugging): + + #1678, Add --dump-config CLI option that prints loaded config and exits - @wolfgangwalther + + #1691, Add --example CLI option to show example config file - @wolfgangwalther + + #1697, #1723, Add --dump-schema CLI option for debugging purposes - @monacoremo, @wolfgangwalther + - #1794, (Experimental) Add `request.spec` GUC for db-root-spec - @steve-chavez + +### Fixed + + - #1592, Removed single column restriction to allow composite foreign keys in join tables - @goteguru + - #1530, Fix how the PostgREST version is shown in the help text when the `.git` directory is not available - @monacoremo + - #1094, Fix expired JWTs starting an empty transaction on the db - @steve-chavez + - #1162, Fix location header for POST request with select= without PK - @wolfgangwalther + - #1585, Fix error messages on connection failure for localized postgres on Windows - @wolfgangwalther + - #1636, Fix `application/octet-stream` appending `charset=utf-8` - @steve-chavez + - #1469, #1638, Fix overloading of functions with unnamed arguments - @wolfgangwalther + - #1560, Return 405 Method not Allowed for GET of volatile RPC instead of 500 - @wolfgangwalther + - #1584, Fix RPC return type handling and embedding for domains with composite base type (#1615) - @wolfgangwalther + - #1608, #1635, Fix embedding through views that have COALESCE with subselect - @wolfgangwalther + - #1572, Fix parsing of boolean config values for Docker environment variables, now it accepts double quoted truth values ("true", "false") and numbers("1", "0") - @wolfgangwalther + - #1624, Fix using `app.settings.xxx` config options in Docker, now they can be used as `PGRST_APP_SETTINGS_xxx` - @wolfgangwalther + - #1814, Fix panic when attempting to run with unix socket on non-unix host and properly close unix domain socket on exit - @monacoremo + - #1825, Disregard internal junction(in non-exposed schema) when embedding - @steve-chavez + - #1846, Fix requests for overloaded functions from html forms to no longer hang (#1848) - @laurenceisla + - #1858, Add a hint and clarification to the no relationship found error - @laurenceisla + - #1841, Show comprehensive error when an RPC is not found in a stale schema cache - @laurenceisla + - #1875, Fix Location headers in headers only representation for null PK inserts on views - @laurenceisla + +### Changed + + - #1522, #1528, #1535, Docker images are now built from scratch based on the static PostgREST executable (#1494) and with Nix instead of a `Dockerfile`. This reduces the compressed image size from over 30mb to about 4mb - @monacoremo + - #1461, Location header for POST request is only included when PK is available on the table - @wolfgangwalther + - #1560, Volatile RPC called with GET now returns 405 Method not Allowed instead of 500 - @wolfgangwalther + - #1584, #1849, Functions that declare `returns composite_type` no longer return a single object array by default, only functions with `returns setof composite_type` return an array of objects - @wolfgangwalther + - #1604, Change the default logging level to `log-level=error`.
Only requests with a status greater than or equal to 500 will be logged. If you wish to go back to the previous behaviour and log all the requests, use `log-level=info` - @steve-chavez + + Because currently there's no buffering for logging, defaulting to the `error` level(minimum logging) increases throughput by around 15% for simple GET queries(no embedding, with filters applied). + - #1617, Dropped support for PostgreSQL 9.4 - @wolfgangwalther + - #1679, Renamed config settings with fallback aliases. e.g. `max-rows` is now `db-max-rows`, but `max-rows` is still accepted - @wolfgangwalther + - #1656, Allow `Prefer=headers-only` on POST requests and change default to `minimal` (#1813) - @laurenceisla + - #1854, Dropped undocumented support for gzip compression (which was surprisingly slow and easily enabled by mistake). In some use-cases this makes PostgREST up to 3x faster - @aljungberg + - #1872, Send startup/worker logs to stderr to differentiate from access logs on stdout - @steve-chavez + +## [7.0.1] - 2020-05-18 + +### Fixed + +- #1473, Fix overloaded computed columns on RPC - @wolfgangwalther +- #1471, Fix POST, PATCH, DELETE with ?select= and return=minimal and PATCH with empty body - @wolfgangwalther +- #1500, Fix missing `openapi-server-proxy-uri` config option - @steve-chavez +- #1508, Fix `Content-Profile` not working for POST RPC - @steve-chavez +- #1452, Fix PUT restriction for all columns - @steve-chavez + +### Changed + +- From this version onwards, the release page will only include a single Linux static executable that can be run on any Linux distribution. + +## [7.0.0] - 2020-04-03 + +### Added + +- #1417, `Accept: application/vnd.pgrst.object+json` behavior is now enforced for POST/PATCH/DELETE regardless of `Prefer: return=representation/minimal` - @dwagin +- #1415, Add support for user defined socket permission via `server-unix-socket-mode` config option - @Dansvidania +- #1383, Add support for HEAD request - @steve-chavez +- #1378, Add support for `Prefer: count=planned` and `Prefer: count=estimated` on GET /table - @steve-chavez, @LorenzHenk +- #1327, Add support for optional query parameter `on_conflict` to upsert with specified keys for POST - @ykst +- #1430, Allow specifying the foreign key constraint name(`/source?select=fk_constraint(*)`) to disambiguate an embedding - @steve-chavez +- #1168, Allow access to the `Authorization` header through the `request.header.authorization` GUC - @steve-chavez +- #1435, Add `request.method` and `request.path` GUCs - @steve-chavez +- #1088, Allow adding headers to GET/POST/PATCH/PUT/DELETE responses through the `response.headers` GUC - @steve-chavez +- #1427, Allow overriding provided headers(Location, Content-Type, etc) through the `response.headers` GUC - @steve-chavez +- #1450, Allow multiple schemas to be exposed in one instance.
The schema to use can be selected through the headers `Accept-Profile` for GET/HEAD and `Content-Profile` for POST/PATCH/PUT/DELETE - @steve-chavez, @mahmoudkassem + +### Fixed + +- #1301, Fix self join resource embedding on PATCH - @herulume, @steve-chavez +- #1389, Fix many to many resource embedding on RPC/PATCH - @steve-chavez +- #1355, Allow PATCH/DELETE without `return=minimal` on tables with no select privileges - @steve-chavez +- #1361, Fix embedding a VIEW when its source foreign key is UNIQUE - @bwbroersma + +### Changed + +- #1385, bulk RPC call now should be done by specifying a `Prefer: params=multiple-objects` header - @steve-chavez +- #1401, resource embedding now outputs an error when multiple relationships between two tables are found - @steve-chavez +- #1423, default Unix Socket file mode from 755 to 660 - @dwagin +- #1430, Remove embedding with duck typed column names `GET /projects?select=client(*)`- @steve-chavez + + You can rename the foreign key to `client` to make this request work in the new version: `alter table projects rename constraint projects_client_id_fkey to client` +- #1413, Change `server-proxy-uri` config option to `openapi-server-proxy-uri` - @steve-chavez + +## [6.0.2] - 2019-08-22 + +### Fixed + +- #1369, Change `raw-media-types` to accept a string of comma separated MIME types - @Dansvidania +- #1368, Fix long column descriptions being truncated at 63 characters in PostgreSQL 12 - @amedeedaboville +- #1348, Go back to converting plus "+" to space " " in querystrings by default - @steve-chavez + +### Deprecated + +- #1348, Deprecate `.` symbol for disambiguating resource embedding(added in #918). The url-safe '!' should be used instead. We refrained from using `+` as part of our syntax because it conflicts with some http clients and proxies. 
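A short sketch of the multi-schema profile headers from #1450 above, with Python's `requests`; the `tenant2` schema and `items` table are made up.

```
import requests

BASE_URL = "http://localhost:3000"  # hypothetical multi-schema instance

# Reads pick their schema with Accept-Profile (GET/HEAD)...
rows = requests.get(
    f"{BASE_URL}/items",
    headers={"Accept-Profile": "tenant2"},
).json()

# ...while writes use Content-Profile (POST/PATCH/PUT/DELETE).
requests.post(
    f"{BASE_URL}/items",
    json={"name": "example"},
    headers={"Content-Profile": "tenant2"},
)
```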
+ +## [6.0.1] - 2019-07-30 + +### Added + +- #1349, Add user defined raw output media types via `raw-media-types` config option - @Dansvidania +- #1243, Add websearch_to_tsquery support - @herulume + +### Fixed + +- #1336, Error when testing on Chrome/Firefox: text/html requested but a single column was not selected - @Dansvidania +- #1334, Unable to compile v6.0.0 on windows - @steve-chavez + +## [6.0.0] - 2019-06-21 + +### Added + +- #1186, Add support for user defined unix socket via `server-unix-socket` config option - @Dansvidania +- #690, Add `?columns` query parameter for faster bulk inserts, also ignores unspecified json keys in a payload - @steve-chavez +- #1239, Add support for resource embedding on materialized views - @vitorbaptista +- #1264, Add support for bulk RPC call - @steve-chavez +- #1278, Add db-pool-timeout config option - @qu4tro +- #1285, Abort on wrong database password - @qu4tro +- #790, Allow override of OpenAPI spec through `root-spec` config option - @steve-chavez +- #1308, Accept `text/plain` and `text/html` for raw output - @steve-chavez + + +### Fixed + +- #1223, Fix incorrect OpenAPI externalDocs url - @steve-chavez +- #1221, Fix embedding other resources when having a self join - @steve-chavez +- #1242, Fix embedding a view having a select in a where - @steve-chavez +- #1238, Fix PostgreSQL to OpenAPI type mappings for numeric and character types - @fpusch +- #1265, Fix query generated on bulk upsert with an empty array - @qu4tro +- #1273, Fix RPC ignoring unknown arguments by default - @steve-chavez +- #1257, Fix incorrect status when a PATCH request doesn't find rows to change - @qu4tro + +### Changed + +- #1288, Change server-host default of 127.0.0.1 to !4 + +### Deprecated + +- #1288, Deprecate `.` symbol for disambiguating resource embedding(added in #918). '+' should be used instead. Though '+' is url safe, certain clients might need to encode it to '%2B'. 
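The `?columns` parameter from #690 above can be sketched like this with Python's `requests`; the `items` table and its columns are hypothetical.

```
import requests

BASE_URL = "http://localhost:3000"  # hypothetical PostgREST instance

# With ?columns, PostgREST builds the INSERT from the listed columns
# and ignores any other keys in the payload objects.
payload = [
    {"name": "a", "value": 1, "junk": "ignored"},
    {"name": "b", "value": 2},
]
resp = requests.post(
    f"{BASE_URL}/items",  # hypothetical table
    params={"columns": "name,value"},
    json=payload,
)
print(resp.status_code)
```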
+ +### Removed + +- #1288, Removed support for schema reloading with SIGHUP, SIGUSR1 should be used instead - @steve-chavez + +## [5.2.0] - 2018-12-12 + +### Added + +- #1205, Add support for parsing JSON Web Key Sets - @russelldavies +- #1203, Add support for reading db-uri from a separate file - @zhoufeng1989 +- #1200, Add db-extra-search-path config for adding schemas to the search_path, solves issues related to extensions created on the public schema - @steve-chavez +- #1219, Add ability to quote column names on filters - @steve-chavez + +### Fixed + +- #1182, Fix embedding on views with composite pks - @steve-chavez +- #1180, Fix embedding on views with subselects in pg10 - @steve-chavez +- #1197, Allow CORS for PUT - @bkylerussell +- #1181, Correctly qualify function argument of custom type in public schema - @steve-chavez +- #1008, Allow columns that contain spaces in filters - @steve-chavez + +## [5.1.0] - 2018-08-31 + +### Added + +- #1099, Add support for getting json/jsonb by array index - @steve-chavez +- #1145, Add materialized view columns to OpenAPI output - @steve-chavez +- #709, Allow embedding on views with subselects/CTE - @steve-chavez +- #1148, OpenAPI: add `required` section for the non-nullable columns - @laughedelic +- #1158, Add summary to OpenAPI doc for RPC functions - @mdr1384 + +### Fixed + +- #1113, Fix UPSERT failing when having a camel case PK column - @steve-chavez +- #945, Fix slow start-up time on big schemas - @steve-chavez +- #1129, Fix view embedding when table is capitalized - @steve-chavez +- #1149, OpenAPI: Change `GET` response type to array - @laughedelic +- #1152, Fix RPC failing when having arguments with reserved or uppercase keywords - @mdr1384 +- #905, Fix intermittent empty replies - @steve-chavez +- #1139, Fix JWTIssuedAtFuture failure for valid iat claim - @steve-chavez +- #1141, Fix app.settings resetting on pool timeout - @steve-chavez + +### Changed + +- #1099, Numbers in json path `?select=data->1->>key` now get treated as json array indexes instead of keys - @steve-chavez +- #1128, Allow finishing a json path with a single arrow `->`. Now a json can be obtained without resorting to casting, Previously: `/json_arr?select=data->>2::json`, now: `/json_arr?select=data->2` - @steve-chavez +- #724, Change server-host default of *4 to 127.0.0.1 + +### Deprecated + +- #724, SIGHUP deprecated, SIGUSR1 should be used instead + +## [0.5.0.0] - 2018-05-14 + +### Added + +- The configuration (e.g. `postgrest.conf`) now accepts arbitrary settings that will be passed through as session-local database settings. This can be used to pass in secret keys directly as strings, or via OS environment variables. For instance: `app.settings.jwt_secret = "$(MYAPP_JWT_SECRET)"` will take `MYAPP_JWT_SECRET` from the environment and make it available to postgresql functions as `current_setting('app.settings.jwt_secret')`. Only `app.settings.*` values in the configuration file are treated in this way. 
- @canadaduane +- #256, Add support for bulk UPSERT with POST and single UPSERT with PUT - @steve-chavez +- #1078, Add ability to specify source column in embed - @steve-chavez +- #821, Allow embeds alias to be used in filters - @steve-chavez +- #906, Add jspath configurable `role-claim-key` - @steve-chavez +- #1061, Add foreign tables to OpenAPI output - @rhyamada + +### Fixed + +- #828, Fix computed column only working in public schema - @steve-chavez +- #925, Fix RPC high memory usage by using parametrized query and avoiding json encoding - @steve-chavez +- #987, Fix embedding with self-reference foreign key - @steve-chavez +- #1044, Fix view parent embedding when having many views - @steve-chavez +- #781, Fix accepting misspelled desc/asc ordering modifiers - @onporat, @steve-chavez + +### Changed + +- #828, A `SET SCHEMA ` is done on each request; this has the following implications: + - Computed columns now only work if they belong to the db-schema + - Stored procedures might require a `search_path` to work properly, for further details see https://postgrest.org/en/v5.0/api.html#explicit-qualification +- To use RPC now the `json_to_record/json_to_recordset` functions are needed, these are available starting from PostgreSQL 9.4 - @steve-chavez +- Overloaded functions now depend on the `dbStructure`, restart/sighup may be needed for their correct functioning - @steve-chavez +- #1098, Removed support for: + + curly braces `{}` in embeds, i.e. `/clients?select=*,projects{*}` can no longer be used, from now on parens `()` should be used `/clients?select=*,projects(*)` - @steve-chavez + + "in" operator without parens, i.e. `/clients?id=in.1,2,3` no longer supported, `/clients?id=in.(1,2,3)` should be used - @steve-chavez + + "@@", "@>" and "<@" operators, from now on their mnemonic equivalents should be used "fts", "cs" and "cd" respectively - @steve-chavez + +## [0.4.4.0] - 2018-01-08 + +### Added + +- #887, #601, #1007, Allow specifying dictionary and plain/phrase tsquery in full text search - @steve-chavez +- #328, Allow doing GET on rpc - @steve-chavez +- #917, Add ability to map RAISE errorcode/message to http status - @steve-chavez +- #940, Add ability to map GUC to http response headers - @steve-chavez +- #1022, Include git sha in version report - @begriffs +- Faster queries using json_agg - @ruslantalpa + +### Fixed + +- #876, Read secret files as binary, discard final LF if any - @eric-brechemier +- #968, Treat blank proxy uri as missing - @begriffs +- #933, OpenAPI externals docs url to current version - @steve-chavez +- #962, OpenAPI don't err on nonexistent schema - @steve-chavez +- #954, make OpenAPI rpc output dependent on user privileges - @steve-chavez +- #955, Support configurable aud claim - @statik +- #996, Fix embedded column conflicts table name - @grotsev +- #974, Fix RPC error when function has single OUT param - @steve-chavez +- #1021, Reduce join size in allColumns for faster program start - @nextstopsun +- #411, Remove the need for pk in &select for parent embed - @steve-chavez +- #1016, Fix anonymous requests when configured with jwt-aud - @ruslantalpa + +## [0.4.3.0] - 2017-09-06 + +### Added + +- #567, Support more JWT signing algorithms, including RSA - @begriffs +- #889, Allow more than two conditions in a single and/or - @steve-chavez +- #883, Binary output support for RPC - @steve-chavez +- #885, Postgres COMMENTs on SCHEMA/TABLE/COLUMN are used for OpenAPI - @ldesgoui +- #907, Ability to embed using a specific relation when there are multiple between
tables - @ruslantalpa
+- #930, Split table comment on newline to get OpenAPI operation summary and description - @daurnimator
+- #938, Support for range operators - @russelldavies
+
+### Fixed
+
+- #877, Base64 secret read from a file ending with a newline - @eric-brechemier
+- #896, Boolean env var interpolation in config file - @begriffs
+- #885, OpenAPI repetition reduced by using more definitions - @ldesgoui
+- #924, Improve relations initialization time - @9too
+- #927, Treat blank pre-request as missing - @begriffs
+
+### Changed
+
+- #938, Deprecate symbol operators in favor of mnemonic names - @russelldavies
+
+## [0.4.2.0] - 2017-06-11
+
+### Added
+
+- #742, Add connection retrying on startup and SIGHUP - @steve-chavez
+- #652, Add and/or params for complex boolean logic - @steve-chavez
+- #808, Env var interpolation in config file (helps Docker) - @begriffs
+- #878, CSV output support for RPC - @begriffs
+
+### Fixed
+
+- #822, Treat blank string JWT secret as no secret - @begriffs
+
+## [0.4.1.0] - 2017-04-25
+
+### Added
+- Allow requesting binary output on GET - @steve-chavez
+- Accept clients requesting `Content-Type: application/json` from / - @feynmanliang
+- #493, Updating with empty JSON object makes zero updates - @koulakis
+- #800, Make HTTP headers and cookies available as GUCs - @ruslantalpa
+- #701, Ability to quote values on IN filters - @steve-chavez
+- #641, Allow IN filter to have no values - @steve-chavez
+
+### Fixed
+- #827, Avoid Warp reaper, extend socket timeout to 1 hour - @majorcode
+- #791, malformed nested JSON error - @diogob
+- Resource embedding in views referencing tables in public schema - @fab1an
+- #777, Empty body is allowed when calling a non-parameterized RPC - @koulakis
+- #831, Fix proc resource embedding issue with search_path - @steve-chavez
+- #547, Use read-only transaction for stable/immutable RPC - @begriffs
+
+## [0.4.0.0] - 2017-01-19
+
+### Added
+- Allow test database to be on another host - @dsimunic
+- `Prefer: params=single-object` to treat payload as single json argument in RPC - @dsimunic
+- Ability to generate an OpenAPI spec - @mainx07, @hudayou, @ruslantalpa, @begriffs
+- Ability to generate an OpenAPI spec behind a proxy - @hudayou
+- Ability to set addresses to listen on - @hudayou
+- Filtering, shaping and embedding with &select for the /rpc path - @ruslantalpa
+- Output names of user-defined types (instead of 'USER-DEFINED') - @martingms
+- Implement support for singular representation responses for POST/PATCH requests - @ehamberg
+- Include RPC endpoints in OpenAPI output - @begriffs, @LogvinovLeon
+- Custom request validation with `--pre-request` argument - @begriffs
+- Ability to order by jsonb keys - @steve-chavez
+- Ability to specify offset for a deeper level - @ruslantalpa
+- Ability to use binary base64 encoded secrets - @TrevorBasinger
+
+### Fixed
+- Do not apply limit to parent items - @ruslantalpa
+- Fix bug in relation detection when selecting parents two levels up by using the name of the FK - @ruslantalpa
+- Customize content negotiation per route - @begriffs
+- Allow using nulls order without explicit order direction - @steve-chavez
+- Fatal error on unsupported postgres version; format supported version in error message - @steve-chavez
+- Prevent database memory consumption by prepared statements caches - @ruslantalpa
+- Use specific columns in the RETURNING section - @ruslantalpa
+- Fix columns alias for RETURNING - @steve-chavez
+
+### Changed
+- Replace `Prefer: plurality=singular` with `Accept:
application/vnd.pgrst.object` - @begriffs
+- Standardize arrays in responses for `Prefer: return=representation` - @begriffs
+- Calling unknown RPC gives 404, not 400 - @begriffs
+- Use HTTP 400 for raise\_exception - @begriffs
+- Remove non-OpenAPI schema description - @begriffs
+- Use comma rather than semicolon to separate Prefer header values - @begriffs
+- Omit total query count by default - @begriffs
+- No more reserved `jwt_claims` return type - @begriffs
+- HTTP 401 rather than 400 for expired JWT - @begriffs
+- Remove default JWT secret - @begriffs
+- Use GUC request.jwt.claim.foo rather than postgrest.claims.foo - @begriffs
+- Use config file rather than command line arguments - @begriffs
+
+## [0.3.2.0] - 2016-06-10
+
+### Added
+- Reload database schema on SIGHUP - @begriffs
+- Support "-" in column names - @ruslantalpa
+- Support column/node renaming `alias:column` - @ruslantalpa
+- Accept posts from HTML forms - @begriffs
+- Ability to order embedded entities - @ruslantalpa
+- Ability to paginate using &limit and &offset parameters - @ruslantalpa
+- Ability to apply limits to embedded entities and enforce --max-rows on all levels - @ruslantalpa, @begriffs
+- Add allow response header in OPTIONS - @begriffs
+
+### Fixed
+- Return 401 or 403 for access denied rather than 404 - @begriffs
+- Omit Content-Type header for empty body - @begriffs
+- Prevent role from being changed twice - @begriffs
+- Use read-only transaction for read requests - @ruslantalpa
+- Include entities from the same parent table using two different foreign keys - @ruslantalpa
+- Ensure that Location header in 201 response is URL-encoded - @league
+- Fix garbage collector CPU leak - @ruslantalpa et al.
+- Return deleted items when return=representation header is sent - @ruslantalpa
+- Use table default values for empty object inserts - @begriffs
+
+## [0.3.1.1] - 2016-03-28
+
+### Fixed
+- Preserve unicode values in insert, update, rpc (regression) - @begriffs
+- Prevent duplicate call to stored procs (regression) - @begriffs
+- Allow SQL functions to generate registered JWT claims - @begriffs
+- Terminate gracefully on SIGTERM (for use in Docker) - @recmo
+- Relation detection fix for views that depend on multiple tables - @ruslantalpa
+- Avoid count on plurality=singular and allow multiple Prefer values - @ruslantalpa
+
+## [0.3.1.0] - 2016-02-28
+
+### Fixed
+- Prevent query error from infecting later connection - @begriffs, @ruslantalpa, @nikita-volkov, @jwiegley
+
+### Added
+- Applies range headers to RPC calls - @diogob
+
+## [0.3.0.4] - 2016-02-12
+
+### Fixed
+- Improved usage screen - @begriffs
+- Reject non-POSTs to rpc endpoints - @begriffs
+- Throw an error for OPTIONS on nonexistent tables - @calebmer
+- Remove deadlock on simultaneous contentious updates - @ruslantalpa, @begriffs
+
+## [0.3.0.3] - 2016-01-08
+
+### Fixed
+- Fix bug in many-many relation detection - @ruslantalpa
+- Inconsistent escaping of table names in read queries - @calebmer
+
+## [0.3.0.2] - 2015-12-16
+
+### Fixed
+- Miscalculation of time used for expiring tokens - @calebmer
+- Remove bcrypt dependency to fix Windows build - @begriffs
+- Detect relations even when authenticator does not have rights to intermediate tables - @ruslantalpa
+- Ensure db connections released on sigint - @begriffs
+- Fix #396, include records with missing parents - @ruslantalpa
+- `pgFmtIdent` always quotes #388 - @calebmer
+- Default schema changed from `"1"` to `public` - @calebmer
+- #414, revert to separate count query - @ruslantalpa
+- Fix
#399, allow inserting in tables with no select privileges using "Prefer: representation=minimal" - @ruslantalpa + +### Added +- Allow order by computed columns - @diogob +- Set max rows in response with --max-rows - @begriffs +- Selection by column name (can detect if `_id` is not included) - @calebmer + +## [0.3.0.1] - 2015-11-27 + +### Fixed +- Filter columns on embedded parent items - @ruslantalpa + +## [0.3.0.0] - 2015-11-24 + +### Fixed +- Use reasonable amount of memory during bulk inserts - @begriffs + +### Added +- Ensure JWT expires - @calebmer +- Postgres connection string argument - @calebmer +- Encode JWT for procs that return type `jwt_claims` - @diogob +- Full text operators `@>`,`<@` - @ruslantalpa +- Shaping of the response body (filter columns, embed relations) with &select parameter for POST/PATCH - @ruslantalpa +- Detect relationships between public views and private tables - @calebmer +- `Prefer: plurality=singular` for selecting single objects - @calebmer + +### Removed +- API versioning feature - @calebmer +- `--db-x` command line arguments - @calebmer +- Secure flag - @calebmer +- PUT request handling - @ruslantalpa + +### Changed +- Embed foreign keys with {} rather than () - @begriffs +- Remove version number from binary filename in release - @begriffs + +## [0.2.12.1] - 2015-11-12 + +### Fixed +- Correct order for -> and ->> in a json path - @ruslantalpa +- Return empty array instead of 500 when a set returning function returns an empty result set - @diogob + +## [0.2.12.0] - 2015-10-25 + +### Added +- Embed associations, e.g. `/film?select=*,director(*)` - @ruslantalpa +- Filter columns, e.g. `?select=col1,col2` - @ruslantalpa +- Does not execute the count total if header "Prefer: count=none" - @diogob + +### Fixed +- Tolerate a missing role in user creation - @calebmer +- Avoid unnecessary text re-encoding - @ruslantalpa + +## [0.2.11.1] - 2015-09-01 + +### Fixed +- Accepts `*/*` in Accept header - @diogob + +## [0.2.11.0] - 2015-08-28 +### Added +- Negate any filter in a uniform way, e.g. `?col=not.eq=foo` - @diogob +- Call stored procedures +- Filter NOT IN values, e.g. 
`?col=notin.1,2,3` - @rall
+- CSV responses to GET requests with `Accept: text/csv` - @diogob
+- Debian init scripts - @mkhon
+- Allow filters by computed columns - @diogob
+
+### Fixed
+- Reset user role on error
+- Compatible with Stack
+- Add materialized views to results in GET / - @diogob
+- Indicate insertable=true for views that are insertable through triggers - @diogob
+- Builds under GHC 7.10
+- Allow the use of columns named "count" in relations queried - @diogob
+
+## [0.2.10.0] - 2015-06-03
+### Added
+- Full text search, e.g. `/foo?text_vector=@@.bar`
+- Include auth id as well as db role to views (for row-level security)
+
+## [0.2.9.1] - 2015-05-20
+### Fixed
+- Put -Werror behind a cabal flag (for CI) so Hackage accepts package
+
+## [0.2.9.0] - 2015-05-20
+### Added
+- Return range headers in PATCH
+- Return PATCHed resources if header "Prefer: return=representation"
+- Allow nested objects and arrays in JSON post for jsonb columns
+- JSON Web Tokens - [Federico Rampazzo](https://github.com/framp)
+- Expose PostgREST as a Haskell package
+
+### Fixed
+- Return 404 if no records updated by PATCH
+
+## [0.2.8.0] - 2015-04-17
+### Added
+- Option to specify nulls first or last, e.g. `/people?order=age.desc.nullsfirst`
+- Filter nulls, `?col=is.null` and `?col=isnot.null`
+- Filter within jsonb, `?col->a->>b=eq.c`
+- Accept CSV in post body for bulk inserts
+
+### Fixed
+- Allow NULL values in posts
+- Show full command line usage on param errors
+
+## [0.2.7.0] - 2015-03-03
+### Added
+- Server response logging
+- Filter IN values, e.g. `?col=in.1,2,3`
+- Return POSTed resource if header "Prefer: return=representation"
+- Allow override of default (v1) schema
+
+## [0.2.6.0] - 2015-02-18
+### Added
+- A changelog
+- Filter by substring match, e.g. `?col=like.*hello*` (or ilike for
+  case insensitivity).
+- Access-Control-Expose-Headers for CORS
+
+### Fixed
+- Make filter position match docs, e.g. `?order=col.asc` rather
+  than `?order=asc.col`.
diff --git a/postgrest_v12.2.8/CODE_OF_CONDUCT.md b/postgrest_v12.2.8/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..38f9ab0
--- /dev/null
+++ b/postgrest_v12.2.8/CODE_OF_CONDUCT.md
@@ -0,0 +1,132 @@
+
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at support@postgrest.org. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+[https://www.contributor-covenant.org/version/2/0/code_of_conduct.html][v2.0].
+
+Community Impact Guidelines were inspired by
+[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
+
+For answers to common questions about this code of conduct, see the FAQ at
+[https://www.contributor-covenant.org/faq][FAQ]. Translations are available
+at [https://www.contributor-covenant.org/translations][translations].
+
+[homepage]: https://www.contributor-covenant.org
+[v2.0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct.html
+[Mozilla CoC]: https://github.com/mozilla/diversity
+[FAQ]: https://www.contributor-covenant.org/faq
+[translations]: https://www.contributor-covenant.org/translations
diff --git a/postgrest_v12.2.8/CONTRIBUTING.md b/postgrest_v12.2.8/CONTRIBUTING.md
new file mode 100644
index 0000000..be92f4d
--- /dev/null
+++ b/postgrest_v12.2.8/CONTRIBUTING.md
@@ -0,0 +1,3 @@
+This repository follows the same contribution guidelines as the main PostgREST repository:
+
+https://github.com/PostgREST/postgrest/blob/main/.github/CONTRIBUTING.md
diff --git a/postgrest_v12.2.8/Dockerfile b/postgrest_v12.2.8/Dockerfile
new file mode 100644
index 0000000..c213fa2
--- /dev/null
+++ b/postgrest_v12.2.8/Dockerfile
@@ -0,0 +1,21 @@
+# PostgREST Docker Hub image for aarch64.
+# The x86-64 is a single-static-binary image built via Nix, see:
+# nix/tools/docker/README.md
+
+FROM ubuntu:noble@sha256:80dd3c3b9c6cecb9f1667e9290b3bc61b78c2678c02cbdae5f0fea92cc6734ab AS postgrest
+
+RUN apt-get update -y \
+  && apt install -y --no-install-recommends libpq-dev zlib1g-dev jq gcc libnuma-dev \
+  && apt-get clean \
+  && rm -rf /var/lib/apt/lists/*
+
+COPY postgrest /usr/bin/postgrest
+RUN chmod +x /usr/bin/postgrest
+
+EXPOSE 3000
+
+USER 1000
+
+# Use the array form to avoid running the command using bash, which does not handle `SIGTERM` properly.
+# See https://docs.docker.com/compose/faq/#why-do-my-services-take-10-seconds-to-recreate-or-stop
+CMD ["postgrest"]
diff --git a/postgrest_v12.2.8/Dockerfile_build_postrest_bin b/postgrest_v12.2.8/Dockerfile_build_postrest_bin
new file mode 100644
index 0000000..e513423
--- /dev/null
+++ b/postgrest_v12.2.8/Dockerfile_build_postrest_bin
@@ -0,0 +1,38 @@
+# Use Ubuntu 22.04 as the base image
+FROM ubuntu:22.04
+
+# Set an environment variable to avoid interactive timezone-configuration prompts
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install the required system dependencies (including the build toolchain)
+RUN apt-get update && apt-get install -y \
+    build-essential \
+    libpq-dev \
+    zlib1g-dev \
+    pkg-config \
+    postgresql \
+    curl \
+    git \
+    wget \
+    ca-certificates \
+    libffi-dev \
+    libssl-dev \
+    libgmp-dev \
+    libtinfo6 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install Stack (the Haskell build tool)
+RUN curl -sSL https://get.haskellstack.org/ | sh
+
+# Set the working directory
+WORKDIR /app
+
+# Copy the contents of the current directory into /app in the container
+COPY . /app
+
+# Expose the port (assuming PostgREST runs on port 3000)
+EXPOSE 3000
+
+# Default command on container start: drop into an interactive shell
+CMD ["/bin/bash"]
+
diff --git a/postgrest_v12.2.8/LICENSE b/postgrest_v12.2.8/LICENSE
new file mode 100644
index 0000000..4d0857d
--- /dev/null
+++ b/postgrest_v12.2.8/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) 2014 Joe Nelson
+Copyright (c) 2019 Steve Chavez
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/postgrest_v12.2.8/README.md b/postgrest_v12.2.8/README.md
new file mode 100644
index 0000000..dd2bd39
--- /dev/null
+++ b/postgrest_v12.2.8/README.md
@@ -0,0 +1,158 @@
+![Logo](static/postgrest.png "Logo")
+
+[![Donate](https://img.shields.io/badge/Donate-Patreon-orange.svg?colorB=F96854)](https://www.patreon.com/postgrest)
+[![Docs](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](http://postgrest.org)
+[![Docker Stars](https://img.shields.io/docker/pulls/postgrest/postgrest.svg)](https://hub.docker.com/r/postgrest/postgrest/)
+[![Build Status](https://github.com/postgrest/postgrest/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/PostgREST/postgrest/actions?query=branch%3Amain)
+[![Coverage Status](https://img.shields.io/codecov/c/github/postgrest/postgrest/main)](https://app.codecov.io/gh/PostgREST/postgrest)
+[![Hackage docs](https://img.shields.io/hackage/v/postgrest.svg?label=hackage)](http://hackage.haskell.org/package/postgrest)
+
+PostgREST serves a fully RESTful API from any existing PostgreSQL
+database. It provides a cleaner, more standards-compliant, faster
+API than you are likely to write from scratch.
+
+## Sponsors
+
+Big thanks to our sponsors! You can join them by supporting PostgREST on [Patreon](https://www.patreon.com/postgrest).
+
+## Usage
+
+1. Download the binary ([latest release](https://github.com/PostgREST/postgrest/releases/latest))
+   for your platform.
+2. Invoke for help:
+
+   ```bash
+   postgrest --help
+   ```
+
+## [Documentation](http://postgrest.org)
+
+Latest documentation is at [postgrest.org](http://postgrest.org). You can contribute to the docs in [PostgREST/postgrest/docs](https://github.com/PostgREST/postgrest/tree/main/docs).
+
+## Performance
+
+TL;DR: subsecond response times for up to 2000 requests/sec on Heroku
+free tier. If you're used to servers written in interpreted languages,
+prepare to be pleasantly surprised by PostgREST performance.
+
+Three factors contribute to the speed. First, the server is written
+in [Haskell](https://www.haskell.org/) using the
+[Warp](http://www.yesodweb.com/blog/2011/03/preliminary-warp-cross-language-benchmarks)
+HTTP server (aka a compiled language with lightweight threads).
+Next, it delegates as much calculation as possible to the database,
+including
+
+* Serializing JSON responses directly in SQL
+* Data validation
+* Authorization
+* Combined row counting and retrieval
+* Data post in single command (`returning *`)
+
+Finally, it uses the database efficiently with the
+[Hasql](https://nikita-volkov.github.io/hasql-benchmarks/) library
+by
+
+* Keeping a pool of db connections
+* Using the PostgreSQL binary protocol
+* Being stateless to allow horizontal scaling
+
+## Security
+
+PostgREST [handles
+authentication](http://postgrest.org/en/stable/auth.html) (via JSON Web
+Tokens) and delegates authorization to the role information defined in
+the database. This ensures there is a single declarative source of truth
+for security. When dealing with the database the server assumes the
+identity of the currently authenticated user, and for the duration of
+the connection cannot do anything the user themselves couldn't. Other
+forms of authentication can be built on top of the JWT primitive. See
+the docs for more information.
+
+## Versioning
+
+A robust long-lived API needs the freedom to exist in multiple
+versions. PostgREST does versioning through database schemas. This
+allows you to expose tables and views without making the app brittle.
+Underlying tables can be superseded and hidden behind public facing
+views.
+
+## Self-documentation
+
+PostgREST uses the [OpenAPI](https://openapis.org/) standard to
+generate up-to-date documentation for APIs. You can use a tool like
+[Swagger-UI](https://github.com/swagger-api/swagger-ui) to render
+interactive documentation for demo requests against the live API server.
+
+This project uses HTTP to communicate other metadata as well. For
+instance, the number of rows returned by an endpoint is reported by -
+and limited with - range headers. More about
+[that](http://begriffs.com/posts/2014-03-06-beyond-http-header-links.html).
+
+## Data Integrity
+
+Rather than relying on an Object Relational Mapper and custom
+imperative coding, this system requires you to put declarative constraints
+directly into your database. Hence no application can corrupt your
+data (including your API server).
+
+PostgREST exposes an HTTP interface with safeguards to prevent
+surprises, such as enforcing idempotent PUT requests.
+
+See examples of [PostgreSQL
+constraints](http://www.tutorialspoint.com/postgresql/postgresql_constraints.htm)
+and the [API guide](http://postgrest.org/en/stable/api.html).
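+
+To make the request style above concrete, here is a minimal, hypothetical session against a `films` table exposed by PostgREST. The table, data, and port are illustrative only; the `select`/`eq`/`order` query parameters and the `Prefer: return=representation` header are the request features described in the changelog and API guide:
+
+```bash
+# Vertical filtering, a row filter, and ordering combined in one query string
+curl "http://localhost:3000/films?select=title,year&year=eq.2000&order=title.asc"
+
+# Insert a row and ask for the stored representation back, so database
+# defaults and constraint results are visible to the client
+curl "http://localhost:3000/films" \
+  -X POST \
+  -H "Content-Type: application/json" \
+  -H "Prefer: return=representation" \
+  -d '{"title": "Memento", "year": 2000}'
+```
+
+If the insert violates a constraint declared in the database, the client receives an HTTP error and the data stays intact, which is the guarantee this section describes.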
+
+## Supporting development
+
+You can help with PostgREST's ongoing maintenance and development by making a regular donation through [Patreon](https://www.patreon.com/postgrest).
+
+Every donation will be spent on making PostgREST better for the whole community.
+
+## Thanks
+
+The PostgREST organization is grateful to:
+
+- The project [sponsors and backers](https://github.com/PostgREST/postgrest/blob/main/BACKERS.md) who support PostgREST's development.
+- The project [contributors](https://github.com/PostgREST/postgrest/graphs/contributors) who have improved PostgREST immensely with their code
+  and good judgement. See more details in the [changelog](https://github.com/PostgREST/postgrest/blob/main/CHANGELOG.md).
+
+The cool logo came from [Mikey Casalaina](https://github.com/casalaina).
diff --git a/postgrest_v12.2.8/Setup.hs b/postgrest_v12.2.8/Setup.hs
new file mode 100644
index 0000000..06b6694
--- /dev/null
+++ b/postgrest_v12.2.8/Setup.hs
@@ -0,0 +1,3 @@
+-- This file is required by Hackage.
+import Distribution.Simple
+main = defaultMain
diff --git a/postgrest_v12.2.8/cabal.project b/postgrest_v12.2.8/cabal.project
new file mode 100644
index 0000000..aa3aa3a
--- /dev/null
+++ b/postgrest_v12.2.8/cabal.project
@@ -0,0 +1,4 @@
+packages: postgrest.cabal
+tests: true
+package *
+  ghc-options: -split-sections
diff --git a/postgrest_v12.2.8/cabal.project.freeze b/postgrest_v12.2.8/cabal.project.freeze
new file mode 100644
index 0000000..1ef087e
--- /dev/null
+++ b/postgrest_v12.2.8/cabal.project.freeze
@@ -0,0 +1 @@
+index-state: hackage.haskell.org 2025-02-01T14:59:33Z
diff --git a/postgrest_v12.2.8/default.nix b/postgrest_v12.2.8/default.nix
new file mode 100644
index 0000000..a55011e
--- /dev/null
+++ b/postgrest_v12.2.8/default.nix
@@ -0,0 +1,160 @@
+{ system ? builtins.currentSystem
+
+, compiler ? "ghc948"
+
+, # Commit of the Nixpkgs repository that we want to use.
+  nixpkgsVersion ? import nix/nixpkgs-version.nix
+
+, # Nix files that describe the Nixpkgs repository. We evaluate the expression
+  # using `import` below.
+  nixpkgs ? let inherit (nixpkgsVersion) owner repo rev tarballHash; in
+    builtins.fetchTarball {
+      url = "https://github.com/${owner}/${repo}/archive/${rev}.tar.gz";
+      sha256 = tarballHash;
+    }
+}:
+
+let
+  name =
+    "postgrest";
+
+  # PostgREST source files, filtered based on the rules in the .gitignore files
+  # and file extensions. We want to include as little as possible, as the files
+  # added here will increase the space used in the Nix store and trigger the
+  # build of new Nix derivations when changed.
+  src =
+    pkgs.lib.sourceFilesBySuffices
+      (pkgs.gitignoreSource ./.)
+      [ ".cabal" ".hs" ".lhs" "LICENSE" ];
+
+  allOverlays =
+    import nix/overlays;
+
+  overlays =
+    [
+      allOverlays.build-toolbox
+      allOverlays.checked-shell-script
+      allOverlays.gitignore
+      allOverlays.postgresql-libpq
+      allOverlays.postgresql-legacy
+      allOverlays.postgresql-future
+      allOverlays.postgis
+      (allOverlays.haskell-packages { inherit compiler; })
+      allOverlays.slocat
+    ];
+
+  # Evaluated expression of the Nixpkgs repository.
+ pkgs = + import nixpkgs { inherit overlays system; }; + + postgresqlVersions = + [ + { name = "postgresql-16"; postgresql = pkgs.postgresql_16.withPackages (p: [ p.postgis p.pg_safeupdate ]); } + { name = "postgresql-15"; postgresql = pkgs.postgresql_15.withPackages (p: [ p.postgis p.pg_safeupdate ]); } + { name = "postgresql-14"; postgresql = pkgs.postgresql_14.withPackages (p: [ p.postgis p.pg_safeupdate ]); } + { name = "postgresql-13"; postgresql = pkgs.postgresql_13.withPackages (p: [ p.postgis p.pg_safeupdate ]); } + { name = "postgresql-12"; postgresql = pkgs.postgresql_12.withPackages (p: [ p.postgis p.pg_safeupdate ]); } + { name = "postgresql-11"; postgresql = pkgs.postgresql_11.withPackages (p: [ p.postgis p.pg_safeupdate ]); } + { name = "postgresql-10"; postgresql = pkgs.postgresql_10.withPackages (p: [ p.postgis p.pg_safeupdate ]); } + { name = "postgresql-9_6"; postgresql = pkgs.postgresql_9_6.withPackages (p: [ p.postgis p.pg_safeupdate ]); } + ]; + + # Dynamic derivation for PostgREST + postgrest = + pkgs.haskell.packages."${compiler}".callCabal2nix name src { }; + + staticHaskellPackage = import nix/static.nix { inherit compiler name pkgs src; }; + + # Options passed to cabal in dev tools and tests + devCabalOptions = + "-f dev --test-show-detail=direct"; + + profiledHaskellPackages = + pkgs.haskell.packages."${compiler}".extend (_: super: + { + mkDerivation = + args: + super.mkDerivation (args // { enableLibraryProfiling = true; }); + } + ); + + inherit (pkgs.haskell) lib; +in +rec { + inherit nixpkgs pkgs; + + # Derivation for the PostgREST Haskell package, including the executable, + # libraries and documentation. We disable running the test suite on Nix + # builds, as they require a database to be set up. + postgrestPackage = + lib.dontCheck postgrest; + + # Profiled dynamic executable. + postgrestProfiled = + lib.enableExecutableProfiling ( + lib.dontHaddock ( + lib.dontCheck (profiledHaskellPackages.callCabal2nix name src { }) + ) + ); + + inherit (postgrest) env; + + # Tooling for analyzing Haskell imports and exports. + hsie = + pkgs.callPackage nix/hsie { + inherit (pkgs.haskell.packages."${compiler}") ghcWithPackages; + }; + + ### Tools + + cabalTools = + pkgs.callPackage nix/tools/cabalTools.nix { inherit devCabalOptions postgrest; }; + + withTools = + pkgs.callPackage nix/tools/withTools.nix { inherit postgresqlVersions postgrest; }; + + # Development tools. + devTools = + pkgs.callPackage nix/tools/devTools.nix { inherit tests style devCabalOptions hsie withTools; }; + + # Documentation tools. + docs = + pkgs.callPackage nix/tools/docs.nix { }; + + # Load testing tools. + loadtest = + pkgs.callPackage nix/tools/loadtest.nix { inherit withTools; }; + + # Script for running memory tests. + memory = + pkgs.callPackage nix/tools/memory.nix { inherit postgrestProfiled withTools; }; + + # Utility for updating the pinned version of Nixpkgs. + nixpkgsTools = + pkgs.callPackage nix/tools/nixpkgsTools.nix { }; + + # Scripts for publishing new releases. + release = + pkgs.callPackage nix/tools/release { }; + + # Linting and styling tools. + style = + pkgs.callPackage nix/tools/style.nix { inherit hsie; }; + + # Scripts for running tests. 
+  tests =
+    pkgs.callPackage nix/tools/tests.nix {
+      inherit postgrest devCabalOptions withTools;
+      ghc = pkgs.haskell.compiler."${compiler}";
+      inherit (pkgs.haskell.packages."${compiler}") hpc-codecov;
+      inherit (pkgs.haskell.packages."${compiler}") weeder;
+    };
+} // pkgs.lib.optionalAttrs pkgs.stdenv.isLinux rec {
+  # Static executable.
+  inherit (staticHaskellPackage) postgrestStatic;
+  inherit (staticHaskellPackage) packagesStatic;
+
+  # Docker images and loading script.
+  docker =
+    pkgs.callPackage nix/tools/docker { postgrest = postgrestStatic; };
+}
diff --git a/postgrest_v12.2.8/docker-hub-readme.md b/postgrest_v12.2.8/docker-hub-readme.md
new file mode 100644
index 0000000..685a275
--- /dev/null
+++ b/postgrest_v12.2.8/docker-hub-readme.md
@@ -0,0 +1,70 @@
+# PostgREST
+
+[![Donate](https://img.shields.io/badge/Donate-Patreon-orange.svg?colorB=F96854)](https://www.patreon.com/postgrest)
+[![Docs](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](http://postgrest.org)
+[![Build Status](https://github.com/postgrest/postgrest/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/PostgREST/postgrest/actions?query=branch%3Amain)
+
+PostgREST serves a fully RESTful API from any existing PostgreSQL database. It
+provides a cleaner, more standards-compliant, faster API than you are likely to
+write from scratch.
+
+## Sponsors
+
+# Usage
+
+To learn how to use this container, see the [PostgREST Docker
+documentation](https://postgrest.org/en/stable/install.html#docker).
+
+You can configure the PostgREST image by setting
+[environment variables](https://postgrest.org/en/stable/configuration.html).
+
+# How this image is built
+
+The image is built from scratch using
+[Nix](https://nixos.org/nixpkgs/manual/#sec-pkgs-dockerTools) instead of a
+`Dockerfile`, which yields a highly secure and optimized image. This is also why
+no commands are listed in the image history. See the [PostgREST
+repository](https://github.com/PostgREST/postgrest/tree/main/nix/tools/docker) for
+details on the build process and how to inspect the image.
+
+This does not apply to the arm64 variant, which is based on Ubuntu.
diff --git a/postgrest_v12.2.8/docs/.gitignore b/postgrest_v12.2.8/docs/.gitignore
new file mode 100644
index 0000000..f0ad786
--- /dev/null
+++ b/postgrest_v12.2.8/docs/.gitignore
@@ -0,0 +1,8 @@
+_build
+Pipfile.lock
+*.aux
+*.log
+_diagrams/db.pdf
+misspellings
+unuseddict
+*.mo
diff --git a/postgrest_v12.2.8/docs/README.md b/postgrest_v12.2.8/docs/README.md
new file mode 100644
index 0000000..6c7a9ea
--- /dev/null
+++ b/postgrest_v12.2.8/docs/README.md
@@ -0,0 +1,28 @@
+# PostgREST documentation https://postgrest.org/
+
+PostgREST docs use the reStructuredText format; check this [cheatsheet](https://github.com/ralsina/rst-cheatsheet/blob/master/rst-cheatsheet.rst) to get acquainted with it.
+
+To build the docs locally, see [the Nix development readme](/nix/README.md#documentation).
+
+## Documentation structure
+
+This documentation is structured according to tutorials-howtos-topics-references. For more details on the rationale of this structure,
+see https://www.divio.com/blog/documentation.
+
+## Translating
+
+To create `.po` files for translation into a new language, pass the language code as the first argument to `postgrest-docs-build`.
+
+Example to add German/de:
+
+```
+postgrest-docs-build de
+```
+
+The livereload server also supports a language/locale argument to show the translated docs during translation:
+
+```
+postgrest-docs-serve de
+```
+
+Spellcheck is currently only available for the default language.
diff --git a/postgrest_v12.2.8/docs/_diagrams/README.md b/postgrest_v12.2.8/docs/_diagrams/README.md
new file mode 100644
index 0000000..1142b48
--- /dev/null
+++ b/postgrest_v12.2.8/docs/_diagrams/README.md
@@ -0,0 +1,24 @@
+## ERD
+
+The ER diagrams were created with https://github.com/BurntSushi/erd/.
+
+You can download erd from https://github.com/BurntSushi/erd/releases and then run:
+
+```bash
+./erd_static-x86-64 -i ./er/film.er -o ../_static/film.png
+```
+
+The fonts used belong to the GNU FreeFont family. You can download them here: http://ftp.gnu.org/gnu/freefont/
+
+## UML
+
+The UML diagrams are created with https://plantuml.com/.
+
+PlantUML only creates one diagram per file.
+That's why we need to create another one for dark mode.
+For example, for the file [uml/arch.uml](uml/arch.uml) there's [uml/dark/arch-dark.uml](uml/dark/arch-dark.uml) which includes the first one: + +```bash +plantuml -tsvg uml/arch.uml -o ../../_static +plantuml -tsvg -darkmode uml/dark/arch-dark.uml -o ../../../_static +``` diff --git a/postgrest_v12.2.8/docs/_diagrams/er/boxoffice.er b/postgrest_v12.2.8/docs/_diagrams/er/boxoffice.er new file mode 100644 index 0000000..7d6b0a1 --- /dev/null +++ b/postgrest_v12.2.8/docs/_diagrams/er/boxoffice.er @@ -0,0 +1,15 @@ +entity {font: "FreeSans"} +relationship {font: "FreeMono"} + +[Box_Office] +*bo_date +*+film_id +gross_revenue + +[Films] +*id ++director_id +title +`...` + +Box_Office +--1 Films diff --git a/postgrest_v12.2.8/docs/_diagrams/er/employees.er b/postgrest_v12.2.8/docs/_diagrams/er/employees.er new file mode 100644 index 0000000..4632f5b --- /dev/null +++ b/postgrest_v12.2.8/docs/_diagrams/er/employees.er @@ -0,0 +1,12 @@ +# Build using: -e ortho + +entity {font: "FreeSans"} +relationship {font: "FreeMono"} + +[Employees] +*id +first_name +last_name ++supervisor_id + +Employees 1--* Employees diff --git a/postgrest_v12.2.8/docs/_diagrams/er/film.er b/postgrest_v12.2.8/docs/_diagrams/er/film.er new file mode 100644 index 0000000..cc54c48 --- /dev/null +++ b/postgrest_v12.2.8/docs/_diagrams/er/film.er @@ -0,0 +1,51 @@ +entity {font: "FreeSans"} +relationship {font: "FreeSerif"} + +[Films] +*id ++director_id +title +year +rating +language + +[Directors] +*id +first_name +last_name + +[Actors] +*id +first_name +last_name + +[Roles] +*+film_id +*+actor_id +character + +[Competitions] +*id +name +year + +[Nominations] +*+competition_id +*+film_id +rank + +[Technical_Specs] +*+film_id +runtime +camera +sound + +Roles *--1 Actors +Roles *--1 Films + +Nominations *--1 Competitions +Nominations *--1 Films + +Films *--1 Directors + +Films 1--1 Technical_Specs diff --git a/postgrest_v12.2.8/docs/_diagrams/er/orders.er b/postgrest_v12.2.8/docs/_diagrams/er/orders.er new file mode 100644 index 0000000..86f0805 --- /dev/null +++ b/postgrest_v12.2.8/docs/_diagrams/er/orders.er @@ -0,0 +1,20 @@ +# Build using: -e ortho + +entity {font: "FreeSans"} +relationship {font: "FreeMono"} + +[Addresses] +*id +name +city +state +postal_code + +[Orders] +*id +name ++billing_address_id ++shipping_address_id + +Orders *--1 Addresses +Orders *--1 Addresses diff --git a/postgrest_v12.2.8/docs/_diagrams/er/premieres.er b/postgrest_v12.2.8/docs/_diagrams/er/premieres.er new file mode 100644 index 0000000..6099e74 --- /dev/null +++ b/postgrest_v12.2.8/docs/_diagrams/er/premieres.er @@ -0,0 +1,16 @@ +entity {font: "FreeSans"} +relationship {font: "FreeMono"} + +[Premieres] +*id +location +date ++film_id + +[Films] +*id ++director_id +title +`...` + +Premieres *--1 Films diff --git a/postgrest_v12.2.8/docs/_diagrams/er/presidents.er b/postgrest_v12.2.8/docs/_diagrams/er/presidents.er new file mode 100644 index 0000000..ca7cf71 --- /dev/null +++ b/postgrest_v12.2.8/docs/_diagrams/er/presidents.er @@ -0,0 +1,12 @@ +# Build using: -e ortho + +entity {font: "FreeSans"} +relationship {font: "FreeMono"} + +[Presidents] +*id +first_name +last_name ++predecessor_id + +Presidents 1--? 
Presidents diff --git a/postgrest_v12.2.8/docs/_diagrams/er/users.er b/postgrest_v12.2.8/docs/_diagrams/er/users.er new file mode 100644 index 0000000..8ec0113 --- /dev/null +++ b/postgrest_v12.2.8/docs/_diagrams/er/users.er @@ -0,0 +1,18 @@ +# Build using: -e ortho + +entity {font: "FreeSans"} +relationship {font: "FreeMono"} + +[Users] +*id +first_name +last_name +username + +[Subscriptions] +*+subscriber_id +*+subscribed_id +type + +Users 1--* Subscriptions +Subscriptions *--1 Users diff --git a/postgrest_v12.2.8/docs/_diagrams/uml/arch.uml b/postgrest_v12.2.8/docs/_diagrams/uml/arch.uml new file mode 100644 index 0000000..d61a1a8 --- /dev/null +++ b/postgrest_v12.2.8/docs/_diagrams/uml/arch.uml @@ -0,0 +1,84 @@ +@startuml + +skinparam backgroundColor transparent + +package "PostgREST" { + () HTTP as HTTPAPI + HTTPAPI - [Auth] + [Auth] -r.> [ApiRequest] + [ApiRequest] -r.> [Plan] + [Plan] -r.> [Query] + [Query] - () "Connection Pool" : "\t" + [Plan] -u-> [Schema Cache]:uses + [Schema Cache] <- () Listener : reloads + + () HTTP as HTTPADMIN + [Admin] -r- () HTTPADMIN + [Config] -l- () CLI + + [Config] <-r~ Listener + + HTTPADMIN -[hidden]r- CLI + [Schema Cache] -l[hidden]- [Config] + [Schema Cache] -l[hidden]- [Admin] + [Schema Cache] -l[hidden]- CLI +} + + +database "PostgreSQL" { + node Authorization { + rectangle "Roles, GRANT, RLS" + } + node "API schema" as API { + rectangle "Functions, Views" + } + rectangle "Tables, extensions" as tbs + API -d- tbs + + API -l[hidden]- Authorization +} + +:user: +hexagon Proxy +:user: .r-> Proxy +HTTPAPI <.l- Proxy + +:operator: .d-> HTTPADMIN +:operator: .d-> CLI + + +PostgreSQL <.developer : "\t" +Listener -r.> "PostgreSQL" +"Connection Pool" -r.> "PostgreSQL" : "\t\t" + + +note bottom of Auth + Authenticates the user request +end note + +note bottom of ApiRequest + Parses the URL syntax +end note + +note bottom of Query + Generates the SQL +end note + +note top of Listener + LISTEN session +end note + +url of Admin is [[../references/admin_server.html#admin-server]] +url of API is [[../explanations/schema_isolation.html]] +url of Auth is [[../references/auth.html#authn]] +url of Authorization is [[../explanations/db_authz.html]] +url of CLI is [[../references/cli.html#cli]] +url of "Connection Pool" is [[../references/connection_pool.html]] +url of Config is [[../references/configuration.html#configuration]] +url of HTTPADMIN is [[https://aosabook.org/en/posa/warp.html]] +url of HTTPAPI is [[https://aosabook.org/en/posa/warp.html]] +url of Listener is [[../references/listener.html#listener]] +url of Proxy is [[../explanations/nginx.html]] +url of "Schema Cache" is [[../references/schema_cache.html#schema-cache]] + +@enduml diff --git a/postgrest_v12.2.8/docs/_diagrams/uml/dark/arch-dark.uml b/postgrest_v12.2.8/docs/_diagrams/uml/dark/arch-dark.uml new file mode 100644 index 0000000..a655c3a --- /dev/null +++ b/postgrest_v12.2.8/docs/_diagrams/uml/dark/arch-dark.uml @@ -0,0 +1,3 @@ +@startuml +!include ../arch.uml +@enduml diff --git a/postgrest_v12.2.8/docs/_diagrams/uml/dark/sch-iso-dark.uml b/postgrest_v12.2.8/docs/_diagrams/uml/dark/sch-iso-dark.uml new file mode 100644 index 0000000..df4220b --- /dev/null +++ b/postgrest_v12.2.8/docs/_diagrams/uml/dark/sch-iso-dark.uml @@ -0,0 +1,3 @@ +@startuml +!include ../sch-iso.uml +@enduml diff --git a/postgrest_v12.2.8/docs/_diagrams/uml/sch-iso.uml b/postgrest_v12.2.8/docs/_diagrams/uml/sch-iso.uml new file mode 100644 index 0000000..30fb808 --- /dev/null +++ 
b/postgrest_v12.2.8/docs/_diagrams/uml/sch-iso.uml @@ -0,0 +1,29 @@ +@startuml + +skinparam backgroundColor transparent +skinparam linetype ortho + +skinparam node { + backgroundColor transparent + borderThickness 1 +} + +database "PostgreSQL" { + node public { + rectangle tables_public as "tables" + } + + node extensions as "**extensions**" { + } + + node API as "api" { + rectangle vf_api as "views + functions" + } + + tables_public <-- vf_api + extensions <-- vf_api +} + +vf_api <-[thickness=3]-> () PostgREST + +@enduml diff --git a/postgrest_v12.2.8/docs/_static/arch-dark.svg b/postgrest_v12.2.8/docs/_static/arch-dark.svg new file mode 100644 index 0000000..f55c127 --- /dev/null +++ b/postgrest_v12.2.8/docs/_static/arch-dark.svg @@ -0,0 +1 @@ +PostgRESTPostgreSQLAuthorizationAPI schemaHTTPAuthApiRequestPlanQueryConnection PoolSchema CacheListenerHTTPAdminConfigCLITables, extensionsRoles, GRANT, RLSFunctions, ViewsuserProxyoperatordeveloperAuthenticates the user requestParses the URL syntaxGenerates the SQLLISTEN sessionusesreloads \ No newline at end of file diff --git a/postgrest_v12.2.8/docs/_static/arch.svg b/postgrest_v12.2.8/docs/_static/arch.svg new file mode 100644 index 0000000..b2005f2 --- /dev/null +++ b/postgrest_v12.2.8/docs/_static/arch.svg @@ -0,0 +1 @@ +PostgRESTPostgreSQLAuthorizationAPI schemaHTTPAuthApiRequestPlanQueryConnection PoolSchema CacheListenerHTTPAdminConfigCLITables, extensionsRoles, GRANT, RLSFunctions, ViewsuserProxyoperatordeveloperAuthenticates the user requestParses the URL syntaxGenerates the SQLLISTEN sessionusesreloads \ No newline at end of file diff --git a/postgrest_v12.2.8/docs/_static/boxoffice.png b/postgrest_v12.2.8/docs/_static/boxoffice.png new file mode 100644 index 0000000..87249a6 Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/boxoffice.png differ diff --git a/postgrest_v12.2.8/docs/_static/css/custom.css b/postgrest_v12.2.8/docs/_static/css/custom.css new file mode 100644 index 0000000..c71e0de --- /dev/null +++ b/postgrest_v12.2.8/docs/_static/css/custom.css @@ -0,0 +1,155 @@ +.wy-nav-content { + max-width: initial; +} + +#postgrest-documentation > h1 { + display: none; +} + +div.wy-menu.rst-pro { + display: none !important; +} + +div.highlight { + background: #fff !important; +} + +div.line-block { + margin-bottom: 0px !important; +} + +#sponsors { + text-align: center; +} + +#sponsors h2 { + text-align: left; +} + +#sponsors img{ + margin: 10px; + width: 13em; /* ".. 
image::" does not apply width properly to SVGs */ +} + +#thanks{ + text-align: center; +} + +#thanks img{ + margin: 10px; +} + +#thanks h2{ + text-align: left; +} + +#thanks p{ + text-align: left; +} + +#thanks ul{ + text-align: left; +} + +.image-container { + max-width: 800px; + display: block; + margin-left: auto; + margin-right: auto; + margin-bottom: 24px; +} + +.wy-table-responsive table td { + white-space: normal !important; +} + +.wy-table-responsive { + overflow: visible !important; +} + +#tutorials span.caption-text { + display: none; +} + +#references span.caption-text { + display: none; +} + +#explanations span.caption-text { + display: none; +} + +#how-tos span.caption-text { + display: none; +} + +#ecosystem span.caption-text { + display: none; +} + +#integrations span.caption-text { + display: none; +} + +#api span.caption-text { + display: none; +} + +/* Tweaks for dark mode from extension: sphinx-rtd-dark-theme */ + +html[data-theme="dark"] .highlight { + background-color: #17181c !important; +} + +html[data-theme="dark"] .sphinx-tabs-tab { + color: var(--dark-link-color); +} + +html[data-theme="dark"] .sphinx-tabs-panel { + border: 1px solid #404040; + border-top: 0; + background: #141414; +} + +html[data-theme="dark"] .sphinx-tabs-tab[aria-selected="true"] { + border: 1px solid #404040; + border-bottom: 1px solid #141414; + background-color: #141414; +} + +html[data-theme="dark"] [role="tablist"] { + border-bottom: 1px solid #404040; +} + +html[data-theme="dark"] .btn-neutral { + color: white !important; +} + +html[data-theme="dark"] .img-dark { + display: inline; +} + +html:not([data-theme="dark"]) .img-dark { + display: none; +} + +html[data-theme="dark"] .img-light { + display: none; +} + +html:not([data-theme="dark"]) .img-light { + display: inline; +} + +html[data-theme="dark"] .img-translucent img { + background-color: #cccccc; +} + +.img-translucent img { + transition: background-color 0.3s; + margin-bottom: 24px; +} + +.svg-container-md { + max-width: 400px; +} diff --git a/postgrest_v12.2.8/docs/_static/employees.png b/postgrest_v12.2.8/docs/_static/employees.png new file mode 100644 index 0000000..b21153c Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/employees.png differ diff --git a/postgrest_v12.2.8/docs/_static/empty.png b/postgrest_v12.2.8/docs/_static/empty.png new file mode 100644 index 0000000..99fabe4 Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/empty.png differ diff --git a/postgrest_v12.2.8/docs/_static/favicon.ico b/postgrest_v12.2.8/docs/_static/favicon.ico new file mode 100644 index 0000000..a9e16d3 Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/favicon.ico differ diff --git a/postgrest_v12.2.8/docs/_static/film.png b/postgrest_v12.2.8/docs/_static/film.png new file mode 100644 index 0000000..03b9b2b Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/film.png differ diff --git a/postgrest_v12.2.8/docs/_static/how-tos/htmx-demo.gif b/postgrest_v12.2.8/docs/_static/how-tos/htmx-demo.gif new file mode 100644 index 0000000..777b87d Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/how-tos/htmx-demo.gif differ diff --git a/postgrest_v12.2.8/docs/_static/how-tos/htmx-edit-delete.gif b/postgrest_v12.2.8/docs/_static/how-tos/htmx-edit-delete.gif new file mode 100644 index 0000000..6d5a1a7 Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/how-tos/htmx-edit-delete.gif differ diff --git a/postgrest_v12.2.8/docs/_static/how-tos/htmx-insert.gif 
b/postgrest_v12.2.8/docs/_static/how-tos/htmx-insert.gif new file mode 100644 index 0000000..e4031b0 Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/how-tos/htmx-insert.gif differ diff --git a/postgrest_v12.2.8/docs/_static/how-tos/htmx-simple.jpg b/postgrest_v12.2.8/docs/_static/how-tos/htmx-simple.jpg new file mode 100644 index 0000000..4c27689 Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/how-tos/htmx-simple.jpg differ diff --git a/postgrest_v12.2.8/docs/_static/orders.png b/postgrest_v12.2.8/docs/_static/orders.png new file mode 100644 index 0000000..0ac19aa Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/orders.png differ diff --git a/postgrest_v12.2.8/docs/_static/presidents.png b/postgrest_v12.2.8/docs/_static/presidents.png new file mode 100644 index 0000000..09c4e6f Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/presidents.png differ diff --git a/postgrest_v12.2.8/docs/_static/sch-iso-dark.svg b/postgrest_v12.2.8/docs/_static/sch-iso-dark.svg new file mode 100644 index 0000000..bf6c8a0 --- /dev/null +++ b/postgrest_v12.2.8/docs/_static/sch-iso-dark.svg @@ -0,0 +1 @@ +PostgreSQLpublicapitablesextensionsviews + functionsPostgREST \ No newline at end of file diff --git a/postgrest_v12.2.8/docs/_static/sch-iso.svg b/postgrest_v12.2.8/docs/_static/sch-iso.svg new file mode 100644 index 0000000..0a54cba --- /dev/null +++ b/postgrest_v12.2.8/docs/_static/sch-iso.svg @@ -0,0 +1 @@ +PostgreSQLpublicapitablesextensionsviews + functionsPostgREST \ No newline at end of file diff --git a/postgrest_v12.2.8/docs/_static/security-anon-choice.png b/postgrest_v12.2.8/docs/_static/security-anon-choice.png new file mode 100644 index 0000000..ea02a23 Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/security-anon-choice.png differ diff --git a/postgrest_v12.2.8/docs/_static/security-roles.png b/postgrest_v12.2.8/docs/_static/security-roles.png new file mode 100644 index 0000000..f45ba8e Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/security-roles.png differ diff --git a/postgrest_v12.2.8/docs/_static/tuts/tut0-request-flow.png b/postgrest_v12.2.8/docs/_static/tuts/tut0-request-flow.png new file mode 100644 index 0000000..24f2986 Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/tuts/tut0-request-flow.png differ diff --git a/postgrest_v12.2.8/docs/_static/tuts/tut1-jwt-io.png b/postgrest_v12.2.8/docs/_static/tuts/tut1-jwt-io.png new file mode 100644 index 0000000..488b87f Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/tuts/tut1-jwt-io.png differ diff --git a/postgrest_v12.2.8/docs/_static/users.png b/postgrest_v12.2.8/docs/_static/users.png new file mode 100644 index 0000000..d94f097 Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/users.png differ diff --git a/postgrest_v12.2.8/docs/_static/win-err-dialog.png b/postgrest_v12.2.8/docs/_static/win-err-dialog.png new file mode 100644 index 0000000..e60a71c Binary files /dev/null and b/postgrest_v12.2.8/docs/_static/win-err-dialog.png differ diff --git a/postgrest_v12.2.8/docs/conf.py b/postgrest_v12.2.8/docs/conf.py new file mode 100644 index 0000000..37f6470 --- /dev/null +++ b/postgrest_v12.2.8/docs/conf.py @@ -0,0 +1,319 @@ +# -*- coding: utf-8 -*- +# +# PostgREST documentation build configuration file, created by +# sphinx-quickstart on Sun Oct 9 16:53:00 2016. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. 
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+# sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    "sphinx_tabs.tabs",
+    "sphinx_copybutton",
+    "sphinxext.opengraph",
+    "sphinx_rtd_dark_mode",
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+# source_suffix = ['.rst', '.md']
+source_suffix = ".rst"
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = "index"
+
+# This is overridden by readthedocs with the version tag anyway
+version = "12.2"
+# To avoid repetition we set this to an empty string.
+release = ""
+
+# General information about the project.
+project = "PostgREST " + version
+author = "Joe Nelson, Steve Chavez"
+copyright = "2017, " + author
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = "en"
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# These patterns also affect html_static_path and html_extra_path
+exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "shared/*.rst"]
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = "sphinx_rtd_theme"
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.
For a list of options available for each theme, see the +# documentation. +html_theme_options = {"display_version": False} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. +# "<project> v<release> documentation" by default. +# html_title = u'PostgREST v0.4.0.0' + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (relative to this directory) to use as a favicon of +# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +html_favicon = "_static/favicon.ico" + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not None, a 'Last updated on:' timestamp is inserted at every page +# bottom, using the given strftime format. +# The empty string is equivalent to '%b %d, %Y'. +# html_last_updated_fmt = None + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a <link> tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# 'ja' uses this config value. +# 'zh' user can custom change `jieba` dictionary path. +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. 
+htmlhelp_basename = "PostgRESTdoc" + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', + # Latex figure (float) alignment + #'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, "PostgREST.tex", "PostgREST Documentation", author, "manual"), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(master_doc, "postgrest", "PostgREST Documentation", [author], 1)] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "PostgREST", + "PostgREST Documentation", + author, + "PostgREST", + "REST API for any PostgreSQL database", + "Web", + ), +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. 
+# texinfo_no_detailmenu = False
+
+# -- Custom setup ---------------------------------------------------------
+
+
+def setup(app):
+    app.add_css_file("css/custom.css")
+
+
+# taken from https://github.com/sphinx-doc/sphinx/blob/82dad44e5bd3776ecb6fd8ded656bc8151d0e63d/sphinx/util/requests.py#L42
+user_agent = "Mozilla/5.0 (X11; Linux x86_64; rv:25.0) Gecko/20100101 Firefox/25.0"
+
+linkcheck_ignore = [
+    r"https://www.patreon.com/postgrest",
+    r"https://blog.frankel.ch/poor-man-api",
+]
+
+# sphinx-tabs configuration
+sphinx_tabs_disable_tab_closing = True
+
+# sphinx_rtd_dark_mode configuration
+default_dark_mode = False
+
+# sphinxext-opengraph configuration
+
+ogp_image = "_images/logo.png"
+ogp_use_first_image = True
+ogp_enable_meta_description = True
+ogp_description_length = 300
+
+## RTD sets html_baseurl, ensures we use the correct env for canonical URLs
+## Useful to generate correct meta tags for Open Graph
+## Refs: https://github.com/readthedocs/readthedocs.org/issues/10226, https://github.com/urllib3/urllib3/pull/3064
+html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "/")
diff --git a/postgrest_v12.2.8/docs/ecosystem.rst b/postgrest_v12.2.8/docs/ecosystem.rst
new file mode 100644
index 0000000..f3a11a5
--- /dev/null
+++ b/postgrest_v12.2.8/docs/ecosystem.rst
@@ -0,0 +1,98 @@
+.. _community_tutorials:
+
+Community Tutorials
+-------------------
+
+* `Building a Contacts List with PostgREST and Vue.js <https://www.youtube.com/watch?v=iHtsALtD5-U>`_ -
+  In this video series, DigitalOcean shows how to build and deploy an Nginx + PostgREST (using a managed PostgreSQL database) + Vue.js webapp in an Ubuntu server droplet.
+
+* `PostgREST + Auth0: Create REST API in minutes, and add social login using Auth0 <https://samkhawase.com/blog/postgrest/>`_ - A step-by-step tutorial showing how to dockerize a PostgREST service and integrate it with Auth0.
+
+* `PostgREST + PostGIS API tutorial in 5 minutes <https://gis-ops.com/postgrest-postgis-api-tutorial-geospatial-api-in-5-minutes/>`_ -
+  In this tutorial, GIS • OPS shows how to perform PostGIS calculations through the PostgREST :ref:`functions` interface.
+
+* `"CodeLess" backend using postgres, postgrest and oauth2 authentication with keycloak <https://www.mathieupassenaud.fr/codeless_backend/>`_ -
+  A step-by-step tutorial for using PostgREST with Keycloak (hosted on a managed service).
+
+* `How PostgreSQL triggers work when called with a PostgREST PATCH HTTP request <https://blog.fgribreau.com/2020/11/how-postgresql-triggers-works-when.html>`_ - A tutorial showing how the old and new values are set (or not) when doing a PATCH request to PostgREST.
+
+* `REST Data Service on YugabyteDB / PostgreSQL <https://dev.to/yugabyte/rest-data-service-on-yugabytedb-postgresql-5f2h>`_
+
+* `Build data-driven applications with Workers and PostgreSQL <https://developers.cloudflare.com/workers/tutorials/postgres/>`_ - A tutorial on how to integrate with PostgREST and PostgreSQL using Cloudflare Workers.
+
+* `A poor man's API <https://blog.frankel.ch/poor-man-api>`_ - Shows how to integrate PostgREST with Apache APISIX as an alternative to Nginx.
+
+.. * `Accessing a PostgreSQL database in Godot 4 via PostgREST <https://peterkingsbury.com/2022/08/16/godot-postgresql-postgrest/>`_
+
+.. 
_templates: + +Templates +--------- + +* `compose-postgrest <https://github.com/mattddowney/compose-postgrest>`_ - docker-compose setup with Nginx and HTML example +* `svelte-postgrest-template <https://github.com/guyromm/svelte-postgrest-template>`_ - Svelte/SvelteKit, PostgREST, EveryLayout and social auth + +.. _eco_example_apps: + +Example Apps +------------ + +* `delibrium-postgrest <https://gitlab.com/delibrium/delibrium-postgrest/>`_ - example school API and front-end in Vue.js +* `ETH-transactions-storage <https://github.com/Adamant-im/ETH-transactions-storage>`_ - indexer for Ethereum to get transaction list by ETH address +* `general <https://github.com/PierreRochard/general>`_ - example auth back-end +* `guild-operators <https://github.com/cardano-community/koios-artifacts/tree/main/files/grest>`_ - example queries and functions that the Cardano Community uses for their Guild Operators' Repository +* `PostGUI <https://github.com/priyank-purohit/PostGUI>`_ - React Material UI admin panel +* `prospector <https://github.com/sfcta/prospector>`_ - data warehouse and visualization platform + +.. _devops: + +DevOps +------ + +* `cloudgov-demo-postgrest <https://github.com/GSA/cloudgov-demo-postgrest>`_ - demo for a federally-compliant REST API on cloud.gov +* `cloudstark/helm-charts <https://github.com/cloudstark/helm-charts/tree/master/postgrest>`_ - helm chart to deploy PostgREST to a Kubernetes cluster via a Deployment and Service +* `cyril-sabourault/postgrest-cloud-run <https://github.com/cyril-sabourault/postgrest-cloud-run>`_ - expose a PostgreSQL database on Cloud SQL using Cloud Run +* `eyberg/postgrest <https://repo.ops.city/v2/packages/eyberg/postgrest/10.1.1/x86_64/show>`_ - run PostgREST as a Nanos unikernel +* `jbkarle/postgrest <https://github.com/jbkarle/postgrest>`_ - helm chart with a demo database for development and test purposes + +.. _eco_external_notification: + +External Notification +--------------------- + +These are PostgreSQL bridges that propagate LISTEN/NOTIFY to external queues for further processing. This allows functions to initiate actions outside the database such as sending emails. + +* `pg-notify-stdout <https://github.com/mkleczek/pg-notify-stdout>`_ - writes notifications to standard output (use in shell scripts etc.) +* `pg-notify-webhook <https://github.com/vbalasu/pg-notify-webhook>`_ - trigger webhooks from PostgreSQL's LISTEN/NOTIFY +* `pgsql-listen-exchange <https://github.com/gmr/pgsql-listen-exchange>`_ - RabbitMQ +* `postgres-websockets <https://github.com/diogob/postgres-websockets>`_ - expose web sockets for PostgreSQL's LISTEN/NOTIFY +* `postgresql2websocket <https://github.com/frafra/postgresql2websocket>`_ - Websockets + + +.. _eco_extensions: + +Extensions +---------- + +* `aiodata <https://github.com/Exahilosys/aiodata>`_ - Python, event-based proxy and caching client. +* `pg-safeupdate <https://github.com/eradman/pg-safeupdate>`_ - prevent full-table updates or deletes +* `postgrest-node <https://github.com/seveibar/postgrest-node>`_ - Run a PostgREST server in Node.js via npm module +* `PostgREST-writeAPI <https://github.com/ppKrauss/PostgREST-writeAPI>`_ - generate Nginx rewrite rules to fit an OpenAPI spec + +.. 
_clientside_libraries:
+
+Client-Side Libraries
+---------------------
+
+* `postgrest-csharp <https://github.com/supabase-community/postgrest-csharp>`_ - C#
+* `postgrest-dart <https://github.com/supabase/postgrest-dart>`_ - Dart
+* `postgrest-ex <https://github.com/supabase-community/postgrest-ex>`_ - Elixir
+* `postgrest-go <https://github.com/supabase-community/postgrest-go>`_ - Go
+* `postgrest-js <https://github.com/supabase/postgrest-js>`_ - TypeScript/JavaScript
+* `postgrest-kt <https://github.com/supabase-community/postgrest-kt>`_ - Kotlin
+* `postgrest-py <https://github.com/supabase-community/postgrest-py>`_ - Python
+* `postgrest-rs <https://github.com/supabase-community/postgrest-rs>`_ - Rust
+* `postgrest-swift <https://github.com/supabase-community/postgrest-swift>`_ - Swift
+* `redux-postgrest <https://github.com/andytango/redux-postgrest>`_ - TypeScript/JS, client integrated with (React) Redux.
+* `vue-postgrest <https://github.com/technowledgy/vue-postgrest>`_ - Vue.js
+
diff --git a/postgrest_v12.2.8/docs/explanations/architecture.rst b/postgrest_v12.2.8/docs/explanations/architecture.rst
new file mode 100644
index 0000000..23a3775
--- /dev/null
+++ b/postgrest_v12.2.8/docs/explanations/architecture.rst
@@ -0,0 +1,95 @@
+Architecture
+############
+
+This page describes the architecture of PostgREST.
+
+Bird's Eye View
+===============
+
+You can click on the components to navigate to their respective documentation.
+
+  .. container:: img-dark
+
+    .. See https://github.com/sphinx-doc/sphinx/issues/2240#issuecomment-187366626
+
+    .. raw:: html
+
+      <object width="100%" data="../_static/arch-dark.svg" type="image/svg+xml"></object>
+
+  .. container:: img-light
+
+    .. raw:: html
+
+      <object width="100%" data="../_static/arch.svg" type="image/svg+xml"></object>
+
+
+Code Map
+========
+
+This section briefly describes the most important modules.
+
+Main
+----
+
+The starting point of the program is `Main.hs <https://github.com/PostgREST/postgrest/blob/main/main/Main.hs>`_.
+
+CLI
+---
+
+Main then calls `CLI.hs <https://github.com/PostgREST/postgrest/blob/main/src/PostgREST/CLI.hs>`_, which is in charge of :ref:`cli`.
+
+App
+---
+
+`App.hs <https://github.com/PostgREST/postgrest/blob/main/src/PostgREST/App.hs>`_ is then in charge of composing the different modules.
+
+Auth
+----
+
+`Auth.hs <https://github.com/PostgREST/postgrest/blob/main/src/PostgREST/Auth.hs>`_ is in charge of :ref:`authn`.
+
+Api Request
+-----------
+
+`ApiRequest.hs <https://github.com/PostgREST/postgrest/blob/main/src/PostgREST/ApiRequest.hs>`_ is in charge of parsing the URL query string (following PostgREST syntax), the request headers, and the request body.
+
+A request might be rejected at this level if it's invalid, for example when it provides an unknown media type to PostgREST or uses an unknown HTTP method.
+
+Plan
+----
+
+Using the Schema Cache, `Plan.hs <https://github.com/PostgREST/postgrest/blob/main/src/PostgREST/Plan.hs>`_ fills in out-of-band SQL details (like an ``ON CONFLICT (pk)`` clause) required to complete the user request.
+
+A request might be rejected at this level if it's invalid, for example when it does resource embedding on a nonexistent resource.
+
+Query
+-----
+
+`Query.hs <https://github.com/PostgREST/postgrest/blob/main/src/PostgREST/Query.hs>`_ generates the SQL queries (parametrized and prepared) required to satisfy the user request.
+
+Only at this stage might a connection from the pool be used.
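+
+As a rough illustration, a request like ``GET /people?select=name&age=gte.18`` turns into a parametrized statement of this general shape (a simplified sketch, not the exact SQL PostgREST emits):
+
+.. code-block:: postgres
+
+  -- $1 is bound to 18 when the prepared statement is executed
+  select "people"."name"
+  from "people"
+  where "people"."age" >= $1;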
+
+Schema Cache
+------------
+
+`SchemaCache.hs <https://github.com/PostgREST/postgrest/blob/main/src/PostgREST/SchemaCache.hs>`_ is in charge of :ref:`schema_cache`.
+
+Config
+------
+
+`Config.hs <https://github.com/PostgREST/postgrest/blob/main/src/PostgREST/Config.hs>`_ is in charge of :ref:`configuration`.
+
+Admin
+-----
+
+`Admin.hs <https://github.com/PostgREST/postgrest/blob/main/src/PostgREST/Admin.hs>`_ is in charge of the :ref:`admin_server`.
+
+HTTP
+----
+
+The HTTP server is provided by `Warp <https://aosabook.org/en/posa/warp.html>`_.
+
+Listener
+--------
+
+`Listener.hs <https://github.com/PostgREST/postgrest/blob/main/src/PostgREST/Listener.hs>`_ is in charge of the :ref:`listener`.
diff --git a/postgrest_v12.2.8/docs/explanations/db_authz.rst b/postgrest_v12.2.8/docs/explanations/db_authz.rst
new file mode 100644
index 0000000..259faf4
--- /dev/null
+++ b/postgrest_v12.2.8/docs/explanations/db_authz.rst
@@ -0,0 +1,198 @@
+.. _db_authz:
+
+Database Authorization
+######################
+
+Database authorization is the process of granting and verifying database access permissions. PostgreSQL manages permissions using the concept of roles.
+
+Users and Groups
+================
+
+A role can be thought of as either a database user, or a group of database users, depending on how the role is set up.
+
+Roles for Each Web User
+-----------------------
+
+PostgREST can accommodate either viewpoint. If you treat a role as a single user, then the :ref:`jwt_impersonation` does most of what you need. When an authenticated user makes a request, PostgREST will switch into the database role for that user, which, in addition to restricting queries, is available to SQL through the :code:`current_user` variable.
+
+You can use row-level security to flexibly restrict visibility and access for the current user. Here is an `example <https://www.2ndquadrant.com/en/blog/application-users-vs-row-level-security/>`_ from Tomas Vondra: a chat table storing messages sent between users. Users can insert rows into it to send messages to other users, and query it to see messages sent to them by other users.
+
+.. code-block:: postgres
+
+  CREATE TABLE chat (
+    message_uuid    UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+    message_time    TIMESTAMP NOT NULL DEFAULT now(),
+    message_from    NAME      NOT NULL DEFAULT current_user,
+    message_to      NAME      NOT NULL,
+    message_subject VARCHAR(64) NOT NULL,
+    message_body    TEXT
+  );
+
+  ALTER TABLE chat ENABLE ROW LEVEL SECURITY;
+
+We want to enforce a policy that ensures a user can see only those messages sent by them or intended for them. We also want to prevent a user from forging the ``message_from`` column with another person's name.
+
+PostgreSQL allows us to set this policy with row-level security:
+
+.. code-block:: postgres
+
+  CREATE POLICY chat_policy ON chat
+    USING ((message_to = current_user) OR (message_from = current_user))
+    WITH CHECK (message_from = current_user);
+
+Anyone accessing the generated API endpoint for the chat table will see exactly the rows they should, without our needing custom imperative server-side coding.
+
+.. warning::
+
+  Roles are namespaced per-cluster rather than per-database, so they may be prone to collision.
+
+Web Users Sharing Role
+----------------------
+
+Alternately, database roles can represent groups instead of (or in addition to) individual users. You may choose that all signed-in users for a web app share the role ``webuser``. You can distinguish individual users by including extra claims in the JWT such as email.
+
+.. code:: json
+
+  {
+    "role": "webuser",
+    "email": "john@doe.com"
+  }
+
+SQL code can access claims through PostgREST :ref:`tx_settings`. For instance, to get the email claim, use this expression:
+
+.. code:: sql
+
+  current_setting('request.jwt.claims', true)::json->>'email';
+
+.. note::
+
+  For PostgreSQL < 14:
+
+  .. code:: sql
+
+    current_setting('request.jwt.claim.email', true);
+
+This allows JWT generation services to include extra information and your database code to react to it. For instance, the RLS example could be modified to use this ``current_setting`` rather than ``current_user``. The second ``true`` argument tells ``current_setting`` to return NULL if the setting is missing from the current configuration.
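+
+As a minimal sketch of that modification (it assumes messages are addressed by the email carried in the claim; adapt the column names and types to your schema):
+
+.. code-block:: postgres
+
+  CREATE POLICY chat_policy_by_email ON chat
+    USING (message_to   = current_setting('request.jwt.claims', true)::json->>'email'
+        OR message_from = current_setting('request.jwt.claims', true)::json->>'email')
+    WITH CHECK (message_from = current_setting('request.jwt.claims', true)::json->>'email');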
+
+Hybrid User-Group Roles
+-----------------------
+
+You can mix the group and individual role policies. For instance, we could still have a webuser role and individual users which inherit from it:
+
+.. code-block:: postgres
+
+  CREATE ROLE webuser NOLOGIN;
+  -- grant this role access to certain tables etc
+
+  CREATE ROLE user000 NOLOGIN;
+  GRANT webuser TO user000;
+  -- now user000 can do whatever webuser can
+
+  GRANT user000 TO authenticator;
+  -- allow authenticator to switch into user000 role
+  -- (the role itself has nologin)
+
+Schemas
+=======
+
+You must explicitly allow roles to access the exposed schemas in :ref:`db-schemas`.
+
+.. code-block:: postgres
+
+  GRANT USAGE ON SCHEMA api TO webuser;
+
+Tables
+======
+
+To let web users access tables, you must grant them privileges for the operations you want them to perform.
+
+.. code-block:: postgres
+
+  GRANT
+    SELECT
+    , INSERT
+    , UPDATE(message_body)
+    , DELETE
+  ON chat TO webuser;
+
+You can also choose on which table columns the operation is valid. In the above example, the web user can only update the ``message_body`` column.
+
+.. _func_privs:
+
+Functions
+=========
+
+By default, when a function is created, the privilege to execute it is not restricted by role. The function access is ``PUBLIC``, i.e. executable by all roles (more details at the `PostgreSQL Privileges page <https://www.postgresql.org/docs/current/ddl-priv.html>`_). This is not ideal for an API schema. To disable this behavior, you can run the following SQL statement:
+
+.. code-block:: postgres
+
+  ALTER DEFAULT PRIVILEGES REVOKE EXECUTE ON FUNCTIONS FROM PUBLIC;
+
+This will change the privileges for all functions created in the future in all schemas. Currently there is no way to limit it to a single schema. In our opinion it's a good practice anyway.
+
+.. note::
+
+  It is however possible to limit the effect of this clause only to functions you define. You can put the above statement at the beginning of the API schema definition, and then at the end reverse it with:
+
+  .. code-block:: postgres
+
+    ALTER DEFAULT PRIVILEGES GRANT EXECUTE ON FUNCTIONS TO PUBLIC;
+
+  This will work because the :code:`alter default privileges` statement has effect on functions created *after* it is executed. See `PostgreSQL alter default privileges <https://www.postgresql.org/docs/current/sql-alterdefaultprivileges.html>`_ for more details.
+
+After that, you'll need to grant EXECUTE privileges on functions explicitly:
+
+.. code-block:: postgres
+
+  GRANT EXECUTE ON FUNCTION login TO anonymous;
+  GRANT EXECUTE ON FUNCTION signup TO anonymous;
+
+You can also grant execute on all functions in a schema to a higher privileged role:
+
+.. code-block:: postgres
+
+  GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA api TO webuser;
+
+Security definer
+----------------
+
+A function is executed with the privileges of the user who calls it. This means that the user has to have all permissions to do the operations the function performs.
+If the function accesses private database objects, your :ref:`API roles <roles>` won't be able to successfully execute the function.
+
+Another option is to define the function with the :code:`SECURITY DEFINER` option. Then only one permission check will take place, the permission to call the function, and the operations in the function will have the authority of the user who owns the function itself.
+
+.. code-block:: postgres
+
+  -- login as a user which has privileges on the private schemas
+
+  -- create a sample function
+  create or replace function login(email text, pass text, out token text) as $$
+  declare
+    _role name;
+  begin
+    -- access to a private schema called 'auth'
+    select auth.user_role(email, pass) into _role;
+    -- other operations
+    -- ...
+  end;
+  $$ language plpgsql security definer;
+
+Note the ``SECURITY DEFINER`` keywords at the end of the function. See the `PostgreSQL documentation <https://www.postgresql.org/docs/current/sql-createfunction.html#SQL-CREATEFUNCTION-SECURITY>`_ for more details.
+
+Views
+=====
+
+Views are invoked with the privileges of the view owner, much like functions with the ``SECURITY DEFINER`` option. When created by a SUPERUSER role, all `row-level security <https://www.postgresql.org/docs/current/ddl-rowsecurity.html>`_ policies will be bypassed.
+
+If you're on PostgreSQL >= 15, this behavior can be changed by specifying the ``security_invoker`` option.
+
+.. code-block:: postgres
+
+  CREATE VIEW sample_view WITH (security_invoker = true) AS
+  SELECT * FROM sample_table;
+
+On PostgreSQL < 15, you can create a non-SUPERUSER role and make this role the view's owner.
+
+.. code-block:: postgres
+
+  CREATE ROLE api_views_owner NOSUPERUSER NOBYPASSRLS;
+  ALTER VIEW sample_view OWNER TO api_views_owner;
+
diff --git a/postgrest_v12.2.8/docs/explanations/install.rst b/postgrest_v12.2.8/docs/explanations/install.rst
new file mode 100644
index 0000000..c4d1518
--- /dev/null
+++ b/postgrest_v12.2.8/docs/explanations/install.rst
@@ -0,0 +1,220 @@
+.. _install:
+
+Installation
+############
+
+The release page has `pre-compiled binaries for macOS, Windows, Linux and FreeBSD <https://github.com/PostgREST/postgrest/releases/latest>`_.
+The Linux binary is a static executable that can be run on any Linux distribution.
+
+You can also use your OS package manager.
+
+.. include:: ../shared/installation.rst
+
+.. _pg-dependency:
+
+Supported PostgreSQL versions
+=============================
+
+=============== =================================
+**Supported**   PostgreSQL >= 9.6
+=============== =================================
+
+PostgREST works with all PostgreSQL versions starting from 9.6.
+
+.. note::
+
+  Support for PostgreSQL versions 9.6, 10 and 11 is deprecated. From this version onwards, PostgREST will only support non-end-of-life PostgreSQL versions.
+
+  See https://www.postgresql.org/support/versioning/.
+
+Running PostgREST
+=================
+
+If you downloaded PostgREST from the release page, first extract the compressed file to obtain the executable.
+
+.. code-block:: bash
+
+  # For UNIX platforms
+  tar Jxf postgrest-[version]-[platform].tar.xz
+
+  # On Windows you should unzip the file
+
+Now you can run PostgREST with the :code:`--help` flag to see usage instructions:
+
+.. 
code-block:: bash + + # Running postgrest binary + ./postgrest --help + + # Running postgrest installed from a package manager + postgrest --help + + # You should see a usage help message + +The PostgREST server reads a configuration file as its only argument: + +.. code:: bash + + postgrest /path/to/postgrest.conf + + # You can also generate a sample config file with + # postgrest -e > postgrest.conf + # You'll need to edit this file and remove the usage parts for postgrest to read it + +For a complete reference of the configuration file, see :ref:`configuration`. + +.. note:: + + If you see a dialog box like this on Windows, it may be that the :code:`pg_config` program is not in your system path. + + .. image:: ../_static/win-err-dialog.png + + It usually lives in :code:`C:\Program Files\PostgreSQL\<version>\bin`. See this `article <https://www.howtogeek.com/118594/how-to-edit-your-system-path-for-easy-command-line-access/>`_ about how to modify the system path. + + To test that the system path is set correctly, run ``pg_config`` from the command line. You should see it output a list of paths. + +Docker +====== + +You can get the `official PostgREST Docker image <https://hub.docker.com/r/postgrest/postgrest>`_ with: + +.. code-block:: bash + + docker pull postgrest/postgrest + +To configure the container image, use :ref:`env_variables_config`. + +There are two ways to run the PostgREST container: with an existing external database, or through docker-compose. + +Containerized PostgREST with native PostgreSQL +---------------------------------------------- + +The first way to run PostgREST in Docker is to connect it to an existing native database on the host. + +.. code-block:: bash + + # Run the server + docker run --rm --net=host \ + -e PGRST_DB_URI="postgres://app_user:password@localhost/postgres" \ + postgrest/postgrest + +The database connection string above is just an example. Adjust the role and password as necessary. You may need to edit PostgreSQL's :code:`pg_hba.conf` to grant the user local login access. + +.. note:: + + Docker on Mac does not support the :code:`--net=host` flag. Instead you'll need to create an IP address alias to the host. Requests for the IP address from inside the container are unable to resolve and fall back to resolution by the host. + + .. code-block:: bash + + sudo ifconfig lo0 10.0.0.10 alias + + You should then use 10.0.0.10 as the host in your database connection string. Also remember to include the IP address in the :code:`listen_address` within postgresql.conf. For instance: + + .. code-block:: bash + + listen_addresses = 'localhost,10.0.0.10' + + You might also need to add a new IPv4 local connection within pg_hba.conf. For instance: + + .. code-block:: bash + + host all all 10.0.0.10/32 trust + + The docker command will then look like this: + + .. code-block:: bash + + # Run the server + docker run --rm -p 3000:3000 \ + -e PGRST_DB_URI="postgres://app_user:password@10.0.0.10/postgres" \ + postgrest/postgrest + +.. _pg-in-docker: + +Containerized PostgREST *and* db with docker-compose +---------------------------------------------------- + +To avoid having to install the database at all, you can run both it and the server in containers and link them together with docker-compose. Use this configuration: + +.. 
code-block:: yaml + + # docker-compose.yml + + version: '3' + services: + server: + image: postgrest/postgrest + ports: + - "3000:3000" + environment: + PGRST_DB_URI: postgres://app_user:password@db:5432/app_db + PGRST_OPENAPI_SERVER_PROXY_URI: http://127.0.0.1:3000 + depends_on: + - db + db: + image: postgres + ports: + - "5432:5432" + environment: + POSTGRES_DB: app_db + POSTGRES_USER: app_user + POSTGRES_PASSWORD: password + # Uncomment this if you want to persist the data. + # volumes: + # - "./pgdata:/var/lib/postgresql/data" + +Go into the directory where you saved this file and run :code:`docker-compose up`. You will see the logs of both the database and PostgREST, and be able to access the latter on port 3000. + +If you want to have a visual overview of your API in your browser you can add swagger-ui to your :code:`docker-compose.yml`: + +.. code-block:: yaml + + # in services: + swagger: + image: swaggerapi/swagger-ui + ports: + - "8080:8080" + expose: + - "8080" + environment: + API_URL: http://localhost:3000/ + +With this you can see the swagger-ui in your browser on port 8080. + +.. _build_source: + +Building from Source +==================== + +When a pre-built binary does not exist for your system you can build the project from source. + +You can build PostgREST from source with `Stack <https://github.com/commercialhaskell/stack>`_. It will install any necessary Haskell dependencies on your system. + +* `Install Stack <https://docs.haskellstack.org/en/stable/#how-to-install-stack>`_ for your platform +* Install Library Dependencies + + ===================== ======================================= + Operating System Dependencies + ===================== ======================================= + Ubuntu/Debian libpq-dev, libgmp-dev, zlib1g-dev + CentOS/Fedora/Red Hat postgresql-devel, zlib-devel, gmp-devel + BSD postgresql12-client + macOS libpq, gmp + ===================== ======================================= + +* Build and install binary + + .. code-block:: bash + + git clone https://github.com/PostgREST/postgrest.git + cd postgrest + + # adjust local-bin-path to taste + stack build --install-ghc --copy-bins --local-bin-path /usr/local/bin + +.. note:: + + - If building fails and your system has less than 1GB of memory, try adding a swap file. + - `--install-ghc` flag is only needed for the first build and can be omitted in the subsequent builds. + +* Check that the server is installed: :code:`postgrest --help`. diff --git a/postgrest_v12.2.8/docs/explanations/nginx.rst b/postgrest_v12.2.8/docs/explanations/nginx.rst new file mode 100644 index 0000000..6adb8e9 --- /dev/null +++ b/postgrest_v12.2.8/docs/explanations/nginx.rst @@ -0,0 +1,102 @@ +.. _nginx: + +Nginx +===== + +PostgREST is a fast way to construct a RESTful API. Its default behavior is great for scaffolding in development. When it's time to go to production it works great too, as long as you take precautions. +PostgREST is a small sharp tool that focuses on performing the API-to-database mapping. We rely on a reverse proxy like Nginx for additional safeguards. + +The first step is to create an Nginx configuration file that proxies requests to an underlying PostgREST server. + +.. code-block:: nginx + + http { + # ... + # upstream configuration + upstream postgrest { + server localhost:3000; + } + # ... + server { + # ... 
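+        # (optional) to reuse upstream connections under load, you can add
+        # "keepalive 64;" inside the upstream block above; this pairs with
+        # the proxy_http_version and Connection settings below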
+        # expose to the outside world
+        location /api/ {
+          default_type  application/json;
+          proxy_hide_header Content-Location;
+          add_header Content-Location  /api/$upstream_http_content_location;
+          # use HTTP/1.1 with a cleared Connection header so upstream
+          # connections can be kept alive
+          proxy_set_header  Connection "";
+          proxy_http_version 1.1;
+          proxy_pass http://postgrest/;
+        }
+        # ...
+      }
+    }
+
+.. note::
+
+  For Ubuntu, if you already installed nginx through :code:`apt`, you can add this to the config file in
+  :code:`/etc/nginx/sites-enabled/default`.
+
+.. _https:
+
+HTTPS
+-----
+
+PostgREST aims to do one thing well: add an HTTP interface to a PostgreSQL database. To keep the code small and focused we do not implement HTTPS. Use a reverse proxy such as NGINX to add this, `here's how <https://nginx.org/en/docs/http/configuring_https_servers.html>`_.
+
+Rate Limiting
+-------------
+
+Nginx supports "leaky bucket" rate limiting (see `official docs <https://nginx.org/en/docs/http/ngx_http_limit_req_module.html>`_). Using standard Nginx configuration, routes can be grouped into *request zones* for rate limiting. For instance we can define a zone for login attempts:
+
+.. code-block:: nginx
+
+  limit_req_zone $binary_remote_addr zone=login:10m rate=1r/s;
+
+This creates a shared memory zone called "login" to store a log of IP addresses that access the rate-limited URLs. The space reserved, 10 MB (:code:`10m`), will give us enough space to store a history of 160k requests. We have chosen to allow only one request per second (:code:`1r/s`).
+
+Next we apply the zone to certain routes, like a hypothetical function called :code:`login`.
+
+.. code-block:: nginx
+
+  location /rpc/login/ {
+    # apply rate limiting
+    limit_req zone=login burst=5;
+  }
+
+The burst argument tells Nginx to start dropping requests if more than five queue up from a specific IP.
+
+Nginx rate limiting is general and indiscriminate. To rate limit each authenticated request individually you will need to add logic in a :ref:`Custom Validation <custom_validation>` function.
+
+Alternate URL Structure
+-----------------------
+
+As discussed in :ref:`singular_plural`, there are no special URL forms for singular resources in PostgREST, only operators for filtering. Thus there are no URLs like :code:`/people/1`. It would be specified instead as
+
+.. code-block:: bash
+
+  curl "http://localhost:3000/people?id=eq.1" \
+    -H "Accept: application/vnd.pgrst.object+json"
+
+This allows compound primary keys and makes the intent for singular response independent of a URL convention.
+
+Nginx rewrite rules allow you to simulate the familiar URL convention. The following example adds a rewrite rule for all table endpoints, but you'll want to restrict it to those tables that have a numeric simple primary key named "id."
+
+.. code-block:: nginx
+
+  # support /endpoint/:id url style
+  location ~ ^/([a-z_]+)/([0-9]+) {
+
+    # make the response singular
+    proxy_set_header Accept 'application/vnd.pgrst.object+json';
+
+    # assuming an upstream named "postgrest"
+    proxy_pass http://postgrest/$1?id=eq.$2;
+
+  }
+
+.. TODO
+.. Administration
+.. API Versioning
+.. HTTP Caching
+.. Upgrading
diff --git a/postgrest_v12.2.8/docs/explanations/schema_isolation.rst b/postgrest_v12.2.8/docs/explanations/schema_isolation.rst
new file mode 100644
index 0000000..a70f34e
--- /dev/null
+++ b/postgrest_v12.2.8/docs/explanations/schema_isolation.rst
@@ -0,0 +1,25 @@
+.. 
_schema_isolation: + +Schema Isolation +================ + +A PostgREST instance exposes all the tables, views, and functions of a single `PostgreSQL schema <https://www.postgresql.org/docs/current/ddl-schemas.html>`_ (a namespace of database objects). This means private data or implementation details can go inside different private schemas and be invisible to HTTP clients. + +It is recommended that you don't expose tables on your API schema. Instead expose views and functions which insulate the internal details from the outside world. +This allows you to change the internals of your schema and maintain backwards compatibility. It also keeps your code easier to refactor, and provides a natural way to do API versioning. + +.. container:: svg-container-md + + .. container:: img-dark + + .. See https://github.com/sphinx-doc/sphinx/issues/2240#issuecomment-187366626 + + .. raw:: html + + <object width="100%" data="../_static/sch-iso-dark.svg" type="image/svg+xml"></object> + + .. container:: img-light + + .. raw:: html + + <object width="100%" data="../_static/sch-iso.svg" type="image/svg+xml"></object> diff --git a/postgrest_v12.2.8/docs/how-tos/create-soap-endpoint.rst b/postgrest_v12.2.8/docs/how-tos/create-soap-endpoint.rst new file mode 100644 index 0000000..5cfaa60 --- /dev/null +++ b/postgrest_v12.2.8/docs/how-tos/create-soap-endpoint.rst @@ -0,0 +1,201 @@ +.. _create_soap_endpoint: + +Create a SOAP endpoint +====================== + +:author: `fjf2002 <https://github.com/fjf2002>`_ + +PostgREST supports :ref:`custom_media`. With a bit of work, SOAP endpoints become possible. + +Minimal Example +--------------- + +This example will simply return the request body, inside a tag ``therequestbodywas``. + +Add the following function to your PostgreSQL database: + +.. code-block:: postgres + + create domain "text/xml" as pg_catalog.xml; + + CREATE OR REPLACE FUNCTION my_soap_endpoint(xml) RETURNS "text/xml" AS $$ + DECLARE + nsarray CONSTANT text[][] := ARRAY[ + ARRAY['soapenv', 'http://schemas.xmlsoap.org/soap/envelope/'] + ]; + BEGIN + RETURN xmlelement( + NAME "soapenv:Envelope", + XMLATTRIBUTES('http://schemas.xmlsoap.org/soap/envelope/' AS "xmlns:soapenv"), + xmlelement(NAME "soapenv:Header"), + xmlelement( + NAME "soapenv:Body", + xmlelement( + NAME theRequestBodyWas, + (xpath('/soapenv:Envelope/soapenv:Body', $1, nsarray))[1] + ) + ) + ); + END; + $$ LANGUAGE plpgsql; + +Do not forget to refresh the :ref:`PostgREST schema cache <schema_reloading>`. + +Use ``curl`` for a first test: + +.. code-block:: bash + + curl http://localhost:3000/rpc/my_soap_endpoint \ + --header 'Content-Type: text/xml' \ + --header 'Accept: text/xml' \ + --data-binary @- <<XML + <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"> + <soapenv:Header/> + <soapenv:Body> + <mySOAPContent> + My SOAP Content + </mySOAPContent> + </soapenv:Body> + </soapenv:Envelope> + XML + +The output should contain the original request body within the ``therequestbodywas`` entity, +and should roughly look like: + +.. 
code-block:: xml + + <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"> + <soapenv:Header/> + <soapenv:Body> + <therequestbodywas> + <soapenv:Body xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"> + <mySOAPContent> + My SOAP Content + </mySOAPContent> + </soapenv:Body> + </therequestbodywas> + </soapenv:Body> + </soapenv:Envelope> + +A more elaborate example +------------------------ + +Here we have a SOAP service that converts a fraction to a decimal value, +with pass-through of PostgreSQL errors to the SOAP response. +Please note that in production you probably should not pass through plain database errors +potentially disclosing internals to the client, but instead handle the errors directly. + + +.. code-block:: postgres + + -- helper function + CREATE OR REPLACE FUNCTION _soap_envelope(body xml) + RETURNS xml + LANGUAGE sql + AS $function$ + SELECT xmlelement( + NAME "soapenv:Envelope", + XMLATTRIBUTES('http://schemas.xmlsoap.org/soap/envelope/' AS "xmlns:soapenv"), + xmlelement(NAME "soapenv:Header"), + xmlelement(NAME "soapenv:Body", body) + ); + $function$; + + -- helper function + CREATE OR REPLACE FUNCTION _soap_exception( + faultcode text, + faultstring text + ) + RETURNS xml + LANGUAGE sql + AS $function$ + SELECT _soap_envelope( + xmlelement(NAME "soapenv:Fault", + xmlelement(NAME "faultcode", faultcode), + xmlelement(NAME "faultstring", faultstring) + ) + ); + $function$; + + CREATE OR REPLACE FUNCTION fraction_to_decimal(xml) + RETURNS "text/xml" + LANGUAGE plpgsql + AS $function$ + DECLARE + nsarray CONSTANT text[][] := ARRAY[ + ARRAY['soapenv', 'http://schemas.xmlsoap.org/soap/envelope/'] + ]; + exc_msg text; + exc_detail text; + exc_hint text; + exc_sqlstate text; + BEGIN + -- simulating a statement that results in an exception: + RETURN _soap_envelope(xmlelement( + NAME "decimalValue", + ( + (xpath('/soapenv:Envelope/soapenv:Body/fraction/numerator/text()', $1, nsarray))[1]::text::int + / + (xpath('/soapenv:Envelope/soapenv:Body/fraction/denominator/text()', $1, nsarray))[1]::text::int + )::text::xml + )); + EXCEPTION WHEN OTHERS THEN + GET STACKED DIAGNOSTICS + exc_msg := MESSAGE_TEXT, + exc_detail := PG_EXCEPTION_DETAIL, + exc_hint := PG_EXCEPTION_HINT, + exc_sqlstate := RETURNED_SQLSTATE; + RAISE WARNING USING + MESSAGE = exc_msg, + DETAIL = exc_detail, + HINT = exc_hint; + RETURN _soap_exception(faultcode => exc_sqlstate, faultstring => concat(exc_msg, ', DETAIL: ', exc_detail, ', HINT: ', exc_hint)); + END + $function$; + +Let's test the ``fraction_to_decimal`` service with illegal values: + +.. code-block:: bash + + curl http://localhost:3000/rpc/fraction_to_decimal \ + --header 'Content-Type: text/xml' \ + --header 'Accept: text/xml' \ + --data-binary @- <<XML + <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"> + <soapenv:Header/> + <soapenv:Body> + <fraction> + <numerator>42</numerator> + <denominator>0</denominator> + </fraction> + </soapenv:Body> + </soapenv:Envelope> + XML + +The output should roughly look like: + +.. code-block:: xml + + <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"> + <soapenv:Header/> + <soapenv:Body> + <soapenv:Fault> + <faultcode>22012</faultcode> + <faultstring>division by zero, DETAIL: , HINT: </faultstring> + </soapenv:Fault> + </soapenv:Body> + </soapenv:Envelope> + +References +---------- + +For more information concerning PostgREST, cf. + +- :ref:`function_single_unnamed` +- :ref:`custom_media`. 
See :ref:`any_handler`, if you need to support an ``application/soap+xml`` media type or if you want to respond with XML without sending a media type.
+- :ref:`Nginx reverse proxy <nginx>`
+
+For SOAP reference, visit
+
+- the specification at https://www.w3.org/TR/soap/
+- shorter, more practical advice at https://www.w3schools.com/xml/xml_soap.asp
diff --git a/postgrest_v12.2.8/docs/how-tos/providing-html-content-using-htmx.rst b/postgrest_v12.2.8/docs/how-tos/providing-html-content-using-htmx.rst
new file mode 100644
index 0000000..27941c2
--- /dev/null
+++ b/postgrest_v12.2.8/docs/how-tos/providing-html-content-using-htmx.rst
@@ -0,0 +1,326 @@
+
+.. _providing_html_htmx:
+
+Providing HTML Content Using Htmx
+=================================
+
+:author: `Laurence Isla <https://github.com/laurenceisla>`_
+
+This how-to shows a way to return HTML content and use the `htmx library <https://htmx.org/>`_ to handle the AJAX requests.
+Htmx expects an HTML response and uses it to replace an element inside the DOM (see the `htmx introduction <https://htmx.org/docs/#introduction>`_ in the docs).
+
+.. image:: ../_static/how-tos/htmx-demo.gif
+
+.. warning::
+
+  This is a proof of concept showing what can be achieved using both technologies.
+  We are working on `plmustache <https://github.com/PostgREST/plmustache>`_ which will further improve the HTML aspect of this how-to.
+
+Preparatory Configuration
+-------------------------
+
+We will make a to-do app based on the :ref:`tut0`, so make sure to complete it before continuing.
+
+To simplify things, we won't be using authentication, so grant all permissions on the ``todos`` table to the ``web_anon`` user.
+
+.. code-block:: postgres
+
+  grant all on api.todos to web_anon;
+  grant usage, select on sequence api.todos_id_seq to web_anon;
+
+Next, add ``text/html`` as a :ref:`custom_media`. With this, PostgREST can identify the request made by your web browser (with the ``Accept: text/html`` header)
+and return a raw HTML document file.
+
+.. code-block:: postgres
+
+  create domain "text/html" as text;
+
+Creating an HTML Response
+-------------------------
+
+Let's create a function that returns a basic HTML file, using `Pico CSS <https://picocss.com>`_ for styling and
+`Ionicons <https://ionic.io/ionicons>`_ to show some icons later.
+
+.. code-block:: postgres
+
+  create or replace function api.index() returns "text/html" as $$
+    select $html$
+      <!DOCTYPE html>
+      <html>
+      <head>
+        <meta charset="utf-8">
+        <meta name="viewport" content="width=device-width, initial-scale=1">
+        <title>PostgREST + HTMX To-Do List</title>
+        <!-- Pico CSS for styling -->
+        <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@picocss/pico@1/css/pico.min.css">
+        <!-- Ionicons for icons -->
+        <script type="module" src="https://unpkg.com/ionicons/dist/ionicons/ionicons.esm.js"></script>
+        <script nomodule src="https://unpkg.com/ionicons/dist/ionicons/ionicons.js"></script>
+      </head>
+      <body>
+        <main class="container" style="max-width: 600px">
+          <article>
+            <h5 style="text-align: center;">
+              PostgREST + HTMX To-Do List
+            </h5>
+            <div id="todo-list-area">
+            </div>
+          </article>
+        </main>
+        <!-- Script for htmx -->
+        <script src="https://unpkg.com/htmx.org"></script>
+      </body>
+      </html>
+    $html$;
+  $$ language sql;
+
+The web browser will open the web page at ``http://localhost:3000/rpc/index``.
+
+.. image:: ../_static/how-tos/htmx-simple.jpg
+
+.. _html_htmx_list_create:
+
+Listing and Creating To-Dos
+---------------------------
+
+Now, let's show a list of the to-dos already inserted in the database.
+For that, we'll also need a function to help us sanitize the HTML content that may be present in the task.
+
+.. code-block:: postgres
+
+  create or replace function api.sanitize_html(text) returns text as $$
+    select replace(replace(replace(replace(replace($1, '&', '&amp;'), '"', '&quot;'),'>', '&gt;'),'<', '&lt;'), '''', '&apos;')
+  $$ language sql;
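+
+  -- Illustrative check (hypothetical input), e.g. in psql:
+  --   select api.sanitize_html('<s>done</s> & "more"');
+  -- returns the escaped text
+  --   &lt;s&gt;done&lt;/s&gt; &amp; &quot;more&quot;
+  -- so a task cannot inject live markup into the page.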
+
+  create or replace function api.html_todo(api.todos) returns text as $$
+    select format($html$
+      <li id="todo-%1$s">
+        <%2$s>
+          %3$s
+        </%2$s>
+      </li>
+      $html$,
+      $1.id,
+      case when $1.done then 's' else 'span' end,
+      api.sanitize_html($1.task)
+    );
+  $$ language sql stable;
+
+  create or replace function api.html_all_todos() returns text as $$
+    select coalesce(
+      '<ul id="todo-list">'
+        || string_agg(api.html_todo(t), '' order by t.id)
+        || '</ul>',
+      '<p style="text-align: center;">There is nothing else to do.</p>'
+    )
+    from api.todos t;
+  $$ language sql;
+
+These two functions are used to build the to-do list template. We won't use them as PostgREST endpoints.
+
+- The ``api.html_todo`` function uses the table ``api.todos`` as a parameter and formats each item into a list element ``<li>``.
+  The PostgreSQL `format <https://www.postgresql.org/docs/current/functions-string.html#FUNCTIONS-STRING-FORMAT>`_ is useful to that end.
+  It replaces the values according to the position in the template, e.g. ``%1$s`` will be replaced with the value of ``$1.id`` (the first parameter).
+
+- The ``api.html_all_todos`` function returns the ``<ul>`` wrapper for all the list elements.
+  It uses `string_agg <https://www.postgresql.org/docs/current/functions-aggregate.html>`_ to concatenate all the to-dos in a single text value.
+  It also returns an alternative message, instead of a list, when the ``api.todos`` table is empty.
+
+Next, let's add an endpoint to register a to-do in the database and modify the ``/rpc/index`` page accordingly.
+
+.. code-block:: postgres
+
+  create or replace function api.add_todo(_task text) returns "text/html" as $$
+    insert into api.todos(task) values (_task);
+    select api.html_all_todos();
+  $$ language sql;
+
+  create or replace function api.index() returns "text/html" as $$
+    select $html$
+      <!DOCTYPE html>
+      <html>
+      <head>
+        <meta charset="utf-8">
+        <meta name="viewport" content="width=device-width, initial-scale=1">
+        <title>PostgREST + HTMX To-Do List</title>
+        <!-- Pico CSS for styling -->
+        <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@picocss/pico@1/css/pico.min.css">
+        <!-- Ionicons for icons -->
+        <script type="module" src="https://unpkg.com/ionicons/dist/ionicons/ionicons.esm.js"></script>
+        <script nomodule src="https://unpkg.com/ionicons/dist/ionicons/ionicons.js"></script>
+      </head>
+      <body hx-headers='{"Accept": "text/html"}'>
+        <main class="container" style="max-width: 600px">
+          <article>
+            <h5 style="text-align: center;">
+              PostgREST + HTMX To-Do List
+            </h5>
+            <form hx-post="/rpc/add_todo"
+                  hx-target="#todo-list-area"
+                  hx-trigger="submit"
+                  hx-on="htmx:afterRequest: this.reset()">
+              <input type="text" name="_task" placeholder="Add a to-do...">
+            </form>
+            <div id="todo-list-area">
+      $html$
+        || api.html_all_todos() ||
+      $html$
+            </div>
+          </article>
+        </main>
+        <!-- Script for htmx -->
+        <script src="https://unpkg.com/htmx.org"></script>
+      </body>
+      </html>
+    $html$;
+  $$ language sql;
+
+- The ``/rpc/add_todo`` endpoint allows us to add a new to-do using the ``_task`` parameter and returns an HTML fragment with all the to-dos in the database.
+
+- The ``/rpc/index`` page now adds the ``hx-headers='{"Accept": "text/html"}'`` attribute to the ``<body>``.
+  This will make sure that all htmx elements inside the body send this header, otherwise PostgREST won't recognize it as HTML.
+
+  There is also a ``<form>`` element that uses the htmx library. Let's break it down:
+
+  + ``hx-post="/rpc/add_todo"``: sends an AJAX POST request to the ``/rpc/add_todo`` endpoint, with the value of the ``_task`` from the ``<input>`` element.
+
+  + ``hx-target="#todo-list-area"``: the HTML content returned from the request will go inside ``<div id="todo-list-area">`` (which is the list of to-dos).
+
+  + ``hx-trigger="submit"``: htmx will do this request when submitting the form (by pressing enter while inside the ``<input>``).
+
+  + ``hx-on="htmx:afterRequest: this.reset()"``: this is a Javascript command that clears the form `after the request is done <https://htmx.org/attributes/hx-on/>`_.
+
+With this, the ``http://localhost:3000/rpc/index`` page lists all the to-dos and adds new ones by submitting tasks in the input element.
+Don't forget to refresh the :ref:`schema cache <schema_reloading>`.
+
+.. image:: ../_static/how-tos/htmx-insert.gif
+
+Editing and Deleting To-Dos
+---------------------------
+
+Now, let's modify ``api.html_todo`` and make it more functional.
+
+.. code-block:: postgres
+
+  create or replace function api.html_todo(api.todos) returns text as $$
+    select format($html$
+      <li id="todo-%1$s">
+        <form hx-post="/rpc/change_todo_state"
+              hx-vals='{"_id": %1$s, "_done": %4$s}'
+              hx-target="#todo-list-area"
+              hx-trigger="click">
+          <%2$s style="cursor: pointer">
+            %3$s
+          </%2$s>
+        </form>
+        <button>
+          <ion-icon name="pencil"></ion-icon>
+        </button>
+        <button>
+          <ion-icon name="trash"></ion-icon>
+        </button>
+      </li>
+      $html$,
+      $1.id,
+      case when $1.done then 's' else 'span' end,
+      api.sanitize_html($1.task),
+      (not $1.done)::text
+    );
+  $$ language sql stable;
+
+Let's deconstruct the new htmx features added:
+
+- The ``<form>`` element is configured as follows:
+
+  + ``hx-post="/rpc/change_todo_state"``: does an AJAX POST request to that endpoint. It will toggle the ``done`` state of the to-do.
+
+  + ``hx-vals='{"_id": %1$s, "_done": %4$s}'``: adds the parameters to the request.
+    This is an alternative to using hidden inputs inside the ``<form>``.
+
+  + ``hx-trigger="click"``: htmx does the request after clicking on the element.
+
+- For the first ``