first commit

hailin 2025-07-18 22:35:07 +08:00
commit 6f02133d0d
1300 changed files with 425793 additions and 0 deletions

.dockerignore (Normal file, 17 lines)

@@ -0,0 +1,17 @@
.git
.github
default.etcd
*.gz
*.tar.gz
*.bzip2
*.zip
browser/node_modules
node_modules
docs/debugging/s3-verify/s3-verify
docs/debugging/xl-meta/xl-meta
docs/debugging/s3-check-md5/s3-check-md5
docs/debugging/hash-set/hash-set
docs/debugging/healing-bin/healing-bin
docs/debugging/inspect/inspect
docs/debugging/pprofgoparser/pprofgoparser
docs/debugging/reorder-disks/reorder-disks

.gitignore (vendored Normal file, 55 lines)

@@ -0,0 +1,55 @@
**/*.swp
cover.out
*~
minio
!*/
site/
**/*.test
**/*.sublime-workspace
/.idea/
/Minio.iml
**/access.log
vendor/
.DS_Store
*.syso
coverage.txt
.vscode/
*.tar.bz2
parts/
prime/
stage/
.sia_temp/
config.json
node_modules/
mc.*
s3-check-md5*
xl-meta*
healing-*
inspect*.zip
200M*
hash-set
minio.RELEASE*
mc
nancy
inspects/*
.bin/
*.gz
docs/debugging/s3-verify/s3-verify
docs/debugging/xl-meta/xl-meta
docs/debugging/s3-check-md5/s3-check-md5
docs/debugging/hash-set/hash-set
docs/debugging/healing-bin/healing-bin
docs/debugging/inspect/inspect
docs/debugging/pprofgoparser/pprofgoparser
docs/debugging/reorder-disks/reorder-disks
docs/debugging/populate-hard-links/populate-hardlinks
docs/debugging/xattr/xattr
hash-set
healing-bin
inspect
pprofgoparser
reorder-disks
s3-check-md5
s3-verify
xattr
xl-meta

.golangci.yml (Normal file, 64 lines)

@@ -0,0 +1,64 @@
version: "2"
linters:
  default: none
  enable:
    - durationcheck
    - forcetypeassert
    - gocritic
    - gomodguard
    - govet
    - ineffassign
    - misspell
    - revive
    - staticcheck
    - unconvert
    - unused
    - usetesting
    - whitespace
  settings:
    misspell:
      locale: US
    staticcheck:
      checks:
        - all
        - -SA1008
        - -SA1019
        - -SA4000
        - -SA9004
        - -ST1000
        - -ST1005
        - -ST1016
        - -U1000
  exclusions:
    generated: lax
    rules:
      - linters:
          - forcetypeassert
        path: _test\.go
      - path: (.+)\.go$
        text: 'empty-block:'
      - path: (.+)\.go$
        text: 'unused-parameter:'
      - path: (.+)\.go$
        text: 'dot-imports:'
      - path: (.+)\.go$
        text: should have a package comment
      - path: (.+)\.go$
        text: error strings should not be capitalized or end with punctuation or a newline
    paths:
      - third_party$
      - builtin$
      - examples$
issues:
  max-issues-per-linter: 100
  max-same-issues: 100
formatters:
  enable:
    - gofumpt
    - goimports
  exclusions:
    generated: lax
    paths:
      - third_party$
      - builtin$
      - examples$

.mailmap (Normal file, 18 lines)

@@ -0,0 +1,18 @@
# Generate CONTRIBUTORS.md: contributors.sh
# Tip for finding duplicates (besides scanning the output of CONTRIBUTORS.md for name
# duplicates that aren't also email duplicates): scan the output of:
# git log --format='%aE - %aN' | sort -uf
#
# For explanation on this file format: man git-shortlog
Anand Babu (AB) Periasamy <ab@min.io> Anand Babu (AB) Periasamy <abperiasamy@users.noreply.github.com>
Anand Babu (AB) Periasamy <ab@min.io> <ab@unlocksmith.org>
Anis Elleuch <vadmeste@gmail.com>
Frederick F. Kautz IV <fkautz@min.io> <fkautz@alumni.cmu.edu>
Harshavardhana <harsha@min.io> <harsha@harshavardhana.net>
Harshavardhana <harsha@min.io> <badger@gitter.im>
Harshavardhana <harsha@min.io>
Krishna Srinivas <krishna@min.io> <krishna.srinivas@gmail.com>
Matthew Farrellee <matt@cs.wisc.edu>
Nate Rosenblum <flander@gmail.com>

.typos.toml (Normal file, 45 lines)

@@ -0,0 +1,45 @@
[files]
extend-exclude = [".git/", "docs/", "CREDITS", "go.mod", "go.sum"]
ignore-hidden = false
[default]
extend-ignore-re = [
"Patrick Collison",
"Copyright 2014 Unknwon",
"[0-9A-Za-z/+=]{64}",
"ZXJuZXQxDjAMBgNVBA-some-junk-Q4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF",
"eyJmb28iOiJiYXIifQ",
"eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.*",
"MIIDBTCCAe2gAwIBAgIQWHw7h.*",
'http\.Header\{"X-Amz-Server-Side-Encryptio":',
"ZoEoZdLlzVbOlT9rbhD7ZN7TLyiYXSAlB79uGEge",
"ERRO:",
"(?Rm)^.*(#|//)\\s*spellchecker:disable-line$", # ignore line
]
[default.extend-words]
"encrypter" = "encrypter"
"kms" = "kms"
"requestor" = "requestor"
[default.extend-identifiers]
"HashiCorp" = "HashiCorp"
[type.go.extend-identifiers]
"bui" = "bui"
"dm2nd" = "dm2nd"
"ot" = "ot"
"ParseND" = "ParseND"
"ParseNDStream" = "ParseNDStream"
"pn" = "pn"
"TestGetPartialObjectMisAligned" = "TestGetPartialObjectMisAligned"
"thr" = "thr"
"toi" = "toi"
[type.go]
extend-ignore-identifiers-re = [
# Variants of `typ` used to mean `type` in golang as it is otherwise a
# keyword - some of these (like typ1 -> type1) can be fixed, but probably
# not worth the effort.
"[tT]yp[0-9]*",
]

CNAME (Normal file, 1 line)

@@ -0,0 +1 @@
charts.min.io

COMPLIANCE.md (Normal file, 7 lines)

@@ -0,0 +1,7 @@
# AGPLv3 Compliance

We have designed MinIO as Open Source software for the Open Source software community. This requires applications to consider whether their usage of MinIO is in compliance with the GNU AGPLv3 [license](https://github.com/minio/minio/blob/master/LICENSE).

MinIO cannot make the determination as to whether your application's usage of MinIO is in compliance with the AGPLv3 license requirements. You should instead rely on your own legal counsel or licensing specialists to audit and ensure your application is in compliance with the licenses of MinIO and all other open-source projects with which your application integrates or interacts. We understand that AGPLv3 licensing is complex and nuanced. It is for that reason we strongly encourage using experts in licensing to make any such determinations around compliance instead of relying on apocryphal or anecdotal advice.

[MinIO Commercial Licensing](https://min.io/pricing) is the best option for applications that trigger AGPLv3 obligations (e.g. open sourcing your application). Applications using MinIO - or any other OSS-licensed code - without validating their usage do so at their own risk.

CONTRIBUTING.md (Normal file, 82 lines)

@@ -0,0 +1,82 @@
# MinIO Contribution Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)

The ``MinIO`` community welcomes your contribution. To make the process as seamless as possible, we recommend you read this contribution guide.

## Development Workflow

Start by forking the MinIO GitHub repository, making your changes in a branch, and then sending a pull request. We encourage pull requests as the place to discuss code changes. Here are the steps in detail:
### Set up your MinIO GitHub Repository

Fork the [MinIO upstream](https://github.com/minio/minio/fork) source repository to your own personal repository. Copy the URL of your MinIO fork (you will need it for the `git clone` command below).

```sh
git clone https://github.com/<your-github-username>/minio
cd minio
go install -v
ls $(go env GOPATH)/bin/minio
```
### Set up git remote as ``upstream``
```sh
$ cd minio
$ git remote add upstream https://github.com/minio/minio
$ git fetch upstream
$ git merge upstream/master
...
```
### Create your feature branch
Before making code changes, make sure you create a separate branch for them:
```
git checkout -b my-new-feature
```
### Test MinIO server changes

After making your code changes, make sure to:

- Add test cases for the new code. If you have questions about how to do it, please ask on our [Slack](https://slack.min.io) channel.
- Run `make verifiers`.
- Squash your commits into a single commit with `git rebase -i`. It's okay to force-update your pull request.
- Check that `make test` and `make build` complete successfully (see the sketch after this list).
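
A minimal pre-submission sequence, assuming the Makefile targets above and the ``upstream`` remote configured earlier, might look like this:

```sh
# run the lint/verifier suite, the unit tests, and a full build
make verifiers
make test
make build

# squash work-in-progress commits into a single commit before pushing
git rebase -i upstream/master
```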
### Commit changes
After verification, commit your changes. This is a [great post](https://chris.beams.io/posts/git-commit/) on how to write useful commit messages.
```
git commit -am 'Add some feature'
```
### Push to the branch
Push your locally committed changes to the remote origin (your fork):
```
git push origin my-new-feature
```
### Create a Pull Request
Pull requests can be created via GitHub. Refer to [this document](https://help.github.com/articles/creating-a-pull-request/) for detailed steps on how to create a pull request. After a Pull Request gets peer reviewed and approved, it will be merged.
## FAQs
### How does ``MinIO`` manage dependencies?

``MinIO`` uses `go mod` to manage its dependencies.

- Run `go get foo/bar` in the source folder to add the dependency to the `go.mod` file.

To remove a dependency:

- Edit your code and remove the import reference.
- Run `go mod tidy` in the source folder to remove the dependency from the `go.mod` file.
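
For illustration, with a placeholder module path `github.com/foo/bar`, the two flows look roughly like this:

```sh
# add a dependency (updates go.mod and go.sum)
go get github.com/foo/bar

# remove a dependency: delete its import from the code first, then tidy the module files
go mod tidy
```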
### What are the coding guidelines for MinIO?

``MinIO`` is fully conformant with Golang style. Refer to the Go project's [Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments). If you observe offending code, please feel free to send a pull request or ping us on [Slack](https://slack.min.io).

CREDITS (Normal file, 35316 lines)

File diff suppressed because it is too large.

Dockerfile (Normal file, 12 lines)

@@ -0,0 +1,12 @@
FROM minio/minio:latest
RUN chmod -R 777 /usr/bin
COPY ./minio /usr/bin/minio
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
CMD ["minio"]

Dockerfile.cicd (Normal file, 3 lines)

@@ -0,0 +1,3 @@
FROM minio/minio:edge
CMD ["minio", "server", "/data"]

Dockerfile.hotfix (Normal file, 71 lines)

@@ -0,0 +1,71 @@
FROM golang:1.24-alpine AS build
ARG TARGETARCH
ARG RELEASE
ENV GOPATH=/go
ENV CGO_ENABLED=0
# Install curl and minisign
RUN apk add -U --no-cache ca-certificates && \
apk add -U --no-cache curl && \
go install aead.dev/minisign/cmd/minisign@v0.2.1
# Download minio binary and signature files
RUN curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \
chmod +x /go/bin/minio
# Download mc binary and signature files
RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \
chmod +x /go/bin/mc
RUN if [ "$TARGETARCH" = "amd64" ]; then \
curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
chmod +x /go/bin/curl; \
fi
# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
FROM registry.access.redhat.com/ubi9/ubi-micro:latest
ARG RELEASE
LABEL name="MinIO" \
vendor="MinIO Inc <dev@min.io>" \
maintainer="MinIO Inc <dev@min.io>" \
version="${RELEASE}" \
release="${RELEASE}" \
summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_ROOT_USER_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
MINIO_CONFIG_ENV_FILE=config.env \
MC_CONFIG_DIR=/tmp/.mc
RUN chmod -R 777 /usr/bin
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio* /usr/bin/
COPY --from=build /go/bin/mc* /usr/bin/
COPY --from=build /go/bin/cur* /usr/bin/
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
EXPOSE 9000
VOLUME ["/data"]
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
CMD ["minio"]

Dockerfile.release (Normal file, 73 lines)

@@ -0,0 +1,73 @@
FROM golang:1.24-alpine AS build
ARG TARGETARCH
ARG RELEASE
ENV GOPATH=/go
ENV CGO_ENABLED=0
WORKDIR /build
# Install curl and minisign
RUN apk add -U --no-cache ca-certificates && \
apk add -U --no-cache curl && \
apk add -U --no-cache bash && \
go install aead.dev/minisign/cmd/minisign@v0.2.1
# Download minio binary and signature files
RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \
chmod +x /go/bin/minio
# Download mc binary and signature files
RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \
chmod +x /go/bin/mc
# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
COPY dockerscripts/download-static-curl.sh /build/download-static-curl
RUN chmod +x /build/download-static-curl && \
/build/download-static-curl
FROM registry.access.redhat.com/ubi9/ubi-micro:latest
ARG RELEASE
LABEL name="MinIO" \
vendor="MinIO Inc <dev@min.io>" \
maintainer="MinIO Inc <dev@min.io>" \
version="${RELEASE}" \
release="${RELEASE}" \
summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_ROOT_USER_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
MINIO_CONFIG_ENV_FILE=config.env \
MC_CONFIG_DIR=/tmp/.mc
RUN chmod -R 777 /usr/bin
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio* /usr/bin/
COPY --from=build /go/bin/mc* /usr/bin/
COPY --from=build /go/bin/curl* /usr/bin/
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
EXPOSE 9000
VOLUME ["/data"]
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
CMD ["minio"]

(file name not shown; Normal file, 71 lines)

@@ -0,0 +1,71 @@
FROM golang:1.24-alpine AS build
ARG TARGETARCH
ARG RELEASE
ENV GOPATH=/go
ENV CGO_ENABLED=0
# Install curl and minisign
RUN apk add -U --no-cache ca-certificates && \
apk add -U --no-cache curl && \
go install aead.dev/minisign/cmd/minisign@v0.2.1
# Download minio binary and signature files
RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \
chmod +x /go/bin/minio
# Download mc binary and signature files
RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \
chmod +x /go/bin/mc
RUN if [ "$TARGETARCH" = "amd64" ]; then \
curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
chmod +x /go/bin/curl; \
fi
# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
FROM registry.access.redhat.com/ubi8/ubi-micro:latest
ARG RELEASE
LABEL name="MinIO" \
vendor="MinIO Inc <dev@min.io>" \
maintainer="MinIO Inc <dev@min.io>" \
version="${RELEASE}" \
release="${RELEASE}" \
summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_ROOT_USER_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
MINIO_CONFIG_ENV_FILE=config.env \
MC_CONFIG_DIR=/tmp/.mc
RUN chmod -R 777 /usr/bin
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio* /usr/bin/
COPY --from=build /go/bin/mc* /usr/bin/
COPY --from=build /go/bin/cur* /usr/bin/
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
EXPOSE 9000
VOLUME ["/data"]
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
CMD ["minio"]

Dockerfile.scratch (Normal file, 5 lines)

@@ -0,0 +1,5 @@
FROM scratch
COPY minio /minio
CMD ["/minio"]

LICENSE (Normal file, 661 lines)

@@ -0,0 +1,661 @@
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

Makefile (Normal file, 245 lines)

@@ -0,0 +1,245 @@
PWD := $(shell pwd)
GOPATH := $(shell go env GOPATH)
LDFLAGS := $(shell go run buildscripts/gen-ldflags.go)
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
VERSION ?= $(shell git describe --tags)
REPO ?= quay.io/minio
TAG ?= $(REPO)/minio:$(VERSION)
GOLANGCI_DIR = .bin/golangci/$(GOLANGCI_VERSION)
GOLANGCI = $(GOLANGCI_DIR)/golangci-lint
all: build
checks: ## check dependencies
@echo "Checking dependencies"
@(env bash $(PWD)/buildscripts/checkdeps.sh)
help: ## print this help
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' Makefile | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-40s\033[0m %s\n", $$1, $$2}'
getdeps: ## fetch necessary dependencies
@mkdir -p ${GOPATH}/bin
@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOLANGCI_DIR)
crosscompile: ## cross compile minio
@(env bash $(PWD)/buildscripts/cross-compile.sh)
verifiers: lint check-gen
check-gen: ## check for updated autogenerated files
@go generate ./... >/dev/null
@go mod tidy -compat=1.21
@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)
@(! git diff --name-only | grep 'go.sum') || (echo "Non-committed changes in auto-generated go.sum is detected, please commit them to proceed." && false)
lint: getdeps ## runs golangci-lint suite of linters
@echo "Running $@ check"
@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml
@command typos && typos ./ || echo "typos binary is not found.. skipping.."
lint-fix: getdeps ## runs golangci-lint suite of linters with automatic fixes
@echo "Running $@ check"
@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml --fix
check: test
test: verifiers build ## builds minio, runs linters, tests
@echo "Running unit tests"
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -v -tags kqueue,dev ./...
test-root-disable: install-race
@echo "Running minio root lockdown tests"
@env bash $(PWD)/buildscripts/disable-root.sh
test-ilm: install-race
@echo "Running ILM tests"
@env bash $(PWD)/docs/bucket/replication/setup_ilm_expiry_replication.sh
test-ilm-transition: install-race
@echo "Running ILM tiering tests with healing"
@env bash $(PWD)/docs/bucket/lifecycle/setup_ilm_transition.sh
test-pbac: install-race
@echo "Running bucket policies tests"
@env bash $(PWD)/docs/iam/policies/pbac-tests.sh
test-decom: install-race
@echo "Running minio decom tests"
@env bash $(PWD)/docs/distributed/decom.sh
@env bash $(PWD)/docs/distributed/decom-encrypted.sh
@env bash $(PWD)/docs/distributed/decom-encrypted-sse-s3.sh
@env bash $(PWD)/docs/distributed/decom-compressed-sse-s3.sh
@env bash $(PWD)/docs/distributed/decom-encrypted-kes.sh
test-versioning: install-race
@echo "Running minio versioning tests"
@env bash $(PWD)/docs/bucket/versioning/versioning-tests.sh
test-configfile: install-race
@env bash $(PWD)/docs/distributed/distributed-from-config-file.sh
test-upgrade: install-race
@echo "Running minio upgrade tests"
@(env bash $(PWD)/buildscripts/minio-upgrade.sh)
test-race: verifiers build ## builds minio, runs linters, tests (race)
@echo "Running unit tests under -race"
@(env bash $(PWD)/buildscripts/race.sh)
test-iam: install-race ## verify IAM (external IDP, etcd backends)
@echo "Running tests for IAM (external IDP, etcd backends)"
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -timeout 15m -tags kqueue,dev -v -run TestIAM* ./cmd
@echo "Running tests for IAM (external IDP, etcd backends) with -race"
@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -timeout 15m -race -tags kqueue,dev -v -run TestIAM* ./cmd
test-iam-ldap-upgrade-import: install-race ## verify IAM (external LDAP IDP)
@echo "Running upgrade tests for IAM (LDAP backend)"
@env bash $(PWD)/buildscripts/minio-iam-ldap-upgrade-import-test.sh
test-iam-import-with-missing-entities: install-race ## test import of external iam config with missing entities
@echo "Test IAM import configurations with missing entities"
@env bash $(PWD)/docs/distributed/iam-import-with-missing-entities.sh
test-iam-import-with-openid: install-race
@echo "Test IAM import configurations with openid"
@env bash $(PWD)/docs/distributed/iam-import-with-openid.sh
test-sio-error:
@(env bash $(PWD)/docs/bucket/replication/sio-error.sh)
test-replication-2site:
@(env bash $(PWD)/docs/bucket/replication/setup_2site_existing_replication.sh)
test-replication-3site:
@(env bash $(PWD)/docs/bucket/replication/setup_3site_replication.sh)
test-delete-replication:
@(env bash $(PWD)/docs/bucket/replication/delete-replication.sh)
test-delete-marker-proxying:
@(env bash $(PWD)/docs/bucket/replication/test_del_marker_proxying.sh)
test-replication: install-race test-replication-2site test-replication-3site test-delete-replication test-sio-error test-delete-marker-proxying ## verify multi site replication
@echo "Running tests for replicating three sites"
test-site-replication-ldap: install-race ## verify automatic site replication
@echo "Running tests for automatic site replication of IAM (with LDAP)"
@(env bash $(PWD)/docs/site-replication/run-multi-site-ldap.sh)
test-site-replication-oidc: install-race ## verify automatic site replication
@echo "Running tests for automatic site replication of IAM (with OIDC)"
@(env bash $(PWD)/docs/site-replication/run-multi-site-oidc.sh)
test-site-replication-minio: install-race ## verify automatic site replication
@echo "Running tests for automatic site replication of IAM (with MinIO IDP)"
@(env bash $(PWD)/docs/site-replication/run-multi-site-minio-idp.sh)
@echo "Running tests for automatic site replication of SSE-C objects"
@(env bash $(PWD)/docs/site-replication/run-ssec-object-replication.sh)
@echo "Running tests for automatic site replication of SSE-C objects with SSE-KMS enabled for bucket"
@(env bash $(PWD)/docs/site-replication/run-sse-kms-object-replication.sh)
@echo "Running tests for automatic site replication of SSE-C objects with compression enabled for site"
@(env bash $(PWD)/docs/site-replication/run-ssec-object-replication-with-compression.sh)
test-multipart: install-race ## test multipart
@echo "Test multipart behavior when part files are missing"
@(env bash $(PWD)/buildscripts/multipart-quorum-test.sh)
test-timeout: install-race ## test server timeout
@echo "Test server timeout"
@(env bash $(PWD)/buildscripts/test-timeout.sh)
verify: install-race ## verify minio various setups
@echo "Verifying build with race"
@(env bash $(PWD)/buildscripts/verify-build.sh)
verify-healing: install-race ## verify healing and replacing disks with minio binary
@echo "Verify healing build with race"
@(env bash $(PWD)/buildscripts/verify-healing.sh)
@(env bash $(PWD)/buildscripts/verify-healing-empty-erasure-set.sh)
@(env bash $(PWD)/buildscripts/heal-inconsistent-versions.sh)
verify-healing-with-root-disks: install-race ## verify healing root disks
@echo "Verify healing with root drives"
@(env bash $(PWD)/buildscripts/verify-healing-with-root-disks.sh)
verify-healing-with-rewrite: install-race ## verify healing to rewrite old xl.meta -> new xl.meta
@echo "Verify healing with rewrite"
@(env bash $(PWD)/buildscripts/rewrite-old-new.sh)
verify-healing-inconsistent-versions: install-race ## verify resolving inconsistent versions
@echo "Verify resolving inconsistent versions build with race"
@(env bash $(PWD)/buildscripts/resolve-right-versions.sh)
build-debugging:
@(env bash $(PWD)/docs/debugging/build.sh)
build: checks build-debugging ## builds minio to $(PWD)
@echo "Building minio binary to './minio'"
@CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
hotfix-vars:
$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#')))
$(eval VERSION := $(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD))
hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags
@wget -q -c https://github.com/minio/pkger/releases/download/v2.3.11/pkger_2.3.11_linux_amd64.deb
@wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.1.1/linux-systemd/distributed/minio.service
@sudo apt install ./pkger_2.3.11_linux_amd64.deb --yes
@mkdir -p minio-release/$(GOOS)-$(GOARCH)/archive
@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio
@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION)
@minisign -qQSm minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION) -s "${CRED_DIR}/minisign.key" < "${CRED_DIR}/minisign-passphrase"
@sha256sum < minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION) | sed 's, -,minio.$(VERSION),g' > minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION).sha256sum
@cp -af minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION)* minio-release/$(GOOS)-$(GOARCH)/archive/
@pkger -r $(VERSION) --ignore
hotfix-push: hotfix
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/archive
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/archive
@echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-$(GOOS)/archive/minio.$(VERSION)"
docker-hotfix-push: docker-hotfix
@docker push -q $(TAG) && echo "Published new container $(TAG)"
docker-hotfix: hotfix-push checks ## builds minio docker container with hotfix tags
@echo "Building minio docker image '$(TAG)'"
@docker build -q --no-cache -t $(TAG) --build-arg RELEASE=$(VERSION) . -f Dockerfile.hotfix
docker: build ## builds minio docker container
@echo "Building minio docker image '$(TAG)'"
@docker build -q --no-cache -t $(TAG) . -f Dockerfile
test-resiliency: build
@echo "Running resiliency tests"
@(DOCKER_COMPOSE_FILE=$(PWD)/docs/resiliency/docker-compose.yaml env bash $(PWD)/docs/resiliency/resiliency-tests.sh)
install-race: checks build-debugging ## builds minio with -race and installs it to $(GOPATH)/bin
@echo "Building minio binary with -race to './minio'"
@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue,dev -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@echo "Installing minio binary with -race to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio
install: build ## builds minio and installs it to $GOPATH/bin.
@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio
@echo "Installation successful. To learn more, try \"minio --help\"."
clean: ## cleanup all generated assets
@echo "Cleaning up all the generated files"
@find . -name '*.test' | xargs rm -fv
@find . -name '*~' | xargs rm -fv
@find . -name '.#*#' | xargs rm -fv
@find . -name '#*#' | xargs rm -fv
@rm -rvf minio
@rm -rvf build
@rm -rvf release
@rm -rvf .verify*
@rm -rvf minio-release
@rm -rvf minio.RELEASE*.hotfix.*
@rm -rvf pkger_*.deb

9
NOTICE Normal file
View File

@ -0,0 +1,9 @@
MinIO Project, (C) 2015-2023 MinIO, Inc.
This product includes software developed at MinIO, Inc.
(https://min.io/).
The MinIO project contains unmodified/modified subcomponents too with
separate copyright notices and license terms. Your use of the source
code for these subcomponents is subject to the terms and conditions
of GNU Affero General Public License 3.0.

268
README.md Normal file
View File

@ -0,0 +1,268 @@
# MinIO Quickstart Guide
[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/) [![license](https://img.shields.io/badge/license-AGPL%20V3-blue)](https://github.com/minio/minio/blob/master/LICENSE)
[![MinIO](https://raw.githubusercontent.com/minio/minio/master/.github/logo.svg?sanitize=true)](https://min.io)
MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads. To learn more about what MinIO is doing for AI storage, go to [AI storage documentation](https://min.io/solutions/object-storage-for-ai).
This README provides quickstart instructions on running MinIO on bare metal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
## Container Installation
Use the following commands to run a standalone MinIO server as a container.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
for more complete documentation.
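For illustration only, a single server can satisfy the 4-drive minimum by passing multiple drive paths to `minio server` (hypothetical paths; a sketch, not a production layout):

```sh
# assumes /data1 ... /data4 exist as empty drives or directories on the host
minio server /data{1...4} --console-address ":9001"
```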
### Stable
Run the following command to run the latest stable image of MinIO as a container using an ephemeral data volume:
```sh
podman run -p 9000:9000 -p 9001:9001 \
quay.io/minio/minio server /data --console-address ":9001"
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded
object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> [!NOTE]
> To deploy MinIO with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option.
> For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container.
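For example, the stable container command shown above, extended with a persistent host directory (assuming `/mnt/data` exists on the host), might look like:

```sh
podman run -p 9000:9000 -p 9001:9001 \
  -v /mnt/data:/data \
  quay.io/minio/minio server /data --console-address ":9001"
```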
## macOS
Use the following commands to run a standalone MinIO server on macOS.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
### Homebrew (recommended)
Run the following command to install the latest stable MinIO package using [Homebrew](https://brew.sh/). Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
```sh
brew install minio/stable/minio
minio server /data
```
> [!NOTE]
> If you previously installed minio using `brew install minio`, it is recommended that you reinstall it from the official `minio/stable/minio` repository instead.
```sh
brew uninstall minio
brew install minio/stable/minio
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
### Binary Download
Use the following command to download and run a standalone MinIO server on macOS. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
```sh
wget https://dl.min.io/server/minio/release/darwin-amd64/minio
chmod +x minio
./minio server /data
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
## GNU/Linux
Use the following command to run a standalone MinIO server on Linux hosts running 64-bit Intel/AMD architectures. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
```sh
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
./minio server /data
```
The following table lists supported architectures. Replace the `wget` URL with the architecture for your Linux host.
| Architecture | URL |
| -------- | ------ |
| 64-bit Intel/AMD | <https://dl.min.io/server/minio/release/linux-amd64/minio> |
| 64-bit ARM | <https://dl.min.io/server/minio/release/linux-arm64/minio> |
| 64-bit PowerPC LE (ppc64le) | <https://dl.min.io/server/minio/release/linux-ppc64le/minio> |
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> [!NOTE]
> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
## Microsoft Windows
To run MinIO on 64-bit Windows hosts, download the MinIO executable from the following URL:
```sh
https://dl.min.io/server/minio/release/windows-amd64/minio.exe
```
Use the following command to run a standalone MinIO server on the Windows host. Replace ``D:\`` with the path to the drive or directory in which you want MinIO to store data. You must change the terminal or PowerShell directory to the location of the ``minio.exe`` executable, *or* add the path to that directory to the system ``$PATH``:
```sh
minio.exe server D:\
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> [!NOTE]
> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
## Install from Source
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). The minimum required version is [go1.24](https://golang.org/dl/#stable).
```sh
go install github.com/minio/minio@latest
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> [!NOTE]
> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.
## Deployment Recommendations
### Allow port access for Firewalls
By default MinIO uses the port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to the port.
### ufw
For hosts with ufw enabled (Debian-based distros), you can use the `ufw` command to allow traffic to specific ports. Use the command below to allow access to port 9000
```sh
ufw allow 9000
```
The command below allows all incoming traffic to ports 9000 through 9010.
```sh
ufw allow 9000:9010/tcp
```
### firewall-cmd
For hosts with firewall-cmd enabled (CentOS), you can use the `firewall-cmd` command to allow traffic to specific ports. Use the commands below to allow access to port 9000
```sh
firewall-cmd --get-active-zones
```
This command gets the active zone(s). Now, apply port rules to the relevant zones returned above. For example, if the zone is `public`, use
```sh
firewall-cmd --zone=public --add-port=9000/tcp --permanent
```
> [!NOTE]
> The `--permanent` flag makes sure the rules are persistent across firewall start, restart, or reload. Finally, reload the firewall for the changes to take effect.
```sh
firewall-cmd --reload
```
### iptables
For hosts with iptables enabled (RHEL, CentOS, etc.), you can use the `iptables` command to allow traffic to specific ports. Use the command below to allow
access to port 9000
```sh
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
service iptables restart
```
The command below allows all incoming traffic to ports 9000 through 9010.
```sh
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
```
## Test MinIO Connectivity
### Test using MinIO Console
MinIO Server comes with an embedded web-based object browser. Point your web browser to <http://127.0.0.1:9000> to ensure your server has started successfully.
> [!NOTE]
> MinIO runs the Console on a random port by default; if you wish to choose a specific port, use `--console-address` to pick a specific interface and port.
### Things to consider
MinIO redirects browser access requests to the configured server port (i.e. `127.0.0.1:9000`) to the configured Console port. MinIO uses the hostname or IP address specified in the request when building the redirect URL. The URL and port *must* be accessible by the client for the redirection to work.
For deployments behind a load balancer, proxy, or ingress rule where the MinIO host IP address or port is not public, use the `MINIO_BROWSER_REDIRECT_URL` environment variable to specify the external hostname for the redirect. The LB/Proxy must have rules for directing traffic to the Console port specifically.
For example, consider a MinIO deployment behind a proxy `https://minio.example.net`, `https://console.minio.example.net` with rules for forwarding traffic on port :9000 and :9001 to MinIO and the MinIO Console respectively on the internal network. Set `MINIO_BROWSER_REDIRECT_URL` to `https://console.minio.example.net` to ensure the browser receives a valid reachable URL.
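As a sketch using the example hostnames above, the redirect URL is set in the server's environment before startup:

```sh
# substitute your own console hostname; shown values are examples only
export MINIO_BROWSER_REDIRECT_URL=https://console.minio.example.net
minio server /data --console-address ":9001"
```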
| Dashboard | Creating a bucket |
| ------------- | ------------- |
| ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic1.png?raw=true) | ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic2.png?raw=true) |
## Test using MinIO Client `mc`
`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) for further instructions.
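As an illustrative session against the local quickstart deployment (default root credentials assumed; the alias name `myminio` is arbitrary):

```sh
mc alias set myminio http://127.0.0.1:9000 minioadmin minioadmin   # register the deployment
mc mb myminio/mybucket                                             # create a bucket
mc cp /etc/hosts myminio/mybucket/                                 # upload an object
mc ls myminio/mybucket                                             # list bucket contents
```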
## Upgrading MinIO
Upgrades in MinIO require zero downtime: all upgrades are non-disruptive and all transactions on MinIO are atomic, so upgrading all the servers simultaneously is the recommended way to upgrade MinIO.
> [!NOTE]
> Updating directly from <https://dl.min.io> requires internet access; optionally, you can host a mirror at <https://my-artifactory.example.com/minio/>
- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-update.html)
```sh
mc admin update <minio alias, e.g., myminio>
```
- For deployments without external internet access (e.g. air-gapped environments), download the binary from <https://dl.min.io> and replace the existing MinIO binary (for example `/opt/bin/minio`), apply executable permissions with `chmod +x /opt/bin/minio`, and then run `mc admin service restart alias/` (see the sketch after this list).
- For installations using the systemd MinIO service, upgrade via RPM/DEB packages **in parallel** on all servers, or replace the binary (for example `/opt/bin/minio`) on all nodes, apply executable permissions with `chmod +x /opt/bin/minio`, and then run `mc admin service restart alias/`.
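A minimal sketch of the manual binary replacement described above, assuming the binary lives at `/opt/bin/minio` and the deployment alias is `myminio` (repeat the replacement on every node before the restart):

```sh
# illustrative paths and alias; adjust to your deployment
curl -o /tmp/minio https://dl.min.io/server/minio/release/linux-amd64/minio
sudo install -m 755 /tmp/minio /opt/bin/minio   # replace the binary with executable permissions
mc admin service restart myminio/               # restart all nodes in one step
```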
### Upgrade Checklist
- Test all upgrades in a lower environment (DEV, QA, UAT) before applying to production. Performing blind upgrades in production environments carries significant risk.
- Read the release notes for MinIO *before* performing any upgrade; there is no requirement to upgrade to the latest release with every release. Some releases may not be relevant to your setup; avoid upgrading production environments unnecessarily.
- If you plan to use `mc admin update`, the MinIO process must have write access to the parent directory where the binary is located on the host system.
- `mc admin update` is not supported and should be avoided in Kubernetes/container environments; instead, upgrade containers by upgrading the relevant container images.
- **We do not recommend upgrading one MinIO server at a time; the product is designed to support parallel upgrades. Please follow our recommended guidelines.**
## Explore Further
- [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html)
- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html)
- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html)
## Contribute to MinIO Project
Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
## License
- MinIO source is licensed under the [GNU AGPLv3](https://github.com/minio/minio/blob/master/LICENSE).
- MinIO [documentation](https://github.com/minio/minio/tree/master/docs) is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).
- [License Compliance](https://github.com/minio/minio/blob/master/COMPLIANCE.md)

42
SECURITY.md Normal file
View File

@ -0,0 +1,42 @@
# Security Policy
## Supported Versions
We always provide security updates for the [latest release](https://github.com/minio/minio/releases/latest).
Whenever there is a security update, you just need to upgrade to the latest version.
## Reporting a Vulnerability
All security bugs in [minio/minio](https://github.com/minio/minio) (or other minio/* repositories)
should be reported by email to security@min.io. Your email will be acknowledged within 48 hours,
and you'll receive a more detailed response to your email within 72 hours indicating the next steps
in handling your report.
Please provide a detailed explanation of the issue. In particular, outline the type of the security
issue (DoS, authentication bypass, information disclosure, ...) and the assumptions you're making (e.g. do
you need access credentials for a successful exploit).
If you have not received a reply to your email within 48 hours, or you have not heard from the security team
for the past five days, please contact the security team directly:
- Primary security coordinator: aead@min.io
- Secondary coordinator: harsha@min.io
- If you receive no response: dev@min.io
### Disclosure Process
MinIO uses the following disclosure process:
1. Once the security report is received, one member of the security team tries to verify and reproduce
the issue and determines the impact it has.
2. A member of the security team will respond and either confirm or reject the security report.
If the report is rejected, the response explains why.
3. Code is audited to find any potential similar problems.
4. Fixes are prepared for the latest release.
5. On the date that the fixes are applied, a security advisory will be published on <https://blog.min.io>.
Please inform us in your report email whether MinIO should mention your contribution w.r.t. fixing
the security issue. By default MinIO will **not** publish this information to protect your privacy.
This process can take some time, especially when coordination is required with maintainers of other projects.
Every effort will be made to handle the bug in as timely a manner as possible; however, it's important that we
follow the process described above to ensure that disclosures are handled consistently.

38
VULNERABILITY_REPORT.md Normal file
View File

@ -0,0 +1,38 @@
# Vulnerability Management Policy
This document formally describes the process of addressing and managing a
reported vulnerability that has been found in the MinIO server code base,
any directly connected ecosystem component or a direct / indirect dependency
of the code base.
## Scope
The vulnerability management policy described in this document covers the
process of investigating, assessing and resolving a vulnerability report
opened by a MinIO employee or an external third party.
Therefore, it lists pre-conditions and actions that should be performed to
resolve and fix a reported vulnerability.
## Vulnerability Management Process
The vulnerability management process requires that the vulnerability report
contains the following information:
- The project / component that contains the reported vulnerability.
- A description of the vulnerability. In particular, the type of the
reported vulnerability and how it might be exploited. Alternatively,
a well-established vulnerability identifier, e.g. CVE number, can be
used instead.
Based on the description mentioned above, a MinIO engineer or security team
member investigates:
- Whether the reported vulnerability exists.
- The conditions that are required such that the vulnerability can be exploited.
- The steps required to fix the vulnerability.
In general, if the vulnerability exists in one of the MinIO code bases
itself - not in a code dependency - then MinIO will, if possible, fix
the vulnerability or implement reasonable countermeasures such that the
vulnerability cannot be exploited anymore.

1
_config.yml Normal file
View File

@ -0,0 +1 @@
theme: jekyll-theme-minimal

42
build.sh Normal file
View File

@ -0,0 +1,42 @@
#!/bin/bash
set -e # exit immediately on error
IMAGE_NAME="plugai"
CONTAINER_NAME="minio"
DATA_DIR="/data/updsrv/cloud"
# Optional: configure a build proxy (comment out or customize as needed)
export http_proxy="http://127.0.0.1:7890"
export https_proxy="http://127.0.0.1:7890"
echo "🧹 Step 1: 删除旧容器和镜像..."
docker rm -f $CONTAINER_NAME 2>/dev/null || true
docker rmi -f $IMAGE_NAME:latest 2>/dev/null || true
echo "🛠️ Step 2: 编译 minio 二进制..."
MINIO_RELEASE=on make
if [ ! -f ./minio ]; then
echo "❌ 编译失败,未找到 minio 可执行文件"
exit 1
fi
echo "🐳 Step 3: 构建 Docker 镜像..."
docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy -t $IMAGE_NAME:latest .
echo "🚀 Step 4: 运行新容器(使用端口映射)..."
docker run -d \
--name $CONTAINER_NAME \
--restart unless-stopped \
-p 9000:9000 \
-p 9001:9001 \
-e MINIO_ROOT_USER=admin \
-e MINIO_ROOT_PASSWORD=Admin@123.. \
-e MINIO_BROWSER_REDIRECT_URL=https://console.szaiai.com \
-v "$DATA_DIR":/data \
$IMAGE_NAME:latest server /data --console-address "0.0.0.0:9001"
echo "✅ MinIO 已启动:"
echo "📦 API 接口: http://localhost:9000"
echo "🖥️ 控制台: http://localhost:9001"

143
buildscripts/checkdeps.sh Executable file
View File

@ -0,0 +1,143 @@
#!/usr/bin/env bash
#
_init() {
shopt -s extglob
## Minimum required versions for build dependencies
GIT_VERSION="1.0"
GO_VERSION="1.16"
OSX_VERSION="10.8"
KNAME=$(uname -s)
ARCH=$(uname -m)
case "${KNAME}" in
SunOS)
ARCH=$(isainfo -k)
;;
esac
}
## FIXME:
## In OSX, 'readlink -f' option does not exist, hence
## we have our own readlink -f behavior here.
## Once OSX has the option, below function is good enough.
##
## readlink() {
## return /bin/readlink -f "$1"
## }
##
readlink() {
TARGET_FILE=$1
cd $(dirname $TARGET_FILE)
TARGET_FILE=$(basename $TARGET_FILE)
# Iterate down a (possible) chain of symlinks
while [ -L "$TARGET_FILE" ]; do
TARGET_FILE=$(env readlink $TARGET_FILE)
cd $(dirname $TARGET_FILE)
TARGET_FILE=$(basename $TARGET_FILE)
done
# Compute the canonicalized name by finding the physical path
# for the directory we're in and appending the target file.
PHYS_DIR=$(pwd -P)
RESULT=$PHYS_DIR/$TARGET_FILE
echo $RESULT
}
## FIXME:
## In OSX, 'sort -V' option does not exist, hence
## we have our own version compare function.
## Once OSX has the option, below function is good enough.
##
## check_minimum_version() {
## versions=($(echo -e "$1\n$2" | sort -V))
## return [ "$1" == "${versions[0]}" ]
## }
##
check_minimum_version() {
IFS='.' read -r -a varray1 <<<"$1"
IFS='.' read -r -a varray2 <<<"$2"
for i in "${!varray1[@]}"; do
if [[ ${varray1[i]} -lt ${varray2[i]} ]]; then
return 0
elif [[ ${varray1[i]} -gt ${varray2[i]} ]]; then
return 1
fi
done
return 0
}
assert_is_supported_arch() {
case "${ARCH}" in
x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64)
return
;;
*)
echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64]"
exit 1
;;
esac
}
assert_is_supported_os() {
case "${KNAME}" in
Linux | FreeBSD | OpenBSD | NetBSD | DragonFly | SunOS)
return
;;
Darwin)
osx_host_version=$(env sw_vers -productVersion)
if ! check_minimum_version "${OSX_VERSION}" "${osx_host_version}"; then
echo "OSX version '${osx_host_version}' is not supported. Minimum supported version: ${OSX_VERSION}"
exit 1
fi
return
;;
*)
echo "OS '${KNAME}' is not supported. Supported OS: [Linux, FreeBSD, OpenBSD, NetBSD, Darwin, DragonFly]"
exit 1
;;
esac
}
assert_check_golang_env() {
if ! which go >/dev/null 2>&1; then
echo "Cannot find go binary in your PATH configuration, please refer to Go installation document at https://golang.org/doc/install"
exit 1
fi
installed_go_version=$(go version | sed 's/^.* go\([0-9.]*\).*$/\1/')
if ! check_minimum_version "${GO_VERSION}" "${installed_go_version}"; then
echo "Go runtime version '${installed_go_version}' is unsupported. Minimum supported version: ${GO_VERSION} to compile."
exit 1
fi
}
assert_check_deps() {
# support unusual Git versions such as: 2.7.4 (Apple Git-66)
installed_git_version=$(git version | perl -ne '$_ =~ m/git version (.*?)( |$)/; print "$1\n";')
if ! check_minimum_version "${GIT_VERSION}" "${installed_git_version}"; then
echo "Git version '${installed_git_version}' is not supported. Minimum supported version: ${GIT_VERSION}"
exit 1
fi
}
main() {
## Check for supported arch
assert_is_supported_arch
## Check for supported os
assert_is_supported_os
## Check for Go environment
assert_check_golang_env
## Check for dependencies
assert_check_deps
}
_init && main "$@"

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

37
buildscripts/cross-compile.sh Executable file
View File

@ -0,0 +1,37 @@
#!/bin/bash
set -e
# Enable tracing if set.
[ -n "$BASH_XTRACEFD" ] && set -x
function _init() {
## All binaries are static make sure to disable CGO.
export CGO_ENABLED=0
## List of architectures and OS to test cross compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64"
}
function _build() {
local osarch=$1
IFS=/ read -r -a arr <<<"$osarch"
os="${arr[0]}"
arch="${arr[1]}"
package=$(go list -f '{{.ImportPath}}')
printf -- "--> %15s:%s\n" "${osarch}" "${package}"
# go build -trimpath to build the binary.
export GOOS=$os
export GOARCH=$arch
export GO111MODULE=on
go build -trimpath -tags kqueue -o /dev/null
}
function main() {
echo "Testing builds for OS/Arch: ${SUPPORTED_OSARCH}"
for each_osarch in ${SUPPORTED_OSARCH}; do
_build "${each_osarch}"
done
}
_init && main "$@"

124
buildscripts/disable-root.sh Executable file
View File

@ -0,0 +1,124 @@
#!/bin/bash
set -x
export MINIO_CI_CD=1
killall -9 minio
rm -rf ${HOME}/tmp/dist
scheme="http"
nr_servers=4
addr="localhost"
args=""
for ((i = 0; i < $((nr_servers)); i++)); do
args="$args $scheme://$addr:$((9100 + i))/${HOME}/tmp/dist/path1/$i"
done
echo $args
for ((i = 0; i < $((nr_servers)); i++)); do
(minio server --address ":$((9100 + i))" $args 2>&1 >/tmp/log$i.txt) &
done
sleep 10s
if [ ! -f ./mc ]; then
wget --quiet -O ./mc https://dl.minio.io/client/mc/release/linux-amd64/./mc &&
chmod +x mc
fi
set +e
export MC_HOST_minioadm=http://minioadmin:minioadmin@localhost:9100/
./mc ready minioadm
./mc ls minioadm/
./mc admin config set minioadm/ api root_access=off
sleep 3s # let things settle a little
./mc ls minioadm/
if [ $? -eq 0 ]; then
echo "listing succeeded, 'minioadmin' was not disabled"
exit 1
fi
set -e
killall -9 minio
export MINIO_API_ROOT_ACCESS=on
for ((i = 0; i < $((nr_servers)); i++)); do
(minio server --address ":$((9100 + i))" $args 2>&1 >/tmp/log$i.txt) &
done
set +e
./mc ready minioadm/
./mc ls minioadm/
if [ $? -ne 0 ]; then
echo "listing failed, 'minioadmin' should be enabled"
exit 1
fi
killall -9 minio
rm -rf /tmp/multisitea/
rm -rf /tmp/multisiteb/
echo "Setup site-replication and then disable root credentials"
minio server --address 127.0.0.1:9001 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 &
minio server --address 127.0.0.1:9002 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_2.log 2>&1 &
minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_1.log 2>&1 &
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
export MC_HOST_sitea=http://minioadmin:minioadmin@127.0.0.1:9001
export MC_HOST_siteb=http://minioadmin:minioadmin@127.0.0.1:9004
./mc ready sitea
./mc ready siteb
./mc admin replicate add sitea siteb
./mc admin user add sitea foobar foo12345
./mc admin policy attach sitea/ consoleAdmin --user=foobar
./mc admin user info siteb foobar
killall -9 minio
echo "turning off root access, however site replication must continue"
export MINIO_API_ROOT_ACCESS=off
minio server --address 127.0.0.1:9001 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 &
minio server --address 127.0.0.1:9002 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_2.log 2>&1 &
minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_1.log 2>&1 &
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
export MC_HOST_sitea=http://foobar:foo12345@127.0.0.1:9001
export MC_HOST_siteb=http://foobar:foo12345@127.0.0.1:9004
./mc ready sitea
./mc ready siteb
./mc admin user add sitea foobar-admin foo12345
sleep 2s
./mc admin user info siteb foobar-admin

121
buildscripts/gen-ldflags.go Normal file
View File

@ -0,0 +1,121 @@
//go:build ignore
// +build ignore
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"fmt"
"os"
"os/exec"
"strconv"
"strings"
"time"
)
func genLDFlags(version string) string {
releaseTag, date := releaseTag(version)
copyrightYear := strconv.Itoa(date.Year())
ldflagsStr := "-s -w"
ldflagsStr += " -X github.com/minio/minio/cmd.Version=" + version
ldflagsStr += " -X github.com/minio/minio/cmd.CopyrightYear=" + copyrightYear
ldflagsStr += " -X github.com/minio/minio/cmd.ReleaseTag=" + releaseTag
ldflagsStr += " -X github.com/minio/minio/cmd.CommitID=" + commitID()
ldflagsStr += " -X github.com/minio/minio/cmd.ShortCommitID=" + commitID()[:12]
ldflagsStr += " -X github.com/minio/minio/cmd.GOPATH=" + os.Getenv("GOPATH")
ldflagsStr += " -X github.com/minio/minio/cmd.GOROOT=" + os.Getenv("GOROOT")
return ldflagsStr
}
// releaseTag returns the release tag and its release time for the given version string.
func releaseTag(version string) (string, time.Time) {
relPrefix := "DEVELOPMENT"
if prefix := os.Getenv("MINIO_RELEASE"); prefix != "" {
relPrefix = prefix
}
relSuffix := ""
if hotfix := os.Getenv("MINIO_HOTFIX"); hotfix != "" {
relSuffix = hotfix
}
relTag := strings.Replace(version, " ", "-", -1)
relTag = strings.Replace(relTag, ":", "-", -1)
t, err := time.Parse("2006-01-02T15-04-05Z", relTag)
if err != nil {
panic(err)
}
relTag = strings.Replace(relTag, ",", "", -1)
relTag = relPrefix + "." + relTag
if relSuffix != "" {
relTag += "." + relSuffix
}
return relTag, t
}
// commitID returns the abbreviated commit-id hash of the last commit.
func commitID() string {
// git log --format="%H" -n1
var (
commit []byte
err error
)
cmdName := "git"
cmdArgs := []string{"log", "--format=%H", "-n1"}
if commit, err = exec.Command(cmdName, cmdArgs...).Output(); err != nil {
fmt.Fprintln(os.Stderr, "Error generating git commit-id: ", err)
os.Exit(1)
}
return strings.TrimSpace(string(commit))
}
func commitTime() time.Time {
// git log --format=%cD -n1
var (
commitUnix []byte
err error
)
cmdName := "git"
cmdArgs := []string{"log", "--format=%cI", "-n1"}
if commitUnix, err = exec.Command(cmdName, cmdArgs...).Output(); err != nil {
fmt.Fprintln(os.Stderr, "Error generating git commit-time: ", err)
os.Exit(1)
}
t, err := time.Parse(time.RFC3339, strings.TrimSpace(string(commitUnix)))
if err != nil {
fmt.Fprintln(os.Stderr, "Error generating git commit-time: ", err)
os.Exit(1)
}
return t.UTC()
}
func main() {
var version string
if len(os.Args) > 1 {
version = os.Args[1]
} else {
version = commitTime().Format(time.RFC3339)
}
fmt.Println(genLDFlags(version))
}

View File

@ -0,0 +1,92 @@
#!/bin/bash -e
set -E
set -o pipefail
set -x
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
function start_minio_4drive() {
start_port=$1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export MINIO_CI_CD=1
mkdir ${WORK_DIR}
C_PWD=${PWD}
if [ ! -x "$PWD/mc" ]; then
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"
fi
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/disk{1...4}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 5
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${PWD}/mc" mb --with-versioning minio/bucket
for i in $(seq 1 4); do
"${PWD}/mc" cp /etc/hosts minio/bucket/testobj
sudo chown -R root. "${WORK_DIR}/disk${i}"
"${PWD}/mc" cp /etc/hosts minio/bucket/testobj
sudo chown -R ${USER}. "${WORK_DIR}/disk${i}"
done
for vid in $("${PWD}/mc" ls --json --versions minio/bucket/testobj | jq -r .versionId); do
"${PWD}/mc" cat --vid "${vid}" minio/bucket/testobj | md5sum
done
pkill minio
sleep 3
}
function main() {
start_port=$(shuf -i 10000-65000 -n 1)
start_minio_4drive ${start_port}
}
function purge() {
rm -rf "$1"
}
(main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"

View File

@ -0,0 +1,86 @@
//go:build ignore
// +build ignore
//
// MinIO Object Storage (c) 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"os"
"time"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New(os.Args[1], os.Args[2], os.Args[3], false)
if err != nil {
log.Fatalln(err)
}
opts := madmin.HealOpts{
Recursive: true, // recursively heal all objects at 'prefix'
Remove: true, // remove content that has lost quorum and not recoverable
ScanMode: madmin.HealNormalScan, // by default do not do 'deep' scanning
}
start, _, err := madmClnt.Heal(context.Background(), "healing-rewrite-bucket", "", opts, "", false, false)
if err != nil {
log.Fatalln(err)
}
fmt.Println("Healstart sequence ===")
enc := json.NewEncoder(os.Stdout)
if err = enc.Encode(&start); err != nil {
log.Fatalln(err)
}
fmt.Println()
for {
_, status, err := madmClnt.Heal(context.Background(), "healing-rewrite-bucket", "", opts, start.ClientToken, false, false)
if status.Summary == "finished" {
fmt.Println("Healstatus on items ===")
for _, item := range status.Items {
if err = enc.Encode(&item); err != nil {
log.Fatalln(err)
}
}
break
}
if status.Summary == "stopped" {
fmt.Println("Healstatus on items ===")
fmt.Println("Heal failed with", status.FailureDetail)
break
}
for _, item := range status.Items {
if err = enc.Encode(&item); err != nil {
log.Fatalln(err)
}
}
time.Sleep(time.Second)
}
}

View File

@ -0,0 +1,127 @@
#!/bin/bash
# This script is used to test the migration of IAM content from old minio
# instance to new minio instance.
#
# To run it locally, start the LDAP server in github.com/minio/minio-iam-testing
# repo (e.g. make podman-run), and then run this script.
#
# This script assumes that LDAP server is at:
#
# `localhost:389`
#
# if this is not the case, set the environment variable
# `_MINIO_LDAP_TEST_SERVER`.
OLD_VERSION=RELEASE.2024-03-26T22-10-45Z
OLD_BINARY_LINK=https://dl.min.io/server/minio/release/linux-amd64/archive/minio.${OLD_VERSION}
__init__() {
if which curl &>/dev/null; then
echo "curl is already installed"
else
echo "Installing curl:"
sudo apt install curl -y
fi
export GOPATH=/tmp/gopath
export PATH="${PATH}":"${GOPATH}"/bin
if which mc &>/dev/null; then
echo "mc is already installed"
else
echo "Installing mc:"
go install github.com/minio/mc@latest
fi
if [ ! -x ./minio.${OLD_VERSION} ]; then
echo "Downloading minio.${OLD_VERSION} binary"
curl -o minio.${OLD_VERSION} ${OLD_BINARY_LINK}
chmod +x minio.${OLD_VERSION}
fi
if [ -z "$_MINIO_LDAP_TEST_SERVER" ]; then
export _MINIO_LDAP_TEST_SERVER=localhost:389
echo "Using default LDAP endpoint: $_MINIO_LDAP_TEST_SERVER"
fi
rm -rf /tmp/data
}
create_iam_content_in_old_minio() {
echo "Creating IAM content in old minio instance."
MINIO_CI_CD=1 ./minio.${OLD_VERSION} server /tmp/data/{1...4} &
sleep 5
set -x
mc alias set old-minio http://localhost:9000 minioadmin minioadmin
mc ready old-minio
mc idp ldap add old-minio \
server_addr=localhost:389 \
server_insecure=on \
lookup_bind_dn=cn=admin,dc=min,dc=io \
lookup_bind_password=admin \
user_dn_search_base_dn=dc=min,dc=io \
user_dn_search_filter="(uid=%s)" \
group_search_base_dn=ou=swengg,dc=min,dc=io \
group_search_filter="(&(objectclass=groupOfNames)(member=%d))"
mc admin service restart old-minio
mc idp ldap policy attach old-minio readwrite --user=UID=dillon,ou=people,ou=swengg,dc=min,dc=io
mc idp ldap policy attach old-minio readwrite --group=CN=project.c,ou=groups,ou=swengg,dc=min,dc=io
mc idp ldap policy entities old-minio
mc admin cluster iam export old-minio
set +x
mc admin service stop old-minio
}
import_iam_content_in_new_minio() {
echo "Importing IAM content in new minio instance."
# Assume current minio binary exists.
MINIO_CI_CD=1 ./minio server /tmp/data/{1...4} &
sleep 5
set -x
mc alias set new-minio http://localhost:9000 minioadmin minioadmin
echo "BEFORE IMPORT mappings:"
mc ready new-minio
mc idp ldap policy entities new-minio
mc admin cluster iam import new-minio ./old-minio-iam-info.zip
echo "AFTER IMPORT mappings:"
mc idp ldap policy entities new-minio
set +x
# mc admin service stop new-minio
}
verify_iam_content_in_new_minio() {
output=$(mc idp ldap policy entities new-minio --json)
groups=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .groups[]')
if [ "$groups" != "cn=project.c,ou=groups,ou=swengg,dc=min,dc=io" ]; then
echo "Failed to verify groups: $groups"
exit 1
fi
users=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .users[]')
if [ "$users" != "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" ]; then
echo "Failed to verify users: $users"
exit 1
fi
mc admin service stop new-minio
}
main() {
create_iam_content_in_old_minio
import_iam_content_in_new_minio
verify_iam_content_in_new_minio
}
(__init__ "$@" && main "$@")

113
buildscripts/minio-upgrade.sh Executable file
View File

@ -0,0 +1,113 @@
#!/bin/bash
trap 'cleanup $LINENO' ERR
# shellcheck disable=SC2120
cleanup() {
MINIO_VERSION=dev /tmp/gopath/bin/docker-compose \
-f "buildscripts/upgrade-tests/compose.yml" \
down || true
MINIO_VERSION=dev /tmp/gopath/bin/docker-compose \
-f "buildscripts/upgrade-tests/compose.yml" \
rm || true
for volume in $(docker volume ls -q | grep upgrade); do
docker volume rm ${volume} || true
done
docker volume prune -f
docker system prune -f || true
docker volume prune -f || true
docker volume rm $(docker volume ls -q -f dangling=true) || true
}
verify_checksum_after_heal() {
local sum1
sum1=$(curl -s "$2" | sha256sum)
mc admin heal --json -r "$1" >/dev/null # test after healing
local sum1_heal
sum1_heal=$(curl -s "$2" | sha256sum)
if [ "${sum1_heal}" != "${sum1}" ]; then
echo "mismatch expected ${sum1_heal}, got ${sum1}"
exit 1
fi
}
verify_checksum_mc() {
local expected
expected=$(mc cat "$1" | sha256sum)
local got
got=$(mc cat "$2" | sha256sum)
if [ "${expected}" != "${got}" ]; then
echo "mismatch - expected ${expected}, got ${got}"
exit 1
fi
echo "matches - ${expected}, got ${got}"
}
add_alias() {
for i in $(seq 1 4); do
echo "... attempting to add alias $i"
until (mc alias set minio http://127.0.0.1:9000 minioadmin minioadmin); do
echo "...waiting... for 5secs" && sleep 5
done
done
echo "Sleeping for nginx"
sleep 20
}
__init__() {
sudo apt install curl -y
export GOPATH=/tmp/gopath
export PATH=${PATH}:${GOPATH}/bin
go install github.com/minio/mc@latest
## this is needed because github actions don't have
## docker-compose on all runners
COMPOSE_VERSION=v2.35.1
mkdir -p /tmp/gopath/bin/
wget -O /tmp/gopath/bin/docker-compose https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-linux-x86_64
chmod +x /tmp/gopath/bin/docker-compose
cleanup
TAG=minio/minio:dev make docker
MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \
-f "buildscripts/upgrade-tests/compose.yml" \
up -d --build
add_alias
mc mb minio/minio-test/
mc cp ./minio minio/minio-test/to-read/
mc cp /etc/hosts minio/minio-test/to-read/hosts
mc anonymous set download minio/minio-test
verify_checksum_mc ./minio minio/minio-test/to-read/minio
curl -s http://127.0.0.1:9000/minio-test/to-read/hosts | sha256sum
MINIO_VERSION=dev /tmp/gopath/bin/docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
}
main() {
MINIO_VERSION=dev /tmp/gopath/bin/docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build
add_alias
verify_checksum_after_heal minio/minio-test http://127.0.0.1:9000/minio-test/to-read/hosts
verify_checksum_mc ./minio minio/minio-test/to-read/minio
verify_checksum_mc /etc/hosts minio/minio-test/to-read/hosts
cleanup
}
(__init__ "$@" && main "$@")

View File

@ -0,0 +1,126 @@
#!/bin/bash
if [ -n "$TEST_DEBUG" ]; then
set -x
fi
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
trap 'catch $LINENO' ERR
function purge() {
rm -rf "$1"
}
# shellcheck disable=SC2120
catch() {
if [ $# -ne 0 ]; then
echo "error on line $1"
fi
echo "Cleaning up instances of MinIO"
pkill minio || true
pkill -9 minio || true
purge "$WORK_DIR"
if [ $# -ne 0 ]; then
exit $#
fi
}
catch
function start_minio_10drive() {
start_port=$1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export MINIO_CI_CD=1
mkdir ${WORK_DIR}
C_PWD=${PWD}
if [ ! -x "$PWD/mc" ]; then
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"
fi
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/disk{1...10}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 5
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${PWD}/mc" mb --with-versioning minio/bucket
export AWS_ACCESS_KEY_ID=minio
export AWS_SECRET_ACCESS_KEY=minio123
aws --endpoint-url http://localhost:"$start_port" s3api create-multipart-upload --bucket bucket --key obj-1 >upload-id.json
uploadId=$(jq -r '.UploadId' upload-id.json)
truncate -s 5MiB file-5mib
for i in {1..2}; do
aws --endpoint-url http://localhost:"$start_port" s3api upload-part \
--upload-id "$uploadId" --bucket bucket --key obj-1 \
--part-number "$i" --body ./file-5mib
done
for i in {1..6}; do
find ${WORK_DIR}/disk${i}/.minio.sys/multipart/ -type f -name "part.1" -delete
done
cat <<EOF >parts.json
{
"Parts": [
{
"PartNumber": 1,
"ETag": "5f363e0e58a95f06cbe9bbc662c5dfb6"
},
{
"PartNumber": 2,
"ETag": "5f363e0e58a95f06cbe9bbc662c5dfb6"
}
]
}
EOF
err=$(aws --endpoint-url http://localhost:"$start_port" s3api complete-multipart-upload --upload-id "$uploadId" --bucket bucket --key obj-1 --multipart-upload file://./parts.json 2>&1)
rv=$?
if [ $rv -eq 0 ]; then
echo "Failed to receive an error"
exit 1
fi
echo "Received an error during complete-multipart as expected: $err"
}
function main() {
start_port=$(shuf -i 10000-65000 -n 1)
start_minio_10drive ${start_port}
}
main "$@"

10
buildscripts/race.sh Executable file
View File

@ -0,0 +1,10 @@
#!/usr/bin/env bash
set -e
export GORACE="history_size=7"
export MINIO_API_REQUESTS_MAX=10000
for d in $(go list ./...); do
CGO_ENABLED=1 go test -v -race --timeout 100m "$d"
done

View File

@ -0,0 +1,72 @@
#!/bin/bash -e
set -E
set -o pipefail
set -x
set -e
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
function start_minio_5drive() {
start_port=$1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export MINIO_CI_CD=1
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"
"${WORK_DIR}/mc" cp --quiet -r "buildscripts/cicd-corpus/" "${WORK_DIR}/cicd-corpus/"
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/cicd-corpus/disk{1...5}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 5
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${WORK_DIR}/mc" stat minio/bucket/testobj
pkill minio
sleep 3
}
function main() {
start_port=$(shuf -i 10000-65000 -n 1)
start_minio_5drive ${start_port}
}
function purge() {
rm -rf "$1"
}
(main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"

157
buildscripts/rewrite-old-new.sh Executable file
View File

@ -0,0 +1,157 @@
#!/bin/bash -e
set -E
set -o pipefail
set -x
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO_OLD=("$PWD/minio.RELEASE.2020-10-28T08-16-50Z" --config-dir "$MINIO_CONFIG_DIR" server)
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
function download_old_release() {
if [ ! -f minio.RELEASE.2020-10-28T08-16-50Z ]; then
curl --silent -O https://dl.minio.io/server/minio/release/linux-amd64/archive/minio.RELEASE.2020-10-28T08-16-50Z
chmod a+x minio.RELEASE.2020-10-28T08-16-50Z
fi
}
function verify_rewrite() {
start_port=$1
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export MINIO_CI_CD=1
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"
"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
"${WORK_DIR}/mc" ready minio/
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${WORK_DIR}/mc" mb minio/healing-rewrite-bucket --quiet --with-lock
"${WORK_DIR}/mc" cp \
buildscripts/verify-build.sh \
minio/healing-rewrite-bucket/ \
--disable-multipart --quiet
"${WORK_DIR}/mc" cp \
buildscripts/verify-build.sh \
minio/healing-rewrite-bucket/ \
--disable-multipart --quiet
"${WORK_DIR}/mc" cp \
buildscripts/verify-build.sh \
minio/healing-rewrite-bucket/ \
--disable-multipart --quiet
kill ${pid}
sleep 3
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
"${WORK_DIR}/mc" ready minio/
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! ./s3-check-md5 \
-debug \
-versions \
-access-key minio \
-secret-key minio123 \
-endpoint "http://127.0.0.1:${start_port}/" 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(
cd inspects
"${WORK_DIR}/mc" admin inspect minio/healing-rewrite-bucket/verify-build.sh/**
)
"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects
purge "$WORK_DIR"
exit 1
fi
go run ./buildscripts/heal-manual.go "127.0.0.1:${start_port}" "minio" "minio123"
sleep 1
if ! ./s3-check-md5 \
-debug \
-versions \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(
cd inspects
"${WORK_DIR}/mc" admin inspect minio/healing-rewrite-bucket/verify-build.sh/**
)
"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects
purge "$WORK_DIR"
exit 1
fi
kill ${pid}
}
function main() {
download_old_release
start_port=$(shuf -i 10000-65000 -n 1)
verify_rewrite ${start_port}
}
function purge() {
rm -rf "$1"
}
(main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"

View File

@ -0,0 +1,137 @@
#!/bin/bash
if [ -n "$TEST_DEBUG" ]; then
set -x
fi
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
trap 'catch $LINENO' ERR
function purge() {
rm -rf "$1"
}
# shellcheck disable=SC2120
catch() {
if [ $# -ne 0 ]; then
echo "error on line $1"
fi
echo "Cleaning up instances of MinIO"
pkill minio || true
pkill -9 minio || true
purge "$WORK_DIR"
if [ $# -ne 0 ]; then
exit $#
fi
}
catch
function gen_put_request() {
hdr_sleep=$1
body_sleep=$2
echo "PUT /testbucket/testobject HTTP/1.1"
sleep $hdr_sleep
echo "Host: foo-header"
echo "User-Agent: curl/8.2.1"
echo "Accept: */*"
echo "Content-Length: 30"
echo ""
sleep $body_sleep
echo "random line 0"
echo "random line 1"
echo ""
echo ""
}
function send_put_object_request() {
hdr_timeout=$1
body_timeout=$2
start=$(date +%s)
timeout 5m bash -c "gen_put_request $hdr_timeout $body_timeout | netcat 127.0.0.1 $start_port | read" || return 1
[ $(($(date +%s) - start)) -gt $((srv_hdr_timeout + srv_idle_timeout + 1)) ] && return 1
return 0
}
function test_minio_with_timeout() {
start_port=$1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
export MINIO_CI_CD=1
mkdir ${WORK_DIR}
C_PWD=${PWD}
if [ ! -x "$PWD/mc" ]; then
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"
fi
"${MINIO[@]}" --address ":$start_port" --read-header-timeout ${srv_hdr_timeout}s --idle-timeout ${srv_idle_timeout}s "${WORK_DIR}/disk/" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 1
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
set -e
"${PWD}/mc" mb minio/testbucket
"${PWD}/mc" anonymous set public minio/testbucket
# slow header writing
send_put_object_request 20 0 && exit 1
"${PWD}/mc" stat minio/testbucket/testobject && exit 1
# quick header write and slow body write
send_put_object_request 0 40 && exit 1
"${PWD}/mc" stat minio/testbucket/testobject && exit 1
# quick header and body write
send_put_object_request 1 1 || exit 1
"${PWD}/mc" stat minio/testbucket/testobject || exit 1
}
function main() {
export start_port=$(shuf -i 10000-65000 -n 1)
export srv_hdr_timeout=5
export srv_idle_timeout=5
export -f gen_put_request
test_minio_with_timeout ${start_port}
}
main "$@"

View File

@ -0,0 +1,74 @@
# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: minio/minio:${MINIO_VERSION}
command: server http://minio{1...4}/data{1...3}
env_file:
- ./minio.env
expose:
- "9000"
- "9001"
# Starts 4 docker containers running minio server instances.
# Using nginx as a reverse proxy and load balancer, you can access
# them through port 9000.
services:
minio1:
<<: *minio-common
hostname: minio1
volumes:
- data1-1:/data1
- data1-2:/data2
- data1-3:/data3
minio2:
<<: *minio-common
hostname: minio2
volumes:
- data2-1:/data1
- data2-2:/data2
- data2-3:/data3
minio3:
<<: *minio-common
hostname: minio3
volumes:
- data3-1:/data1
- data3-2:/data2
- data3-3:/data3
minio4:
<<: *minio-common
hostname: minio4
volumes:
- data4-1:/data1
- data4-2:/data2
- data4-3:/data3
nginx:
image: nginx:1.19.2-alpine
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf:ro
ports:
- "9000:9000"
- "9001:9001"
depends_on:
- minio1
- minio2
- minio3
- minio4
## By default this config uses the default local driver.
## For custom volumes, replace with your volume driver configuration.
volumes:
data1-1:
data1-2:
data1-3:
data2-1:
data2-2:
data2-3:
data3-1:
data3-2:
data3-3:
data4-1:
data4-2:
data4-3:
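## A usage sketch (assumptions: docker compose v2 is available, the mc alias name
## "local" is arbitrary, and MINIO_VERSION must be exported as a valid minio/minio
## image tag; the tag below is hypothetical). Bring the stack up and point mc at the
## nginx endpoint published on port 9000, using the credentials from minio.env:
##   export MINIO_VERSION=RELEASE.2024-01-01T00-00-00Z   # hypothetical tag, use a real release
##   docker compose up -d
##   mc alias set local http://127.0.0.1:9000 minioadmin minioadmin
##   mc admin info local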

View File

@ -0,0 +1,3 @@
MINIO_ACCESS_KEY=minioadmin
MINIO_SECRET_KEY=minioadmin
MINIO_BROWSER=off

View File

@ -0,0 +1,68 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
# include /etc/nginx/conf.d/*.conf;
upstream minio {
server minio1:9000;
server minio2:9000;
server minio3:9000;
server minio4:9000;
}
# main minio
server {
listen 9000;
listen [::]:9000;
server_name localhost;
# To allow special characters in headers
ignore_invalid_headers off;
# Allow any size file to be uploaded.
# Set to a value such as 1000m to restrict the maximum upload size to a specific value
client_max_body_size 0;
# To disable buffering
proxy_buffering off;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_connect_timeout 300;
# Default is HTTP/1, keepalive is only enabled in HTTP/1.1
proxy_http_version 1.1;
proxy_set_header Connection "";
chunked_transfer_encoding off;
proxy_pass http://minio;
}
}
}

298
buildscripts/verify-build.sh Executable file
View File

@ -0,0 +1,298 @@
#!/bin/bash
#
set -e
set -E
set -o pipefail
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
WORK_DIR="$PWD/.verify-$RANDOM"
export MINT_MODE=core
export MINT_DATA_DIR="$WORK_DIR/data"
export SERVER_ENDPOINT="127.0.0.1:9000"
export MC_HOST_verify="http://minio:minio123@${SERVER_ENDPOINT}/"
export MC_HOST_verify_ipv6="http://minio:minio123@[::1]:9000/"
export ACCESS_KEY="minio"
export SECRET_KEY="minio123"
export ENABLE_HTTPS=0
export GO111MODULE=on
export GOGC=25
export ENABLE_ADMIN=1
export MINIO_CI_CD=1
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR")
FILE_1_MB="$MINT_DATA_DIR/datafile-1-MB"
FILE_65_MB="$MINT_DATA_DIR/datafile-65-MB"
FUNCTIONAL_TESTS="$WORK_DIR/functional-tests.sh"
function start_minio_fs() {
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
"${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
"${WORK_DIR}/mc" ready verify
}
function start_minio_erasure() {
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
"${WORK_DIR}/mc" ready verify
}
function start_minio_erasure_sets() {
export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}"
"${MINIO[@]}" server >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
"${WORK_DIR}/mc" ready verify
}
function start_minio_pool_erasure_sets() {
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/pool-disk-sets{1...4} http://127.0.0.1:9001${WORK_DIR}/pool-disk-sets{5...8}"
"${MINIO[@]}" server --address ":9000" >"$WORK_DIR/pool-minio-9000.log" 2>&1 &
"${MINIO[@]}" server --address ":9001" >"$WORK_DIR/pool-minio-9001.log" 2>&1 &
"${WORK_DIR}/mc" ready verify
}
function start_minio_pool_erasure_sets_ipv6() {
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
export MINIO_ENDPOINTS="http://[::1]:9000${WORK_DIR}/pool-disk-sets-ipv6{1...4} http://[::1]:9001${WORK_DIR}/pool-disk-sets-ipv6{5...8}"
"${MINIO[@]}" server --address="[::1]:9000" >"$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
"${MINIO[@]}" server --address="[::1]:9001" >"$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &
"${WORK_DIR}/mc" ready verify_ipv6
}
function start_minio_dist_erasure() {
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/dist-disk1 http://127.0.0.1:9001${WORK_DIR}/dist-disk2 http://127.0.0.1:9002${WORK_DIR}/dist-disk3 http://127.0.0.1:9003${WORK_DIR}/dist-disk4"
for i in $(seq 0 3); do
"${MINIO[@]}" server --address ":900${i}" >"$WORK_DIR/dist-minio-900${i}.log" 2>&1 &
done
"${WORK_DIR}/mc" ready verify
}
function run_test_fs() {
start_minio_fs
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
cat "$WORK_DIR/fs-minio.log"
fi
rm -f "$WORK_DIR/fs-minio.log"
return "$rv"
}
function run_test_erasure_sets() {
start_minio_erasure_sets
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
cat "$WORK_DIR/erasure-minio-sets.log"
fi
rm -f "$WORK_DIR/erasure-minio-sets.log"
return "$rv"
}
function run_test_pool_erasure_sets() {
start_minio_pool_erasure_sets
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
for i in $(seq 0 1); do
echo "server$i log:"
cat "$WORK_DIR/pool-minio-900$i.log"
done
fi
for i in $(seq 0 1); do
rm -f "$WORK_DIR/pool-minio-900$i.log"
done
return "$rv"
}
function run_test_pool_erasure_sets_ipv6() {
start_minio_pool_erasure_sets_ipv6
export SERVER_ENDPOINT="[::1]:9000"
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
for i in $(seq 0 1); do
echo "server$i log:"
cat "$WORK_DIR/pool-minio-ipv6-900$i.log"
done
fi
for i in $(seq 0 1); do
rm -f "$WORK_DIR/pool-minio-ipv6-900$i.log"
done
return "$rv"
}
function run_test_erasure() {
start_minio_erasure
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
cat "$WORK_DIR/erasure-minio.log"
fi
rm -f "$WORK_DIR/erasure-minio.log"
return "$rv"
}
function run_test_dist_erasure() {
start_minio_dist_erasure
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
echo "server1 log:"
cat "$WORK_DIR/dist-minio-9000.log"
echo "server2 log:"
cat "$WORK_DIR/dist-minio-9001.log"
echo "server3 log:"
cat "$WORK_DIR/dist-minio-9002.log"
echo "server4 log:"
cat "$WORK_DIR/dist-minio-9003.log"
fi
rm -f "$WORK_DIR/dist-minio-9000.log" "$WORK_DIR/dist-minio-9001.log" "$WORK_DIR/dist-minio-9002.log" "$WORK_DIR/dist-minio-9003.log"
return "$rv"
}
function purge() {
rm -rf "$1"
}
function __init__() {
echo "Initializing environment"
mkdir -p "$WORK_DIR"
mkdir -p "$MINIO_CONFIG_DIR"
mkdir -p "$MINT_DATA_DIR"
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "${WORK_DIR}/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"
shred -n 1 -s 1M - 1>"$FILE_1_MB" 2>/dev/null
shred -n 1 -s 65M - 1>"$FILE_65_MB" 2>/dev/null
## version is purposefully set to '3' for minio to migrate configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"
if ! wget -q -O "$FUNCTIONAL_TESTS" https://raw.githubusercontent.com/minio/mc/master/functional-tests.sh; then
echo "failed to download https://raw.githubusercontent.com/minio/mc/master/functional-tests.sh"
exit 1
fi
sed -i 's|-sS|-sSg|g' "$FUNCTIONAL_TESTS"
chmod a+x "$FUNCTIONAL_TESTS"
}
function main() {
echo "Testing in FS setup"
if ! run_test_fs; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Erasure setup"
if ! run_test_erasure; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Distributed Erasure setup"
if ! run_test_dist_erasure; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Erasure setup as sets"
if ! run_test_erasure_sets; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Distributed Eraure expanded setup"
if ! run_test_pool_erasure_sets; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
echo "Testing in Distributed Erasure expanded setup with ipv6"
if ! run_test_pool_erasure_sets_ipv6; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
purge "$WORK_DIR"
}
(__init__ "$@" && main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"

View File

@ -0,0 +1,151 @@
#!/bin/bash -e
#
set -E
set -o pipefail
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
function start_minio_3_node() {
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_ERASURE_SET_DRIVE_COUNT=6
export MINIO_CI_CD=1
start_port=$1
args=""
for i in $(seq 1 3); do
args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/"
done
"${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 &
pid1=$!
disown ${pid1}
"${MINIO[@]}" --address ":$((start_port + 2))" $args >"${WORK_DIR}/dist-minio-server2.log" 2>&1 &
pid2=$!
disown $pid2
"${MINIO[@]}" --address ":$((start_port + 3))" $args >"${WORK_DIR}/dist-minio-server3.log" 2>&1 &
pid3=$!
disown $pid3
export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"
timeout 15m /tmp/mc ready myminio || fail
# Wait for all drives to be online and formatted
while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].state | select(. != "ok")' | wc -l) -gt 0 ]; do sleep 1; done
# Wait for all drives to be healed
while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].healing | select(. != null) | select(. == true)' | wc -l) -gt 0 ]; do sleep 1; done
# Wait for Status: in MinIO output
while true; do
rv=$(check_online)
if [ "$rv" != "1" ]; then
# success
break
fi
# Check if we should retry
retry=$((retry + 1))
if [ $retry -le 20 ]; then
sleep 5
continue
fi
# Failure
fail
done
if ! ps -p $pid1 1>&2 >/dev/null; then
echo "minio-server-1 is not running." && fail
fi
if ! ps -p $pid2 1>&2 >/dev/null; then
echo "minio-server-2 is not running." && fail
fi
if ! ps -p $pid3 1>&2 >/dev/null; then
echo "minio-server-3 is not running." && fail
fi
if ! pkill minio; then
fail
fi
sleep 1
if pgrep minio; then
# forcibly killing, to proceed further properly.
if ! pkill -9 minio; then
echo "no minio process running anymore, proceed."
fi
fi
}
function fail() {
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
}
function check_online() {
if ! grep -q 'API:' ${WORK_DIR}/dist-minio-*.log; then
echo "1"
fi
}
function purge() {
echo rm -rf "$1"
}
function __init__() {
echo "Initializing environment"
mkdir -p "$WORK_DIR"
mkdir -p "$MINIO_CONFIG_DIR"
## version is purposefully set to '3' for minio to migrate configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"
if [ ! -f /tmp/mc ]; then
wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x /tmp/mc
fi
}
function perform_test() {
start_minio_3_node $2
echo "Testing Distributed Erasure setup healing of drives"
echo "Remove the contents of the disks belonging to '${1}' erasure set"
rm -rf ${WORK_DIR}/${1}/*/
set -x
start_minio_3_node $2
}
function main() {
# use same ports for all tests
start_port=$(shuf -i 10000-65000 -n 1)
perform_test "2" ${start_port}
perform_test "1" ${start_port}
perform_test "3" ${start_port}
}
(__init__ "$@" && main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"

View File

@ -0,0 +1,96 @@
#!/bin/bash -e
set -E
set -o pipefail
set -x
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
WORK_DIR="$(mktemp -d)"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
function start_minio() {
start_port=$1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
unset MINIO_CI_CD
unset CI
args=()
for i in $(seq 1 4); do
args+=("http://localhost:$((start_port + i))${WORK_DIR}/mnt/disk$i/ ")
done
for i in $(seq 1 4); do
"${MINIO[@]}" --address ":$((start_port + i))" ${args[@]} 2>&1 >"${WORK_DIR}/server$i.log" &
done
# Wait until all nodes return 403
for i in $(seq 1 4); do
while [ "$(curl -m 1 -s -o /dev/null -w "%{http_code}" http://localhost:$((start_port + i)))" -ne "403" ]; do
echo -n "."
sleep 1
done
done
}
# Prepare fake disks with losetup
function prepare_block_devices() {
set -e
mkdir -p ${WORK_DIR}/disks/ ${WORK_DIR}/mnt/
sudo modprobe loop
for i in 1 2 3 4; do
dd if=/dev/zero of=${WORK_DIR}/disks/img.${i} bs=1M count=2000
device=$(sudo losetup --find --show ${WORK_DIR}/disks/img.${i})
sudo mkfs.ext4 -F ${device}
mkdir -p ${WORK_DIR}/mnt/disk${i}/
sudo mount ${device} ${WORK_DIR}/mnt/disk${i}/
sudo chown "$(id -u):$(id -g)" ${device} ${WORK_DIR}/mnt/disk${i}/
done
set +e
}
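# A hedged teardown sketch (not invoked by this script): loop devices created with
# `losetup --find --show` above can be detached once unmounted, e.g.:
#   sudo umount ${WORK_DIR}/mnt/disk${i}/ && sudo losetup -d ${device}
# where ${device} is the path returned by losetup for disk ${i}.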
# Start a distributed MinIO setup, unmount one disk and check if it is formatted
function main() {
start_port=$(shuf -i 10000-65000 -n 1)
start_minio ${start_port}
# Unmount the disk, after the unmount the device id
# /tmp/xxx/mnt/disk4 will be the same as '/' and it
# will be detected as root disk
while [ "$u" != "0" ]; do
sudo umount ${WORK_DIR}/mnt/disk4/
u=$?
sleep 1
done
# Wait until MinIO self heal kicks in
sleep 60
if [ -f ${WORK_DIR}/mnt/disk4/.minio.sys/format.json ]; then
echo "A root disk is formatted unexpectedely"
cat "${WORK_DIR}/server4.log"
exit -1
fi
}
function cleanup() {
pkill minio
sudo umount ${WORK_DIR}/mnt/disk{1..3}/
sudo rm /dev/minio-loopdisk*
rm -rf "$WORK_DIR"
}
(prepare_block_devices)
(main "$@")
rv=$?
cleanup
exit "$rv"

167
buildscripts/verify-healing.sh Executable file
View File

@ -0,0 +1,167 @@
#!/bin/bash -e
#
set -E
set -o pipefail
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
GOPATH=/tmp/gopath
function start_minio_3_node() {
for i in $(seq 1 3); do
rm "${WORK_DIR}/dist-minio-server$i.log"
done
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_ERASURE_SET_DRIVE_COUNT=6
export MINIO_CI_CD=1
first_time=$(find ${WORK_DIR}/ | grep format.json | wc -l)
start_port=$1
args=""
for d in $(seq 1 3 5); do
args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ "
d=$((d + 1))
args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ "
done
"${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 &
pid1=$!
disown ${pid1}
"${MINIO[@]}" --address ":$((start_port + 2))" $args >"${WORK_DIR}/dist-minio-server2.log" 2>&1 &
pid2=$!
disown $pid2
"${MINIO[@]}" --address ":$((start_port + 3))" $args >"${WORK_DIR}/dist-minio-server3.log" 2>&1 &
pid3=$!
disown $pid3
export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"
timeout 15m /tmp/mc ready myminio || fail
[ ${first_time} -eq 0 ] && upload_objects
[ ${first_time} -ne 0 ] && sleep 120
if ! ps -p $pid1 1>&2 >/dev/null; then
echo "minio server 1 is not running" && fail
fi
if ! ps -p $pid2 1>&2 >/dev/null; then
echo "minio server 2 is not running" && fail
fi
if ! ps -p $pid3 1>&2 >/dev/null; then
echo "minio server 3 is not running" && fail
fi
if ! pkill minio; then
fail
fi
sleep 1
if pgrep minio; then
# forcibly killing, to proceed further properly.
if ! pkill -9 minio; then
echo "no minio process running anymore, proceed."
fi
fi
}
function check_heal() {
if ! grep -q 'API:' ${WORK_DIR}/dist-minio-*.log; then
return 1
fi
for ((i = 0; i < 20; i++)); do
test -f ${WORK_DIR}/$1/1/.minio.sys/format.json
v1=$?
nextInES=$(($1 + 1)) && [ $nextInES -gt 3 ] && nextInES=1
foundFiles1=$(find ${WORK_DIR}/$1/1/ | grep -v .minio.sys | grep xl.meta | wc -l)
foundFiles2=$(find ${WORK_DIR}/$nextInES/1/ | grep -v .minio.sys | grep xl.meta | wc -l)
test $foundFiles1 -eq $foundFiles2
v2=$?
[ $v1 == 0 -a $v2 == 0 ] && return 0
sleep 10
done
return 1
}
function purge() {
rm -rf "$1"
}
function fail() {
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
pkill -9 minio
echo "FAILED"
purge "$WORK_DIR"
exit 1
}
function __init__() {
echo "Initializing environment"
mkdir -p "$WORK_DIR"
mkdir -p "$MINIO_CONFIG_DIR"
## version is purposefully set to '3' for minio to migrate configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"
if [ ! -f /tmp/mc ]; then
wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x /tmp/mc
fi
}
function upload_objects() {
/tmp/mc mb myminio/testbucket/
for ((i = 0; i < 20; i++)); do
echo "my content" | /tmp/mc pipe myminio/testbucket/file-$i
done
}
function perform_test() {
start_port=$2
start_minio_3_node $start_port
echo "Testing Distributed Erasure setup healing of drives"
echo "Remove the contents of the disks belonging to '${1}' node"
rm -rf ${WORK_DIR}/${1}/*/
set -x
start_minio_3_node $start_port
check_heal ${1}
rv=$?
if [ "$rv" == "1" ]; then
fail
fi
}
function main() {
# use same ports for all tests
start_port=$(shuf -i 10000-65000 -n 1)
perform_test "2" ${start_port}
perform_test "1" ${start_port}
perform_test "3" ${start_port}
}
(__init__ "$@" && main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"

280
cmd/acl-handlers.go Normal file
View File

@ -0,0 +1,280 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"encoding/xml"
"io"
"net/http"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v3/policy"
)
// Data types used for returning dummy access control
// policy XML. These types shouldn't be used elsewhere;
// they are only defined for use within this file.
type grantee struct {
XMLNS string `xml:"xmlns:xsi,attr"`
XMLXSI string `xml:"xsi:type,attr"`
Type string `xml:"Type"`
ID string `xml:"ID,omitempty"`
DisplayName string `xml:"DisplayName,omitempty"`
URI string `xml:"URI,omitempty"`
}
type grant struct {
Grantee grantee `xml:"Grantee"`
Permission string `xml:"Permission"`
}
type accessControlPolicy struct {
XMLName xml.Name `xml:"AccessControlPolicy"`
Owner Owner `xml:"Owner"`
AccessControlList struct {
Grants []grant `xml:"Grant"`
} `xml:"AccessControlList"`
}
// PutBucketACLHandler - PUT Bucket ACL
// -----------------
// This operation uses the ACL subresource
// to set the ACL for a bucket. This is a dummy call that
// only responds with success if the ACL is private.
func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketACL")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// Allow putBucketACL if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Before proceeding validate if bucket exists.
_, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
aclHeader := r.Header.Get(xhttp.AmzACL)
if aclHeader == "" {
acl := &accessControlPolicy{}
if err = xmlDecoder(r.Body, acl, r.ContentLength); err != nil {
if terr, ok := err.(*xml.SyntaxError); ok && terr.Msg == io.EOF.Error() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML),
r.URL)
return
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if len(acl.AccessControlList.Grants) == 0 {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
return
}
if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
return
}
}
if aclHeader != "" && aclHeader != "private" {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
return
}
}
// GetBucketACLHandler - GET Bucket ACL
// -----------------
// This operation uses the ACL
// subresource to return the ACL of a specified bucket.
func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketACL")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// Allow getBucketACL if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Before proceeding validate if bucket exists.
_, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
acl := &accessControlPolicy{}
acl.AccessControlList.Grants = append(acl.AccessControlList.Grants, grant{
Grantee: grantee{
XMLNS: "http://www.w3.org/2001/XMLSchema-instance",
XMLXSI: "CanonicalUser",
Type: "CanonicalUser",
},
Permission: "FULL_CONTROL",
})
if err := xml.NewEncoder(w).Encode(acl); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
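// Illustrative only (not part of this handler): given the fixed grant encoded above,
// a client request such as
//
//	aws --endpoint-url http://127.0.0.1:9000 s3api get-bucket-acl --bucket mybucket
//
// is expected to return a single CanonicalUser grantee with FULL_CONTROL permission,
// regardless of how the bucket was configured. The endpoint and bucket name here are
// assumptions for the example.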
// PutObjectACLHandler - PUT Object ACL
// -----------------
// This operation uses the ACL subresource
// to set the ACL for an object. This is a dummy call that
// only responds with success if the ACL is private.
func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutObjectACL")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// Allow putObjectACL if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Before proceeding validate if object exists.
_, err = objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
aclHeader := r.Header.Get(xhttp.AmzACL)
if aclHeader == "" {
acl := &accessControlPolicy{}
if err = xmlDecoder(r.Body, acl, r.ContentLength); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if len(acl.AccessControlList.Grants) == 0 {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
return
}
if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
return
}
}
if aclHeader != "" && aclHeader != "private" {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
return
}
}
// GetObjectACLHandler - GET Object ACL
// -----------------
// This operation uses the ACL
// subresource to return the ACL of a specified object.
func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetObjectACL")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// Allow getObjectACL if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Before proceeding validate if object exists.
_, err = objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
acl := &accessControlPolicy{}
acl.AccessControlList.Grants = append(acl.AccessControlList.Grants, grant{
Grantee: grantee{
XMLNS: "http://www.w3.org/2001/XMLSchema-instance",
XMLXSI: "CanonicalUser",
Type: "CanonicalUser",
},
Permission: "FULL_CONTROL",
})
if err := xml.NewEncoder(w).Encode(acl); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}

1115
cmd/admin-bucket-handlers.go Normal file

File diff suppressed because it is too large

263
cmd/admin-handler-utils.go Normal file
View File

@ -0,0 +1,263 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"errors"
"fmt"
"net/http"
"github.com/minio/kms-go/kes"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config"
"github.com/minio/pkg/v3/policy"
)
// validateAdminReq validates the request against the given admin actions and returns whether it is allowed.
// If any of the supplied actions are allowed it will be successful.
// If nil ObjectLayer is returned, the operation is not permitted.
// When nil ObjectLayer has been returned an error has always been sent to w.
func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, actions ...policy.AdminAction) (ObjectLayer, auth.Credentials) {
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return nil, auth.Credentials{}
}
for _, action := range actions {
// Validate request signature.
cred, adminAPIErr := checkAdminRequestAuth(ctx, r, action, "")
switch adminAPIErr {
case ErrNone:
return objectAPI, cred
case ErrAccessDenied:
// Try another
continue
default:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return nil, cred
}
}
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return nil, auth.Credentials{}
}
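// A typical call pattern (sketch, mirroring how the admin handlers elsewhere in this
// commit use this helper):
//
//	objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
//	if objectAPI == nil {
//		return // an error response has already been written
//	}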
// AdminError - is a generic error for all admin APIs.
type AdminError struct {
Code string
Message string
StatusCode int
}
func (ae AdminError) Error() string {
return ae.Message
}
func toAdminAPIErr(ctx context.Context, err error) APIError {
if err == nil {
return noError
}
var apiErr APIError
switch e := err.(type) {
case policy.Error:
apiErr = APIError{
Code: "XMinioMalformedIAMPolicy",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case config.ErrConfigNotFound:
apiErr = APIError{
Code: "XMinioConfigNotFoundError",
Description: e.Error(),
HTTPStatusCode: http.StatusNotFound,
}
case config.ErrConfigGeneric:
apiErr = APIError{
Code: "XMinioConfigError",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case AdminError:
apiErr = APIError{
Code: e.Code,
Description: e.Message,
HTTPStatusCode: e.StatusCode,
}
case SRError:
apiErr = errorCodes.ToAPIErrWithErr(e.Code, e.Cause)
case decomError:
apiErr = APIError{
Code: "XMinioDecommissionNotAllowed",
Description: e.Err,
HTTPStatusCode: http.StatusBadRequest,
}
default:
switch {
case errors.Is(err, errTooManyPolicies):
apiErr = APIError{
Code: "XMinioAdminInvalidRequest",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errDecommissionAlreadyRunning):
apiErr = APIError{
Code: "XMinioDecommissionNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errDecommissionComplete):
apiErr = APIError{
Code: "XMinioDecommissionNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errDecommissionRebalanceAlreadyRunning):
apiErr = APIError{
Code: "XMinioDecommissionNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errRebalanceDecommissionAlreadyRunning):
apiErr = APIError{
Code: "XMinioRebalanceNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errConfigNotFound):
apiErr = APIError{
Code: "XMinioConfigError",
Description: err.Error(),
HTTPStatusCode: http.StatusNotFound,
}
case errors.Is(err, errIAMActionNotAllowed):
apiErr = APIError{
Code: "XMinioIAMActionNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusForbidden,
}
case errors.Is(err, errIAMServiceAccountNotAllowed):
apiErr = APIError{
Code: "XMinioIAMServiceAccountNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errIAMNotInitialized):
apiErr = APIError{
Code: "XMinioIAMNotInitialized",
Description: err.Error(),
HTTPStatusCode: http.StatusServiceUnavailable,
}
case errors.Is(err, errPolicyInUse):
apiErr = APIError{
Code: "XMinioIAMPolicyInUse",
Description: "The policy cannot be removed, as it is in use",
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errSessionPolicyTooLarge):
apiErr = APIError{
Code: "XMinioIAMServiceAccountSessionPolicyTooLarge",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, kes.ErrKeyExists):
apiErr = APIError{
Code: "XMinioKMSKeyExists",
Description: err.Error(),
HTTPStatusCode: http.StatusConflict,
}
// Tier admin API errors
case errors.Is(err, madmin.ErrTierNameEmpty):
apiErr = APIError{
Code: "XMinioAdminTierNameEmpty",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, madmin.ErrTierInvalidConfig):
apiErr = APIError{
Code: "XMinioAdminTierInvalidConfig",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, madmin.ErrTierInvalidConfigVersion):
apiErr = APIError{
Code: "XMinioAdminTierInvalidConfigVersion",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, madmin.ErrTierTypeUnsupported):
apiErr = APIError{
Code: "XMinioAdminTierTypeUnsupported",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errIsTierPermError(err):
apiErr = APIError{
Code: "XMinioAdminTierInsufficientPermissions",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errTierInvalidConfig):
apiErr = APIError{
Code: "XMinioAdminTierInvalidConfig",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
default:
apiErr = errorCodes.ToAPIErrWithErr(toAdminAPIErrCode(ctx, err), err)
}
}
return apiErr
}
// toAdminAPIErrCode - converts errErasureWriteQuorum error to admin API
// specific error.
func toAdminAPIErrCode(ctx context.Context, err error) APIErrorCode {
if errors.Is(err, errErasureWriteQuorum) {
return ErrAdminConfigNoQuorum
}
return toAPIErrorCode(ctx, err)
}
// wraps export error for more context
func exportError(ctx context.Context, err error, fname, entity string) APIError {
if entity == "" {
return toAPIError(ctx, fmt.Errorf("error exporting %s with: %w", fname, err))
}
return toAPIError(ctx, fmt.Errorf("error exporting %s from %s with: %w", entity, fname, err))
}
// wraps import error for more context
func importError(ctx context.Context, err error, fname, entity string) APIError {
if entity == "" {
return toAPIError(ctx, fmt.Errorf("error importing %s with: %w", fname, err))
}
return toAPIError(ctx, fmt.Errorf("error importing %s from %s with: %w", entity, fname, err))
}
// wraps import error for more context
func importErrorWithAPIErr(ctx context.Context, apiErr APIErrorCode, err error, fname, entity string) APIError {
if entity == "" {
return errorCodes.ToAPIErrWithErr(apiErr, fmt.Errorf("error importing %s with: %w", fname, err))
}
return errorCodes.ToAPIErrWithErr(apiErr, fmt.Errorf("error importing %s from %s with: %w", entity, fname, err))
}

View File

@ -0,0 +1,542 @@
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"encoding/json"
"io"
"net/http"
"strconv"
"strings"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/config/etcd"
xldap "github.com/minio/minio/internal/config/identity/ldap"
"github.com/minio/minio/internal/config/identity/openid"
idplugin "github.com/minio/minio/internal/config/identity/plugin"
polplugin "github.com/minio/minio/internal/config/policy/plugin"
"github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/config/subnet"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v3/policy"
)
// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
subSys, _, _, err := config.GetSubSys(string(kvBytes))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
cfg, err := readServerConfig(ctx, objectAPI, nil)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = cfg.DelFrom(bytes.NewReader(kvBytes)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(ctx, cfg, subSys); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
// Check if the subnet proxy is being deleted; if so, the proxy value of the subnet
// target of the logger webhook configuration should also be deleted
loggerWebhookProxyDeleted := setLoggerWebhookSubnetProxy(subSys, cfg)
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// freshly retrieve the config so that default values are loaded for reset config
if cfg, err = getValidConfig(objectAPI); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
dynamic := config.SubSystemsDynamic.Contains(subSys)
if dynamic {
applyDynamic(ctx, objectAPI, cfg, subSys, r, w)
if subSys == config.SubnetSubSys && loggerWebhookProxyDeleted {
// Logger webhook proxy deleted, apply the dynamic changes
applyDynamic(ctx, objectAPI, cfg, config.LoggerWebhookSubSys, r, w)
}
}
}
func applyDynamic(ctx context.Context, objectAPI ObjectLayer, cfg config.Config, subSys string,
r *http.Request, w http.ResponseWriter,
) {
// Apply dynamic values.
if err := applyDynamicConfigForSubSys(GlobalContext, objectAPI, cfg, subSys); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
globalNotificationSys.SignalConfigReload(subSys)
// Tell the client that dynamic config was applied.
w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
}
type badConfigErr struct {
Err error
}
// Error - return the error message
func (bce badConfigErr) Error() string {
return bce.Err.Error()
}
// Unwrap the error to its underlying error.
func (bce badConfigErr) Unwrap() error {
return bce.Err
}
type setConfigResult struct {
Cfg config.Config
SubSys string
Dynamic bool
LoggerWebhookCfgUpdated bool
}
// SetConfigKVHandler - PUT /minio/admin/v3/set-config-kv
func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
result, err := setConfigKV(ctx, objectAPI, kvBytes)
if err != nil {
switch err.(type) {
case badConfigErr:
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
default:
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
}
return
}
if result.Dynamic {
applyDynamic(ctx, objectAPI, result.Cfg, result.SubSys, r, w)
// If logger webhook config updated (proxy due to callhome), explicitly dynamically
// apply the config
if result.LoggerWebhookCfgUpdated {
applyDynamic(ctx, objectAPI, result.Cfg, config.LoggerWebhookSubSys, r, w)
}
}
writeSuccessResponseHeadersOnly(w)
}
func setConfigKV(ctx context.Context, objectAPI ObjectLayer, kvBytes []byte) (result setConfigResult, err error) {
result.Cfg, err = readServerConfig(ctx, objectAPI, nil)
if err != nil {
return
}
result.Dynamic, err = result.Cfg.ReadConfig(bytes.NewReader(kvBytes))
if err != nil {
return
}
result.SubSys, _, _, err = config.GetSubSys(string(kvBytes))
if err != nil {
return
}
tgts, err := config.ParseConfigTargetID(bytes.NewReader(kvBytes))
if err != nil {
return
}
ctx = context.WithValue(ctx, config.ContextKeyForTargetFromConfig, tgts)
if verr := validateConfig(ctx, result.Cfg, result.SubSys); verr != nil {
err = badConfigErr{Err: verr}
return
}
// Check if the subnet proxy is being set; if so, set the same value on the proxy of the subnet
// target of the logger webhook configuration
result.LoggerWebhookCfgUpdated = setLoggerWebhookSubnetProxy(result.SubSys, result.Cfg)
// Update the actual server config on disk.
if err = saveServerConfig(ctx, objectAPI, result.Cfg); err != nil {
return
}
// Write the config input KV to history.
err = saveServerConfigHistory(ctx, objectAPI, kvBytes)
return
}
// GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key}
//
// `key` can be one of three forms:
// 1. `subsys:target` -> request for config of a single subsystem and target pair.
// 2. `subsys:` -> request for config of a single subsystem and the default target.
// 3. `subsys` -> request for config of all targets for the given subsystem.
//
// This is a reporting API and config secrets are redacted in the response.
func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
cfg := globalServerConfig.Clone()
vars := mux.Vars(r)
key := vars["key"]
var subSys, target string
{
ws := strings.SplitN(key, madmin.SubSystemSeparator, 2)
subSys = ws[0]
if len(ws) == 2 {
if ws[1] == "" {
target = madmin.Default
} else {
target = ws[1]
}
}
}
subSysConfigs, err := cfg.GetSubsysInfo(subSys, target, true)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var s strings.Builder
for _, subSysConfig := range subSysConfigs {
subSysConfig.WriteTo(&s, false)
}
password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, []byte(s.String()))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
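// Example requests for the three key forms above (a sketch, assuming an mc alias named
// "myminio"; "target1" is a hypothetical target name, and mc passes the key through to
// this API):
//
//	mc admin config get myminio logger_webhook:target1   (single subsystem and target)
//	mc admin config get myminio logger_webhook            (all targets of the subsystem)
//
// The `subsys:` form (default target only) is mainly useful when calling the admin API
// directly.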
func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
restoreID := vars["restoreId"]
if restoreID == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if restoreID == "all" {
chEntries, err := listServerConfigHistory(ctx, objectAPI, false, -1)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for _, chEntry := range chEntries {
if err = delServerConfigHistory(ctx, objectAPI, chEntry.RestoreID); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
} else if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// RestoreConfigHistoryKVHandler - restores a config with KV settings for the given KV id.
func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
restoreID := vars["restoreId"]
if restoreID == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
kvBytes, err := readServerConfigHistory(ctx, objectAPI, restoreID)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
cfg, err := readServerConfig(ctx, objectAPI, nil)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if _, err = cfg.ReadConfig(bytes.NewReader(kvBytes)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(ctx, cfg, ""); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
delServerConfigHistory(ctx, objectAPI, restoreID)
}
// ListConfigHistoryKVHandler - lists all the KV ids.
func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
count, err := strconv.Atoi(vars["count"])
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
chEntries, err := listServerConfigHistory(ctx, objectAPI, true, count)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
data, err := json.Marshal(chEntries)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
// HelpConfigKVHandler - GET /minio/admin/v3/help-config-kv?subSys={subSys}&key={key}
func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
subSys := vars["subSys"]
key := vars["key"]
_, envOnly := r.Form["env"]
rd, err := GetHelp(subSys, key, envOnly)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
json.NewEncoder(w).Encode(rd)
}
// SetConfigHandler - PUT /minio/admin/v3/config
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
cfg := newServerConfig()
if _, err = cfg.ReadConfig(bytes.NewReader(kvBytes)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(ctx, cfg, ""); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
// Update the actual server config on disk.
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write the config input KV to history.
if err = saveServerConfigHistory(ctx, objectAPI, kvBytes); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseHeadersOnly(w)
}
// GetConfigHandler - GET /minio/admin/v3/config
//
// This endpoint is mainly for exporting and backing up the configuration.
// Secrets are not redacted.
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
cfg := globalServerConfig.Clone()
var s strings.Builder
hkvs := config.HelpSubSysMap[""]
for _, hkv := range hkvs {
// We ignore the error below, as we cannot get one.
cfgSubsysItems, _ := cfg.GetSubsysInfo(hkv.Key, "", false)
for _, item := range cfgSubsysItems {
off := item.Config.Get(config.Enable) == config.EnableOff
switch hkv.Key {
case config.EtcdSubSys:
off = !etcd.Enabled(item.Config)
case config.StorageClassSubSys:
off = !storageclass.Enabled(item.Config)
case config.PolicyPluginSubSys:
off = !polplugin.Enabled(item.Config)
case config.IdentityOpenIDSubSys:
off = !openid.Enabled(item.Config)
case config.IdentityLDAPSubSys:
off = !xldap.Enabled(item.Config)
case config.IdentityTLSSubSys:
off = !globalIAMSys.STSTLSConfig.Enabled
case config.IdentityPluginSubSys:
off = !idplugin.Enabled(item.Config)
}
item.WriteTo(&s, off)
}
}
password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, []byte(s.String()))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
// setLoggerWebhookSubnetProxy - Sets the logger webhook's subnet proxy value to
// the one being set for the subnet proxy
func setLoggerWebhookSubnetProxy(subSys string, cfg config.Config) bool {
if subSys == config.SubnetSubSys || subSys == config.LoggerWebhookSubSys {
subnetWebhookCfg := cfg[config.LoggerWebhookSubSys][subnet.LoggerWebhookName]
loggerWebhookSubnetProxy := subnetWebhookCfg.Get(logger.Proxy)
subnetProxy := cfg[config.SubnetSubSys][config.Default].Get(logger.Proxy)
if loggerWebhookSubnetProxy != subnetProxy {
subnetWebhookCfg.Set(logger.Proxy, subnetProxy)
return true
}
}
return false
}

View File

@ -0,0 +1,439 @@
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/config"
cfgldap "github.com/minio/minio/internal/config/identity/ldap"
"github.com/minio/minio/internal/config/identity/openid"
"github.com/minio/mux"
"github.com/minio/pkg/v3/ldap"
"github.com/minio/pkg/v3/policy"
)
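// addOrUpdateIDPHandler is the common implementation behind the IDP config
// create and update admin APIs. It decrypts the request body, merges the
// submitted key-value configuration into the named OpenID or LDAP sub-system
// target, validates the resulting config and persists it along with a history
// entry.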
func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, isUpdate bool) {
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// Reject the request if the body exceeds maxEConfigJSONSize or its length is unknown.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
// Ensure the body content type is opaque so that the request body is not
// interpreted as form data.
contentType := r.Header.Get("Content-Type")
if contentType != "application/octet-stream" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
idpCfgType := mux.Vars(r)["type"]
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
return
}
var subSys string
switch idpCfgType {
case madmin.OpenidIDPCfg:
subSys = madmin.IdentityOpenIDSubSys
case madmin.LDAPIDPCfg:
subSys = madmin.IdentityLDAPSubSys
}
cfgName := mux.Vars(r)["name"]
cfgTarget := madmin.Default
if cfgName != "" {
cfgTarget = cfgName
if idpCfgType == madmin.LDAPIDPCfg && cfgName != madmin.Default {
// LDAP does not support multiple configurations. So cfgName must be
// empty or `madmin.Default`.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigLDAPNonDefaultConfigName), r.URL)
return
}
}
// Check that this is a valid Create vs Update API call.
s := globalServerConfig.Clone()
if apiErrCode := handleCreateUpdateValidation(s, subSys, cfgTarget, isUpdate); apiErrCode != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL)
return
}
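// Build the config data in the server's single-line KV format, e.g.
// "identity_openid:dex1 key1=value1 ..." (illustrative; the target suffix is
// omitted for the default target).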
cfgData := ""
{
tgtSuffix := ""
if cfgTarget != madmin.Default {
tgtSuffix = config.SubSystemSeparator + cfgTarget
}
cfgData = subSys + tgtSuffix + config.KvSpaceSeparator + string(reqBytes)
}
cfg, err := readServerConfig(ctx, objectAPI, nil)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
dynamic, err := cfg.ReadConfig(strings.NewReader(cfgData))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// IDP config is not dynamic. Sanity check.
if dynamic {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), "", r.URL)
return
}
if err = validateConfig(ctx, cfg, subSys); err != nil {
var validationErr ldap.Validation
if errors.As(err, &validationErr) {
// If we got an LDAP validation error, we need to send appropriate
// error message back to client (likely mc).
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigLDAPValidation),
validationErr.FormatError(), r.URL)
return
}
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
// Update the actual server config on disk.
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write the config input KV to history.
if err = saveServerConfigHistory(ctx, objectAPI, []byte(cfgData)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseHeadersOnly(w)
}
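// handleCreateUpdateValidation verifies that a create call targets a
// configuration name that does not exist yet and that an update call targets
// one that does, returning ErrNone when the operation is valid.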
func handleCreateUpdateValidation(s config.Config, subSys, cfgTarget string, isUpdate bool) APIErrorCode {
if cfgTarget != madmin.Default {
// This cannot give an error at this point.
subSysTargets, _ := s.GetAvailableTargets(subSys)
subSysTargetsSet := set.CreateStringSet(subSysTargets...)
if isUpdate && !subSysTargetsSet.Contains(cfgTarget) {
return ErrAdminConfigIDPCfgNameDoesNotExist
}
if !isUpdate && subSysTargetsSet.Contains(cfgTarget) {
return ErrAdminConfigIDPCfgNameAlreadyExists
}
return ErrNone
}
// For the default configuration name, since it will always be an available
// target, we need to check if a configuration value has been set previously
// to figure out if this is a valid create or update API call.
// This cannot really error (FIXME: improve the type for GetConfigInfo)
var cfgInfos []madmin.IDPCfgInfo
switch subSys {
case madmin.IdentityOpenIDSubSys:
cfgInfos, _ = globalIAMSys.OpenIDConfig.GetConfigInfo(s, cfgTarget)
case madmin.IdentityLDAPSubSys:
cfgInfos, _ = globalIAMSys.LDAPConfig.GetConfigInfo(s, cfgTarget)
}
if len(cfgInfos) > 0 && !isUpdate {
return ErrAdminConfigIDPCfgNameAlreadyExists
}
if len(cfgInfos) == 0 && isUpdate {
return ErrAdminConfigIDPCfgNameDoesNotExist
}
return ErrNone
}
// AddIdentityProviderCfg: adds a new IDP config for openid/ldap.
//
// PUT <admin-prefix>/idp-cfg/openid/dex1 -> create named config `dex1`
//
// PUT <admin-prefix>/idp-cfg/openid/_ -> create (default) named config `_`
func (a adminAPIHandlers) AddIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
addOrUpdateIDPHandler(ctx, w, r, false)
}
// UpdateIdentityProviderCfg: updates an existing IDP config for openid/ldap.
//
// POST <admin-prefix>/idp-cfg/openid/dex1 -> update named config `dex1`
//
// POST <admin-prefix>/idp-cfg/openid/_ -> update (default) named config `_`
func (a adminAPIHandlers) UpdateIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
addOrUpdateIDPHandler(ctx, w, r, true)
}
// ListIdentityProviderCfg:
//
// GET <admin-prefix>/idp-cfg/openid -> lists openid provider configs.
func (a adminAPIHandlers) ListIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
password := cred.SecretKey
idpCfgType := mux.Vars(r)["type"]
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
return
}
var cfgList []madmin.IDPListItem
var err error
switch idpCfgType {
case madmin.OpenidIDPCfg:
cfg := globalServerConfig.Clone()
cfgList, err = globalIAMSys.OpenIDConfig.GetConfigList(cfg)
case madmin.LDAPIDPCfg:
cfg := globalServerConfig.Clone()
cfgList, err = globalIAMSys.LDAPConfig.GetConfigList(cfg)
default:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
data, err := json.Marshal(cfgList)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
// GetIdentityProviderCfg:
//
// GET <admin-prefix>/idp-cfg/openid/dex_test
func (a adminAPIHandlers) GetIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, cred := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
idpCfgType := mux.Vars(r)["type"]
cfgName := mux.Vars(r)["name"]
password := cred.SecretKey
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
return
}
cfg := globalServerConfig.Clone()
var cfgInfos []madmin.IDPCfgInfo
var err error
switch idpCfgType {
case madmin.OpenidIDPCfg:
cfgInfos, err = globalIAMSys.OpenIDConfig.GetConfigInfo(cfg, cfgName)
case madmin.LDAPIDPCfg:
cfgInfos, err = globalIAMSys.LDAPConfig.GetConfigInfo(cfg, cfgName)
}
if err != nil {
if errors.Is(err, openid.ErrProviderConfigNotFound) || errors.Is(err, cfgldap.ErrProviderConfigNotFound) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
res := madmin.IDPConfig{
Type: idpCfgType,
Name: cfgName,
Info: cfgInfos,
}
data, err := json.Marshal(res)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
// DeleteIdentityProviderCfg:
//
// DELETE <admin-prefix>/idp-cfg/openid/dex_test
func (a adminAPIHandlers) DeleteIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
idpCfgType := mux.Vars(r)["type"]
cfgName := mux.Vars(r)["name"]
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
return
}
cfgCopy := globalServerConfig.Clone()
var subSys string
switch idpCfgType {
case madmin.OpenidIDPCfg:
subSys = config.IdentityOpenIDSubSys
cfgInfos, err := globalIAMSys.OpenIDConfig.GetConfigInfo(cfgCopy, cfgName)
if err != nil {
if errors.Is(err, openid.ErrProviderConfigNotFound) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
hasEnv := false
for _, ci := range cfgInfos {
if ci.IsCfg && ci.IsEnv {
hasEnv = true
break
}
}
if hasEnv {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigEnvOverridden), r.URL)
return
}
case madmin.LDAPIDPCfg:
subSys = config.IdentityLDAPSubSys
cfgInfos, err := globalIAMSys.LDAPConfig.GetConfigInfo(cfgCopy, cfgName)
if err != nil {
if errors.Is(err, openid.ErrProviderConfigNotFound) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
hasEnv := false
for _, ci := range cfgInfos {
if ci.IsCfg && ci.IsEnv {
hasEnv = true
break
}
}
if hasEnv {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigEnvOverridden), r.URL)
return
}
default:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
cfg, err := readServerConfig(ctx, objectAPI, nil)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
cfgKey := fmt.Sprintf("%s:%s", subSys, cfgName)
if cfgName == madmin.Default {
cfgKey = subSys
}
if err = cfg.DelKVS(cfgKey); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(ctx, cfg, subSys); err != nil {
var validationErr ldap.Validation
if errors.As(err, &validationErr) {
// If we got an LDAP validation error, we need to send appropriate
// error message back to client (likely mc).
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigLDAPValidation),
validationErr.FormatError(), r.URL)
return
}
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
dynamic := config.SubSystemsDynamic.Contains(subSys)
if dynamic {
applyDynamic(ctx, objectAPI, cfg, subSys, r, w)
}
}


@ -0,0 +1,653 @@
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
"github.com/minio/mux"
xldap "github.com/minio/pkg/v3/ldap"
"github.com/minio/pkg/v3/policy"
)
// ListLDAPPolicyMappingEntities lists users/groups mapped to given/all policies.
//
// GET <admin-prefix>/idp/ldap/policy-entities?[query-params]
//
// Query params:
//
// user=... -> repeatable query parameter, specifying users to query for
// policy mapping
//
// group=... -> repeatable query parameter, specifying groups to query for
// policy mapping
//
// policy=... -> repeatable query parameter, specifying policy to query for
// user/group mapping
//
// When all query parameters are omitted, returns mappings for all policies.
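//
// Example (illustrative): list the users/groups mapped to a single policy:
//
// GET <admin-prefix>/idp/ldap/policy-entities?policy=readwrite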
func (a adminAPIHandlers) ListLDAPPolicyMappingEntities(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Check authorization.
objectAPI, cred := validateAdminReq(ctx, w, r,
policy.ListGroupsAdminAction, policy.ListUsersAdminAction, policy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}
// Validate API arguments.
q := madmin.PolicyEntitiesQuery{
Users: r.Form["user"],
Groups: r.Form["group"],
Policy: r.Form["policy"],
}
// Query IAM
res, err := globalIAMSys.QueryLDAPPolicyEntities(r.Context(), q)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Encode result and send response.
data, err := json.Marshal(res)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
// AttachDetachPolicyLDAP attaches or detaches policies from an LDAP entity
// (user or group).
//
// POST <admin-prefix>/idp/ldap/policy/{operation}
func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Check authorization.
objectAPI, cred := validateAdminReq(ctx, w, r, policy.UpdatePolicyAssociationAction)
if objectAPI == nil {
return
}
// fail if ldap is not enabled
if !globalIAMSys.LDAPConfig.Enabled() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminLDAPNotEnabled), r.URL)
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// Reject the request if the body exceeds maxEConfigJSONSize or its length is unknown.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
// Ensure the body content type is opaque so that the request body is not
// interpreted as form data.
contentType := r.Header.Get("Content-Type")
if contentType != "application/octet-stream" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
// Validate operation
operation := mux.Vars(r)["operation"]
if operation != "attach" && operation != "detach" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
return
}
isAttach := operation == "attach"
// Validate API arguments in body.
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
var par madmin.PolicyAssociationReq
err = json.Unmarshal(reqBytes, &par)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if err := par.IsValid(); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
// Call IAM subsystem
updatedAt, addedOrRemoved, _, err := globalIAMSys.PolicyDBUpdateLDAP(ctx, isAttach, par)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
respBody := madmin.PolicyAssociationResp{
UpdatedAt: updatedAt,
}
if isAttach {
respBody.PoliciesAttached = addedOrRemoved
} else {
respBody.PoliciesDetached = addedOrRemoved
}
data, err := json.Marshal(respBody)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
// AddServiceAccountLDAP adds a new service account for the provided LDAP username or DN
//
// PUT /minio/admin/v3/idp/ldap/add-service-account
func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.Request) {
ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r, true)
if APIError.Code != "" {
writeErrorResponseJSON(ctx, w, APIError, r.URL)
return
}
// fail if ldap is not enabled
if !globalIAMSys.LDAPConfig.Enabled() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminLDAPNotEnabled), r.URL)
return
}
// Find the user for the request sender (as it may be sent via a service
// account or STS account):
requestorUser := cred.AccessKey
requestorParentUser := cred.AccessKey
requestorGroups := cred.Groups
requestorIsDerivedCredential := false
if cred.IsServiceAccount() || cred.IsTemp() {
requestorParentUser = cred.ParentUser
requestorIsDerivedCredential = true
}
// Check if we are creating svc account for request sender.
isSvcAccForRequestor := targetUser == requestorUser || targetUser == requestorParentUser
var (
targetGroups []string
err error
)
// If we are creating svc account for request sender, ensure that targetUser
// is a real user (i.e. not derived credentials).
if isSvcAccForRequestor {
if requestorIsDerivedCredential {
if requestorParentUser == "" {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx,
errors.New("service accounts cannot be generated for temporary credentials without parent")), r.URL)
return
}
targetUser = requestorParentUser
}
targetGroups = requestorGroups
// Deny if the target user is not LDAP
foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if foundResult == nil {
err := errors.New("Specified user does not exist on LDAP server")
APIErr := errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err)
writeErrorResponseJSON(ctx, w, APIErr, r.URL)
return
}
// In case of LDAP/OIDC we need to set `opts.claims` to ensure
// it is associated with the LDAP/OIDC user properly.
for k, v := range cred.Claims {
if k == expClaim {
continue
}
opts.claims[k] = v
}
} else {
// We still need to ensure that the target user is a valid LDAP user.
//
// The target user may be supplied as a (short) username or a DN.
// However, for now, we only support using the short username.
isDN := globalIAMSys.LDAPConfig.ParsesAsDN(targetUser)
opts.claims[ldapUserN] = targetUser // simple username
var lookupResult *xldap.DNSearchResult
lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
if err != nil {
// If the user was not found, return a more specific error.
if strings.Contains(err.Error(), "User DN not found for:") {
if isDN {
// Warn the caller that DNs are not accepted here, only login names.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminLDAPExpectedLoginName, err), r.URL)
} else {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err), r.URL)
}
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
targetUser = lookupResult.NormDN
opts.claims[ldapUser] = targetUser // DN
opts.claims[ldapActualUser] = lookupResult.ActualDN
// Check if this user or their groups have a policy applied.
ldapPolicies, err := globalIAMSys.PolicyDBGet(targetUser, targetGroups...)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if len(ldapPolicies) == 0 {
err = fmt.Errorf("No policy set for user `%s` or any of their groups: `%s`", opts.claims[ldapActualUser], strings.Join(targetGroups, "`,`"))
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err), r.URL)
return
}
// Add LDAP attributes that were looked up into the claims.
for attribKey, attribValue := range lookupResult.Attributes {
opts.claims[ldapAttribPrefix+attribKey] = attribValue
}
}
newCred, updatedAt, err := globalIAMSys.NewServiceAccount(ctx, targetUser, targetGroups, opts)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
createResp := madmin.AddServiceAccountResp{
Credentials: madmin.Credentials{
AccessKey: newCred.AccessKey,
SecretKey: newCred.SecretKey,
Expiration: newCred.Expiration,
},
}
data, err := json.Marshal(createResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
// Call hook for cluster-replication if the service account is not for a
// root user.
if newCred.ParentUser != globalActiveCred.AccessKey {
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSvcAcc,
SvcAccChange: &madmin.SRSvcAccChange{
Create: &madmin.SRSvcAccCreate{
Parent: newCred.ParentUser,
AccessKey: newCred.AccessKey,
SecretKey: newCred.SecretKey,
Groups: newCred.Groups,
Name: newCred.Name,
Description: newCred.Description,
Claims: opts.claims,
SessionPolicy: madmin.SRSessionPolicy(createReq.Policy),
Status: auth.AccountOn,
Expiration: createReq.Expiration,
},
},
UpdatedAt: updatedAt,
}))
}
}
// ListAccessKeysLDAP - GET /minio/admin/v3/idp/ldap/list-access-keys
func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
userDN := r.Form.Get("userDN")
// If listing is requested for a specific user (who is not the request
// sender), check that the requester has the required permission.
if userDN != "" && userDN != cred.ParentUser {
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
} else {
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
DenyOnly: true,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
userDN = cred.AccessKey
if cred.ParentUser != "" {
userDN = cred.ParentUser
}
}
dnResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if dnResult == nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errNoSuchUser), r.URL)
return
}
targetAccount := dnResult.NormDN
listType := r.Form.Get("listType")
if listType != "sts-only" && listType != "svcacc-only" && listType != "" {
// default to both
listType = ""
}
var serviceAccounts []auth.Credentials
var stsKeys []auth.Credentials
if listType == "" || listType == "sts-only" {
stsKeys, err = globalIAMSys.ListSTSAccounts(ctx, targetAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
if listType == "" || listType == "svcacc-only" {
serviceAccounts, err = globalIAMSys.ListServiceAccounts(ctx, targetAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
var serviceAccountList []madmin.ServiceAccountInfo
var stsKeyList []madmin.ServiceAccountInfo
for _, svc := range serviceAccounts {
expiryTime := svc.Expiration
serviceAccountList = append(serviceAccountList, madmin.ServiceAccountInfo{
AccessKey: svc.AccessKey,
Expiration: &expiryTime,
})
}
for _, sts := range stsKeys {
expiryTime := sts.Expiration
stsKeyList = append(stsKeyList, madmin.ServiceAccountInfo{
AccessKey: sts.AccessKey,
Expiration: &expiryTime,
})
}
listResp := madmin.ListAccessKeysLDAPResp{
ServiceAccounts: serviceAccountList,
STSKeys: stsKeyList,
}
data, err := json.Marshal(listResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
// ListAccessKeysLDAPBulk - GET /minio/admin/v3/idp/ldap/list-access-keys-bulk
func (a adminAPIHandlers) ListAccessKeysLDAPBulk(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
dnList := r.Form["userDNs"]
isAll := r.Form.Get("all") == "true"
selfOnly := !isAll && len(dnList) == 0
if isAll && len(dnList) > 0 {
// This should be checked on the client side, so return a generic error.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
// Listing access keys for all users requires the ListUsersAdminAction permission.
if isAll {
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListUsersAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
} else if len(dnList) == 1 {
var dn string
foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(dnList[0])
if err == nil {
dn = foundResult.NormDN
}
if dn == cred.ParentUser || dnList[0] == cred.ParentUser {
selfOnly = true
}
}
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
DenyOnly: selfOnly,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
if selfOnly && len(dnList) == 0 {
selfDN := cred.AccessKey
if cred.ParentUser != "" {
selfDN = cred.ParentUser
}
dnList = append(dnList, selfDN)
}
var ldapUserList []string
if isAll {
ldapUsers, err := globalIAMSys.ListLDAPUsers(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for user := range ldapUsers {
ldapUserList = append(ldapUserList, user)
}
} else {
for _, userDN := range dnList {
// Validate the userDN
foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if foundResult == nil {
continue
}
ldapUserList = append(ldapUserList, foundResult.NormDN)
}
}
listType := r.Form.Get("listType")
var listSTSKeys, listServiceAccounts bool
switch listType {
case madmin.AccessKeyListUsersOnly:
listSTSKeys = false
listServiceAccounts = false
case madmin.AccessKeyListSTSOnly:
listSTSKeys = true
listServiceAccounts = false
case madmin.AccessKeyListSvcaccOnly:
listSTSKeys = false
listServiceAccounts = true
case madmin.AccessKeyListAll:
listSTSKeys = true
listServiceAccounts = true
default:
err := errors.New("invalid list type")
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
return
}
accessKeyMap := make(map[string]madmin.ListAccessKeysLDAPResp)
for _, internalDN := range ldapUserList {
externalDN := globalIAMSys.LDAPConfig.DecodeDN(internalDN)
accessKeys := madmin.ListAccessKeysLDAPResp{}
if listSTSKeys {
stsKeys, err := globalIAMSys.ListSTSAccounts(ctx, internalDN)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for _, sts := range stsKeys {
accessKeys.STSKeys = append(accessKeys.STSKeys, madmin.ServiceAccountInfo{
AccessKey: sts.AccessKey,
Expiration: &sts.Expiration,
})
}
// if only STS keys, skip if user has no STS keys
if !listServiceAccounts && len(stsKeys) == 0 {
continue
}
}
if listServiceAccounts {
serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, internalDN)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for _, svc := range serviceAccounts {
accessKeys.ServiceAccounts = append(accessKeys.ServiceAccounts, madmin.ServiceAccountInfo{
AccessKey: svc.AccessKey,
Expiration: &svc.Expiration,
})
}
// if only service accounts, skip if user has no service accounts
if !listSTSKeys && len(serviceAccounts) == 0 {
continue
}
}
accessKeyMap[externalDN] = accessKeys
}
data, err := json.Marshal(accessKeyMap)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}


@ -0,0 +1,246 @@
// Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"encoding/json"
"errors"
"net/http"
"sort"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/pkg/v3/policy"
)
const dummyRoleARN = "dummy-internal"
// ListAccessKeysOpenIDBulk - GET /minio/admin/v3/idp/openid/list-access-keys-bulk
func (a adminAPIHandlers) ListAccessKeysOpenIDBulk(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
if !globalIAMSys.OpenIDConfig.Enabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminOpenIDNotEnabled), r.URL)
return
}
userList := r.Form["users"]
isAll := r.Form.Get("all") == "true"
selfOnly := !isAll && len(userList) == 0
cfgName := r.Form.Get("configName")
allConfigs := r.Form.Get("allConfigs") == "true"
if cfgName == "" && !allConfigs {
cfgName = madmin.Default
}
if isAll && len(userList) > 0 {
// This should be checked on the client side, so return a generic error.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
// Listing access keys for all users requires the ListUsersAdminAction permission.
if isAll {
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListUsersAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
} else if len(userList) == 1 && userList[0] == cred.ParentUser {
selfOnly = true
}
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
DenyOnly: selfOnly,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
if selfOnly && len(userList) == 0 {
selfDN := cred.AccessKey
if cred.ParentUser != "" {
selfDN = cred.ParentUser
}
userList = append(userList, selfDN)
}
listType := r.Form.Get("listType")
var listSTSKeys, listServiceAccounts bool
switch listType {
case madmin.AccessKeyListUsersOnly:
listSTSKeys = false
listServiceAccounts = false
case madmin.AccessKeyListSTSOnly:
listSTSKeys = true
listServiceAccounts = false
case madmin.AccessKeyListSvcaccOnly:
listSTSKeys = false
listServiceAccounts = true
case madmin.AccessKeyListAll:
listSTSKeys = true
listServiceAccounts = true
default:
err := errors.New("invalid list type")
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
return
}
s := globalServerConfig.Clone()
roleArnMap := make(map[string]string)
// Map of configs to a map of users to their access keys
cfgToUsersMap := make(map[string]map[string]madmin.OpenIDUserAccessKeys)
configs, err := globalIAMSys.OpenIDConfig.GetConfigList(s)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for _, config := range configs {
if !allConfigs && cfgName != config.Name {
continue
}
arn := dummyRoleARN
if config.RoleARN != "" {
arn = config.RoleARN
}
roleArnMap[arn] = config.Name
newResp := make(map[string]madmin.OpenIDUserAccessKeys)
cfgToUsersMap[config.Name] = newResp
}
if len(roleArnMap) == 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
return
}
userSet := set.CreateStringSet(userList...)
accessKeys, err := globalIAMSys.ListAllAccessKeys(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for _, accessKey := range accessKeys {
// Filter out any disqualifying access keys
_, ok := accessKey.Claims[subClaim]
if !ok {
continue // OpenID access keys must have a sub claim
}
if (!listSTSKeys && !accessKey.IsServiceAccount()) || (!listServiceAccounts && accessKey.IsServiceAccount()) {
continue // skip if not the type we want
}
arn, ok := accessKey.Claims[roleArnClaim].(string)
if !ok {
if _, ok := accessKey.Claims[iamPolicyClaimNameOpenID()]; !ok {
continue // skip if no roleArn and no policy claim
}
// No role ARN but a policy claim is present: such keys belong to the
// claim-based provider, which is keyed under dummyRoleARN in roleArnMap.
arn = dummyRoleARN
}
matchingCfgName, ok := roleArnMap[arn]
if !ok {
continue // skip if not part of the target config
}
var id string
if idClaim := globalIAMSys.OpenIDConfig.GetUserIDClaim(matchingCfgName); idClaim != "" {
id, _ = accessKey.Claims[idClaim].(string)
}
if !userSet.IsEmpty() && !userSet.Contains(accessKey.ParentUser) && !userSet.Contains(id) {
continue // skip if not in the user list
}
openIDUserAccessKeys, ok := cfgToUsersMap[matchingCfgName][accessKey.ParentUser]
// Add new user to map if not already present
if !ok {
var readableClaim string
if rc := globalIAMSys.OpenIDConfig.GetUserReadableClaim(matchingCfgName); rc != "" {
readableClaim, _ = accessKey.Claims[rc].(string)
}
openIDUserAccessKeys = madmin.OpenIDUserAccessKeys{
MinioAccessKey: accessKey.ParentUser,
ID: id,
ReadableName: readableClaim,
}
}
svcAccInfo := madmin.ServiceAccountInfo{
AccessKey: accessKey.AccessKey,
Expiration: &accessKey.Expiration,
}
if accessKey.IsServiceAccount() {
openIDUserAccessKeys.ServiceAccounts = append(openIDUserAccessKeys.ServiceAccounts, svcAccInfo)
} else {
openIDUserAccessKeys.STSKeys = append(openIDUserAccessKeys.STSKeys, svcAccInfo)
}
cfgToUsersMap[matchingCfgName][accessKey.ParentUser] = openIDUserAccessKeys
}
// Convert map to slice and sort
resp := make([]madmin.ListAccessKeysOpenIDResp, 0, len(cfgToUsersMap))
for cfgName, usersMap := range cfgToUsersMap {
users := make([]madmin.OpenIDUserAccessKeys, 0, len(usersMap))
for _, user := range usersMap {
users = append(users, user)
}
sort.Slice(users, func(i, j int) bool {
return users[i].MinioAccessKey < users[j].MinioAccessKey
})
resp = append(resp, madmin.ListAccessKeysOpenIDResp{
ConfigName: cfgName,
Users: users,
})
}
sort.Slice(resp, func(i, j int) bool {
return resp[i].ConfigName < resp[j].ConfigName
})
data, err := json.Marshal(resp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}

393
cmd/admin-handlers-pools.go Normal file

@ -0,0 +1,393 @@
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/minio/mux"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/policy"
)
var (
errRebalanceDecommissionAlreadyRunning = errors.New("Rebalance cannot be started, decommission is already in progress")
errDecommissionRebalanceAlreadyRunning = errors.New("Decommission cannot be started, rebalance is already in progress")
)
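// StartDecommission begins decommissioning the specified server pool(s). Pools
// are given as a comma-separated route value, referenced either by their
// endpoint argument or, when by-id is true, by their numeric pool index. The
// request fails if a decommission or rebalance is already in progress.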
func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.DecommissionAdminAction)
if objectAPI == nil {
return
}
// Legacy args style such as non-ellipses style is not supported with this API.
if globalEndpoints.Legacy() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
z, ok := objectAPI.(*erasureServerPools)
if !ok || len(z.serverPools) == 1 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if z.IsDecommissionRunning() {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errDecommissionAlreadyRunning), r.URL)
return
}
if z.IsRebalanceStarted() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL)
return
}
vars := mux.Vars(r)
v := vars["pool"]
byID := vars["by-id"] == "true"
pools := strings.Split(v, ",")
poolIndices := make([]int, 0, len(pools))
for _, pool := range pools {
var idx int
if byID {
var err error
idx, err = strconv.Atoi(pool)
if err != nil {
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
return
}
} else {
idx = globalEndpoints.GetPoolIdx(pool)
if idx == -1 {
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
return
}
}
var pool *erasureSets
for pidx := range z.serverPools {
if pidx == idx {
pool = z.serverPools[idx]
break
}
}
if pool == nil {
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
return
}
poolIndices = append(poolIndices, idx)
}
if len(poolIndices) == 0 || !proxyDecommissionRequest(ctx, globalEndpoints[poolIndices[0]].Endpoints[0], w, r) {
if err := z.Decommission(r.Context(), poolIndices...); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
}
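// CancelDecommission cancels an in-progress decommission of the specified
// pool, proxying the request to the appropriate node when necessary.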
func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.DecommissionAdminAction)
if objectAPI == nil {
return
}
// Legacy args style such as non-ellipses style is not supported with this API.
if globalEndpoints.Legacy() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
vars := mux.Vars(r)
v := vars["pool"]
byID := vars["by-id"] == "true"
idx := -1
if byID {
if i, err := strconv.Atoi(v); err == nil && i >= 0 && i < len(globalEndpoints) {
idx = i
}
} else {
idx = globalEndpoints.GetPoolIdx(v)
}
if idx == -1 {
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
return
}
if !proxyDecommissionRequest(ctx, globalEndpoints[idx].Endpoints[0], w, r) {
if err := pools.DecommissionCancel(ctx, idx); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
}
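// StatusPool returns the decommission status of the specified pool as JSON.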
func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ServerInfoAdminAction, policy.DecommissionAdminAction)
if objectAPI == nil {
return
}
// Legacy args style such as non-ellipses style is not supported with this API.
if globalEndpoints.Legacy() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
vars := mux.Vars(r)
v := vars["pool"]
byID := vars["by-id"] == "true"
idx := -1
if byID {
if i, err := strconv.Atoi(v); err == nil && i >= 0 && i < len(globalEndpoints) {
idx = i
}
} else {
idx = globalEndpoints.GetPoolIdx(v)
}
if idx == -1 {
apiErr := toAdminAPIErr(ctx, errInvalidArgument)
apiErr.Description = fmt.Sprintf("specified pool '%s' not found, please specify a valid pool", v)
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, apiErr, r.URL)
return
}
status, err := pools.Status(r.Context(), idx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
adminLogIf(r.Context(), json.NewEncoder(w).Encode(&status))
}
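// ListPools returns the status of all configured server pools as a JSON array.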
func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ServerInfoAdminAction, policy.DecommissionAdminAction)
if objectAPI == nil {
return
}
// Legacy args style such as non-ellipses style is not supported with this API.
if globalEndpoints.Legacy() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
poolsStatus := make([]PoolStatus, len(globalEndpoints))
for idx := range globalEndpoints {
status, err := pools.Status(r.Context(), idx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
poolsStatus[idx] = status
}
adminLogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
}
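// RebalanceStart starts a rebalance operation across all server pools. The
// request is proxied to the first node of the first pool so that concurrent
// rebalance-start calls are serialized; on success the rebalance ID is
// returned and peers are notified to load the rebalance metadata.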
func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.RebalanceAdminAction)
if objectAPI == nil {
return
}
// NB rebalance-start admin API is always coordinated from first pool's
// first node. The following is required to serialize (the effects of)
// concurrent rebalance-start commands.
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Host == ep.Host {
if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
return
}
}
}
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok || len(pools.serverPools) == 1 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if pools.IsDecommissionRunning() {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errRebalanceDecommissionAlreadyRunning), r.URL)
return
}
if pools.IsRebalanceStarted() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL)
return
}
bucketInfos, err := objectAPI.ListBuckets(ctx, BucketOptions{})
if err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
buckets := make([]string, 0, len(bucketInfos))
for _, bInfo := range bucketInfos {
buckets = append(buckets, bInfo.Name)
}
var id string
if id, err = pools.initRebalanceMeta(ctx, buckets); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Rebalance routine is run on the first node of any pool participating in rebalance.
pools.StartRebalance()
b, err := json.Marshal(struct {
ID string `json:"id"`
}{ID: id})
if err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, b)
// Notify peers to load rebalance.bin and start the rebalance routine if they
// happen to be a participating pool's leader node.
globalNotificationSys.LoadRebalanceMeta(ctx, true)
}
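// RebalanceStatus reports the progress of an ongoing rebalance, proxied to the
// first pool's first node so that callers get a consistent view.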
func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.RebalanceAdminAction)
if objectAPI == nil {
return
}
// Proxy rebalance-status to the first pool's first node, so that users see a
// consistent view of rebalance progress even though different rebalancing
// pools may temporarily have out-of-date info on the others.
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Host == ep.Host {
if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
return
}
}
}
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
rs, err := rebalanceStatus(ctx, pools)
if err != nil {
if errors.Is(err, errRebalanceNotStarted) || errors.Is(err, errConfigNotFound) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceNotStarted), r.URL)
return
}
adminLogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
adminLogIf(r.Context(), json.NewEncoder(w).Encode(rs))
}
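// RebalanceStop stops any ongoing rebalance operation, persists the stopped-at
// state and notifies peers to unload their rebalance metadata.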
func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.RebalanceAdminAction)
if objectAPI == nil {
return
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Cancel any ongoing rebalance operation
globalNotificationSys.StopRebalance(r.Context())
writeSuccessResponseHeadersOnly(w)
adminLogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
globalNotificationSys.LoadRebalanceMeta(ctx, false)
}
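// proxyDecommissionRequest proxies the request to the node hosting the given
// endpoint (overridable via _MINIO_DECOM_ENDPOINT_HOST) when that node is not
// local. It reports whether the request was successfully proxied.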
func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) {
host := env.Get("_MINIO_DECOM_ENDPOINT_HOST", defaultEndPoint.Host)
if host == "" {
return
}
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Host == host && !proxyEp.IsLocal {
if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
return true
}
}
}
return
}


@ -0,0 +1,623 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"encoding/gob"
"encoding/json"
"errors"
"io"
"net/http"
"strings"
"sync/atomic"
"time"
"github.com/dustin/go-humanize"
"github.com/minio/madmin-go/v3"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/mux"
"github.com/minio/pkg/v3/policy"
)
// SiteReplicationAdd - PUT /minio/admin/v3/site-replication/add
func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, cred := validateAdminReq(ctx, w, r, policy.SiteReplicationAddAction)
if objectAPI == nil {
return
}
var sites []madmin.PeerSite
if err := parseJSONBody(ctx, r.Body, &sites, cred.SecretKey); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
opts := getSRAddOptions(r)
status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites, opts)
if err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(status)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
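// getSRAddOptions extracts site replication add options from the request form.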
func getSRAddOptions(r *http.Request) (opts madmin.SRAddOptions) {
opts.ReplicateILMExpiry = r.Form.Get("replicateILMExpiry") == "true"
return
}
// SRPeerJoin - PUT /minio/admin/v3/site-replication/join
//
// used internally to tell the current cluster to enable SR with
// the provided peer clusters and service account.
func (a adminAPIHandlers) SRPeerJoin(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, cred := validateAdminReq(ctx, w, r, policy.SiteReplicationAddAction)
if objectAPI == nil {
return
}
var joinArg madmin.SRPeerJoinReq
if err := parseJSONBody(ctx, r.Body, &joinArg, cred.SecretKey); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err := globalSiteReplicationSys.PeerJoinReq(ctx, joinArg); err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SRPeerBucketOps - PUT /minio/admin/v3/site-replication/bucket-ops?bucket=x&operation=y
func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationOperationAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
operation := madmin.BktOp(vars["operation"])
var err error
switch operation {
default:
err = errSRInvalidRequest(errInvalidArgument)
case madmin.MakeWithVersioningBktOp:
createdAt, cerr := time.Parse(time.RFC3339Nano, strings.TrimSpace(r.Form.Get("createdAt")))
if cerr != nil {
createdAt = timeSentinel
}
opts := MakeBucketOptions{
LockEnabled: r.Form.Get("lockEnabled") == "true",
VersioningEnabled: r.Form.Get("versioningEnabled") == "true",
ForceCreate: r.Form.Get("forceCreate") == "true",
CreatedAt: createdAt,
}
err = globalSiteReplicationSys.PeerBucketMakeWithVersioningHandler(ctx, bucket, opts)
case madmin.ConfigureReplBktOp:
err = globalSiteReplicationSys.PeerBucketConfigureReplHandler(ctx, bucket)
case madmin.DeleteBucketBktOp, madmin.ForceDeleteBucketBktOp:
err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, DeleteBucketOptions{
Force: operation == madmin.ForceDeleteBucketBktOp,
SRDeleteOp: getSRBucketDeleteOp(true),
})
case madmin.PurgeDeletedBucketOp:
globalSiteReplicationSys.purgeDeletedBucket(ctx, objectAPI, bucket)
}
if err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SRPeerReplicateIAMItem - PUT /minio/admin/v3/site-replication/iam-item
func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationOperationAction)
if objectAPI == nil {
return
}
var item madmin.SRIAMItem
if err := parseJSONBody(ctx, r.Body, &item, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var err error
switch item.Type {
default:
err = errSRInvalidRequest(errInvalidArgument)
case madmin.SRIAMItemPolicy:
if item.Policy == nil {
err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, nil, item.UpdatedAt)
} else {
policy, perr := policy.ParseConfig(bytes.NewReader(item.Policy))
if perr != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, perr), r.URL)
return
}
if policy.IsEmpty() {
err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, nil, item.UpdatedAt)
} else {
err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, policy, item.UpdatedAt)
}
}
case madmin.SRIAMItemSvcAcc:
err = globalSiteReplicationSys.PeerSvcAccChangeHandler(ctx, item.SvcAccChange, item.UpdatedAt)
case madmin.SRIAMItemPolicyMapping:
err = globalSiteReplicationSys.PeerPolicyMappingHandler(ctx, item.PolicyMapping, item.UpdatedAt)
case madmin.SRIAMItemSTSAcc:
err = globalSiteReplicationSys.PeerSTSAccHandler(ctx, item.STSCredential, item.UpdatedAt)
case madmin.SRIAMItemIAMUser:
err = globalSiteReplicationSys.PeerIAMUserChangeHandler(ctx, item.IAMUser, item.UpdatedAt)
case madmin.SRIAMItemGroupInfo:
err = globalSiteReplicationSys.PeerGroupInfoChangeHandler(ctx, item.GroupInfo, item.UpdatedAt)
}
if err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SRPeerReplicateBucketItem - PUT /minio/admin/v3/site-replication/peer/bucket-meta
func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationOperationAction)
if objectAPI == nil {
return
}
var item madmin.SRBucketMeta
if err := parseJSONBody(ctx, r.Body, &item, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if item.Bucket == "" {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errSRInvalidRequest(errInvalidArgument)), r.URL)
return
}
var err error
switch item.Type {
default:
err = globalSiteReplicationSys.PeerBucketMetadataUpdateHandler(ctx, item)
case madmin.SRBucketMetaTypePolicy:
if item.Policy == nil {
err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, nil, item.UpdatedAt)
} else {
bktPolicy, berr := policy.ParseBucketPolicyConfig(bytes.NewReader(item.Policy), item.Bucket)
if berr != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, berr), r.URL)
return
}
if bktPolicy.IsEmpty() {
err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, nil, item.UpdatedAt)
} else {
err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, bktPolicy, item.UpdatedAt)
}
}
case madmin.SRBucketMetaTypeQuotaConfig:
if item.Quota == nil {
err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, nil, item.UpdatedAt)
} else {
quotaConfig, err := parseBucketQuota(item.Bucket, item.Quota)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, quotaConfig, item.UpdatedAt); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
case madmin.SRBucketMetaTypeVersionConfig:
err = globalSiteReplicationSys.PeerBucketVersioningHandler(ctx, item.Bucket, item.Versioning, item.UpdatedAt)
case madmin.SRBucketMetaTypeTags:
err = globalSiteReplicationSys.PeerBucketTaggingHandler(ctx, item.Bucket, item.Tags, item.UpdatedAt)
case madmin.SRBucketMetaTypeObjectLockConfig:
err = globalSiteReplicationSys.PeerBucketObjectLockConfigHandler(ctx, item.Bucket, item.ObjectLockConfig, item.UpdatedAt)
case madmin.SRBucketMetaTypeSSEConfig:
err = globalSiteReplicationSys.PeerBucketSSEConfigHandler(ctx, item.Bucket, item.SSEConfig, item.UpdatedAt)
case madmin.SRBucketMetaLCConfig:
err = globalSiteReplicationSys.PeerBucketLCConfigHandler(ctx, item.Bucket, item.ExpiryLCConfig, item.UpdatedAt)
}
if err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SiteReplicationInfo - GET /minio/admin/v3/site-replication/info
func (a adminAPIHandlers) SiteReplicationInfo(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationInfoAction)
if objectAPI == nil {
return
}
info, err := globalSiteReplicationSys.GetClusterInfo(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = json.NewEncoder(w).Encode(info); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
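// SRPeerGetIDPSettings - fetches this cluster's IDP settings and returns them
// as JSON; called by peer clusters while configuring site replication.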
func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationAddAction)
if objectAPI == nil {
return
}
idpSettings := globalSiteReplicationSys.GetIDPSettings(ctx)
if err := json.NewEncoder(w).Encode(idpSettings); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
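// parseJSONBody reads the request body, decrypts it with encryptionKey when one
// is provided, and unmarshals the resulting JSON into v.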
func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
data, err := io.ReadAll(body)
if err != nil {
return SRError{
Cause: err,
Code: ErrSiteReplicationInvalidRequest,
}
}
if encryptionKey != "" {
data, err = madmin.DecryptData(encryptionKey, bytes.NewReader(data))
if err != nil {
return SRError{
Cause: err,
Code: ErrSiteReplicationInvalidRequest,
}
}
}
return json.Unmarshal(data, v)
}
// SiteReplicationStatus - GET /minio/admin/v3/site-replication/status
func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationInfoAction)
if objectAPI == nil {
return
}
opts := getSRStatusOptions(r)
// default options to all if status options are unset for backward compatibility
var dfltOpts madmin.SRStatusOptions
if opts == dfltOpts {
opts.Buckets = true
opts.Users = true
opts.Policies = true
opts.Groups = true
opts.ILMExpiryRules = true
}
info, err := globalSiteReplicationSys.SiteReplicationStatus(ctx, objectAPI, opts)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Report the ILMExpiryStats only if at least one site has replication of ILM expiry enabled
var replicateILMExpiry bool
for _, site := range info.Sites {
if site.ReplicateILMExpiry {
replicateILMExpiry = true
break
}
}
if !replicateILMExpiry {
// explicitly send nil for ILMExpiryStats
info.ILMExpiryStats = nil
}
if err = json.NewEncoder(w).Encode(info); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SiteReplicationMetaInfo - GET /minio/admin/v3/site-replication/metainfo
func (a adminAPIHandlers) SiteReplicationMetaInfo(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationInfoAction)
if objectAPI == nil {
return
}
opts := getSRStatusOptions(r)
info, err := globalSiteReplicationSys.SiteReplicationMetaInfo(ctx, objectAPI, opts)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = json.NewEncoder(w).Encode(info); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SiteReplicationEdit - PUT /minio/admin/v3/site-replication/edit
func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, cred := validateAdminReq(ctx, w, r, policy.SiteReplicationAddAction)
if objectAPI == nil {
return
}
var site madmin.PeerInfo
err := parseJSONBody(ctx, r.Body, &site, cred.SecretKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
opts := getSREditOptions(r)
status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site, opts)
if err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(status)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
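// getSREditOptions parses the site replication edit options from the request
// form values.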
func getSREditOptions(r *http.Request) (opts madmin.SREditOptions) {
opts.DisableILMExpiryReplication = r.Form.Get("disableILMExpiryReplication") == "true"
opts.EnableILMExpiryReplication = r.Form.Get("enableILMExpiryReplication") == "true"
return
}
// SRPeerEdit - PUT /minio/admin/v3/site-replication/peer/edit
//
// used internally to tell current cluster to update endpoint for peer
func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationAddAction)
if objectAPI == nil {
return
}
var pi madmin.PeerInfo
if err := parseJSONBody(ctx, r.Body, &pi, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err := globalSiteReplicationSys.PeerEditReq(ctx, pi); err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SRStateEdit - PUT /minio/admin/v3/site-replication/state/edit
//
// used internally to tell current cluster to update site replication state
func (a adminAPIHandlers) SRStateEdit(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationOperationAction)
if objectAPI == nil {
return
}
var state madmin.SRStateEditReq
if err := parseJSONBody(ctx, r.Body, &state, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err := globalSiteReplicationSys.PeerStateEditReq(ctx, state); err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
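// getSRStatusOptions parses the site replication status options from the
// request form values, e.g. ?buckets=true&users=true&metrics=true.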
func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) {
q := r.Form
opts.Buckets = q.Get("buckets") == "true"
opts.Policies = q.Get("policies") == "true"
opts.Groups = q.Get("groups") == "true"
opts.Users = q.Get("users") == "true"
opts.ILMExpiryRules = q.Get("ilm-expiry-rules") == "true"
opts.PeerState = q.Get("peer-state") == "true"
opts.Entity = madmin.GetSREntityType(q.Get("entity"))
opts.EntityValue = q.Get("entityvalue")
opts.ShowDeleted = q.Get("showDeleted") == "true"
opts.Metrics = q.Get("metrics") == "true"
return
}
// SiteReplicationRemove - PUT /minio/admin/v3/site-replication/remove
func (a adminAPIHandlers) SiteReplicationRemove(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationRemoveAction)
if objectAPI == nil {
return
}
var rreq madmin.SRRemoveReq
err := parseJSONBody(ctx, r.Body, &rreq, "")
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
status, err := globalSiteReplicationSys.RemovePeerCluster(ctx, objectAPI, rreq)
if err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(status)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
// SRPeerRemove - PUT /minio/admin/v3/site-replication/peer/remove
//
// used internally to tell current cluster to remove a peer from site replication
func (a adminAPIHandlers) SRPeerRemove(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationRemoveAction)
if objectAPI == nil {
return
}
var req madmin.SRRemoveReq
if err := parseJSONBody(ctx, r.Body, &req, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err := globalSiteReplicationSys.InternalRemoveReq(ctx, objectAPI, req); err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SiteReplicationResyncOp - PUT /minio/admin/v3/site-replication/resync/op
func (a adminAPIHandlers) SiteReplicationResyncOp(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationResyncAction)
if objectAPI == nil {
return
}
var peerSite madmin.PeerInfo
if err := parseJSONBody(ctx, r.Body, &peerSite, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
vars := mux.Vars(r)
op := madmin.SiteResyncOp(vars["operation"])
var (
status madmin.SRResyncOpStatus
err error
)
switch op {
case madmin.SiteResyncStart:
status, err = globalSiteReplicationSys.startResync(ctx, objectAPI, peerSite)
case madmin.SiteResyncCancel:
status, err = globalSiteReplicationSys.cancelResync(ctx, objectAPI, peerSite)
default:
err = errSRInvalidRequest(errInvalidArgument)
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(status)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
// SiteReplicationDevNull - everything goes to io.Discard
// [POST] /minio/admin/v3/site-replication/devnull
func (a adminAPIHandlers) SiteReplicationDevNull(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
globalSiteNetPerfRX.Connect()
defer globalSiteNetPerfRX.Disconnect()
connectTime := time.Now()
for {
n, err := io.CopyN(xioutil.Discard, r.Body, 128*humanize.KiByte)
atomic.AddUint64(&globalSiteNetPerfRX.RX, uint64(n))
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
// A disconnection before globalNetPerfMinDuration (with a margin of error of 1 sec)
// means the network is not stable. Logging here will help in debugging network issues.
if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) {
adminLogIf(ctx, err)
}
}
if err != nil {
if errors.Is(err, io.EOF) {
w.WriteHeader(http.StatusNoContent)
} else {
w.WriteHeader(http.StatusBadRequest)
}
break
}
}
}
// SiteReplicationNetPerf - runs a site replication network performance test for the requested duration
// [POST] /minio/admin/v3/site-replication/netperf
func (a adminAPIHandlers) SiteReplicationNetPerf(w http.ResponseWriter, r *http.Request) {
durationStr := r.Form.Get(peerRESTDuration)
duration, _ := time.ParseDuration(durationStr)
if duration < globalNetPerfMinDuration {
duration = globalNetPerfMinDuration
}
result := siteNetperf(r.Context(), duration)
adminLogIf(r.Context(), gob.NewEncoder(w).Encode(result))
}

View File

@ -0,0 +1,152 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//go:build !race
// +build !race
// Tests in this file are not run under the `-race` flag as they are too slow
// and cause context deadline errors.
package cmd
import (
"context"
"fmt"
"runtime"
"testing"
"time"
"github.com/minio/madmin-go/v3"
minio "github.com/minio/minio-go/v7"
"github.com/minio/pkg/v3/sync/errgroup"
)
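// runAllIAMConcurrencyTests runs the IAM concurrency test cases against the
// given test suite.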
func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {
suite.SetUpSuite(c)
suite.TestDeleteUserRace(c)
suite.TearDownSuite(c)
}
func TestIAMInternalIDPConcurrencyServerSuite(t *testing.T) {
if runtime.GOOS == globalWindowsOSName {
t.Skip("windows is clunky")
}
baseTestCases := []TestSuiteCommon{
// Init and run test on ErasureSD backend with signature v4.
{serverType: "ErasureSD", signer: signerV4},
// Init and run test on ErasureSD backend, with tls enabled.
{serverType: "ErasureSD", signer: signerV4, secure: true},
// Init and run test on Erasure backend.
{serverType: "Erasure", signer: signerV4},
// Init and run test on ErasureSet backend.
{serverType: "ErasureSet", signer: signerV4},
}
testCases := []*TestSuiteIAM{}
for _, bt := range baseTestCases {
testCases = append(testCases,
newTestSuiteIAM(bt, false),
newTestSuiteIAM(bt, true),
)
}
for i, testCase := range testCases {
etcdStr := ""
if testCase.withEtcdBackend {
etcdStr = " (with etcd backend)"
}
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
func(t *testing.T) {
runAllIAMConcurrencyTests(testCase, &check{t, testCase.serverType})
},
)
}
}
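// TestDeleteUserRace creates several users attached to the same policy and
// removes them concurrently, verifying that removed users can no longer list
// objects in the bucket.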
func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}
// Create a policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
userCount := 50
accessKeys := make([]string, userCount)
secretKeys := make([]string, userCount)
for i := 0; i < userCount; i++ {
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
userReq := madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
}
if _, err := s.adm.AttachPolicy(ctx, userReq); err != nil {
c.Fatalf("Unable to attach policy: %v", err)
}
accessKeys[i] = accessKey
secretKeys[i] = secretKey
}
g := errgroup.Group{}
for i := 0; i < userCount; i++ {
g.Go(func(i int) func() error {
return func() error {
uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "")
err := s.adm.RemoveUser(ctx, accessKeys[i])
if err != nil {
return err
}
c.mustNotListObjects(ctx, uClient, bucket)
return nil
}
}(i), i)
}
if errs := g.Wait(); len(errs) > 0 {
c.Fatalf("unable to remove users: %v", errs)
}
}

2999
cmd/admin-handlers-users.go Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

3562
cmd/admin-handlers.go Normal file

File diff suppressed because it is too large Load Diff

535
cmd/admin-handlers_test.go Normal file
View File

@ -0,0 +1,535 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/url"
"sort"
"sync"
"testing"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
"github.com/minio/mux"
)
// adminErasureTestBed - encapsulates subsystems that need to be set up for
// admin-handler unit tests.
type adminErasureTestBed struct {
erasureDirs []string
objLayer ObjectLayer
router *mux.Router
done context.CancelFunc
}
// prepareAdminErasureTestBed - helper function that sets up a single-node
// Erasure backend for admin-handler tests.
func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, error) {
ctx, cancel := context.WithCancel(ctx)
// reset global variables to start afresh.
resetTestGlobals()
// Set globalIsErasure to indicate that the setup uses an erasure
// code backend.
globalIsErasure = true
// Initializing objectLayer for HealFormatHandler.
objLayer, erasureDirs, xlErr := initTestErasureObjLayer(ctx)
if xlErr != nil {
cancel()
return nil, xlErr
}
// Initialize minio server config.
if err := newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
cancel()
return nil, err
}
// Initialize boot time
globalBootTime = UTCNow()
globalEndpoints = mustGetPoolEndpoints(0, erasureDirs...)
initAllSubsystems(ctx)
initConfigSubsystem(ctx, objLayer)
globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)
// Setup admin mgmt REST API handlers.
adminRouter := mux.NewRouter()
registerAdminRouter(adminRouter, true)
return &adminErasureTestBed{
erasureDirs: erasureDirs,
objLayer: objLayer,
router: adminRouter,
done: cancel,
}, nil
}
// TearDown - method that resets the test bed for subsequent unit
// tests to start afresh.
func (atb *adminErasureTestBed) TearDown() {
atb.done()
removeRoots(atb.erasureDirs)
resetTestGlobals()
}
// initTestErasureObjLayer - Helper function to initialize an Erasure-based object
// layer and set globalObjectAPI.
func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
erasureDirs, err := getRandomDisks(16)
if err != nil {
return nil, nil, err
}
endpoints := mustGetPoolEndpoints(0, erasureDirs...)
globalPolicySys = NewPolicySys()
objLayer, err := newErasureServerPools(ctx, endpoints)
if err != nil {
return nil, nil, err
}
// Make objLayer available to all internal services via globalObjectAPI.
globalObjLayerMutex.Lock()
globalObjectAPI = objLayer
globalObjLayerMutex.Unlock()
return objLayer, erasureDirs, nil
}
// cmdType - Represents different service subcommands like status, stop
// and restart.
type cmdType int
const (
restartCmd cmdType = iota
stopCmd
)
// toServiceSignal - Helper function that translates a given cmdType
// value to its corresponding serviceSignal value.
func (c cmdType) toServiceSignal() serviceSignal {
switch c {
case restartCmd:
return serviceRestart
case stopCmd:
return serviceStop
}
return serviceRestart
}
func (c cmdType) toServiceAction() madmin.ServiceAction {
switch c {
case restartCmd:
return madmin.ServiceActionRestart
case stopCmd:
return madmin.ServiceActionStop
}
return madmin.ServiceActionRestart
}
// testServiceSignalReceiver - Helper function that simulates a
// go-routine waiting on service signal.
func testServiceSignalReceiver(cmd cmdType, t *testing.T) {
expectedCmd := cmd.toServiceSignal()
serviceCmd := <-globalServiceSignalCh
if serviceCmd != expectedCmd {
t.Errorf("Expected service command %v but received %v", expectedCmd, serviceCmd)
}
}
// getServiceCmdRequest - Constructs a management REST API request for service
// subcommands for a given cmdType value.
func getServiceCmdRequest(cmd cmdType, cred auth.Credentials) (*http.Request, error) {
queryVal := url.Values{}
queryVal.Set("action", string(cmd.toServiceAction()))
queryVal.Set("type", "2")
resource := adminPathPrefix + adminAPIVersionPrefix + "/service?" + queryVal.Encode()
req, err := newTestRequest(http.MethodPost, resource, 0, nil)
if err != nil {
return nil, err
}
// management REST API uses signature V4 for authentication.
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
return nil, err
}
return req, nil
}
// testServicesCmdHandler - parametrizes service subcommand tests on
// cmdType value.
func testServicesCmdHandler(cmd cmdType, t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
adminTestBed, err := prepareAdminErasureTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.", err)
}
defer adminTestBed.TearDown()
// Initialize admin peers to make admin RPC calls. Note: In a
// single node setup, this degenerates to a simple function
// call under the hood.
globalMinioAddr = "127.0.0.1:9000"
var wg sync.WaitGroup
// Setting up a go routine to simulate ServerRouter's
// handleServiceSignals for stop and restart commands.
if cmd == restartCmd {
wg.Add(1)
go func() {
defer wg.Done()
testServiceSignalReceiver(cmd, t)
}()
}
credentials := globalActiveCred
req, err := getServiceCmdRequest(cmd, credentials)
if err != nil {
t.Fatalf("Failed to build service status request %v", err)
}
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
resp, _ := io.ReadAll(rec.Body)
if rec.Code != http.StatusOK {
t.Errorf("Expected to receive %d status code but received %d. Body (%s)",
http.StatusOK, rec.Code, string(resp))
}
result := &serviceResult{}
if err := json.Unmarshal(resp, result); err != nil {
t.Error(err)
}
_ = result
// Wait until testServiceSignalReceiver() called in a goroutine quits.
wg.Wait()
}
// Test for service restart management REST API.
func TestServiceRestartHandler(t *testing.T) {
testServicesCmdHandler(restartCmd, t)
}
// buildAdminRequest - helper function to build an admin API request.
func buildAdminRequest(queryVal url.Values, method, path string,
contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error,
) {
req, err := newTestRequest(method,
adminPathPrefix+adminAPIVersionPrefix+path+"?"+queryVal.Encode(),
contentLength, bodySeeker)
if err != nil {
return nil, err
}
cred := globalActiveCred
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
return nil, err
}
return req, nil
}
func TestAdminServerInfo(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
adminTestBed, err := prepareAdminErasureTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.", err)
}
defer adminTestBed.TearDown()
// Initialize admin peers to make admin RPC calls.
globalMinioAddr = "127.0.0.1:9000"
// Prepare query params for set-config mgmt REST API.
queryVal := url.Values{}
queryVal.Set("info", "")
req, err := buildAdminRequest(queryVal, http.MethodGet, "/info", 0, nil)
if err != nil {
t.Fatalf("Failed to construct get-config object request - %v", err)
}
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
t.Errorf("Expected to succeed but failed with %d", rec.Code)
}
results := madmin.InfoMessage{}
err = json.NewDecoder(rec.Body).Decode(&results)
if err != nil {
t.Fatalf("Failed to decode set config result json %v", err)
}
if results.Region != globalMinioDefaultRegion {
t.Errorf("Expected %s, got %s", globalMinioDefaultRegion, results.Region)
}
}
// TestToAdminAPIErrCode - test for toAdminAPIErrCode helper function.
func TestToAdminAPIErrCode(t *testing.T) {
testCases := []struct {
err error
expectedAPIErr APIErrorCode
}{
// 1. Server not in quorum.
{
err: errErasureWriteQuorum,
expectedAPIErr: ErrAdminConfigNoQuorum,
},
// 2. No error.
{
err: nil,
expectedAPIErr: ErrNone,
},
// 3. Non-admin API specific error.
{
err: errDiskNotFound,
expectedAPIErr: toAPIErrorCode(GlobalContext, errDiskNotFound),
},
}
for i, test := range testCases {
actualErr := toAdminAPIErrCode(GlobalContext, test.err)
if actualErr != test.expectedAPIErr {
t.Errorf("Test %d: Expected %v but received %v",
i+1, test.expectedAPIErr, actualErr)
}
}
}
func TestExtractHealInitParams(t *testing.T) {
mkParams := func(clientToken string, forceStart, forceStop bool) url.Values {
v := url.Values{}
if clientToken != "" {
v.Add(mgmtClientToken, clientToken)
}
if forceStart {
v.Add(mgmtForceStart, "")
}
if forceStop {
v.Add(mgmtForceStop, "")
}
return v
}
qParamsArr := []url.Values{
// Invalid cases
mkParams("", true, true),
mkParams("111", true, true),
mkParams("111", true, false),
mkParams("111", false, true),
// Valid cases follow
mkParams("", true, false),
mkParams("", false, true),
mkParams("", false, false),
mkParams("111", false, false),
}
varsArr := []map[string]string{
// Invalid cases
{mgmtPrefix: "objprefix"},
// Valid cases
{},
{mgmtBucket: "bucket"},
{mgmtBucket: "bucket", mgmtPrefix: "objprefix"},
}
// Body is always valid - we do not test JSON decoding.
body := `{"recursive": false, "dryRun": true, "remove": false, "scanMode": 0}`
// Test all combinations!
for pIdx, params := range qParamsArr {
for vIdx, vars := range varsArr {
_, err := extractHealInitParams(vars, params, bytes.NewReader([]byte(body)))
isErrCase := false
if pIdx < 4 || vIdx < 1 {
isErrCase = true
}
if err != ErrNone && !isErrCase {
t.Errorf("Got unexpected error: %v %v %v", pIdx, vIdx, err)
} else if err == ErrNone && isErrCase {
t.Errorf("Got no error but expected one: %v %v", pIdx, vIdx)
}
}
}
}
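// byResourceUID sorts madmin.LockEntries by resource name and lock UID.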
type byResourceUID struct{ madmin.LockEntries }
func (b byResourceUID) Less(i, j int) bool {
toUniqLock := func(entry madmin.LockEntry) string {
return fmt.Sprintf("%s/%s", entry.Resource, entry.ID)
}
return toUniqLock(b.LockEntries[i]) < toUniqLock(b.LockEntries[j])
}
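// TestTopLockEntries verifies that topLockEntries aggregates per-node lock
// information into the expected set of lock entries.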
func TestTopLockEntries(t *testing.T) {
locksHeld := make(map[string][]lockRequesterInfo)
var owners []string
for i := 0; i < 4; i++ {
owners = append(owners, fmt.Sprintf("node-%d", i))
}
// Simulate DeleteObjects of 10 objects in a single request, i.e. same lock
// request UID, but 10 different resource names associated with it.
var lris []lockRequesterInfo
uuid := mustGetUUID()
for i := 0; i < 10; i++ {
resource := fmt.Sprintf("bucket/delete-object-%d", i)
lri := lockRequesterInfo{
Name: resource,
Writer: true,
UID: uuid,
Owner: owners[i%len(owners)],
Group: true,
Quorum: 3,
}
lris = append(lris, lri)
locksHeld[resource] = []lockRequesterInfo{lri}
}
// Add a few concurrent read locks to the mix
for i := 0; i < 50; i++ {
resource := fmt.Sprintf("bucket/get-object-%d", i)
lri := lockRequesterInfo{
Name: resource,
UID: mustGetUUID(),
Owner: owners[i%len(owners)],
Quorum: 2,
}
lris = append(lris, lri)
locksHeld[resource] = append(locksHeld[resource], lri)
// concurrent read lock, same resource different uid
lri.UID = mustGetUUID()
lris = append(lris, lri)
locksHeld[resource] = append(locksHeld[resource], lri)
}
var peerLocks []*PeerLocks
for _, owner := range owners {
peerLocks = append(peerLocks, &PeerLocks{
Addr: owner,
Locks: locksHeld,
})
}
var exp madmin.LockEntries
for _, lri := range lris {
lockType := func(lri lockRequesterInfo) string {
if lri.Writer {
return "WRITE"
}
return "READ"
}
exp = append(exp, madmin.LockEntry{
Resource: lri.Name,
Type: lockType(lri),
ServerList: owners,
Owner: lri.Owner,
ID: lri.UID,
Quorum: lri.Quorum,
Timestamp: time.Unix(0, lri.Timestamp),
})
}
testCases := []struct {
peerLocks []*PeerLocks
expected madmin.LockEntries
}{
{
peerLocks: peerLocks,
expected: exp,
},
}
// printEntries := func(entries madmin.LockEntries) {
// for i, entry := range entries {
// fmt.Printf("%d: %s %s %s %s %v %d\n", i, entry.Resource, entry.ID, entry.Owner, entry.Type, entry.ServerList, entry.Elapsed)
// }
// }
check := func(exp, got madmin.LockEntries) (int, bool) {
if len(exp) != len(got) {
return 0, false
}
sort.Slice(exp, byResourceUID{exp}.Less)
sort.Slice(got, byResourceUID{got}.Less)
// printEntries(exp)
// printEntries(got)
for i, e := range exp {
if !e.Timestamp.Equal(got[i].Timestamp) {
return i, false
}
// Skip checking elapsed since it's time sensitive.
// if e.Elapsed != got[i].Elapsed {
// return false
// }
if e.Resource != got[i].Resource {
return i, false
}
if e.Type != got[i].Type {
return i, false
}
if e.Source != got[i].Source {
return i, false
}
if e.Owner != got[i].Owner {
return i, false
}
if e.ID != got[i].ID {
return i, false
}
if len(e.ServerList) != len(got[i].ServerList) {
return i, false
}
for j := range e.ServerList {
if e.ServerList[j] != got[i].ServerList[j] {
return i, false
}
}
}
return 0, true
}
for i, tc := range testCases {
got := topLockEntries(tc.peerLocks, false)
if idx, ok := check(tc.expected, got); !ok {
t.Fatalf("%d: mismatch at %d \n expected %#v but got %#v", i, idx, tc.expected[idx], got[idx])
}
}
}

923
cmd/admin-heal-ops.go Normal file
View File

@ -0,0 +1,923 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"sort"
"sync"
"time"
"github.com/minio/madmin-go/v3"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
)
// healStatusSummary - overall short summary of a healing sequence
type healStatusSummary string
// healStatusSummary constants
const (
healNotStartedStatus healStatusSummary = "not started"
healRunningStatus = "running"
healStoppedStatus = "stopped"
healFinishedStatus = "finished"
)
const (
// a heal sequence with this many un-consumed heal result
// items blocks until heal-status consumption resumes or is
// aborted due to timeout.
maxUnconsumedHealResultItems = 1000
// if no heal-results are consumed (via the heal-status API)
// for this timeout duration, the heal sequence is aborted.
healUnconsumedTimeout = 24 * time.Hour
// time-duration to keep heal sequence state after it
// completes.
keepHealSeqStateDuration = time.Minute * 10
// nopHeal is a no-op healing action used to
// wait for the current healing operation to finish
nopHeal = ""
)
var (
errHealIdleTimeout = errors.New("healing results were not consumed for too long")
errHealStopSignalled = errors.New("heal stop signaled")
errFnHealFromAPIErr = func(ctx context.Context, err error) error {
apiErr := toAdminAPIErr(ctx, err)
return fmt.Errorf("Heal internal error: %s: %s",
apiErr.Code, apiErr.Description)
}
)
// healSequenceStatus - accumulated status of the heal sequence
type healSequenceStatus struct {
// summary and detail for failures
Summary healStatusSummary `json:"Summary"`
FailureDetail string `json:"Detail,omitempty"`
StartTime time.Time `json:"StartTime"`
// settings for the heal sequence
HealSettings madmin.HealOpts `json:"Settings"`
// slice of available heal result records
Items []madmin.HealResultItem `json:"Items"`
}
// structure to hold state of all heal sequences in server memory
type allHealState struct {
sync.RWMutex
// map of heal path to heal sequence
healSeqMap map[string]*healSequence // Indexed by endpoint
// keep track of the healing status of disks in the memory
// false: the disk needs to be healed but no healing routine is started
// true: the disk is currently healing
healLocalDisks map[Endpoint]bool
healStatus map[string]healingTracker // Indexed by disk ID
}
// newHealState - initialize global heal state management
func newHealState(ctx context.Context, cleanup bool) *allHealState {
hstate := &allHealState{
healSeqMap: make(map[string]*healSequence),
healLocalDisks: make(map[Endpoint]bool),
healStatus: make(map[string]healingTracker),
}
if cleanup {
go hstate.periodicHealSeqsClean(ctx)
}
return hstate
}
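// popHealLocalDisks removes the given endpoints from the set of local disks
// pending heal and drops their entries from the in-memory heal status.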
func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()
for _, ep := range healLocalDisks {
delete(ahs.healLocalDisks, ep)
}
for id, disk := range ahs.healStatus {
for _, ep := range healLocalDisks {
if disk.Endpoint == ep.String() {
delete(ahs.healStatus, id)
}
}
}
}
// updateHealStatus will update the heal status.
func (ahs *allHealState) updateHealStatus(tracker *healingTracker) {
ahs.Lock()
defer ahs.Unlock()
tracker.mu.RLock()
t := *tracker
t.QueuedBuckets = append(make([]string, 0, len(tracker.QueuedBuckets)), tracker.QueuedBuckets...)
t.HealedBuckets = append(make([]string, 0, len(tracker.HealedBuckets)), tracker.HealedBuckets...)
ahs.healStatus[tracker.ID] = t
tracker.mu.RUnlock()
}
// Sort disks by pool, set and disk index
func sortDisks(disks []madmin.Disk) {
sort.Slice(disks, func(i, j int) bool {
a, b := &disks[i], &disks[j]
if a.PoolIndex != b.PoolIndex {
return a.PoolIndex < b.PoolIndex
}
if a.SetIndex != b.SetIndex {
return a.SetIndex < b.SetIndex
}
return a.DiskIndex < b.DiskIndex
})
}
// getLocalHealingDisks returns local healing disks indexed by endpoint.
func (ahs *allHealState) getLocalHealingDisks() map[string]madmin.HealingDisk {
ahs.RLock()
defer ahs.RUnlock()
dst := make(map[string]madmin.HealingDisk, len(ahs.healStatus))
for _, v := range ahs.healStatus {
dst[v.Endpoint] = v.toHealingDisk()
}
return dst
}
// getHealLocalDiskEndpoints() returns the list of disks that need
// to be healed but have no healing routine in progress on them.
func (ahs *allHealState) getHealLocalDiskEndpoints() Endpoints {
ahs.RLock()
defer ahs.RUnlock()
var endpoints Endpoints
for ep, healing := range ahs.healLocalDisks {
if !healing {
endpoints = append(endpoints, ep)
}
}
return endpoints
}
// Set, in the memory, the state of the disk as currently healing or not
func (ahs *allHealState) setDiskHealingStatus(ep Endpoint, healing bool) {
ahs.Lock()
defer ahs.Unlock()
ahs.healLocalDisks[ep] = healing
}
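// pushHealLocalDisks marks the given endpoints as needing heal; no healing
// routine has been started on them yet.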
func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()
for _, ep := range healLocalDisks {
ahs.healLocalDisks[ep] = false
}
}
func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
// Launch clean-up routine to remove this heal sequence (after
// it ends) from the global state after timeout has elapsed.
periodicTimer := time.NewTimer(time.Minute * 5)
defer periodicTimer.Stop()
for {
select {
case <-periodicTimer.C:
now := UTCNow()
ahs.Lock()
for path, h := range ahs.healSeqMap {
if h.hasEnded() && h.endTime.Add(keepHealSeqStateDuration).Before(now) {
delete(ahs.healSeqMap, path)
}
}
ahs.Unlock()
periodicTimer.Reset(time.Minute * 5)
case <-ctx.Done():
// server could be restarting - need
// to exit immediately
return
}
}
}
// getHealSequenceByToken - Retrieve a heal sequence by token. The second
// argument returns if a heal sequence actually exists.
func (ahs *allHealState) getHealSequenceByToken(token string) (h *healSequence, exists bool) {
ahs.RLock()
defer ahs.RUnlock()
for _, healSeq := range ahs.healSeqMap {
if healSeq.clientToken == token {
return healSeq, true
}
}
return nil, false
}
// getHealSequence - Retrieve a heal sequence by path. The second
// argument returns if a heal sequence actually exists.
func (ahs *allHealState) getHealSequence(path string) (h *healSequence, exists bool) {
ahs.RLock()
defer ahs.RUnlock()
h, exists = ahs.healSeqMap[path]
return h, exists
}
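// stopHealSequence stops a running heal sequence on the given path, waits for
// it to end, removes it from the global state and returns the stop status as JSON.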
func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
var hsp madmin.HealStopSuccess
he, exists := ahs.getHealSequence(path)
if !exists {
hsp = madmin.HealStopSuccess{
ClientToken: "unknown",
StartTime: UTCNow(),
}
} else {
clientToken := he.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s%s%d", he.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
}
hsp = madmin.HealStopSuccess{
ClientToken: clientToken,
ClientAddress: he.clientAddress,
StartTime: he.startTime,
}
he.stop()
for !he.hasEnded() {
time.Sleep(1 * time.Second)
}
ahs.Lock()
defer ahs.Unlock()
// Heal sequence explicitly stopped, remove it.
delete(ahs.healSeqMap, path)
}
b, err := json.Marshal(&hsp)
return b, toAdminAPIErr(GlobalContext, err)
}
// LaunchNewHealSequence - launches a background routine that performs
// healing according to the healSequence argument. For each heal
// sequence, state is stored in the `globalAllHealState`, which is a
// map of the heal path to `healSequence` which holds state about the
// heal sequence.
//
// Heal results are persisted in server memory for
// `keepHealSeqStateDuration`. This function also launches a
// background routine to clean up heal results after the
// aforementioned duration.
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLayer) (
respBytes []byte, apiErr APIError, errMsg string,
) {
if h.forceStarted {
_, apiErr = ahs.stopHealSequence(pathJoin(h.bucket, h.object))
if apiErr.Code != "" {
return respBytes, apiErr, ""
}
} else {
oh, exists := ahs.getHealSequence(pathJoin(h.bucket, h.object))
if exists && !oh.hasEnded() {
errMsg = "Heal is already running on the given path " +
"(use force-start option to stop and start afresh). " +
fmt.Sprintf("The heal was started by IP %s at %s, token is %s",
h.clientAddress, h.startTime.Format(http.TimeFormat), h.clientToken)
return nil, errorCodes.ToAPIErr(ErrHealAlreadyRunning), errMsg
}
}
ahs.Lock()
defer ahs.Unlock()
// Check if new heal sequence to be started overlaps with any
// existing, running sequence
hpath := pathJoin(h.bucket, h.object)
for k, hSeq := range ahs.healSeqMap {
if !hSeq.hasEnded() && (HasPrefix(k, hpath) || HasPrefix(hpath, k)) {
errMsg = "The provided heal sequence path overlaps with an existing " +
fmt.Sprintf("heal path: %s", k)
return nil, errorCodes.ToAPIErr(ErrHealOverlappingPaths), errMsg
}
}
// Add heal state and start sequence
ahs.healSeqMap[hpath] = h
clientToken := h.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s%s%d", h.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
}
if h.clientToken == bgHealingUUID {
// For background heal do nothing, do not spawn an unnecessary goroutine.
} else {
// Launch top-level background heal go-routine
go h.healSequenceStart(objAPI)
}
b, err := json.Marshal(madmin.HealStartSuccess{
ClientToken: clientToken,
ClientAddress: h.clientAddress,
StartTime: h.startTime,
})
if err != nil {
bugLogIf(h.ctx, err)
return nil, toAdminAPIErr(h.ctx, err), ""
}
return b, noError, ""
}
// PopHealStatusJSON - Called by heal-status API. It fetches the heal
// status results from global state and returns its JSON
// representation. The clientToken helps ensure there aren't
// conflicting clients fetching status.
func (ahs *allHealState) PopHealStatusJSON(hpath string,
clientToken string) ([]byte, APIErrorCode,
) {
// fetch heal state for given path
h, exists := ahs.getHealSequence(hpath)
if !exists {
// heal sequence doesn't exist, must have finished.
jbytes, err := json.Marshal(healSequenceStatus{
Summary: healFinishedStatus,
})
return jbytes, toAdminAPIErrCode(GlobalContext, err)
}
// Check if client-token is valid
if clientToken != h.clientToken {
return nil, ErrHealInvalidClientToken
}
// Take lock to access and update the heal-sequence
h.mutex.Lock()
defer h.mutex.Unlock()
numItems := len(h.currentStatus.Items)
// calculate index of most recently available heal result
// record.
lastResultIndex := h.lastSentResultIndex
if numItems > 0 {
lastResultIndex = h.currentStatus.Items[numItems-1].ResultIndex
}
h.lastSentResultIndex = lastResultIndex
jbytes, err := json.Marshal(h.currentStatus)
if err != nil {
h.currentStatus.Items = nil
bugLogIf(h.ctx, err)
return nil, ErrInternalError
}
h.currentStatus.Items = nil
return jbytes, ErrNone
}
// healSource denotes single entity and heal option.
type healSource struct {
bucket string
object string
versionID string
noWait bool // a non blocking call, if task queue is full return right away.
opts *madmin.HealOpts // optional heal option overrides default setting
}
// healSequence - state for each heal sequence initiated on the
// server.
type healSequence struct {
// bucket, and object on which heal seq. was initiated
bucket, object string
// Report healing progress
reportProgress bool
// time at which heal sequence was started
startTime time.Time
// time at which heal sequence has ended
endTime time.Time
// Heal client info
clientToken, clientAddress string
// was this heal sequence force started?
forceStarted bool
// heal settings applied to this heal sequence
settings madmin.HealOpts
// current accumulated status of the heal sequence
currentStatus healSequenceStatus
// channel signaled by background routine when traversal has
// completed
traverseAndHealDoneCh chan error
// canceler to cancel heal sequence.
cancelCtx context.CancelFunc
// the last result index sent to client
lastSentResultIndex int64
// Number of total items scanned against item type
scannedItemsMap map[madmin.HealItemType]int64
// Number of total items healed against item type
healedItemsMap map[madmin.HealItemType]int64
// Number of total items where healing failed against item type
healFailedItemsMap map[madmin.HealItemType]int64
// The time of the last scan/heal activity
lastHealActivity time.Time
// Holds the request-info for logging
ctx context.Context
// used to lock this structure as it is concurrently accessed
mutex sync.RWMutex
}
// newHealSequence - creates a new heal sequence; assumes bucket and
// objPrefix are already validated.
func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
hs madmin.HealOpts, forceStart bool,
) *healSequence {
reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
reqInfo.AppendTags("prefix", objPrefix)
ctx, cancel := context.WithCancel(logger.SetReqInfo(ctx, reqInfo))
clientToken := mustGetUUID()
return &healSequence{
bucket: bucket,
object: objPrefix,
reportProgress: true,
startTime: UTCNow(),
clientToken: clientToken,
clientAddress: clientAddr,
forceStarted: forceStart,
settings: hs,
currentStatus: healSequenceStatus{
Summary: healNotStartedStatus,
HealSettings: hs,
},
traverseAndHealDoneCh: make(chan error),
cancelCtx: cancel,
ctx: ctx,
scannedItemsMap: make(map[madmin.HealItemType]int64),
healedItemsMap: make(map[madmin.HealItemType]int64),
healFailedItemsMap: make(map[madmin.HealItemType]int64),
}
}
// getScannedItemsCount - returns a count of all scanned items
func (h *healSequence) getScannedItemsCount() int64 {
var count int64
h.mutex.RLock()
defer h.mutex.RUnlock()
for _, v := range h.scannedItemsMap {
count += v
}
return count
}
// getScannedItemsMap - returns map of all scanned items against type
func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()
// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap))
for k, v := range h.scannedItemsMap {
retMap[k] = v
}
return retMap
}
// getHealedItemsMap - returns the map of all healed items against type
func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()
// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap))
for k, v := range h.healedItemsMap {
retMap[k] = v
}
return retMap
}
// getHealFailedItemsMap - returns a map of items for which healing failed,
// keyed by heal item type
func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()
// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap))
for k, v := range h.healFailedItemsMap {
retMap[k] = v
}
return retMap
}
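// countFailed increments the heal-failure counter for the given item type and
// updates the last heal activity time.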
func (h *healSequence) countFailed(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()
h.healFailedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}
func (h *healSequence) countScanned(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}
func (h *healSequence) countHealed(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()
h.healedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}
// isQuitting - determines if the heal sequence is quitting (due to an
// external signal)
func (h *healSequence) isQuitting() bool {
select {
case <-h.ctx.Done():
return true
default:
return false
}
}
// check if the heal sequence has ended
func (h *healSequence) hasEnded() bool {
h.mutex.RLock()
defer h.mutex.RUnlock()
// background heal never ends
if h.clientToken == bgHealingUUID {
return false
}
return !h.endTime.IsZero()
}
// stops the heal sequence - safe to call multiple times.
func (h *healSequence) stop() {
h.cancelCtx()
}
// pushHealResultItem - pushes a heal result item for consumption in
// the heal-status API. It blocks if there are
// maxUnconsumedHealResultItems. When it blocks, the heal sequence
// routine is effectively paused - this happens when the server has
// accumulated the maximum number of heal records per heal
// sequence. When the client consumes further records, the heal
// sequence automatically resumes. The return value indicates if the
// operation succeeded.
func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
// start a timer to keep an upper time limit to find an empty
// slot to add the given heal result - if no slot is found it
// means that the server is holding the maximum amount of
// heal-results in memory and the client has not consumed it
// for too long.
unconsumedTimer := time.NewTimer(healUnconsumedTimeout)
defer unconsumedTimer.Stop()
var itemsLen int
for {
h.mutex.Lock()
itemsLen = len(h.currentStatus.Items)
if itemsLen == maxUnconsumedHealResultItems {
// wait for a second, or quit if an external
// stop signal is received or the
// unconsumedTimer fires.
select {
// Check after a second
case <-time.After(time.Second):
h.mutex.Unlock()
continue
case <-h.ctx.Done():
h.mutex.Unlock()
// discard result and return.
return errHealStopSignalled
// Timeout if no results consumed for too long.
case <-unconsumedTimer.C:
h.mutex.Unlock()
return errHealIdleTimeout
}
}
break
}
// Set the correct result index for the new result item
if itemsLen > 0 {
r.ResultIndex = 1 + h.currentStatus.Items[itemsLen-1].ResultIndex
} else {
r.ResultIndex = 1 + h.lastSentResultIndex
}
// append to results
h.currentStatus.Items = append(h.currentStatus.Items, r)
// release lock
h.mutex.Unlock()
return nil
}
// healSequenceStart - this is the top-level background heal
// routine. It launches another go-routine that actually traverses
// on-disk data, checks and heals according to the selected
// settings. This go-routine itself, (1) monitors the traversal
// routine for completion, and (2) listens for external stop
// signals. When either event happens, it sets the finish status for
// the heal-sequence.
func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
// Set status as running
h.mutex.Lock()
h.currentStatus.Summary = healRunningStatus
h.currentStatus.StartTime = UTCNow()
h.mutex.Unlock()
go h.traverseAndHeal(objAPI)
select {
case err, ok := <-h.traverseAndHealDoneCh:
if !ok {
return
}
h.mutex.Lock()
h.endTime = UTCNow()
// Heal traversal is complete.
if err == nil {
// heal traversal succeeded.
h.currentStatus.Summary = healFinishedStatus
} else {
// heal traversal had an error.
h.currentStatus.Summary = healStoppedStatus
h.currentStatus.FailureDetail = err.Error()
}
h.mutex.Unlock()
case <-h.ctx.Done():
h.mutex.Lock()
h.endTime = UTCNow()
h.currentStatus.Summary = healFinishedStatus
h.mutex.Unlock()
// drain traverse channel so the traversal
// go-routine does not leak.
go func() {
// Eventually the traversal go-routine closes
// the channel and returns, so this go-routine
// itself will not leak.
<-h.traverseAndHealDoneCh
}()
}
}
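// queueHealTask queues a heal task for the given source; when noWait is set it
// returns immediately, otherwise it waits for the result and records it in the
// heal sequence status.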
func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
// Send heal request
task := healTask{
bucket: source.bucket,
object: source.object,
versionID: source.versionID,
opts: h.settings,
}
if source.opts != nil {
task.opts = *source.opts
} else {
task.opts.ScanMode = madmin.HealNormalScan
}
h.countScanned(healType)
if source.noWait {
select {
case globalBackgroundHealRoutine.tasks <- task:
if serverDebugLog {
fmt.Printf("Task in the queue: %#v\n", task)
}
default:
// task queue is full, no more workers, we shall move on and heal later.
return nil
}
// Don't wait for result
return nil
}
// respCh must be set to wait for result.
// We make it size 1, so a result can always be written
// even if we aren't listening.
task.respCh = make(chan healResult, 1)
select {
case globalBackgroundHealRoutine.tasks <- task:
if serverDebugLog {
fmt.Printf("Task in the queue: %#v\n", task)
}
case <-h.ctx.Done():
return nil
}
countOKDrives := func(drives []madmin.HealDriveInfo) (count int) {
for _, drive := range drives {
if drive.State == madmin.DriveStateOk {
count++
}
}
return count
}
// task queued, now wait for the response.
select {
case res := <-task.respCh:
if res.err == nil {
h.countHealed(healType)
} else {
h.countFailed(healType)
}
if !h.reportProgress {
if errors.Is(res.err, errSkipFile) { // this is only sent usually by nopHeal
return nil
}
// Report caller of any failure
return res.err
}
res.result.Type = healType
if res.err != nil {
res.result.Detail = res.err.Error()
}
if res.result.ParityBlocks > 0 && res.result.DataBlocks > 0 && res.result.DataBlocks > res.result.ParityBlocks {
if got := countOKDrives(res.result.After.Drives); got < res.result.ParityBlocks {
res.result.Detail = fmt.Sprintf("quorum loss - expected %d minimum, got drive states in OK %d", res.result.ParityBlocks, got)
}
}
return h.pushHealResultItem(res.result)
case <-h.ctx.Done():
return nil
}
}
func (h *healSequence) healDiskMeta(objAPI ObjectLayer) error {
// Start healing the config prefix.
return h.healMinioSysMeta(objAPI, minioConfigPrefix)()
}
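// healItems heals MinIO internal metadata (only during a site-wide heal) and
// then heals buckets and objects.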
func (h *healSequence) healItems(objAPI ObjectLayer) error {
if h.clientToken == bgHealingUUID {
// For background heal do nothing.
return nil
}
if h.bucket == "" { // heal internal meta only during a site-wide heal
if err := h.healDiskMeta(objAPI); err != nil {
return err
}
}
// Heal buckets and objects
return h.healBuckets(objAPI)
}
// traverseAndHeal - traverses on-disk data and performs healing
// according to settings. At each "safe" point it also checks if an
// external quit signal has been received and quits if so. Since the
// healing traversal may be mutating on-disk data when an external
// quit signal is received, this routine cannot quit immediately and
// has to wait until a safe point is reached, such as between scanning
// two objects.
func (h *healSequence) traverseAndHeal(objAPI ObjectLayer) {
h.traverseAndHealDoneCh <- h.healItems(objAPI)
xioutil.SafeClose(h.traverseAndHealDoneCh)
}
// healMinioSysMeta - heals all files under a given meta prefix, returns a function
// which in turn heals the respective meta directory path and any files in it.
func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) func() error {
return func() error {
// NOTE: Healing on meta is run regardless
// of any bucket being selected, this is to ensure that
// meta is always up to date and correct.
h.settings.Recursive = true
return objAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string, scanMode madmin.HealScanMode) error {
if h.isQuitting() {
return errHealStopSignalled
}
err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
}, madmin.HealItemBucketMetadata)
return err
})
}
}
// healBuckets - check for all buckets heal or just particular bucket.
func (h *healSequence) healBuckets(objAPI ObjectLayer) error {
if h.isQuitting() {
return errHealStopSignalled
}
// 1. If a bucket was specified, heal only the bucket.
if h.bucket != "" {
return h.healBucket(objAPI, h.bucket, false)
}
buckets, err := objAPI.ListBuckets(h.ctx, BucketOptions{})
if err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}
// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
return buckets[i].Created.After(buckets[j].Created)
})
for _, bucket := range buckets {
if err = h.healBucket(objAPI, bucket.Name, false); err != nil {
return err
}
}
return nil
}
// healBucket - traverses and heals given bucket
func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly bool) error {
if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
return err
}
if bucketsOnly {
return nil
}
if err := objAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}
return nil
}
// healObject - heal the given object and record result
func (h *healSequence) healObject(bucket, object, versionID string, scanMode madmin.HealScanMode) error {
if h.isQuitting() {
return errHealStopSignalled
}
err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
opts: &h.settings,
}, madmin.HealItemObject)
// Wait and proceed if there are active requests
waitForLowHTTPReq()
return err
}

441
cmd/admin-router.go Normal file
View File

@ -0,0 +1,441 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"net/http"
"github.com/klauspost/compress/gzhttp"
"github.com/klauspost/compress/gzip"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
)
const (
adminPathPrefix = minioReservedBucketPath + "/admin"
adminAPIVersion = madmin.AdminAPIVersion
adminAPIVersionPrefix = SlashSeparator + adminAPIVersion
adminAPISiteReplicationDevNull = "/site-replication/devnull"
adminAPISiteReplicationNetPerf = "/site-replication/netperf"
adminAPIClientDevNull = "/speedtest/client/devnull"
adminAPIClientDevExtraTime = "/speedtest/client/devnull/extratime"
)
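// gzipHandler wraps a handler to gzip-compress responses larger than 1000
// bytes at best-speed compression level.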
var gzipHandler = func() func(http.Handler) http.HandlerFunc {
gz, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
if err != nil {
// Static params, so this is very unlikely.
logger.Fatal(err, "Unable to initialize server")
}
return gz
}()
// Set of handler options as bit flags
type hFlag uint8
const (
// this flag disables gzip compression of responses
noGZFlag = 1 << iota
// this flag enables tracing body and headers instead of just headers
traceAllFlag
// pass this flag to skip checking if object layer is available
noObjLayerFlag
)
// Has checks if the given flag is enabled in `h`.
func (h hFlag) Has(flag hFlag) bool {
// Use bitwise-AND and check if the result is non-zero.
return h&flag != 0
}
// adminMiddleware performs some common admin handler functionality for all
// handlers:
//
// - updates request context with `logger.ReqInfo` and api name based on the
// name of the function handler passed (this handler must be a method of
// `adminAPIHandlers`).
//
// - sets up call to send AuditLog
//
// While this is a middleware function (i.e. it takes a handler function and
// returns one), due to flags being passed based on required conditions, it is
// done per-"handler function registration" in the router.
//
// The passed in handler function must be a method of `adminAPIHandlers` for the
// name displayed in logs and trace to be accurate. The name is extracted via
// reflection.
//
// When no flags are passed, gzip compression, http tracing of headers and
// checking of object layer availability are all enabled. Use flags to modify
// this behavior.
func adminMiddleware(f http.HandlerFunc, flags ...hFlag) http.HandlerFunc {
// Collect all flags with the bitwise OR-assign operator
var handlerFlags hFlag
for _, flag := range flags {
handlerFlags |= flag
}
// Get name of the handler using reflection.
handlerName := getHandlerName(f, "adminAPIHandlers")
var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
// Update request context with `logger.ReqInfo`.
r = r.WithContext(newContext(r, w, handlerName))
defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
// Check if object layer is available, if not return error early.
if !handlerFlags.Has(noObjLayerFlag) {
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(r.Context(), w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
}
// Apply http tracing "middleware" based on presence of flag.
var f2 http.HandlerFunc
if handlerFlags.Has(traceAllFlag) {
f2 = httpTraceAll(f)
} else {
f2 = httpTraceHdrs(f)
}
// call the final handler
f2(w, r)
}
// Enable compression of responses based on presence of flag.
if !handlerFlags.Has(noGZFlag) {
handler = gzipHandler(handler)
}
return handler
}
// adminAPIHandlers provides HTTP handlers for MinIO admin API.
type adminAPIHandlers struct{}
// registerAdminRouter - Add handler functions for each service REST API route.
func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminAPI := adminAPIHandlers{}
// Admin router
adminRouter := router.PathPrefix(adminPathPrefix).Subrouter()
adminVersions := []string{
adminAPIVersionPrefix,
}
for _, adminVersion := range adminVersions {
// Restart and stop MinIO service type=2
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(adminMiddleware(adminAPI.ServiceV2Handler, traceAllFlag)).Queries("action", "{action:.*}", "type", "2")
// Deprecated: Restart and stop MinIO service.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(adminMiddleware(adminAPI.ServiceHandler, traceAllFlag)).Queries("action", "{action:.*}")
// Update all MinIO servers type=2
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update").HandlerFunc(adminMiddleware(adminAPI.ServerUpdateV2Handler, traceAllFlag)).Queries("updateURL", "{updateURL:.*}", "type", "2")
// Deprecated: Update MinIO servers.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update").HandlerFunc(adminMiddleware(adminAPI.ServerUpdateHandler, traceAllFlag)).Queries("updateURL", "{updateURL:.*}")
// Info operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(adminMiddleware(adminAPI.ServerInfoHandler, traceAllFlag, noObjLayerFlag))
adminRouter.Methods(http.MethodGet, http.MethodPost).Path(adminVersion + "/inspect-data").HandlerFunc(adminMiddleware(adminAPI.InspectDataHandler, noGZFlag, traceHdrsS3HFlag))
// StorageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(adminMiddleware(adminAPI.StorageInfoHandler, traceAllFlag))
// DataUsageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(adminMiddleware(adminAPI.DataUsageInfoHandler, traceAllFlag))
// Metrics operation
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/metrics").HandlerFunc(adminMiddleware(adminAPI.MetricsHandler, traceHdrsS3HFlag))
if globalIsDistErasure || globalIsErasure {
// Heal operations
// Heal processing endpoint.
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(adminMiddleware(adminAPI.HealHandler, traceAllFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}").HandlerFunc(adminMiddleware(adminAPI.HealHandler, traceAllFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(adminMiddleware(adminAPI.HealHandler, traceAllFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(adminMiddleware(adminAPI.BackgroundHealStatusHandler, traceAllFlag))
// Pool operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/pools/list").HandlerFunc(adminMiddleware(adminAPI.ListPools, traceAllFlag))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/pools/status").HandlerFunc(adminMiddleware(adminAPI.StatusPool, traceAllFlag)).Queries("pool", "{pool:.*}")
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/pools/decommission").HandlerFunc(adminMiddleware(adminAPI.StartDecommission, traceAllFlag)).Queries("pool", "{pool:.*}")
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/pools/cancel").HandlerFunc(adminMiddleware(adminAPI.CancelDecommission, traceAllFlag)).Queries("pool", "{pool:.*}")
// Rebalance operations
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/rebalance/start").HandlerFunc(adminMiddleware(adminAPI.RebalanceStart, traceAllFlag))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/rebalance/status").HandlerFunc(adminMiddleware(adminAPI.RebalanceStatus, traceAllFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/rebalance/stop").HandlerFunc(adminMiddleware(adminAPI.RebalanceStop, traceAllFlag))
}
// Profiling operations - deprecated API
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(adminMiddleware(adminAPI.StartProfilingHandler, traceAllFlag, noObjLayerFlag)).
Queries("profilerType", "{profilerType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(adminMiddleware(adminAPI.DownloadProfilingHandler, traceHdrsS3HFlag, noObjLayerFlag))
// Profiling operations
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(adminMiddleware(adminAPI.ProfileHandler, traceHdrsS3HFlag, noObjLayerFlag))
// Config KV operations.
if enableConfigOps {
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-config-kv").HandlerFunc(adminMiddleware(adminAPI.GetConfigKVHandler)).Queries("key", "{key:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/set-config-kv").HandlerFunc(adminMiddleware(adminAPI.SetConfigKVHandler))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/del-config-kv").HandlerFunc(adminMiddleware(adminAPI.DelConfigKVHandler))
}
// Enable config help in all modes.
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/help-config-kv").HandlerFunc(adminMiddleware(adminAPI.HelpConfigKVHandler, traceAllFlag)).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")
// Config KV history operations.
if enableConfigOps {
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-config-history-kv").HandlerFunc(adminMiddleware(adminAPI.ListConfigHistoryKVHandler, traceAllFlag)).Queries("count", "{count:[0-9]+}")
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/clear-config-history-kv").HandlerFunc(adminMiddleware(adminAPI.ClearConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(adminMiddleware(adminAPI.RestoreConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
}
// Config import/export bulk operations
if enableConfigOps {
// Get config
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(adminMiddleware(adminAPI.GetConfigHandler))
// Set config
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/config").HandlerFunc(adminMiddleware(adminAPI.SetConfigHandler))
}
// -- IAM APIs --
// Add policy IAM
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(adminMiddleware(adminAPI.AddCannedPolicy, traceAllFlag)).Queries("name", "{name:.*}")
// Add user IAM
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountinfo").HandlerFunc(adminMiddleware(adminAPI.AccountInfoHandler, traceAllFlag))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(adminMiddleware(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-status").HandlerFunc(adminMiddleware(adminAPI.SetUserStatus)).Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
// Service accounts ops
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/add-service-account").HandlerFunc(adminMiddleware(adminAPI.AddServiceAccount))
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update-service-account").HandlerFunc(adminMiddleware(adminAPI.UpdateServiceAccount)).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-service-account").HandlerFunc(adminMiddleware(adminAPI.InfoServiceAccount)).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-service-accounts").HandlerFunc(adminMiddleware(adminAPI.ListServiceAccounts))
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/delete-service-account").HandlerFunc(adminMiddleware(adminAPI.DeleteServiceAccount)).Queries("accessKey", "{accessKey:.*}")
// STS accounts ops
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/temporary-account-info").HandlerFunc(adminMiddleware(adminAPI.TemporaryAccountInfo)).Queries("accessKey", "{accessKey:.*}")
// Access key (service account/STS) operations
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-access-keys-bulk").HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysBulk)).Queries("listType", "{listType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-access-key").HandlerFunc(adminMiddleware(adminAPI.InfoAccessKey)).Queries("accessKey", "{accessKey:.*}")
// Info policy IAM latest
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(adminMiddleware(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
// List policies latest
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-canned-policies").HandlerFunc(adminMiddleware(adminAPI.ListBucketPolicies)).Queries("bucket", "{bucket:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(adminMiddleware(adminAPI.ListCannedPolicies))
// Builtin IAM policy associations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp/builtin/policy-entities").HandlerFunc(adminMiddleware(adminAPI.ListPolicyMappingEntities))
// Remove policy IAM
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-canned-policy").HandlerFunc(adminMiddleware(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
// Set user or group policy
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-or-group-policy").
HandlerFunc(adminMiddleware(adminAPI.SetPolicyForUserOrGroup)).
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
// Attach/Detach policies to/from user or group
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp/builtin/policy/{operation}").HandlerFunc(adminMiddleware(adminAPI.AttachDetachPolicyBuiltin))
// Remove user IAM
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-user").HandlerFunc(adminMiddleware(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
// List users
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-users").HandlerFunc(adminMiddleware(adminAPI.ListBucketUsers)).Queries("bucket", "{bucket:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-users").HandlerFunc(adminMiddleware(adminAPI.ListUsers))
// User info
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/user-info").HandlerFunc(adminMiddleware(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
// Add/Remove members from group
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/update-group-members").HandlerFunc(adminMiddleware(adminAPI.UpdateGroupMembers))
// Get Group
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/group").HandlerFunc(adminMiddleware(adminAPI.GetGroup)).Queries("group", "{group:.*}")
// List Groups
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/groups").HandlerFunc(adminMiddleware(adminAPI.ListGroups))
// Set Group Status
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(adminMiddleware(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
// Export IAM info to zipped file
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/export-iam").HandlerFunc(adminMiddleware(adminAPI.ExportIAM, noGZFlag))
// Import IAM info
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam").HandlerFunc(adminMiddleware(adminAPI.ImportIAM, noGZFlag))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam-v2").HandlerFunc(adminMiddleware(adminAPI.ImportIAMV2, noGZFlag))
// Identity Provider (IDP) configuration APIs
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.AddIdentityProviderCfg))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.UpdateIdentityProviderCfg))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp-config/{type}").HandlerFunc(adminMiddleware(adminAPI.ListIdentityProviderCfg))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.GetIdentityProviderCfg))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.DeleteIdentityProviderCfg))
// LDAP specific service accounts ops
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp/ldap/add-service-account").HandlerFunc(adminMiddleware(adminAPI.AddServiceAccountLDAP))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/ldap/list-access-keys").
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAP)).Queries("userDN", "{userDN:.*}", "listType", "{listType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/ldap/list-access-keys-bulk").
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAPBulk)).Queries("listType", "{listType:.*}")
// LDAP IAM operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp/ldap/policy-entities").HandlerFunc(adminMiddleware(adminAPI.ListLDAPPolicyMappingEntities))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp/ldap/policy/{operation}").HandlerFunc(adminMiddleware(adminAPI.AttachDetachPolicyLDAP))
// OpenID specific service accounts ops
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/openid/list-access-keys-bulk").
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysOpenIDBulk)).Queries("listType", "{listType:.*}")
// -- END IAM APIs --
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
adminMiddleware(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
adminMiddleware(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// Bucket replication operations
// GetBucketTargetHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
adminMiddleware(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
adminMiddleware(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
// RemoveRemoteTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
adminMiddleware(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
// ReplicationDiff - MinIO extension API
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/replication/diff").HandlerFunc(
adminMiddleware(adminAPI.ReplicationDiffHandler)).Queries("bucket", "{bucket:.*}")
// ReplicationMRFHandler - MinIO extension API
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/replication/mrf").HandlerFunc(
adminMiddleware(adminAPI.ReplicationMRFHandler)).Queries("bucket", "{bucket:.*}")
// Batch job operations
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/start-job").HandlerFunc(
adminMiddleware(adminAPI.StartBatchJob))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-jobs").HandlerFunc(
adminMiddleware(adminAPI.ListBatchJobs))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/status-job").HandlerFunc(
adminMiddleware(adminAPI.BatchJobStatus))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/describe-job").HandlerFunc(
adminMiddleware(adminAPI.DescribeBatchJob))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/cancel-job").HandlerFunc(
adminMiddleware(adminAPI.CancelBatchJob))
// Bucket migration operations
// ExportBucketMetaHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/export-bucket-metadata").HandlerFunc(
adminMiddleware(adminAPI.ExportBucketMetadataHandler))
// ImportBucketMetaHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-bucket-metadata").HandlerFunc(
adminMiddleware(adminAPI.ImportBucketMetadataHandler))
// Remote Tier management operations
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/tier").HandlerFunc(adminMiddleware(adminAPI.AddTierHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/tier/{tier}").HandlerFunc(adminMiddleware(adminAPI.EditTierHandler))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier").HandlerFunc(adminMiddleware(adminAPI.ListTierHandler))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/tier/{tier}").HandlerFunc(adminMiddleware(adminAPI.RemoveTierHandler))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier/{tier}").HandlerFunc(adminMiddleware(adminAPI.VerifyTierHandler))
// Tier stats
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier-stats").HandlerFunc(adminMiddleware(adminAPI.TierStatsHandler))
// Cluster Replication APIs
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/add").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationAdd))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/remove").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationRemove))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/info").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationInfo))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/metainfo").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationMetaInfo))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/status").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationStatus))
adminRouter.Methods(http.MethodPost).Path(adminVersion + adminAPISiteReplicationDevNull).HandlerFunc(adminMiddleware(adminAPI.SiteReplicationDevNull, noObjLayerFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + adminAPISiteReplicationNetPerf).HandlerFunc(adminMiddleware(adminAPI.SiteReplicationNetPerf, noObjLayerFlag))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/join").HandlerFunc(adminMiddleware(adminAPI.SRPeerJoin))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/peer/bucket-ops").HandlerFunc(adminMiddleware(adminAPI.SRPeerBucketOps)).Queries("bucket", "{bucket:.*}").Queries("operation", "{operation:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/iam-item").HandlerFunc(adminMiddleware(adminAPI.SRPeerReplicateIAMItem))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/bucket-meta").HandlerFunc(adminMiddleware(adminAPI.SRPeerReplicateBucketItem))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/peer/idp-settings").HandlerFunc(adminMiddleware(adminAPI.SRPeerGetIDPSettings))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/edit").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationEdit))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/edit").HandlerFunc(adminMiddleware(adminAPI.SRPeerEdit))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/remove").HandlerFunc(adminMiddleware(adminAPI.SRPeerRemove))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/resync/op").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationResyncOp)).Queries("operation", "{operation:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/state/edit").HandlerFunc(adminMiddleware(adminAPI.SRStateEdit))
if globalIsDistErasure {
// Top locks
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(adminMiddleware(adminAPI.TopLocksHandler))
// Force unlocks paths
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/force-unlock").
Queries("paths", "{paths:.*}").HandlerFunc(adminMiddleware(adminAPI.ForceUnlockHandler))
}
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest").HandlerFunc(adminMiddleware(adminAPI.ObjectSpeedTestHandler, noGZFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/object").HandlerFunc(adminMiddleware(adminAPI.ObjectSpeedTestHandler, noGZFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/drive").HandlerFunc(adminMiddleware(adminAPI.DriveSpeedtestHandler, noGZFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/net").HandlerFunc(adminMiddleware(adminAPI.NetperfHandler, noGZFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/site").HandlerFunc(adminMiddleware(adminAPI.SitePerfHandler, noGZFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + adminAPIClientDevNull).HandlerFunc(adminMiddleware(adminAPI.ClientDevNull, noGZFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + adminAPIClientDevExtraTime).HandlerFunc(adminMiddleware(adminAPI.ClientDevNullExtraTime, noGZFlag))
// HTTP Trace
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(adminMiddleware(adminAPI.TraceHandler, noObjLayerFlag))
// Console Logs
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/log").HandlerFunc(adminMiddleware(adminAPI.ConsoleLogHandler, traceAllFlag))
// -- KMS APIs --
//
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/kms/status").HandlerFunc(adminMiddleware(adminAPI.KMSStatusHandler, traceAllFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/kms/key/create").HandlerFunc(adminMiddleware(adminAPI.KMSCreateKeyHandler, traceAllFlag)).Queries("key-id", "{key-id:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(adminMiddleware(adminAPI.KMSKeyStatusHandler, traceAllFlag))
// Keep obdinfo for backward compatibility with mc
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
HandlerFunc(adminMiddleware(adminAPI.HealthInfoHandler))
// -- Health API --
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
HandlerFunc(adminMiddleware(adminAPI.HealthInfoHandler))
// STS Revocation
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/revoke-tokens/{userProvider}").HandlerFunc(adminMiddleware(adminAPI.RevokeTokens))
}
// If none of the routes match, add default error handler routes
adminRouter.NotFoundHandler = httpTraceAll(errorResponseHandler)
adminRouter.MethodNotAllowedHandler = httpTraceAll(methodNotAllowedHandler("Admin"))
}

171
cmd/admin-server-info.go Normal file
View File

@ -0,0 +1,171 @@
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"math"
"net/http"
"os"
"runtime"
"runtime/debug"
"sort"
"strings"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/kms"
xnet "github.com/minio/pkg/v3/net"
)
// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request, metrics bool) madmin.ServerProperties {
addr := globalLocalNodeName
if r != nil {
addr = r.Host
}
if globalIsDistErasure {
addr = globalLocalNodeName
}
poolNumbers := make(map[int]struct{})
network := make(map[string]string)
for _, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
if endpoint.IsLocal {
poolNumbers[endpoint.PoolIdx+1] = struct{}{}
}
nodeName := endpoint.Host
if nodeName == "" {
nodeName = addr
}
if endpoint.IsLocal {
// Only proceed for local endpoints
network[nodeName] = string(madmin.ItemOnline)
continue
}
_, present := network[nodeName]
if !present {
if err := isServerResolvable(endpoint, 5*time.Second); err == nil {
network[nodeName] = string(madmin.ItemOnline)
} else {
if xnet.IsNetworkOrHostDown(err, false) {
network[nodeName] = string(madmin.ItemOffline)
} else if xnet.IsNetworkOrHostDown(err, true) {
network[nodeName] = "connection attempt timed out"
}
}
}
}
}
var memstats runtime.MemStats
runtime.ReadMemStats(&memstats)
gcStats := debug.GCStats{
// If stats.PauseQuantiles is non-empty, ReadGCStats fills
// it with quantiles summarizing the distribution of pause time.
// For example, if len(stats.PauseQuantiles) is 5, it will be
// filled with the minimum, 25%, 50%, 75%, and maximum pause times.
PauseQuantiles: make([]time.Duration, 5),
}
debug.ReadGCStats(&gcStats)
// Truncate GC stats to max 5 entries.
if len(gcStats.PauseEnd) > 5 {
gcStats.PauseEnd = gcStats.PauseEnd[len(gcStats.PauseEnd)-5:]
}
if len(gcStats.Pause) > 5 {
gcStats.Pause = gcStats.Pause[len(gcStats.Pause)-5:]
}
props := madmin.ServerProperties{
Endpoint: addr,
Uptime: UTCNow().Unix() - globalBootTime.Unix(),
Version: Version,
CommitID: CommitID,
Network: network,
MemStats: madmin.MemStats{
Alloc: memstats.Alloc,
TotalAlloc: memstats.TotalAlloc,
Mallocs: memstats.Mallocs,
Frees: memstats.Frees,
HeapAlloc: memstats.HeapAlloc,
},
GoMaxProcs: runtime.GOMAXPROCS(0),
NumCPU: runtime.NumCPU(),
RuntimeVersion: runtime.Version(),
GCStats: &madmin.GCStats{
LastGC: gcStats.LastGC,
NumGC: gcStats.NumGC,
PauseTotal: gcStats.PauseTotal,
Pause: gcStats.Pause,
PauseEnd: gcStats.PauseEnd,
},
MinioEnvVars: make(map[string]string, 10),
}
for poolNumber := range poolNumbers {
props.PoolNumbers = append(props.PoolNumbers, poolNumber)
}
sort.Ints(props.PoolNumbers)
props.PoolNumber = func() int {
if len(props.PoolNumbers) == 1 {
return props.PoolNumbers[0]
}
return math.MaxInt // this indicates that it's unset.
}()
sensitive := map[string]struct{}{
config.EnvAccessKey: {},
config.EnvSecretKey: {},
config.EnvRootUser: {},
config.EnvRootPassword: {},
config.EnvMinIOSubnetAPIKey: {},
kms.EnvKMSSecretKey: {},
}
for _, v := range os.Environ() {
if !strings.HasPrefix(v, "MINIO") && !strings.HasPrefix(v, "_MINIO") {
continue
}
split := strings.SplitN(v, "=", 2)
key := split[0]
value := ""
if len(split) > 1 {
value = split[1]
}
// Do not send sensitive creds.
if _, ok := sensitive[key]; ok || strings.Contains(strings.ToLower(key), "password") || strings.HasSuffix(strings.ToLower(key), "key") {
props.MinioEnvVars[key] = "*** EXISTS, REDACTED ***"
continue
}
props.MinioEnvVars[key] = value
}
objLayer := newObjectLayerFn()
if objLayer != nil {
storageInfo := objLayer.LocalStorageInfo(GlobalContext, metrics)
props.State = string(madmin.ItemOnline)
props.Disks = storageInfo.Disks
} else {
props.State = string(madmin.ItemInitializing)
props.Disks = getOfflineDisks("", globalEndpoints)
}
return props
}

85
cmd/api-datatypes.go Normal file
View File

@ -0,0 +1,85 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"encoding/xml"
"time"
)
// DeletedObject represents an object that was deleted.
type DeletedObject struct {
DeleteMarker bool `xml:"DeleteMarker,omitempty"`
DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
ObjectName string `xml:"Key,omitempty"`
VersionID string `xml:"VersionId,omitempty"`
// MTime of DeleteMarker on source that needs to be propagated to replica
DeleteMarkerMTime DeleteMarkerMTime `xml:"-"`
// MinIO extensions to support delete marker replication
ReplicationState ReplicationState `xml:"-"`
found bool // the object was found during deletion
}
// DeleteMarkerMTime is an embedded type containing time.Time for XML marshal
type DeleteMarkerMTime struct {
time.Time
}
// MarshalXML encodes the delete marker mtime as an RFC 3339 timestamp if it is
// non-zero, and encodes nothing otherwise.
func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if t.IsZero() {
return nil
}
return e.EncodeElement(t.Format(time.RFC3339), startElement)
}
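// For example, a non-zero mtime marshals as an RFC 3339 timestamp such as
// "2021-06-01T12:00:00Z", while a zero value produces no element at all.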
// ObjectV object version key/versionId
type ObjectV struct {
ObjectName string `xml:"Key"`
VersionID string `xml:"VersionId"`
}
// ObjectToDelete carries key name for the object to delete.
type ObjectToDelete struct {
ObjectV
// Replication status of DeleteMarker
DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus"`
// Status of versioned delete (of object or DeleteMarker)
VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus"`
// VersionPurgeStatuses holds the internal, stringified representation of the versioned-delete statuses across replication targets
VersionPurgeStatuses string `xml:"VersionPurgeStatuses"`
// ReplicateDecisionStr stringified representation of replication decision
ReplicateDecisionStr string `xml:"-"`
}
// createBucketLocationConfiguration is the container for the bucket configuration request from the client.
// Used for parsing the location from the request body for MakeBucket.
type createBucketLocationConfiguration struct {
XMLName xml.Name `xml:"CreateBucketConfiguration" json:"-"`
Location string `xml:"LocationConstraint"`
}
// DeleteObjectsRequest - xml carrying the object key names which need to be deleted.
type DeleteObjectsRequest struct {
// Element to enable quiet mode for the request
Quiet bool
// List of objects to be deleted
Objects []ObjectToDelete `xml:"Object"`
}

2639
cmd/api-errors.go Normal file

File diff suppressed because it is too large Load Diff

89
cmd/api-errors_test.go Normal file
View File

@ -0,0 +1,89 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"errors"
"testing"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/hash"
)
var toAPIErrorTests = []struct {
err error
errCode APIErrorCode
}{
{err: hash.BadDigest{}, errCode: ErrBadDigest},
{err: hash.SHA256Mismatch{}, errCode: ErrContentSHA256Mismatch},
{err: IncompleteBody{}, errCode: ErrIncompleteBody},
{err: ObjectExistsAsDirectory{}, errCode: ErrObjectExistsAsDirectory},
{err: BucketNameInvalid{}, errCode: ErrInvalidBucketName},
{err: BucketExists{}, errCode: ErrBucketAlreadyOwnedByYou},
{err: ObjectNotFound{}, errCode: ErrNoSuchKey},
{err: ObjectNameInvalid{}, errCode: ErrInvalidObjectName},
{err: InvalidUploadID{}, errCode: ErrNoSuchUpload},
{err: InvalidPart{}, errCode: ErrInvalidPart},
{err: InsufficientReadQuorum{}, errCode: ErrSlowDownRead},
{err: InsufficientWriteQuorum{}, errCode: ErrSlowDownWrite},
{err: InvalidUploadIDKeyCombination{}, errCode: ErrNotImplemented},
{err: MalformedUploadID{}, errCode: ErrNoSuchUpload},
{err: PartTooSmall{}, errCode: ErrEntityTooSmall},
{err: BucketNotEmpty{}, errCode: ErrBucketNotEmpty},
{err: BucketNotFound{}, errCode: ErrNoSuchBucket},
{err: StorageFull{}, errCode: ErrStorageFull},
{err: NotImplemented{}, errCode: ErrNotImplemented},
{err: errSignatureMismatch, errCode: ErrSignatureDoesNotMatch},
// SSE-C errors
{err: crypto.ErrInvalidCustomerAlgorithm, errCode: ErrInvalidSSECustomerAlgorithm},
{err: crypto.ErrMissingCustomerKey, errCode: ErrMissingSSECustomerKey},
{err: crypto.ErrInvalidCustomerKey, errCode: ErrAccessDenied},
{err: crypto.ErrMissingCustomerKeyMD5, errCode: ErrMissingSSECustomerKeyMD5},
{err: crypto.ErrCustomerKeyMD5Mismatch, errCode: ErrSSECustomerKeyMD5Mismatch},
{err: errObjectTampered, errCode: ErrObjectTampered},
{err: nil, errCode: ErrNone},
{err: errors.New("Custom error"), errCode: ErrInternalError}, // Case where err type is unknown.
}
func TestAPIErrCode(t *testing.T) {
ctx := t.Context()
for i, testCase := range toAPIErrorTests {
errCode := toAPIErrorCode(ctx, testCase.err)
if errCode != testCase.errCode {
t.Errorf("Test %d: Expected error code %d, got %d", i+1, testCase.errCode, errCode)
}
}
}
// Check if an API error is properly defined
func TestAPIErrCodeDefinition(t *testing.T) {
for errAPI := ErrNone + 1; errAPI < apiErrCodeEnd; errAPI++ {
errCode, ok := errorCodes[errAPI]
if !ok {
t.Fatal(errAPI, "error code is not defined in the API error code table")
}
if errCode.Code == "" {
t.Fatal(errAPI, "error code has an empty XML code")
}
if errCode.HTTPStatusCode == 0 {
t.Fatal(errAPI, "error code has a zero HTTP status code")
}
}
}

269
cmd/api-headers.go Normal file
View File

@ -0,0 +1,269 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"encoding/json"
"encoding/xml"
"fmt"
"mime"
"net/http"
"strconv"
"strings"
"time"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/crypto"
xhttp "github.com/minio/minio/internal/http"
xxml "github.com/minio/xxml"
)
// Returns a hexadecimal representation of the time at which
// the response is sent to the client.
func mustGetRequestID(t time.Time) string {
return fmt.Sprintf("%X", t.UnixNano())
}
// setEventStreamHeaders sets headers that tell proxies not to buffer the response
func setEventStreamHeaders(w http.ResponseWriter) {
w.Header().Set(xhttp.ContentType, "text/event-stream")
w.Header().Set(xhttp.CacheControl, "no-cache") // nginx to turn off buffering
w.Header().Set("X-Accel-Buffering", "no") // nginx to turn off buffering
}
// Write http common headers
func setCommonHeaders(w http.ResponseWriter) {
// Set the "Server" http header.
w.Header().Set(xhttp.ServerInfo, MinioStoreName)
// Set `x-amz-bucket-region` only if region is set on the server
// by default minio uses an empty region.
if region := globalSite.Region(); region != "" {
w.Header().Set(xhttp.AmzBucketRegion, region)
}
w.Header().Set(xhttp.AcceptRanges, "bytes")
// Remove sensitive information
crypto.RemoveSensitiveHeaders(w.Header())
}
// Encodes the response into XML format.
func encodeResponse(response interface{}) []byte {
var buf bytes.Buffer
buf.WriteString(xml.Header)
if err := xml.NewEncoder(&buf).Encode(response); err != nil {
bugLogIf(GlobalContext, err)
return nil
}
return buf.Bytes()
}
// encodeResponseList supports control characters in object names.
// It must be used only by ListObjects() variants for objects with
// control characters; it is a specialized extension to support
// AWS S3 compatible behavior.
//
// Do not use this function for anything other than ListObjects()
// variants; please open a GitHub discussion if you wish to use
// it elsewhere.
func encodeResponseList(response interface{}) []byte {
var buf bytes.Buffer
buf.WriteString(xxml.Header)
if err := xxml.NewEncoder(&buf).Encode(response); err != nil {
bugLogIf(GlobalContext, err)
return nil
}
return buf.Bytes()
}
// Encodes the response into JSON format.
func encodeResponseJSON(response interface{}) []byte {
var bytesBuffer bytes.Buffer
e := json.NewEncoder(&bytesBuffer)
e.Encode(response)
return bytesBuffer.Bytes()
}
// Write parts count
func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
if strings.Contains(objInfo.ETag, "-") && len(objInfo.Parts) > 0 {
w.Header()[xhttp.AmzMpPartsCount] = []string{strconv.Itoa(len(objInfo.Parts))}
}
}
// Write object header
func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
// set common headers
setCommonHeaders(w)
// Set last modified time.
lastModified := objInfo.ModTime.UTC().Format(http.TimeFormat)
w.Header().Set(xhttp.LastModified, lastModified)
// Set Etag if available.
if objInfo.ETag != "" {
w.Header()[xhttp.ETag] = []string{"\"" + objInfo.ETag + "\""}
}
if objInfo.ContentType != "" {
w.Header().Set(xhttp.ContentType, objInfo.ContentType)
}
if objInfo.ContentEncoding != "" {
w.Header().Set(xhttp.ContentEncoding, objInfo.ContentEncoding)
}
if !objInfo.Expires.IsZero() {
w.Header().Set(xhttp.Expires, objInfo.Expires.UTC().Format(http.TimeFormat))
}
// Set tag count if object has tags
if len(objInfo.UserTags) > 0 {
tags, _ := tags.ParseObjectTags(objInfo.UserTags)
if tags != nil && tags.Count() > 0 {
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tags.Count())}
if opts.Tagging {
// This is MinIO only extension to return back tags along with the count.
w.Header()[xhttp.AmzObjectTagging] = []string{objInfo.UserTags}
}
}
}
// Set all other user defined metadata.
for k, v := range objInfo.UserDefined {
// Empty values for object lock and retention can be skipped.
if v == "" && equals(k, xhttp.AmzObjectLockMode, xhttp.AmzObjectLockRetainUntilDate) {
continue
}
if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
// Do not need to send any internal metadata
// values to client.
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
var isSet bool
for _, userMetadataPrefix := range userMetadataKeyPrefixes {
if !stringsHasPrefixFold(k, userMetadataPrefix) {
continue
}
// check the doc https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html
// For metadata values like "ö", "ÄMÄZÕÑ S3", and "öha, das sollte eigentlich
// funktionieren", tested against a real AWS S3 bucket, S3 may encode incorrectly. For
// example, "ö" was encoded as =?UTF-8?B?w4PCtg==?=, producing invalid UTF-8 instead
// of =?UTF-8?B?w7Y=?=. This mirrors errors like the 你 in another string.
//
// S3 uses B-encoding (Base64) for non-ASCII-heavy metadata and Q-encoding
// (quoted-printable) for mostly ASCII strings. Long strings are split at word
// boundaries to fit RFC 2047's 75-character limit, ensuring HTTP parser
// compatibility.
//
// However, this splitting increases header size and can introduce errors, unlike Go's
// mime package in MinIO, which correctly encodes strings with fixed B/Q encodings,
// avoiding S3's heuristic-driven issues.
//
// For MinIO developers, decode S3 metadata with mime.WordDecoder, validate outputs,
// report encoding bugs to AWS, and use ASCII-only metadata to ensure reliable S3 API
// compatibility.
if needsMimeEncoding(v) {
// see https://github.com/golang/go/blob/release-branch.go1.24/src/net/mail/message.go#L325
if strings.ContainsAny(v, "\"#$%&'(),.:;<>@[]^`{|}~") {
v = mime.BEncoding.Encode("UTF-8", v)
} else {
v = mime.QEncoding.Encode("UTF-8", v)
}
}
w.Header()[strings.ToLower(k)] = []string{v}
isSet = true
break
}
if !isSet {
w.Header().Set(k, v)
}
}
var start, rangeLen int64
totalObjectSize, err := objInfo.GetActualSize()
if err != nil {
return err
}
if rs == nil && opts.PartNumber > 0 {
rs = partNumberToRangeSpec(objInfo, opts.PartNumber)
}
// For providing ranged content
start, rangeLen, err = rs.GetOffsetLength(totalObjectSize)
if err != nil {
return err
}
// Set content length.
w.Header().Set(xhttp.ContentLength, strconv.FormatInt(rangeLen, 10))
if rs != nil {
contentRange := fmt.Sprintf("bytes %d-%d/%d", start, start+rangeLen-1, totalObjectSize)
w.Header().Set(xhttp.ContentRange, contentRange)
}
// Set the relevant version ID as part of the response header.
if objInfo.VersionID != "" && objInfo.VersionID != nullVersionID {
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
}
if objInfo.ReplicationStatus.String() != "" {
w.Header()[xhttp.AmzBucketReplicationStatus] = []string{objInfo.ReplicationStatus.String()}
}
if objInfo.IsRemote() {
// Check if object is being restored. For more information on x-amz-restore header see
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseSyntax
w.Header()[xhttp.AmzStorageClass] = []string{filterStorageClass(ctx, objInfo.TransitionedObject.Tier)}
}
if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
lc.SetPredictionHeaders(w, objInfo.ToLifecycleOpts())
}
if v, ok := objInfo.UserDefined[ReservedMetadataPrefix+"compression"]; ok {
if i := strings.LastIndexByte(v, '/'); i >= 0 {
v = v[i+1:]
}
w.Header()[xhttp.MinIOCompressed] = []string{v}
}
return nil
}
// needsMimeEncoding reports whether s contains any bytes that need to be encoded.
// see mime.needsEncoding
func needsMimeEncoding(s string) bool {
for _, b := range s {
if (b < ' ' || b > '~') && b != '\t' {
return true
}
}
return false
}
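// Illustrative only (hypothetical value): a metadata value such as "öha"
// needs MIME encoding and round-trips through Go's mime package as follows:
//
//	enc := mime.QEncoding.Encode("UTF-8", "öha")       // "=?UTF-8?q?=C3=B6ha?="
//	dec, _ := new(mime.WordDecoder).DecodeHeader(enc)  // "öha"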

42
cmd/api-headers_test.go Normal file
View File

@ -0,0 +1,42 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"testing"
)
func TestNewRequestID(t *testing.T) {
// Ensure that it returns an alphanumeric result of length 16.
id := mustGetRequestID(UTCNow())
if len(id) != 16 {
t.Fail()
}
for _, char := range id {
// Ensure that it is alphanumeric, in this case, between 0-9 and A-Z.
isAlnum := ('0' <= char && char <= '9') || ('A' <= char && char <= 'Z')
if !isAlnum {
t.Fail()
}
}
}

153
cmd/api-resources.go Normal file
View File

@ -0,0 +1,153 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"encoding/base64"
"net/url"
"strconv"
)
// Parse bucket url queries
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType string, errCode APIErrorCode) {
errCode = ErrNone
if values.Get("max-keys") != "" {
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
}
} else {
maxkeys = maxObjectList
}
prefix = values.Get("prefix")
marker = values.Get("marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return
}
func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType, versionIDMarker string, errCode APIErrorCode) {
errCode = ErrNone
if values.Get("max-keys") != "" {
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
}
} else {
maxkeys = maxObjectList
}
prefix = values.Get("prefix")
marker = values.Get("key-marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
versionIDMarker = values.Get("version-id-marker")
return
}
// Parse bucket url queries for ListObjects V2.
func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimiter string, fetchOwner bool, maxkeys int, encodingType string, errCode APIErrorCode) {
errCode = ErrNone
// The continuation-token cannot be empty.
if val, ok := values["continuation-token"]; ok {
if len(val[0]) == 0 {
errCode = ErrIncorrectContinuationToken
return
}
}
if values.Get("max-keys") != "" {
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
}
} else {
maxkeys = maxObjectList
}
prefix = values.Get("prefix")
startAfter = values.Get("start-after")
delimiter = values.Get("delimiter")
fetchOwner = values.Get("fetch-owner") == "true"
encodingType = values.Get("encoding-type")
if token = values.Get("continuation-token"); token != "" {
decodedToken, err := base64.StdEncoding.DecodeString(token)
if err != nil {
errCode = ErrIncorrectContinuationToken
return
}
token = string(decodedToken)
}
return
}
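// For example, a continuation-token of "b2JqLTEwMA==" decodes above to
// "obj-100", so the listing resumes from that point; a token that is not
// valid base64 yields ErrIncorrectContinuationToken.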
// Parse bucket url queries for ?uploads
func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string, errCode APIErrorCode) {
errCode = ErrNone
if values.Get("max-uploads") != "" {
var err error
if maxUploads, err = strconv.Atoi(values.Get("max-uploads")); err != nil {
errCode = ErrInvalidMaxUploads
return
}
} else {
maxUploads = maxUploadsList
}
prefix = values.Get("prefix")
keyMarker = values.Get("key-marker")
uploadIDMarker = values.Get("upload-id-marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return
}
// Parse object url queries
func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string, errCode APIErrorCode) {
var err error
errCode = ErrNone
if values.Get("max-parts") != "" {
if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil {
errCode = ErrInvalidMaxParts
return
}
} else {
maxParts = maxPartsList
}
if values.Get("part-number-marker") != "" {
if partNumberMarker, err = strconv.Atoi(values.Get("part-number-marker")); err != nil {
errCode = ErrInvalidPartNumberMarker
return
}
}
uploadID = values.Get("uploadId")
encodingType = values.Get("encoding-type")
return
}

222
cmd/api-resources_test.go Normal file
View File

@ -0,0 +1,222 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"net/url"
"testing"
)
// Test list objects resources V2.
func TestListObjectsV2Resources(t *testing.T) {
testCases := []struct {
values url.Values
prefix, token, startAfter, delimiter string
fetchOwner bool
maxKeys int
encodingType string
errCode APIErrorCode
}{
{
values: url.Values{
"prefix": []string{"photos/"},
"continuation-token": []string{"dG9rZW4="},
"start-after": []string{"start-after"},
"delimiter": []string{SlashSeparator},
"fetch-owner": []string{"true"},
"max-keys": []string{"100"},
"encoding-type": []string{"gzip"},
},
prefix: "photos/",
token: "token",
startAfter: "start-after",
delimiter: SlashSeparator,
fetchOwner: true,
maxKeys: 100,
encodingType: "gzip",
errCode: ErrNone,
},
{
values: url.Values{
"prefix": []string{"photos/"},
"continuation-token": []string{"dG9rZW4="},
"start-after": []string{"start-after"},
"delimiter": []string{SlashSeparator},
"fetch-owner": []string{"true"},
"encoding-type": []string{"gzip"},
},
prefix: "photos/",
token: "token",
startAfter: "start-after",
delimiter: SlashSeparator,
fetchOwner: true,
maxKeys: maxObjectList,
encodingType: "gzip",
errCode: ErrNone,
},
{
values: url.Values{
"prefix": []string{"photos/"},
"continuation-token": []string{""},
"start-after": []string{"start-after"},
"delimiter": []string{SlashSeparator},
"fetch-owner": []string{"true"},
"encoding-type": []string{"gzip"},
},
prefix: "",
token: "",
startAfter: "",
delimiter: "",
fetchOwner: false,
maxKeys: 0,
encodingType: "",
errCode: ErrIncorrectContinuationToken,
},
}
for i, testCase := range testCases {
prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(testCase.values)
if errCode != testCase.errCode {
t.Errorf("Test %d: Expected error code:%d, got %d", i+1, testCase.errCode, errCode)
}
if prefix != testCase.prefix {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.prefix, prefix)
}
if token != testCase.token {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.token, token)
}
if startAfter != testCase.startAfter {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.startAfter, startAfter)
}
if delimiter != testCase.delimiter {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.delimiter, delimiter)
}
if fetchOwner != testCase.fetchOwner {
t.Errorf("Test %d: Expected %t, got %t", i+1, testCase.fetchOwner, fetchOwner)
}
if maxKeys != testCase.maxKeys {
t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.maxKeys, maxKeys)
}
if encodingType != testCase.encodingType {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.encodingType, encodingType)
}
}
}
// Test list objects resources V1.
func TestListObjectsV1Resources(t *testing.T) {
testCases := []struct {
values url.Values
prefix, marker, delimiter string
maxKeys int
encodingType string
}{
{
values: url.Values{
"prefix": []string{"photos/"},
"marker": []string{"test"},
"delimiter": []string{SlashSeparator},
"max-keys": []string{"100"},
"encoding-type": []string{"gzip"},
},
prefix: "photos/",
marker: "test",
delimiter: SlashSeparator,
maxKeys: 100,
encodingType: "gzip",
},
{
values: url.Values{
"prefix": []string{"photos/"},
"marker": []string{"test"},
"delimiter": []string{SlashSeparator},
"encoding-type": []string{"gzip"},
},
prefix: "photos/",
marker: "test",
delimiter: SlashSeparator,
maxKeys: maxObjectList,
encodingType: "gzip",
},
}
for i, testCase := range testCases {
prefix, marker, delimiter, maxKeys, encodingType, argsErr := getListObjectsV1Args(testCase.values)
if argsErr != ErrNone {
t.Errorf("Test %d: argument parsing failed, got %v", i+1, argsErr)
}
if prefix != testCase.prefix {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.prefix, prefix)
}
if marker != testCase.marker {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.marker, marker)
}
if delimiter != testCase.delimiter {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.delimiter, delimiter)
}
if maxKeys != testCase.maxKeys {
t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.maxKeys, maxKeys)
}
if encodingType != testCase.encodingType {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.encodingType, encodingType)
}
}
}
// Validates extracting information for object resources.
func TestGetObjectsResources(t *testing.T) {
testCases := []struct {
values url.Values
uploadID string
partNumberMarker, maxParts int
encodingType string
}{
{
values: url.Values{
"uploadId": []string{"11123-11312312311231-12313"},
"part-number-marker": []string{"1"},
"max-parts": []string{"1000"},
"encoding-type": []string{"gzip"},
},
uploadID: "11123-11312312311231-12313",
partNumberMarker: 1,
maxParts: 1000,
encodingType: "gzip",
},
}
for i, testCase := range testCases {
uploadID, partNumberMarker, maxParts, encodingType, argsErr := getObjectResources(testCase.values)
if argsErr != ErrNone {
t.Errorf("Test %d: argument parsing failed, got %v", i+1, argsErr)
}
if uploadID != testCase.uploadID {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.uploadID, uploadID)
}
if partNumberMarker != testCase.partNumberMarker {
t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.partNumberMarker, partNumberMarker)
}
if maxParts != testCase.maxParts {
t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.maxParts, maxParts)
}
if encodingType != testCase.encodingType {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.encodingType, encodingType)
}
}
}

1017
cmd/api-response.go Normal file

File diff suppressed because it is too large Load Diff

125
cmd/api-response_test.go Normal file
View File

@ -0,0 +1,125 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"net/http"
"testing"
)
// Tests object location.
func TestObjectLocation(t *testing.T) {
testCases := []struct {
request *http.Request
bucket, object string
domains []string
expectedLocation string
}{
// Server binding to localhost IP, scheme taken from X-Forwarded-Scheme.
{
request: &http.Request{
Host: "127.0.0.1:9000",
Header: map[string][]string{
"X-Forwarded-Scheme": {httpScheme},
},
},
bucket: "testbucket1",
object: "test/1.txt",
expectedLocation: "http://127.0.0.1:9000/testbucket1/test/1.txt",
},
{
request: &http.Request{
Host: "127.0.0.1:9000",
Header: map[string][]string{
"X-Forwarded-Scheme": {httpsScheme},
},
},
bucket: "testbucket1",
object: "test/1.txt",
expectedLocation: "https://127.0.0.1:9000/testbucket1/test/1.txt",
},
// Server binding to fqdn.
{
request: &http.Request{
Host: "s3.mybucket.org",
Header: map[string][]string{
"X-Forwarded-Scheme": {httpScheme},
},
},
bucket: "mybucket",
object: "test/1.txt",
expectedLocation: "http://s3.mybucket.org/mybucket/test/1.txt",
},
// Server binding to fqdn.
{
request: &http.Request{
Host: "mys3.mybucket.org",
Header: map[string][]string{},
},
bucket: "mybucket",
object: "test/1.txt",
expectedLocation: "http://mys3.mybucket.org/mybucket/test/1.txt",
},
// Server with virtual domain name.
{
request: &http.Request{
Host: "mybucket.mys3.bucket.org",
Header: map[string][]string{},
},
domains: []string{"mys3.bucket.org"},
bucket: "mybucket",
object: "test/1.txt",
expectedLocation: "http://mybucket.mys3.bucket.org/test/1.txt",
},
{
request: &http.Request{
Host: "mybucket.mys3.bucket.org",
Header: map[string][]string{
"X-Forwarded-Scheme": {httpsScheme},
},
},
domains: []string{"mys3.bucket.org"},
bucket: "mybucket",
object: "test/1.txt",
expectedLocation: "https://mybucket.mys3.bucket.org/test/1.txt",
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
if testCase.expectedLocation != gotLocation {
t.Errorf("expected %s, got %s", testCase.expectedLocation, gotLocation)
}
})
}
}
// Tests getURLScheme function behavior.
func TestGetURLScheme(t *testing.T) {
tls := false
gotScheme := getURLScheme(tls)
if gotScheme != httpScheme {
t.Errorf("Expected %s, got %s", httpScheme, gotScheme)
}
tls = true
gotScheme = getURLScheme(tls)
if gotScheme != httpsScheme {
t.Errorf("Expected %s, got %s", httpsScheme, gotScheme)
}
}

695
cmd/api-router.go Normal file
View File

@ -0,0 +1,695 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"net"
"net/http"
consoleapi "github.com/minio/console/api"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/mux"
"github.com/minio/pkg/v3/wildcard"
"github.com/rs/cors"
)
func newHTTPServerFn() *xhttp.Server {
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
return globalHTTPServer
}
func setHTTPServer(h *xhttp.Server) {
globalObjLayerMutex.Lock()
globalHTTPServer = h
globalObjLayerMutex.Unlock()
}
func newConsoleServerFn() *consoleapi.Server {
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
return globalConsoleSrv
}
func setConsoleSrv(srv *consoleapi.Server) {
globalObjLayerMutex.Lock()
globalConsoleSrv = srv
globalObjLayerMutex.Unlock()
}
func newObjectLayerFn() ObjectLayer {
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
return globalObjectAPI
}
func setObjectLayer(o ObjectLayer) {
globalObjLayerMutex.Lock()
globalObjectAPI = o
globalObjLayerMutex.Unlock()
}
// objectAPIHandlers implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
}
// getHost tries its best to return the request host.
// According to section 14.23 of RFC 2616 the Host header
// can include the port number if the default value of 80 is not used.
func getHost(r *http.Request) string {
if r.URL.IsAbs() {
return r.URL.Host
}
return r.Host
}
func notImplementedHandler(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
}
type rejectedAPI struct {
api string
methods []string
queries []string
path string
}
var rejectedObjAPIs = []rejectedAPI{
{
api: "torrent",
methods: []string{http.MethodPut, http.MethodDelete, http.MethodGet},
queries: []string{"torrent", ""},
path: "/{object:.+}",
},
{
api: "acl",
methods: []string{http.MethodDelete},
queries: []string{"acl", ""},
path: "/{object:.+}",
},
}
var rejectedBucketAPIs = []rejectedAPI{
{
api: "inventory",
methods: []string{http.MethodGet, http.MethodPut, http.MethodDelete},
queries: []string{"inventory", ""},
},
{
api: "cors",
methods: []string{http.MethodPut, http.MethodDelete},
queries: []string{"cors", ""},
},
{
api: "metrics",
methods: []string{http.MethodGet, http.MethodPut, http.MethodDelete},
queries: []string{"metrics", ""},
},
{
api: "website",
methods: []string{http.MethodPut},
queries: []string{"website", ""},
},
{
api: "logging",
methods: []string{http.MethodPut, http.MethodDelete},
queries: []string{"logging", ""},
},
{
api: "accelerate",
methods: []string{http.MethodPut, http.MethodDelete},
queries: []string{"accelerate", ""},
},
{
api: "requestPayment",
methods: []string{http.MethodPut, http.MethodDelete},
queries: []string{"requestPayment", ""},
},
{
api: "acl",
methods: []string{http.MethodDelete, http.MethodPut, http.MethodHead},
queries: []string{"acl", ""},
},
{
api: "publicAccessBlock",
methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
queries: []string{"publicAccessBlock", ""},
},
{
api: "ownershipControls",
methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
queries: []string{"ownershipControls", ""},
},
{
api: "intelligent-tiering",
methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
queries: []string{"intelligent-tiering", ""},
},
{
api: "analytics",
methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
queries: []string{"analytics", ""},
},
}
// Set of s3 handler options as bit flags.
type s3HFlag uint8
const (
// when provided, disables Gzip compression.
noGZS3HFlag = 1 << iota
// when provided, enables only tracing of headers. Otherwise, both headers
// and body are traced.
traceHdrsS3HFlag
// when provided, disables throttling via the `maxClients` middleware.
noThrottleS3HFlag
)
func (h s3HFlag) has(flag s3HFlag) bool {
// Use bitwise-AND and check if the result is non-zero.
return h&flag != 0
}
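A minimal sketch of how these flags compose: they are combined with bitwise OR and queried with `has`. The helper below is purely illustrative and not part of this file.
func exampleFlagUsage() (bool, bool) {
flags := noGZS3HFlag | traceHdrsS3HFlag
// has() reports whether the given flag bit is set in the combined value.
return flags.has(traceHdrsS3HFlag), flags.has(noThrottleS3HFlag) // true, false
}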
// s3APIMiddleware - performs some common handler functionality for S3 API
// handlers.
//
// It is set per-"handler function registration" in the router to allow for
// behavior modification via flags.
//
// This middleware always calls `collectAPIStats` to collect API stats.
//
// The passed in handler function must be a method of `objectAPIHandlers` for
// the name displayed in logs and trace to be accurate. The name is extracted
// via reflection.
//
// When **no** flags are passed, the behavior is to trace both headers and body,
// gzip the response and throttle the handler via `maxClients`. Each of these
// can be disabled via the corresponding `s3HFlag`.
//
// CAUTION: for requests involving large req/resp bodies ensure to pass the
// `traceHdrsS3HFlag`, otherwise both headers and body will be traced, causing
// high memory usage!
func s3APIMiddleware(f http.HandlerFunc, flags ...s3HFlag) http.HandlerFunc {
// Collect all flags with the bitwise OR-assign operator.
var handlerFlags s3HFlag
for _, flag := range flags {
handlerFlags |= flag
}
// Get name of the handler using reflection.
handlerName := getHandlerName(f, "objectAPIHandlers")
var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
// Wrap the actual handler with the appropriate tracing middleware.
var tracedHandler http.HandlerFunc
if handlerFlags.has(traceHdrsS3HFlag) {
tracedHandler = httpTraceHdrs(f)
} else {
tracedHandler = httpTraceAll(f)
}
// Skip wrapping with the gzip middleware if specified.
gzippedHandler := tracedHandler
if !handlerFlags.has(noGZS3HFlag) {
gzippedHandler = gzipHandler(gzippedHandler)
}
// Skip wrapping with throttling middleware if specified.
throttledHandler := gzippedHandler
if !handlerFlags.has(noThrottleS3HFlag) {
throttledHandler = maxClients(throttledHandler)
}
// Collect API stats using the API name obtained via reflection in
// `getHandlerName`.
statsCollectedHandler := collectAPIStats(handlerName, throttledHandler)
// Call the final handler.
statsCollectedHandler(w, r)
}
return handler
}
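As a hedged illustration of the flag mechanics, a route handling large bodies could be registered with header-only tracing and gzip disabled; the helper and flag combination here are hypothetical and differ from the actual registrations below.
func exampleLargeBodyRoute(router *mux.Router, api objectAPIHandlers) {
// Trace only headers and skip gzip compression for this handler.
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectHandler, traceHdrsS3HFlag, noGZS3HFlag))
}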
// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(router *mux.Router) {
// Initialize API.
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
}
// API Router
apiRouter := router.PathPrefix(SlashSeparator).Subrouter()
var routers []*mux.Router
for _, domainName := range globalDomainNames {
if IsKubernetes() {
routers = append(routers, apiRouter.MatcherFunc(func(r *http.Request, match *mux.RouteMatch) bool {
host, _, err := net.SplitHostPort(getHost(r))
if err != nil {
host = r.Host
}
// Make sure to skip matching `minio.<domain>`; this is
// specifically meant for operator/k8s deployments.
// The reason we need to skip this is a special
// use case where we need to make sure that
// `minio.<namespace>.svc.<cluster_domain>` is ignored
// by the bucket-DNS style so that path style
// is available and honored at this domain.
//
// All other `<bucket>.<namespace>.svc.<cluster_domain>`
// hosts are routed through this matcher
// to match for `<bucket>`.
return host != minioReservedBucket+"."+domainName
}).Host("{bucket:.+}."+domainName).Subrouter())
} else {
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
}
}
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
for _, router := range routers {
// Register all rejected object APIs
for _, r := range rejectedObjAPIs {
t := router.Methods(r.methods...).
HandlerFunc(collectAPIStats(r.api, httpTraceAll(notImplementedHandler))).
Queries(r.queries...)
t.Path(r.path)
}
// Object operations
// HeadObject
router.Methods(http.MethodHead).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.HeadObjectHandler))
// GetObjectAttributes
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.GetObjectAttributesHandler, traceHdrsS3HFlag)).
Queries("attributes", "")
// CopyObjectPart
router.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(s3APIMiddleware(api.CopyObjectPartHandler)).
Queries("partNumber", "{partNumber:.*}", "uploadId", "{uploadId:.*}")
// PutObjectPart
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectPartHandler, traceHdrsS3HFlag)).
Queries("partNumber", "{partNumber:.*}", "uploadId", "{uploadId:.*}")
// ListObjectParts
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.ListObjectPartsHandler)).
Queries("uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
router.Methods(http.MethodPost).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.CompleteMultipartUploadHandler)).
Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
router.Methods(http.MethodPost).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.NewMultipartUploadHandler)).
Queries("uploads", "")
// AbortMultipartUpload
router.Methods(http.MethodDelete).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.AbortMultipartUploadHandler)).
Queries("uploadId", "{uploadId:.*}")
// GetObjectACL - this is a dummy call.
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.GetObjectACLHandler, traceHdrsS3HFlag)).
Queries("acl", "")
// PutObjectACL - this is a dummy call.
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectACLHandler, traceHdrsS3HFlag)).
Queries("acl", "")
// GetObjectTagging
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.GetObjectTaggingHandler, traceHdrsS3HFlag)).
Queries("tagging", "")
// PutObjectTagging
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectTaggingHandler, traceHdrsS3HFlag)).
Queries("tagging", "")
// DeleteObjectTagging
router.Methods(http.MethodDelete).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.DeleteObjectTaggingHandler, traceHdrsS3HFlag)).
Queries("tagging", "")
// SelectObjectContent
router.Methods(http.MethodPost).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.SelectObjectContentHandler, traceHdrsS3HFlag)).
Queries("select", "").Queries("select-type", "2")
// GetObjectRetention
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.GetObjectRetentionHandler)).
Queries("retention", "")
// GetObjectLegalHold
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.GetObjectLegalHoldHandler)).
Queries("legal-hold", "")
// GetObject with lambda ARNs
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.GetObjectLambdaHandler, traceHdrsS3HFlag)).
Queries("lambdaArn", "{lambdaArn:.*}")
// GetObject
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.GetObjectHandler, traceHdrsS3HFlag))
// CopyObject
router.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(s3APIMiddleware(api.CopyObjectHandler))
// PutObjectRetention
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectRetentionHandler)).
Queries("retention", "")
// PutObjectLegalHold
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectLegalHoldHandler)).
Queries("legal-hold", "")
// PutObject with auto-extract support for zip
router.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzSnowballExtract, "true").
HandlerFunc(s3APIMiddleware(api.PutObjectExtractHandler, traceHdrsS3HFlag))
// AppendObject to be rejected
router.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzWriteOffsetBytes, "").
HandlerFunc(s3APIMiddleware(errorResponseHandler))
// PutObject
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectHandler, traceHdrsS3HFlag))
// DeleteObject
router.Methods(http.MethodDelete).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.DeleteObjectHandler))
// PostRestoreObject
router.Methods(http.MethodPost).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PostRestoreObjectHandler)).
Queries("restore", "")
// Bucket operations
// GetBucketLocation
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketLocationHandler)).
Queries("location", "")
// GetBucketPolicy
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketPolicyHandler)).
Queries("policy", "")
// GetBucketLifecycle
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketLifecycleHandler)).
Queries("lifecycle", "")
// GetBucketEncryption
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketEncryptionHandler)).
Queries("encryption", "")
// GetBucketObjectLockConfig
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketObjectLockConfigHandler)).
Queries("object-lock", "")
// GetBucketReplicationConfig
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketReplicationConfigHandler)).
Queries("replication", "")
// GetBucketVersioning
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketVersioningHandler)).
Queries("versioning", "")
// GetBucketNotification
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketNotificationHandler)).
Queries("notification", "")
// ListenNotification
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag, traceHdrsS3HFlag)).
Queries("events", "{events:.*}")
// ResetBucketReplicationStatus - MinIO extension API
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ResetBucketReplicationStatusHandler)).
Queries("replication-reset-status", "")
// Dummy Bucket Calls
// GetBucketACL -- this is a dummy call.
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketACLHandler)).
Queries("acl", "")
// PutBucketACL -- this is a dummy call.
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketACLHandler)).
Queries("acl", "")
// GetBucketCors - this is a dummy call.
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketCorsHandler)).
Queries("cors", "")
// PutBucketCors - this is a dummy call.
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketCorsHandler)).
Queries("cors", "")
// DeleteBucketCors - this is a dummy call.
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketCorsHandler)).
Queries("cors", "")
// GetBucketWebsiteHandler - this is a dummy call.
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketWebsiteHandler)).
Queries("website", "")
// GetBucketAccelerateHandler - this is a dummy call.
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketAccelerateHandler)).
Queries("accelerate", "")
// GetBucketRequestPaymentHandler - this is a dummy call.
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketRequestPaymentHandler)).
Queries("requestPayment", "")
// GetBucketLoggingHandler - this is a dummy call.
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketLoggingHandler)).
Queries("logging", "")
// GetBucketTaggingHandler
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketTaggingHandler)).
Queries("tagging", "")
// DeleteBucketWebsiteHandler
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketWebsiteHandler)).
Queries("website", "")
// DeleteBucketTaggingHandler
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketTaggingHandler)).
Queries("tagging", "")
// ListMultipartUploads
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ListMultipartUploadsHandler)).
Queries("uploads", "")
// ListObjectsV2M
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ListObjectsV2MHandler)).
Queries("list-type", "2", "metadata", "true")
// ListObjectsV2
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ListObjectsV2Handler)).
Queries("list-type", "2")
// ListObjectVersions
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ListObjectVersionsMHandler)).
Queries("versions", "", "metadata", "true")
// ListObjectVersions
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ListObjectVersionsHandler)).
Queries("versions", "")
// GetBucketPolicyStatus
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketPolicyStatusHandler)).
Queries("policyStatus", "")
// PutBucketLifecycle
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketLifecycleHandler)).
Queries("lifecycle", "")
// PutBucketReplicationConfig
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketReplicationConfigHandler)).
Queries("replication", "")
// PutBucketEncryption
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketEncryptionHandler)).
Queries("encryption", "")
// PutBucketPolicy
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketPolicyHandler)).
Queries("policy", "")
// PutBucketObjectLockConfig
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketObjectLockConfigHandler)).
Queries("object-lock", "")
// PutBucketTaggingHandler
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketTaggingHandler)).
Queries("tagging", "")
// PutBucketVersioning
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketVersioningHandler)).
Queries("versioning", "")
// PutBucketNotification
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketNotificationHandler)).
Queries("notification", "")
// ResetBucketReplicationStart - MinIO extension API
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.ResetBucketReplicationStartHandler)).
Queries("replication-reset", "")
// PutBucket
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketHandler))
// HeadBucket
router.Methods(http.MethodHead).
HandlerFunc(s3APIMiddleware(api.HeadBucketHandler))
// PostPolicy
router.Methods(http.MethodPost).
MatcherFunc(func(r *http.Request, _ *mux.RouteMatch) bool {
return isRequestPostPolicySignatureV4(r)
}).
HandlerFunc(s3APIMiddleware(api.PostPolicyBucketHandler, traceHdrsS3HFlag))
// DeleteMultipleObjects
router.Methods(http.MethodPost).
HandlerFunc(s3APIMiddleware(api.DeleteMultipleObjectsHandler)).
Queries("delete", "")
// DeleteBucketPolicy
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketPolicyHandler)).
Queries("policy", "")
// DeleteBucketReplication
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketReplicationConfigHandler)).
Queries("replication", "")
// DeleteBucketLifecycle
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketLifecycleHandler)).
Queries("lifecycle", "")
// DeleteBucketEncryption
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketEncryptionHandler)).
Queries("encryption", "")
// DeleteBucket
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketHandler))
// MinIO extension API for replication.
//
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketReplicationMetricsV2Handler)).
Queries("replication-metrics", "2")
// deprecated handler
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketReplicationMetricsHandler)).
Queries("replication-metrics", "")
// ValidateBucketReplicationCreds
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ValidateBucketReplicationCredsHandler)).
Queries("replication-check", "")
// Register rejected bucket APIs
for _, r := range rejectedBucketAPIs {
router.Methods(r.methods...).
HandlerFunc(collectAPIStats(r.api, httpTraceAll(notImplementedHandler))).
Queries(r.queries...)
}
// S3 ListObjectsV1 (Legacy)
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ListObjectsV1Handler))
}
// Root operation
// ListenNotification
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).
HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag, traceHdrsS3HFlag)).
Queries("events", "{events:.*}")
// ListBuckets
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).
HandlerFunc(s3APIMiddleware(api.ListBucketsHandler))
// S3 browser with signature v4 adds '//' for ListBuckets request, so rather
// than failing with UnknownAPIRequest we simply handle it for now.
apiRouter.Methods(http.MethodGet).Path(SlashSeparator + SlashSeparator).
HandlerFunc(s3APIMiddleware(api.ListBucketsHandler))
// If none of the routes match add default error handler routes
apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(methodNotAllowedHandler("S3")))
}
// corsHandler handler for CORS (Cross Origin Resource Sharing)
func corsHandler(handler http.Handler) http.Handler {
commonS3Headers := []string{
xhttp.Date,
xhttp.ETag,
xhttp.ServerInfo,
xhttp.Connection,
xhttp.AcceptRanges,
xhttp.ContentRange,
xhttp.ContentEncoding,
xhttp.ContentLength,
xhttp.ContentType,
xhttp.ContentDisposition,
xhttp.LastModified,
xhttp.ContentLanguage,
xhttp.CacheControl,
xhttp.RetryAfter,
xhttp.AmzBucketRegion,
xhttp.Expires,
"X-Amz*",
"x-amz*",
"*",
}
opts := cors.Options{
AllowOriginFunc: func(origin string) bool {
for _, allowedOrigin := range globalAPIConfig.getCorsAllowOrigins() {
if wildcard.MatchSimple(allowedOrigin, origin) {
return true
}
}
return false
},
AllowedMethods: []string{
http.MethodGet,
http.MethodPut,
http.MethodHead,
http.MethodPost,
http.MethodDelete,
http.MethodOptions,
http.MethodPatch,
},
AllowedHeaders: commonS3Headers,
ExposedHeaders: commonS3Headers,
AllowCredentials: true,
}
return cors.New(opts).Handler(handler)
}
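A minimal sketch of how this wrapper might be applied when wiring the server; the helper name and wiring below are hypothetical and not taken from this commit.
func exampleCORSWiring() http.Handler {
r := mux.NewRouter()
// Register the S3-compatible routes, then wrap the whole router with CORS handling.
registerAPIRouter(r)
return corsHandler(r)
}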

119
cmd/api-utils.go Normal file
View File

@ -0,0 +1,119 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"net/http"
"reflect"
"runtime"
"strings"
)
func shouldEscape(c byte) bool {
if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
return false
}
switch c {
case '-', '_', '.', '/', '*':
return false
}
return true
}
// s3URLEncode is based on Golang's url.QueryEscape() code,
// while considering some S3 exceptions:
// - Avoid encoding '/' and '*'
// - Force encoding of '~'
func s3URLEncode(s string) string {
spaceCount, hexCount := 0, 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c) {
if c == ' ' {
spaceCount++
} else {
hexCount++
}
}
}
if spaceCount == 0 && hexCount == 0 {
return s
}
var buf [64]byte
var t []byte
required := len(s) + 2*hexCount
if required <= len(buf) {
t = buf[:required]
} else {
t = make([]byte, required)
}
if hexCount == 0 {
copy(t, s)
for i := 0; i < len(s); i++ {
if s[i] == ' ' {
t[i] = '+'
}
}
return string(t)
}
j := 0
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case c == ' ':
t[j] = '+'
j++
case shouldEscape(c):
t[j] = '%'
t[j+1] = "0123456789ABCDEF"[c>>4]
t[j+2] = "0123456789ABCDEF"[c&15]
j += 3
default:
t[j] = s[i]
j++
}
}
return string(t)
}
// s3EncodeName encodes string in response when encodingType is specified in AWS S3 requests.
func s3EncodeName(name, encodingType string) string {
if strings.ToLower(encodingType) == "url" {
return s3URLEncode(name)
}
return name
}
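A few illustrative results, mirroring the table in cmd/api-utils_test.go below; the helper name is hypothetical.
func exampleS3Encode() []string {
return []string{
s3EncodeName("a b", "url"),   // "a+b": space becomes '+'
s3EncodeName("p/", "url"),    // "p/": '/' is never escaped
s3EncodeName("~user", "url"), // "%7Euser": '~' is force-escaped
s3EncodeName("a b", ""),      // "a b": without encodingType the name is returned unchanged
}
}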
// getHandlerName returns the name of the handler function. It takes the type
// name as a string to clean up the name retrieved via reflection. This function
// only works correctly when the type is present in the cmd package.
func getHandlerName(f http.HandlerFunc, cmdType string) string {
name := runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
packageName := fmt.Sprintf("github.com/minio/minio/cmd.%s.", cmdType)
name = strings.TrimPrefix(name, packageName)
name = strings.TrimSuffix(name, "Handler-fm")
name = strings.TrimSuffix(name, "-fm")
return name
}
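A hedged sketch of the cleanup this performs: for a method value from this package, the `github.com/minio/minio/cmd.objectAPIHandlers.` prefix and the `Handler-fm` suffix added by method-value closures are stripped. The helper name is hypothetical.
func exampleHandlerName(api objectAPIHandlers) string {
return getHandlerName(api.GetObjectHandler, "objectAPIHandlers") // "GetObject"
}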

49
cmd/api-utils_test.go Normal file
View File

@ -0,0 +1,49 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"testing"
)
func TestS3EncodeName(t *testing.T) {
testCases := []struct {
inputText, encodingType, expectedOutput string
}{
{"a b", "", "a b"},
{"a b", "url", "a+b"},
{"p- ", "url", "p-+"},
{"p-%", "url", "p-%25"},
{"p/", "url", "p/"},
{"p/", "url", "p/"},
{"~user", "url", "%7Euser"},
{"*user", "url", "*user"},
{"user+password", "url", "user%2Bpassword"},
{"_user", "url", "_user"},
{"firstname.lastname", "url", "firstname.lastname"},
}
for i, testCase := range testCases {
t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {
outputText := s3EncodeName(testCase.inputText, testCase.encodingType)
if testCase.expectedOutput != outputText {
t.Errorf("Expected `%s`, got `%s`", testCase.expectedOutput, outputText)
}
})
}
}

354
cmd/apierrorcode_string.go Normal file

File diff suppressed because one or more lines are too long

785
cmd/auth-handler.go Normal file
View File

@ -0,0 +1,785 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"crypto/subtle"
"encoding/base64"
"encoding/hex"
"errors"
"io"
"mime"
"net/http"
"net/url"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/minio/minio/internal/auth"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/etag"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
xjwt "github.com/minio/minio/internal/jwt"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/mcontext"
"github.com/minio/pkg/v3/policy"
)
// Verify if request has JWT.
func isRequestJWT(r *http.Request) bool {
return strings.HasPrefix(r.Header.Get(xhttp.Authorization), jwtAlgorithm)
}
// Verify if request has AWS Signature Version '4'.
func isRequestSignatureV4(r *http.Request) bool {
return strings.HasPrefix(r.Header.Get(xhttp.Authorization), signV4Algorithm)
}
// Verify if request has AWS Signature Version '2'.
func isRequestSignatureV2(r *http.Request) bool {
return (!strings.HasPrefix(r.Header.Get(xhttp.Authorization), signV4Algorithm) &&
strings.HasPrefix(r.Header.Get(xhttp.Authorization), signV2Algorithm))
}
// Verify if request has AWS PreSign Version '4'.
func isRequestPresignedSignatureV4(r *http.Request) bool {
_, ok := r.Form[xhttp.AmzCredential]
return ok
}
// Verify request has AWS PreSign Version '2'.
func isRequestPresignedSignatureV2(r *http.Request) bool {
_, ok := r.Form[xhttp.AmzAccessKeyID]
return ok
}
// Verify if request has AWS Post policy Signature Version '4'.
func isRequestPostPolicySignatureV4(r *http.Request) bool {
mediaType, _, err := mime.ParseMediaType(r.Header.Get(xhttp.ContentType))
if err != nil {
return false
}
return mediaType == "multipart/form-data" && r.Method == http.MethodPost
}
// Verify if the request has AWS Streaming Signature Version '4'. This is only valid for 'PUT' operation.
func isRequestSignStreamingV4(r *http.Request) bool {
return r.Header.Get(xhttp.AmzContentSha256) == streamingContentSHA256 &&
r.Method == http.MethodPut
}
// Verify if the request has AWS Streaming Signature Version '4'. This is only valid for 'PUT' operation.
func isRequestSignStreamingTrailerV4(r *http.Request) bool {
return r.Header.Get(xhttp.AmzContentSha256) == streamingContentSHA256Trailer &&
r.Method == http.MethodPut
}
// Verify if the request has AWS Streaming Signature Version '4', with unsigned content and trailer.
func isRequestUnsignedTrailerV4(r *http.Request) bool {
return r.Header.Get(xhttp.AmzContentSha256) == unsignedPayloadTrailer &&
r.Method == http.MethodPut
}
// Authorization type.
//
//go:generate stringer -type=authType -trimprefix=authType $GOFILE
type authType int
// List of all supported auth types.
const (
authTypeUnknown authType = iota
authTypeAnonymous
authTypePresigned
authTypePresignedV2
authTypePostPolicy
authTypeStreamingSigned
authTypeSigned
authTypeSignedV2
authTypeJWT
authTypeSTS
authTypeStreamingSignedTrailer
authTypeStreamingUnsignedTrailer
)
// Get request authentication type.
func getRequestAuthType(r *http.Request) (at authType) {
if r.URL != nil {
var err error
r.Form, err = url.ParseQuery(r.URL.RawQuery)
if err != nil {
authNLogIf(r.Context(), err)
return authTypeUnknown
}
}
if isRequestSignatureV2(r) {
return authTypeSignedV2
} else if isRequestPresignedSignatureV2(r) {
return authTypePresignedV2
} else if isRequestSignStreamingV4(r) {
return authTypeStreamingSigned
} else if isRequestSignStreamingTrailerV4(r) {
return authTypeStreamingSignedTrailer
} else if isRequestUnsignedTrailerV4(r) {
return authTypeStreamingUnsignedTrailer
} else if isRequestSignatureV4(r) {
return authTypeSigned
} else if isRequestPresignedSignatureV4(r) {
return authTypePresigned
} else if isRequestJWT(r) {
return authTypeJWT
} else if isRequestPostPolicySignatureV4(r) {
return authTypePostPolicy
} else if _, ok := r.Form[xhttp.Action]; ok {
return authTypeSTS
} else if _, ok := r.Header[xhttp.Authorization]; !ok {
return authTypeAnonymous
}
return authTypeUnknown
}
func validateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, bool, APIErrorCode) {
var cred auth.Credentials
var owner bool
s3Err := ErrAccessDenied
if _, ok := r.Header[xhttp.AmzContentSha256]; ok &&
getRequestAuthType(r) == authTypeSigned {
// Get credential information from the request.
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
if s3Err != ErrNone {
return cred, owner, s3Err
}
// we only support V4 (no presign) with auth body
s3Err = isReqAuthenticated(ctx, r, region, serviceS3)
}
if s3Err != ErrNone {
return cred, owner, s3Err
}
logger.GetReqInfo(ctx).Cred = cred
logger.GetReqInfo(ctx).Owner = owner
logger.GetReqInfo(ctx).Region = globalSite.Region()
return cred, owner, ErrNone
}
// checkAdminRequestAuth checks for authentication and authorization for the incoming
// request. It only accepts V2 and V4 requests. Presigned, JWT and anonymous requests
// are automatically rejected.
func checkAdminRequestAuth(ctx context.Context, r *http.Request, action policy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
cred, owner, s3Err := validateAdminSignature(ctx, r, region)
if s3Err != ErrNone {
return cred, s3Err
}
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.Action(action),
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
}) {
// Request is allowed; return the appropriate access key.
return cred, ErrNone
}
return cred, ErrAccessDenied
}
// Fetch the security token set by the client.
func getSessionToken(r *http.Request) (token string) {
token = r.Header.Get(xhttp.AmzSecurityToken)
if token != "" {
return token
}
return r.Form.Get(xhttp.AmzSecurityToken)
}
// Fetch claims in the security token returned by the client; this doesn't return
// errors - upon errors the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
claims, _ := getClaimsFromToken(getSessionToken(r))
return claims
}
func getClaimsFromTokenWithSecret(token, secret string) (*xjwt.MapClaims, error) {
// JWT token for x-amz-security-token is signed with admin
// secret key, temporary credentials become invalid if
// server admin credentials change. This is done to ensure
// that clients cannot decode the token using the temp
// secret keys and generate an entirely new claim by essentially
// hijacking the policies. We need to make sure that this is
// based on admin credential such that token cannot be decoded
// on the client side and is treated like an opaque value.
claims, err := auth.ExtractClaims(token, secret)
if err != nil {
if subtle.ConstantTimeCompare([]byte(secret), []byte(globalActiveCred.SecretKey)) == 1 {
return nil, errAuthentication
}
claims, err = auth.ExtractClaims(token, globalActiveCred.SecretKey)
if err != nil {
return nil, errAuthentication
}
}
// If AuthZPlugin is set, return without any further checks.
if newGlobalAuthZPluginFn() != nil {
return claims, nil
}
// Check if a session policy is set. If so, decode it here.
sp, spok := claims.Lookup(policy.SessionPolicyName)
if spok {
// Looks like a session sub-policy is set and is a string. If set, it is
// base64 encoded, so decode it here. If decoding fails, reject the
// request.
spBytes, err := base64.StdEncoding.DecodeString(sp)
if err != nil {
// Base64 decoding failed; log it to indicate that
// the request sent by the client is malformed.
authNLogIf(GlobalContext, err, logger.ErrorKind)
return nil, errAuthentication
}
claims.MapClaims[sessionPolicyNameExtracted] = string(spBytes)
}
return claims, nil
}
// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
jwtClaims, err := getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
if err != nil {
return nil, err
}
return jwtClaims.Map(), nil
}
// Fetch claims in the security token returned by the client and validate the token.
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
token := getSessionToken(r)
if token != "" && cred.AccessKey == "" {
// x-amz-security-token is not allowed for anonymous access.
return nil, ErrNoAccessKey
}
if token == "" && cred.IsTemp() && !cred.IsServiceAccount() {
// Temporary credentials should always have x-amz-security-token
return nil, ErrInvalidToken
}
if token != "" && !cred.IsTemp() {
// x-amz-security-token should not be present for static credentials.
return nil, ErrInvalidToken
}
if !cred.IsServiceAccount() && cred.IsTemp() && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
// validate token for temporary credentials only.
return nil, ErrInvalidToken
}
// Expired credentials must return error right away.
if cred.IsTemp() && cred.IsExpired() {
return nil, toAPIErrorCode(r.Context(), errInvalidAccessKeyID)
}
secret := globalActiveCred.SecretKey
if globalSiteReplicationSys.isEnabled() && cred.AccessKey != siteReplicatorSvcAcc {
nsecret, err := getTokenSigningKey()
if err != nil {
return nil, toAPIErrorCode(r.Context(), err)
}
// sign root's temporary accounts also with site replicator creds
if cred.ParentUser != globalActiveCred.AccessKey || cred.IsTemp() {
secret = nsecret
}
}
if cred.IsServiceAccount() {
token = cred.SessionToken
secret = cred.SecretKey
}
if token != "" {
claims, err := getClaimsFromTokenWithSecret(token, secret)
if err != nil {
return nil, toAPIErrorCode(r.Context(), err)
}
return claims.Map(), ErrNone
}
claims := xjwt.NewMapClaims()
return claims.Map(), ErrNone
}
// Check request auth type verifies the incoming http request
// - validates the request signature
// - validates the policy action if anonymous tests bucket policies if any,
// for authenticated requests validates IAM policies.
//
// returns APIErrorCode if any to be replied to the client.
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
logger.GetReqInfo(ctx).BucketName = bucketName
logger.GetReqInfo(ctx).ObjectName = objectName
_, _, s3Err = checkRequestAuthTypeCredential(ctx, r, action)
return s3Err
}
// checkRequestAuthTypeWithVID is similar to checkRequestAuthType
// passes versionID additionally.
func checkRequestAuthTypeWithVID(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName, versionID string) (s3Err APIErrorCode) {
logger.GetReqInfo(ctx).BucketName = bucketName
logger.GetReqInfo(ctx).ObjectName = objectName
logger.GetReqInfo(ctx).VersionID = versionID
_, _, s3Err = checkRequestAuthTypeCredential(ctx, r, action)
return s3Err
}
func authenticateRequest(ctx context.Context, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
if logger.GetReqInfo(ctx) == nil {
bugLogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.ErrorKind)
return ErrAccessDenied
}
var cred auth.Credentials
var owner bool
switch getRequestAuthType(r) {
case authTypeUnknown, authTypeStreamingSigned, authTypeStreamingSignedTrailer, authTypeStreamingUnsignedTrailer:
return ErrSignatureVersionNotSupported
case authTypePresignedV2, authTypeSignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeSigned, authTypePresigned:
region := globalSite.Region()
switch action {
case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
region = ""
}
if s3Err = isReqAuthenticated(ctx, r, region, serviceS3); s3Err != ErrNone {
return s3Err
}
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return s3Err
}
logger.GetReqInfo(ctx).Cred = cred
logger.GetReqInfo(ctx).Owner = owner
logger.GetReqInfo(ctx).Region = globalSite.Region()
// region is valid only for CreateBucketAction.
var region string
if action == policy.CreateBucketAction {
// To extract region from XML in request body, get copy of request body.
payload, err := io.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
if err != nil {
authZLogIf(ctx, err, logger.ErrorKind)
return ErrMalformedXML
}
// Populate payload to extract location constraint.
r.Body = io.NopCloser(bytes.NewReader(payload))
region, s3Err = parseLocationConstraint(r)
if s3Err != ErrNone {
return s3Err
}
// Populate payload again to handle it in HTTP handler.
r.Body = io.NopCloser(bytes.NewReader(payload))
}
logger.GetReqInfo(ctx).Region = region
return s3Err
}
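The CreateBucket branch above buffers the request body so the location constraint can be parsed and the body can still be re-read by the handler. A generic sketch of that pattern follows; the helper name and limit parameter are assumptions, not part of this file.
func exampleRewindBody(r *http.Request, limit int64) ([]byte, error) {
// Read at most `limit` bytes, then restore the body for downstream readers.
payload, err := io.ReadAll(io.LimitReader(r.Body, limit))
if err != nil {
return nil, err
}
r.Body = io.NopCloser(bytes.NewReader(payload))
return payload, nil
}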
func authorizeRequest(ctx context.Context, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
reqInfo := logger.GetReqInfo(ctx)
if reqInfo == nil {
return ErrAccessDenied
}
cred := reqInfo.Cred
owner := reqInfo.Owner
region := reqInfo.Region
bucket := reqInfo.BucketName
object := reqInfo.ObjectName
versionID := reqInfo.VersionID
if action != policy.ListAllMyBucketsAction && cred.AccessKey == "" {
// Anonymous checks are not meant for ListAllBuckets action
if globalPolicySys.IsAllowed(policy.BucketPolicyArgs{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: action,
BucketName: bucket,
ConditionValues: getConditionValues(r, region, auth.AnonymousCredentials),
IsOwner: false,
ObjectName: object,
}) {
// Request is allowed; return the appropriate access key.
return ErrNone
}
if action == policy.ListBucketVersionsAction {
// In AWS S3, the s3:ListBucket permission is the same as the s3:ListBucketVersions permission;
// verify as a fallback.
if globalPolicySys.IsAllowed(policy.BucketPolicyArgs{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListBucketAction,
BucketName: bucket,
ConditionValues: getConditionValues(r, region, auth.AnonymousCredentials),
IsOwner: false,
ObjectName: object,
}) {
// Request is allowed; return the appropriate access key.
return ErrNone
}
}
return ErrAccessDenied
}
if action == policy.DeleteObjectAction && versionID != "" {
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.Action(policy.DeleteObjectVersionAction),
BucketName: bucket,
ConditionValues: getConditionValues(r, "", cred),
ObjectName: object,
IsOwner: owner,
Claims: cred.Claims,
DenyOnly: true,
}) { // Request is not allowed if Deny action on DeleteObjectVersionAction
return ErrAccessDenied
}
}
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: action,
BucketName: bucket,
ConditionValues: getConditionValues(r, "", cred),
ObjectName: object,
IsOwner: owner,
Claims: cred.Claims,
}) {
// Request is allowed; return the appropriate access key.
return ErrNone
}
if action == policy.ListBucketVersionsAction {
// In AWS S3, the s3:ListBucket permission is the same as the s3:ListBucketVersions permission;
// verify as a fallback.
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListBucketAction,
BucketName: bucket,
ConditionValues: getConditionValues(r, "", cred),
ObjectName: object,
IsOwner: owner,
Claims: cred.Claims,
}) {
// Request is allowed; return the appropriate access key.
return ErrNone
}
}
return ErrAccessDenied
}
// Check request auth type verifies the incoming http request
// - validates the request signature
// - validates the policy action if anonymous tests bucket policies if any,
// for authenticated requests validates IAM policies.
//
// returns APIErrorCode if any to be replied to the client.
// Additionally returns the accessKey used in the request, and if this request is by an admin.
func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action policy.Action) (cred auth.Credentials, owner bool, s3Err APIErrorCode) {
s3Err = authenticateRequest(ctx, r, action)
reqInfo := logger.GetReqInfo(ctx)
if reqInfo == nil {
return cred, owner, ErrAccessDenied
}
cred = reqInfo.Cred
owner = reqInfo.Owner
if s3Err != ErrNone {
return cred, owner, s3Err
}
return cred, owner, authorizeRequest(ctx, r, action)
}
// Verify if request has valid AWS Signature Version '2'.
func isReqAuthenticatedV2(r *http.Request) (s3Error APIErrorCode) {
if isRequestSignatureV2(r) {
return doesSignV2Match(r)
}
return doesPresignV2SignatureMatch(r)
}
func reqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error APIErrorCode) {
sha256sum := getContentSha256Cksum(r, stype)
switch {
case isRequestSignatureV4(r):
return doesSignatureMatch(sha256sum, r, region, stype)
case isRequestPresignedSignatureV4(r):
return doesPresignedSignatureMatch(sha256sum, r, region, stype)
default:
return ErrAccessDenied
}
}
// Verify if request has valid AWS Signature Version '4'.
func isReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error APIErrorCode) {
if errCode := reqSignatureV4Verify(r, region, stype); errCode != ErrNone {
return errCode
}
clientETag, err := etag.FromContentMD5(r.Header)
if err != nil {
return ErrInvalidDigest
}
// Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned)
// Do not verify 'X-Amz-Content-Sha256' if skipSHA256.
var contentSHA256 []byte
if skipSHA256 := skipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) {
if sha256Sum, ok := r.Form[xhttp.AmzContentSha256]; ok && len(sha256Sum) > 0 {
contentSHA256, err = hex.DecodeString(sha256Sum[0])
if err != nil {
return ErrContentSHA256Mismatch
}
}
} else if _, ok := r.Header[xhttp.AmzContentSha256]; !skipSHA256 && ok {
contentSHA256, err = hex.DecodeString(r.Header.Get(xhttp.AmzContentSha256))
if err != nil || len(contentSHA256) == 0 {
return ErrContentSHA256Mismatch
}
}
// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
// The verification happens implicitly during reading.
reader, err := hash.NewReader(ctx, r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1)
if err != nil {
return toAPIErrorCode(ctx, err)
}
r.Body = reader
return ErrNone
}
// List of all supported S3 auth types.
var supportedS3AuthTypes = map[authType]struct{}{
authTypeAnonymous: {},
authTypePresigned: {},
authTypePresignedV2: {},
authTypeSigned: {},
authTypeSignedV2: {},
authTypePostPolicy: {},
authTypeStreamingSigned: {},
authTypeStreamingSignedTrailer: {},
authTypeStreamingUnsignedTrailer: {},
}
// Validate if the authType is valid and supported.
func isSupportedS3AuthType(aType authType) bool {
_, ok := supportedS3AuthTypes[aType]
return ok
}
// setAuthMiddleware to validate authorization header for the incoming request.
func setAuthMiddleware(h http.Handler) http.Handler {
// handler for validating incoming authorization headers.
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
tc, ok := r.Context().Value(mcontext.ContextTraceKey).(*mcontext.TraceCtxt)
aType := getRequestAuthType(r)
switch aType {
case authTypeSigned, authTypeSignedV2, authTypeStreamingSigned, authTypeStreamingSignedTrailer:
// Verify if date headers are set, if not reject the request
amzDate, errCode := parseAmzDateHeader(r)
if errCode != ErrNone {
if ok {
tc.FuncName = "handler.Auth"
tc.ResponseRecorder.LogErrBody = true
}
// All our internal APIs are sensitive to the Date
// header; requests where the Date header is not
// present are rejected.
defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(errCode), r.URL)
atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
return
}
// Verify if the request date header is shifted by less than globalMaxSkewTime parameter in the past
// or in the future, reject request otherwise.
curTime := UTCNow()
if curTime.Sub(amzDate) > globalMaxSkewTime || amzDate.Sub(curTime) > globalMaxSkewTime {
if ok {
tc.FuncName = "handler.Auth"
tc.ResponseRecorder.LogErrBody = true
}
defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrRequestTimeTooSkewed), r.URL)
atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
return
}
h.ServeHTTP(w, r)
return
case authTypeJWT, authTypeSTS:
h.ServeHTTP(w, r)
return
default:
if isSupportedS3AuthType(aType) {
h.ServeHTTP(w, r)
return
}
}
if ok {
tc.FuncName = "handler.Auth"
tc.ResponseRecorder.LogErrBody = true
}
defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL)
atomic.AddUint64(&globalHTTPStats.rejectedRequestsAuth, 1)
})
}
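A minimal sketch of the clock-skew window check applied above; the 15-minute value is an assumption for illustration only, the server actually uses globalMaxSkewTime.
func exampleTooSkewed(amzDate, curTime time.Time) bool {
const maxSkew = 15 * time.Minute // assumed window for this sketch
// Reject dates that are too far in the past or the future.
return curTime.Sub(amzDate) > maxSkew || amzDate.Sub(curTime) > maxSkew
}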
func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool) (s3Err APIErrorCode) {
var retSet bool
if cred.AccessKey == "" {
return ErrAccessDenied
}
conditions := getConditionValues(r, "", cred)
conditions["object-lock-mode"] = []string{string(retMode)}
conditions["object-lock-retain-until-date"] = []string{retDate.UTC().Format(time.RFC3339)}
if retDays > 0 {
conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
}
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ObjectName: objectName,
ConditionValues: conditions,
IsOwner: owner,
Claims: cred.Claims,
})
}
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
ObjectName: objectName,
IsOwner: owner,
Claims: cred.Claims,
}) {
retSet = true
}
if byPassSet || retSet {
return ErrNone
}
return ErrAccessDenied
}
// isPutActionAllowed - check if the PUT operation is allowed on the resource; this
// call verifies bucket policies and IAM policies, and supports multi-user
// checks etc.
func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
var cred auth.Credentials
var owner bool
region := globalSite.Region()
switch atype {
case authTypeUnknown:
return ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeStreamingSigned, authTypePresigned, authTypeSigned, authTypeStreamingSignedTrailer:
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
case authTypeStreamingUnsignedTrailer:
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
if s3Err == ErrMissingFields {
// Could be anonymous. cred + owner is zero value.
s3Err = ErrNone
}
}
if s3Err != ErrNone {
return s3Err
}
logger.GetReqInfo(ctx).Cred = cred
logger.GetReqInfo(ctx).Owner = owner
logger.GetReqInfo(ctx).Region = region
// Do not check for PutObjectRetentionAction permission
// if mode and retain-until date are not set.
// This can happen when the bucket has a default lock config set.
if action == policy.PutObjectRetentionAction &&
r.Header.Get(xhttp.AmzObjectLockMode) == "" &&
r.Header.Get(xhttp.AmzObjectLockRetainUntilDate) == "" {
return ErrNone
}
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.BucketPolicyArgs{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: action,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", auth.AnonymousCredentials),
IsOwner: false,
ObjectName: objectName,
}) {
return ErrNone
}
return ErrAccessDenied
}
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: action,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred),
ObjectName: objectName,
IsOwner: owner,
Claims: cred.Claims,
}) {
return ErrNone
}
return ErrAccessDenied
}

501
cmd/auth-handler_test.go Normal file
View File

@ -0,0 +1,501 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"io"
"net/http"
"net/url"
"os"
"testing"
"time"
"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v3/policy"
)
type nullReader struct{}
func (r *nullReader) Read(b []byte) (int, error) {
return len(b), nil
}
// Test get request auth type.
func TestGetRequestAuthType(t *testing.T) {
type testCase struct {
req *http.Request
authT authType
}
nopCloser := io.NopCloser(io.LimitReader(&nullReader{}, 1024))
testCases := []testCase{
// Test case - 1
// Check for generic signature v4 header.
{
req: &http.Request{
URL: &url.URL{
Host: "127.0.0.1:9000",
Scheme: httpScheme,
Path: SlashSeparator,
},
Header: http.Header{
"Authorization": []string{"AWS4-HMAC-SHA256 <cred_string>"},
"X-Amz-Content-Sha256": []string{streamingContentSHA256},
"Content-Encoding": []string{streamingContentEncoding},
},
Method: http.MethodPut,
Body: nopCloser,
},
authT: authTypeStreamingSigned,
},
// Test case - 2
// Check for JWT header.
{
req: &http.Request{
URL: &url.URL{
Host: "127.0.0.1:9000",
Scheme: httpScheme,
Path: SlashSeparator,
},
Header: http.Header{
"Authorization": []string{"Bearer 12313123"},
},
},
authT: authTypeJWT,
},
// Test case - 3
// Empty authorization header.
{
req: &http.Request{
URL: &url.URL{
Host: "127.0.0.1:9000",
Scheme: httpScheme,
Path: SlashSeparator,
},
Header: http.Header{
"Authorization": []string{""},
},
},
authT: authTypeUnknown,
},
// Test case - 4
// Check for presigned.
{
req: &http.Request{
URL: &url.URL{
Host: "127.0.0.1:9000",
Scheme: httpScheme,
Path: SlashSeparator,
RawQuery: "X-Amz-Credential=EXAMPLEINVALIDEXAMPL%2Fs3%2F20160314%2Fus-east-1",
},
},
authT: authTypePresigned,
},
// Test case - 5
// Check for post policy.
{
req: &http.Request{
URL: &url.URL{
Host: "127.0.0.1:9000",
Scheme: httpScheme,
Path: SlashSeparator,
},
Header: http.Header{
"Content-Type": []string{"multipart/form-data"},
},
Method: http.MethodPost,
Body: nopCloser,
},
authT: authTypePostPolicy,
},
}
// Tests all request auth types.
for i, testc := range testCases {
authT := getRequestAuthType(testc.req)
if authT != testc.authT {
t.Errorf("Test %d: Expected %d, got %d", i+1, testc.authT, authT)
}
}
}
// Test all s3 supported auth types.
func TestS3SupportedAuthType(t *testing.T) {
type testCase struct {
authT authType
pass bool
}
// List of all valid and invalid test cases.
testCases := []testCase{
// Test 1 - supported s3 type anonymous.
{
authT: authTypeAnonymous,
pass: true,
},
// Test 2 - supported s3 type presigned.
{
authT: authTypePresigned,
pass: true,
},
// Test 3 - supported s3 type signed.
{
authT: authTypeSigned,
pass: true,
},
// Test 4 - supported s3 type with post policy.
{
authT: authTypePostPolicy,
pass: true,
},
// Test 5 - supported s3 type with streaming signed.
{
authT: authTypeStreamingSigned,
pass: true,
},
// Test 6 - supported s3 type with signature v2.
{
authT: authTypeSignedV2,
pass: true,
},
// Test 7 - supported s3 type with presign v2.
{
authT: authTypePresignedV2,
pass: true,
},
// Test 8 - JWT is not supported s3 type.
{
authT: authTypeJWT,
pass: false,
},
// Test 9 - unknown auth header is not supported s3 type.
{
authT: authTypeUnknown,
pass: false,
},
// Test 10 - some new auth type is not supported s3 type.
{
authT: authType(9),
pass: false,
},
}
// Validate all the test cases.
for i, tt := range testCases {
ok := isSupportedS3AuthType(tt.authT)
if ok != tt.pass {
t.Errorf("Test %d:, Expected %t, got %t", i+1, tt.pass, ok)
}
}
}
func TestIsRequestPresignedSignatureV2(t *testing.T) {
testCases := []struct {
inputQueryKey string
inputQueryValue string
expectedResult bool
}{
// Test case - 1.
// No presign query key set.
{"", "", false},
// Test case - 2.
// Query key "AWSAccessKeyId" set.
{"AWSAccessKeyId", "", true},
// Test case - 3.
{"X-Amz-Content-Sha256", "", false},
}
for i, testCase := range testCases {
// creating an input HTTP request.
// Only the query parameters are relevant for this particular test.
inputReq, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
if err != nil {
t.Fatalf("Error initializing input HTTP request: %v", err)
}
q := inputReq.URL.Query()
q.Add(testCase.inputQueryKey, testCase.inputQueryValue)
inputReq.URL.RawQuery = q.Encode()
inputReq.ParseForm()
actualResult := isRequestPresignedSignatureV2(inputReq)
if testCase.expectedResult != actualResult {
t.Errorf("Test %d: Expected the result to `%v`, but instead got `%v`", i+1, testCase.expectedResult, actualResult)
}
}
}
// TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature version v4 detection.
func TestIsRequestPresignedSignatureV4(t *testing.T) {
testCases := []struct {
inputQueryKey string
inputQueryValue string
expectedResult bool
}{
// Test case - 1.
// Test case with query key ""X-Amz-Credential" set.
{"", "", false},
// Test case - 2.
{"X-Amz-Credential", "", true},
// Test case - 3.
{"X-Amz-Content-Sha256", "", false},
}
for i, testCase := range testCases {
// creating an input HTTP request.
// Only the query parameters are relevant for this particular test.
inputReq, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
if err != nil {
t.Fatalf("Error initializing input HTTP request: %v", err)
}
q := inputReq.URL.Query()
q.Add(testCase.inputQueryKey, testCase.inputQueryValue)
inputReq.URL.RawQuery = q.Encode()
inputReq.ParseForm()
actualResult := isRequestPresignedSignatureV4(inputReq)
if testCase.expectedResult != actualResult {
t.Errorf("Test %d: Expected the result to `%v`, but instead got `%v`", i+1, testCase.expectedResult, actualResult)
}
}
}
// Provides a fully populated http request instance, fails the test otherwise.
func mustNewRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
req, err := newTestRequest(method, urlStr, contentLength, body)
if err != nil {
t.Fatalf("Unable to initialize new http request %s", err)
}
return req
}
// This is similar to mustNewRequest but additionally the request
// is signed with AWS Signature V4, fails if not able to do so.
func mustNewSignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
cred := globalActiveCred
if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
t.Fatalf("Unable to initialized new signed http request %s", err)
}
return req
}
// This is similar to mustNewRequest but additionally the request
// is signed with AWS Signature V2, fails if not able to do so.
func mustNewSignedV2Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
cred := globalActiveCred
if err := signRequestV2(req, cred.AccessKey, cred.SecretKey); err != nil {
t.Fatalf("Unable to initialized new signed http request %s", err)
}
return req
}
// This is similar to mustNewRequest but additionally the request
// is presigned with AWS Signature V2, fails if not able to do so.
func mustNewPresignedV2Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
cred := globalActiveCred
if err := preSignV2(req, cred.AccessKey, cred.SecretKey, time.Now().Add(10*time.Minute).Unix()); err != nil {
t.Fatalf("Unable to initialized new signed http request %s", err)
}
return req
}
// This is similar to mustNewRequest but additionally the request
// is presigned with AWS Signature V4, fails if not able to do so.
func mustNewPresignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
cred := globalActiveCred
if err := preSignV4(req, cred.AccessKey, cred.SecretKey, time.Now().Add(10*time.Minute).Unix()); err != nil {
t.Fatalf("Unable to initialized new signed http request %s", err)
}
return req
}
func mustNewSignedShortMD5Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
req.Header.Set("Content-Md5", "invalid-digest")
cred := globalActiveCred
if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
t.Fatalf("Unable to initialized new signed http request %s", err)
}
return req
}
func mustNewSignedEmptyMD5Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
req.Header.Set("Content-Md5", "")
cred := globalActiveCred
if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
t.Fatalf("Unable to initialized new signed http request %s", err)
}
return req
}
func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int64,
body io.ReadSeeker, t *testing.T,
) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
req.Header.Set("Content-Md5", "YWFhYWFhYWFhYWFhYWFhCg==")
cred := globalActiveCred
if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
t.Fatalf("Unable to initialized new signed http request %s", err)
}
return req
}
// Tests the isReqAuthenticated function and validates the returned S3 error codes.
func TestIsReqAuthenticated(t *testing.T) {
ctx, cancel := context.WithCancel(GlobalContext)
defer cancel()
objLayer, fsDir, err := prepareFS(ctx)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("unable initialize config file, %s", err)
}
initAllSubsystems(ctx)
initConfigSubsystem(ctx, objLayer)
creds, err := auth.CreateCredentials("myuser", "mypassword")
if err != nil {
t.Fatalf("unable create credential, %s", err)
}
globalActiveCred = creds
globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)
// List of test cases for validating http request authentication.
testCases := []struct {
req *http.Request
s3Error APIErrorCode
}{
// When request is unsigned, access denied is returned.
{mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
// Empty Content-Md5 header.
{mustNewSignedEmptyMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
// Short Content-Md5 header.
{mustNewSignedShortMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
// When request is properly signed, but has bad Content-MD5 header.
{mustNewSignedBadMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
// When request is properly signed, error is none.
{mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrNone},
}
// Validates all testcases.
for i, testCase := range testCases {
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region(), serviceS3)
if s3Error != testCase.s3Error {
if _, err := io.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
}
}
}
}
func TestCheckAdminRequestAuthType(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
objLayer, fsDir, err := prepareFS(ctx)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("unable initialize config file, %s", err)
}
creds, err := auth.CreateCredentials("myuser", "mypassword")
if err != nil {
t.Fatalf("unable create credential, %s", err)
}
globalActiveCred = creds
testCases := []struct {
Request *http.Request
ErrCode APIErrorCode
}{
{Request: mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
{Request: mustNewSignedV2Request(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedV2Request(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
}
for i, testCase := range testCases {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region()); s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
}
}
}
func TestValidateAdminSignature(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
objLayer, fsDir, err := prepareFS(ctx)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("unable initialize config file, %s", err)
}
initAllSubsystems(ctx)
initConfigSubsystem(ctx, objLayer)
creds, err := auth.CreateCredentials("admin", "mypassword")
if err != nil {
t.Fatalf("unable create credential, %s", err)
}
globalActiveCred = creds
globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)
testCases := []struct {
AccessKey string
SecretKey string
ErrCode APIErrorCode
}{
{"", "", ErrInvalidAccessKeyID},
{"admin", "", ErrSignatureDoesNotMatch},
{"admin", "wrongpassword", ErrSignatureDoesNotMatch},
{"wronguser", "mypassword", ErrInvalidAccessKeyID},
{"", "mypassword", ErrInvalidAccessKeyID},
{"admin", "mypassword", ErrNone},
}
for i, testCase := range testCases {
req := mustNewRequest(http.MethodGet, "http://localhost:9000/", 0, nil, t)
if err := signRequestV4(req, testCase.AccessKey, testCase.SecretKey); err != nil {
t.Fatalf("Unable to initialized new signed http request %s", err)
}
_, _, s3Error := validateAdminSignature(ctx, req, globalMinioDefaultRegion)
if s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i+1, testCase.ErrCode, s3Error)
}
}
}

34
cmd/authtype_string.go Normal file
View File

@ -0,0 +1,34 @@
// Code generated by "stringer -type=authType -trimprefix=authType auth-handler.go"; DO NOT EDIT.
package cmd
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[authTypeUnknown-0]
_ = x[authTypeAnonymous-1]
_ = x[authTypePresigned-2]
_ = x[authTypePresignedV2-3]
_ = x[authTypePostPolicy-4]
_ = x[authTypeStreamingSigned-5]
_ = x[authTypeSigned-6]
_ = x[authTypeSignedV2-7]
_ = x[authTypeJWT-8]
_ = x[authTypeSTS-9]
_ = x[authTypeStreamingSignedTrailer-10]
_ = x[authTypeStreamingUnsignedTrailer-11]
}
const _authType_name = "UnknownAnonymousPresignedPresignedV2PostPolicyStreamingSignedSignedSignedV2JWTSTSStreamingSignedTrailerStreamingUnsignedTrailer"
var _authType_index = [...]uint8{0, 7, 16, 25, 36, 46, 61, 67, 75, 78, 81, 103, 127}
func (i authType) String() string {
if i < 0 || i >= authType(len(_authType_index)-1) {
return "authType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _authType_name[_authType_index[i]:_authType_index[i+1]]
}
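// Illustrative note (added for clarity, not part of the generated code):
// String() slices the packed _authType_name string using the index table,
// e.g. authTypeJWT (value 8) yields _authType_name[75:78] == "JWT".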

189
cmd/background-heal-ops.go Normal file
View File

@ -0,0 +1,189 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"fmt"
"runtime"
"strconv"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/pkg/v3/env"
)
// healTask represents what to heal along with options
//
// path: '/' => Heal disk formats along with metadata
// path: 'bucket/' or '/bucket/' => Heal bucket
// path: 'bucket/object' => Heal object
type healTask struct {
bucket string
object string
versionID string
opts madmin.HealOpts
// Healing response will be sent here
respCh chan healResult
}
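// Illustrative sketch (added for clarity, not part of the original commit): how a
// caller might queue a heal task for a single object, following the path semantics
// documented above, and wait for the result. The bucket/object names and heal
// options are assumptions used only for demonstration.
func exampleQueueObjectHeal(h *healRoutine) (madmin.HealResultItem, error) {
	respCh := make(chan healResult, 1)
	h.tasks <- healTask{
		bucket:    "mybucket", // 'bucket/object' => heal a single object
		object:    "photos/2021/a.jpg",
		versionID: "", // empty: heal the latest version
		opts:      madmin.HealOpts{ScanMode: madmin.HealNormalScan},
		respCh:    respCh, // the worker replies on this channel
	}
	res := <-respCh
	return res.result, res.err
}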
// healResult represents a healing result with a possible error
type healResult struct {
result madmin.HealResultItem
err error
}
// healRoutine receives heal tasks to heal buckets, objects and format.json
type healRoutine struct {
tasks chan healTask
workers int
}
func activeListeners() int {
// Bucket notification and http trace are not costly, it is okay to ignore them
// while counting the number of concurrent connections
return int(globalHTTPListen.Subscribers()) + int(globalTrace.Subscribers())
}
func waitForLowIO(maxIO int, maxWait time.Duration, currentIO func() int) {
// No need to wait, run at full speed.
if maxIO <= 0 {
return
}
const waitTick = 100 * time.Millisecond
tmpMaxWait := maxWait
for currentIO() >= maxIO {
if tmpMaxWait > 0 {
if tmpMaxWait < waitTick {
time.Sleep(tmpMaxWait)
return
}
time.Sleep(waitTick)
tmpMaxWait -= waitTick
}
if tmpMaxWait <= 0 {
return
}
}
}
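// Explanatory note (added for clarity, not in the original commit): waitForLowIO
// polls currentIO() in 100ms ticks and returns as soon as the count drops below
// maxIO or the accumulated wait exceeds maxWait; a non-positive maxWait makes it
// return immediately even while IO is still high.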
func currentHTTPIO() int {
httpServer := newHTTPServerFn()
if httpServer == nil {
return 0
}
return httpServer.GetRequestCount() - activeListeners()
}
func waitForLowHTTPReq() {
maxIO, maxWait, _ := globalHealConfig.Clone()
waitForLowIO(maxIO, maxWait, currentHTTPIO)
}
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
bgSeq := newBgHealSequence()
// Run the background healer
for i := 0; i < globalBackgroundHealRoutine.workers; i++ {
go globalBackgroundHealRoutine.AddWorker(ctx, objAPI, bgSeq)
}
globalBackgroundHealState.LaunchNewHealSequence(bgSeq, objAPI)
}
// Wait for heal requests and process them
func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer, bgSeq *healSequence) {
for {
select {
case task, ok := <-h.tasks:
if !ok {
return
}
var res madmin.HealResultItem
var err error
switch task.bucket {
case nopHeal:
err = errSkipFile
case SlashSeparator:
res, err = healDiskFormat(ctx, objAPI, task.opts)
default:
if task.object == "" {
res, err = objAPI.HealBucket(ctx, task.bucket, task.opts)
} else {
res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
}
}
if task.respCh != nil {
task.respCh <- healResult{result: res, err: err}
continue
}
// when respCh is not set the caller is not waiting, but we
// still update the relevant metrics for them
if bgSeq != nil {
if err == nil {
bgSeq.countHealed(res.Type)
} else {
bgSeq.countFailed(res.Type)
}
}
case <-ctx.Done():
return
}
}
}
func newHealRoutine() *healRoutine {
workers := runtime.GOMAXPROCS(0) / 2
if envHealWorkers := env.Get("_MINIO_HEAL_WORKERS", ""); envHealWorkers != "" {
if numHealers, err := strconv.Atoi(envHealWorkers); err != nil {
bugLogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err))
} else {
workers = numHealers
}
}
if workers == 0 {
workers = 4
}
return &healRoutine{
tasks: make(chan healTask),
workers: workers,
}
}
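// Explanatory note (added for clarity, not in the original commit): the default
// worker count is GOMAXPROCS/2 (e.g. 4 workers on an 8-core host); the
// _MINIO_HEAL_WORKERS environment variable overrides it, and a computed value
// of 0 falls back to 4.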
// healDiskFormat - heals format.json, return value indicates whether a
// failure occurred.
func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpts) (madmin.HealResultItem, error) {
res, err := objAPI.HealFormat(ctx, opts.DryRun)
// return any error, ignore error returned when disks have
// already healed.
if err != nil && err != errNoHealRequired {
return madmin.HealResultItem{}, err
}
return res, nil
}

View File

@ -0,0 +1,609 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"sort"
"strings"
"sync"
"time"
"github.com/dustin/go-humanize"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/config"
"github.com/minio/pkg/v3/env"
)
const (
defaultMonitorNewDiskInterval = time.Second * 10
healingTrackerFilename = ".healing.bin"
)
//go:generate msgp -file $GOFILE -unexported
// healingTracker is used to persist healing information during a heal.
type healingTracker struct {
disk StorageAPI `msg:"-"`
mu *sync.RWMutex `msg:"-"`
ID string
PoolIndex int
SetIndex int
DiskIndex int
Path string
Endpoint string
Started time.Time
LastUpdate time.Time
ObjectsTotalCount uint64
ObjectsTotalSize uint64
ItemsHealed uint64
ItemsFailed uint64
BytesDone uint64
BytesFailed uint64
// Last object scanned.
Bucket string `json:"-"`
Object string `json:"-"`
// Numbers when current bucket started healing,
// for resuming with correct numbers.
ResumeItemsHealed uint64 `json:"-"`
ResumeItemsFailed uint64 `json:"-"`
ResumeItemsSkipped uint64 `json:"-"`
ResumeBytesDone uint64 `json:"-"`
ResumeBytesFailed uint64 `json:"-"`
ResumeBytesSkipped uint64 `json:"-"`
// Filled on startup/restarts.
QueuedBuckets []string
// Filled during heal.
HealedBuckets []string
// ID of the current healing operation
HealID string
ItemsSkipped uint64
BytesSkipped uint64
RetryAttempts uint64
Finished bool // finished healing, whether with errors or not
// Add future tracking capabilities
// Be sure that they are included in toHealingDisk
}
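// Explanatory note (added for clarity, not in the original commit): the Resume*
// fields snapshot the live counters at the last completed bucket; bucketDone()
// refreshes the snapshot when a bucket finishes and resume() rolls the counters
// back to it, so an interrupted heal restarts from the last completed bucket
// with consistent numbers.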
// loadHealingTracker will load the healing tracker from the supplied disk.
// The disk ID will be validated against the loaded one.
func loadHealingTracker(ctx context.Context, disk StorageAPI) (*healingTracker, error) {
if disk == nil {
return nil, errors.New("loadHealingTracker: nil drive given")
}
diskID, err := disk.GetDiskID()
if err != nil {
return nil, err
}
b, err := disk.ReadAll(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, healingTrackerFilename))
if err != nil {
return nil, err
}
var h healingTracker
_, err = h.UnmarshalMsg(b)
if err != nil {
return nil, err
}
if h.ID != diskID && h.ID != "" {
return nil, fmt.Errorf("loadHealingTracker: drive id mismatch expected %s, got %s", h.ID, diskID)
}
h.disk = disk
h.ID = diskID
h.mu = &sync.RWMutex{}
return &h, nil
}
// newHealingTracker will create a new healing tracker for the disk.
func newHealingTracker() *healingTracker {
return &healingTracker{
mu: &sync.RWMutex{},
}
}
func initHealingTracker(disk StorageAPI, healID string) *healingTracker {
h := newHealingTracker()
diskID, _ := disk.GetDiskID()
h.disk = disk
h.ID = diskID
h.HealID = healID
h.Path = disk.String()
h.Endpoint = disk.Endpoint().String()
h.Started = time.Now().UTC()
h.PoolIndex, h.SetIndex, h.DiskIndex = disk.GetDiskLoc()
return h
}
func (h *healingTracker) resetHealing() {
h.mu.Lock()
defer h.mu.Unlock()
h.ItemsHealed = 0
h.ItemsFailed = 0
h.BytesDone = 0
h.BytesFailed = 0
h.ResumeItemsHealed = 0
h.ResumeItemsFailed = 0
h.ResumeBytesDone = 0
h.ResumeBytesFailed = 0
h.ItemsSkipped = 0
h.BytesSkipped = 0
h.HealedBuckets = nil
h.Object = ""
h.Bucket = ""
}
func (h *healingTracker) getLastUpdate() time.Time {
h.mu.RLock()
defer h.mu.RUnlock()
return h.LastUpdate
}
func (h *healingTracker) getBucket() string {
h.mu.RLock()
defer h.mu.RUnlock()
return h.Bucket
}
func (h *healingTracker) setBucket(bucket string) {
h.mu.Lock()
defer h.mu.Unlock()
h.Bucket = bucket
}
func (h *healingTracker) getObject() string {
h.mu.RLock()
defer h.mu.RUnlock()
return h.Object
}
func (h *healingTracker) setObject(object string) {
h.mu.Lock()
defer h.mu.Unlock()
h.Object = object
}
func (h *healingTracker) updateProgress(success, skipped bool, bytes uint64) {
h.mu.Lock()
defer h.mu.Unlock()
switch {
case success:
h.ItemsHealed++
h.BytesDone += bytes
case skipped:
h.ItemsSkipped++
h.BytesSkipped += bytes
default:
h.ItemsFailed++
h.BytesFailed += bytes
}
}
// update will update the tracker on the disk.
// If the tracker has been deleted an error is returned.
func (h *healingTracker) update(ctx context.Context) error {
h.mu.Lock()
if h.ID == "" || h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
h.ID, _ = h.disk.GetDiskID()
h.PoolIndex, h.SetIndex, h.DiskIndex = h.disk.GetDiskLoc()
}
h.mu.Unlock()
return h.save(ctx)
}
// save will unconditionally save the tracker, creating it if it does not already exist.
func (h *healingTracker) save(ctx context.Context) error {
h.mu.Lock()
if h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
// Attempt to get location.
if api := newObjectLayerFn(); api != nil {
if ep, ok := api.(*erasureServerPools); ok {
h.PoolIndex, h.SetIndex, h.DiskIndex, _ = ep.getPoolAndSet(h.ID)
}
}
}
h.LastUpdate = time.Now().UTC()
htrackerBytes, err := h.MarshalMsg(nil)
h.mu.Unlock()
if err != nil {
return err
}
globalBackgroundHealState.updateHealStatus(h)
return h.disk.WriteAll(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, healingTrackerFilename),
htrackerBytes)
}
// delete the tracker on disk.
func (h *healingTracker) delete(ctx context.Context) error {
return h.disk.Delete(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, healingTrackerFilename),
DeleteOptions{
Recursive: false,
Immediate: false,
},
)
}
func (h *healingTracker) isHealed(bucket string) bool {
h.mu.RLock()
defer h.mu.RUnlock()
for _, v := range h.HealedBuckets {
if v == bucket {
return true
}
}
return false
}
// resume will reset progress to the numbers at the start of the bucket.
func (h *healingTracker) resume() {
h.mu.Lock()
defer h.mu.Unlock()
h.ItemsHealed = h.ResumeItemsHealed
h.ItemsFailed = h.ResumeItemsFailed
h.ItemsSkipped = h.ResumeItemsSkipped
h.BytesDone = h.ResumeBytesDone
h.BytesFailed = h.ResumeBytesFailed
h.BytesSkipped = h.ResumeBytesSkipped
}
// bucketDone should be called when a bucket is done healing.
// Adds the bucket to the list of healed buckets and updates resume numbers.
func (h *healingTracker) bucketDone(bucket string) {
h.mu.Lock()
defer h.mu.Unlock()
h.ResumeItemsHealed = h.ItemsHealed
h.ResumeItemsFailed = h.ItemsFailed
h.ResumeItemsSkipped = h.ItemsSkipped
h.ResumeBytesDone = h.BytesDone
h.ResumeBytesFailed = h.BytesFailed
h.ResumeBytesSkipped = h.BytesSkipped
h.HealedBuckets = append(h.HealedBuckets, bucket)
for i, b := range h.QueuedBuckets {
if b == bucket {
// Delete...
h.QueuedBuckets = append(h.QueuedBuckets[:i], h.QueuedBuckets[i+1:]...)
}
}
}
// setQueuedBuckets will add buckets, but exclude any that are already in h.HealedBuckets.
// Order is preserved.
func (h *healingTracker) setQueuedBuckets(buckets []BucketInfo) {
h.mu.Lock()
defer h.mu.Unlock()
s := set.CreateStringSet(h.HealedBuckets...)
h.QueuedBuckets = make([]string, 0, len(buckets))
for _, b := range buckets {
if !s.Contains(b.Name) {
h.QueuedBuckets = append(h.QueuedBuckets, b.Name)
}
}
}
func (h *healingTracker) printTo(writer io.Writer) {
h.mu.RLock()
defer h.mu.RUnlock()
b, err := json.MarshalIndent(h, "", " ")
if err != nil {
writer.Write([]byte(err.Error()))
return
}
writer.Write(b)
}
// toHealingDisk converts the information to madmin.HealingDisk
func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
h.mu.RLock()
defer h.mu.RUnlock()
return madmin.HealingDisk{
ID: h.ID,
HealID: h.HealID,
Endpoint: h.Endpoint,
PoolIndex: h.PoolIndex,
SetIndex: h.SetIndex,
DiskIndex: h.DiskIndex,
Finished: h.Finished,
Path: h.Path,
Started: h.Started.UTC(),
LastUpdate: h.LastUpdate.UTC(),
ObjectsTotalCount: h.ObjectsTotalCount,
ObjectsTotalSize: h.ObjectsTotalSize,
ItemsHealed: h.ItemsHealed,
ItemsSkipped: h.ItemsSkipped,
ItemsFailed: h.ItemsFailed,
BytesDone: h.BytesDone,
BytesSkipped: h.BytesSkipped,
BytesFailed: h.BytesFailed,
Bucket: h.Bucket,
Object: h.Object,
QueuedBuckets: h.QueuedBuckets,
HealedBuckets: h.HealedBuckets,
RetryAttempts: h.RetryAttempts,
ObjectsHealed: h.ItemsHealed, // Deprecated July 2021
ObjectsFailed: h.ItemsFailed, // Deprecated July 2021
}
}
func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
z, ok := objAPI.(*erasureServerPools)
if !ok {
return
}
initBackgroundHealing(ctx, objAPI) // start quick background healing
if env.Get("_MINIO_AUTO_DRIVE_HEALING", config.EnableOn) == config.EnableOn {
globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)
go monitorLocalDisksAndHeal(ctx, z)
}
go globalMRFState.startMRFPersistence()
go globalMRFState.healRoutine(z)
}
func getLocalDisksToHeal() (disksToHeal Endpoints) {
globalLocalDrivesMu.RLock()
localDrives := cloneDrives(globalLocalDrivesMap)
globalLocalDrivesMu.RUnlock()
for _, disk := range localDrives {
_, err := disk.DiskInfo(context.Background(), DiskInfoOptions{})
if errors.Is(err, errUnformattedDisk) {
disksToHeal = append(disksToHeal, disk.Endpoint())
continue
}
if h := disk.Healing(); h != nil && !h.Finished {
disksToHeal = append(disksToHeal, disk.Endpoint())
}
}
if len(disksToHeal) == globalEndpoints.NEndpoints() {
// When all disks == all command line endpoints
// this is a fresh setup, no need to trigger healing.
return Endpoints{}
}
return disksToHeal
}
var newDiskHealingTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
var errRetryHealing = errors.New("some items failed to heal, we will retry healing this drive again")
func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint) error {
poolIdx, setIdx := endpoint.PoolIdx, endpoint.SetIdx
disk := getStorageViaEndpoint(endpoint)
if disk == nil {
return fmt.Errorf("Unexpected error disk must be initialized by now after formatting: %s", endpoint)
}
_, err := disk.DiskInfo(ctx, DiskInfoOptions{})
if err != nil {
if errors.Is(err, errDriveIsRoot) {
// This is a root drive, ignore and move on
return nil
}
if !errors.Is(err, errUnformattedDisk) {
return err
}
}
// Prevent parallel erasure set healing
locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%d/%d", poolIdx, setIdx))
lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout)
if err != nil {
return fmt.Errorf("Healing of drive '%v' on %s pool, belonging to %s erasure set already in progress: %w",
disk, humanize.Ordinal(poolIdx+1), humanize.Ordinal(setIdx+1), err)
}
ctx = lkctx.Context()
defer locker.Unlock(lkctx)
// Load healing tracker in this disk
tracker, err := loadHealingTracker(ctx, disk)
if err != nil {
// A healing tracker may be deleted if another disk in the
// same erasure set with same healing-id successfully finished
// healing.
if errors.Is(err, errFileNotFound) {
return nil
}
healingLogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err))
tracker = initHealingTracker(disk, mustGetUUID())
}
healingLogEvent(ctx, "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint)
buckets, _ := z.ListBuckets(ctx, BucketOptions{})
// Bucket data is dispersed across multiple pools/sets, make
// sure to heal all bucket metadata configurations.
buckets = append(buckets, BucketInfo{
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
}, BucketInfo{
Name: pathJoin(minioMetaBucket, bucketMetaPrefix),
})
// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
a, b := strings.HasPrefix(buckets[i].Name, minioMetaBucket), strings.HasPrefix(buckets[j].Name, minioMetaBucket)
if a != b {
return a
}
return buckets[i].Created.After(buckets[j].Created)
})
// Load bucket totals
cache := dataUsageCache{}
if err := cache.load(ctx, z.serverPools[poolIdx].sets[setIdx], dataUsageCacheName); err == nil {
dataUsageInfo := cache.dui(dataUsageRoot, nil)
tracker.ObjectsTotalCount = dataUsageInfo.ObjectsTotalCount
tracker.ObjectsTotalSize = dataUsageInfo.ObjectsTotalSize
}
tracker.PoolIndex, tracker.SetIndex, tracker.DiskIndex = disk.GetDiskLoc()
tracker.setQueuedBuckets(buckets)
if err := tracker.save(ctx); err != nil {
return err
}
// Start or resume healing of this erasure set
if err = z.serverPools[poolIdx].sets[setIdx].healErasureSet(ctx, tracker.QueuedBuckets, tracker); err != nil {
return err
}
// if objects have failed healing, we attempt to retry healing the drive up to 3 times before giving up.
if tracker.ItemsFailed > 0 && tracker.RetryAttempts < 4 {
tracker.RetryAttempts++
healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retrying %s time (healed: %d, skipped: %d, failed: %d).", disk,
humanize.Ordinal(int(tracker.RetryAttempts)), tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
tracker.resetHealing()
bugLogIf(ctx, tracker.update(ctx))
return errRetryHealing
}
if tracker.ItemsFailed > 0 {
healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retried %d times (healed: %d, skipped: %d, failed: %d).", disk,
tracker.RetryAttempts, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
} else {
if tracker.RetryAttempts > 0 {
healingLogEvent(ctx, "Healing of drive '%s' is complete, retried %d times (healed: %d, skipped: %d).", disk,
tracker.RetryAttempts-1, tracker.ItemsHealed, tracker.ItemsSkipped)
} else {
healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped)
}
}
if serverDebugLog {
tracker.printTo(os.Stdout)
fmt.Printf("\n")
}
if tracker.HealID == "" { // HealID was empty only before Feb 2023
bugLogIf(ctx, tracker.delete(ctx))
return nil
}
// Remove .healing.bin from all disks with similar heal-id
disks, err := z.GetDisks(poolIdx, setIdx)
if err != nil {
return err
}
for _, disk := range disks {
if disk == nil {
continue
}
t, err := loadHealingTracker(ctx, disk)
if err != nil {
if !errors.Is(err, errFileNotFound) {
healingLogIf(ctx, err)
}
continue
}
if t.HealID == tracker.HealID {
t.Finished = true
t.update(ctx)
}
}
return nil
}
// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible for performing the heal
func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) {
// Perform automatic disk healing when a disk is replaced locally.
diskCheckTimer := time.NewTimer(defaultMonitorNewDiskInterval)
defer diskCheckTimer.Stop()
for {
select {
case <-ctx.Done():
return
case <-diskCheckTimer.C:
healDisks := globalBackgroundHealState.getHealLocalDiskEndpoints()
if len(healDisks) == 0 {
// Reset for next interval.
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
continue
}
// Reformat disks immediately
_, err := z.HealFormat(context.Background(), false)
if err != nil && !errors.Is(err, errNoHealRequired) {
healingLogIf(ctx, err)
// Reset for next interval.
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
continue
}
for _, disk := range healDisks {
go func(disk Endpoint) {
globalBackgroundHealState.setDiskHealingStatus(disk, true)
if err := healFreshDisk(ctx, z, disk); err != nil {
globalBackgroundHealState.setDiskHealingStatus(disk, false)
timedout := OperationTimedOut{}
if !errors.Is(err, context.Canceled) && !errors.As(err, &timedout) && !errors.Is(err, errRetryHealing) {
printEndpointError(disk, err, false)
}
return
}
// Only upon success pop the healed disk.
globalBackgroundHealState.popHealLocalDisks(disk)
}(disk)
}
// Reset for next interval.
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
}
}
}

View File

@ -0,0 +1,890 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "PoolIndex":
z.PoolIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "SetIndex":
z.SetIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "DiskIndex":
z.DiskIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
case "Path":
z.Path, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
case "Endpoint":
z.Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "Started":
z.Started, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "LastUpdate":
z.LastUpdate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "ObjectsTotalCount":
z.ObjectsTotalCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalCount")
return
}
case "ObjectsTotalSize":
z.ObjectsTotalSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalSize")
return
}
case "ItemsHealed":
z.ItemsHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ItemsHealed")
return
}
case "ItemsFailed":
z.ItemsFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ItemsFailed")
return
}
case "BytesDone":
z.BytesDone, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
case "BytesFailed":
z.BytesFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
case "Bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Object":
z.Object, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "ResumeItemsHealed":
z.ResumeItemsHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeItemsHealed")
return
}
case "ResumeItemsFailed":
z.ResumeItemsFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeItemsFailed")
return
}
case "ResumeItemsSkipped":
z.ResumeItemsSkipped, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeItemsSkipped")
return
}
case "ResumeBytesDone":
z.ResumeBytesDone, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeBytesDone")
return
}
case "ResumeBytesFailed":
z.ResumeBytesFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
case "ResumeBytesSkipped":
z.ResumeBytesSkipped, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeBytesSkipped")
return
}
case "QueuedBuckets":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
if cap(z.QueuedBuckets) >= int(zb0002) {
z.QueuedBuckets = (z.QueuedBuckets)[:zb0002]
} else {
z.QueuedBuckets = make([]string, zb0002)
}
for za0001 := range z.QueuedBuckets {
z.QueuedBuckets[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
case "HealedBuckets":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
if cap(z.HealedBuckets) >= int(zb0003) {
z.HealedBuckets = (z.HealedBuckets)[:zb0003]
} else {
z.HealedBuckets = make([]string, zb0003)
}
for za0002 := range z.HealedBuckets {
z.HealedBuckets[za0002], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
case "HealID":
z.HealID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HealID")
return
}
case "ItemsSkipped":
z.ItemsSkipped, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ItemsSkipped")
return
}
case "BytesSkipped":
z.BytesSkipped, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesSkipped")
return
}
case "RetryAttempts":
z.RetryAttempts, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "Finished":
z.Finished, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Finished")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 29
// write "ID"
err = en.Append(0xde, 0x0, 0x1d, 0xa2, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ID)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
// write "PoolIndex"
err = en.Append(0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.PoolIndex)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
// write "SetIndex"
err = en.Append(0xa8, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.SetIndex)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
// write "DiskIndex"
err = en.Append(0xa9, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.DiskIndex)
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
// write "Path"
err = en.Append(0xa4, 0x50, 0x61, 0x74, 0x68)
if err != nil {
return
}
err = en.WriteString(z.Path)
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
// write "Endpoint"
err = en.Append(0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Endpoint)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
// write "Started"
err = en.Append(0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.Started)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
// write "LastUpdate"
err = en.Append(0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.LastUpdate)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
// write "ObjectsTotalCount"
err = en.Append(0xb1, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsTotalCount)
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalCount")
return
}
// write "ObjectsTotalSize"
err = en.Append(0xb0, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsTotalSize)
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalSize")
return
}
// write "ItemsHealed"
err = en.Append(0xab, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ItemsHealed)
if err != nil {
err = msgp.WrapError(err, "ItemsHealed")
return
}
// write "ItemsFailed"
err = en.Append(0xab, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ItemsFailed)
if err != nil {
err = msgp.WrapError(err, "ItemsFailed")
return
}
// write "BytesDone"
err = en.Append(0xa9, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.BytesDone)
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
// write "BytesFailed"
err = en.Append(0xab, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.BytesFailed)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
// write "Bucket"
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "Object"
err = en.Append(0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Object)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
// write "ResumeItemsHealed"
err = en.Append(0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeItemsHealed)
if err != nil {
err = msgp.WrapError(err, "ResumeItemsHealed")
return
}
// write "ResumeItemsFailed"
err = en.Append(0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeItemsFailed)
if err != nil {
err = msgp.WrapError(err, "ResumeItemsFailed")
return
}
// write "ResumeItemsSkipped"
err = en.Append(0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeItemsSkipped)
if err != nil {
err = msgp.WrapError(err, "ResumeItemsSkipped")
return
}
// write "ResumeBytesDone"
err = en.Append(0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeBytesDone)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesDone")
return
}
// write "ResumeBytesFailed"
err = en.Append(0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeBytesFailed)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
// write "ResumeBytesSkipped"
err = en.Append(0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeBytesSkipped)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesSkipped")
return
}
// write "QueuedBuckets"
err = en.Append(0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.QueuedBuckets)))
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
for za0001 := range z.QueuedBuckets {
err = en.WriteString(z.QueuedBuckets[za0001])
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
// write "HealedBuckets"
err = en.Append(0xad, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.HealedBuckets)))
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
for za0002 := range z.HealedBuckets {
err = en.WriteString(z.HealedBuckets[za0002])
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
// write "HealID"
err = en.Append(0xa6, 0x48, 0x65, 0x61, 0x6c, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.HealID)
if err != nil {
err = msgp.WrapError(err, "HealID")
return
}
// write "ItemsSkipped"
err = en.Append(0xac, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ItemsSkipped)
if err != nil {
err = msgp.WrapError(err, "ItemsSkipped")
return
}
// write "BytesSkipped"
err = en.Append(0xac, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.BytesSkipped)
if err != nil {
err = msgp.WrapError(err, "BytesSkipped")
return
}
// write "RetryAttempts"
err = en.Append(0xad, 0x52, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.RetryAttempts)
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
// write "Finished"
err = en.Append(0xa8, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteBool(z.Finished)
if err != nil {
err = msgp.WrapError(err, "Finished")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 29
// string "ID"
o = append(o, 0xde, 0x0, 0x1d, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "PoolIndex"
o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.PoolIndex)
// string "SetIndex"
o = append(o, 0xa8, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.SetIndex)
// string "DiskIndex"
o = append(o, 0xa9, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.DiskIndex)
// string "Path"
o = append(o, 0xa4, 0x50, 0x61, 0x74, 0x68)
o = msgp.AppendString(o, z.Path)
// string "Endpoint"
o = append(o, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
// string "Started"
o = append(o, 0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.Started)
// string "LastUpdate"
o = append(o, 0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
o = msgp.AppendTime(o, z.LastUpdate)
// string "ObjectsTotalCount"
o = append(o, 0xb1, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.ObjectsTotalCount)
// string "ObjectsTotalSize"
o = append(o, 0xb0, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.ObjectsTotalSize)
// string "ItemsHealed"
o = append(o, 0xab, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ItemsHealed)
// string "ItemsFailed"
o = append(o, 0xab, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ItemsFailed)
// string "BytesDone"
o = append(o, 0xa9, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
o = msgp.AppendUint64(o, z.BytesDone)
// string "BytesFailed"
o = append(o, 0xab, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.BytesFailed)
// string "Bucket"
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "Object"
o = append(o, 0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
o = msgp.AppendString(o, z.Object)
// string "ResumeItemsHealed"
o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeItemsHealed)
// string "ResumeItemsFailed"
o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeItemsFailed)
// string "ResumeItemsSkipped"
o = append(o, 0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeItemsSkipped)
// string "ResumeBytesDone"
o = append(o, 0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
o = msgp.AppendUint64(o, z.ResumeBytesDone)
// string "ResumeBytesFailed"
o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeBytesFailed)
// string "ResumeBytesSkipped"
o = append(o, 0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeBytesSkipped)
// string "QueuedBuckets"
o = append(o, 0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.QueuedBuckets)))
for za0001 := range z.QueuedBuckets {
o = msgp.AppendString(o, z.QueuedBuckets[za0001])
}
// string "HealedBuckets"
o = append(o, 0xad, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.HealedBuckets)))
for za0002 := range z.HealedBuckets {
o = msgp.AppendString(o, z.HealedBuckets[za0002])
}
// string "HealID"
o = append(o, 0xa6, 0x48, 0x65, 0x61, 0x6c, 0x49, 0x44)
o = msgp.AppendString(o, z.HealID)
// string "ItemsSkipped"
o = append(o, 0xac, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ItemsSkipped)
// string "BytesSkipped"
o = append(o, 0xac, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.BytesSkipped)
// string "RetryAttempts"
o = append(o, 0xad, 0x52, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
o = msgp.AppendUint64(o, z.RetryAttempts)
// string "Finished"
o = append(o, 0xa8, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64)
o = msgp.AppendBool(o, z.Finished)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "PoolIndex":
z.PoolIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "SetIndex":
z.SetIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "DiskIndex":
z.DiskIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
case "Path":
z.Path, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
case "Endpoint":
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "Started":
z.Started, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "LastUpdate":
z.LastUpdate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "ObjectsTotalCount":
z.ObjectsTotalCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalCount")
return
}
case "ObjectsTotalSize":
z.ObjectsTotalSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalSize")
return
}
case "ItemsHealed":
z.ItemsHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ItemsHealed")
return
}
case "ItemsFailed":
z.ItemsFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ItemsFailed")
return
}
case "BytesDone":
z.BytesDone, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
case "BytesFailed":
z.BytesFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
case "Bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Object":
z.Object, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "ResumeItemsHealed":
z.ResumeItemsHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeItemsHealed")
return
}
case "ResumeItemsFailed":
z.ResumeItemsFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeItemsFailed")
return
}
case "ResumeItemsSkipped":
z.ResumeItemsSkipped, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeItemsSkipped")
return
}
case "ResumeBytesDone":
z.ResumeBytesDone, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesDone")
return
}
case "ResumeBytesFailed":
z.ResumeBytesFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
case "ResumeBytesSkipped":
z.ResumeBytesSkipped, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesSkipped")
return
}
case "QueuedBuckets":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
if cap(z.QueuedBuckets) >= int(zb0002) {
z.QueuedBuckets = (z.QueuedBuckets)[:zb0002]
} else {
z.QueuedBuckets = make([]string, zb0002)
}
for za0001 := range z.QueuedBuckets {
z.QueuedBuckets[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
case "HealedBuckets":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
if cap(z.HealedBuckets) >= int(zb0003) {
z.HealedBuckets = (z.HealedBuckets)[:zb0003]
} else {
z.HealedBuckets = make([]string, zb0003)
}
for za0002 := range z.HealedBuckets {
z.HealedBuckets[za0002], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
case "HealID":
z.HealID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealID")
return
}
case "ItemsSkipped":
z.ItemsSkipped, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ItemsSkipped")
return
}
case "BytesSkipped":
z.BytesSkipped, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesSkipped")
return
}
case "RetryAttempts":
z.RetryAttempts, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "Finished":
z.Finished, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Finished")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *healingTracker) Msgsize() (s int) {
s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 18 + msgp.Uint64Size + 17 + msgp.Uint64Size + 12 + msgp.Uint64Size + 12 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 18 + msgp.Uint64Size + 18 + msgp.Uint64Size + 19 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 19 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize
for za0001 := range z.QueuedBuckets {
s += msgp.StringPrefixSize + len(z.QueuedBuckets[za0001])
}
s += 14 + msgp.ArrayHeaderSize
for za0002 := range z.HealedBuckets {
s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
}
s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size + 14 + msgp.Uint64Size + 9 + msgp.BoolSize
return
}

View File

@ -0,0 +1,123 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalhealingTracker(t *testing.T) {
v := healingTracker{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsghealingTracker(b *testing.B) {
v := healingTracker{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsghealingTracker(b *testing.B) {
v := healingTracker{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalhealingTracker(b *testing.B) {
v := healingTracker{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodehealingTracker(t *testing.T) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodehealingTracker Msgsize() is inaccurate")
}
vn := healingTracker{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodehealingTracker(b *testing.B) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodehealingTracker(b *testing.B) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

cmd/batch-expire.go Normal file

@ -0,0 +1,839 @@
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"runtime"
"strconv"
"time"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/bucket/versioning"
xhttp "github.com/minio/minio/internal/http"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/wildcard"
"github.com/minio/pkg/v3/workers"
"github.com/minio/pkg/v3/xtime"
"gopkg.in/yaml.v3"
)
// expire: # Expire objects that match a condition
// apiVersion: v1
// bucket: mybucket # Bucket where this batch job will expire matching objects from
// prefix: myprefix # (Optional) Prefix under which this job will expire objects matching the rules below.
// rules:
// - type: object # regular objects with zero or more older versions
// name: NAME # match object names that satisfy the wildcard expression.
// olderThan: 70h # match objects older than this value
// createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
// tags:
// - key: name
// value: pick* # match objects with tag 'name', all values starting with 'pick'
// metadata:
// - key: content-type
// value: image/* # match objects with 'content-type', all values starting with 'image/'
// size:
// lessThan: "10MiB" # match objects with size less than this value (e.g. 10MiB)
// greaterThan: 1MiB # match objects with size greater than this value (e.g. 1MiB)
// purge:
// # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
// # retainVersions: 5 # keep the latest 5 versions of the object.
//
// - type: deleted # objects with delete marker as their latest version
// name: NAME # match object names that satisfy the wildcard expression.
// olderThan: 10h # match objects older than this value (e.g. 7d10h31s)
// createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
// purge:
// # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
// # retainVersions: 5 # keep the latest 5 versions of the object including delete markers.
//
// notify:
// endpoint: https://notify.endpoint # notification endpoint to receive job completion status
// token: Bearer xxxxx # optional authentication token for the notification endpoint
//
// retry:
// attempts: 10 # number of retries for the job before giving up
// delay: 500ms # least amount of delay between each retry
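//
// A minimal sketch of loading such a job definition into the types below (the
// YAML document name `doc` is hypothetical; TestParseBatchJobExpire in
// cmd/batch-expire_test.go has a complete example):
//
//	var job BatchJobRequest
//	if err := yaml.Unmarshal([]byte(doc), &job); err != nil {
//		// reject the invalid job definition
//	}
//	// job.Expire now holds the parsed BatchJobExpire (bucket, prefix, rules, notify, retry).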
//go:generate msgp -file $GOFILE
// BatchJobExpirePurge type accepts non-negative versions to be retained
type BatchJobExpirePurge struct {
line, col int
RetainVersions int `yaml:"retainVersions" json:"retainVersions"`
}
var _ yaml.Unmarshaler = &BatchJobExpirePurge{}
// UnmarshalYAML - BatchJobExpirePurge extends unmarshal to extract line, col
func (p *BatchJobExpirePurge) UnmarshalYAML(val *yaml.Node) error {
type purge BatchJobExpirePurge
var tmp purge
err := val.Decode(&tmp)
if err != nil {
return err
}
*p = BatchJobExpirePurge(tmp)
p.line, p.col = val.Line, val.Column
return nil
}
// Validate returns nil if the value is valid, i.e. >= 0.
func (p BatchJobExpirePurge) Validate() error {
if p.RetainVersions < 0 {
return BatchJobYamlErr{
line: p.line,
col: p.col,
msg: "retainVersions must be >= 0",
}
}
return nil
}
// BatchJobExpireFilter holds all the filters currently supported for batch expiration
type BatchJobExpireFilter struct {
line, col int
OlderThan xtime.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedBefore *time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
Size BatchJobSizeFilter `yaml:"size" json:"size"`
Type string `yaml:"type" json:"type"`
Name string `yaml:"name" json:"name"`
Purge BatchJobExpirePurge `yaml:"purge" json:"purge"`
}
var _ yaml.Unmarshaler = &BatchJobExpireFilter{}
// UnmarshalYAML - BatchJobExpireFilter extends unmarshal to extract line, col
// information
func (ef *BatchJobExpireFilter) UnmarshalYAML(value *yaml.Node) error {
type expFilter BatchJobExpireFilter
var tmp expFilter
err := value.Decode(&tmp)
if err != nil {
return err
}
*ef = BatchJobExpireFilter(tmp)
ef.line, ef.col = value.Line, value.Column
return nil
}
// Matches returns true if obj matches the filter conditions specified in ef.
func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
switch ef.Type {
case BatchJobExpireObject:
if obj.DeleteMarker {
return false
}
case BatchJobExpireDeleted:
if !obj.DeleteMarker {
return false
}
default:
// we should never get here; Validate should have caught this.
batchLogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type)
return false
}
if len(ef.Name) > 0 && !wildcard.Match(ef.Name, obj.Name) {
return false
}
if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan.D() {
return false
}
if ef.CreatedBefore != nil && !obj.ModTime.Before(*ef.CreatedBefore) {
return false
}
if len(ef.Tags) > 0 && !obj.DeleteMarker {
// Only parse object tags if tags filter is specified.
var tagMap map[string]string
if len(obj.UserTags) != 0 {
t, err := tags.ParseObjectTags(obj.UserTags)
if err != nil {
return false
}
tagMap = t.ToMap()
}
for _, kv := range ef.Tags {
// Object (version) must match all tags specified in
// the filter
var match bool
for t, v := range tagMap {
if kv.Match(BatchJobKV{Key: t, Value: v}) {
match = true
}
}
if !match {
return false
}
}
}
if len(ef.Metadata) > 0 && !obj.DeleteMarker {
for _, kv := range ef.Metadata {
// Object (version) must match all x-amz-meta and
// standard metadata headers
// specified in the filter
var match bool
for k, v := range obj.UserDefined {
if !stringsHasPrefixFold(k, "x-amz-meta-") && !isStandardHeader(k) {
continue
}
// We only need to match x-amz-meta or standardHeaders
if kv.Match(BatchJobKV{Key: k, Value: v}) {
match = true
}
}
if !match {
return false
}
}
}
return ef.Size.InRange(obj.Size)
}
const (
// BatchJobExpireObject - object type
BatchJobExpireObject string = "object"
// BatchJobExpireDeleted - delete marker type
BatchJobExpireDeleted string = "deleted"
)
// Validate returns nil if ef has valid fields, validation error otherwise.
func (ef BatchJobExpireFilter) Validate() error {
switch ef.Type {
case BatchJobExpireObject:
case BatchJobExpireDeleted:
if len(ef.Tags) > 0 || len(ef.Metadata) > 0 {
return BatchJobYamlErr{
line: ef.line,
col: ef.col,
msg: "delete type filter can't have tags or metadata",
}
}
default:
return BatchJobYamlErr{
line: ef.line,
col: ef.col,
msg: "invalid batch-expire type",
}
}
for _, tag := range ef.Tags {
if err := tag.Validate(); err != nil {
return err
}
}
for _, meta := range ef.Metadata {
if err := meta.Validate(); err != nil {
return err
}
}
if err := ef.Purge.Validate(); err != nil {
return err
}
if err := ef.Size.Validate(); err != nil {
return err
}
if ef.CreatedBefore != nil && !ef.CreatedBefore.Before(time.Now()) {
return BatchJobYamlErr{
line: ef.line,
col: ef.col,
msg: "CreatedBefore is in the future",
}
}
return nil
}
// BatchJobExpire represents configuration parameters for a batch expiration
// job typically supplied in yaml form
type BatchJobExpire struct {
line, col int
APIVersion string `yaml:"apiVersion" json:"apiVersion"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix BatchJobPrefix `yaml:"prefix" json:"prefix"`
NotificationCfg BatchJobNotification `yaml:"notify" json:"notify"`
Retry BatchJobRetry `yaml:"retry" json:"retry"`
Rules []BatchJobExpireFilter `yaml:"rules" json:"rules"`
}
var _ yaml.Unmarshaler = &BatchJobExpire{}
// RedactSensitive will redact any sensitive information in r.
func (r *BatchJobExpire) RedactSensitive() {
if r == nil {
return
}
if r.NotificationCfg.Token != "" {
r.NotificationCfg.Token = redactedText
}
}
// UnmarshalYAML - BatchJobExpire extends default unmarshal to extract line, col information.
func (r *BatchJobExpire) UnmarshalYAML(val *yaml.Node) error {
type expireJob BatchJobExpire
var tmp expireJob
err := val.Decode(&tmp)
if err != nil {
return err
}
*r = BatchJobExpire(tmp)
r.line, r.col = val.Line, val.Column
return nil
}
// Notify notifies the configured notification endpoint, if any, of job failure or success.
func (r BatchJobExpire) Notify(ctx context.Context, body io.Reader) error {
if r.NotificationCfg.Endpoint == "" {
return nil
}
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
req, err := http.NewRequestWithContext(ctx, http.MethodPost, r.NotificationCfg.Endpoint, body)
if err != nil {
return err
}
if r.NotificationCfg.Token != "" {
req.Header.Set("Authorization", r.NotificationCfg.Token)
}
clnt := http.Client{Transport: getRemoteInstanceTransport()}
resp, err := clnt.Do(req)
if err != nil {
return err
}
xhttp.DrainBody(resp.Body)
if resp.StatusCode != http.StatusOK {
return errors.New(resp.Status)
}
return nil
}
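// The body posted here is whatever the caller supplies; in this file Start
// sends the JSON-encoded batchJobInfo (see the json.Marshal(ri) call near the
// end of Start), and Token, when set, is sent verbatim as the Authorization header.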
// Expire expires object versions which have already matched the supplied filter conditions.
func (r *BatchJobExpire) Expire(ctx context.Context, api ObjectLayer, vc *versioning.Versioning, objsToDel []ObjectToDelete) []error {
opts := ObjectOptions{
PrefixEnabledFn: vc.PrefixEnabled,
VersionSuspended: vc.Suspended(),
}
allErrs := make([]error, 0, len(objsToDel))
for {
count := len(objsToDel)
if count == 0 {
break
}
if count > maxDeleteList {
count = maxDeleteList
}
_, errs := api.DeleteObjects(ctx, r.Bucket, objsToDel[:count], opts)
allErrs = append(allErrs, errs...)
// Next batch of deletion
objsToDel = objsToDel[count:]
}
return allErrs
}
const (
batchExpireName = "batch-expire.bin"
batchExpireFormat = 1
batchExpireVersionV1 = 1
batchExpireVersion = batchExpireVersionV1
batchExpireAPIVersion = "v1"
batchExpireJobDefaultRetries = 3
batchExpireJobDefaultRetryDelay = 250 * time.Millisecond
)
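// When a job YAML omits retry settings, batchObjsForDelete falls back to
// batchExpireJobDefaultRetries attempts with batchExpireJobDefaultRetryDelay
// between attempts.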
type objInfoCache map[string]*ObjectInfo
func newObjInfoCache() objInfoCache {
return objInfoCache(make(map[string]*ObjectInfo))
}
func (oiCache objInfoCache) Add(toDel ObjectToDelete, oi *ObjectInfo) {
oiCache[fmt.Sprintf("%s-%s", toDel.ObjectName, toDel.VersionID)] = oi
}
func (oiCache objInfoCache) Get(toDel ObjectToDelete) (*ObjectInfo, bool) {
oi, ok := oiCache[fmt.Sprintf("%s-%s", toDel.ObjectName, toDel.VersionID)]
return oi, ok
}
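// Entries are keyed by the "ObjectName-VersionID" pair so that deletion
// results can be mapped back to their ObjectInfo when recording per-object
// success or failure across retries.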
func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo, job BatchJobRequest, api ObjectLayer, wk *workers.Workers, expireCh <-chan []expireObjInfo) {
vc, _ := globalBucketVersioningSys.Get(r.Bucket)
retryAttempts := job.Expire.Retry.Attempts
if retryAttempts <= 0 {
retryAttempts = batchExpireJobDefaultRetries
}
delay := job.Expire.Retry.Delay
if delay <= 0 {
delay = batchExpireJobDefaultRetryDelay
}
var i int
for toExpire := range expireCh {
select {
case <-ctx.Done():
return
default:
}
if i > 0 {
if wait := globalBatchConfig.ExpirationWait(); wait > 0 {
time.Sleep(wait)
}
}
i++
wk.Take()
go func(toExpire []expireObjInfo) {
defer wk.Give()
toExpireAll := make([]expireObjInfo, 0, len(toExpire))
toDel := make([]ObjectToDelete, 0, len(toExpire))
oiCache := newObjInfoCache()
for _, exp := range toExpire {
if exp.ExpireAll {
toExpireAll = append(toExpireAll, exp)
continue
}
// Cache ObjectInfo value via pointers for
// subsequent use to track objects which
// couldn't be deleted.
od := ObjectToDelete{
ObjectV: ObjectV{
ObjectName: exp.Name,
VersionID: exp.VersionID,
},
}
toDel = append(toDel, od)
oiCache.Add(od, &exp.ObjectInfo)
}
// DeleteObject(deletePrefix: true) to expire all versions of an object
for _, exp := range toExpireAll {
var success bool
for attempts := 1; attempts <= retryAttempts; attempts++ {
select {
case <-ctx.Done():
ri.trackMultipleObjectVersions(exp, success)
return
default:
}
stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, ri.JobID, attempts)
_, err := api.DeleteObject(ctx, exp.Bucket, encodeDirObject(exp.Name), ObjectOptions{
DeletePrefix: true,
DeletePrefixObject: true, // use prefix delete on exact object (this is an optimization to avoid fan-out calls)
})
if err != nil {
stopFn(exp, err)
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s due to %v (attempts=%d)", exp.Bucket, exp.Name, err, attempts))
} else {
stopFn(exp, err)
success = true
break
}
}
ri.trackMultipleObjectVersions(exp, success)
}
// Delete multiple objects
toDelCopy := make([]ObjectToDelete, len(toDel))
for attempts := 1; attempts <= retryAttempts; attempts++ {
select {
case <-ctx.Done():
return
default:
}
stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, ri.JobID, attempts)
// Copying toDel to select from objects whose
// deletion failed
copy(toDelCopy, toDel)
var failed int
errs := r.Expire(ctx, api, vc, toDel)
// reslice toDel in preparation for next retry attempt
toDel = toDel[:0]
for i, err := range errs {
if err != nil {
stopFn(toDelCopy[i], err)
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID,
err, attempts))
failed++
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, false, attempts)
}
if attempts != retryAttempts {
// retry
toDel = append(toDel, toDelCopy[i])
}
} else {
stopFn(toDelCopy[i], nil)
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, true, attempts)
}
}
}
globalBatchJobsMetrics.save(ri.JobID, ri)
if failed == 0 {
break
}
// Add a delay between retry attempts
if attempts < retryAttempts {
time.Sleep(delay)
}
}
}(toExpire)
}
}
type expireObjInfo struct {
ObjectInfo
ExpireAll bool
DeleteMarkerCount int64
}
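// DeleteMarkerCount is populated only for ExpireAll entries: pushToExpire in
// Start copies the per-object tally accumulated in deleteMarkerCountMap before
// the entry is handed to the expire workers.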
// Start the batch expiration job, resumes if there was a pending job via "job.ID"
func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
ri := &batchJobInfo{
JobID: job.ID,
JobType: string(job.Type()),
StartTime: job.Started,
}
if err := ri.loadOrInit(ctx, api, job); err != nil {
return err
}
globalBatchJobsMetrics.save(job.ID, ri)
lastObject := ri.Object
now := time.Now().UTC()
workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_EXPIRATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
if err != nil {
return err
}
wk, err := workers.New(workerSize)
if err != nil {
// invalid worker size.
return err
}
ctx, cancelCause := context.WithCancelCause(ctx)
defer cancelCause(nil)
results := make(chan itemOrErr[ObjectInfo], workerSize)
go func() {
prefixes := r.Prefix.F()
if len(prefixes) == 0 {
prefixes = []string{""}
}
for _, prefix := range prefixes {
prefixResultCh := make(chan itemOrErr[ObjectInfo], workerSize)
err := api.Walk(ctx, r.Bucket, prefix, prefixResultCh, WalkOptions{
Marker: lastObject,
LatestOnly: false, // we need to visit all versions of the object to implement purge: retainVersions
VersionsSort: WalkVersionsSortDesc,
})
if err != nil {
cancelCause(err)
xioutil.SafeClose(results)
return
}
for result := range prefixResultCh {
results <- result
}
}
xioutil.SafeClose(results)
}()
// Goroutine to periodically save batch-expire job's in-memory state
saverQuitCh := make(chan struct{})
go func() {
saveTicker := time.NewTicker(10 * time.Second)
defer saveTicker.Stop()
quit := false
after := time.Minute
for !quit {
select {
case <-saveTicker.C:
case <-ctx.Done():
quit = true
case <-saverQuitCh:
quit = true
}
if quit {
// save immediately if we are quitting
after = 0
}
ctx, cancel := context.WithTimeout(GlobalContext, 30*time.Second) // independent context
batchLogIf(ctx, ri.updateAfter(ctx, api, after, job))
cancel()
}
}()
expireCh := make(chan []expireObjInfo, workerSize)
expireDoneCh := make(chan struct{})
go func() {
defer close(expireDoneCh)
batchObjsForDelete(ctx, r, ri, job, api, wk, expireCh)
}()
var (
prevObj ObjectInfo
matchedFilter BatchJobExpireFilter
versionsCount int
toDel []expireObjInfo
failed bool
done bool
)
deleteMarkerCountMap := map[string]int64{}
pushToExpire := func() {
// set the previous object's DeleteMarkerCount
if len(toDel) > 0 {
lastDelIndex := len(toDel) - 1
lastDel := toDel[lastDelIndex]
if lastDel.ExpireAll {
toDel[lastDelIndex].DeleteMarkerCount = deleteMarkerCountMap[lastDel.Name]
// delete the key
delete(deleteMarkerCountMap, lastDel.Name)
}
}
// send down filtered entries to be deleted using
// DeleteObjects method
if len(toDel) > 10 { // send a batch once more than 10 objects/versions have accumulated.
xfer := make([]expireObjInfo, len(toDel))
copy(xfer, toDel)
select {
case expireCh <- xfer:
toDel = toDel[:0] // resetting toDel
case <-ctx.Done():
done = true
}
}
}
for {
select {
case result, ok := <-results:
if !ok {
done = true
break
}
if result.Err != nil {
failed = true
batchLogIf(ctx, result.Err)
continue
}
if result.Item.DeleteMarker {
deleteMarkerCountMap[result.Item.Name]++
}
// Apply the filters to find the first matching rule and take expiry
// actions accordingly.
// nolint:gocritic
if result.Item.IsLatest {
var match BatchJobExpireFilter
var found bool
for _, rule := range r.Rules {
if rule.Matches(result.Item, now) {
match = rule
found = true
break
}
}
if !found {
continue
}
if prevObj.Name != result.Item.Name {
// switch the object
pushToExpire()
}
prevObj = result.Item
matchedFilter = match
versionsCount = 1
// Include the latest version
if matchedFilter.Purge.RetainVersions == 0 {
toDel = append(toDel, expireObjInfo{
ObjectInfo: result.Item,
ExpireAll: true,
})
continue
}
} else if prevObj.Name == result.Item.Name {
if matchedFilter.Purge.RetainVersions == 0 {
continue // including latest version in toDel suffices, skipping other versions
}
versionsCount++
} else {
// switch the object
pushToExpire()
// the object changed but no latest version was seen; log it
batchLogIf(ctx, fmt.Errorf("skipping object %s, no latest version found", result.Item.Name))
continue
}
if versionsCount <= matchedFilter.Purge.RetainVersions {
continue // retain versions
}
toDel = append(toDel, expireObjInfo{
ObjectInfo: result.Item,
})
pushToExpire()
case <-ctx.Done():
done = true
}
if done {
break
}
}
if context.Cause(ctx) != nil {
xioutil.SafeClose(expireCh)
return context.Cause(ctx)
}
pushToExpire()
// Send any remaining objects downstream
if len(toDel) > 0 {
select {
case <-ctx.Done():
case expireCh <- toDel:
}
}
xioutil.SafeClose(expireCh)
<-expireDoneCh // waits for the expire goroutine to complete
wk.Wait() // waits for all expire workers to retire
ri.Complete = !failed && ri.ObjectsFailed == 0
ri.Failed = failed || ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri)
// Close the saverQuitCh - this also triggers saving in-memory state
// immediately one last time before we exit this method.
xioutil.SafeClose(saverQuitCh)
// Notify expire jobs final status to the configured endpoint
buf, _ := json.Marshal(ri)
if err := r.Notify(context.Background(), bytes.NewReader(buf)); err != nil {
batchLogIf(context.Background(), fmt.Errorf("unable to notify %v", err))
}
return nil
}
//msgp:ignore batchExpireJobError
type batchExpireJobError struct {
Code string
Description string
HTTPStatusCode int
}
func (e batchExpireJobError) Error() string {
return e.Description
}
// maxBatchRules is the maximum number of rules a batch-expiry job supports
const maxBatchRules = 50
// Validate validates the job definition input
func (r *BatchJobExpire) Validate(ctx context.Context, job BatchJobRequest, o ObjectLayer) error {
if r == nil {
return nil
}
if r.APIVersion != batchExpireAPIVersion {
return batchExpireJobError{
Code: "InvalidArgument",
Description: "Unsupported batch expire API version",
HTTPStatusCode: http.StatusBadRequest,
}
}
if r.Bucket == "" {
return batchExpireJobError{
Code: "InvalidArgument",
Description: "Bucket argument missing",
HTTPStatusCode: http.StatusBadRequest,
}
}
if _, err := o.GetBucketInfo(ctx, r.Bucket, BucketOptions{}); err != nil {
if isErrBucketNotFound(err) {
return batchExpireJobError{
Code: "NoSuchSourceBucket",
Description: "The specified source bucket does not exist",
HTTPStatusCode: http.StatusNotFound,
}
}
return err
}
if len(r.Rules) > maxBatchRules {
return batchExpireJobError{
Code: "InvalidArgument",
Description: "Too many rules. Batch expire job can't have more than 100 rules",
HTTPStatusCode: http.StatusBadRequest,
}
}
for _, rule := range r.Rules {
if err := rule.Validate(); err != nil {
return batchExpireJobError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Invalid batch expire rule: %s", err),
HTTPStatusCode: http.StatusBadRequest,
}
}
}
if err := r.Retry.Validate(); err != nil {
return batchExpireJobError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Invalid batch expire retry configuration: %s", err),
HTTPStatusCode: http.StatusBadRequest,
}
}
return nil
}

cmd/batch-expire_gen.go Normal file

@ -0,0 +1,864 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"time"
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *BatchJobExpire) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "APIVersion":
z.APIVersion, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "APIVersion")
return
}
case "Bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Prefix":
err = z.Prefix.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "NotificationCfg":
err = z.NotificationCfg.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "NotificationCfg")
return
}
case "Retry":
err = z.Retry.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Retry")
return
}
case "Rules":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Rules")
return
}
if cap(z.Rules) >= int(zb0002) {
z.Rules = (z.Rules)[:zb0002]
} else {
z.Rules = make([]BatchJobExpireFilter, zb0002)
}
for za0001 := range z.Rules {
err = z.Rules[za0001].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Rules", za0001)
return
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BatchJobExpire) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 6
// write "APIVersion"
err = en.Append(0x86, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.APIVersion)
if err != nil {
err = msgp.WrapError(err, "APIVersion")
return
}
// write "Bucket"
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "Prefix"
err = en.Append(0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
if err != nil {
return
}
err = z.Prefix.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
// write "NotificationCfg"
err = en.Append(0xaf, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x66, 0x67)
if err != nil {
return
}
err = z.NotificationCfg.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "NotificationCfg")
return
}
// write "Retry"
err = en.Append(0xa5, 0x52, 0x65, 0x74, 0x72, 0x79)
if err != nil {
return
}
err = z.Retry.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Retry")
return
}
// write "Rules"
err = en.Append(0xa5, 0x52, 0x75, 0x6c, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Rules)))
if err != nil {
err = msgp.WrapError(err, "Rules")
return
}
for za0001 := range z.Rules {
err = z.Rules[za0001].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Rules", za0001)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BatchJobExpire) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 6
// string "APIVersion"
o = append(o, 0x86, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.APIVersion)
// string "Bucket"
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "Prefix"
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
o, err = z.Prefix.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
// string "NotificationCfg"
o = append(o, 0xaf, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x66, 0x67)
o, err = z.NotificationCfg.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "NotificationCfg")
return
}
// string "Retry"
o = append(o, 0xa5, 0x52, 0x65, 0x74, 0x72, 0x79)
o, err = z.Retry.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Retry")
return
}
// string "Rules"
o = append(o, 0xa5, 0x52, 0x75, 0x6c, 0x65, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Rules)))
for za0001 := range z.Rules {
o, err = z.Rules[za0001].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Rules", za0001)
return
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobExpire) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "APIVersion":
z.APIVersion, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "APIVersion")
return
}
case "Bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Prefix":
bts, err = z.Prefix.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "NotificationCfg":
bts, err = z.NotificationCfg.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "NotificationCfg")
return
}
case "Retry":
bts, err = z.Retry.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Retry")
return
}
case "Rules":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Rules")
return
}
if cap(z.Rules) >= int(zb0002) {
z.Rules = (z.Rules)[:zb0002]
} else {
z.Rules = make([]BatchJobExpireFilter, zb0002)
}
for za0001 := range z.Rules {
bts, err = z.Rules[za0001].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Rules", za0001)
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobExpire) Msgsize() (s int) {
s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + z.Prefix.Msgsize() + 16 + z.NotificationCfg.Msgsize() + 6 + z.Retry.Msgsize() + 6 + msgp.ArrayHeaderSize
for za0001 := range z.Rules {
s += z.Rules[za0001].Msgsize()
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *BatchJobExpireFilter) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "OlderThan":
err = z.OlderThan.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
}
case "CreatedBefore":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "CreatedBefore")
return
}
z.CreatedBefore = nil
} else {
if z.CreatedBefore == nil {
z.CreatedBefore = new(time.Time)
}
*z.CreatedBefore, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "CreatedBefore")
return
}
}
case "Tags":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Tags")
return
}
if cap(z.Tags) >= int(zb0002) {
z.Tags = (z.Tags)[:zb0002]
} else {
z.Tags = make([]BatchJobKV, zb0002)
}
for za0001 := range z.Tags {
err = z.Tags[za0001].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Tags", za0001)
return
}
}
case "Metadata":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Metadata")
return
}
if cap(z.Metadata) >= int(zb0003) {
z.Metadata = (z.Metadata)[:zb0003]
} else {
z.Metadata = make([]BatchJobKV, zb0003)
}
for za0002 := range z.Metadata {
err = z.Metadata[za0002].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Metadata", za0002)
return
}
}
case "Size":
err = z.Size.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
case "Type":
z.Type, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
case "Name":
z.Name, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Purge":
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Purge")
return
}
for zb0004 > 0 {
zb0004--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Purge")
return
}
switch msgp.UnsafeString(field) {
case "RetainVersions":
z.Purge.RetainVersions, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Purge", "RetainVersions")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Purge")
return
}
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BatchJobExpireFilter) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 8
// write "OlderThan"
err = en.Append(0x88, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
if err != nil {
return
}
err = z.OlderThan.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
}
// write "CreatedBefore"
err = en.Append(0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65)
if err != nil {
return
}
if z.CreatedBefore == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = en.WriteTime(*z.CreatedBefore)
if err != nil {
err = msgp.WrapError(err, "CreatedBefore")
return
}
}
// write "Tags"
err = en.Append(0xa4, 0x54, 0x61, 0x67, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Tags)))
if err != nil {
err = msgp.WrapError(err, "Tags")
return
}
for za0001 := range z.Tags {
err = z.Tags[za0001].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Tags", za0001)
return
}
}
// write "Metadata"
err = en.Append(0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Metadata)))
if err != nil {
err = msgp.WrapError(err, "Metadata")
return
}
for za0002 := range z.Metadata {
err = z.Metadata[za0002].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Metadata", za0002)
return
}
}
// write "Size"
err = en.Append(0xa4, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = z.Size.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
// write "Type"
err = en.Append(0xa4, 0x54, 0x79, 0x70, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Type)
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
// write "Name"
err = en.Append(0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Name)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
// write "Purge"
err = en.Append(0xa5, 0x50, 0x75, 0x72, 0x67, 0x65)
if err != nil {
return
}
// map header, size 1
// write "RetainVersions"
err = en.Append(0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.Purge.RetainVersions)
if err != nil {
err = msgp.WrapError(err, "Purge", "RetainVersions")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BatchJobExpireFilter) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 8
// string "OlderThan"
o = append(o, 0x88, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
o, err = z.OlderThan.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
}
// string "CreatedBefore"
o = append(o, 0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65)
if z.CreatedBefore == nil {
o = msgp.AppendNil(o)
} else {
o = msgp.AppendTime(o, *z.CreatedBefore)
}
// string "Tags"
o = append(o, 0xa4, 0x54, 0x61, 0x67, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Tags)))
for za0001 := range z.Tags {
o, err = z.Tags[za0001].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Tags", za0001)
return
}
}
// string "Metadata"
o = append(o, 0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61)
o = msgp.AppendArrayHeader(o, uint32(len(z.Metadata)))
for za0002 := range z.Metadata {
o, err = z.Metadata[za0002].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Metadata", za0002)
return
}
}
// string "Size"
o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65)
o, err = z.Size.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
// string "Type"
o = append(o, 0xa4, 0x54, 0x79, 0x70, 0x65)
o = msgp.AppendString(o, z.Type)
// string "Name"
o = append(o, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "Purge"
o = append(o, 0xa5, 0x50, 0x75, 0x72, 0x67, 0x65)
// map header, size 1
// string "RetainVersions"
o = append(o, 0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
o = msgp.AppendInt(o, z.Purge.RetainVersions)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "OlderThan":
bts, err = z.OlderThan.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
}
case "CreatedBefore":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.CreatedBefore = nil
} else {
if z.CreatedBefore == nil {
z.CreatedBefore = new(time.Time)
}
*z.CreatedBefore, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CreatedBefore")
return
}
}
case "Tags":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Tags")
return
}
if cap(z.Tags) >= int(zb0002) {
z.Tags = (z.Tags)[:zb0002]
} else {
z.Tags = make([]BatchJobKV, zb0002)
}
for za0001 := range z.Tags {
bts, err = z.Tags[za0001].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Tags", za0001)
return
}
}
case "Metadata":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Metadata")
return
}
if cap(z.Metadata) >= int(zb0003) {
z.Metadata = (z.Metadata)[:zb0003]
} else {
z.Metadata = make([]BatchJobKV, zb0003)
}
for za0002 := range z.Metadata {
bts, err = z.Metadata[za0002].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Metadata", za0002)
return
}
}
case "Size":
bts, err = z.Size.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
case "Type":
z.Type, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
case "Name":
z.Name, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Purge":
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Purge")
return
}
for zb0004 > 0 {
zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Purge")
return
}
switch msgp.UnsafeString(field) {
case "RetainVersions":
z.Purge.RetainVersions, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Purge", "RetainVersions")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Purge")
return
}
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobExpireFilter) Msgsize() (s int) {
s = 1 + 10 + z.OlderThan.Msgsize() + 14
if z.CreatedBefore == nil {
s += msgp.NilSize
} else {
s += msgp.TimeSize
}
s += 5 + msgp.ArrayHeaderSize
for za0001 := range z.Tags {
s += z.Tags[za0001].Msgsize()
}
s += 9 + msgp.ArrayHeaderSize
for za0002 := range z.Metadata {
s += z.Metadata[za0002].Msgsize()
}
s += 5 + z.Size.Msgsize() + 5 + msgp.StringPrefixSize + len(z.Type) + 5 + msgp.StringPrefixSize + len(z.Name) + 6 + 1 + 15 + msgp.IntSize
return
}
// DecodeMsg implements msgp.Decodable
func (z *BatchJobExpirePurge) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "RetainVersions":
z.RetainVersions, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "RetainVersions")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z BatchJobExpirePurge) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "RetainVersions"
err = en.Append(0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.RetainVersions)
if err != nil {
err = msgp.WrapError(err, "RetainVersions")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z BatchJobExpirePurge) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "RetainVersions"
o = append(o, 0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
o = msgp.AppendInt(o, z.RetainVersions)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobExpirePurge) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "RetainVersions":
z.RetainVersions, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "RetainVersions")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z BatchJobExpirePurge) Msgsize() (s int) {
s = 1 + 15 + msgp.IntSize
return
}


@ -0,0 +1,349 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalBatchJobExpire(t *testing.T) {
v := BatchJobExpire{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobExpire(b *testing.B) {
v := BatchJobExpire{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobExpire(b *testing.B) {
v := BatchJobExpire{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobExpire(b *testing.B) {
v := BatchJobExpire{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobExpire(t *testing.T) {
v := BatchJobExpire{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobExpire Msgsize() is inaccurate")
}
vn := BatchJobExpire{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobExpire(b *testing.B) {
v := BatchJobExpire{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobExpire(b *testing.B) {
v := BatchJobExpire{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBatchJobExpireFilter(t *testing.T) {
v := BatchJobExpireFilter{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobExpireFilter(b *testing.B) {
v := BatchJobExpireFilter{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobExpireFilter(b *testing.B) {
v := BatchJobExpireFilter{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobExpireFilter(b *testing.B) {
v := BatchJobExpireFilter{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobExpireFilter(t *testing.T) {
v := BatchJobExpireFilter{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobExpireFilter Msgsize() is inaccurate")
}
vn := BatchJobExpireFilter{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobExpireFilter(b *testing.B) {
v := BatchJobExpireFilter{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobExpireFilter(b *testing.B) {
v := BatchJobExpireFilter{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBatchJobExpirePurge(t *testing.T) {
v := BatchJobExpirePurge{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobExpirePurge(b *testing.B) {
v := BatchJobExpirePurge{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobExpirePurge(b *testing.B) {
v := BatchJobExpirePurge{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobExpirePurge(b *testing.B) {
v := BatchJobExpirePurge{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobExpirePurge(t *testing.T) {
v := BatchJobExpirePurge{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobExpirePurge Msgsize() is inaccurate")
}
vn := BatchJobExpirePurge{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobExpirePurge(b *testing.B) {
v := BatchJobExpirePurge{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobExpirePurge(b *testing.B) {
v := BatchJobExpirePurge{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

cmd/batch-expire_test.go Normal file

@ -0,0 +1,125 @@
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"slices"
"testing"
"gopkg.in/yaml.v3"
)
func TestParseBatchJobExpire(t *testing.T) {
expireYaml := `
expire: # Expire objects that match a condition
apiVersion: v1
bucket: mybucket # Bucket where this batch job will expire matching objects from
prefix: myprefix # (Optional) Prefix under which this job will expire objects matching the rules below.
rules:
- type: object # regular objects with zero or more older versions
name: NAME # match object names that satisfy the wildcard expression.
olderThan: 7d10h # match objects older than this value
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
tags:
- key: name
value: pick* # match objects with tag 'name', all values starting with 'pick'
metadata:
- key: content-type
value: image/* # match objects with 'content-type', all values starting with 'image/'
size:
lessThan: "10MiB" # match objects with size less than this value (e.g. 10MiB)
greaterThan: 1MiB # match objects with size greater than this value (e.g. 1MiB)
purge:
# retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
# retainVersions: 5 # keep the latest 5 versions of the object.
- type: deleted # objects with delete marker as their latest version
name: NAME # match object names that satisfy the wildcard expression.
olderThan: 10h # match objects older than this value (e.g. 7d10h31s)
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
purge:
# retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
# retainVersions: 5 # keep the latest 5 versions of the object including delete markers.
notify:
endpoint: https://notify.endpoint # notification endpoint to receive job completion status
token: Bearer xxxxx # optional authentication token for the notification endpoint
retry:
attempts: 10 # number of retries for the job before giving up
delay: 500ms # least amount of delay between each retry
`
var job BatchJobRequest
err := yaml.Unmarshal([]byte(expireYaml), &job)
if err != nil {
t.Fatal("Failed to parse batch-job-expire yaml", err)
}
if !slices.Equal(job.Expire.Prefix.F(), []string{"myprefix"}) {
t.Fatal("Failed to parse batch-job-expire yaml")
}
multiPrefixExpireYaml := `
expire: # Expire objects that match a condition
apiVersion: v1
bucket: mybucket # Bucket where this batch job will expire matching objects from
prefix: # (Optional) Prefix under which this job will expire objects matching the rules below.
- myprefix
- myprefix1
rules:
- type: object # regular objects with zero or more older versions
name: NAME # match object names that satisfy the wildcard expression.
olderThan: 7d10h # match objects older than this value
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
tags:
- key: name
value: pick* # match objects with tag 'name', all values starting with 'pick'
metadata:
- key: content-type
value: image/* # match objects with 'content-type', all values starting with 'image/'
size:
lessThan: "10MiB" # match objects with size less than this value (e.g. 10MiB)
greaterThan: 1MiB # match objects with size greater than this value (e.g. 1MiB)
purge:
# retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
# retainVersions: 5 # keep the latest 5 versions of the object.
- type: deleted # objects with delete marker as their latest version
name: NAME # match object names that satisfy the wildcard expression.
olderThan: 10h # match objects older than this value (e.g. 7d10h31s)
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
purge:
# retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
# retainVersions: 5 # keep the latest 5 versions of the object including delete markers.
notify:
endpoint: https://notify.endpoint # notification endpoint to receive job completion status
token: Bearer xxxxx # optional authentication token for the notification endpoint
retry:
attempts: 10 # number of retries for the job before giving up
delay: 500ms # least amount of delay between each retry
`
var multiPrefixJob BatchJobRequest
err = yaml.Unmarshal([]byte(multiPrefixExpireYaml), &multiPrefixJob)
if err != nil {
t.Fatal("Failed to parse batch-job-expire yaml", err)
}
if !slices.Equal(multiPrefixJob.Expire.Prefix.F(), []string{"myprefix", "myprefix1"}) {
t.Fatal("Failed to parse batch-job-expire yaml")
}
}

cmd/batch-handlers.go Normal file

File diff suppressed because it is too large

cmd/batch-handlers_gen.go Normal file

@ -0,0 +1,952 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *BatchJobPrefix) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
if cap((*z)) >= int(zb0002) {
(*z) = (*z)[:zb0002]
} else {
(*z) = make(BatchJobPrefix, zb0002)
}
for zb0001 := range *z {
(*z)[zb0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z BatchJobPrefix) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteArrayHeader(uint32(len(z)))
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0003 := range z {
err = en.WriteString(z[zb0003])
if err != nil {
err = msgp.WrapError(err, zb0003)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z BatchJobPrefix) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendArrayHeader(o, uint32(len(z)))
for zb0003 := range z {
o = msgp.AppendString(o, z[zb0003])
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobPrefix) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if cap((*z)) >= int(zb0002) {
(*z) = (*z)[:zb0002]
} else {
(*z) = make(BatchJobPrefix, zb0002)
}
for zb0001 := range *z {
(*z)[zb0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z BatchJobPrefix) Msgsize() (s int) {
s = msgp.ArrayHeaderSize
for zb0003 := range z {
s += msgp.StringPrefixSize + len(z[zb0003])
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *BatchJobRequest) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "User":
z.User, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "User")
return
}
case "Started":
z.Started, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "Replicate":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Replicate")
return
}
z.Replicate = nil
} else {
if z.Replicate == nil {
z.Replicate = new(BatchJobReplicateV1)
}
err = z.Replicate.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Replicate")
return
}
}
case "KeyRotate":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "KeyRotate")
return
}
z.KeyRotate = nil
} else {
if z.KeyRotate == nil {
z.KeyRotate = new(BatchJobKeyRotateV1)
}
err = z.KeyRotate.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "KeyRotate")
return
}
}
case "Expire":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Expire")
return
}
z.Expire = nil
} else {
if z.Expire == nil {
z.Expire = new(BatchJobExpire)
}
err = z.Expire.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Expire")
return
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BatchJobRequest) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 6
// write "ID"
err = en.Append(0x86, 0xa2, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ID)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
// write "User"
err = en.Append(0xa4, 0x55, 0x73, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteString(z.User)
if err != nil {
err = msgp.WrapError(err, "User")
return
}
// write "Started"
err = en.Append(0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.Started)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
// write "Replicate"
err = en.Append(0xa9, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65)
if err != nil {
return
}
if z.Replicate == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Replicate.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Replicate")
return
}
}
// write "KeyRotate"
err = en.Append(0xa9, 0x4b, 0x65, 0x79, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65)
if err != nil {
return
}
if z.KeyRotate == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.KeyRotate.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "KeyRotate")
return
}
}
// write "Expire"
err = en.Append(0xa6, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65)
if err != nil {
return
}
if z.Expire == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Expire.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Expire")
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BatchJobRequest) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 6
// string "ID"
o = append(o, 0x86, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "User"
o = append(o, 0xa4, 0x55, 0x73, 0x65, 0x72)
o = msgp.AppendString(o, z.User)
// string "Started"
o = append(o, 0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.Started)
// string "Replicate"
o = append(o, 0xa9, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65)
if z.Replicate == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Replicate.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Replicate")
return
}
}
// string "KeyRotate"
o = append(o, 0xa9, 0x4b, 0x65, 0x79, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65)
if z.KeyRotate == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.KeyRotate.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "KeyRotate")
return
}
}
// string "Expire"
o = append(o, 0xa6, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65)
if z.Expire == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Expire.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Expire")
return
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobRequest) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "User":
z.User, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "User")
return
}
case "Started":
z.Started, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "Replicate":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Replicate = nil
} else {
if z.Replicate == nil {
z.Replicate = new(BatchJobReplicateV1)
}
bts, err = z.Replicate.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Replicate")
return
}
}
case "KeyRotate":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.KeyRotate = nil
} else {
if z.KeyRotate == nil {
z.KeyRotate = new(BatchJobKeyRotateV1)
}
bts, err = z.KeyRotate.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "KeyRotate")
return
}
}
case "Expire":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Expire = nil
} else {
if z.Expire == nil {
z.Expire = new(BatchJobExpire)
}
bts, err = z.Expire.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Expire")
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobRequest) Msgsize() (s int) {
s = 1 + 3 + msgp.StringPrefixSize + len(z.ID) + 5 + msgp.StringPrefixSize + len(z.User) + 8 + msgp.TimeSize + 10
if z.Replicate == nil {
s += msgp.NilSize
} else {
s += z.Replicate.Msgsize()
}
s += 10
if z.KeyRotate == nil {
s += msgp.NilSize
} else {
s += z.KeyRotate.Msgsize()
}
s += 7
if z.Expire == nil {
s += msgp.NilSize
} else {
s += z.Expire.Msgsize()
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "v":
z.Version, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Version")
return
}
case "jid":
z.JobID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "JobID")
return
}
case "jt":
z.JobType, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "JobType")
return
}
case "st":
z.StartTime, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
case "lu":
z.LastUpdate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "ra":
z.RetryAttempts, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "at":
z.Attempts, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Attempts")
return
}
case "cmp":
z.Complete, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Complete")
return
}
case "fld":
z.Failed, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Failed")
return
}
case "lbkt":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "lobj":
z.Object, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "ob":
z.Objects, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
case "dm":
z.DeleteMarkers, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers")
return
}
case "obf":
z.ObjectsFailed, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "dmf":
z.DeleteMarkersFailed, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "DeleteMarkersFailed")
return
}
case "bt":
z.BytesTransferred, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "BytesTransferred")
return
}
case "bf":
z.BytesFailed, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 17
// write "v"
err = en.Append(0xde, 0x0, 0x11, 0xa1, 0x76)
if err != nil {
return
}
err = en.WriteInt(z.Version)
if err != nil {
err = msgp.WrapError(err, "Version")
return
}
// write "jid"
err = en.Append(0xa3, 0x6a, 0x69, 0x64)
if err != nil {
return
}
err = en.WriteString(z.JobID)
if err != nil {
err = msgp.WrapError(err, "JobID")
return
}
// write "jt"
err = en.Append(0xa2, 0x6a, 0x74)
if err != nil {
return
}
err = en.WriteString(z.JobType)
if err != nil {
err = msgp.WrapError(err, "JobType")
return
}
// write "st"
err = en.Append(0xa2, 0x73, 0x74)
if err != nil {
return
}
err = en.WriteTime(z.StartTime)
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
// write "lu"
err = en.Append(0xa2, 0x6c, 0x75)
if err != nil {
return
}
err = en.WriteTime(z.LastUpdate)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
// write "ra"
err = en.Append(0xa2, 0x72, 0x61)
if err != nil {
return
}
err = en.WriteInt(z.RetryAttempts)
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
// write "at"
err = en.Append(0xa2, 0x61, 0x74)
if err != nil {
return
}
err = en.WriteInt(z.Attempts)
if err != nil {
err = msgp.WrapError(err, "Attempts")
return
}
// write "cmp"
err = en.Append(0xa3, 0x63, 0x6d, 0x70)
if err != nil {
return
}
err = en.WriteBool(z.Complete)
if err != nil {
err = msgp.WrapError(err, "Complete")
return
}
// write "fld"
err = en.Append(0xa3, 0x66, 0x6c, 0x64)
if err != nil {
return
}
err = en.WriteBool(z.Failed)
if err != nil {
err = msgp.WrapError(err, "Failed")
return
}
// write "lbkt"
err = en.Append(0xa4, 0x6c, 0x62, 0x6b, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "lobj"
err = en.Append(0xa4, 0x6c, 0x6f, 0x62, 0x6a)
if err != nil {
return
}
err = en.WriteString(z.Object)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
// write "ob"
err = en.Append(0xa2, 0x6f, 0x62)
if err != nil {
return
}
err = en.WriteInt64(z.Objects)
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
// write "dm"
err = en.Append(0xa2, 0x64, 0x6d)
if err != nil {
return
}
err = en.WriteInt64(z.DeleteMarkers)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers")
return
}
// write "obf"
err = en.Append(0xa3, 0x6f, 0x62, 0x66)
if err != nil {
return
}
err = en.WriteInt64(z.ObjectsFailed)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
// write "dmf"
err = en.Append(0xa3, 0x64, 0x6d, 0x66)
if err != nil {
return
}
err = en.WriteInt64(z.DeleteMarkersFailed)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkersFailed")
return
}
// write "bt"
err = en.Append(0xa2, 0x62, 0x74)
if err != nil {
return
}
err = en.WriteInt64(z.BytesTransferred)
if err != nil {
err = msgp.WrapError(err, "BytesTransferred")
return
}
// write "bf"
err = en.Append(0xa2, 0x62, 0x66)
if err != nil {
return
}
err = en.WriteInt64(z.BytesFailed)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 17
// string "v"
o = append(o, 0xde, 0x0, 0x11, 0xa1, 0x76)
o = msgp.AppendInt(o, z.Version)
// string "jid"
o = append(o, 0xa3, 0x6a, 0x69, 0x64)
o = msgp.AppendString(o, z.JobID)
// string "jt"
o = append(o, 0xa2, 0x6a, 0x74)
o = msgp.AppendString(o, z.JobType)
// string "st"
o = append(o, 0xa2, 0x73, 0x74)
o = msgp.AppendTime(o, z.StartTime)
// string "lu"
o = append(o, 0xa2, 0x6c, 0x75)
o = msgp.AppendTime(o, z.LastUpdate)
// string "ra"
o = append(o, 0xa2, 0x72, 0x61)
o = msgp.AppendInt(o, z.RetryAttempts)
// string "at"
o = append(o, 0xa2, 0x61, 0x74)
o = msgp.AppendInt(o, z.Attempts)
// string "cmp"
o = append(o, 0xa3, 0x63, 0x6d, 0x70)
o = msgp.AppendBool(o, z.Complete)
// string "fld"
o = append(o, 0xa3, 0x66, 0x6c, 0x64)
o = msgp.AppendBool(o, z.Failed)
// string "lbkt"
o = append(o, 0xa4, 0x6c, 0x62, 0x6b, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "lobj"
o = append(o, 0xa4, 0x6c, 0x6f, 0x62, 0x6a)
o = msgp.AppendString(o, z.Object)
// string "ob"
o = append(o, 0xa2, 0x6f, 0x62)
o = msgp.AppendInt64(o, z.Objects)
// string "dm"
o = append(o, 0xa2, 0x64, 0x6d)
o = msgp.AppendInt64(o, z.DeleteMarkers)
// string "obf"
o = append(o, 0xa3, 0x6f, 0x62, 0x66)
o = msgp.AppendInt64(o, z.ObjectsFailed)
// string "dmf"
o = append(o, 0xa3, 0x64, 0x6d, 0x66)
o = msgp.AppendInt64(o, z.DeleteMarkersFailed)
// string "bt"
o = append(o, 0xa2, 0x62, 0x74)
o = msgp.AppendInt64(o, z.BytesTransferred)
// string "bf"
o = append(o, 0xa2, 0x62, 0x66)
o = msgp.AppendInt64(o, z.BytesFailed)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "v":
z.Version, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Version")
return
}
case "jid":
z.JobID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "JobID")
return
}
case "jt":
z.JobType, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "JobType")
return
}
case "st":
z.StartTime, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
case "lu":
z.LastUpdate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "ra":
z.RetryAttempts, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "at":
z.Attempts, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Attempts")
return
}
case "cmp":
z.Complete, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Complete")
return
}
case "fld":
z.Failed, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Failed")
return
}
case "lbkt":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "lobj":
z.Object, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "ob":
z.Objects, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
case "dm":
z.DeleteMarkers, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers")
return
}
case "obf":
z.ObjectsFailed, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "dmf":
z.DeleteMarkersFailed, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkersFailed")
return
}
case "bt":
z.BytesTransferred, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesTransferred")
return
}
case "bf":
z.BytesFailed, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *batchJobInfo) Msgsize() (s int) {
s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size
return
}

View File

@ -0,0 +1,349 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalBatchJobPrefix(t *testing.T) {
v := BatchJobPrefix{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobPrefix(b *testing.B) {
v := BatchJobPrefix{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobPrefix(b *testing.B) {
v := BatchJobPrefix{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobPrefix(b *testing.B) {
v := BatchJobPrefix{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobPrefix(t *testing.T) {
v := BatchJobPrefix{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobPrefix Msgsize() is inaccurate")
}
vn := BatchJobPrefix{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobPrefix(b *testing.B) {
v := BatchJobPrefix{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobPrefix(b *testing.B) {
v := BatchJobPrefix{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBatchJobRequest(t *testing.T) {
v := BatchJobRequest{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobRequest(b *testing.B) {
v := BatchJobRequest{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobRequest(b *testing.B) {
v := BatchJobRequest{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobRequest(b *testing.B) {
v := BatchJobRequest{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobRequest(t *testing.T) {
v := BatchJobRequest{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobRequest Msgsize() is inaccurate")
}
vn := BatchJobRequest{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobRequest(b *testing.B) {
v := BatchJobRequest{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobRequest(b *testing.B) {
v := BatchJobRequest{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalbatchJobInfo(t *testing.T) {
v := batchJobInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgbatchJobInfo(b *testing.B) {
v := batchJobInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgbatchJobInfo(b *testing.B) {
v := batchJobInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalbatchJobInfo(b *testing.B) {
v := batchJobInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodebatchJobInfo(t *testing.T) {
v := batchJobInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodebatchJobInfo Msgsize() is inaccurate")
}
vn := batchJobInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodebatchJobInfo(b *testing.B) {
v := batchJobInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodebatchJobInfo(b *testing.B) {
v := batchJobInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

View File

@ -0,0 +1,75 @@
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"slices"
"testing"
"gopkg.in/yaml.v3"
)
func TestBatchJobPrefix_UnmarshalYAML(t *testing.T) {
type args struct {
yamlStr string
}
type PrefixTemp struct {
Prefix BatchJobPrefix `yaml:"prefix"`
}
tests := []struct {
name string
b PrefixTemp
args args
want []string
wantErr bool
}{
{
name: "test1",
b: PrefixTemp{},
args: args{
yamlStr: `
prefix: "foo"
`,
},
want: []string{"foo"},
wantErr: false,
},
{
name: "test2",
b: PrefixTemp{},
args: args{
yamlStr: `
prefix:
- "foo"
- "bar"
`,
},
want: []string{"foo", "bar"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := yaml.Unmarshal([]byte(tt.args.yamlStr), &tt.b); (err != nil) != tt.wantErr {
t.Errorf("UnmarshalYAML() error = %v, wantErr %v", err, tt.wantErr)
}
if !slices.Equal(tt.b.Prefix.F(), tt.want) {
t.Errorf("UnmarshalYAML() = %v, want %v", tt.b.Prefix.F(), tt.want)
}
})
}
}

View File

@ -0,0 +1,290 @@
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"strings"
"time"
"github.com/dustin/go-humanize"
"github.com/minio/pkg/v3/wildcard"
"gopkg.in/yaml.v3"
)
//go:generate msgp -file $GOFILE
//msgp:ignore BatchJobYamlErr
// BatchJobYamlErr can be used to return yaml validation errors with line and
// column information, guiding the user to fix syntax errors
type BatchJobYamlErr struct {
line, col int
msg string
}
// message returns the error message excluding line, col information.
// Intended to be used in unit tests.
func (b BatchJobYamlErr) message() string {
return b.msg
}
// Error implements Error interface
func (b BatchJobYamlErr) Error() string {
return fmt.Sprintf("%s\n Hint: error near line: %d, col: %d", b.msg, b.line, b.col)
}
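// For example (illustrative values only), a validation failure such as
// BatchJobYamlErr{line: 12, col: 3, msg: "key can't be empty"} renders through
// Error() as:
//
//	key can't be empty
//	 Hint: error near line: 12, col: 3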
// BatchJobKV is a key-value data type which supports wildcard matching
type BatchJobKV struct {
line, col int
Key string `yaml:"key" json:"key"`
Value string `yaml:"value" json:"value"`
}
var _ yaml.Unmarshaler = &BatchJobKV{}
// UnmarshalYAML - BatchJobKV extends default unmarshal to extract line, col information.
func (kv *BatchJobKV) UnmarshalYAML(val *yaml.Node) error {
type jobKV BatchJobKV
var tmp jobKV
err := val.Decode(&tmp)
if err != nil {
return err
}
*kv = BatchJobKV(tmp)
kv.line, kv.col = val.Line, val.Column
return nil
}
// Validate returns an error if key is empty
func (kv BatchJobKV) Validate() error {
if kv.Key == "" {
return BatchJobYamlErr{
line: kv.line,
col: kv.col,
msg: "key can't be empty",
}
}
return nil
}
// Empty indicates if kv is not set
func (kv BatchJobKV) Empty() bool {
return kv.Key == "" && kv.Value == ""
}
// Match reports whether the input ikv matches kv; the value is wildcard matched depending on the user input
func (kv BatchJobKV) Match(ikv BatchJobKV) bool {
if kv.Empty() {
return true
}
if strings.EqualFold(kv.Key, ikv.Key) {
return wildcard.Match(kv.Value, ikv.Value)
}
return false
}
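// For example, a job filter of {Key: "classification", Value: "confidential*"}
// matches an object tag {Key: "Classification", Value: "confidential-2024"}:
// keys compare case-insensitively and the value is wildcard matched. The
// values below are illustrative only.
//
//	filter := BatchJobKV{Key: "classification", Value: "confidential*"}
//	tag := BatchJobKV{Key: "Classification", Value: "confidential-2024"}
//	_ = filter.Match(tag) // true; an empty filter matches everything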
// BatchJobNotification stores notification endpoint and token information.
// Used by batch jobs to send notifications about their status.
type BatchJobNotification struct {
line, col int
Endpoint string `yaml:"endpoint" json:"endpoint"`
Token string `yaml:"token" json:"token"`
}
var _ yaml.Unmarshaler = &BatchJobNotification{}
// UnmarshalYAML - BatchJobNotification extends unmarshal to extract line, column information
func (b *BatchJobNotification) UnmarshalYAML(val *yaml.Node) error {
type notification BatchJobNotification
var tmp notification
err := val.Decode(&tmp)
if err != nil {
return err
}
*b = BatchJobNotification(tmp)
b.line, b.col = val.Line, val.Column
return nil
}
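// An illustrative stanza; the enclosing "notify" key is an assumption for the
// example, while the endpoint/token field names come from the struct tags above:
//
//	notify:
//	  endpoint: "https://webhook.example.net/minio/batch"
//	  token: "Bearer <token>"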
// BatchJobRetry stores retry configuration used in the event of failures.
type BatchJobRetry struct {
line, col int
Attempts int `yaml:"attempts" json:"attempts"` // number of retry attempts
Delay time.Duration `yaml:"delay" json:"delay"` // delay between retries
}
var _ yaml.Unmarshaler = &BatchJobRetry{}
// UnmarshalYAML - BatchJobRetry extends unmarshal to extract line, column information
func (r *BatchJobRetry) UnmarshalYAML(val *yaml.Node) error {
type retry BatchJobRetry
var tmp retry
err := val.Decode(&tmp)
if err != nil {
return err
}
*r = BatchJobRetry(tmp)
r.line, r.col = val.Line, val.Column
return nil
}
// Validate validates the retry configuration.
func (r BatchJobRetry) Validate() error {
if r.Attempts < 0 {
return BatchJobYamlErr{
line: r.line,
col: r.col,
msg: "Invalid arguments specified",
}
}
if r.Delay < 0 {
return BatchJobYamlErr{
line: r.line,
col: r.col,
msg: "Invalid arguments specified",
}
}
return nil
}
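// A minimal usage sketch of the rules above (illustrative values): attempts and
// delay may be zero but never negative.
//
//	r := BatchJobRetry{Attempts: 3, Delay: 250 * time.Millisecond}
//	_ = r.Validate() // nil
//	_ = BatchJobRetry{Attempts: -1}.Validate() // BatchJobYamlErr: "Invalid arguments specified"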
// # snowball based archive transfer is by default enabled when source
// # is local and target is remote which is also minio.
// snowball:
// disable: false # optionally turn off snowball archive transfer
// batch: 100 # up to this many objects per archive
// inmemory: true # indicates if the archive must be staged locally or in-memory
// compress: true # S2/Snappy compressed archive
// smallerThan: 5MiB # create archive for all objects smaller than 5MiB
// skipErrs: false # skips any source-side read() errors
// BatchJobSnowball describes the snowball feature when replicating objects from a local source to a remote target
type BatchJobSnowball struct {
line, col int
Disable *bool `yaml:"disable" json:"disable"`
Batch *int `yaml:"batch" json:"batch"`
InMemory *bool `yaml:"inmemory" json:"inmemory"`
Compress *bool `yaml:"compress" json:"compress"`
SmallerThan *string `yaml:"smallerThan" json:"smallerThan"`
SkipErrs *bool `yaml:"skipErrs" json:"skipErrs"`
}
var _ yaml.Unmarshaler = &BatchJobSnowball{}
// UnmarshalYAML - BatchJobSnowball extends unmarshal to extract line, column information
func (b *BatchJobSnowball) UnmarshalYAML(val *yaml.Node) error {
type snowball BatchJobSnowball
var tmp snowball
err := val.Decode(&tmp)
if err != nil {
return err
}
*b = BatchJobSnowball(tmp)
b.line, b.col = val.Line, val.Column
return nil
}
// Validate the snowball parameters in the job description
func (b BatchJobSnowball) Validate() error {
if *b.Batch <= 0 {
return BatchJobYamlErr{
line: b.line,
col: b.col,
msg: "batch number should be non positive zero",
}
}
_, err := humanize.ParseBytes(*b.SmallerThan)
if err != nil {
return BatchJobYamlErr{
line: b.line,
col: b.col,
msg: err.Error(),
}
}
return nil
}
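// Note that Validate dereferences Batch and SmallerThan without nil checks, so
// callers are expected to have filled in defaults for both pointers first.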
// BatchJobSizeFilter supports size based filters - LesserThan and GreaterThan
type BatchJobSizeFilter struct {
line, col int
UpperBound BatchJobSize `yaml:"lessThan" json:"lessThan"`
LowerBound BatchJobSize `yaml:"greaterThan" json:"greaterThan"`
}
// UnmarshalYAML - BatchJobSizeFilter extends unmarshal to extract line, column information
func (sf *BatchJobSizeFilter) UnmarshalYAML(val *yaml.Node) error {
type sizeFilter BatchJobSizeFilter
var tmp sizeFilter
err := val.Decode(&tmp)
if err != nil {
return err
}
*sf = BatchJobSizeFilter(tmp)
sf.line, sf.col = val.Line, val.Column
return nil
}
// InRange returns true in the following cases and false otherwise:
// - sz >= sf.LowerBound, when only sf.LowerBound is specified
// - sz <= sf.UpperBound, when only sf.UpperBound is specified
// - sf.LowerBound <= sz <= sf.UpperBound, when both are specified
func (sf BatchJobSizeFilter) InRange(sz int64) bool {
if sf.UpperBound > 0 && sz > int64(sf.UpperBound) {
return false
}
if sf.LowerBound > 0 && sz < int64(sf.LowerBound) {
return false
}
return true
}
// Validate checks if sf is a valid batch-job size filter
func (sf BatchJobSizeFilter) Validate() error {
if sf.LowerBound > 0 && sf.UpperBound > 0 && sf.LowerBound >= sf.UpperBound {
return BatchJobYamlErr{
line: sf.line,
col: sf.col,
msg: "invalid batch-job size filter",
}
}
return nil
}
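// An illustrative filter; the enclosing "size" key is an assumption for the
// example, while lessThan/greaterThan come from the struct tags above:
//
//	size:
//	  lessThan: "10MiB"
//	  greaterThan: "1MiB"
//
// With that filter, InRange(5<<20) is true while InRange(64<<10) is false.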
// BatchJobSize supports humanized byte values in yaml files.
type BatchJobSize int64
// UnmarshalYAML to parse humanized byte values
func (s *BatchJobSize) UnmarshalYAML(unmarshal func(interface{}) error) error {
var batchExpireSz string
err := unmarshal(&batchExpireSz)
if err != nil {
return err
}
sz, err := humanize.ParseBytes(batchExpireSz)
if err != nil {
return err
}
*s = BatchJobSize(sz)
return nil
}
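// Values accepted here are whatever humanize.ParseBytes understands, covering
// both IEC and SI suffixes: for example "5MiB" parses to 5242880 bytes and
// "1GB" to 1000000000.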

Some files were not shown because too many files have changed in this diff