diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index beb9a81c6..9e93efba4 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -24,17 +24,7 @@ jobs: sudo chown -R "$USER" "$HOME/.kube" "$HOME/.minikube" sudo usermod -aG docker "$USER" - run: | - mkdir -p certs - openssl req -x509 -newkey rsa:4096 -keyout certs/key.pem -out certs/cert.pem -days 365 -subj '/CN=localhost' -nodes -addext 'subjectAltName = DNS:localhost' - docker run -d --restart=always --name registry -v "$(pwd)"/certs:/certs -e REGISTRY_HTTP_ADDR=0.0.0.0:443 -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/cert.pem \ - -e REGISTRY_HTTP_TLS_KEY=/certs/key.pem \ - -p 443:443 \ - registry:2 - sudo mkdir /etc/docker/certs.d - sudo mkdir /etc/docker/certs.d/localhost:443 - sudo cp certs/cert.pem /etc/docker/certs.d/localhost:443/ca.crt - sudo cp certs/cert.pem /usr/local/share/ca-certificates/ca.crt - sudo update-ca-certificates + "${GITHUB_WORKSPACE}/scripts/start_registry.sh" minikube-registry export DOCKER_REGISTRY_HOST=localhost:443 - run: | KUBECONFIG="$HOME/.kube/config" DOCKER_REGISTRY_HOST=localhost:443 make build e2e @@ -47,26 +37,7 @@ jobs: sudo apt-get -y update podman version - run: | - curl -sLo kind "$(curl -sL https://api.github.com/repos/kubernetes-sigs/kind/releases/latest | jq -r '[.assets[] | select(.name == "kind-linux-amd64")] | first | .browser_download_url')" - chmod +x kind - sudo mv kind /bin/ - - run: | - kind create cluster - kind export kubeconfig - sudo chown -R "$USER" "$HOME/.kube" - sudo usermod -aG docker "$USER" - - run: | - mkdir -p certs - openssl req -x509 -newkey rsa:4096 -keyout certs/key.pem -out certs/cert.pem -days 365 -subj '/CN=localhost' -nodes -addext 'subjectAltName = DNS:localhost' - docker run -d --restart=always --name registry -v "$(pwd)"/certs:/certs -e REGISTRY_HTTP_ADDR=0.0.0.0:443 -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/cert.pem \ - -e REGISTRY_HTTP_TLS_KEY=/certs/key.pem \ - -p 443:443 \ - registry:2 - sudo mkdir /etc/docker/certs.d - sudo mkdir /etc/docker/certs.d/localhost:443 - sudo cp certs/cert.pem /etc/docker/certs.d/localhost:443/ca.crt - sudo cp certs/cert.pem /usr/local/share/ca-certificates/ca.crt - sudo update-ca-certificates + "${GITHUB_WORKSPACE}/scripts/start_registry.sh" kind-registry export DOCKER_REGISTRY_HOST=localhost:443 - run: | - KUBECONFIG="$HOME/.kube/config" DOCKER_REGISTRY_HOST=localhost:443 make build e2e + KUBECONFIG="$HOME/.kube/config" DOCKER_REGISTRY_HOST=localhost:443 make build e2e CLUSTER=kind diff --git a/.gitignore b/.gitignore index 783056ef8..0dc911b82 100644 --- a/.gitignore +++ b/.gitignore @@ -462,3 +462,13 @@ pkg/apprclient/openapi/git_push.sh !test/**testdata/** /coverage.out + +# Test artifacts generated by e2e +test/e2e/bundle.Dockerfile +test/e2e/downloaded/** +test/e2e/opm-* +test/e2e/bundle_tmp* +test/e2e/index_tmp* + +# don't check in the certs directory +certs/* diff --git a/Makefile b/Makefile index 4fba84bc2..bd586974e 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,23 @@ PKG := github.com/operator-framework/operator-registry GIT_COMMIT := $(or $(SOURCE_GIT_COMMIT),$(shell git rev-parse --short HEAD)) OPM_VERSION := $(or $(SOURCE_GIT_TAG),$(shell git describe --always --tags HEAD)) BUILD_DATE := $(shell date -u +'%Y-%m-%dT%H:%M:%SZ') -TAGS := -tags "json1" + +# define characters +null := +space := $(null) # +comma := , +# default to json1 for sqlite3 +TAGS := -tags=json1 + +# Cluster to use for e2e testing +CLUSTER ?= "" +ifeq ($(CLUSTER), kind) +# add kind to the list of tags +TAGS += 
kind
+# convert tag format from space to comma list
+TAGS := $(subst $(space),$(comma),$(strip $(TAGS)))
+endif
+
 # -race is only supported on linux/amd64, linux/ppc64le, linux/arm64, freebsd/amd64, netbsd/amd64, darwin/amd64 and windows/amd64
 ifeq ($(shell go env GOARCH),s390x)
 TEST_RACE :=
@@ -14,7 +30,6 @@ else
 TEST_RACE := -race
 endif
-
 .PHONY: all
 all: clean test build
@@ -98,4 +113,4 @@ clean:
 
 .PHONY: e2e
 e2e:
-	$(GO) run github.com/onsi/ginkgo/ginkgo --v --randomizeAllSpecs --randomizeSuites --race $(TAGS) ./test/e2e
+	$(GO) run github.com/onsi/ginkgo/ginkgo --v --randomizeAllSpecs --randomizeSuites --race $(if $(TEST),-focus '$(TEST)') $(TAGS) ./test/e2e -- $(if $(SKIPTLS),-skip-tls true)
diff --git a/docs/contributors/e2e_tests.md b/docs/contributors/e2e_tests.md
new file mode 100644
index 000000000..5b683a768
--- /dev/null
+++ b/docs/contributors/e2e_tests.md
@@ -0,0 +1,143 @@
+# Execute end-to-end tests for local development
+
+To execute the end-to-end (e2e) test suite on your local development system, you have two options:
+
+1. Use a dynamically generated kind cluster. Each run of the test suite automatically starts a new kind cluster,
+which is torn down when the tests complete.
+
+1. Stand up your own minikube (or other) cluster and point the suite at it with a kubeconfig. This option is good for starting a cluster and keeping it
+running even after the test suite has completed.
+
+## Kind without SSL
+
+1. Start a registry server without using SSL
+
+   ```bash
+   ./scripts/start_registry.sh -s kind-registry
+   ```
+
+1. Start the e2e tests:
+
+   ```bash
+   DOCKER_REGISTRY_HOST=localhost:5000 make build e2e SKIPTLS="true" CLUSTER=kind
+   ```
+
+1. Run a specific BDD test using the `TEST` argument to make. Note that this argument is treated as a regular expression.
+
+   ```bash
+   DOCKER_REGISTRY_HOST=localhost:5000 make build e2e TEST='builds and manipulates bundle and index images' SKIPTLS="true" CLUSTER=kind
+   ```
+
+1. If you want a quick way to ensure that your `TEST` regex will work, you can bypass the
+Makefile and use `-dryRun` with `-focus` to see whether the regex would trigger your specific test(s).
+
+   ```bash
+   GOFLAGS="-mod=vendor" go run github.com/onsi/ginkgo/ginkgo --v --randomizeAllSpecs --randomizeSuites --race -dryRun -focus 'builds and manipulates bundle and index images' -tags=json1,kind ./test/e2e
+   ```
+
+## Kind with SSL
+
+1. Start a registry server with SSL (NOTE: an SSL CA root certificate will be installed and a server cert/key pair will be generated)
+
+   ```bash
+   ./scripts/start_registry.sh kind-registry
+   ```
+
+1. Start the e2e tests:
+
+   ```bash
+   DOCKER_REGISTRY_HOST=localhost:443 make build e2e CLUSTER=kind
+   ```
+
+1. Run a specific BDD test using the `TEST` argument to make. Note that this argument is treated as a regular expression.
+
+   ```bash
+   DOCKER_REGISTRY_HOST=localhost:443 make build e2e TEST='builds and manipulates bundle and index images' CLUSTER=kind
+   ```
+
+1. If you want a quick way to ensure that your `TEST` regex will work, you can bypass the
+Makefile and use `-dryRun` with `-focus` to see whether the regex would trigger your specific test(s).
+
+   ```bash
+   GOFLAGS="-mod=vendor" go run github.com/onsi/ginkgo/ginkgo --v --randomizeAllSpecs --randomizeSuites --race -dryRun -focus 'builds and manipulates bundle and index images' -tags=json1,kind ./test/e2e
+   ```
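+### Sanity check: is the registry serving?
+
+This quick check is not part of the test flow; it just confirms the registry container started by `start_registry.sh` is reachable over the Docker registry v2 API, assuming the default ports and cert location used above.
+
+```bash
+# TLS registry (started without -s): trust the generated cert explicitly
+curl --cacert certs/cert.pem https://localhost:443/v2/_catalog
+# non-TLS registry (started with -s)
+curl http://localhost:5000/v2/_catalog
+# either prints {"repositories":[]} until an image is pushed
+```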
+## Minikube (or other type) using kubeconfig without SSL
+
+1. Install `minikube` (see https://minikube.sigs.k8s.io/docs/start/)
+
+1. Create a minikube cluster
+
+   ```bash
+   minikube start
+   ```
+
+1. Start a registry server without using SSL
+
+   ```bash
+   ./scripts/start_registry.sh -s minikube-registry
+   ```
+
+1. Start the e2e tests:
+
+   ```bash
+   KUBECONFIG="$HOME/.kube/config" DOCKER_REGISTRY_HOST=localhost:5000 make build e2e SKIPTLS="true"
+   ```
+
+1. Run a specific BDD test using the `TEST` argument to make. Note that this argument is treated as a regular expression.
+
+   ```bash
+   KUBECONFIG="$HOME/.kube/config" DOCKER_REGISTRY_HOST=localhost:5000 make build e2e TEST='builds and manipulates bundle and index images' SKIPTLS="true"
+   ```
+
+1. If you want a quick way to ensure that your `TEST` regex will work, you can bypass the
+Makefile and use `-dryRun` with `-focus` to see whether the regex would trigger your specific test(s).
+
+   ```bash
+   GOFLAGS="-mod=vendor" go run github.com/onsi/ginkgo/ginkgo --v --randomizeAllSpecs --randomizeSuites --race -dryRun -focus 'builds and manipulates bundle and index images' -tags=json1 ./test/e2e
+   ```
+
+TIP: to reuse an existing (non-dynamic) `kind` cluster, run `kind get kubeconfig --name "kind" > /tmp/kindconfig` and set `KUBECONFIG="/tmp/kindconfig"`
+
+## Minikube (or other type) using kubeconfig with SSL
+
+1. Install `minikube` (see https://minikube.sigs.k8s.io/docs/start/)
+
+1. Create a minikube cluster
+
+   ```bash
+   minikube start
+   ```
+
+1. Start a registry server with SSL (NOTE: an SSL CA root certificate will be installed and a server cert/key pair will be generated)
+
+   ```bash
+   ./scripts/start_registry.sh minikube-registry
+   ```
+
+1. Start the e2e tests:
+
+   ```bash
+   KUBECONFIG="$HOME/.kube/config" DOCKER_REGISTRY_HOST=localhost:443 make build e2e
+   ```
+
+1. Run a specific BDD test using the `TEST` argument to make. Note that this argument is treated as a regular expression.
+
+   ```bash
+   KUBECONFIG="$HOME/.kube/config" DOCKER_REGISTRY_HOST=localhost:443 make build e2e TEST='builds and manipulates bundle and index images'
+   ```
+
+1. If you want a quick way to ensure that your `TEST` regex will work, you can bypass the
+Makefile and use `-dryRun` with `-focus` to see whether the regex would trigger your specific test(s).
+
+   ```bash
+   GOFLAGS="-mod=vendor" go run github.com/onsi/ginkgo/ginkgo --v --randomizeAllSpecs --randomizeSuites --race -dryRun -focus 'builds and manipulates bundle and index images' -tags=json1 ./test/e2e
+   ```
+
+TIP: to reuse an existing (non-dynamic) `kind` cluster, run `kind get kubeconfig --name "kind" > /tmp/kindconfig` and set `KUBECONFIG="/tmp/kindconfig"`
+
+## Known Limitations
+
+Currently the test case `Launch bundle` in test/e2e/bundle_image_test.go assumes that the `opm` executable used in the test is compiled for linux.
+If you run this test in a darwin environment, the kube job will not succeed unless you manually cross-compile for linux and include
+the binary at `bin/opm`.
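+
+For example, a cross-compile along the following lines produces a linux binary at the expected location. The exact flags are an assumption; they simply mirror the vendoring and `json1` build tag the Makefile already uses.
+
+```bash
+GOOS=linux GOARCH=amd64 GOFLAGS="-mod=vendor" go build -tags=json1 -o bin/opm ./cmd/opm
+```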
\ No newline at end of file diff --git a/go.mod b/go.mod index f6a8a71b9..747571602 100644 --- a/go.mod +++ b/go.mod @@ -58,6 +58,8 @@ require ( k8s.io/apimachinery v0.20.1 k8s.io/client-go v0.20.1 k8s.io/kubectl v0.20.0 + sigs.k8s.io/controller-runtime v0.8.0 + sigs.k8s.io/kind v0.10.0 ) replace ( diff --git a/go.sum b/go.sum index 7431436b2..0e39970bf 100644 --- a/go.sum +++ b/go.sum @@ -44,6 +44,7 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU= @@ -72,6 +73,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alessio/shellescape v1.2.2 h1:8LnL+ncxhWT2TR00dfJRT25JWWrhkMZXneHVWnetDZg= +github.com/alessio/shellescape v1.2.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -216,7 +219,10 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.1.0 h1:B0aXl1o/1cP8NbviYiBMkcHBtUjIJ1/Ccg6b+SwCLQg= +github.com/evanphx/json-patch/v5 v5.1.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -248,6 +254,7 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs= github.com/go-logr/logr v0.3.0/go.mod 
h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -438,6 +445,7 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -495,6 +503,7 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= @@ -583,6 +592,8 @@ github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= @@ -729,13 +740,16 @@ go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod 
h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -824,6 +838,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -895,6 +910,8 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200928205150-006507a75852/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1094,6 +1111,7 @@ k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJ k8s.io/apiextensions-apiserver v0.20.1 h1:ZrXQeslal+6zKM/HjDXLzThlz/vPSxrfK3OqL8txgVQ= k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk= k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= +k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.1 h1:LAhz8pKbgR8tUwn7boK+b2HZdt7MiTu2mkYtFMUjTRQ= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= @@ -1126,6 +1144,7 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= k8s.io/klog/v2 v2.4.0/go.mod 
h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kubectl v0.20.0 h1:q6HH6jILYi2lkzFqBhs63M4bKLxYlM0HpFJ///MgARA= @@ -1143,9 +1162,12 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyz sigs.k8s.io/controller-runtime v0.8.0 h1:s0dYdo7lQgJiAf+alP82PRwbz+oAqL3oSyMQ18XRDOc= sigs.k8s.io/controller-runtime v0.8.0/go.mod h1:v9Lbj5oX443uR7GXYY46E0EE2o7k2YxQ58GxVNeXSW4= sigs.k8s.io/controller-tools v0.4.1/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU= +sigs.k8s.io/kind v0.10.0 h1:Tm+QITIqdRd+efLOsxZHMAfLnr5K4e3/RH8MePspEXs= +sigs.k8s.io/kind v0.10.0/go.mod h1:fb32zUw7ewC47bPwLnwhf47wd/vADtv3c38KP7sjIlo= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/pkg/lib/bundle/exporter.go b/pkg/lib/bundle/exporter.go index 8a13ba479..53469f868 100644 --- a/pkg/lib/bundle/exporter.go +++ b/pkg/lib/bundle/exporter.go @@ -30,7 +30,7 @@ func NewExporterForBundle(image, directory string, containerTool containertools. 
} } -func (i *BundleExporter) Export() error { +func (i *BundleExporter) Export(skipTLS bool) error { log := logrus.WithField("img", i.image) @@ -44,11 +44,11 @@ func (i *BundleExporter) Export() error { var rerr error switch i.containerTool { case containertools.NoneTool: - reg, rerr = containerdregistry.NewRegistry(containerdregistry.WithLog(log), containerdregistry.WithCacheDir(filepath.Join(tmpDir, "cacheDir"))) + reg, rerr = containerdregistry.NewRegistry(containerdregistry.SkipTLS(skipTLS), containerdregistry.WithLog(log), containerdregistry.WithCacheDir(filepath.Join(tmpDir, "cacheDir"))) case containertools.PodmanTool: fallthrough case containertools.DockerTool: - reg, rerr = execregistry.NewRegistry(i.containerTool, log) + reg, rerr = execregistry.NewRegistry(i.containerTool, log, containertools.SkipTLS(skipTLS)) } if rerr != nil { return rerr diff --git a/pkg/lib/bundle/exporter_test.go b/pkg/lib/bundle/exporter_test.go new file mode 100644 index 000000000..7a3df6336 --- /dev/null +++ b/pkg/lib/bundle/exporter_test.go @@ -0,0 +1,18 @@ +package bundle + +import ( + "testing" + + "github.com/operator-framework/operator-registry/pkg/containertools" + "github.com/stretchr/testify/assert" +) + +func TestExportForBundleWithBadImage(t *testing.T) { + exporter := NewExporterForBundle("foo", "", containertools.DockerTool) + err := exporter.Export(true) + assert.Error(t, err) + + exporter = NewExporterForBundle("foo", "", containertools.NoneTool) + err = exporter.Export(true) + assert.Error(t, err) +} diff --git a/pkg/lib/bundle/testdata/validate/invalid_manifests_bundle/invalid_annotation_name/etcdbackups.etcd.database.coreos.com.crd.yaml b/pkg/lib/bundle/testdata/validate/invalid_manifests_bundle/invalid_annotation_name/etcdbackups.etcd.database.coreos.com.crd.yaml new file mode 100644 index 000000000..5afc088b9 --- /dev/null +++ b/pkg/lib/bundle/testdata/validate/invalid_manifests_bundle/invalid_annotation_name/etcdbackups.etcd.database.coreos.com.crd.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: etcdbackups.etcd.database.coreos.com +spec: + group: etcd.database.coreos.com + names: + kind: EtcdBackup + listKind: EtcdBackupList + plural: etcdbackups + singular: etcdbackup + scope: Namespaced + version: v1beta2 diff --git a/pkg/lib/bundle/testdata/validate/invalid_manifests_bundle/invalid_annotation_name/etcdclusters.etcd.database.coreos.com.crd.yaml b/pkg/lib/bundle/testdata/validate/invalid_manifests_bundle/invalid_annotation_name/etcdclusters.etcd.database.coreos.com.crd.yaml new file mode 100644 index 000000000..c0515b4de --- /dev/null +++ b/pkg/lib/bundle/testdata/validate/invalid_manifests_bundle/invalid_annotation_name/etcdclusters.etcd.database.coreos.com.crd.yaml @@ -0,0 +1,26 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: etcdclusters.etcd.database.coreos.com +spec: + group: etcd.database.coreos.com + names: + kind: EtcdCluster + listKind: EtcdClusterList + plural: etcdclusters + shortNames: + - etcdclus + - etcd + singular: etcdcluster + scope: Namespaced + version: v1beta2 + validation: + openAPIV3Schema: + properties: + spec: + type: object + properties: + test: + type: integer + minimum: 1 + maximum: 32767 diff --git a/pkg/lib/bundle/testdata/validate/invalid_manifests_bundle/invalid_annotation_name/etcdoperator.v0.9.4.clusterserviceversion.yaml 
b/pkg/lib/bundle/testdata/validate/invalid_manifests_bundle/invalid_annotation_name/etcdoperator.v0.9.4.clusterserviceversion.yaml new file mode 100644 index 000000000..636116370 --- /dev/null +++ b/pkg/lib/bundle/testdata/validate/invalid_manifests_bundle/invalid_annotation_name/etcdoperator.v0.9.4.clusterserviceversion.yaml @@ -0,0 +1,312 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: "[\n {\n \"apiVersion\": \"etcd.database.coreos.com/v1beta2\"\ + ,\n \"kind\": \"EtcdCluster\",\n \"metadata\": {\n \"name\": \"example\"\ + \n },\n \"spec\": {\n \"size\": 3,\n \"version\": \"3.2.13\"\ + \n }\n },\n {\n \"apiVersion\": \"etcd.database.coreos.com/v1beta2\"\ + ,\n \"kind\": \"EtcdRestore\",\n \"metadata\": {\n \"name\": \"example-etcd-cluster-restore\"\ + \n },\n \"spec\": {\n \"etcdCluster\": {\n \"name\": \"example-etcd-cluster\"\ + \n },\n \"backupStorageType\": \"S3\",\n \"s3\": {\n \"\ + path\": \"<full-s3-path>\",\n \"awsSecret\": \"<aws-secret>\"\n \ + \ }\n }\n },\n {\n \"apiVersion\": \"etcd.database.coreos.com/v1beta2\"\ + ,\n \"kind\": \"EtcdBackup\",\n \"metadata\": {\n \"name\": \"example-etcd-cluster-backup\"\ + \n },\n \"spec\": {\n \"etcdEndpoints\": [\"<etcd-cluster-endpoints>\"\ + ],\n \"storageType\":\"S3\",\n \"s3\": {\n \"path\": \"<full-s3-path>\"\ + ,\n \"awsSecret\": \"<aws-secret>\"\n }\n }\n }\n]\n" + capabilities: Full Lifecycle + categories: Database + containerImage: quay.io/coreos/etcd-operator@sha256:66a37fd61a06a43969854ee6d3e21087a98b93838e284a6086b13917f96b0d9b + createdAt: 2019-02-28 01:03:00 + description: Create and maintain highly-available etcd clusters on Kubernetes + repository: https://github.com/coreos/etcd-operator + tectonic-visibility: ocs + olm.operatorgroup: global-operators + olm.operatornamespace: openshift-operators + olm.skiprange: '>=4.1.0 <4.1.2' + name: etcdoperator.v0.9.4 + namespace: placeholder +spec: + customresourcedefinitions: + owned: + - description: Represents a cluster of etcd nodes. + displayName: etcd Cluster + kind: EtcdCluster + name: etcdclusters.etcd.database.coreos.com + resources: + - kind: Service + version: v1 + - kind: Pod + version: v1 + specDescriptors: + - description: The desired number of member Pods for the etcd cluster. + displayName: Size + path: size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podCount + - description: Limits describes the minimum/maximum amount of compute resources + required/allowed + displayName: Resource Requirements + path: pod.resources + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + statusDescriptors: + - description: The status of each of the member Pods for the etcd cluster. + displayName: Member Status + path: members + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: The service at which the running etcd cluster can be accessed. + displayName: Service + path: serviceName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Service + - description: The current size of the etcd cluster. + displayName: Cluster Size + path: size + - description: The current version of the etcd cluster. + displayName: Current Version + path: currentVersion + - description: The target version of the etcd cluster, after upgrading. + displayName: Target Version + path: targetVersion + - description: The current status of the etcd cluster. 
+ displayName: Status + path: phase + x-descriptors: + - urn:alm:descriptor:io.kubernetes.phase + - description: Explanation for the current status of the cluster. + displayName: Status Details + path: reason + x-descriptors: + - urn:alm:descriptor:io.kubernetes.phase:reason + version: v1beta2 + - description: Represents the intent to backup an etcd cluster. + displayName: etcd Backup + kind: EtcdBackup + name: etcdbackups.etcd.database.coreos.com + specDescriptors: + - description: Specifies the endpoints of an etcd cluster. + displayName: etcd Endpoint(s) + path: etcdEndpoints + x-descriptors: + - urn:alm:descriptor:etcd:endpoint + - description: The full AWS S3 path where the backup is saved. + displayName: S3 Path + path: s3.path + x-descriptors: + - urn:alm:descriptor:aws:s3:path + - description: The name of the secret object that stores the AWS credential + and config files. + displayName: AWS Secret + path: s3.awsSecret + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + statusDescriptors: + - description: Indicates if the backup was successful. + displayName: Succeeded + path: succeeded + x-descriptors: + - urn:alm:descriptor:text + - description: Indicates the reason for any backup related failures. + displayName: Reason + path: reason + x-descriptors: + - urn:alm:descriptor:io.kubernetes.phase:reason + version: v1beta2 + - description: Represents the intent to restore an etcd cluster from a backup. + displayName: etcd Restore + kind: EtcdRestore + name: etcdrestores.etcd.database.coreos.com + specDescriptors: + - description: References the EtcdCluster which should be restored, + displayName: etcd Cluster + path: etcdCluster.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:EtcdCluster + - urn:alm:descriptor:text + - description: The full AWS S3 path where the backup is saved. + displayName: S3 Path + path: s3.path + x-descriptors: + - urn:alm:descriptor:aws:s3:path + - description: The name of the secret object that stores the AWS credential + and config files. + displayName: AWS Secret + path: s3.awsSecret + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + statusDescriptors: + - description: Indicates if the restore was successful. + displayName: Succeeded + path: succeeded + x-descriptors: + - urn:alm:descriptor:text + - description: Indicates the reason for any restore related failures. + displayName: Reason + path: reason + x-descriptors: + - urn:alm:descriptor:io.kubernetes.phase:reason + version: v1beta2 + description: "The etcd Operater creates and maintains highly-available etcd clusters\ + \ on Kubernetes, allowing engineers to easily deploy and manage etcd clusters\ + \ for their applications.\n\netcd is a distributed key value store that provides\ + \ a reliable way to store data across a cluster of machines. It\xE2\u20AC\u2122\ + s open-source and available on GitHub. 
etcd gracefully handles leader elections\ + \ during network partitions and will tolerate machine failure, including the leader.\n\ + \n\n### Reading and writing to etcd\n\nCommunicate with etcd though its command\ + \ line utility `etcdctl` via port forwarding:\n\n $ kubectl --namespace default\ + \ port-forward service/example-client 2379:2379\n $ etcdctl --endpoints http://127.0.0.1:2379\ + \ get /\n\nOr directly to the API using the automatically generated Kubernetes\ + \ Service:\n\n $ etcdctl --endpoints http://example-client.default.svc:2379\ + \ get /\n\nBe sure to secure your etcd cluster (see Common Configurations) before\ + \ exposing it outside of the namespace or cluster.\n\n\n### Supported Features\n\ + \n* **High availability** - Multiple instances of etcd are networked together\ + \ and secured. Individual failures or networking issues are transparently handled\ + \ to keep your cluster up and running.\n\n* **Automated updates** - Rolling out\ + \ a new etcd version works like all Kubernetes rolling updates. Simply declare\ + \ the desired version, and the etcd service starts a safe rolling update to the\ + \ new version automatically.\n\n* **Backups included** - Create etcd backups and\ + \ restore them through the etcd Operator.\n\n### Common Configurations\n\n* **Configure\ + \ TLS** - Specify [static TLS certs](https://github.com/coreos/etcd-operator/blob/master/doc/user/cluster_tls.md)\ + \ as Kubernetes secrets.\n\n* **Set Node Selector and Affinity** - [Spread your\ + \ etcd Pods](https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-member-cluster-with-node-selector-and-anti-affinity-across-nodes)\ + \ across Nodes and availability zones.\n\n* **Set Resource Limits** - [Set the\ + \ Kubernetes limit and request](https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-member-cluster-with-resource-requirement)\ + \ values for your etcd Pods.\n\n* **Customize Storage** - [Set a custom StorageClass](https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#custom-persistentvolumeclaim-definition)\ + \ that you would like to use.\n" + displayName: etcd + icon: + - base64data: 
iVBORw0KGgoAAAANSUhEUgAAAOEAAADZCAYAAADWmle6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAEKlJREFUeNrsndt1GzkShmEev4sTgeiHfRYdgVqbgOgITEVgOgLTEQydwIiKwFQCayoCU6+7DyYjsBiBFyVVz7RkXvqCSxXw/+f04XjGQ6IL+FBVuL769euXgZ7r39f/G9iP0X+u/jWDNZzZdGI/Ftama1jjuV4BwmcNpbAf1Fgu+V/9YRvNAyzT2a59+/GT/3hnn5m16wKWedJrmOCxkYztx9Q+py/+E0GJxtJdReWfz+mxNt+QzS2Mc0AI+HbBBwj9QViKbH5t64DsP2fvmGXUkWU4WgO+Uve2YQzBUGd7r+zH2ZG/tiUQc4QxKwgbwFfVGwwmdLL5wH78aPC/ZBem9jJpCAX3xtcNASSNgJLzUPSQyjB1zQNl8IQJ9MIU4lx2+Jo72ysXYKl1HSzN02BMa/vbZ5xyNJIshJzwf3L0dQhJw4Sih/SFw9Tk8sVeghVPoefaIYCkMZCKbrcP9lnZuk0uPUjGE/KE8JQry7W2tgfuC3vXgvNV+qSQbyFtAtyWk7zWiYevvuUQ9QEQCvJ+5mmu6dTjz1zFHLFj8Eb87MtxaZh/IQFIHom+9vgTWwZxAQjT9X4vtbEVPojwjiV471s00mhAckpwGuCn1HtFtRDaSh6y9zsL+LNBvCG/24ThcxHObdlWc1v+VQJe8LcO0jwtuF8BwnAAUgP9M8JPU2Me+Oh12auPGT6fHuTePE3bLDy+x9pTLnhMn+07TQGh//Bz1iI0c6kvtqInjvPZcYR3KsPVmUsPYt9nFig9SCY8VQNhpPBzn952bbgcsk2EvM89wzh3UEffBbyPqvBUBYQ8ODGPFOLsa7RF096WJ69L+E4EmnpjWu5o4ChlKaRTKT39RMMaVPEQRsz/nIWlDN80chjdJlSd1l0pJCAMVZsniobQVuxceMM9OFoaMd9zqZtjMEYYDW38Drb8Y0DYPLShxn0pvIFuOSxd7YCPet9zk452wsh54FJoeN05hcgSQoG5RR0Qh9Q4E4VvL4wcZq8UACgaRFEQKgSwWrkr5WFnGxiHSutqJGlXjBgIOayhwYBTA0ER0oisIVSUV0AAMT0IASCUO4hRIQSAEECMCCEPwqyQA0JCQBzEGjWNAqHiUVAoXUWbvggOIQCEAOJzxTjoaQ4AIaE64/aZridUsBYUgkhB15oGg1DBIl8IqirYwV6hPSGBSFteMCUBSVXwfYixBmamRubeMyjzMJQBDDowE3OesDD+zwqFoDqiEwXoXJpljB+PvWJGy75BKF1FPxhKygJuqUdYQGlLxNEXkrYyjQ0GbaAwEnUIlLRNvVjQDYUAsJB0HKLE4y0AIpQNgCIhBIhQTgCKhZBBpAN/v6LtQI50JfUgYOnnjmLUFHKhjxbAmdTCaTiBm3ovLPqG2urWAij6im0Nd9aTN9ygLUEt9LgSRnohxUPIKxlGaE+/6Y7znFf0yX+GnkvFFWmarkab2o9PmTeq8sbd2a7DaysXz7i64VeznN4jCQhN9gdDbRiuWrfrsq0mHIrlaq+hlotCtd3Um9u0BYWY8y5D67wccJoZjFca7iUs9VqZcfsZwTd1sbWGG+OcYaTnPAP7rTQVVlM4Sg3oGvB1tmNh0t/HKXZ1jFoIMwCQjtqbhNxUmkGYqgZEDZP11HN/S3gAYRozf0l8C5kKEKUvW0t1IfeWG/5MwgheZTT1E0AEhDkAePQO+Ig2H3DncAkQM4cwUQCD530dU4B5Yvmi2LlDqXfWrxMCcMth51RToRMNUXFnfc2KJ0+Ryl0VNOUwlhh6NoxK5gnViTgQpUG4SqSyt5z3zRJpuKmt3Q1614QaCBPaN6je+2XiFcWAKOXcUfIYKRyL/1lb7pe5VxSxxjQ6hImshqGRt5GWZVKO6q2wHwujfwDtIvaIdexj8Cm8+a68EqMfox6x/voMouZF4dHnEGNeCDMwT6vdNfekH1MafMk4PI06YtqLVGl95aEM9Z5vAeCTOA++YLtoVJRrsqNCaJ6WRmkdYaNec5BT/lcTRMqrhmwfjbpkj55+OKp8IEbU/JLgPJE6Wa3TTe9sHS+ShVD5QIyqIxMEwKh12olC6mHIed5ewEop80CNlfIOADYOT2nd6ZXCop+Ebqchc0JqxKcKASxChycJgUh1rnHA5ow9eTrhqNI7JWiAYYwBGGdpyNLoGw0Pkh96h1BpHihyywtATDM/7Hk2fN9EnH8BgKJCU4ooBkbXFMZJiPbrOyecGl3zgQDQL4hk10IZiOe+5w99Q/gBAEIJgPhJM4QAEEoFREAIAAEiIASAkD8Qt4AQAEIAERAGFlX4CACKAXGVM4ivMwWwCLFAlyeoaa70QePKm5Dlp+/n+ye/5dYgva6YsUaVeMa+tzNFeJtWwc+udbJ0Fg399kLielQJ5Ze61c2+7ytA6EZetiPxZC6tj22yJCv6jUwOyj/zcbqAxOMyAKEbfeHtNa7DtYXptjsk2kJxR+eIeim/tHNofUKYy8DMrQcAKWz6brpvzyIAlpwPhQ49l6b7skJf5Z+YTOYQc4FwLDxvoTDwaygQK+U/kVr+ytSFBG01Q3gnJJR4cNiAhx4HDub8/b5DULXlj6SVZghFiE+LdvE9vo/o8Lp1RmH5hzm0T6wdbZ6n+D6i44zDRc3ln6CpAEJfXiRU45oqLz8gFAThWsh7ughrRibc0QynHgZpNJa/ENJ+loCwu/qOGnFIjYR/n7TfgycULhcQhu6VC+HfF+L3BoAQ4WiZTw1M+FPCnA2gKC6/FAhXgDC+ojQGh3NuWsvfF1L/D5ohlCKtl1j2ldu9a/nPAKFwN56Bst10zCG0CPleXN/zXPgHQZXaZaBgrbzyY5V/mUA+6F0hwtGN9rwu5DVZPuwWqfxdFz1LWbJ2lwKEa+0Qsm4Dl3fp+Pu0lV97PgwIPfSsS+UQhj5Oo+vvFULazRIQyvGEcxPuNLCth2MvFsrKn8UOilAQShkh7TTczYNMoS6OdP47msrPi82lXKGWhCdMZYS0bFy+vcnGAjP1CIfvgbKNA9glecEH9RD6Ol4wRuWyN/G9MHnksS6o/GPf5XcwNSUlHzQhDuAKtWJmkwKElU7lylP5rgIcsquh/FI8YZCDpkJBuE4FQm7Icw8N+SrUGaQKyi8FwiDt1ve5o+Vu7qYHy/psgK8cvh+FTYuO77bhEC7GuaPiys/L1X4IgXDL+e3M5+ovLxBy5VLuIebw1oqcHoPfoaMJUsHays878r8KbDc3xtPx/84gZPBG/JwaufrsY/SRG/OY3//8QMNdsvdZCFtbW6f8pFuf5bflILAlX7O+4fdfugKyFYS8T2zAsXthdG0VurPGKwI06oF5vkBgHWkNp6ry29+lsPZMU3vijnXFNmoclr+6+Ou/FIb8yb30sS8YGjmTqCLyQsi5N/6ZwKs0Yenj68pfPjF6N782Dp2FzV9CTyoSeY8mLK1
6qGxIkLI8oa1n8tz9juP40DlK0epxYEbojbq+9QfurBeVIlCO9D2396bxiV4lkYQ3hOAFw2pbhqMGISkkQOMcQ9EqhDmGZZdo92JC0YHRNTfoSg+5e0IT+opqCKHoIU+4ztQIgBD1EFNrQAgIpYSil9lDmPHqkROPt+JC6AgPquSuumJmg0YARVCuneDfvPVeJokZ6pIXDkNxQtGzTF9/BQjRG0tQznfb74RwCQghpALBtIQnfK4zhxdyQvVCUeknMIT3hLyY+T5jo0yABqKPQNpUNw/09tGZod5jgCaYFxyYvJcNPkv9eof+I3pnCFEHIETjSM8L9tHZHYCQT9PaZGycU6yg8S4akDnJ+P03L0+t23XGzCLzRgII/Wqa+fv/xlfvmKvMUOcOrlCDdoei1MGdZm6G5VEIfRzzjd4aQs69n699Rx7ewhvCGzr2gmTPs8zNsJOrXt24FbkhhOjCfT4ICA/rPbyhUy94Dks0gJCX1NzCZui9YUd3oei+c257TalFbgg19ILHrlrL2gvWgXAL26EX76gZTNASQnad8Ibwhl284NhgXpB0c+jKhWO3Ms1hP9ihJYB9eMF6qd1BCPk0qA1s+LimFIu7m4nsdQIzPK4VbQ8hYvrnuSH2G9b2ggP78QmWqBdF9Vx8SSY6QYdUW7BTA1schZATyhvY8lHvcRbNUS9YGFy2U+qmzh2YPVc0I7yAOFyHfRpyUwtCSzOdPXMHmz7qDIM0e0V2wZTEk+6Ym6N63eBLp/b5Bts+2cKCSJ/LuoZO3ANSiE5hKAZjnvNSS4931jcw9jpwT0feV/qSJ1pVtCyfHKDkvK8Ejx7pUxGh2xFNSwx8QTi2H9ceC0/nni64MS/5N5dG39pDqvRV+WgGk71c9VFXF9b+xYvOw/d61iv7m3MvEHryhvecwC52jSSx4VIIgwnMNT/UsTxIgpPt3K/ARj15CptwL3Zd/ceDSATj2DGQjbxgWwhdeMMte7zpy5On9vymRm/YxBYljGVjKWF9VJf7I1+sex3wY8w/V1QPTborW/72gkdsRDaZMJBdbdHIC7aCkAu9atlLbtnrzerMnyToDaGwelOnk3/hHSem/ZK7e/t7jeeR20LYBgqa8J80gS8jbwi5F02Uj1u2NYJxap8PLkJfLxA2hIJyvnHX/AfeEPLpBfe0uSFHbnXaea3Qd5d6HcpYZ8L6M7lnFwMQ3MNg+RxUR1+6AshtbsVgfXTEg1sIGax9UND2p7f270wdG3eK9gXVGHdw2k5sOyZv+Nbs39Z308XR9DqWb2J+PwKDhuKHPobfuXf7gnYGHdCs7bhDDadD4entDug7LWNsnRNW4mYqwJ9dk+GGSTPBiA2j0G8RWNM5upZtcG4/3vMfP7KnbK2egx6CCnDPhRn7NgD3cghLIad5WcM2SO38iqHvvMOosyeMpQ5zlVCaaj06GVs9xUbHdiKoqrHWgquFEFMWUEWfXUxJAML23hAHFOctmjZQffKD2pywkhtSGHKNtpitLroscAeE7kCkSsC60vxEl6yMtL9EL5HKGCMszU5bk8gdkklAyEn5FO0yK419rIxBOIqwFMooDE0tHEVYijAUECIshRCGIhxFWIowFJ5QkEYIS5PTJrUwNGlPyN6QQPyKtpuM1E/K5+YJDV/MiA3AaehzqgAm7QnZG9IGYKo8bHnSK7VblLL3hOwNHziPuEGOqE5brrdR6i+atCfckyeWD47HkAkepRGLY/e8A8J0gCwYSNypF08bBm+e6zVz2UL4AshhBUjML/rXLefqC82bcQFhGC9JDwZ1uuu+At0S5gCETYHsV4DUeD9fDN2Zfy5OXaW2zAwQygCzBLJ8cvaW5OXKC1FxfTggFAHmoAJnSiOw2wps9KwRWgJCLaEswaj5NqkLwAYIU4BxqTSXbHXpJdRMPZgAOiAMqABCNGYIEEJutEK5IUAIwYMDQgiCACEEAcJs1Vda7gGqDhCmoiEghAAhBAHCrKXVo2C1DCBMRlp37uMIEECoX7xrX3P5C9QiINSuIcoPAUI0YkAICLNWgfJDh4T9hH7zqYH9+JHAq7zBqWjwhPAicTVCVQJCNF50JghHocahKK0X/ZnQKyEkhSdUpzG8OgQI42qC94EQjsYLRSmH+pbgq73L6bYkeEJ4DYTYmeg1TOBFc/usTTp3V9DdEuXJ2xDCUbXhaXk0/kAYmBvuMB4qkC35E5e5AMKkwSQgyxufyuPy6fMMgAFCSI73LFXU/N8AmEL9X4ABACNSKMHAgb34AAAAAElFTkSuQmCC + mediatype: image/png + install: + spec: + deployments: + - name: etcd-operator + spec: + replicas: 1 + selector: + matchLabels: + name: etcd-operator-alm-owned + template: + metadata: + labels: + name: etcd-operator-alm-owned + name: etcd-operator-alm-owned + spec: + containers: + - command: + - etcd-operator + - --create-crd=false + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: quay.io/coreos/etcd-operator@sha256:66a37fd61a06a43969854ee6d3e21087a98b93838e284a6086b13917f96b0d9b + name: etcd-operator + - command: + - etcd-backup-operator + - --create-crd=false + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: quay.io/coreos/etcd-operator@sha256:66a37fd61a06a43969854ee6d3e21087a98b93838e284a6086b13917f96b0d9b + name: etcd-backup-operator + - command: + - etcd-restore-operator + - --create-crd=false + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: 
quay.io/coreos/etcd-operator@sha256:66a37fd61a06a43969854ee6d3e21087a98b93838e284a6086b13917f96b0d9b + name: etcd-restore-operator + serviceAccountName: etcd-operator + permissions: + - rules: + - apiGroups: + - etcd.database.coreos.com + resources: + - etcdclusters + - etcdbackups + - etcdrestores + verbs: + - '*' + - apiGroups: + - '' + resources: + - pods + - services + - endpoints + - persistentvolumeclaims + - events + verbs: + - '*' + - apiGroups: + - apps + resources: + - deployments + verbs: + - '*' + - apiGroups: + - '' + resources: + - secrets + verbs: + - get + serviceAccountName: etcd-operator + strategy: deployment + installModes: + - supported: true + type: OwnNamespace + - supported: true + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: false + type: AllNamespaces + keywords: + - etcd + - key value + - database + - coreos + - open source + labels: + alm-owner-etcd: etcdoperator + operated-by: etcdoperator + links: + - name: Blog + url: https://coreos.com/etcd + - name: Documentation + url: https://coreos.com/operators/etcd/docs/latest/ + - name: etcd Operator Source Code + url: https://github.com/coreos/etcd-operator + maintainers: + - email: etcd-dev@googlegroups.com + name: etcd Community + maturity: alpha + provider: + name: CNCF + replaces: etcdoperator.v0.9.2 + selector: + matchLabels: + alm-owner-etcd: etcdoperator + operated-by: etcdoperator + version: 0.9.4 diff --git a/pkg/lib/bundle/testdata/validate/invalid_manifests_bundle/invalid_annotation_name/etcdrestores.etcd.database.coreos.com.crd.yaml b/pkg/lib/bundle/testdata/validate/invalid_manifests_bundle/invalid_annotation_name/etcdrestores.etcd.database.coreos.com.crd.yaml new file mode 100644 index 000000000..5b851cd12 --- /dev/null +++ b/pkg/lib/bundle/testdata/validate/invalid_manifests_bundle/invalid_annotation_name/etcdrestores.etcd.database.coreos.com.crd.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: etcdrestores.etcd.database.coreos.com +spec: + group: etcd.database.coreos.com + names: + kind: EtcdRestore + listKind: EtcdRestoreList + plural: etcdrestores + singular: etcdrestore + scope: Namespaced + version: v1beta2 diff --git a/pkg/lib/bundle/validate_test.go b/pkg/lib/bundle/validate_test.go index 4d50b06f8..51e0a8b28 100644 --- a/pkg/lib/bundle/validate_test.go +++ b/pkg/lib/bundle/validate_test.go @@ -3,6 +3,7 @@ package bundle import ( "errors" "fmt" + "strings" "testing" "github.com/sirupsen/logrus" @@ -112,35 +113,35 @@ func TestValidateBundleContent(t *testing.T) { mediaType string directory string numErrors int - errString string + errStrings []string }{ { description: "registryv1 bundle/invalid csv", mediaType: RegistryV1Type, directory: "./testdata/validate/invalid_manifests_bundle/invalid_csv/", numErrors: 1, - errString: "install modes not found", + errStrings: []string{"install modes not found"}, }, { description: "registryv1 bundle/invalid crd", mediaType: RegistryV1Type, directory: "./testdata/validate/invalid_manifests_bundle/invalid_crd/", numErrors: 1, - errString: "must contain unique version name", + errStrings: []string{"must contain unique version names"}, }, { description: "registryv1 bundle/invalid sa", mediaType: RegistryV1Type, directory: "./testdata/validate/invalid_manifests_bundle/invalid_sa/", numErrors: 1, - errString: "json: cannot unmarshal number into Go struct field ObjectMeta.metadata.namespace of type string", + errStrings: []string{"json: cannot unmarshal number into 
Go struct field ObjectMeta.metadata.namespace of type string"}, }, { description: "registryv1 bundle/invalid type", mediaType: RegistryV1Type, directory: "./testdata/validate/invalid_manifests_bundle/invalid_type/", numErrors: 1, - errString: "ResourceQuota is not supported type for registryV1 bundle", + errStrings: []string{"ResourceQuota is not supported type for registryV1 bundle"}, }, { description: "valid registryv1 bundle", @@ -153,15 +154,39 @@ func TestValidateBundleContent(t *testing.T) { mediaType: RegistryV1Type, directory: "./testdata/validate/invalid_manifests_bundle/invalid_bundle/", numErrors: 1, - errString: "owned CRD etcdclusters.etcd.database.coreos.com/v1beta2 not found in bundle", + errStrings: []string{"owned CRD etcdclusters.etcd.database.coreos.com/v1beta2 not found in bundle"}, }, { description: "invalid registryv1 bundle/extra crd", mediaType: RegistryV1Type, directory: "./testdata/validate/invalid_manifests_bundle/invalid_bundle_2/", numErrors: 1, - errString: `CRD etcdclusters.etcd.database.coreos.com/v1beta2 is present in bundle "etcdoperator.v0.9.4" but not defined in CSV`, + errStrings: []string{`CRD etcdclusters.etcd.database.coreos.com/v1beta2 is present in bundle "etcdoperator.v0.9.4" but not defined in CSV`}, }, + { + description: "invalid annotation names", + mediaType: RegistryV1Type, + directory: "./testdata/validate/invalid_manifests_bundle/invalid_annotation_name/", + numErrors: 3, + errStrings: []string{ + "provided annotation olm.operatorgroup uses wrong case and should be olm.operatorGroup instead", + "provided annotation olm.operatornamespace uses wrong case and should be olm.operatorNamespace instead", + "provided annotation olm.skiprange uses wrong case and should be olm.skipRange instead", + }, + }, + } + + // doesActualErrorMatchExpected iterates through expected error messages and looks for substring match against actualError. 
+	// returns true for match, false otherwise
+	doesActualErrorMatchExpected := func(actualError string, expectedSlice []string) bool {
+		for _, expected := range expectedSlice {
+			if strings.Contains(actualError, expected) {
+				// found a substring match
+				return true
+			}
+		}
+		// no substring matches
+		return false
+	}
 
 	for i, tt := range table {
@@ -173,9 +198,9 @@ func TestValidateBundleContent(t *testing.T) {
 			require.True(t, isValidationErr)
 		}
 		require.Len(t, validationError.Errors, tt.numErrors, table[i].description)
-		if len(validationError.Errors) > 0 {
-			e := validationError.Errors[0]
-			require.Contains(t, e.Error(), tt.errString)
+		// convert each validation error to a string and check against all expected errStrings looking for a match
+		for _, e := range validationError.Errors {
+			require.True(t, doesActualErrorMatchExpected(e.Error(), tt.errStrings))
 		}
 	}
 }
diff --git a/pkg/lib/indexer/indexer.go b/pkg/lib/indexer/indexer.go
index d53bf3c0d..1f8cf22f1 100644
--- a/pkg/lib/indexer/indexer.go
+++ b/pkg/lib/indexer/indexer.go
@@ -546,7 +546,7 @@ func (i ImageIndexer) ExportFromIndex(request ExportFromIndexRequest) error {
 			bundleDir.bundleVersion = strconv.Itoa(rand.Intn(10000))
 		}
 		exporter := bundle.NewExporterForBundle(bundleImage, filepath.Join(request.DownloadPath, bundleDir.pkgName, bundleDir.bundleVersion), request.ContainerTool)
-		if err := exporter.Export(); err != nil {
+		if err := exporter.Export(request.SkipTLS); err != nil {
 			err = fmt.Errorf("exporting bundle image:%s failed with %s", bundleImage, err)
 			mu.Lock()
 			errs = append(errs, err)
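The substring-matching helper and table entries above, together with the exporter test added earlier in this patch, run without a cluster; a plain `go test` invocation along these lines (test names taken from this diff) exercises them:

```bash
go test -tags=json1 -run 'TestValidateBundleContent|TestExportForBundleWithBadImage' ./pkg/lib/bundle/...
```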
diff --git a/scripts/start_registry.sh b/scripts/start_registry.sh
new file mode 100755
index 000000000..af205b56c
--- /dev/null
+++ b/scripts/start_registry.sh
@@ -0,0 +1,147 @@
+#!/usr/bin/env bash
+set -o errexit
+
+# create temp directory and set up for removal on script exit
+TMPDIR="$(mktemp -d)"
+MKCERT_TEMP_DIR="${TMPDIR}/mkcert"
+trap 'rm -rf -- "$TMPDIR"' EXIT
+
+# output file locations
+CERT_DIR=certs
+
+# skip TLS flag (default to using TLS)
+SKIP_TLS=false
+
+function usage() {
+  echo -n "$(basename "$0") [OPTION]... <registry name>
+
+Creates a named docker registry and optionally generates and uses TLS certs.
+TLS uses port 443 and non-TLS uses port 5000.
+
+ <registry name>     name of target registry (e.g. kind-registry)
+
+ Options:
+  -s, --skip-tls     Skip TLS certificate generation and usage in image registry (defaults to using TLS if not provided)
+  -c, --cert-dir     Output directory for cert and key files (defaults to ./certs)
+  -h, --help         Display this help and exit
+"
+  exit 0
+}
+
+function err_exit() {
+  echo >&2 "[ERROR] $1"
+  exit 1
+}
+
+while [[ $# -gt 0 ]]
+do
+key="$1"
+
+case $key in
+    -s|--skip-tls)
+    SKIP_TLS=true
+    shift
+    ;;
+    -c|--cert-dir)
+    CERT_DIR="$2"
+    shift 2
+    ;;
+    -h|--help)
+    usage
+    shift
+    ;;
+    --) # end argument parsing
+    shift
+    break
+    ;;
+    --*=|-*) # unsupported flags
+    err_exit "Unsupported flag $1"
+    ;;
+    *)
+    POSITIONAL+=("$1") # save it in an array for later
+    shift # past argument
+    ;;
+esac
+done
+set -- "${POSITIONAL[@]}" # restore positional parameters
+
+REGISTRY_NAME=${1?' Missing required registry name'}
+
+CERT="${CERT_DIR}/cert.pem"
+KEY="${CERT_DIR}/key.pem"
+
+function installMkcert() {
+  # see if mkcert is already installed
+  if ! "$(go env GOPATH)/bin/mkcert" --version &>/dev/null; then
+    mkdir -p "${MKCERT_TEMP_DIR}"
+    # not installed, so clone the repo to temp dir
+    if ! git clone https://github.com/FiloSottile/mkcert "${MKCERT_TEMP_DIR}"; then
+      err_exit "unable to clone mkcert git repository"
+    fi
+
+    # use a subshell to prevent changing the user's CWD
+    if (cd "${MKCERT_TEMP_DIR}" && go install -ldflags "-X main.Version=$(git describe --tags)"); then
+      # run mkcert
+      if ! "$(go env GOPATH)/bin/mkcert" --version &>/dev/null; then
+        err_exit "mkcert is not functioning correctly"
+      fi
+    else
+      err_exit "unable to install mkcert"
+    fi
+  fi
+}
+
+function createCerts() {
+  # create destination for certs
+  mkdir -p "${CERT_DIR}"
+  # check if CAROOT is installed or not
+  if [[ ! -d $("$(go env GOPATH)/bin/mkcert" -CAROOT) ]]; then
+    if ! "$(go env GOPATH)/bin/mkcert" -install; then
+      err_exit "Unable to install CAROOT"
+    fi
+  fi
+  # check if the certs were already created from a previous run of this script
+  if [[ ! -f "${CERT}" ]] && [[ ! -f "${KEY}" ]]; then
+    if ! "$(go env GOPATH)/bin/mkcert" -cert-file "${CERT}" -key-file "${KEY}" localhost 127.0.0.1 ::1; then
+      err_exit "Unable to create cert and key files"
+    fi
+  fi
+}
+
+running="$(docker inspect -f '{{.State.Running}}' "${REGISTRY_NAME}" 2>/dev/null || true)"
+if [ "${running}" = 'true' ]; then
+  echo "Registry is already running. Nothing to do."
+  exit 0
+fi
+
+if [[ "$SKIP_TLS" = false ]] ; then
+  # Configured to use TLS
+
+  # use mkcert tool for simple cross-platform setup/configuration
+  installMkcert
+  createCerts
+
+  reg_port='443'
+  absolute_path_cert_dir="$(cd "$(dirname "${CERT_DIR}")"; pwd -P)/$(basename "${CERT_DIR}")"
+  docker run \
+    -d \
+    --restart=always \
+    --name "${REGISTRY_NAME}" \
+    -v "${absolute_path_cert_dir}":/certs \
+    -e REGISTRY_HTTP_ADDR=0.0.0.0:443 \
+    -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/cert.pem \
+    -e REGISTRY_HTTP_TLS_KEY=/certs/key.pem \
+    -p "127.0.0.1:${reg_port}:443" \
+    registry:2
+
+else
+  # Configured to not use TLS
+
+  reg_port='5000'
+  docker run \
+    -d \
+    --restart=always \
+    -p "127.0.0.1:${reg_port}:5000" \
+    --name "${REGISTRY_NAME}" \
+    registry:2
+fi
\ No newline at end of file
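For reference, the script above supports these invocations; the examples simply restate the flags it defines:

```bash
./scripts/start_registry.sh kind-registry                   # TLS on 127.0.0.1:443, certs written to ./certs
./scripts/start_registry.sh -s kind-registry                # plain HTTP on 127.0.0.1:5000, no certs generated
./scripts/start_registry.sh -c /tmp/regcerts kind-registry  # TLS, certs written to /tmp/regcerts
```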
"github.com/onsi/ginkgo" @@ -15,14 +17,13 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" - "github.com/operator-framework/operator-registry/pkg/client" "github.com/operator-framework/operator-registry/pkg/configmap" "github.com/operator-framework/operator-registry/pkg/lib/bundle" + "github.com/operator-framework/operator-registry/test/e2e/ctx" ) var builderCmd string @@ -713,7 +714,7 @@ var _ = Describe("Launch bundle", func() { It("should populate specified configmap", func() { // these permissions are only necessary for the e2e (and not OLM using the feature) By("configuring configmap service account") - kubeclient, err := client.NewKubeClient("", logrus.StandardLogger()) + kubeclient, err := kubernetes.NewForConfig(ctx.Ctx().RESTConfig()) Expect(err).NotTo(HaveOccurred()) _, err = kubeclient.RbacV1().Roles(namespace).Create(context.TODO(), &rbacv1.Role{ @@ -822,28 +823,33 @@ var _ = Describe("Launch bundle", func() { }) }) -const kindControlPlaneNodeName = "kind-control-plane" +var kindControlPlaneNodeNameRegex = regexp.MustCompile("^kind-.*-control-plane$|^kind-control-plane$") -func isKindCluster(client *kubernetes.Clientset) (bool, error) { - kindNode, err := client.CoreV1().Nodes().Get(context.TODO(), kindControlPlaneNodeName, metav1.GetOptions{}) +func isKindCluster(client *kubernetes.Clientset) (bool, string, error) { + nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { - if errors.IsNotFound(err) { - // not a kind cluster - return false, nil - } // transient error accessing nodes in cluster // return an error, failing the test - return false, fmt.Errorf("accessing nodes in cluster") + return false, "", fmt.Errorf("accessing nodes in cluster") } + + var kindNode *corev1.Node + for _, node := range nodes.Items { + if kindControlPlaneNodeNameRegex.MatchString(node.Name) { + kindNode = &node + } + } + if kindNode == nil { - return true, fmt.Errorf("finding kind node %s in cluster", kindControlPlaneNodeName) + return false, "", nil } - return true, nil + // found a match... strip off -control-plane from name and return to caller + return true, strings.TrimSuffix(kindNode.Name, "-control-plane"), nil } // maybeLoadKind loads the built images via kind load docker-image if the test is running in a kind cluster func maybeLoadKind(client *kubernetes.Clientset, w io.Writer, images ...string) error { - kind, err := isKindCluster(client) + kind, kindServerName, err := isKindCluster(client) if err != nil { return err } @@ -852,7 +858,7 @@ func maybeLoadKind(client *kubernetes.Clientset, w io.Writer, images ...string) } for _, image := range images { - cmd := exec.Command("kind", "load", "docker-image", image) + cmd := exec.Command("kind", "load", "docker-image", image, "--name", kindServerName) cmd.Stderr = w cmd.Stdout = w err := cmd.Run() diff --git a/test/e2e/ctx/ctx.go b/test/e2e/ctx/ctx.go new file mode 100644 index 000000000..93ef4cf8c --- /dev/null +++ b/test/e2e/ctx/ctx.go @@ -0,0 +1,87 @@ +package ctx + +import ( + "fmt" + "strings" + + . 
"github.com/onsi/ginkgo" + operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + kscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + k8scontrollerclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ctx TestContext + +// TestContext represents the environment of an executing test. It can +// be considered roughly analogous to a kubeconfig context. +type TestContext struct { + restConfig *rest.Config + + scheme *runtime.Scheme + + // client is the controller-runtime client -- we should use this from now on + client k8scontrollerclient.Client +} + +// Ctx returns a pointer to the global test context. During parallel +// test executions, Ginkgo starts one process per test "node", and +// each node will have its own context, which may or may not point to +// the same test cluster. +func Ctx() *TestContext { + return &ctx +} + +func (ctx TestContext) Logf(f string, v ...interface{}) { + if !strings.HasSuffix(f, "\n") { + f += "\n" + } + fmt.Fprintf(GinkgoWriter, f, v...) +} + +func (ctx TestContext) Scheme() *runtime.Scheme { + return ctx.scheme +} + +func (ctx TestContext) RESTConfig() *rest.Config { + return rest.CopyConfig(ctx.restConfig) +} + +func (ctx TestContext) Client() k8scontrollerclient.Client { + return ctx.client +} + +func setDerivedFields(ctx *TestContext) error { + if ctx == nil { + return fmt.Errorf("nil test context") + } + + if ctx.restConfig == nil { + return fmt.Errorf("nil RESTClient") + } + + ctx.scheme = runtime.NewScheme() + + localSchemeBuilder := runtime.NewSchemeBuilder( + apiextensionsv1.AddToScheme, + kscheme.AddToScheme, + operatorsv1alpha1.AddToScheme, + operatorsv1.AddToScheme, + ) + if err := localSchemeBuilder.AddToScheme(ctx.scheme); err != nil { + return err + } + + client, err := k8scontrollerclient.New(ctx.restConfig, k8scontrollerclient.Options{ + Scheme: ctx.scheme, + }) + if err != nil { + return err + } + ctx.client = client + + return nil +} diff --git a/test/e2e/ctx/doc.go b/test/e2e/ctx/doc.go new file mode 100644 index 000000000..c0f79f2b5 --- /dev/null +++ b/test/e2e/ctx/doc.go @@ -0,0 +1,5 @@ +// Package ctx provides information about the Kubernetes cluster under +// test, as well as optional functionality for provisioning (i.e., +// creating new clusters) and installing (i.e., deploying operators +// and supporting resources into clusters). 
+package ctx
diff --git a/test/e2e/ctx/must.go b/test/e2e/ctx/must.go
new file mode 100644
index 000000000..eaca97e67
--- /dev/null
+++ b/test/e2e/ctx/must.go
@@ -0,0 +1,9 @@
+package ctx
+
+func MustProvision(ctx *TestContext) func() {
+	deprovision, err := Provision(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return deprovision
+}
diff --git a/test/e2e/ctx/provisioner_kind.go b/test/e2e/ctx/provisioner_kind.go
new file mode 100644
index 000000000..3145e9cee
--- /dev/null
+++ b/test/e2e/ctx/provisioner_kind.go
@@ -0,0 +1,177 @@
+// +build kind
+
+package ctx
+
+import (
+	golangContext "context"
+	"encoding/csv"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/rand"
+	"k8s.io/client-go/tools/clientcmd"
+	"sigs.k8s.io/kind/pkg/apis/config/v1alpha4"
+	"sigs.k8s.io/kind/pkg/cluster"
+	"sigs.k8s.io/kind/pkg/cluster/nodeutils"
+	"sigs.k8s.io/kind/pkg/log"
+)
+
+var (
+	images = flag.String("kind.images", "", "comma-separated list of image archives to load on cluster nodes, relative to the test binary or test package path")
+)
+
+type kindLogAdapter struct {
+	*TestContext
+}
+
+var _ log.Logger = kindLogAdapter{}
+
+func (kl kindLogAdapter) Enabled() bool {
+	return true
+}
+
+func (kl kindLogAdapter) Info(message string) {
+	// treat the message as data, not as a format string
+	kl.Infof("%s", message)
+}
+
+func (kl kindLogAdapter) Infof(format string, args ...interface{}) {
+	// spread args so Logf formats each value instead of printing one slice
+	kl.Logf(format, args...)
+}
+
+func (kl kindLogAdapter) Warn(message string) {
+	kl.Warnf("%s", message)
+}
+
+func (kl kindLogAdapter) Warnf(format string, args ...interface{}) {
+	kl.Logf(format, args...)
+}
+
+func (kl kindLogAdapter) Error(message string) {
+	kl.Errorf("%s", message)
+}
+
+func (kl kindLogAdapter) Errorf(format string, args ...interface{}) {
+	kl.Logf(format, args...)
+}
+
+func (kl kindLogAdapter) V(log.Level) log.InfoLogger {
+	return kl
+}
+
+func Provision(ctx *TestContext) (func(), error) {
+	dir, err := ioutil.TempDir("", "kind.")
+	if err != nil {
+		return nil, fmt.Errorf("failed to create temporary directory: %s", err.Error())
+	}
+	defer os.RemoveAll(dir)
+	kubeconfigPath := filepath.Join(dir, "kubeconfig")
+
+	provider := cluster.NewProvider(
+		cluster.ProviderWithLogger(kindLogAdapter{ctx}),
+	)
+	name := fmt.Sprintf("kind-%s", rand.String(16))
+
+	// create cluster with configuration:
+	// 1. use local docker registry
+	// 2. wait for 5 minutes for control plane
+	// 3. use explicit kube config path
+	registry := "kind-registry"
+	registryPort := 5000
+	if err := provider.Create(
+		name,
+		cluster.CreateWithV1Alpha4Config(&v1alpha4.Cluster{
+			ContainerdConfigPatches: []string{
+				fmt.Sprintf(`[plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:%[2]d"]
+    endpoint = ["http://%[1]s:%[2]d"]`, registry, registryPort),
+			},
+		}),
+		cluster.CreateWithWaitForReady(5*time.Minute),
+		cluster.CreateWithKubeconfigPath(kubeconfigPath),
+	); err != nil {
+		return nil, fmt.Errorf("failed to create kind cluster: %s", err.Error())
+	}
+
+	nodes, err := provider.ListNodes(name)
+	if err != nil {
+		return nil, fmt.Errorf("failed to list kind nodes: %s", err.Error())
+	}
+
+	var archives []string
+	// flag.String never returns nil, so test for a non-empty value instead
+	if *images != "" {
+		records, err := csv.NewReader(strings.NewReader(*images)).ReadAll()
+		if err != nil {
+			return nil, fmt.Errorf("error parsing image flag: %s", err.Error())
+		}
+		for _, row := range records {
+			archives = append(archives, row...)
+		}
+	}
+
+	for _, archive := range archives {
+		for _, node := range nodes {
+			fd, err := os.Open(archive)
+			if err != nil {
+				return nil, fmt.Errorf("error opening archive %q: %s", archive, err.Error())
+			}
+			err = nodeutils.LoadImageArchive(node, fd)
+			fd.Close()
+			if err != nil {
+				return nil, fmt.Errorf("error loading image archive %q to node %q: %s", archive, node, err.Error())
+			}
+		}
+	}
+
+	kubeconfig, err := provider.KubeConfig(name, false)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read kubeconfig: %s", err.Error())
+	}
+	restConfig, err := clientcmd.RESTConfigFromKubeConfig([]byte(kubeconfig))
+	if err != nil {
+		return nil, fmt.Errorf("error loading kubeconfig: %s", err.Error())
+	}
+
+	ctx.restConfig = restConfig
+
+	var once sync.Once
+	deprovision := func() {
+		once.Do(func() {
+			provider.Delete(name, kubeconfigPath)
+		})
+	}
+	derivedFieldsErr := setDerivedFields(ctx)
+	// make best attempt before we return to:
+	// 1) add local registry ConfigMap
+	// 2) connect "kind" network to the registry
+	if derivedFieldsErr == nil {
+		// add config map; use an interpreted string so "\n" becomes a real
+		// newline in the YAML payload rather than a literal backslash-n
+		configMap := &corev1.ConfigMap{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "local-registry-hosting",
+				Namespace: "kube-public",
+			},
+			Data: map[string]string{"localRegistryHosting.v1": fmt.Sprintf("host: \"localhost:%d\"\nhelp: \"https://kind.sigs.k8s.io/docs/user/local-registry/\"", registryPort)},
+		}
+		err := ctx.Client().Create(golangContext.TODO(), configMap)
+		if err != nil {
+			ctx.Logf("failed to create local-registry-hosting ConfigMap: %s", err.Error())
+		}
+		// connect the docker "kind" network to the registry so the local registry is resolvable;
+		// run the command to completion and only log failures, since the connection might already exist
+		cmd := exec.Command("docker", "network", "connect", "kind", registry)
+		if err := cmd.Run(); err != nil {
+			ctx.Logf("failed to exec %#v: %v", cmd.Args, err)
+		}
+	}
+	return deprovision, derivedFieldsErr
+}
diff --git a/test/e2e/ctx/provisioner_kubeconfig.go b/test/e2e/ctx/provisioner_kubeconfig.go
new file mode 100644
index 000000000..fa3a6eec3
--- /dev/null
+++ b/test/e2e/ctx/provisioner_kubeconfig.go
@@ -0,0 +1,74 @@
+// +build !kind
+
+package ctx
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"path/filepath"
+
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+func Provision(ctx *TestContext) (func(), error) {
+	path := os.Getenv("KUBECONFIG")
+	if path == "" {
+		home, err := os.UserHomeDir()
+		if err != nil {
+			return nil, fmt.Errorf("failed to determine kubeconfig path: %s", err.Error())
+		}
+		path = filepath.Join(home, ".kube", "config")
+	}
+
+	f, err := os.Open(path)
+	if os.IsNotExist(err) {
+		// try in-cluster config
+		// see https://github.com/coreos/etcd-operator/issues/731#issuecomment-283804819
+		if len(os.Getenv("KUBERNETES_SERVICE_HOST")) == 0 {
+			addrs, err := net.LookupHost("kubernetes.default.svc")
+			if err != nil {
+				return nil, fmt.Errorf("failed to resolve kubernetes service: %s", err.Error())
+			}
+			os.Setenv("KUBERNETES_SERVICE_HOST", addrs[0])
+		}
+
+		if len(os.Getenv("KUBERNETES_SERVICE_PORT")) == 0 {
+			os.Setenv("KUBERNETES_SERVICE_PORT", "443")
+		}
+
+		restConfig, err := rest.InClusterConfig()
+		if err != nil {
+			return nil, fmt.Errorf("failed to retrieve in-cluster config: %s", err.Error())
+		}
+
+		ctx.restConfig = restConfig
+		return func() {}, setDerivedFields(ctx)
+	}
+	if err != nil {
+		return nil, fmt.Errorf("failed to open kubeconfig: %s", err.Error())
+	}
+	defer f.Close()
+
+	const MaxKubeconfigBytes = 65535
+	var b bytes.Buffer
+	
n, err := b.ReadFrom(io.LimitReader(f, MaxKubeconfigBytes)) + if err != nil { + return nil, fmt.Errorf("failed to read kubeconfig: %s", err.Error()) + } + if n >= MaxKubeconfigBytes { + return nil, fmt.Errorf("kubeconfig larger than maximum allowed size: %d bytes", MaxKubeconfigBytes) + } + + restConfig, err := clientcmd.RESTConfigFromKubeConfig(b.Bytes()) + if err != nil { + return nil, fmt.Errorf("error loading kubeconfig: %s", err.Error()) + } + + ctx.restConfig = restConfig + + return func() {}, setDerivedFields(ctx) +} diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 86fd284b4..d8736d868 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -1,6 +1,7 @@ package e2e_test import ( + "flag" "fmt" "os" "os/exec" @@ -11,6 +12,7 @@ import ( "github.com/spf13/cobra" opmroot "github.com/operator-framework/operator-registry/cmd/opm/root" + "github.com/operator-framework/operator-registry/test/e2e/ctx" ) // quay.io is the default registry used if no local registry endpoint is provided @@ -24,16 +26,22 @@ var ( // opm command under test. opm *cobra.Command + + skipTLSForRegistry = flag.Bool("skip-tls", false, "skip TLS certificate verification for container image registries while pulling bundles or index") ) func TestE2E(t *testing.T) { + flag.Parse() RegisterFailHandler(Fail) RunSpecs(t, "E2E Suite") } +var deprovision func() = func() {} + var _ = BeforeSuite(func() { // Configure test registry (hostnames, credentials, etc.) configureRegistry() + deprovision = ctx.MustProvision(ctx.Ctx()) opm = opmroot.NewCmd() // Creating multiple instances would cause flag registration conflicts }) @@ -60,3 +68,7 @@ func configureRegistry() { By(fmt.Sprintf("Using container image registry %s", dockerHost)) } + +var _ = AfterSuite(func() { + deprovision() +}) diff --git a/test/e2e/opm_test.go b/test/e2e/opm_test.go index 61e115772..1c0933215 100644 --- a/test/e2e/opm_test.go +++ b/test/e2e/opm_test.go @@ -108,6 +108,7 @@ func buildIndexWith(containerTool, fromIndexImage, toIndexImage string, bundleIm Bundles: bundleImages, Permissive: false, Overwrite: overwriteLatest, + SkipTLS: *skipTLSForRegistry, } return indexAdder.AddToIndex(request) @@ -168,6 +169,7 @@ func exportPackageWith(containerTool string) error { Packages: packages, DownloadPath: "downloaded", ContainerTool: containertools.NewContainerTool(containerTool, containertools.NoneTool), + SkipTLS: *skipTLSForRegistry, } return indexExporter.ExportFromIndex(request) @@ -183,6 +185,7 @@ func exportIndexImageWith(containerTool string) error { Packages: []string{}, DownloadPath: "downloaded", ContainerTool: containertools.NewContainerTool(containerTool, containertools.NoneTool), + SkipTLS: *skipTLSForRegistry, } return indexExporter.ExportFromIndex(request) @@ -458,7 +461,7 @@ var _ = Describe("opm", func() { request := lregistry.AddToRegistryRequest{ Permissive: false, - SkipTLS: false, + SkipTLS: *skipTLSForRegistry, InputDatabase: dbFile, Bundles: []string{ch.Head.BundlePath}, Mode: registry.ReplacesMode, diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 000000000..0cd380037 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,5 @@ +TAGS +tags +.*.swp +tomlcheck/tomlcheck +toml.test diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml new file mode 100644 index 000000000..8b8afc4f0 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.travis.yml @@ 
-0,0 +1,15 @@ +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip +install: + - go install ./... + - go get github.com/BurntSushi/toml-test +script: + - export PATH="$PATH:$HOME/gopath/bin" + - make test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE new file mode 100644 index 000000000..6efcfd0ce --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE @@ -0,0 +1,3 @@ +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md) + diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 000000000..01b574320 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile new file mode 100644 index 000000000..3600848d3 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/Makefile @@ -0,0 +1,19 @@ +install: + go install ./... + +test: install + go test -v + toml-test toml-test-decoder + toml-test -encoder toml-test-encoder + +fmt: + gofmt -w *.go */*.go + colcheck *.go */*.go + +tags: + find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS + +push: + git push origin master + git push github master + diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 000000000..7c1b37ecc --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,218 @@ +## TOML parser and encoder for Go with reflection + +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` +packages. This package also supports the `encoding.TextUnmarshaler` and +`encoding.TextMarshaler` interfaces so that you can define custom data +representations. (There is an example of this below.) 
+ +Spec: https://github.com/toml-lang/toml + +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) + +Documentation: https://godoc.org/github.com/BurntSushi/toml + +Installation: + +```bash +go get github.com/BurntSushi/toml +``` + +Try the toml validator: + +```bash +go get github.com/BurntSushi/toml/cmd/tomlv +tomlv some-toml-file.toml +``` + +[](https://travis-ci.org/BurntSushi/toml) [](https://godoc.org/github.com/BurntSushi/toml) + +### Testing + +This package passes all tests in +[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder +and the encoder. + +### Examples + +This package works similarly to how the Go standard library handles `XML` +and `JSON`. Namely, data is loaded into Go values via reflection. + +For the simplest example, consider some TOML file as just a list of keys +and values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which could be defined in Go as: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time // requires `import time` +} +``` + +And then decoded with: + +```go +var conf Config +if _, err := toml.Decode(tomlData, &conf); err != nil { + // handle error +} +``` + +You can also use struct tags if your struct field name doesn't map to a TOML +key value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +### Using the `encoding.TextUnmarshaler` interface + +Here's an example that automatically parses duration strings into +`time.Duration` values: + +```toml +[[song]] +name = "Thunder Road" +duration = "4m49s" + +[[song]] +name = "Stairway to Heaven" +duration = "8m03s" +``` + +Which can be decoded with: + +```go +type song struct { + Name string + Duration duration +} +type songs struct { + Song []song +} +var favorites songs +if _, err := toml.Decode(blob, &favorites); err != nil { + log.Fatal(err) +} + +for _, s := range favorites.Song { + fmt.Printf("%s (%s)\n", s.Name, s.Duration) +} +``` + +And you'll also need a `duration` type that satisfies the +`encoding.TextUnmarshaler` interface: + +```go +type duration struct { + time.Duration +} + +func (d *duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} +``` + +### More complex usage + +Here's an example of how to load the example from the official spec page: + +```toml +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. 
+ [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] +``` + +And the corresponding Go types are: + +```go +type tomlConfig struct { + Title string + Owner ownerInfo + DB database `toml:"database"` + Servers map[string]server + Clients clients +} + +type ownerInfo struct { + Name string + Org string `toml:"organization"` + Bio string + DOB time.Time +} + +type database struct { + Server string + Ports []int + ConnMax int `toml:"connection_max"` + Enabled bool +} + +type server struct { + IP string + DC string +} + +type clients struct { + Data [][]interface{} + Hosts []string +} +``` + +Note that a case insensitive match will be tried if an exact match can't be +found. + +A working example of the above can be found in `_examples/example.{go,toml}`. diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 000000000..b0fd51d5b --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,509 @@ +package toml + +import ( + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "time" +) + +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// When using the various `Decode*` functions, the type `Primitive` may +// be given to any value, and its decoding will be delayed. +// +// A `Primitive` value can be decoded using the `PrimitiveDecode` function. +// +// The underlying representation of a `Primitive` value is subject to change. +// Do not rely on it. +// +// N.B. Primitive values are still parsed, so using them will only avoid +// the overhead of reflection. They can be useful when you don't know the +// exact type of TOML data until run time. +type Primitive struct { + undecoded interface{} + context Key +} + +// DEPRECATED! +// +// Use MetaData.PrimitiveDecode instead. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]bool)} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) 
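The `Primitive`/`PrimitiveDecode` doc comments above refer to an example that is not reproduced in this diff, so here is a small self-contained sketch of the delayed-decoding flow they describe; the document literal and type names are invented for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

const blob = `
kind = "greeter"

[config]
greeting = "hello"
`

func main() {
	// Decode the envelope eagerly, but leave [config] as a Primitive so it
	// can be decoded later, once "kind" tells us its concrete type.
	var envelope struct {
		Kind   string
		Config toml.Primitive
	}
	md, err := toml.Decode(blob, &envelope)
	if err != nil {
		log.Fatal(err)
	}

	var cfg struct{ Greeting string }
	if err := md.PrimitiveDecode(envelope.Config, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(envelope.Kind, cfg.Greeting) // greeter hello
}
```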
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decode will decode the contents of `data` in TOML format into a pointer +// `v`. +// +// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be +// used interchangeably.) +// +// TOML arrays of tables correspond to either a slice of structs or a slice +// of maps. +// +// TOML datetimes correspond to Go `time.Time` values. +// +// All other TOML types (float, string, int, bool and array) correspond +// to the obvious Go types. +// +// An exception to the above rules is if a type implements the +// encoding.TextUnmarshaler interface. In this case, any primitive TOML value +// (floats, strings, integers, booleans and datetimes) will be converted to +// a byte string and given to the value's UnmarshalText method. See the +// Unmarshaler example for a demonstration with time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go +// struct. The special `toml` struct tag may be used to map TOML keys to +// struct fields that don't match the key name exactly. (See the example.) +// A case insensitive match to struct names will be tried if an exact match +// can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there +// may exist TOML values that cannot be placed into your representation, and +// there may be parts of your representation that do not correspond to +// TOML values. This loose mapping can be made stricter by using the IsDefined +// and/or Undecoded methods on the MetaData returned. +// +// This decoder will not handle cyclic types. If a cyclic type is passed, +// `Decode` will not terminate. +func Decode(data string, v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) + } + p, err := parse(data) + if err != nil { + return MetaData{}, err + } + md := MetaData{ + p.mapping, p.types, p.ordered, + make(map[string]bool, len(p.ordered)), nil, + } + return md, md.unify(p.mapping, indirect(rv)) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at `fpath` and decode it for you. +func DecodeFile(fpath string, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadFile(fpath) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// DecodeReader is just like Decode, except it will consume all bytes +// from the reader and decode it for you. +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadAll(r) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + + // Special case. Look for a `Primitive` value. + if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // Save the undecoded data and the key context into the primitive + // value. 
+ context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + // Special case. Unmarshaler Interface support. + if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + + // Special case. Handle time.Time values specifically. + // TODO: Remove this code when we decide to drop support for Go 1.1. + // This isn't necessary in Go 1.2 because time.Time satisfies the encoding + // interfaces. + if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { + return md.unifyDatetime(data, rv) + } + + // Special case. Look for a value satisfying the TextUnmarshaler interface. + if v, ok := rv.Interface().(TextUnmarshaler); ok { + return md.unifyText(data, v) + } + // BUG(burntsushi) + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML + // hash or array. In particular, the unmarshaler should only be applied + // to primitive TOML values. But at this point, it will be applied to + // all kinds of values and produce an incorrect error whenever those values + // are hashes or arrays (including arrays of tables). + + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. + if rv.NumMethod() > 0 { + return e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32: + fallthrough + case reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = true + md.context = append(md.context, key) + if err := md.unify(datum, subv); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + // Bad user! No soup for you! 
+ return e("cannot write unexported field %s.%s", + rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = true + md.context = append(md.context, k) + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + sliceLen := datav.Len() + if sliceLen != rv.Len() { + return e("expected array length %d; got TOML array of length %d", + rv.Len(), sliceLen) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + sliceLen := data.Len() + for i := 0; i < sliceLen; i++ { + v := data.Index(i).Interface() + sliceval := indirect(rv.Index(i)) + if err := md.unify(v, sliceval); err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { + if _, ok := data.(time.Time); ok { + rv.Set(reflect.ValueOf(data)) + return nil + } + return badtype("time.Time", data) +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch rv.Kind() { + case reflect.Float32: + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + return badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("value %d is out of range for int8", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("value %d is out of range for int16", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("value %d is out of range for int32", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. 
+ case reflect.Uint8: + if num < 0 || unum > math.MaxUint8 { + return e("value %d is out of range for uint8", num) + } + case reflect.Uint16: + if num < 0 || unum > math.MaxUint16 { + return e("value %d is out of range for uint16", num) + } + case reflect.Uint32: + if num < 0 || unum > math.MaxUint32 { + return e("value %d is out of range for uint32", num) + } + } + rv.SetUint(unum) + } else { + panic("unreachable") + } + return nil + } + return badtype("integer", data) +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// Pointers are followed until the value is not a pointer. +// New values are allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of +// interest to us (like encoding.TextUnmarshaler). +func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + if _, ok := pv.Interface().(TextUnmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + if _, ok := rv.Interface().(TextUnmarshaler); ok { + return true + } + return false +} + +func badtype(expected string, data interface{}) error { + return e("cannot load TOML value of type %T into a Go %s", data, expected) +} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go new file mode 100644 index 000000000..b9914a679 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_meta.go @@ -0,0 +1,121 @@ +package toml + +import "strings" + +// MetaData allows access to meta information about TOML data that may not +// be inferrable via reflection. In particular, whether a key has been defined +// and the TOML type of a key. +type MetaData struct { + mapping map[string]interface{} + types map[string]tomlType + keys []Key + decoded map[string]bool + context Key // Used only during decoding. +} + +// IsDefined returns true if the key given exists in the TOML data. The key +// should be specified hierarchially. e.g., +// +// // access the TOML key 'a.b.c' +// IsDefined("a", "b", "c") +// +// IsDefined will return false if an empty key given. Keys are case sensitive. 
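To round out the `IsDefined` semantics spelled out above, a brief sketch of `IsDefined` and `Undecoded` in use; the TOML snippet and struct shape are invented for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

const doc = `
[server]
host = "localhost"
port = 5000
`

func main() {
	var conf struct {
		Server struct{ Host string }
	}
	md, err := toml.Decode(doc, &conf)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(md.IsDefined("server", "host")) // true
	fmt.Println(md.IsDefined("server", "tls"))  // false

	// "server.port" exists in the document but has no matching field in
	// conf, so it is reported as undecoded.
	for _, key := range md.Undecoded() {
		fmt.Println("undecoded:", key) // undecoded: server.port
	}
}
```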
+func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var hash map[string]interface{} + var ok bool + var hashOrVal interface{} = md.mapping + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that +// does not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + fullkey := strings.Join(key, ".") + if typ, ok := md.types[fullkey]; ok { + return typ.typeString() + } + return "" +} + +// Key is the type of any TOML key, including key groups. Use (MetaData).Keys +// to get values of this type. +type Key []string + +func (k Key) String() string { + return strings.Join(k, ".") +} + +func (k Key) maybeQuotedAll() string { + var ss []string + for i := range k { + ss = append(ss, k.maybeQuoted(i)) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + quote := false + for _, c := range k[i] { + if !isBareKeyChar(c) { + quote = true + break + } + } + if quote { + return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. +// +// The list will have the same order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a Primitive value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if !md.decoded[key.String()] { + undecoded = append(undecoded, key) + } + } + return undecoded +} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 000000000..b371f396e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,27 @@ +/* +Package toml provides facilities for decoding and encoding TOML configuration +files via reflection. There is also support for delaying decoding with +the Primitive type, and querying the set of keys in a TOML document with the +MetaData type. + +The specification implemented: https://github.com/toml-lang/toml + +The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify +whether a file is a valid TOML document. It can also be used to print the +type of each key in a TOML document. + +Testing + +There are two important types of tests used for this package. The first is +contained inside '*_test.go' files and uses the standard Go unit testing +framework. 
These tests are primarily devoted to holistically testing the +decoder and encoder. + +The second type of testing is used to verify the implementation's adherence +to the TOML specification. These tests have been factored into their own +project: https://github.com/BurntSushi/toml-test + +The reason the tests are in a separate project is so that they can be used by +any implementation of TOML. Namely, it is language agnostic. +*/ +package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 000000000..d905c21a2 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,568 @@ +package toml + +import ( + "bufio" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayMixedElementTypes = errors.New( + "toml: cannot encode array with mixed element types") + errArrayNilElement = errors.New( + "toml: cannot encode array with nil element") + errNonString = errors.New( + "toml: cannot encode a map with non-string key type") + errAnonNonStruct = errors.New( + "toml: cannot encode an anonymous field that is not a struct") + errArrayNoTable = errors.New( + "toml: TOML array element cannot contain a table") + errNoKey = errors.New( + "toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var quotedReplacer = strings.NewReplacer( + "\t", "\\t", + "\n", "\\n", + "\r", "\\r", + "\"", "\\\"", + "\\", "\\\\", +) + +// Encoder controls the encoding of Go values to a TOML document to some +// io.Writer. +// +// The indentation level can be controlled with the Indent field. +type Encoder struct { + // A single indentation level. By default it is two spaces. + Indent string + + // hasWritten is whether we have written any output to w yet. + hasWritten bool + w *bufio.Writer +} + +// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer +// given. By default, a single indentation level is 2 spaces. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the underlying +// io.Writer. If the value given cannot be encoded to a valid TOML document, +// then an error is returned. +// +// The mapping between Go values and TOML values should be precisely the same +// as for the Decode* functions. Similarly, the TextMarshaler interface is +// supported by encoding the resulting bytes as strings. (If you want to write +// arbitrary binary data then you will need to use something like base64 since +// TOML does not have any binary types.) +// +// When encoding TOML hashes (i.e., Go maps or structs), keys without any +// sub-hashes are encoded first. +// +// If a Go map is encoded, then its keys are sorted alphabetically for +// deterministic output. More control over this behavior may be provided if +// there is demand for it. +// +// Encoding Go values without a corresponding TOML representation---like map +// types with non-string keys---will cause an error to be returned. Similarly +// for mixed arrays/slices, arrays/slices with nil elements, embedded +// non-struct types and nested slices containing maps or structs. +// (e.g., [][]map[string]string is not allowed but []map[string]string is OK +// and so is []map[string][]string.) 
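As a companion to the mapping rules documented above, a minimal encoding sketch; the struct shape is invented, and the commented output is approximate:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type server struct {
	Host string `toml:"host"`
	Port int    `toml:"port"`
}

func main() {
	cfg := struct {
		Title   string
		Servers map[string]server `toml:"servers"`
	}{
		Title:   "example",
		Servers: map[string]server{"alpha": {Host: "10.0.0.1", Port: 8001}},
	}

	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
	// Title = "example"
	//
	// [servers]
	//   [servers.alpha]
	//     host = "10.0.0.1"
	//     port = 8001
}
```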
+func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + if err := enc.safeEncode(Key([]string{}), rv); err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // Special case. Time needs to be in ISO8601 format. + // Special case. If we can marshal the type to text, then we used that. + // Basically, this prevents the encoder for handling these types as + // generic structs (or whatever the underlying type of a TextMarshaler is). + switch rv.Interface().(type) { + case time.Time, TextMarshaler: + enc.keyEqElement(key, rv) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.keyEqElement(key, rv) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.keyEqElement(key, rv) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + panic(e("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element (primitives and +// arrays). +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: + // Special case time.Time as a primitive. Has to come before + // TextMarshaler below because time.Time implements + // encoding.TextMarshaler, but we need to always use UTC. + enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) + return + case TextMarshaler: + // Special case. Use text marshaler if it's available for this value. + if s, err := v.MarshalText(); err != nil { + encPanic(err) + } else { + enc.writeQuoted(string(s)) + } + return + } + switch rv.Kind() { + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) + case reflect.Float64: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Interface: + enc.eElement(rv.Elem()) + case reflect.String: + enc.writeQuoted(rv.String()) + default: + panic(e("unexpected primitive type: %s", rv.Kind())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one +// number on either side. 
+func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", quotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := rv.Index(i) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := rv.Index(i) + if isNil(trv) { + continue + } + panicIfInvalidKey(key) + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + enc.eMapOrStruct(key, trv) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + panicIfInvalidKey(key) + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + } + enc.eMapOrStruct(key, rv) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { + switch rv := eindirect(rv); rv.Kind() { + case reflect.Map: + enc.eMap(key, rv) + case reflect.Struct: + enc.eStruct(key, rv) + default: + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. + var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string) { + sort.Strings(mapKeys) + for _, mapKey := range mapKeys { + mrv := rv.MapIndex(reflect.ValueOf(mapKey)) + if isNil(mrv) { + // Don't write anything for nil fields. + continue + } + enc.encode(key.add(mapKey), mrv) + } + } + writeMapKeys(mapKeysDirect) + writeMapKeys(mapKeysSub) +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table, then all keys under it will be in that + // table (not the one we're writing here). + rt := rv.Type() + var fieldsDirect, fieldsSub [][]int + var addFields func(rt reflect.Type, rv reflect.Value, start []int) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + // skip unexported fields + if f.PkgPath != "" && !f.Anonymous { + continue + } + frv := rv.Field(i) + if f.Anonymous { + t := f.Type + switch t.Kind() { + case reflect.Struct: + // Treat anonymous struct fields with + // tag names as though they are not + // anonymous, like encoding/json does. + if getOptions(f.Tag).name == "" { + addFields(t, frv, f.Index) + continue + } + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct && + getOptions(f.Tag).name == "" { + if !frv.IsNil() { + addFields(t.Elem(), frv.Elem(), f.Index) + } + continue + } + // Fall through to the normal field encoding logic below + // for non-struct anonymous fields. 
+ } + } + + if typeIsHash(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + var writeFields = func(fields [][]int) { + for _, fieldIndex := range fields { + sft := rt.FieldByIndex(fieldIndex) + sf := rv.FieldByIndex(fieldIndex) + if isNil(sf) { + // Don't write anything for nil fields. + continue + } + + opts := getOptions(sft.Tag) + if opts.skip { + continue + } + keyName := sft.Name + if opts.name != "" { + keyName = opts.name + } + if opts.omitempty && isEmpty(sf) { + continue + } + if opts.omitzero && isZero(sf) { + continue + } + + enc.encode(key.add(keyName), sf) + } + } + writeFields(fieldsDirect) + writeFields(fieldsSub) +} + +// tomlTypeName returns the TOML type name of the Go value's type. It is +// used to determine whether the types of array elements are mixed (which is +// forbidden). If the Go value is nil, then it is illegal for it to be an array +// element, and valueIsNil is returned as true. + +// Returns the TOML type of a Go value. The type may be `nil`, which means +// no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if typeEqual(tomlHash, tomlArrayType(rv)) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + case reflect.Struct: + switch rv.Interface().(type) { + case time.Time: + return tomlDatetime + case TextMarshaler: + return tomlString + default: + return tomlHash + } + default: + panic("unexpected reflect.Kind: " + rv.Kind().String()) + } +} + +// tomlArrayType returns the element type of a TOML array. The type returned +// may be nil if it cannot be determined (e.g., a nil slice or a zero length +// slize). This function may also panic if it finds a type that cannot be +// expressed in TOML (such as nil elements, heterogeneous arrays or directly +// nested arrays of tables). +func tomlArrayType(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { + return nil + } + firstType := tomlTypeOfGo(rv.Index(0)) + if firstType == nil { + encPanic(errArrayNilElement) + } + + rvlen := rv.Len() + for i := 1; i < rvlen; i++ { + elem := rv.Index(i) + switch elemType := tomlTypeOfGo(elem); { + case elemType == nil: + encPanic(errArrayNilElement) + case !typeEqual(firstType, elemType): + encPanic(errArrayMixedElementTypes) + } + } + // If we have a nested array, then we must make sure that the nested + // array contains ONLY primitives. + // This checks arbitrarily nested arrays. 
+ if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { + nest := tomlArrayType(eindirect(rv.Index(0))) + if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { + encPanic(errArrayNoTable) + } + } + return firstType +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Bool: + return !rv.Bool() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + panicIfInvalidKey(key) + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + enc.newline() +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +func eindirect(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + return eindirect(v.Elem()) + default: + return v + } +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} + +func panicIfInvalidKey(key Key) { + for _, k := range key { + if len(k) == 0 { + encPanic(e("Key '%s' is not a valid table name. Key names "+ + "cannot be empty.", key.maybeQuotedAll())) + } + } +} + +func isValidKeyName(s string) bool { + return len(s) != 0 +} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go new file mode 100644 index 000000000..d36e1dd60 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types.go @@ -0,0 +1,19 @@ +// +build go1.2 + +package toml + +// In order to support Go 1.1, we define our own TextMarshaler and +// TextUnmarshaler types. For Go 1.2+, we just alias them with the +// standard library interfaces. + +import ( + "encoding" +) + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. 
+type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go new file mode 100644 index 000000000..e8d503d04 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go @@ -0,0 +1,18 @@ +// +build !go1.2 + +package toml + +// These interfaces were introduced in Go 1.2, so we add them manually when +// compiling for Go 1.1. + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler interface { + MarshalText() (text []byte, err error) +} + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. +type TextUnmarshaler interface { + UnmarshalText(text []byte) error +} diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 000000000..e0a742a88 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,953 @@ +package toml + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const ( + eof = 0 + comma = ',' + tableStart = '[' + tableEnd = ']' + arrayTableStart = '[' + arrayTableEnd = ']' + tableSep = '.' + keySep = '=' + arrayStart = '[' + arrayEnd = ']' + commentStart = '#' + stringStart = '"' + stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' + inlineTableStart = '{' + inlineTableEnd = '}' +) + +type stateFn func(lx *lexer) stateFn + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to three runes. + // This is necessary because TOML contains 3-rune tokens (""" and '''). + prevWidths [3]int + nprev int // how many of prevWidths are in use + // If we emit an eof, we can still back up, but it is not OK to call + // next again. + atEOF bool + + // A stack of state functions used to maintain context. + // The idea is to reuse parts of the state machine in various places. + // For example, values can appear at the top level or within arbitrarily + // nested arrays. The last state on the stack is used after a value has + // been lexed. Similarly for comments. 
+ stack []stateFn +} + +type item struct { + typ itemType + val string + line int +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + line: 1, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ, lx.current(), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 3 { + lx.nprev++ + } + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called only twice between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.nprev-- + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// errorf stops all lexing by emitting an error and returning `nil`. +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + lx.items <- item{ + itemError, + fmt.Sprintf(format, values...), + lx.line, + } + return nil +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case commentStart: + lx.push(lexTop) + return lexCommentStart + case tableStart: + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) 
It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == commentStart: + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf("expected a top-level item to end with a newline, "+ + "comment, or EOF, but got %q instead", r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == arrayTableStart { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != arrayTableEnd { + return lx.errorf("expected end of table array name delimiter %q, "+ + "but got %q instead", arrayTableEnd, r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == tableEnd || r == eof: + return lx.errorf("unexpected end of table name " + + "(table names cannot be empty)") + case r == tableSep: + return lx.errorf("unexpected table separator " + + "(table names cannot be empty)") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexTableNameEnd) + return lexValue // reuse string lexing + default: + return lexBareTableName + } +} + +// lexBareTableName lexes the name of a table. It assumes that at least one +// valid character for the table has already been read. +func lexBareTableName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r) { + return lexBareTableName + } + lx.backup() + lx.emit(itemText) + return lexTableNameEnd +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == tableSep: + lx.ignore() + return lexTableNameStart + case r == tableEnd: + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, "+ + "but got %q instead", r) + } +} + +// lexKeyStart consumes a key name up until the first non-whitespace character. +// lexKeyStart will ignore whitespace. +func lexKeyStart(lx *lexer) stateFn { + r := lx.peek() + switch { + case r == keySep: + return lx.errorf("unexpected key separator %q", keySep) + case isWhitespace(r) || isNL(r): + lx.next() + return lexSkip(lx, lexKeyStart) + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.emit(itemKeyStart) + lx.push(lexKeyEnd) + return lexValue // reuse string lexing + default: + lx.ignore() + lx.emit(itemKeyStart) + return lexBareKey + } +} + +// lexBareKey consumes the text of a bare key. Assumes that the first character +// (which is not whitespace) has not yet been consumed. 
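+// For example, in `some-key_1 = true` the bare key is "some-key_1"; bare keys
+// may contain only ASCII letters, digits, '-' and '_' (see isBareKeyChar).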
+func lexBareKey(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isBareKeyChar(r):
+		return lexBareKey
+	case isWhitespace(r):
+		lx.backup()
+		lx.emit(itemText)
+		return lexKeyEnd
+	case r == keySep:
+		lx.backup()
+		lx.emit(itemText)
+		return lexKeyEnd
+	default:
+		return lx.errorf("bare keys cannot contain %q", r)
+	}
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case r == keySep:
+		return lexSkip(lx, lexValue)
+	case isWhitespace(r):
+		return lexSkip(lx, lexKeyEnd)
+	default:
+		return lx.errorf("expected key separator %q, but got %q instead",
+			keySep, r)
+	}
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the stack is popped and returned.
+func lexValue(lx *lexer) stateFn {
+	// We allow whitespace to precede a value, but NOT newlines.
+	// In array syntax, the array states are responsible for ignoring newlines.
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case isDigit(r):
+		lx.backup() // avoid an extra state and use the same as above
+		return lexNumberOrDateStart
+	}
+	switch r {
+	case arrayStart:
+		lx.ignore()
+		lx.emit(itemArray)
+		return lexArrayValue
+	case inlineTableStart:
+		lx.ignore()
+		lx.emit(itemInlineTableStart)
+		return lexInlineTableValue
+	case stringStart:
+		if lx.accept(stringStart) {
+			if lx.accept(stringStart) {
+				lx.ignore() // Ignore """
+				return lexMultilineString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the '"'
+		return lexString
+	case rawStringStart:
+		if lx.accept(rawStringStart) {
+			if lx.accept(rawStringStart) {
+				lx.ignore() // Ignore '''
+				return lexMultilineRawString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case '+', '-':
+		return lexNumberStart
+	case '.': // special error case, be kind to users
+		return lx.errorf("floats must start with a digit, not '.'")
+	}
+	if unicode.IsLetter(r) {
+		// Be permissive here; lexBool will give a nice error if the
+		// user wrote something like
+		//   x = foo
+		// (i.e. not 'true' or 'false' but is something else word-like.)
+		lx.backup()
+		return lexBool
+	}
+	return lx.errorf("expected value but found %q instead", r)
+}
+
+// lexArrayValue consumes one value in an array. It assumes that '[' or ','
+// have already been consumed. All whitespace and newlines are ignored.
+func lexArrayValue(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r) || isNL(r):
+		return lexSkip(lx, lexArrayValue)
+	case r == commentStart:
+		lx.push(lexArrayValue)
+		return lexCommentStart
+	case r == comma:
+		return lx.errorf("unexpected comma")
+	case r == arrayEnd:
+		// NOTE(caleb): The spec isn't clear about whether you can have
+		// a trailing comma or not, so we'll allow it.
+		return lexArrayEnd
+	}
+
+	lx.backup()
+	lx.push(lexArrayValueEnd)
+	return lexValue
+}
+
+// lexArrayValueEnd consumes everything between the end of an array value and
+// the next value (or the end of the array): it ignores whitespace and newlines
+// and expects either a ',' or a ']'.
+func lexArrayValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == commentStart: + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexArrayValue // move on to the next value + case r == arrayEnd: + return lexArrayEnd + } + return lx.errorf( + "expected a comma or array terminator %q, but got %q instead", + arrayEnd, r, + ) +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. +func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == inlineTableEnd: + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexInlineTableValue + case r == inlineTableEnd: + return lexInlineTableEnd + } + return lx.errorf("expected a comma or an inline table terminator %q, "+ + "but got %q instead", inlineTableEnd, r) +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == stringEnd: + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case '\\': + return lexMultilineStringEscape + case stringEnd: + if lx.accept(stringEnd) { + if lx.accept(stringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineString +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. 
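+// For example, in `winpath = 'C:\Users\nodejs'` the backslashes are taken
+// literally rather than as escape sequences.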
+func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == rawStringEnd: + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexRawString +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning "'''" has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case rawStringEnd: + if lx.accept(rawStringEnd) { + if lx.accept(rawStringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineRawString +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.errorf("invalid escape character %q; only the following "+ + "escape characters are allowed: "+ + `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart consumes either an integer, a float, or datetime. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + return lexNumber + case 'e', 'E': + return lexFloat + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-': + return lexDatetime + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', 'T', ':', '.', 'Z', '+': + return lexDatetime + } + + lx.backup() + lx.emit(itemDatetime) + return lx.pop() +} + +// lexNumberStart consumes either an integer or a float. 
It assumes that a sign
+// has already been read, but that *no* digits have been consumed.
+// lexNumberStart will move to the appropriate integer or float states.
+func lexNumberStart(lx *lexer) stateFn {
+	// We MUST see a digit. Even floats have to start with a digit.
+	r := lx.next()
+	if !isDigit(r) {
+		if r == '.' {
+			return lx.errorf("floats must start with a digit, not '.'")
+		}
+		return lx.errorf("expected a digit but got %q", r)
+	}
+	return lexNumber
+}
+
+// lexNumber consumes an integer or a float after seeing the first digit.
+func lexNumber(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexNumber
+	}
+	switch r {
+	case '_':
+		return lexNumber
+	case '.', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
+func lexFloat(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexFloat
+	}
+	switch r {
+	case '_', '.', '-', '+', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemFloat)
+	return lx.pop()
+}
+
+// lexBool consumes a bool string: 'true' or 'false'.
+func lexBool(lx *lexer) stateFn {
+	var rs []rune
+	for {
+		r := lx.next()
+		if !unicode.IsLetter(r) {
+			lx.backup()
+			break
+		}
+		rs = append(rs, r)
+	}
+	s := string(rs)
+	switch s {
+	case "true", "false":
+		lx.emit(itemBool)
+		return lx.pop()
+	}
+	return lx.errorf("expected value but found %q instead", s)
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemCommentStart)
+	return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+	r := lx.peek()
+	if isNL(r) || r == eof {
+		lx.emit(itemText)
+		return lx.pop()
+	}
+	lx.next()
+	return lexComment
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn {
+	return func(lx *lexer) stateFn {
+		lx.ignore()
+		return nextState
+	}
+}
+
+// isWhitespace returns true if `r` is a whitespace character according
+// to the spec.
+func isWhitespace(r rune) bool { + return r == '\t' || r == ' ' +} + +func isNL(r rune) bool { + return r == '\n' || r == '\r' +} + +func isDigit(r rune) bool { + return r >= '0' && r <= '9' +} + +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} + +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || + r == '-' +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 000000000..50869ef92 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,592 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +type parser struct { + mapping map[string]interface{} + types map[string]tomlType + lx *lexer + + // A list of keys in the order that they appear in the TOML data. + ordered []Key + + // the full key for the current hash in scope + context Key + + // the base key name for everything except hashes + currentKey string + + // rough approximation of line number + approxLine int + + // A map of 'key.group.names' to whether they were created implicitly. 
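+	// For example, the table header [a.b.c] implicitly creates the
+	// tables "a" and "a.b" if they do not already exist.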
+ implicits map[string]bool +} + +type parseError string + +func (pe parseError) Error() string { + return string(pe) +} + +func parse(data string) (p *parser, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + if err, ok = r.(parseError); ok { + return + } + panic(r) + } + }() + + p = &parser{ + mapping: make(map[string]interface{}), + types: make(map[string]tomlType), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]bool), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicf(format string, v ...interface{}) { + msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", + p.approxLine, p.current(), fmt.Sprintf(format, v...)) + panic(parseError(msg)) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + if it.typ == itemError { + p.panicf("%s", it.val) + } + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: + p.approxLine = item.line + p.expect(itemText) + case itemTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemTableEnd, kg.typ) + + p.establishContext(key, false) + p.setType("", tomlHash) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemArrayTableEnd, kg.typ) + + p.establishContext(key, true) + p.setType("", tomlArrayHash) + p.ordered = append(p.ordered, key) + case itemKeyStart: + kname := p.next() + p.approxLine = kname.line + p.currentKey = p.keyString(kname) + + val, typ := p.value(p.next()) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + panic("unreachable") + } +} + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. 
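+// For example, an itemInteger with text "42" becomes int64(42) (typed
+// tomlInteger), and an itemBool with text "true" becomes true (tomlBool).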
+func (p *parser) value(it item) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + case itemMultilineString: + trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) + return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + } + p.bug("Expected boolean value, but got '%s'.", it.val) + case itemInteger: + if !numUnderscoresOK(it.val) { + p.panicf("Invalid integer %q: underscores must be surrounded by digits", + it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseInt(val, 10, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Integer '%s' is out of the range of 64-bit "+ + "signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemFloat: + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicf("Invalid float %q: underscores must be "+ + "surrounded by digits", it.val) + } + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicf("Invalid float %q: '.' 
must be followed "+ + "by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Float '%s' is out of the range of 64-bit "+ + "IEEE-754 floating-point numbers.", it.val) + } else { + p.panicf("Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemDatetime: + var t time.Time + var ok bool + var err error + for _, format := range []string{ + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05", + "2006-01-02", + } { + t, err = time.ParseInLocation(format, it.val, time.Local) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) + case itemArray: + array := make([]interface{}, 0) + types := make([]tomlType, 0) + + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it) + array = append(array, val) + types = append(types, typ) + } + return array, p.typeOfArray(types) + case itemInlineTableStart: + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + p.currentKey = "" + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ != itemKeyStart { + p.bug("Expected key start but instead found %q, around line %d", + it.val, p.approxLine) + } + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + // retrieve key + k := p.next() + p.approxLine = k.line + kname := p.keyString(k) + + // retrieve value + p.currentKey = kname + val, typ := p.value(p.next()) + // make sure we keep metadata up to date + p.setType(kname, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + hash[kname] = val + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash + } + p.bug("Unexpected value type: %s", it.typ) + panic("unreachable") +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. +func numUnderscoresOK(s string) bool { + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + accept = false + continue + } + accept = true + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// establishContext sets the current context of the parser, +// where the context is either a hash or an array of hashes. Which one is +// set depends on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) establishContext(key Key, array bool) { + var ok bool + + // Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0) + + // We only need implicit hashes for key[0:-1] + for _, k := range key[0 : len(key)-1] { + _, ok = hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. 
+		if !ok {
+			p.addImplicit(keyContext)
+			hashContext[k] = make(map[string]interface{})
+		}
+
+		// If the hash context is actually an array of tables, then set
+		// the hash context to the last element in that array.
+		//
+		// Otherwise, it better be a table, since this MUST be a key group (by
+		// virtue of it not being the last element in a key).
+		switch t := hashContext[k].(type) {
+		case []map[string]interface{}:
+			hashContext = t[len(t)-1]
+		case map[string]interface{}:
+			hashContext = t
+		default:
+			p.panicf("Key '%s' was already created as a hash.", keyContext)
+		}
+	}
+
+	p.context = keyContext
+	if array {
+		// If this is the first element for this array, then allocate a new
+		// list of tables for it.
+		k := key[len(key)-1]
+		if _, ok := hashContext[k]; !ok {
+			hashContext[k] = make([]map[string]interface{}, 0, 5)
+		}
+
+		// Add a new table. But make sure the key hasn't already been used
+		// for something else.
+		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+			hashContext[k] = append(hash, make(map[string]interface{}))
+		} else {
+			p.panicf("Key '%s' was already created and cannot be used as "+
+				"an array.", keyContext)
+		}
+	} else {
+		p.setValue(key[len(key)-1], make(map[string]interface{}))
+	}
+	p.context = append(p.context, key[len(key)-1])
+}
+
+// setValue sets the given key to the given value in the current context.
+// It will make sure that the key hasn't already been defined, accounting for
+// implicit key groups.
+func (p *parser) setValue(key string, value interface{}) {
+	var tmpHash interface{}
+	var ok bool
+
+	hash := p.mapping
+	keyContext := make(Key, 0)
+	for _, k := range p.context {
+		keyContext = append(keyContext, k)
+		if tmpHash, ok = hash[k]; !ok {
+			p.bug("Context for key '%s' has not been established.", keyContext)
+		}
+		switch t := tmpHash.(type) {
+		case []map[string]interface{}:
+			// The context is a table of hashes. Pick the most recent table
+			// defined as the current hash.
+			hash = t[len(t)-1]
+		case map[string]interface{}:
+			hash = t
+		default:
+			p.bug("Expected hash to have type 'map[string]interface{}', but "+
+				"it has '%T' instead.", tmpHash)
+		}
+	}
+	keyContext = append(keyContext, key)
+
+	if _, ok := hash[key]; ok {
+		// Typically, if the given key has already been set, then we have
+		// to raise an error since duplicate keys are disallowed. However,
+		// it's possible that a key was previously defined implicitly. In this
+		// case, it is allowed to be redefined concretely. (See the
+		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
+		//
+		// But we have to make sure to stop marking it as an implicit. (So that
+		// another redefinition provokes an error.)
+		//
+		// Note that since it has already been defined (as a hash), we don't
+		// want to overwrite it. So our business is done.
+		if p.isImplicit(keyContext) {
+			p.removeImplicit(keyContext)
+			return
+		}
+
+		// Otherwise, we have a concrete key trying to override a previous
+		// key, which is *always* wrong.
+		p.panicf("Key '%s' has already been defined.", keyContext)
+	}
+	hash[key] = value
+}
+
+// setType sets the type of a particular value at a given key.
+// It should be called immediately AFTER setValue.
+//
+// Note that if `key` is empty, then the type given will be applied to the
+// current context (which is either a table or an array of tables).
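+// For example, with context ["server"] and key "port", the type is stored
+// under the dotted key "server.port".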
+func (p *parser) setType(key string, typ tomlType) { + keyContext := make(Key, 0, len(p.context)+1) + for _, k := range p.context { + keyContext = append(keyContext, k) + } + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + p.types[keyContext.String()] = typ +} + +// addImplicit sets the given Key as having been created implicitly. +func (p *parser) addImplicit(key Key) { + p.implicits[key.String()] = true +} + +// removeImplicit stops tagging the given key as having been implicitly +// created. +func (p *parser) removeImplicit(key Key) { + p.implicits[key.String()] = false +} + +// isImplicit returns true if the key group pointed to by the key was created +// implicitly. +func (p *parser) isImplicit(key Key) bool { + return p.implicits[key.String()] +} + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) == 0 || s[0] != '\n' { + return s + } + return s[1:] +} + +func stripEscapedWhitespace(s string) string { + esc := strings.Split(s, "\\\n") + if len(esc) > 1 { + for i := 1; i < len(esc); i++ { + esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) + } + } + return strings.Join(esc, "") +} + +func (p *parser) replaceEscapes(str string) string { + var replaced []rune + s := []byte(str) + r := 0 + for r < len(s) { + if s[r] != '\\' { + c, size := utf8.DecodeRune(s[r:]) + r += size + replaced = append(replaced, c) + continue + } + r += 1 + if r >= len(s) { + p.bug("Escape sequence at end of string.") + return "" + } + switch s[r] { + default: + p.bug("Expected valid escape code after \\, but got %q.", s[r]) + return "" + case 'b': + replaced = append(replaced, rune(0x0008)) + r += 1 + case 't': + replaced = append(replaced, rune(0x0009)) + r += 1 + case 'n': + replaced = append(replaced, rune(0x000A)) + r += 1 + case 'f': + replaced = append(replaced, rune(0x000C)) + r += 1 + case 'r': + replaced = append(replaced, rune(0x000D)) + r += 1 + case '"': + replaced = append(replaced, rune(0x0022)) + r += 1 + case '\\': + replaced = append(replaced, rune(0x005C)) + r += 1 + case 'u': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+5). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) + replaced = append(replaced, escaped) + r += 5 + case 'U': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+9). (Because the lexer guarantees this + // for us.) 
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the "+ + "lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} + +func isStringType(ty itemType) bool { + return ty == itemString || ty == itemMultilineString || + ty == itemRawString || ty == itemRawMultilineString +} diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go new file mode 100644 index 000000000..c73f8afc1 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_check.go @@ -0,0 +1,91 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsHash(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} + +// typeOfArray returns a tomlType for an array given a list of types of its +// values. +// +// In the current spec, if an array is homogeneous, then its type is always +// "Array". If the array is not homogeneous, an error is generated. +func (p *parser) typeOfArray(types []tomlType) tomlType { + // Empty arrays are cool. 
+ if len(types) == 0 { + return tomlArray + } + + theType := types[0] + for _, t := range types[1:] { + if !typeEqual(theType, t) { + p.panicf("Array contains values of type '%s' and '%s', but "+ + "arrays must be homogeneous.", theType, t) + } + } + return tomlArray +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 000000000..608997c22 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. 
+ if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
+ f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/alessio/shellescape/.gitignore b/vendor/github.com/alessio/shellescape/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/vendor/github.com/alessio/shellescape/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/alessio/shellescape/.travis.yml b/vendor/github.com/alessio/shellescape/.travis.yml new file mode 100644 index 000000000..f5763c5aa --- /dev/null +++ b/vendor/github.com/alessio/shellescape/.travis.yml @@ -0,0 +1,4 @@ +language: go + +go: + - 1.14 diff --git a/vendor/github.com/alessio/shellescape/AUTHORS b/vendor/github.com/alessio/shellescape/AUTHORS new file mode 100644 index 000000000..4a647a6f4 --- /dev/null +++ b/vendor/github.com/alessio/shellescape/AUTHORS @@ -0,0 +1 @@ +Alessio Treglia <alessio@debian.org> diff --git a/vendor/github.com/alessio/shellescape/LICENSE b/vendor/github.com/alessio/shellescape/LICENSE new file mode 100644 index 000000000..9f760679f --- /dev/null +++ b/vendor/github.com/alessio/shellescape/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Alessio Treglia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/alessio/shellescape/README.md b/vendor/github.com/alessio/shellescape/README.md new file mode 100644 index 000000000..ce9e33f42 --- /dev/null +++ b/vendor/github.com/alessio/shellescape/README.md @@ -0,0 +1,58 @@ +[](https://godoc.org/github.com/alessio/shellescape) +[](http://travis-ci.org/#!/alessio/shellescape) +[](https://gocover.io/github.com/alessio/shellescape) +[](https://coveralls.io/github/alessio/shellescape?branch=master) +# shellescape +Escape arbitrary strings for safe use as command line arguments. +## Contents of the package + +This package provides the `shellescape.Quote()` function that returns a +shell-escaped copy of a string. This functionality could be helpful +in those cases where it is known that the output of a Go program will +be appended to/used in the context of shell programs' command line arguments. 
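+
+As a rough illustration of the escaping rules (the outputs shown in the
+comments assume the quoting behavior implemented in `shellescape.go`):
+strings containing shell metacharacters come back single-quoted, with any
+embedded single quotes escaped, while already-safe strings pass through
+unchanged.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"gopkg.in/alessio/shellescape.v1"
+)
+
+func main() {
+	fmt.Println(shellescape.Quote("hello world")) // 'hello world'
+	fmt.Println(shellescape.Quote("it's"))        // 'it'"'"'s'
+	fmt.Println(shellescape.Quote("safe.txt"))    // safe.txt (unchanged)
+}
+```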
+
+This work was inspired by the Python original package
+[shellescape](https://pypi.python.org/pypi/shellescape).
+
+## Usage
+
+The following snippet shows a typical unsafe idiom:
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+)
+
+func main() {
+	fmt.Printf("ls -l %s\n", os.Args[1])
+}
+```
+_[See in Go Playground](https://play.golang.org/p/Wj2WoUfH_d)_
+
+Especially when creating pipelines of commands which might end up being
+executed by a shell interpreter, it is particularly unsafe not to
+escape arguments.
+
+`shellescape.Quote()` comes in handy to safely escape strings:
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"gopkg.in/alessio/shellescape.v1"
+)
+
+func main() {
+	fmt.Printf("ls -l %s\n", shellescape.Quote(os.Args[1]))
+}
+```
+_[See in Go Playground](https://play.golang.org/p/HJ_CXgSrmp)_
+
+## The escargs utility
+__escargs__ reads lines from the standard input and prints shell-escaped versions. Unlike __xargs__, blank lines on the standard input are not discarded.
diff --git a/vendor/github.com/alessio/shellescape/go.mod b/vendor/github.com/alessio/shellescape/go.mod
new file mode 100644
index 000000000..08d282650
--- /dev/null
+++ b/vendor/github.com/alessio/shellescape/go.mod
@@ -0,0 +1,3 @@
+module github.com/alessio/shellescape
+
+go 1.14
diff --git a/vendor/github.com/alessio/shellescape/shellescape.go b/vendor/github.com/alessio/shellescape/shellescape.go
new file mode 100644
index 000000000..56725d182
--- /dev/null
+++ b/vendor/github.com/alessio/shellescape/shellescape.go
@@ -0,0 +1,39 @@
+/*
+Package shellescape provides the shellescape.Quote function to escape
+arbitrary strings for safe use as command line arguments in the most
+common POSIX shells.
+
+The original Python package which this work was inspired by can be found
+at https://pypi.python.org/pypi/shellescape.
+*/
+package shellescape // import "gopkg.in/alessio/shellescape.v1"
+
+/*
+The functionality provided by shellescape.Quote could be helpful
+in those cases where it is known that the output of a Go program will
+be appended to/used in the context of shell programs' command line arguments.
+*/
+
+import (
+	"regexp"
+	"strings"
+)
+
+var pattern *regexp.Regexp
+
+func init() {
+	pattern = regexp.MustCompile(`[^\w@%+=:,./-]`)
+}
+
+// Quote returns a shell-escaped version of the string s. The returned value
+// is a string that can safely be used as one token in a shell command line.
+func Quote(s string) string {
+	if len(s) == 0 {
+		return "''"
+	}
+	if pattern.MatchString(s) {
+		return "'" + strings.Replace(s, "'", "'\"'\"'", -1) + "'"
+	}
+
+	return s
+}
diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml
new file mode 100644
index 000000000..50e4afd19
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+
+go:
+  - 1.14
+  - 1.13
+
+install:
+  - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+  - go get github.com/jessevdk/go-flags
+
+script:
+  - go get
+  - go test -cover ./...
+  - cd ./v5
+  - go get
+  - go test -cover ./...
+
+notifications:
+  email: false
diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE
new file mode 100644
index 000000000..df76d7d77
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2014, Evan Phoenix
+All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md new file mode 100644 index 000000000..121b039db --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -0,0 +1,298 @@ +# JSON-Patch +`jsonpatch` is a library which provides functionality for both applying +[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as +well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). + +[](http://godoc.org/github.com/evanphx/json-patch) +[](https://travis-ci.org/evanphx/json-patch) +[](https://goreportcard.com/report/github.com/evanphx/json-patch) + +# Get It! + +**Latest and greatest**: +```bash +go get -u github.com/evanphx/json-patch/v5 +``` + +**Stable Versions**: +* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` +* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` + +(previous versions below `v3` are unavailable) + +# Use It! +* [Create and apply a merge patch](#create-and-apply-a-merge-patch) +* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) +* [Comparing JSON documents](#comparing-json-documents) +* [Combine merge patches](#combine-merge-patches) + + +# Configuration + +* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. + This defaults to `true` and enables the non-standard practice of allowing + negative indices to mean indices starting at the end of an array. This + functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = + false`. + +* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`, + which limits the total size increase in bytes caused by "copy" operations in a + patch. It defaults to 0, which means there is no limit. + +## Create and apply a merge patch +Given both an original JSON document and a modified JSON document, you can create +a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. + +It can describe the changes needed to convert from the original to the +modified JSON document. 
+
+Once you have a merge patch, you can apply it to other JSON documents using the
+`jsonpatch.MergePatch(document, patch)` function.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	// Let's create a merge patch from these two documents...
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+	target := []byte(`{"name": "Jane", "age": 24}`)
+
+	patch, err := jsonpatch.CreateMergePatch(original, target)
+	if err != nil {
+		panic(err)
+	}
+
+	// Now let's apply the patch against a different JSON document...
+
+	alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`)
+	modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch)
+
+	fmt.Printf("patch document: %s\n", patch)
+	fmt.Printf("updated alternative doc: %s\n", modifiedAlternative)
+}
+```
+
+When run, you get the following output:
+
+```bash
+$ go run main.go
+patch document: {"height":null,"name":"Jane"}
+updated alternative doc: {"age":28,"name":"Jane"}
+```
+
+## Create and apply a JSON Patch
+You can create patch objects using `DecodePatch([]byte)`, which can then
+be applied against JSON documents.
+
+The following is an example of creating a patch from two operations, and
+applying it against a JSON document.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+	patchJSON := []byte(`[
+		{"op": "replace", "path": "/name", "value": "Jane"},
+		{"op": "remove", "path": "/height"}
+	]`)
+
+	patch, err := jsonpatch.DecodePatch(patchJSON)
+	if err != nil {
+		panic(err)
+	}
+
+	modified, err := patch.Apply(original)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("Original document: %s\n", original)
+	fmt.Printf("Modified document: %s\n", modified)
+}
+```
+
+When run, you get the following output:
+
+```bash
+$ go run main.go
+Original document: {"name": "John", "age": 24, "height": 3.21}
+Modified document: {"age":24,"name":"Jane"}
+```
+
+## Comparing JSON documents
+Due to potential whitespace and ordering differences, one cannot simply compare
+JSON strings or byte-arrays directly.
+
+As such, you can instead use `jsonpatch.Equal(document1, document2)` to
+determine if two JSON documents are _structurally_ equal. This ignores
+whitespace differences and key-value ordering.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+	similar := []byte(`
+		{
+			"age": 24,
+			"height": 3.21,
+			"name": "John"
+		}
+	`)
+	different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`)
+
+	if jsonpatch.Equal(original, similar) {
+		fmt.Println(`"original" is structurally equal to "similar"`)
+	}
+
+	if !jsonpatch.Equal(original, different) {
+		fmt.Println(`"original" is _not_ structurally equal to "different"`)
+	}
+}
+```
+
+When run, you get the following output:
+```bash
+$ go run main.go
+"original" is structurally equal to "similar"
+"original" is _not_ structurally equal to "different"
+```
+
+## Combine merge patches
+Given two JSON merge patch documents, it is possible to combine them into a
+single merge patch which can describe both sets of changes.
+
+The resulting merge patch can be used such that applying it results in a
+document structurally equivalent to merging each merge patch into the document
+in succession.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+
+	nameAndHeight := []byte(`{"height":null,"name":"Jane"}`)
+	ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`)
+
+	// Let's combine these merge patch documents...
+	combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes)
+	if err != nil {
+		panic(err)
+	}
+
+	// Apply each patch individually against the original document
+	withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight)
+	if err != nil {
+		panic(err)
+	}
+
+	withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes)
+	if err != nil {
+		panic(err)
+	}
+
+	// Apply the combined patch against the original document
+
+	withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch)
+	if err != nil {
+		panic(err)
+	}
+
+	// Do both result in the same thing? They should!
+	if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) {
+		fmt.Println("Both JSON documents are structurally the same!")
+	}
+
+	fmt.Printf("combined merge patch: %s", combinedPatch)
+}
+```
+
+When run, you get the following output:
+```bash
+$ go run main.go
+Both JSON documents are structurally the same!
+combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"}
+```
+
+# CLI for comparing JSON documents
+You can install the command-line program `json-patch`.
+
+This program can take multiple JSON patch documents as arguments,
+and can be fed a JSON document from `stdin`. It will apply the patch(es) against
+the document and output the modified doc.
+
+**patch.1.json**
+```json
+[
+    {"op": "replace", "path": "/name", "value": "Jane"},
+    {"op": "remove", "path": "/height"}
+]
+```
+
+**patch.2.json**
+```json
+[
+    {"op": "add", "path": "/address", "value": "123 Main St"},
+    {"op": "replace", "path": "/age", "value": "21"}
+]
+```
+
+**document.json**
+```json
+{
+    "name": "John",
+    "age": 24,
+    "height": 3.21
+}
+```
+
+You can then run:
+
+```bash
+$ go install github.com/evanphx/json-patch/cmd/json-patch
+$ cat document.json | json-patch -p patch.1.json -p patch.2.json
+{"address":"123 Main St","age":"21","name":"Jane"}
+```
+
+# Help It!
+Contributions are welcome! Leave [an issue](https://github.com/evanphx/json-patch/issues)
+or [create a PR](https://github.com/evanphx/json-patch/compare).
+
+
+Before creating a pull request, we'd ask that you make sure tests are passing
+and that you have added new tests when applicable.
+
+Contributors can run tests using:
+
+```bash
+go test -cover ./...
+```
+
+Builds for pull requests are tested automatically
+using [TravisCI](https://travis-ci.org/evanphx/json-patch).
diff --git a/vendor/github.com/evanphx/json-patch/errors.go b/vendor/github.com/evanphx/json-patch/errors.go
new file mode 100644
index 000000000..75304b443
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/errors.go
@@ -0,0 +1,38 @@
+package jsonpatch
+
+import "fmt"
+
+// AccumulatedCopySizeError is an error type returned when the accumulated size
+// increase caused by copy operations in a patch operation has exceeded the
+// limit.
+type AccumulatedCopySizeError struct {
+	limit       int64
+	accumulated int64
+}
+
+// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
+func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
+	return &AccumulatedCopySizeError{limit: l, accumulated: a}
+}
+
+// Error implements the error interface.
+func (a *AccumulatedCopySizeError) Error() string { + return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) +} + +// ArraySizeError is an error type returned when the array size has exceeded +// the limit. +type ArraySizeError struct { + limit int + size int +} + +// NewArraySizeError returns an ArraySizeError. +func NewArraySizeError(l, s int) *ArraySizeError { + return &ArraySizeError{limit: l, size: s} +} + +// Error implements the error interface. +func (a *ArraySizeError) Error() string { + return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) +} diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go new file mode 100644 index 000000000..14e8bb5ce --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -0,0 +1,386 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range *patch { + if v == nil { + if mergeMerge { + (*doc)[k] = nil + } else { + delete(*doc, k) + } + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + pruneNulls(v) + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v, mergeMerge) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + newAry = append(newAry, v) + } + } + + *ary = newAry + + return ary +} + +var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") + +// MergeMergePatches merges two merge patches together, such that +// applying this resulting merged merge patch to a document yields the same +// as merging each merge patch to the document in succession. +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData. 
+func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, errBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, errBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, errBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, errBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + + return json.Marshal(doc) +} + +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. +func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices are not arrays? Then they are likely JSON objects... + if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document. 
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDoc := map[string]interface{}{} + modifiedDoc := map[string]interface{}{} + + err := json.Unmarshal(originalJSON, &originalDoc) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDoc) + if err != nil { + return nil, errBadJSONDoc + } + + dest, err := getDiff(originalDoc, modifiedDoc) + if err != nil { + return nil, err + } + + return json.Marshal(dest) +} + +// createArrayMergePatch will return an array of merge-patch documents capable +// of converting the original document to the modified document for each +// pair of JSON documents provided in the arrays. +// Arrays of mismatched sizes will result in an error. +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDocs := []json.RawMessage{} + modifiedDocs := []json.RawMessage{} + + err := json.Unmarshal(originalJSON, &originalDocs) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDocs) + if err != nil { + return nil, errBadJSONDoc + } + + total := len(originalDocs) + if len(modifiedDocs) != total { + return nil, errBadJSONDoc + } + + result := []json.RawMessage{} + for i := 0; i < len(originalDocs); i++ { + original := originalDocs[i] + modified := modifiedDocs[i] + + patch, err := createObjectMergePatch(original, modified) + if err != nil { + return nil, err + } + + result = append(result, json.RawMessage(patch)) + } + + return json.Marshal(result) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. +func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case nil: + // Both nil, fine. + return true + case map[string]interface{}: + bt := bv.(map[string]interface{}) + if len(bt) != len(at) { + return false + } + for key := range bt { + av, aOK := at[key] + bv, bOK := bt[key] + if aOK != bOK { + return false + } + if !matchesValue(av, bv) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go new file mode 100644 index 000000000..f185a45b2 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -0,0 +1,784 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +const ( + eRaw = iota + eDoc + eAry +) + +var ( + // SupportNegativeIndices decides whether to support non-standard practice of + // allowing negative indices to mean indices starting at the end of an array. + // Default to true. + SupportNegativeIndices bool = true + // AccumulatedCopySizeLimit limits the total size increase in bytes caused by + // "copy" operations in a patch. + AccumulatedCopySizeLimit int64 = 0 +) + +var ( + ErrTestFailed = errors.New("test failed") + ErrMissing = errors.New("missing value") + ErrUnknownType = errors.New("unknown object type") + ErrInvalid = errors.New("invalid state detected") + ErrInvalidIndex = errors.New("invalid index referenced") +) + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +// Operation is a single JSON-Patch step, such as a single 'add' operation. +type Operation map[string]*json.RawMessage + +// Patch is an ordered collection of Operations. 
+type Patch []Operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, ErrUnknownType + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func deepCopy(src *lazyNode) (*lazyNode, int, error) { + if src == nil { + return nil, 0, nil + } + a, err := src.MarshalJSON() + if err != nil { + return nil, 0, err + } + sz := len(a) + ra := make(json.RawMessage, sz) + copy(ra, a) + return newLazyNode(&ra), sz, nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + if n.raw == nil { + return nil + } + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + if len(n.doc) != len(o.doc) { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if (v == nil) != (ov == nil) { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +// Kind reads the "op" field of the Operation. +func (o Operation) Kind() string { + if obj, ok := o["op"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +// Path reads the "path" field of the Operation. 
+func (o Operation) Path() (string, error) { + if obj, ok := o["path"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") +} + +// From reads the "from" field of the Operation. +func (o Operation) From() (string, error) { + if obj, ok := o["from"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") +} + +func (o Operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +// ValueInterface decodes the operation value into an interface. +func (o Operation) ValueInterface() (interface{}, error) { + if obj, ok := o["value"]; ok && obj != nil { + var v interface{} + + err := json.Unmarshal(*obj, &v) + + if err != nil { + return nil, err + } + + return v, nil + } + + return nil, errors.Wrapf(ErrMissing, "operation, missing value field") +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) + } + + delete(*d, key) + return nil +} + +// set should only be used to implement the "replace" operation, so "key" must +// be an already existing index in "d". 
+func (d *partialArray) set(key string, val *lazyNode) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + (*d)[idx] = val + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + } + + sz := len(*d) + 1 + + ary := make([]*lazyNode, sz) + + cur := *d + + if idx >= len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(ary) + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx >= len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(cur) + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "add operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.add(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in add for path: '%s'", path) + } + + return nil +} + +func (p Patch) remove(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) replace(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "replace operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) + } + + _, ok := con.get(key) + if ok != nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) + } + + err = con.set(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) move(doc *container, op Operation) 
error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + } + + err = con.add(key, val) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", path) + } + + return nil +} + +func (p Patch) test(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "test operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in test for path: '%s'", path) + } + + if val == nil { + if op.value().raw == nil { + return nil + } + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } else if op.value() == nil { + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + + if val.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) +} + +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "copy operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in copy for from: '%s'", from) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + } + + valCopy, sz, err := deepCopy(val) + if err != nil { + return errors.Wrapf(err, "error while performing deep copy") + } + + (*accumulatedCopySize) += int64(sz) + if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { + return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) + } + + err = con.add(key, valCopy) + if err != nil { + return errors.Wrapf(err, "error while adding value during copy") + } + + return nil +} + +// Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. 
+func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + var accumulatedCopySize int64 + + for _, op := range p { + switch op.Kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + case "copy": + err = p.copy(&pd, op, &accumulatedCopySize) + default: + err = fmt.Errorf("Unexpected kind: %s", op.Kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. + +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/evanphx/json-patch/v5/LICENSE b/vendor/github.com/evanphx/json-patch/v5/LICENSE new file mode 100644 index 000000000..0eb9b72d8 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/evanphx/json-patch/v5/errors.go b/vendor/github.com/evanphx/json-patch/v5/errors.go new file mode 100644 index 000000000..75304b443 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/errors.go @@ -0,0 +1,38 @@ +package jsonpatch + +import "fmt" + +// AccumulatedCopySizeError is an error type returned when the accumulated size +// increase caused by copy operations in a patch operation has exceeded the +// limit. +type AccumulatedCopySizeError struct { + limit int64 + accumulated int64 +} + +// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError. +func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { + return &AccumulatedCopySizeError{limit: l, accumulated: a} +} + +// Error implements the error interface. +func (a *AccumulatedCopySizeError) Error() string { + return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) +} + +// ArraySizeError is an error type returned when the array size has exceeded +// the limit. +type ArraySizeError struct { + limit int + size int +} + +// NewArraySizeError returns an ArraySizeError. +func NewArraySizeError(l, s int) *ArraySizeError { + return &ArraySizeError{limit: l, size: s} +} + +// Error implements the error interface. +func (a *ArraySizeError) Error() string { + return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) +} diff --git a/vendor/github.com/evanphx/json-patch/v5/go.mod b/vendor/github.com/evanphx/json-patch/v5/go.mod new file mode 100644 index 000000000..c731d4c3b --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/go.mod @@ -0,0 +1,8 @@ +module github.com/evanphx/json-patch/v5 + +go 1.12 + +require ( + github.com/jessevdk/go-flags v1.4.0 + github.com/pkg/errors v0.8.1 +) diff --git a/vendor/github.com/evanphx/json-patch/v5/go.sum b/vendor/github.com/evanphx/json-patch/v5/go.sum new file mode 100644 index 000000000..0edb94114 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/go.sum @@ -0,0 +1,4 @@ +github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/vendor/github.com/evanphx/json-patch/v5/merge.go b/vendor/github.com/evanphx/json-patch/v5/merge.go new file mode 100644 index 000000000..14e8bb5ce --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/merge.go @@ -0,0 +1,386 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range *patch { + if v == nil { + if mergeMerge { + (*doc)[k] = nil + } else { + delete(*doc, k) + } + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + pruneNulls(v) + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v, mergeMerge) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + 
pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + newAry = append(newAry, v) + } + } + + *ary = newAry + + return ary +} + +var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") + +// MergeMergePatches merges two merge patches together, such that +// applying this resulting merged merge patch to a document yields the same +// as merging each merge patch to the document in succession. +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData. +func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, errBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, errBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, errBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, errBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + + return json.Marshal(doc) +} + +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. +func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices are not arrays? Then they are likely JSON objects... 
+ if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document. +func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDoc := map[string]interface{}{} + modifiedDoc := map[string]interface{}{} + + err := json.Unmarshal(originalJSON, &originalDoc) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDoc) + if err != nil { + return nil, errBadJSONDoc + } + + dest, err := getDiff(originalDoc, modifiedDoc) + if err != nil { + return nil, err + } + + return json.Marshal(dest) +} + +// createArrayMergePatch will return an array of merge-patch documents capable +// of converting the original document to the modified document for each +// pair of JSON documents provided in the arrays. +// Arrays of mismatched sizes will result in an error. +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDocs := []json.RawMessage{} + modifiedDocs := []json.RawMessage{} + + err := json.Unmarshal(originalJSON, &originalDocs) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDocs) + if err != nil { + return nil, errBadJSONDoc + } + + total := len(originalDocs) + if len(modifiedDocs) != total { + return nil, errBadJSONDoc + } + + result := []json.RawMessage{} + for i := 0; i < len(originalDocs); i++ { + original := originalDocs[i] + modified := modifiedDocs[i] + + patch, err := createObjectMergePatch(original, modified) + if err != nil { + return nil, err + } + + result = append(result, json.RawMessage(patch)) + } + + return json.Marshal(result) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. +func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case nil: + // Both nil, fine. + return true + case map[string]interface{}: + bt := bv.(map[string]interface{}) + if len(bt) != len(at) { + return false + } + for key := range bt { + av, aOK := at[key] + bv, bOK := bt[key] + if aOK != bOK { + return false + } + if !matchesValue(av, bv) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/github.com/evanphx/json-patch/v5/patch.go b/vendor/github.com/evanphx/json-patch/v5/patch.go new file mode 100644 index 000000000..e73675987 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/patch.go @@ -0,0 +1,788 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +const ( + eRaw = iota + eDoc + eAry +) + +var ( + // SupportNegativeIndices decides whether to support non-standard practice of + // allowing negative indices to mean indices starting at the end of an array. + // Default to true. + SupportNegativeIndices bool = true + // AccumulatedCopySizeLimit limits the total size increase in bytes caused by + // "copy" operations in a patch. + AccumulatedCopySizeLimit int64 = 0 +) + +var ( + ErrTestFailed = errors.New("test failed") + ErrMissing = errors.New("missing value") + ErrUnknownType = errors.New("unknown object type") + ErrInvalid = errors.New("invalid state detected") + ErrInvalidIndex = errors.New("invalid index referenced") +) + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +// Operation is a single JSON-Patch step, such as a single 'add' operation. +type Operation map[string]*json.RawMessage + +// Patch is an ordered collection of Operations. 
+type Patch []Operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, ErrUnknownType + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func deepCopy(src *lazyNode) (*lazyNode, int, error) { + if src == nil { + return nil, 0, nil + } + a, err := src.MarshalJSON() + if err != nil { + return nil, 0, err + } + sz := len(a) + ra := make(json.RawMessage, sz) + copy(ra, a) + return newLazyNode(&ra), sz, nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + if n.raw == nil { + return nil + } + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + if len(n.doc) != len(o.doc) { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if (v == nil) != (ov == nil) { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +// Kind reads the "op" field of the Operation. +func (o Operation) Kind() string { + if obj, ok := o["op"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +// Path reads the "path" field of the Operation. 
+func (o Operation) Path() (string, error) { + if obj, ok := o["path"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") +} + +// From reads the "from" field of the Operation. +func (o Operation) From() (string, error) { + if obj, ok := o["from"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") +} + +func (o Operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +// ValueInterface decodes the operation value into an interface. +func (o Operation) ValueInterface() (interface{}, error) { + if obj, ok := o["value"]; ok && obj != nil { + var v interface{} + + err := json.Unmarshal(*obj, &v) + + if err != nil { + return nil, err + } + + return v, nil + } + + return nil, errors.Wrapf(ErrMissing, "operation, missing value field") +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + v, ok := (*d)[key] + if !ok { + return v, errors.Wrapf(ErrMissing, "unable to get nonexistent key: %s", key) + } + return v, nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return errors.Wrapf(ErrMissing, "unable to remove nonexistent key: %s", key) + } + + delete(*d, key) + return nil +} + +// set should only be used to implement the "replace" operation, so "key" must +// be an already existing index in "d". 
+func (d *partialArray) set(key string, val *lazyNode) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + (*d)[idx] = val + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + } + + sz := len(*d) + 1 + + ary := make([]*lazyNode, sz) + + cur := *d + + if idx >= len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(ary) + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx >= len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(cur) + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "add operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.add(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in add for path: '%s'", path) + } + + return nil +} + +func (p Patch) remove(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) replace(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "replace operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) + } + + _, ok := con.get(key) + if ok != nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) + } + + err = con.set(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) move(doc *container, op Operation) 
error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + } + + err = con.add(key, val) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", path) + } + + return nil +} + +func (p Patch) test(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "test operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + if err != nil && errors.Cause(err) != ErrMissing { + return errors.Wrapf(err, "error in test for path: '%s'", path) + } + + if val == nil { + if op.value().raw == nil { + return nil + } + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } else if op.value() == nil { + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + + if val.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) +} + +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "copy operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in copy for from: '%s'", from) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + } + + valCopy, sz, err := deepCopy(val) + if err != nil { + return errors.Wrapf(err, "error while performing deep copy") + } + + (*accumulatedCopySize) += int64(sz) + if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { + return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) + } + + err = con.add(key, valCopy) + if err != nil { + return errors.Wrapf(err, "error while adding value during copy") + } + + return nil +} + +// Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. 
+func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + var accumulatedCopySize int64 + + for _, op := range p { + switch op.Kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + case "copy": + err = p.copy(&pd, op, &accumulatedCopySize) + default: + err = fmt.Errorf("Unexpected kind: %s", op.Kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. + +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml new file mode 100644 index 000000000..604314dd4 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/.travis.yml @@ -0,0 +1,14 @@ +language: go +sudo: false +go: + - 1.13.x + - tip + +before_install: + - go get -t -v ./... + +script: + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 000000000..65dc692b6 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com> + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 000000000..38418353e --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[](http://godoc.org/github.com/mattn/go-isatty) +[](https://codecov.io/gh/mattn/go-isatty) +[](https://coveralls.io/github/mattn/go-isatty?branch=master) +[](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 000000000..17d4f90eb --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod new file mode 100644 index 000000000..605c4c221 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -0,0 +1,5 @@ +module github.com/mattn/go-isatty + +go 1.12 + +require golang.org/x/sys v0.0.0-20200116001909-b77594299b42 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum new file mode 100644 index 000000000..912e29cbc --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 000000000..711f28808 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,18 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 000000000..ff714a376 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,15 @@ +// +build appengine js nacl + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go new file mode 100644 index 000000000..c5b6e0c08 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -0,0 +1,22 @@ +// +build plan9 + +package isatty + +import ( + "syscall" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + path, err := syscall.Fd2path(int(fd)) + if err != nil { + return false + } + return path == "/dev/cons" || path == "/mnt/term/dev/cons" +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 000000000..bdd5c79a0 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,22 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 000000000..31a1ca973 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,18 @@ +// +build linux aix +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 000000000..1fa869154 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,125 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "errors" + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + objectNameInfo uintptr = 1 + fileNameInfo = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + ntdll = syscall.NewLazyDLL("ntdll.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") + procNtQueryObject = ntdll.NewProc("NtQueryObject") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal returns true if the file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// isCygwinPipeName reports whether the pipe name is one used for a cygwin/msys2 pty. +// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && + token[0] != `\cygwin` && + token[0] != `\Device\NamedPipe\msys` && + token[0] != `\Device\NamedPipe\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// getFileNameByHandle uses the undocumented ntdll NtQueryObject call to get the full file name +// from a file handle. Since GetFileInformationByHandleEx is not available before Windows Vista +// and some users still run Windows XP, this is a workaround for them; it also works on systems +// from Windows Vista to 10. +// see https://stackoverflow.com/a/18792477 for details +func getFileNameByHandle(fd uintptr) (string, error) { + if procNtQueryObject == nil { + return "", errors.New("ntdll.dll: NtQueryObject not supported") + } + + var buf [4 + syscall.MAX_PATH]uint16 + var result int + r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, + fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) + if r != 0 { + return "", e + } + return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil +} + +// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + name, err := getFileNameByHandle(fd) + if err != nil { + return false + } + return isCygwinPipeName(name) + } + + // Cygwin/msys's pty is a pipe.
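+ // Editor's illustration (not part of the upstream file): a handle name such as
+ // \msys-1888ae32e00d56aa-pty0-to-master satisfies isCygwinPipeName above,
+ // while an ordinary named pipe like \Device\NamedPipe\foo does not.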
+ ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/mattn/go-isatty/renovate.json b/vendor/github.com/mattn/go-isatty/renovate.json new file mode 100644 index 000000000..5ae9d96b7 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/renovate.json @@ -0,0 +1,8 @@ +{ + "extends": [ + "config:base" + ], + "postUpdateOptions": [ + "gomodTidy" + ] +} diff --git a/vendor/github.com/pelletier/go-toml/.dockerignore b/vendor/github.com/pelletier/go-toml/.dockerignore new file mode 100644 index 000000000..7b5883475 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/.dockerignore @@ -0,0 +1,2 @@ +cmd/tomll/tomll +cmd/tomljson/tomljson diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore new file mode 100644 index 000000000..e6ba63a5c --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/.gitignore @@ -0,0 +1,5 @@ +test_program/test_program_bin +fuzz/ +cmd/tomll/tomll +cmd/tomljson/tomljson +cmd/tomltestgen/tomltestgen diff --git a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md new file mode 100644 index 000000000..405c911c9 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md @@ -0,0 +1,132 @@ +## Contributing + +Thank you for your interest in go-toml! We appreciate you considering +contributing to go-toml! + +The main goal of the project is to provide an easy-to-use TOML +implementation for Go that gets the job done and gets out of your way – +dealing with TOML is probably not the central piece of your project. + +As go-toml has a single maintainer, time is scarce. All help, big or +small, is more than welcomed! + +### Ask questions + +Any question you may have, somebody else might have too. Always feel +free to ask them on the [issues tracker][issues-tracker]. We will try to +answer them as clearly and quickly as possible, time permitting. + +Asking questions also helps us identify areas where the documentation needs +improvement, or new features that weren't envisioned before. Sometimes, a +seemingly innocent question leads to the fix of a bug. Don't hesitate and +ask away! + +### Improve the documentation + +The best way to share your knowledge and experience with go-toml is to +improve the documentation. Fix a typo, clarify an interface, add an +example, anything goes! + +The documentation is present in the [README][readme] and throughout the +source code. On release, it gets updated on [GoDoc][godoc]. To make a +change to the documentation, create a pull request with your proposed +changes. For simple changes like that, the easiest way to go is probably +the "Fork this project and edit the file" button on GitHub, displayed at +the top right of the file. Unless it's a trivial change (for example a +typo), provide a little bit of context in your pull request description or +commit message. + +### Report a bug + +Found a bug? Sorry to hear that :(. Help us and others track it down and +fix it by reporting it. [File a new bug report][bug-report] on the [issues +tracker][issues-tracker].
The template should provide enough guidance on +what to include. When in doubt: add more details! By reducing ambiguity and +providing more information, it decreases back and forth and saves everyone +time. + +### Code changes + +Want to contribute a patch? Very happy to hear that! + +First, some high-level rules: + +* A short proposal with some POC code is better than a lengthy piece of + text with no code. Code speaks louder than words. +* No backward-incompatible patch will be accepted unless discussed. + Sometimes it's hard, and Go's lack of versioning by default does not + help, but we try not to break people's programs unless we absolutely have + to. +* If you are writing a new feature or extending an existing one, make sure + to write some documentation. +* Bug fixes need to be accompanied with regression tests. +* New code needs to be tested. +* Your commit messages need to explain why the change is needed, even if + already included in the PR description. + +It does sound like a lot, but those best practices are here to save time +overall and continuously improve the quality of the project, which is +something everyone benefits from. + +#### Get started + +The fairly standard code contribution process looks like this: + +1. [Fork the project][fork]. +2. Make your changes, commit on any branch you like. +3. [Open up a pull request][pull-request]. +4. Review, with potential requests for changes. +5. Merge. You're in! + +Feel free to ask for help! You can create draft pull requests to gather +some early feedback! + +#### Run the tests + +You can run tests for go-toml using Go's test tool: `go test ./...`. +When you create a pull request, all tests will be run on Linux on a few Go +versions (Travis CI), and on Windows using the latest Go version +(AppVeyor). + +#### Style + +Try to look around and follow the same format and structure as the rest of +the code. We enforce using `go fmt` on the whole code base. + +--- + +### Maintainers-only + +#### Merge pull request + +Checklist: + +* Passing CI. +* Does not introduce backward-incompatible changes (unless discussed). +* Has relevant doc changes. +* Has relevant unit tests. + +1. Merge using "squash and merge". +2. Make sure to edit the commit message to keep all the useful information + nice and clean. +3. Make sure the commit title is clear and contains the PR number (#123). + +#### New release + +1. Go to [releases][releases]. Click on "X commits to master since this + release". +2. Make note of all the changes. Look for backward incompatible changes, + new features, and bug fixes. +3. Pick the new version using the above and semver. +4. Create a [new release][new-release]. +5. Follow the same format as [1.1.0][release-110].
+ +[issues-tracker]: https://github.com/pelletier/go-toml/issues +[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md +[godoc]: https://godoc.org/github.com/pelletier/go-toml +[readme]: ./README.md +[fork]: https://help.github.com/articles/fork-a-repo +[pull-request]: https://help.github.com/en/articles/creating-a-pull-request +[releases]: https://github.com/pelletier/go-toml/releases +[new-release]: https://github.com/pelletier/go-toml/releases/new +[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0 diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile new file mode 100644 index 000000000..fffdb0166 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/Dockerfile @@ -0,0 +1,11 @@ +FROM golang:1.12-alpine3.9 as builder +WORKDIR /go/src/github.com/pelletier/go-toml +COPY . . +ENV CGO_ENABLED=0 +ENV GOOS=linux +RUN go install ./... + +FROM scratch +COPY --from=builder /go/bin/tomll /usr/bin/tomll +COPY --from=builder /go/bin/tomljson /usr/bin/tomljson +COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE new file mode 100644 index 000000000..583bdae62 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/pelletier/go-toml/Makefile b/vendor/github.com/pelletier/go-toml/Makefile new file mode 100644 index 000000000..9e4503aea --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/Makefile @@ -0,0 +1,29 @@ +export CGO_ENABLED=0 +go := go +go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1) +go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2) + +out.tools := tomll tomljson jsontoml +out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz) +sources := $(wildcard **/*.go) + + +.PHONY: +tools: $(out.tools) + +$(out.tools): $(sources) + GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@ + +.PHONY: +dist: $(out.dist) + +$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: % + if [ "$(go.goos)" = "windows" ]; then \ + tar -cJf $@ $^.exe; \ + else \ + tar -cJf $@ $^; \ + fi + +.PHONY: +clean: + rm -rf $(out.tools) $(out.dist) diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..041cdc4a2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,5 @@ +**Issue:** add link to pelletier/go-toml issue here + +Explanation of what this pull request does. + +More detailed description of the decisions being made and the reasons why (if the patch is non-trivial). diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md new file mode 100644 index 000000000..6831deb5b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/README.md @@ -0,0 +1,151 @@ +# go-toml + +Go library for the [TOML](https://github.com/mojombo/toml) format. + +This library supports TOML version +[v1.0.0-rc.1](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v1.0.0-rc.1.md) + +[](http://godoc.org/github.com/pelletier/go-toml) +[](https://github.com/pelletier/go-toml/blob/master/LICENSE) +[](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master) +[](https://codecov.io/gh/pelletier/go-toml) +[](https://goreportcard.com/report/github.com/pelletier/go-toml) +[](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) + +## Features + +Go-toml provides the following features for using data parsed from TOML documents: + +* Load TOML documents from files and string data +* Easily navigate TOML structure using Tree +* Marshaling and unmarshaling to and from data structures +* Line & column position data for all parsed elements +* [Query support similar to JSON-Path](query/) +* Syntax errors contain line and column numbers + +## Import + +```go +import "github.com/pelletier/go-toml" +``` + +## Usage example + +Read a TOML document: + +```go +config, _ := toml.Load(` +[postgres] +user = "pelletier" +password = "mypassword"`) +// retrieve data directly +user := config.Get("postgres.user").(string) + +// or using an intermediate object +postgresConfig := config.Get("postgres").(*toml.Tree) +password := postgresConfig.Get("password").(string) +``` + +Or use Unmarshal: + +```go +type Postgres struct { + User string + Password string +} +type Config struct { + Postgres Postgres +} + +doc := []byte(` +[Postgres] +User = "pelletier" +Password = "mypassword"`) + +config := Config{} +toml.Unmarshal(doc, &config) +fmt.Println("user=", config.Postgres.User) +``` + +Or use a query: + +```go +// use a query to gather elements without walking the tree +q, _ := query.Compile("$..[user,password]") +results := 
q.Execute(config) +for ii, item := range results.Values() { + fmt.Printf("Query result %d: %v\n", ii, item) +} +``` + +## Documentation + +The documentation and additional examples are available at +[godoc.org](http://godoc.org/github.com/pelletier/go-toml). + +## Tools + +Go-toml provides three handy command line tools: + +* `tomll`: Reads TOML files and lints them. + + ``` + go install github.com/pelletier/go-toml/cmd/tomll + tomll --help + ``` +* `tomljson`: Reads a TOML file and outputs its JSON representation. + + ``` + go install github.com/pelletier/go-toml/cmd/tomljson + tomljson --help + ``` +* `jsontoml`: Reads a JSON file and outputs a TOML representation. + + ``` + go install github.com/pelletier/go-toml/cmd/jsontoml + jsontoml --help + ``` + +### Docker image + +Those tools are also available as a Docker image from +[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to +use `tomljson`: + +``` +docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml +``` + +Only master (`latest`) and tagged versions are published to dockerhub. You +can build your own image as usual: + +``` +docker build -t go-toml . +``` + +## Contribute + +Feel free to report bugs and submit patches using GitHub's pull request +system on [pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be +much appreciated! + +### Run tests + +`go test ./...` + +### Fuzzing + +The script `./fuzz.sh` is available to +run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml. + +## Versioning + +Go-toml follows [Semantic Versioning](http://semver.org/). The supported version +of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of +this document. The last two major versions of Go are supported +(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). + +## License + +The MIT License (MIT). Read [LICENSE](LICENSE). diff --git a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml new file mode 100644 index 000000000..ff5376b09 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml @@ -0,0 +1,230 @@ +trigger: +- master + +stages: +- stage: fuzzit + displayName: "Run Fuzzit" + dependsOn: [] + condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) + jobs: + - job: submit + displayName: "Submit" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.15" + inputs: + version: "1.15" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml + - script: cp -R . ${HOME}/go/src/github.com/pelletier/go-toml + - task: Bash@3 + inputs: + filePath: './fuzzit.sh' + env: + TYPE: fuzzing + FUZZIT_API_KEY: $(FUZZIT_API_KEY) + +- stage: run_checks + displayName: "Check" + dependsOn: [] + jobs: + - job: fmt + displayName: "fmt" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.15" + inputs: + version: "1.15" + - task: Go@0 + displayName: "go fmt ./..." + inputs: + command: 'custom' + customCommand: 'fmt' + arguments: './...'
+ - job: coverage + displayName: "coverage" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.15" + inputs: + version: "1.15" + - task: Go@0 + displayName: "Generate coverage" + inputs: + command: 'test' + arguments: "-race -coverprofile=coverage.txt -covermode=atomic" + - task: Bash@3 + inputs: + targetType: 'inline' + script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}' + env: + CODECOV_TOKEN: $(CODECOV_TOKEN) + - job: benchmark + displayName: "benchmark" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.15" + inputs: + version: "1.15" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - task: Bash@3 + inputs: + filePath: './benchmark.sh' + arguments: "master $(Build.Repository.Uri)" + + - job: fuzzing + displayName: "fuzzing" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.15" + inputs: + version: "1.15" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml + - script: cp -R . ${HOME}/go/src/github.com/pelletier/go-toml + - task: Bash@3 + inputs: + filePath: './fuzzit.sh' + env: + TYPE: local-regression + + - job: go_unit_tests + displayName: "unit tests" + strategy: + matrix: + linux 1.15: + goVersion: '1.15' + imageName: 'ubuntu-latest' + mac 1.15: + goVersion: '1.15' + imageName: 'macOS-latest' + windows 1.15: + goVersion: '1.15' + imageName: 'windows-latest' + linux 1.14: + goVersion: '1.14' + imageName: 'ubuntu-latest' + mac 1.14: + goVersion: '1.14' + imageName: 'macOS-latest' + windows 1.14: + goVersion: '1.14' + imageName: 'windows-latest' + pool: + vmImage: $(imageName) + steps: + - task: GoTool@0 + displayName: "Install Go $(goVersion)" + inputs: + version: $(goVersion) + - task: Go@0 + displayName: "go test ./..." + inputs: + command: 'test' + arguments: './...' 
+- stage: build_binaries + displayName: "Build binaries" + dependsOn: run_checks + jobs: + - job: build_binary + displayName: "Build binary" + strategy: + matrix: + linux_amd64: + GOOS: linux + GOARCH: amd64 + darwin_amd64: + GOOS: darwin + GOARCH: amd64 + windows_amd64: + GOOS: windows + GOARCH: amd64 + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go" + inputs: + version: 1.15 + - task: Bash@3 + inputs: + targetType: inline + script: "make dist" + env: + go.goos: $(GOOS) + go.goarch: $(GOARCH) + - task: CopyFiles@2 + inputs: + sourceFolder: '$(Build.SourcesDirectory)' + contents: '*.tar.xz' + TargetFolder: '$(Build.ArtifactStagingDirectory)' + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: binaries +- stage: build_binaries_manifest + displayName: "Build binaries manifest" + dependsOn: build_binaries + jobs: + - job: build_manifest + displayName: "Build binaries manifest" + steps: + - task: DownloadBuildArtifacts@0 + inputs: + buildType: 'current' + downloadType: 'single' + artifactName: 'binaries' + downloadPath: '$(Build.SourcesDirectory)' + - task: Bash@3 + inputs: + targetType: inline + script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt" + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: manifest + +- stage: build_docker_image + displayName: "Build Docker image" + dependsOn: run_checks + jobs: + - job: build + displayName: "Build" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + command: 'build' + Dockerfile: 'Dockerfile' + buildContext: '.' + addPipelineData: false + +- stage: publish_docker_image + displayName: "Publish Docker image" + dependsOn: build_docker_image + condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) + jobs: + - job: publish + displayName: "Publish" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + containerRegistry: 'DockerHub' + repository: 'pelletier/go-toml' + command: 'buildAndPush' + Dockerfile: 'Dockerfile' + buildContext: '.' + tags: 'latest' diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh new file mode 100644 index 000000000..a69d3040f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/benchmark.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +set -ex + +reference_ref=${1:-master} +reference_git=${2:-.} + +if ! `hash benchstat 2>/dev/null`; then + echo "Installing benchstat" + go get golang.org/x/perf/cmd/benchstat +fi + +tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` +ref_tempdir="${tempdir}/ref" +ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt" +local_benchmark="`pwd`/benchmark-local.txt" + +echo "=== ${reference_ref} (${ref_tempdir})" +git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null +pushd ${ref_tempdir} >/dev/null +git checkout ${reference_ref} >/dev/null 2>/dev/null +go test -bench=. -benchmem | tee ${ref_benchmark} +cd benchmark +go test -bench=. -benchmem | tee -a ${ref_benchmark} +popd >/dev/null + +echo "" +echo "=== local" +go test -bench=. -benchmem | tee ${local_benchmark} +cd benchmark +go test -bench=. 
-benchmem | tee -a ${local_benchmark} + +echo "" +echo "=== diff" +benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go new file mode 100644 index 000000000..a1406a32b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/doc.go @@ -0,0 +1,23 @@ +// Package toml is a TOML parser and manipulation library. +// +// This version supports the specification as described in +// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md +// +// Marshaling +// +// Go-toml can marshal and unmarshal TOML documents from and to data +// structures. +// +// TOML document as a tree +// +// Go-toml can operate on a TOML document as a tree. Use one of the Load* +// functions to parse TOML data and obtain a Tree instance, then one of its +// methods to manipulate the tree. +// +// JSONPath-like queries +// +// The package github.com/pelletier/go-toml/query implements a system +// similar to JSONPath to quickly retrieve elements of a TOML document using a +// single expression. See the package documentation for more information. +// +package toml diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml new file mode 100644 index 000000000..780d9c68f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/example-crlf.toml @@ -0,0 +1,30 @@ +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml new file mode 100644 index 000000000..f45bf88b8 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/example.toml @@ -0,0 +1,30 @@ +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. 
+ [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go new file mode 100644 index 000000000..14570c8d3 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzz.go @@ -0,0 +1,31 @@ +// +build gofuzz + +package toml + +func Fuzz(data []byte) int { + tree, err := LoadBytes(data) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + str, err := tree.ToTomlString() + if err != nil { + if str != "" { + panic(`str must be "" if there is an error`) + } + panic(err) + } + + tree, err = Load(str) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + return 1 +} diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh new file mode 100644 index 000000000..3204b4c44 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzz.sh @@ -0,0 +1,15 @@ +#! /bin/sh +set -eu + +go get github.com/dvyukov/go-fuzz/go-fuzz +go get github.com/dvyukov/go-fuzz/go-fuzz-build + +if [ ! -e toml-fuzz.zip ]; then + go-fuzz-build github.com/pelletier/go-toml +fi + +rm -fr fuzz +mkdir -p fuzz/corpus +cp *.toml fuzz/corpus + +go-fuzz -bin=toml-fuzz.zip -workdir=fuzz diff --git a/vendor/github.com/pelletier/go-toml/fuzzit.sh b/vendor/github.com/pelletier/go-toml/fuzzit.sh new file mode 100644 index 000000000..b575a6081 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzzit.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -xe + +# go-fuzz doesn't support modules yet, so ensure we do everything +# in the old style GOPATH way +export GO111MODULE="off" + +# install go-fuzz +go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build + +# target name can only contain lower-case letters (a-z), digits (0-9) and a dash (-) +# to add another target, make sure to create it with `fuzzit create target` +# before using `fuzzit create job` +TARGET=toml-fuzzer + +go-fuzz-build -libfuzzer -o ${TARGET}.a github.com/pelletier/go-toml +clang -fsanitize=fuzzer ${TARGET}.a -o ${TARGET} + +# install fuzzit for talking to fuzzit.dev service +# or latest version: +# https://github.com/fuzzitdev/fuzzit/releases/latest/download/fuzzit_Linux_x86_64 +wget -q -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.52/fuzzit_Linux_x86_64 +chmod a+x fuzzit + +# TODO: change kkowalczyk to go-toml and create toml-fuzzer target there +./fuzzit create job --type $TYPE go-toml/${TARGET} ${TARGET} diff --git a/vendor/github.com/pelletier/go-toml/go.mod b/vendor/github.com/pelletier/go-toml/go.mod new file mode 100644 index 000000000..e924cb90c --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/go.mod @@ -0,0 +1,5 @@ +module github.com/pelletier/go-toml + +go 1.12 + +require github.com/davecgh/go-spew v1.1.1 diff --git a/vendor/github.com/pelletier/go-toml/go.sum b/vendor/github.com/pelletier/go-toml/go.sum new file mode 100644 index 000000000..6f356470d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/go.sum @@ -0,0 +1,19 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go new file mode 100644 index 000000000..e091500b2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/keysparsing.go @@ -0,0 +1,112 @@ +// Parsing keys handling both bare and quoted keys. + +package toml + +import ( + "errors" + "fmt" +) + +// Convert the bare key group string to an array. +// The input supports double quotation and single quotation, +// but escape sequences are not supported. Lexers must unescape them beforehand. +func parseKey(key string) ([]string, error) { + runes := []rune(key) + var groups []string + + if len(key) == 0 { + return nil, errors.New("empty key") + } + + idx := 0 + for idx < len(runes) { + for ; idx < len(runes) && isSpace(runes[idx]); idx++ { + // skip leading whitespace + } + if idx >= len(runes) { + break + } + r := runes[idx] + if isValidBareChar(r) { + // parse bare key + startIdx := idx + endIdx := -1 + idx++ + for idx < len(runes) { + r = runes[idx] + if isValidBareChar(r) { + idx++ + } else if r == '.' { + endIdx = idx + break + } else if isSpace(r) { + endIdx = idx + for ; idx < len(runes) && isSpace(runes[idx]); idx++ { + // skip trailing whitespace + } + if idx < len(runes) && runes[idx] != '.' { + return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx]) + } + break + } else { + return nil, fmt.Errorf("invalid bare key character: %c", r) + } + } + if endIdx == -1 { + endIdx = idx + } + groups = append(groups, string(runes[startIdx:endIdx])) + } else if r == '\'' { + // parse single quoted key + idx++ + startIdx := idx + for { + if idx >= len(runes) { + return nil, fmt.Errorf("unclosed single-quoted key") + } + r = runes[idx] + if r == '\'' { + groups = append(groups, string(runes[startIdx:idx])) + idx++ + break + } + idx++ + } + } else if r == '"' { + // parse double quoted key + idx++ + startIdx := idx + for { + if idx >= len(runes) { + return nil, fmt.Errorf("unclosed double-quoted key") + } + r = runes[idx] + if r == '"' { + groups = append(groups, string(runes[startIdx:idx])) + idx++ + break + } + idx++ + } + } else if r == '.' 
{ + idx++ + if idx >= len(runes) { + return nil, fmt.Errorf("unexpected end of key") + } + r = runes[idx] + if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' { + return nil, fmt.Errorf("expecting key part after dot") + } + } else { + return nil, fmt.Errorf("invalid key character: %c", r) + } + } + if len(groups) == 0 { + return nil, fmt.Errorf("empty key") + } + return groups, nil +} + +func isValidBareChar(r rune) bool { + return isAlphanumeric(r) || r == '-' || isDigit(r) +} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go new file mode 100644 index 000000000..b18861924 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/lexer.go @@ -0,0 +1,807 @@ +// TOML lexer. +// +// Written using the principles developed by Rob Pike in +// http://www.youtube.com/watch?v=HxaD_trXwRE + +package toml + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +var dateRegexp *regexp.Regexp + +// Define state functions +type tomlLexStateFn func() tomlLexStateFn + +// Define lexer +type tomlLexer struct { + inputIdx int + input []rune // Textual source + currentTokenStart int + currentTokenStop int + tokens []token + brackets []rune + line int + col int + endbufferLine int + endbufferCol int +} + +// Basic read operations on input + +func (l *tomlLexer) read() rune { + r := l.peek() + if r == '\n' { + l.endbufferLine++ + l.endbufferCol = 1 + } else { + l.endbufferCol++ + } + l.inputIdx++ + return r +} + +func (l *tomlLexer) next() rune { + r := l.read() + + if r != eof { + l.currentTokenStop++ + } + return r +} + +func (l *tomlLexer) ignore() { + l.currentTokenStart = l.currentTokenStop + l.line = l.endbufferLine + l.col = l.endbufferCol +} + +func (l *tomlLexer) skip() { + l.next() + l.ignore() +} + +func (l *tomlLexer) fastForward(n int) { + for i := 0; i < n; i++ { + l.next() + } +} + +func (l *tomlLexer) emitWithValue(t tokenType, value string) { + l.tokens = append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: t, + val: value, + }) + l.ignore() +} + +func (l *tomlLexer) emit(t tokenType) { + l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) +} + +func (l *tomlLexer) peek() rune { + if l.inputIdx >= len(l.input) { + return eof + } + return l.input[l.inputIdx] +} + +func (l *tomlLexer) peekString(size int) string { + maxIdx := len(l.input) + upperIdx := l.inputIdx + size // FIXME: potential overflow + if upperIdx > maxIdx { + upperIdx = maxIdx + } + return string(l.input[l.inputIdx:upperIdx]) +} + +func (l *tomlLexer) follow(next string) bool { + return next == l.peekString(len(next)) +} + +// Error management + +func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { + l.tokens = append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: tokenError, + val: fmt.Sprintf(format, args...), + }) + return nil +} + +// State functions + +func (l *tomlLexer) lexVoid() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '}': // after '{' + return l.lexRightCurlyBrace + case '[': + return l.lexTableKey + case '#': + return l.lexComment(l.lexVoid) + case '=': + return l.lexEqual + case '\r': + fallthrough + case '\n': + l.skip() + continue + } + + if isSpace(next) { + l.skip() + } + + if isKeyStartChar(next) { + return l.lexKey + } + + if next == eof { + l.next() + break + } + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexRvalue() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '.': + return 
l.errorf("cannot start float with a dot") + case '=': + return l.lexEqual + case '[': + return l.lexLeftBracket + case ']': + return l.lexRightBracket + case '{': + return l.lexLeftCurlyBrace + case '}': + return l.lexRightCurlyBrace + case '#': + return l.lexComment(l.lexRvalue) + case '"': + return l.lexString + case '\'': + return l.lexLiteralString + case ',': + return l.lexComma + case '\r': + fallthrough + case '\n': + l.skip() + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' { + return l.lexRvalue + } + return l.lexVoid + } + + if l.follow("true") { + return l.lexTrue + } + + if l.follow("false") { + return l.lexFalse + } + + if l.follow("inf") { + return l.lexInf + } + + if l.follow("nan") { + return l.lexNan + } + + if isSpace(next) { + l.skip() + continue + } + + if next == eof { + l.next() + break + } + + possibleDate := l.peekString(35) + dateSubmatches := dateRegexp.FindStringSubmatch(possibleDate) + if dateSubmatches != nil && dateSubmatches[0] != "" { + l.fastForward(len(dateSubmatches[0])) + if dateSubmatches[2] == "" { // no timezone information => local date + return l.lexLocalDate + } + return l.lexDate + } + + if next == '+' || next == '-' || isDigit(next) { + return l.lexNumber + } + + return l.errorf("no value can start with %c", next) + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenLeftCurlyBrace) + l.brackets = append(l.brackets, '{') + return l.lexVoid +} + +func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenRightCurlyBrace) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' { + return l.errorf("cannot have '}' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] + return l.lexRvalue +} + +func (l *tomlLexer) lexDate() tomlLexStateFn { + l.emit(tokenDate) + return l.lexRvalue +} + +func (l *tomlLexer) lexLocalDate() tomlLexStateFn { + l.emit(tokenLocalDate) + return l.lexRvalue +} + +func (l *tomlLexer) lexTrue() tomlLexStateFn { + l.fastForward(4) + l.emit(tokenTrue) + return l.lexRvalue +} + +func (l *tomlLexer) lexFalse() tomlLexStateFn { + l.fastForward(5) + l.emit(tokenFalse) + return l.lexRvalue +} + +func (l *tomlLexer) lexInf() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenInf) + return l.lexRvalue +} + +func (l *tomlLexer) lexNan() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenNan) + return l.lexRvalue +} + +func (l *tomlLexer) lexEqual() tomlLexStateFn { + l.next() + l.emit(tokenEqual) + return l.lexRvalue +} + +func (l *tomlLexer) lexComma() tomlLexStateFn { + l.next() + l.emit(tokenComma) + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' { + return l.lexVoid + } + return l.lexRvalue +} + +// Parse the key and emits its value without escape sequences. +// bare keys, basic string keys and literal string keys are supported. 
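+// Editor's illustration (not part of the upstream file): the raw input
+// a."b c".'d' is emitted as a single key token whose value is a."b c".'d';
+// escapes inside the double-quoted part are decoded, but the quotes
+// themselves are kept so the parser can still tell quoted segments from
+// bare ones.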
+func (l *tomlLexer) lexKey() tomlLexStateFn { + var sb strings.Builder + + for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { + if r == '"' { + l.next() + str, err := l.lexStringAsString(`"`, false, true) + if err != nil { + return l.errorf(err.Error()) + } + sb.WriteString("\"") + sb.WriteString(str) + sb.WriteString("\"") + l.next() + continue + } else if r == '\'' { + l.next() + str, err := l.lexLiteralStringAsString(`'`, false) + if err != nil { + return l.errorf(err.Error()) + } + sb.WriteString("'") + sb.WriteString(str) + sb.WriteString("'") + l.next() + continue + } else if r == '\n' { + return l.errorf("keys cannot contain new lines") + } else if isSpace(r) { + var str strings.Builder + str.WriteString(" ") + + // skip trailing whitespace + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str.WriteRune(r) + l.next() + } + // break loop if not a dot + if r != '.' { + break + } + str.WriteString(".") + // skip trailing whitespace after dot + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str.WriteRune(r) + l.next() + } + sb.WriteString(str.String()) + continue + } else if r == '.' { + // skip + } else if !isValidBareChar(r) { + return l.errorf("keys cannot contain %c character", r) + } + sb.WriteRune(r) + l.next() + } + l.emitWithValue(tokenKey, sb.String()) + return l.lexVoid +} + +func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { + return func() tomlLexStateFn { + for next := l.peek(); next != '\n' && next != eof; next = l.peek() { + if next == '\r' && l.follow("\r\n") { + break + } + l.next() + } + l.ignore() + return previousState + } +} + +func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { + l.next() + l.emit(tokenLeftBracket) + l.brackets = append(l.brackets, '[') + return l.lexRvalue +} + +func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { + var sb strings.Builder + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + // find end of string + for { + if l.follow(terminator) { + return sb.String(), nil + } + + next := l.peek() + if next == eof { + break + } + sb.WriteRune(l.next()) + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexLiteralString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := "'" + discardLeadingNewLine := false + if l.follow("''") { + l.skip() + l.skip() + terminator = "'''" + discardLeadingNewLine = true + } + + str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +// Lex a string and return the results as a string. +// Terminator is the substring indicating the end of the token. +// The resulting string does not include the terminator. 
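+// Editor's illustration (not part of the upstream file): with terminator `"`
+// the input a\tb\u00E9" is returned as the four characters a, TAB, b, é;
+// escape sequences are decoded here in the lexer rather than by the parser.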
+func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { + var sb strings.Builder + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + for { + if l.follow(terminator) { + return sb.String(), nil + } + + if l.follow("\\") { + l.next() + switch l.peek() { + case '\r': + fallthrough + case '\n': + fallthrough + case '\t': + fallthrough + case ' ': + // skip all whitespace chars following backslash + for strings.ContainsRune("\r\n\t ", l.peek()) { + l.next() + } + case '"': + sb.WriteString("\"") + l.next() + case 'n': + sb.WriteString("\n") + l.next() + case 'b': + sb.WriteString("\b") + l.next() + case 'f': + sb.WriteString("\f") + l.next() + case '/': + sb.WriteString("/") + l.next() + case 't': + sb.WriteString("\t") + l.next() + case 'r': + sb.WriteString("\r") + l.next() + case '\\': + sb.WriteString("\\") + l.next() + case 'u': + l.next() + var code strings.Builder + for i := 0; i < 4; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code.WriteRune(c) + } + intcode, err := strconv.ParseInt(code.String(), 16, 32) + if err != nil { + return "", errors.New("invalid unicode escape: \\u" + code.String()) + } + sb.WriteRune(rune(intcode)) + case 'U': + l.next() + var code strings.Builder + for i := 0; i < 8; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code.WriteRune(c) + } + intcode, err := strconv.ParseInt(code.String(), 16, 64) + if err != nil { + return "", errors.New("invalid unicode escape: \\U" + code.String()) + } + sb.WriteRune(rune(intcode)) + default: + return "", errors.New("invalid escape sequence: \\" + string(l.peek())) + } + } else { + r := l.peek() + + if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) { + return "", fmt.Errorf("unescaped control character %U", r) + } + l.next() + sb.WriteRune(r) + } + + if l.peek() == eof { + break + } + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := `"` + discardLeadingNewLine := false + acceptNewLines := false + if l.follow(`""`) { + l.skip() + l.skip() + terminator = `"""` + discardLeadingNewLine = true + acceptNewLines = true + } + + str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +func (l *tomlLexer) lexTableKey() tomlLexStateFn { + l.next() + + if l.peek() == '[' { + // token '[[' signifies an array of tables + l.next() + l.emit(tokenDoubleLeftBracket) + return l.lexInsideTableArrayKey + } + // vanilla table key + l.emit(tokenLeftBracket) + return l.lexInsideTableKey +} + +// Parse the key till "]]", but only bare keys are supported +func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroupArray) + } + l.next() + if l.peek() != ']' { + break + } + l.next() + l.emit(tokenDoubleRightBracket) + return l.lexVoid + case '[': + return l.errorf("table array key cannot contain ']'") + default: + l.next() + } + } + return l.errorf("unclosed table array key") +} + +// Parse the 
key till "]" but only bare keys are supported +func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroup) + } + l.next() + l.emit(tokenRightBracket) + return l.lexVoid + case '[': + return l.errorf("table key cannot contain ']'") + default: + l.next() + } + } + return l.errorf("unclosed table key") +} + +func (l *tomlLexer) lexRightBracket() tomlLexStateFn { + l.next() + l.emit(tokenRightBracket) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' { + return l.errorf("cannot have ']' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] + return l.lexRvalue +} + +type validRuneFn func(r rune) bool + +func isValidHexRune(r rune) bool { + return r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' || + r >= '0' && r <= '9' || + r == '_' +} + +func isValidOctalRune(r rune) bool { + return r >= '0' && r <= '7' || r == '_' +} + +func isValidBinaryRune(r rune) bool { + return r == '0' || r == '1' || r == '_' +} + +func (l *tomlLexer) lexNumber() tomlLexStateFn { + r := l.peek() + + if r == '0' { + follow := l.peekString(2) + if len(follow) == 2 { + var isValidRune validRuneFn + switch follow[1] { + case 'x': + isValidRune = isValidHexRune + case 'o': + isValidRune = isValidOctalRune + case 'b': + isValidRune = isValidBinaryRune + default: + if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { + return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) + } + } + + if isValidRune != nil { + l.next() + l.next() + digitSeen := false + for { + next := l.peek() + if !isValidRune(next) { + break + } + digitSeen = true + l.next() + } + + if !digitSeen { + return l.errorf("number needs at least one digit") + } + + l.emit(tokenInteger) + + return l.lexRvalue + } + } + } + + if r == '+' || r == '-' { + l.next() + if l.follow("inf") { + return l.lexInf + } + if l.follow("nan") { + return l.lexNan + } + } + + pointSeen := false + expSeen := false + digitSeen := false + for { + next := l.peek() + if next == '.' { + if pointSeen { + return l.errorf("cannot have two dots in one float") + } + l.next() + if !isDigit(l.peek()) { + return l.errorf("float cannot end with a dot") + } + pointSeen = true + } else if next == 'e' || next == 'E' { + expSeen = true + l.next() + r := l.peek() + if r == '+' || r == '-' { + l.next() + } + } else if isDigit(next) { + digitSeen = true + l.next() + } else if next == '_' { + l.next() + } else { + break + } + if pointSeen && !digitSeen { + return l.errorf("cannot start float with a dot") + } + } + + if !digitSeen { + return l.errorf("no digit in that number") + } + if pointSeen || expSeen { + l.emit(tokenFloat) + } else { + l.emit(tokenInteger) + } + return l.lexRvalue +} + +func (l *tomlLexer) run() { + for state := l.lexVoid; state != nil; { + state = state() + } +} + +func init() { + // Regexp for all date/time formats supported by TOML. 
+ // Group 1: nano precision + // Group 2: timezone + // + // /!\ also matches the empty string + // + // Example matches: + // 1979-05-27T07:32:00Z + // 1979-05-27T00:32:00-07:00 + // 1979-05-27T00:32:00.999999-07:00 + // 1979-05-27 07:32:00Z + // 1979-05-27 00:32:00-07:00 + // 1979-05-27 00:32:00.999999-07:00 + // 1979-05-27T07:32:00 + // 1979-05-27T00:32:00.999999 + // 1979-05-27 07:32:00 + // 1979-05-27 00:32:00.999999 + // 1979-05-27 + // 07:32:00 + // 00:32:00.999999 + dateRegexp = regexp.MustCompile(`^(?:\d{1,4}-\d{2}-\d{2})?(?:[T ]?\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})?)?`) +} + +// Entry point +func lexToml(inputBytes []byte) []token { + runes := bytes.Runes(inputBytes) + l := &tomlLexer{ + input: runes, + tokens: make([]token, 0, 256), + line: 1, + col: 1, + endbufferLine: 1, + endbufferCol: 1, + } + l.run() + return l.tokens +} diff --git a/vendor/github.com/pelletier/go-toml/localtime.go b/vendor/github.com/pelletier/go-toml/localtime.go new file mode 100644 index 000000000..a2149e966 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/localtime.go @@ -0,0 +1,281 @@ +// Implementation of TOML's local date/time. +// Copied over from https://github.com/googleapis/google-cloud-go/blob/master/civil/civil.go +// to avoid pulling all the Google dependencies. +// +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package civil implements types for civil time, a time-zone-independent +// representation of time that follows the rules of the proleptic +// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second +// minutes. +// +// Because they lack location information, these types do not represent unique +// moments or intervals of time. Use time.Time for that purpose. +package toml + +import ( + "fmt" + "time" +) + +// A LocalDate represents a date (year, month, day). +// +// This type does not include location information, and therefore does not +// describe a unique 24-hour timespan. +type LocalDate struct { + Year int // Year (e.g., 2014). + Month time.Month // Month of the year (January = 1, ...). + Day int // Day of the month, starting at 1. +} + +// LocalDateOf returns the LocalDate in which a time occurs in that time's location. +func LocalDateOf(t time.Time) LocalDate { + var d LocalDate + d.Year, d.Month, d.Day = t.Date() + return d +} + +// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents. +func ParseLocalDate(s string) (LocalDate, error) { + t, err := time.Parse("2006-01-02", s) + if err != nil { + return LocalDate{}, err + } + return LocalDateOf(t), nil +} + +// String returns the date in RFC3339 full-date format. +func (d LocalDate) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// IsValid reports whether the date is valid. +func (d LocalDate) IsValid() bool { + return LocalDateOf(d.In(time.UTC)) == d +} + +// In returns the time corresponding to time 00:00:00 of the date in the location. 
+//
+// In is always consistent with time.Date, even when time.Date returns a time
+// on a different day. For example, if loc is America/Indiana/Vincennes, then both
+//     time.Date(1955, time.May, 1, 0, 0, 0, 0, loc)
+// and
+//     civil.LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc)
+// return 23:00:00 on April 30, 1955.
+//
+// In panics if loc is nil.
+func (d LocalDate) In(loc *time.Location) time.Time {
+	return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc)
+}
+
+// AddDays returns the date that is n days in the future.
+// n can also be negative to go into the past.
+func (d LocalDate) AddDays(n int) LocalDate {
+	return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n))
+}
+
+// DaysSince returns the signed number of days between the date and s, not including the end day.
+// This is the inverse operation to AddDays.
+func (d LocalDate) DaysSince(s LocalDate) (days int) {
+	// We convert to Unix time so we do not have to worry about leap seconds:
+	// Unix time increases by exactly 86400 seconds per day.
+	deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix()
+	return int(deltaUnix / 86400)
+}
+
+// Before reports whether d1 occurs before d2.
+func (d1 LocalDate) Before(d2 LocalDate) bool {
+	if d1.Year != d2.Year {
+		return d1.Year < d2.Year
+	}
+	if d1.Month != d2.Month {
+		return d1.Month < d2.Month
+	}
+	return d1.Day < d2.Day
+}
+
+// After reports whether d1 occurs after d2.
+func (d1 LocalDate) After(d2 LocalDate) bool {
+	return d2.Before(d1)
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of d.String().
+func (d LocalDate) MarshalText() ([]byte, error) {
+	return []byte(d.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The date is expected to be a string in a format accepted by ParseLocalDate.
+func (d *LocalDate) UnmarshalText(data []byte) error {
+	var err error
+	*d, err = ParseLocalDate(string(data))
+	return err
+}
+
+// A LocalTime represents a time with nanosecond precision.
+//
+// This type does not include location information, and therefore does not
+// describe a unique moment in time.
+//
+// This type exists to represent the TIME type in storage-based APIs like BigQuery.
+// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type.
+type LocalTime struct {
+	Hour       int // The hour of the day in 24-hour format; range [0-23]
+	Minute     int // The minute of the hour; range [0-59]
+	Second     int // The second of the minute; range [0-59]
+	Nanosecond int // The nanosecond of the second; range [0-999999999]
+}
+
+// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs
+// in that time's location. It ignores the date.
+func LocalTimeOf(t time.Time) LocalTime {
+	var tm LocalTime
+	tm.Hour, tm.Minute, tm.Second = t.Clock()
+	tm.Nanosecond = t.Nanosecond()
+	return tm
+}
+
+// ParseLocalTime parses a string and returns the time value it represents.
+// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After
+// the HH:MM:SS part of the string, an optional fractional part may appear,
+// consisting of a decimal point followed by one to nine decimal digits.
+// (RFC3339 admits only one digit after the decimal point).
+func ParseLocalTime(s string) (LocalTime, error) {
+	t, err := time.Parse("15:04:05.999999999", s)
+	if err != nil {
+		return LocalTime{}, err
+	}
+	return LocalTimeOf(t), nil
+}
+
+// String returns the time in the format described in ParseLocalTime. If Nanoseconds
+// is zero, no fractional part will be generated. Otherwise, the result will
+// end with a fractional part consisting of a decimal point and nine digits.
+func (t LocalTime) String() string {
+	s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second)
+	if t.Nanosecond == 0 {
+		return s
+	}
+	return s + fmt.Sprintf(".%09d", t.Nanosecond)
+}
+
+// IsValid reports whether the time is valid.
+func (t LocalTime) IsValid() bool {
+	// Construct a non-zero time.
+	tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC)
+	return LocalTimeOf(tm) == t
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of t.String().
+func (t LocalTime) MarshalText() ([]byte, error) {
+	return []byte(t.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The time is expected to be a string in a format accepted by ParseLocalTime.
+func (t *LocalTime) UnmarshalText(data []byte) error {
+	var err error
+	*t, err = ParseLocalTime(string(data))
+	return err
+}
+
+// A LocalDateTime represents a date and time.
+//
+// This type does not include location information, and therefore does not
+// describe a unique moment in time.
+type LocalDateTime struct {
+	Date LocalDate
+	Time LocalTime
+}
+
+// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub.
+
+// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location.
+func LocalDateTimeOf(t time.Time) LocalDateTime {
+	return LocalDateTime{
+		Date: LocalDateOf(t),
+		Time: LocalTimeOf(t),
+	}
+}
+
+// ParseLocalDateTime parses a string and returns the LocalDateTime it represents.
+// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits
+// the time offset but includes an optional fractional time, as described in
+// ParseLocalTime. Informally, the accepted format is
+//     YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF]
+// where the 'T' may be a lower-case 't'.
+func ParseLocalDateTime(s string) (LocalDateTime, error) {
+	t, err := time.Parse("2006-01-02T15:04:05.999999999", s)
+	if err != nil {
+		t, err = time.Parse("2006-01-02t15:04:05.999999999", s)
+		if err != nil {
+			return LocalDateTime{}, err
+		}
+	}
+	return LocalDateTimeOf(t), nil
+}
+
+// String returns the datetime in the format described in ParseLocalDateTime.
+func (dt LocalDateTime) String() string {
+	return dt.Date.String() + "T" + dt.Time.String()
+}
+
+// IsValid reports whether the datetime is valid.
+func (dt LocalDateTime) IsValid() bool {
+	return dt.Date.IsValid() && dt.Time.IsValid()
+}
+
+// In returns the time corresponding to the LocalDateTime in the given location.
+//
+// If the time is missing or ambiguous at the location, In returns the same
+// result as time.Date. For example, if loc is America/Indiana/Vincennes, then
+// both
+//     time.Date(1955, time.May, 1, 0, 30, 0, 0, loc)
+// and
+//     civil.LocalDateTime{
+//         Date: civil.LocalDate{Year: 1955, Month: time.May, Day: 1},
+//         Time: civil.LocalTime{Minute: 30}}.In(loc)
+// return 23:30:00 on April 30, 1955.
+//
+// In panics if loc is nil.
+func (dt LocalDateTime) In(loc *time.Location) time.Time {
+	return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
+}
+
+// Before reports whether dt1 occurs before dt2.
+func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool {
+	return dt1.In(time.UTC).Before(dt2.In(time.UTC))
+}
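+// As a rough usage sketch, round-tripping a TOML local date-time with the
+// helpers above looks like this (error handling elided):
+//
+//   dt, _ := ParseLocalDateTime("1979-05-27T07:32:00")
+//   dt.Date.String() // "1979-05-27"
+//   dt.Time.String() // "07:32:00"
+//   dt.In(time.UTC)  // 1979-05-27 07:32:00 +0000 UTC
+
+// After reports whether dt1 occurs after dt2.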
+func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool { + return dt2.Before(dt1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of dt.String(). +func (dt LocalDateTime) MarshalText() ([]byte, error) { + return []byte(dt.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The datetime is expected to be a string in a format accepted by ParseLocalDateTime +func (dt *LocalDateTime) UnmarshalText(data []byte) error { + var err error + *dt, err = ParseLocalDateTime(string(data)) + return err +} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go new file mode 100644 index 000000000..032e0ffc5 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal.go @@ -0,0 +1,1269 @@ +package toml + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +const ( + tagFieldName = "toml" + tagFieldComment = "comment" + tagCommented = "commented" + tagMultiline = "multiline" + tagDefault = "default" +) + +type tomlOpts struct { + name string + nameFromTag bool + comment string + commented bool + multiline bool + include bool + omitempty bool + defaultValue string +} + +type encOpts struct { + quoteMapKeys bool + arraysOneElementPerLine bool +} + +var encOptsDefaults = encOpts{ + quoteMapKeys: false, +} + +type annotation struct { + tag string + comment string + commented string + multiline string + defaultValue string +} + +var annotationDefault = annotation{ + tag: tagFieldName, + comment: tagFieldComment, + commented: tagCommented, + multiline: tagMultiline, + defaultValue: tagDefault, +} + +type marshalOrder int + +// Orders the Encoder can write the fields to the output stream. +const ( + // Sort fields alphabetically. + OrderAlphabetical marshalOrder = iota + 1 + // Preserve the order the fields are encountered. For example, the order of fields in + // a struct. 
+ OrderPreserve +) + +var timeType = reflect.TypeOf(time.Time{}) +var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() +var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() +var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() +var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() +var localDateType = reflect.TypeOf(LocalDate{}) +var localTimeType = reflect.TypeOf(LocalTime{}) +var localDateTimeType = reflect.TypeOf(LocalDateTime{}) +var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) + +// Check if the given marshal type maps to a Tree primitive +func isPrimitive(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isPrimitive(mtype.Elem()) + case reflect.Bool: + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Struct: + return isTimeType(mtype) + default: + return false + } +} + +func isTimeType(mtype reflect.Type) bool { + return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType +} + +// Check if the given marshal type maps to a Tree slice or array +func isTreeSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTreeSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTree(mtype.Elem()) + default: + return false + } +} + +// Check if the given marshal type maps to a slice or array of a custom marshaler type +func isCustomMarshalerSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isCustomMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type()) + default: + return false + } +} + +// Check if the given marshal type maps to a slice or array of a text marshaler type +func isTextMarshalerSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTextMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type()) + default: + return false + } +} + +// Check if the given marshal type maps to a non-Tree slice or array +func isOtherSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isOtherSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return !isTreeSequence(mtype) + default: + return false + } +} + +// Check if the given marshal type maps to a Tree +func isTree(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTree(mtype.Elem()) + case reflect.Map: + return true + case reflect.Struct: + return !isPrimitive(mtype) + default: + return false + } +} + +func isCustomMarshaler(mtype reflect.Type) bool { + return mtype.Implements(marshalerType) +} + +func callCustomMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(Marshaler).MarshalTOML() +} + +func isTextMarshaler(mtype reflect.Type) bool { + return mtype.Implements(textMarshalerType) && !isTimeType(mtype) +} + +func callTextMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(encoding.TextMarshaler).MarshalText() +} + +func isCustomUnmarshaler(mtype reflect.Type) bool { + return 
mtype.Implements(unmarshalerType)
+}
+
+func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error {
+	return mval.Interface().(Unmarshaler).UnmarshalTOML(tval)
+}
+
+func isTextUnmarshaler(mtype reflect.Type) bool {
+	return mtype.Implements(textUnmarshalerType)
+}
+
+func callTextUnmarshaler(mval reflect.Value, text []byte) error {
+	return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text)
+}
+
+// Marshaler is the interface implemented by types that
+// can marshal themselves into valid TOML.
+type Marshaler interface {
+	MarshalTOML() ([]byte, error)
+}
+
+// Unmarshaler is the interface implemented by types that
+// can unmarshal a TOML description of themselves.
+type Unmarshaler interface {
+	UnmarshalTOML(interface{}) error
+}
+
+/*
+Marshal returns the TOML encoding of v. Behavior is similar to the Go json
+encoder, except that there is no concept of a Marshaler interface or MarshalTOML
+function for sub-structs, and currently only definite types can be marshaled
+(i.e. no `interface{}`).
+
+The following struct annotations are supported:
+
+  toml:"Field"       Overrides the field's name to output.
+  omitempty          When set, empty values and groups are not emitted.
+  comment:"comment"  Emits a # comment on the same line. This supports new lines.
+  commented:"true"   Emits the value as commented.
+
+Note that pointers are automatically assigned the "omitempty" option, as TOML
+explicitly does not handle null values (saying instead the label should be
+dropped).
+
+Tree structural types and corresponding marshal types:
+
+  *Tree                           (*)struct, (*)map[string]interface{}
+  []*Tree                         (*)[](*)struct, (*)[](*)map[string]interface{}
+  []interface{} (as interface{})  (*)[]primitive, (*)[]([]interface{})
+  interface{}                     (*)primitive
+
+Tree primitive types and corresponding marshal types:
+
+  uint64          uint, uint8-uint64, pointers to same
+  int64           int, int8-int64, pointers to same
+  float64         float32, float64, pointers to same
+  string          string, pointers to same
+  bool            bool, pointers to same
+  time.LocalTime  time.LocalTime{}, pointers to same
+
+For additional flexibility, use the Encoder API.
+*/
+func Marshal(v interface{}) ([]byte, error) {
+	return NewEncoder(nil).marshal(v)
+}
+
+// Encoder writes TOML values to an output stream.
+type Encoder struct {
+	w io.Writer
+	encOpts
+	annotation
+	line        int
+	col         int
+	order       marshalOrder
+	promoteAnon bool
+	indentation string
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		w:           w,
+		encOpts:     encOptsDefaults,
+		annotation:  annotationDefault,
+		line:        0,
+		col:         1,
+		order:       OrderAlphabetical,
+		indentation: "  ",
+	}
+}
+
+// Encode writes the TOML encoding of v to the stream.
+//
+// See the documentation for Marshal for details.
+func (e *Encoder) Encode(v interface{}) error {
+	b, err := e.marshal(v)
+	if err != nil {
+		return err
+	}
+	if _, err := e.w.Write(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+// QuoteMapKeys sets up the encoder to encode
+// maps with string type keys with quoted TOML keys.
+//
+// This relieves the character limitations on map keys.
+func (e *Encoder) QuoteMapKeys(v bool) *Encoder {
+	e.quoteMapKeys = v
+	return e
+}
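+// As a rough usage sketch, the struct annotations documented above drive the
+// emitted TOML (the Config type here is hypothetical, and the output is
+// approximate):
+//
+//   type Config struct {
+//       Name string `toml:"name" comment:"service name"`
+//       Port int    `toml:"port,omitempty"`
+//   }
+//   b, _ := Marshal(Config{Name: "svc", Port: 8080})
+//   // name = "svc" # service name
+//   // port = 8080
+
+// ArraysWithOneElementPerLine sets up the encoder to encode arrays
+// with more than one element on multiple lines instead of one.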
+//
+// For example:
+//
+//   A = [1,2,3]
+//
+// Becomes
+//
+//   A = [
+//     1,
+//     2,
+//     3,
+//   ]
+func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder {
+	e.arraysOneElementPerLine = v
+	return e
+}
+
+// Order sets the order in which fields will be written to the output stream.
+func (e *Encoder) Order(ord marshalOrder) *Encoder {
+	e.order = ord
+	return e
+}
+
+// Indentation sets the indentation string used when marshaling.
+func (e *Encoder) Indentation(indent string) *Encoder {
+	e.indentation = indent
+	return e
+}
+
+// SetTagName allows changing the default tag "toml".
+func (e *Encoder) SetTagName(v string) *Encoder {
+	e.tag = v
+	return e
+}
+
+// SetTagComment allows changing the default tag "comment".
+func (e *Encoder) SetTagComment(v string) *Encoder {
+	e.comment = v
+	return e
+}
+
+// SetTagCommented allows changing the default tag "commented".
+func (e *Encoder) SetTagCommented(v string) *Encoder {
+	e.commented = v
+	return e
+}
+
+// SetTagMultiline allows changing the default tag "multiline".
+func (e *Encoder) SetTagMultiline(v string) *Encoder {
+	e.multiline = v
+	return e
+}
+
+// PromoteAnonymous controls how anonymous struct fields are marshaled.
+// Usually, they are marshaled as if the inner exported fields were fields in
+// the outer struct. However, if an anonymous struct field is given a name in
+// its TOML tag, it is treated like a regular struct field with that name,
+// rather than being anonymous.
+//
+// When anonymous promotion is enabled, all anonymous structs are promoted
+// and treated like regular struct fields.
+func (e *Encoder) PromoteAnonymous(promote bool) *Encoder {
+	e.promoteAnon = promote
+	return e
+}
+
+func (e *Encoder) marshal(v interface{}) ([]byte, error) {
+	// Check if indentation is valid
+	for _, char := range e.indentation {
+		if !isSpace(char) {
+			return []byte{}, fmt.Errorf("invalid indentation: must only contain space or tab characters")
+		}
+	}
+
+	mtype := reflect.TypeOf(v)
+	if mtype == nil {
+		return []byte{}, errors.New("nil cannot be marshaled to TOML")
+	}
+
+	switch mtype.Kind() {
+	case reflect.Struct, reflect.Map:
+	case reflect.Ptr:
+		if mtype.Elem().Kind() != reflect.Struct {
+			return []byte{}, errors.New("only a pointer to struct can be marshaled to TOML")
+		}
+		if reflect.ValueOf(v).IsNil() {
+			return []byte{}, errors.New("nil pointer cannot be marshaled to TOML")
+		}
+	default:
+		return []byte{}, errors.New("only a struct or map can be marshaled to TOML")
+	}
+
+	sval := reflect.ValueOf(v)
+	if isCustomMarshaler(mtype) {
+		return callCustomMarshaler(sval)
+	}
+	if isTextMarshaler(mtype) {
+		return callTextMarshaler(sval)
+	}
+	t, err := e.valueToTree(mtype, sval)
+	if err != nil {
+		return []byte{}, err
+	}
+
+	var buf bytes.Buffer
+	_, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, false)
+
+	return buf.Bytes(), err
+}
+
+// Create next tree with a position based on Encoder.line
+func (e *Encoder) nextTree() *Tree {
+	return newTreeWithPosition(Position{Line: e.line, Col: 1})
+}
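+// As a rough usage sketch, the option setters above chain off NewEncoder
+// (buf and cfg are hypothetical):
+//
+//   var buf bytes.Buffer
+//   err := NewEncoder(&buf).
+//       Order(OrderPreserve).
+//       Indentation("\t").
+//       ArraysWithOneElementPerLine(true).
+//       Encode(cfg)
+
+// Convert given marshal struct or map value to toml tree
+func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
+	if mtype.Kind() == reflect.Ptr {
+		return e.valueToTree(mtype.Elem(), mval.Elem())
+	}
+	tval := e.nextTree()
+	switch mtype.Kind() {
+	case reflect.Struct:
+		switch mval.Interface().(type) {
+		case Tree:
+			reflect.ValueOf(tval).Elem().Set(mval)
+		default:
+			for i := 0; i < mtype.NumField(); i++ {
+				mtypef, mvalf := mtype.Field(i),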
mval.Field(i) + opts := tomlOptions(mtypef, e.annotation) + if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) { + val, err := e.valueToToml(mtypef.Type, mvalf) + if err != nil { + return nil, err + } + if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon { + e.appendTree(tval, tree) + } else { + val = e.wrapTomlValue(val, tval) + tval.SetPathWithOptions([]string{opts.name}, SetOptions{ + Comment: opts.comment, + Commented: opts.commented, + Multiline: opts.multiline, + }, val) + } + } + } + } + case reflect.Map: + keys := mval.MapKeys() + if e.order == OrderPreserve && len(keys) > 0 { + // Sorting []reflect.Value is not straight forward. + // + // OrderPreserve will support deterministic results when string is used + // as the key to maps. + typ := keys[0].Type() + kind := keys[0].Kind() + if kind == reflect.String { + ikeys := make([]string, len(keys)) + for i := range keys { + ikeys[i] = keys[i].Interface().(string) + } + sort.Strings(ikeys) + for i := range ikeys { + keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ) + } + } + } + for _, key := range keys { + mvalf := mval.MapIndex(key) + if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() { + continue + } + val, err := e.valueToToml(mtype.Elem(), mvalf) + if err != nil { + return nil, err + } + val = e.wrapTomlValue(val, tval) + if e.quoteMapKeys { + keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine) + if err != nil { + return nil, err + } + tval.SetPath([]string{keyStr}, val) + } else { + tval.SetPath([]string{key.String()}, val) + } + } + } + return tval, nil +} + +// Convert given marshal slice to slice of Toml trees +func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { + tval := make([]*Tree, mval.Len(), mval.Len()) + for i := 0; i < mval.Len(); i++ { + val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) + if err != nil { + return nil, err + } + tval[i] = val + } + return tval, nil +} + +// Convert given marshal slice to slice of toml values +func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { + tval := make([]interface{}, mval.Len(), mval.Len()) + for i := 0; i < mval.Len(); i++ { + val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) + if err != nil { + return nil, err + } + tval[i] = val + } + return tval, nil +} + +// Convert given marshal value to toml value +func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { + if mtype.Kind() == reflect.Ptr { + switch { + case isCustomMarshaler(mtype): + return callCustomMarshaler(mval) + case isTextMarshaler(mtype): + b, err := callTextMarshaler(mval) + return string(b), err + default: + return e.valueToToml(mtype.Elem(), mval.Elem()) + } + } + if mtype.Kind() == reflect.Interface { + return e.valueToToml(mval.Elem().Type(), mval.Elem()) + } + switch { + case isCustomMarshaler(mtype): + return callCustomMarshaler(mval) + case isTextMarshaler(mtype): + b, err := callTextMarshaler(mval) + return string(b), err + case isTree(mtype): + return e.valueToTree(mtype, mval) + case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype): + return e.valueToOtherSlice(mtype, mval) + case isTreeSequence(mtype): + return e.valueToTreeSlice(mtype, mval) + default: + switch mtype.Kind() { + case reflect.Bool: + return mval.Bool(), nil + case reflect.Int, 
reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) {
+				return fmt.Sprint(mval), nil
+			}
+			return mval.Int(), nil
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			return mval.Uint(), nil
+		case reflect.Float32, reflect.Float64:
+			return mval.Float(), nil
+		case reflect.String:
+			return mval.String(), nil
+		case reflect.Struct:
+			return mval.Interface(), nil
+		default:
+			return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind())
+		}
+	}
+}
+
+func (e *Encoder) appendTree(t, o *Tree) error {
+	for key, value := range o.values {
+		if _, ok := t.values[key]; ok {
+			continue
+		}
+		if tomlValue, ok := value.(*tomlValue); ok {
+			tomlValue.position.Col = t.position.Col
+		}
+		t.values[key] = value
+	}
+	return nil
+}
+
+// Create a toml value with the current line number as the position line
+func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} {
+	_, isTree := val.(*Tree)
+	_, isTreeS := val.([]*Tree)
+	if isTree || isTreeS {
+		return val
+	}
+
+	ret := &tomlValue{
+		value: val,
+		position: Position{
+			e.line,
+			parent.position.Col,
+		},
+	}
+	e.line++
+	return ret
+}
+
+// Unmarshal attempts to unmarshal the Tree into a Go struct pointed to by v.
+// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
+// sub-structs, and only definite types can be unmarshaled.
+func (t *Tree) Unmarshal(v interface{}) error {
+	d := Decoder{tval: t, tagName: tagFieldName}
+	return d.unmarshal(v)
+}
+
+// Marshal returns the TOML encoding of the Tree.
+// See the Marshal() documentation for the type mapping table.
+func (t *Tree) Marshal() ([]byte, error) {
+	var buf bytes.Buffer
+	_, err := t.WriteTo(&buf)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// Unmarshal parses the TOML-encoded data and stores the result in the value
+// pointed to by v. Behavior is similar to the Go json encoder, except that there
+// is no concept of an Unmarshaler interface or UnmarshalTOML function for
+// sub-structs, and currently only definite types can be unmarshaled to (i.e. no
+// `interface{}`).
+//
+// The following struct annotations are supported:
+//
+//   toml:"Field"   Overrides the field's name to map to.
+//   default:"foo"  Provides a default value.
+//
+// For default values, only fields of the following types are supported:
+//   * string
+//   * bool
+//   * int
+//   * int64
+//   * float64
+//
+// See the Marshal() documentation for the type mapping table.
+func Unmarshal(data []byte, v interface{}) error {
+	t, err := LoadReader(bytes.NewReader(data))
+	if err != nil {
+		return err
+	}
+	return t.Unmarshal(v)
+}
+
+// Decoder reads and decodes TOML values from an input stream.
+type Decoder struct {
+	r io.Reader
+	tval *Tree
+	encOpts
+	tagName string
+	strict bool
+	visitor visitorState
+}
+
+// NewDecoder returns a new decoder that reads from r.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		r:       r,
+		encOpts: encOptsDefaults,
+		tagName: tagFieldName,
+	}
+}
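+// As a rough usage sketch, the default tag documented above fills in fields
+// that are absent from the input (the Server type is hypothetical):
+//
+//   type Server struct {
+//       Host string `toml:"host"`
+//       Port int    `toml:"port" default:"8080"`
+//   }
+//   var s Server
+//   _ = Unmarshal([]byte(`host = "example.com"`), &s)
+//   // s.Host == "example.com", s.Port == 8080
+//
+// Strict decoding via the Decoder surfaces unknown keys instead of silently
+// dropping them:
+//
+//   d := NewDecoder(strings.NewReader(`hots = "typo"`)).Strict(true)
+//   err := d.Decode(&s) // err reports the undecoded "hots" key
+
+// Decode reads a TOML-encoded value from its input
+// and unmarshals it into the value pointed at by v.
+//
+// See the documentation for Marshal for details.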
+func (d *Decoder) Decode(v interface{}) error { + var err error + d.tval, err = LoadReader(d.r) + if err != nil { + return err + } + return d.unmarshal(v) +} + +// SetTagName allows changing default tag "toml" +func (d *Decoder) SetTagName(v string) *Decoder { + d.tagName = v + return d +} + +// Strict allows changing to strict decoding. Any fields that are found in the +// input data and do not have a corresponding struct member cause an error. +func (d *Decoder) Strict(strict bool) *Decoder { + d.strict = strict + return d +} + +func (d *Decoder) unmarshal(v interface{}) error { + mtype := reflect.TypeOf(v) + if mtype == nil { + return errors.New("nil cannot be unmarshaled from TOML") + } + if mtype.Kind() != reflect.Ptr { + return errors.New("only a pointer to struct or map can be unmarshaled from TOML") + } + + elem := mtype.Elem() + + switch elem.Kind() { + case reflect.Struct, reflect.Map: + case reflect.Interface: + elem = mapStringInterfaceType + default: + return errors.New("only a pointer to struct or map can be unmarshaled from TOML") + } + + if reflect.ValueOf(v).IsNil() { + return errors.New("nil pointer cannot be unmarshaled from TOML") + } + + vv := reflect.ValueOf(v).Elem() + + if d.strict { + d.visitor = newVisitorState(d.tval) + } + + sval, err := d.valueFromTree(elem, d.tval, &vv) + if err != nil { + return err + } + if err := d.visitor.validate(); err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(sval) + return nil +} + +// Convert toml tree to marshal struct or map, using marshal type. When mval1 +// is non-nil, merge fields into the given value instead of allocating a new one. +func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval, mval1) + } + + // Check if pointer to value implements the Unmarshaler interface. 
+ if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) { + d.visitor.visitAll() + + if tval == nil { + return mvalPtr.Elem(), nil + } + + if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) + } + return mvalPtr.Elem(), nil + } + + var mval reflect.Value + switch mtype.Kind() { + case reflect.Struct: + if mval1 != nil { + mval = *mval1 + } else { + mval = reflect.New(mtype).Elem() + } + + switch mval.Interface().(type) { + case Tree: + mval.Set(reflect.ValueOf(tval).Elem()) + default: + for i := 0; i < mtype.NumField(); i++ { + mtypef := mtype.Field(i) + an := annotation{tag: d.tagName} + opts := tomlOptions(mtypef, an) + if !opts.include { + continue + } + baseKey := opts.name + keysToTry := []string{ + baseKey, + strings.ToLower(baseKey), + strings.ToTitle(baseKey), + strings.ToLower(string(baseKey[0])) + baseKey[1:], + } + + found := false + if tval != nil { + for _, key := range keysToTry { + exists := tval.HasPath([]string{key}) + if !exists { + continue + } + + d.visitor.push(key) + val := tval.GetPath([]string{key}) + fval := mval.Field(i) + mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) + if err != nil { + return mval, formatError(err, tval.GetPositionPath([]string{key})) + } + mval.Field(i).Set(mvalf) + found = true + d.visitor.pop() + break + } + } + + if !found && opts.defaultValue != "" { + mvalf := mval.Field(i) + var val interface{} + var err error + switch mvalf.Kind() { + case reflect.String: + val = opts.defaultValue + case reflect.Bool: + val, err = strconv.ParseBool(opts.defaultValue) + case reflect.Uint: + val, err = strconv.ParseUint(opts.defaultValue, 10, 0) + case reflect.Uint8: + val, err = strconv.ParseUint(opts.defaultValue, 10, 8) + case reflect.Uint16: + val, err = strconv.ParseUint(opts.defaultValue, 10, 16) + case reflect.Uint32: + val, err = strconv.ParseUint(opts.defaultValue, 10, 32) + case reflect.Uint64: + val, err = strconv.ParseUint(opts.defaultValue, 10, 64) + case reflect.Int: + val, err = strconv.ParseInt(opts.defaultValue, 10, 0) + case reflect.Int8: + val, err = strconv.ParseInt(opts.defaultValue, 10, 8) + case reflect.Int16: + val, err = strconv.ParseInt(opts.defaultValue, 10, 16) + case reflect.Int32: + val, err = strconv.ParseInt(opts.defaultValue, 10, 32) + case reflect.Int64: + val, err = strconv.ParseInt(opts.defaultValue, 10, 64) + case reflect.Float32: + val, err = strconv.ParseFloat(opts.defaultValue, 32) + case reflect.Float64: + val, err = strconv.ParseFloat(opts.defaultValue, 64) + default: + return mvalf, fmt.Errorf("unsupported field type for default option") + } + + if err != nil { + return mvalf, err + } + mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type())) + } + + // save the old behavior above and try to check structs + if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct { + tmpTval := tval + if !mtypef.Anonymous { + tmpTval = nil + } + fval := mval.Field(i) + v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval) + if err != nil { + return v, err + } + mval.Field(i).Set(v) + } + } + } + case reflect.Map: + mval = reflect.MakeMap(mtype) + for _, key := range tval.Keys() { + d.visitor.push(key) + // TODO: path splits key + val := tval.GetPath([]string{key}) + mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) + if err != nil { + return mval, formatError(err, tval.GetPositionPath([]string{key})) + } + mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) + d.visitor.pop() + } + } + 
return mval, nil +} + +// Convert toml value to marshal struct/map slice, using marshal type +func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + + for i := 0; i < len(tval); i++ { + d.visitor.push(strconv.Itoa(i)) + val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + d.visitor.pop() + } + return mval, nil +} + +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + + for i := 0; i < len(tval); i++ { + val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + val := reflect.ValueOf(tval) + length := val.Len() + + mval, err := makeSliceOrArray(mtype, length) + if err != nil { + return mval, err + } + + for i := 0; i < length; i++ { + val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Create a new slice or a new array with specified length +func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) { + var mval reflect.Value + switch mtype.Kind() { + case reflect.Slice: + mval = reflect.MakeSlice(mtype, tLength, tLength) + case reflect.Array: + mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem() + if tLength > mtype.Len() { + return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len()) + } + } + return mval, nil +} + +// Convert toml value to marshal value, using marshal type. When mval1 is non-nil +// and the given type is a struct value, merge fields into it. 
+func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval, mval1) + } + + switch t := tval.(type) { + case *Tree: + var mval11 *reflect.Value + if mtype.Kind() == reflect.Struct { + mval11 = mval1 + } + + if isTree(mtype) { + return d.valueFromTree(mtype, t, mval11) + } + + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil) + } else { + return d.valueFromToml(mval1.Elem().Type(), t, nil) + } + } + + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) + case []*Tree: + if isTreeSequence(mtype) { + return d.valueFromTreeSlice(mtype, t) + } + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t) + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) + case []interface{}: + d.visitor.visit() + if isOtherSequence(mtype) { + return d.valueFromOtherSlice(mtype, t) + } + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t) + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) + default: + d.visitor.visit() + // Check if pointer to value implements the encoding.TextUnmarshaler. + if mvalPtr := reflect.New(mtype); isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) { + if err := d.unmarshalText(tval, mvalPtr); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err) + } + return mvalPtr.Elem(), nil + } + + switch mtype.Kind() { + case reflect.Bool, reflect.Struct: + val := reflect.ValueOf(tval) + + switch val.Type() { + case localDateType: + localDate := val.Interface().(LocalDate) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil + } + case localDateTimeType: + localDateTime := val.Interface().(LocalDateTime) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date( + localDateTime.Date.Year, + localDateTime.Date.Month, + localDateTime.Date.Day, + localDateTime.Time.Hour, + localDateTime.Time.Minute, + localDateTime.Time.Second, + localDateTime.Time.Nanosecond, + time.Local)), nil + } + } + + // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.String: + val := reflect.ValueOf(tval) + // stupidly, int64 is convertible to string. So special case this. 
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + val := reflect.ValueOf(tval) + if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String { + d, err := time.ParseDuration(val.String()) + if err != nil { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. %s", tval, tval, mtype.String(), err) + } + return reflect.ValueOf(d), nil + } + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + if val.Convert(reflect.TypeOf(int(1))).Int() < 0 { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Float32, reflect.Float64: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Interface: + if mval1 == nil || mval1.IsNil() { + return reflect.ValueOf(tval), nil + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + case reflect.Slice, reflect.Array: + if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) { + return d.valueFromOtherSliceI(mtype, t) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) + default: + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) + } + } +} + +func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { + var melem *reflect.Value + + if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) { + elem := mval1.Elem() + melem = &elem + } + + val, err := d.valueFromToml(mtype.Elem(), tval, melem) + if err != nil { + return reflect.ValueOf(nil), err + } + mval := reflect.New(mtype.Elem()) + mval.Elem().Set(val) + return mval, nil +} + +func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error { + var buf bytes.Buffer + fmt.Fprint(&buf, tval) + return callTextUnmarshaler(mval, buf.Bytes()) +} 
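+
+// As a rough usage sketch, the string-to-duration conversion handled in
+// valueFromToml above lets a TOML string populate a time.Duration field
+// (the Conn type is hypothetical):
+//
+//   type Conn struct {
+//       Timeout time.Duration `toml:"timeout"`
+//   }
+//   var c Conn
+//   _ = Unmarshal([]byte(`timeout = "1m30s"`), &c)
+//   // c.Timeout == 90 * time.Second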
+ +func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { + tag := vf.Tag.Get(an.tag) + parse := strings.Split(tag, ",") + var comment string + if c := vf.Tag.Get(an.comment); c != "" { + comment = c + } + commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented)) + multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline)) + defaultValue := vf.Tag.Get(tagDefault) + result := tomlOpts{ + name: vf.Name, + nameFromTag: false, + comment: comment, + commented: commented, + multiline: multiline, + include: true, + omitempty: false, + defaultValue: defaultValue, + } + if parse[0] != "" { + if parse[0] == "-" && len(parse) == 1 { + result.include = false + } else { + result.name = strings.Trim(parse[0], " ") + result.nameFromTag = true + } + } + if vf.PkgPath != "" { + result.include = false + } + if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { + result.omitempty = true + } + if vf.Type.Kind() == reflect.Ptr { + result.omitempty = true + } + return result +} + +func isZero(val reflect.Value) bool { + switch val.Type().Kind() { + case reflect.Slice, reflect.Array, reflect.Map: + return val.Len() == 0 + default: + return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) + } +} + +func formatError(err error, pos Position) error { + if err.Error()[0] == '(' { // Error already contains position information + return err + } + return fmt.Errorf("%s: %s", pos, err) +} + +// visitorState keeps track of which keys were unmarshaled. +type visitorState struct { + tree *Tree + path []string + keys map[string]struct{} + active bool +} + +func newVisitorState(tree *Tree) visitorState { + path, result := []string{}, map[string]struct{}{} + insertKeys(path, result, tree) + return visitorState{ + tree: tree, + path: path[:0], + keys: result, + active: true, + } +} + +func (s *visitorState) push(key string) { + if s.active { + s.path = append(s.path, key) + } +} + +func (s *visitorState) pop() { + if s.active { + s.path = s.path[:len(s.path)-1] + } +} + +func (s *visitorState) visit() { + if s.active { + delete(s.keys, strings.Join(s.path, ".")) + } +} + +func (s *visitorState) visitAll() { + if s.active { + for k := range s.keys { + if strings.HasPrefix(k, strings.Join(s.path, ".")) { + delete(s.keys, k) + } + } + } +} + +func (s *visitorState) validate() error { + if !s.active { + return nil + } + undecoded := make([]string, 0, len(s.keys)) + for key := range s.keys { + undecoded = append(undecoded, key) + } + sort.Strings(undecoded) + if len(undecoded) > 0 { + return fmt.Errorf("undecoded keys: %q", undecoded) + } + return nil +} + +func insertKeys(path []string, m map[string]struct{}, tree *Tree) { + for k, v := range tree.values { + switch node := v.(type) { + case []*Tree: + for i, item := range node { + insertKeys(append(path, k, strconv.Itoa(i)), m, item) + } + case *Tree: + insertKeys(append(path, k), m, node) + case *tomlValue: + m[strings.Join(append(path, k), ".")] = struct{}{} + } + } +} diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml new file mode 100644 index 000000000..792b72ed7 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml @@ -0,0 +1,39 @@ +title = "TOML Marshal Testing" + +[basic_lists] + floats = [12.3,45.6,78.9] + bools = [true,false,true] + dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] + ints = [8001,8001,8002] + uints = [5002,5003] + strings = ["One","Two","Three"] + +[[subdocptrs]] + name = 
"Second" + +[basic_map] + one = "one" + two = "two" + +[subdoc] + + [subdoc.second] + name = "Second" + + [subdoc.first] + name = "First" + +[basic] + uint = 5001 + bool = true + float = 123.4 + float64 = 123.456782132399 + int = 5000 + string = "Bite me" + date = 1979-05-27T07:32:00Z + +[[subdoclist]] + name = "List.First" + +[[subdoclist]] + name = "List.Second" diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml new file mode 100644 index 000000000..ba5e110bf --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml @@ -0,0 +1,39 @@ +title = "TOML Marshal Testing" + +[basic] + bool = true + date = 1979-05-27T07:32:00Z + float = 123.4 + float64 = 123.456782132399 + int = 5000 + string = "Bite me" + uint = 5001 + +[basic_lists] + bools = [true,false,true] + dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] + floats = [12.3,45.6,78.9] + ints = [8001,8001,8002] + strings = ["One","Two","Three"] + uints = [5002,5003] + +[basic_map] + one = "one" + two = "two" + +[subdoc] + + [subdoc.first] + name = "First" + + [subdoc.second] + name = "Second" + +[[subdoclist]] + name = "List.First" + +[[subdoclist]] + name = "List.Second" + +[[subdocptrs]] + name = "Second" diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go new file mode 100644 index 000000000..7bf40bbdc --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/parser.go @@ -0,0 +1,493 @@ +// TOML Parser. + +package toml + +import ( + "errors" + "fmt" + "math" + "reflect" + "regexp" + "strconv" + "strings" + "time" +) + +type tomlParser struct { + flowIdx int + flow []token + tree *Tree + currentTable []string + seenTableKeys []string +} + +type tomlParserStateFn func() tomlParserStateFn + +// Formats and panics an error message based on a token +func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { + panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) +} + +func (p *tomlParser) run() { + for state := p.parseStart; state != nil; { + state = state() + } +} + +func (p *tomlParser) peek() *token { + if p.flowIdx >= len(p.flow) { + return nil + } + return &p.flow[p.flowIdx] +} + +func (p *tomlParser) assume(typ tokenType) { + tok := p.getToken() + if tok == nil { + p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) + } + if tok.typ != typ { + p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) + } +} + +func (p *tomlParser) getToken() *token { + tok := p.peek() + if tok == nil { + return nil + } + p.flowIdx++ + return tok +} + +func (p *tomlParser) parseStart() tomlParserStateFn { + tok := p.peek() + + // end of stream, parsing is finished + if tok == nil { + return nil + } + + switch tok.typ { + case tokenDoubleLeftBracket: + return p.parseGroupArray + case tokenLeftBracket: + return p.parseGroup + case tokenKey: + return p.parseAssign + case tokenEOF: + return nil + case tokenError: + p.raiseError(tok, "parsing error: %s", tok.String()) + default: + p.raiseError(tok, "unexpected token %s", tok.typ) + } + return nil +} + +func (p *tomlParser) parseGroupArray() tomlParserStateFn { + startToken := p.getToken() // discard the [[ + key := p.getToken() + if key.typ != tokenKeyGroupArray { + p.raiseError(key, "unexpected token %s, was expecting a table array key", key) + } + + // get or create table array element at the indicated part in the path + keys, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid 
table array key: %s", err) + } + p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries + destTree := p.tree.GetPath(keys) + var array []*Tree + if destTree == nil { + array = make([]*Tree, 0) + } else if target, ok := destTree.([]*Tree); ok && target != nil { + array = destTree.([]*Tree) + } else { + p.raiseError(key, "key %s is already assigned and not of type table array", key) + } + p.currentTable = keys + + // add a new tree to the end of the table array + newTree := newTree() + newTree.position = startToken.Position + array = append(array, newTree) + p.tree.SetPath(p.currentTable, array) + + // remove all keys that were children of this table array + prefix := key.val + "." + found := false + for ii := 0; ii < len(p.seenTableKeys); { + tableKey := p.seenTableKeys[ii] + if strings.HasPrefix(tableKey, prefix) { + p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) + } else { + found = (tableKey == key.val) + ii++ + } + } + + // keep this key name from use by other kinds of assignments + if !found { + p.seenTableKeys = append(p.seenTableKeys, key.val) + } + + // move to next parser state + p.assume(tokenDoubleRightBracket) + return p.parseStart +} + +func (p *tomlParser) parseGroup() tomlParserStateFn { + startToken := p.getToken() // discard the [ + key := p.getToken() + if key.typ != tokenKeyGroup { + p.raiseError(key, "unexpected token %s, was expecting a table key", key) + } + for _, item := range p.seenTableKeys { + if item == key.val { + p.raiseError(key, "duplicated tables") + } + } + + p.seenTableKeys = append(p.seenTableKeys, key.val) + keys, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid table array key: %s", err) + } + if err := p.tree.createSubTree(keys, startToken.Position); err != nil { + p.raiseError(key, "%s", err) + } + destTree := p.tree.GetPath(keys) + if target, ok := destTree.(*Tree); ok && target != nil && target.inline { + p.raiseError(key, "could not re-define exist inline table or its sub-table : %s", + strings.Join(keys, ".")) + } + p.assume(tokenRightBracket) + p.currentTable = keys + return p.parseStart +} + +func (p *tomlParser) parseAssign() tomlParserStateFn { + key := p.getToken() + p.assume(tokenEqual) + + parsedKey, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid key: %s", err.Error()) + } + + value := p.parseRvalue() + var tableKey []string + if len(p.currentTable) > 0 { + tableKey = p.currentTable + } else { + tableKey = []string{} + } + + prefixKey := parsedKey[0 : len(parsedKey)-1] + tableKey = append(tableKey, prefixKey...) 
+ + // find the table to assign, looking out for arrays of tables + var targetNode *Tree + switch node := p.tree.GetPath(tableKey).(type) { + case []*Tree: + targetNode = node[len(node)-1] + case *Tree: + targetNode = node + case nil: + // create intermediate + if err := p.tree.createSubTree(tableKey, key.Position); err != nil { + p.raiseError(key, "could not create intermediate group: %s", err) + } + targetNode = p.tree.GetPath(tableKey).(*Tree) + default: + p.raiseError(key, "Unknown table type for path: %s", + strings.Join(tableKey, ".")) + } + + if targetNode.inline { + p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s", + strings.Join(tableKey, ".")) + } + + // assign value to the found table + keyVal := parsedKey[len(parsedKey)-1] + localKey := []string{keyVal} + finalKey := append(tableKey, keyVal) + if targetNode.GetPath(localKey) != nil { + p.raiseError(key, "The following key was defined twice: %s", + strings.Join(finalKey, ".")) + } + var toInsert interface{} + + switch value.(type) { + case *Tree, []*Tree: + toInsert = value + default: + toInsert = &tomlValue{value: value, position: key.Position} + } + targetNode.values[keyVal] = toInsert + return p.parseStart +} + +var numberUnderscoreInvalidRegexp *regexp.Regexp +var hexNumberUnderscoreInvalidRegexp *regexp.Regexp + +func numberContainsInvalidUnderscore(value string) error { + if numberUnderscoreInvalidRegexp.MatchString(value) { + return errors.New("invalid use of _ in number") + } + return nil +} + +func hexNumberContainsInvalidUnderscore(value string) error { + if hexNumberUnderscoreInvalidRegexp.MatchString(value) { + return errors.New("invalid use of _ in hex number") + } + return nil +} + +func cleanupNumberToken(value string) string { + cleanedVal := strings.Replace(value, "_", "", -1) + return cleanedVal +} + +func (p *tomlParser) parseRvalue() interface{} { + tok := p.getToken() + if tok == nil || tok.typ == tokenEOF { + p.raiseError(tok, "expecting a value") + } + + switch tok.typ { + case tokenString: + return tok.val + case tokenTrue: + return true + case tokenFalse: + return false + case tokenInf: + if tok.val[0] == '-' { + return math.Inf(-1) + } + return math.Inf(1) + case tokenNan: + return math.NaN() + case tokenInteger: + cleanedVal := cleanupNumberToken(tok.val) + var err error + var val int64 + if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { + switch cleanedVal[1] { + case 'x': + err = hexNumberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 16, 64) + case 'o': + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 8, 64) + case 'b': + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 2, 64) + default: + panic("invalid base") // the lexer should catch this first + } + } else { + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal, 10, 64) + } + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenFloat: + err := numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + cleanedVal := cleanupNumberToken(tok.val) + val, err := strconv.ParseFloat(cleanedVal, 64) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenDate: + 
layout := time.RFC3339Nano + if !strings.Contains(tok.val, "T") { + layout = strings.Replace(layout, "T", " ", 1) + } + val, err := time.ParseInLocation(layout, tok.val, time.UTC) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLocalDate: + v := strings.Replace(tok.val, " ", "T", -1) + isDateTime := false + isTime := false + for _, c := range v { + if c == 'T' || c == 't' { + isDateTime = true + break + } + if c == ':' { + isTime = true + break + } + } + + var val interface{} + var err error + + if isDateTime { + val, err = ParseLocalDateTime(v) + } else if isTime { + val, err = ParseLocalTime(v) + } else { + val, err = ParseLocalDate(v) + } + + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLeftBracket: + return p.parseArray() + case tokenLeftCurlyBrace: + return p.parseInlineTable() + case tokenEqual: + p.raiseError(tok, "cannot have multiple equals for the same key") + case tokenError: + p.raiseError(tok, "%s", tok) + } + + p.raiseError(tok, "never reached") + + return nil +} + +func tokenIsComma(t *token) bool { + return t != nil && t.typ == tokenComma +} + +func (p *tomlParser) parseInlineTable() *Tree { + tree := newTree() + var previous *token +Loop: + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated inline table") + } + switch follow.typ { + case tokenRightCurlyBrace: + p.getToken() + break Loop + case tokenKey, tokenInteger, tokenString: + if !tokenIsComma(previous) && previous != nil { + p.raiseError(follow, "comma expected between fields in inline table") + } + key := p.getToken() + p.assume(tokenEqual) + + parsedKey, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid key: %s", err) + } + + value := p.parseRvalue() + tree.SetPath(parsedKey, value) + case tokenComma: + if tokenIsComma(previous) { + p.raiseError(follow, "need field between two commas in inline table") + } + p.getToken() + default: + p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) + } + previous = follow + } + if tokenIsComma(previous) { + p.raiseError(previous, "trailing comma at the end of inline table") + } + tree.inline = true + return tree +} + +func (p *tomlParser) parseArray() interface{} { + var array []interface{} + arrayType := reflect.TypeOf(newTree()) + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ == tokenRightBracket { + p.getToken() + break + } + val := p.parseRvalue() + if reflect.TypeOf(val) != arrayType { + arrayType = nil + } + array = append(array, val) + follow = p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ != tokenRightBracket && follow.typ != tokenComma { + p.raiseError(follow, "missing comma") + } + if follow.typ == tokenComma { + p.getToken() + } + } + + // if the array is a mixed-type array or its length is 0, + // don't convert it to a table array + if len(array) <= 0 { + arrayType = nil + } + // An array of Trees is actually an array of inline + // tables, which is a shorthand for a table array. If the + // array was not converted from []interface{} to []*Tree, + // the two notations would not be equivalent. 
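+	// For example, under that rule the inline form
+	//
+	//   points = [ { x = 1 }, { x = 2 } ]
+	//
+	// decodes to the same []*Tree as the expanded form
+	//
+	//   [[points]]
+	//   x = 1
+	//   [[points]]
+	//   x = 2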
+	if arrayType == reflect.TypeOf(newTree()) {
+		tomlArray := make([]*Tree, len(array))
+		for i, v := range array {
+			tomlArray[i] = v.(*Tree)
+		}
+		return tomlArray
+	}
+	return array
+}
+
+func parseToml(flow []token) *Tree {
+	result := newTree()
+	result.position = Position{1, 1}
+	parser := &tomlParser{
+		flowIdx:       0,
+		flow:          flow,
+		tree:          result,
+		currentTable:  make([]string, 0),
+		seenTableKeys: make([]string, 0),
+	}
+	parser.run()
+	return result
+}
+
+func init() {
+	numberUnderscoreInvalidRegexp = regexp.MustCompile(`([^\d]_|_[^\d])|_$|^_`)
+	hexNumberUnderscoreInvalidRegexp = regexp.MustCompile(`(^0x_)|([^\da-f]_|_[^\da-f])|_$|^_`)
+}
diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go
new file mode 100644
index 000000000..c17bff87b
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/position.go
@@ -0,0 +1,29 @@
+// Position support for go-toml
+
+package toml
+
+import (
+	"fmt"
+)
+
+// Position of a document element within a TOML document.
+//
+// Line and Col are both 1-indexed positions for the element's line number and
+// column number, respectively. Values of zero or less will cause Invalid() to
+// return true.
+type Position struct {
+	Line int // line within the document
+	Col  int // column within the line
+}
+
+// String representation of the position.
+// Displays 1-indexed line and column numbers.
+func (p Position) String() string {
+	return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
+}
+
+// Invalid reports whether the position is invalid, i.e. whether Line or Col
+// is zero or negative.
+func (p Position) Invalid() bool {
+	return p.Line <= 0 || p.Col <= 0
+}
diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go
new file mode 100644
index 000000000..6af4ec46b
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/token.go
@@ -0,0 +1,134 @@
+package toml
+
+import "fmt"
+
+// Define tokens
+type tokenType int
+
+const (
+	eof = -(iota + 1)
+)
+
+const (
+	tokenError tokenType = iota
+	tokenEOF
+	tokenComment
+	tokenKey
+	tokenString
+	tokenInteger
+	tokenTrue
+	tokenFalse
+	tokenFloat
+	tokenInf
+	tokenNan
+	tokenEqual
+	tokenLeftBracket
+	tokenRightBracket
+	tokenLeftCurlyBrace
+	tokenRightCurlyBrace
+	tokenLeftParen
+	tokenRightParen
+	tokenDoubleLeftBracket
+	tokenDoubleRightBracket
+	tokenDate
+	tokenLocalDate
+	tokenKeyGroup
+	tokenKeyGroupArray
+	tokenComma
+	tokenColon
+	tokenDollar
+	tokenStar
+	tokenQuestion
+	tokenDot
+	tokenDotDot
+	tokenEOL
+)
+
+var tokenTypeNames = []string{
+	"Error",
+	"EOF",
+	"Comment",
+	"Key",
+	"String",
+	"Integer",
+	"True",
+	"False",
+	"Float",
+	"Inf",
+	"NaN",
+	"=",
+	"[",
+	"]",
+	"{",
+	"}",
+	"(",
+	")",
+	"]]",
+	"[[",
+	"LocalDate",
+	"LocalDate",
+	"KeyGroup",
+	"KeyGroupArray",
+	",",
+	":",
+	"$",
+	"*",
+	"?",
+	".",
+	"..",
+	"EOL",
+}
+
+type token struct {
+	Position
+	typ tokenType
+	val string
+}
+
+func (tt tokenType) String() string {
+	idx := int(tt)
+	if idx < len(tokenTypeNames) {
+		return tokenTypeNames[idx]
+	}
+	return "Unknown"
+}
+
+func (t token) String() string {
+	switch t.typ {
+	case tokenEOF:
+		return "EOF"
+	case tokenError:
+		return t.val
+	}
+
+	return fmt.Sprintf("%q", t.val)
+}
+
+func isSpace(r rune) bool {
+	return r == ' ' || r == '\t'
+}
+
+func isAlphanumeric(r rune) bool {
+	return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_'
+}
+
+func isKeyChar(r rune) bool {
+	// Keys start with the first character that isn't whitespace or [ and end
+	// with the last
non-whitespace character before the equals sign. Keys + // cannot contain a # character." + return !(r == '\r' || r == '\n' || r == eof || r == '=') +} + +func isKeyStartChar(r rune) bool { + return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') +} + +func isDigit(r rune) bool { + return '0' <= r && r <= '9' +} + +func isHexDigit(r rune) bool { + return isDigit(r) || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go new file mode 100644 index 000000000..cbb89a9af --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/toml.go @@ -0,0 +1,529 @@ +package toml + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" +) + +type tomlValue struct { + value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list + comment string + commented bool + multiline bool + position Position +} + +// Tree is the result of the parsing of a TOML file. +type Tree struct { + values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree + comment string + commented bool + inline bool + position Position +} + +func newTree() *Tree { + return newTreeWithPosition(Position{}) +} + +func newTreeWithPosition(pos Position) *Tree { + return &Tree{ + values: make(map[string]interface{}), + position: pos, + } +} + +// TreeFromMap initializes a new Tree object using the given map. +func TreeFromMap(m map[string]interface{}) (*Tree, error) { + result, err := toTree(m) + if err != nil { + return nil, err + } + return result.(*Tree), nil +} + +// Position returns the position of the tree. +func (t *Tree) Position() Position { + return t.position +} + +// Has returns a boolean indicating if the given key exists. +func (t *Tree) Has(key string) bool { + if key == "" { + return false + } + return t.HasPath(strings.Split(key, ".")) +} + +// HasPath returns true if the given path of keys exists, false otherwise. +func (t *Tree) HasPath(keys []string) bool { + return t.GetPath(keys) != nil +} + +// Keys returns the keys of the toplevel tree (does not recurse). +func (t *Tree) Keys() []string { + keys := make([]string, len(t.values)) + i := 0 + for k := range t.values { + keys[i] = k + i++ + } + return keys +} + +// Get the value at key in the Tree. +// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. +// If you need to retrieve non-bare keys, use GetPath. +// Returns nil if the path does not exist in the tree. +// If keys is of length zero, the current tree is returned. +func (t *Tree) Get(key string) interface{} { + if key == "" { + return t + } + return t.GetPath(strings.Split(key, ".")) +} + +// GetPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. 
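+//
+// Editor's sketch (illustrative only, using the Load helper defined later in
+// this file):
+//
+//	tree, _ := Load("[server]\nhost = \"localhost\"")
+//	host := tree.GetPath([]string{"server", "host"}) // "localhost"
+//	same := tree.Get("server.host")                  // equivalent for bare keys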
+func (t *Tree) GetPath(keys []string) interface{} { + if len(keys) == 0 { + return t + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return nil + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return nil + } + subtree = node[len(node)-1] + default: + return nil // cannot navigate through other node types + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + return node.value + default: + return node + } +} + +// GetArray returns the value at key in the Tree. +// It returns []string, []int64, etc type if key has homogeneous lists +// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. +// Returns nil if the path does not exist in the tree. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetArray(key string) interface{} { + if key == "" { + return t + } + return t.GetArrayPath(strings.Split(key, ".")) +} + +// GetArrayPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetArrayPath(keys []string) interface{} { + if len(keys) == 0 { + return t + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return nil + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return nil + } + subtree = node[len(node)-1] + default: + return nil // cannot navigate through other node types + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + switch n := node.value.(type) { + case []interface{}: + return getArray(n) + default: + return node.value + } + default: + return node + } +} + +// if homogeneous array, then return slice type object over []interface{} +func getArray(n []interface{}) interface{} { + var s []string + var i64 []int64 + var f64 []float64 + var bl []bool + for _, value := range n { + switch v := value.(type) { + case string: + s = append(s, v) + case int64: + i64 = append(i64, v) + case float64: + f64 = append(f64, v) + case bool: + bl = append(bl, v) + default: + return n + } + } + if len(s) == len(n) { + return s + } else if len(i64) == len(n) { + return i64 + } else if len(f64) == len(n) { + return f64 + } else if len(bl) == len(n) { + return bl + } + return n +} + +// GetPosition returns the position of the given key. +func (t *Tree) GetPosition(key string) Position { + if key == "" { + return t.position + } + return t.GetPositionPath(strings.Split(key, ".")) +} + +// SetPositionPath sets the position of element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree position is set. 
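+//
+// Editor's sketch (illustrative only): positions let callers report where a
+// key was defined in the source document, e.g.:
+//
+//	tree, _ := Load("answer = 42")
+//	pos := tree.GetPosition("answer") // (1, 1)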
+func (t *Tree) SetPositionPath(keys []string, pos Position) { + if len(keys) == 0 { + t.position = pos + return + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return + } + subtree = node[len(node)-1] + default: + return + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + node.position = pos + return + case *Tree: + node.position = pos + return + case []*Tree: + // go to most recent element + if len(node) == 0 { + return + } + node[len(node)-1].position = pos + return + } +} + +// GetPositionPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetPositionPath(keys []string) Position { + if len(keys) == 0 { + return t.position + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return Position{0, 0} + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return Position{0, 0} + } + subtree = node[len(node)-1] + default: + return Position{0, 0} + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + return node.position + case *Tree: + return node.position + case []*Tree: + // go to most recent element + if len(node) == 0 { + return Position{0, 0} + } + return node[len(node)-1].position + default: + return Position{0, 0} + } +} + +// GetDefault works like Get but with a default value +func (t *Tree) GetDefault(key string, def interface{}) interface{} { + val := t.Get(key) + if val == nil { + return def + } + return val +} + +// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour. +// The default values within the struct are valid default options. +type SetOptions struct { + Comment string + Commented bool + Multiline bool +} + +// SetWithOptions is the same as Set, but allows you to provide formatting +// instructions to the key, that will be used by Marshal(). +func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { + t.SetPathWithOptions(strings.Split(key, "."), opts, value) +} + +// SetPathWithOptions is the same as SetPath, but allows you to provide +// formatting instructions to the key, that will be reused by Marshal(). 
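+//
+// Editor's sketch (illustrative only):
+//
+//	tree, _ := TreeFromMap(map[string]interface{}{})
+//	tree.SetPathWithOptions([]string{"db", "port"},
+//		SetOptions{Comment: "default PostgreSQL port"}, int64(5432))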
+func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { + subtree := t + for i, intermediateKey := range keys[:len(keys)-1] { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) + subtree.values[intermediateKey] = nextTree // add new element here + } + switch node := nextTree.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + // create element if it does not exist + node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) + subtree.values[intermediateKey] = node + } + subtree = node[len(node)-1] + } + } + + var toInsert interface{} + + switch v := value.(type) { + case *Tree: + v.comment = opts.Comment + v.commented = opts.Commented + toInsert = value + case []*Tree: + for i := range v { + v[i].commented = opts.Commented + } + toInsert = value + case *tomlValue: + v.comment = opts.Comment + v.commented = opts.Commented + v.multiline = opts.Multiline + toInsert = v + default: + toInsert = &tomlValue{value: value, + comment: opts.Comment, + commented: opts.Commented, + multiline: opts.Multiline, + position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}} + } + + subtree.values[keys[len(keys)-1]] = toInsert +} + +// Set an element in the tree. +// Key is a dot-separated path (e.g. a.b.c). +// Creates all necessary intermediate trees, if needed. +func (t *Tree) Set(key string, value interface{}) { + t.SetWithComment(key, "", false, value) +} + +// SetWithComment is the same as Set, but allows you to provide comment +// information to the key, that will be reused by Marshal(). +func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { + t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) +} + +// SetPath sets an element in the tree. +// Keys is an array of path elements (e.g. {"a","b","c"}). +// Creates all necessary intermediate trees, if needed. +func (t *Tree) SetPath(keys []string, value interface{}) { + t.SetPathWithComment(keys, "", false, value) +} + +// SetPathWithComment is the same as SetPath, but allows you to provide comment +// information to the key, that will be reused by Marshal(). +func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { + t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value) +} + +// Delete removes a key from the tree. +// Key is a dot-separated path (e.g. a.b.c). +func (t *Tree) Delete(key string) error { + keys, err := parseKey(key) + if err != nil { + return err + } + return t.DeletePath(keys) +} + +// DeletePath removes a key from the tree. +// Keys is an array of path elements (e.g. {"a","b","c"}). +func (t *Tree) DeletePath(keys []string) error { + keyLen := len(keys) + if keyLen == 1 { + delete(t.values, keys[0]) + return nil + } + tree := t.GetPath(keys[:keyLen-1]) + item := keys[keyLen-1] + switch node := tree.(type) { + case *Tree: + delete(node.values, item) + return nil + } + return errors.New("no such key to delete") +} + +// createSubTree takes a tree and a key and create the necessary intermediate +// subtrees to create a subtree at that point. In-place. +// +// e.g. 
passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] +// and tree[a][b][c] +// +// Returns nil on success, error object on failure +func (t *Tree) createSubTree(keys []string, pos Position) error { + subtree := t + for i, intermediateKey := range keys { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) + tree.position = pos + tree.inline = subtree.inline + subtree.values[intermediateKey] = tree + nextTree = tree + } + + switch node := nextTree.(type) { + case []*Tree: + subtree = node[len(node)-1] + case *Tree: + subtree = node + default: + return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", + strings.Join(keys, "."), intermediateKey, nextTree, nextTree) + } + } + return nil +} + +// LoadBytes creates a Tree from a []byte. +func LoadBytes(b []byte) (tree *Tree, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = errors.New(r.(string)) + } + }() + + if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) { + b = b[4:] + } else if len(b) >= 3 && hasUTF8BOM3(b) { + b = b[3:] + } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) { + b = b[2:] + } + + tree = parseToml(lexToml(b)) + return +} + +func hasUTF16BigEndianBOM2(b []byte) bool { + return b[0] == 0xFE && b[1] == 0xFF +} + +func hasUTF16LittleEndianBOM2(b []byte) bool { + return b[0] == 0xFF && b[1] == 0xFE +} + +func hasUTF8BOM3(b []byte) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +func hasUTF32BigEndianBOM4(b []byte) bool { + return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF +} + +func hasUTF32LittleEndianBOM4(b []byte) bool { + return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00 +} + +// LoadReader creates a Tree from any io.Reader. +func LoadReader(reader io.Reader) (tree *Tree, err error) { + inputBytes, err := ioutil.ReadAll(reader) + if err != nil { + return + } + tree, err = LoadBytes(inputBytes) + return +} + +// Load creates a Tree from a string. +func Load(content string) (tree *Tree, err error) { + return LoadBytes([]byte(content)) +} + +// LoadFile creates a Tree from a file. +func LoadFile(path string) (tree *Tree, err error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + return LoadReader(file) +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go new file mode 100644 index 000000000..80353500a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go @@ -0,0 +1,155 @@ +package toml + +import ( + "fmt" + "reflect" + "time" +) + +var kindToType = [reflect.String + 1]reflect.Type{ + reflect.Bool: reflect.TypeOf(true), + reflect.String: reflect.TypeOf(""), + reflect.Float32: reflect.TypeOf(float64(1)), + reflect.Float64: reflect.TypeOf(float64(1)), + reflect.Int: reflect.TypeOf(int64(1)), + reflect.Int8: reflect.TypeOf(int64(1)), + reflect.Int16: reflect.TypeOf(int64(1)), + reflect.Int32: reflect.TypeOf(int64(1)), + reflect.Int64: reflect.TypeOf(int64(1)), + reflect.Uint: reflect.TypeOf(uint64(1)), + reflect.Uint8: reflect.TypeOf(uint64(1)), + reflect.Uint16: reflect.TypeOf(uint64(1)), + reflect.Uint32: reflect.TypeOf(uint64(1)), + reflect.Uint64: reflect.TypeOf(uint64(1)), +} + +// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. 
+// supported values: +// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 +func typeFor(k reflect.Kind) reflect.Type { + if k > 0 && int(k) < len(kindToType) { + return kindToType[k] + } + return nil +} + +func simpleValueCoercion(object interface{}) (interface{}, error) { + switch original := object.(type) { + case string, bool, int64, uint64, float64, time.Time: + return original, nil + case int: + return int64(original), nil + case int8: + return int64(original), nil + case int16: + return int64(original), nil + case int32: + return int64(original), nil + case uint: + return uint64(original), nil + case uint8: + return uint64(original), nil + case uint16: + return uint64(original), nil + case uint32: + return uint64(original), nil + case float32: + return float64(original), nil + case fmt.Stringer: + return original.String(), nil + case []interface{}: + value := reflect.ValueOf(original) + length := value.Len() + arrayValue := reflect.MakeSlice(value.Type(), 0, length) + for i := 0; i < length; i++ { + val := value.Index(i).Interface() + simpleValue, err := simpleValueCoercion(val) + if err != nil { + return nil, err + } + arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) + } + return arrayValue.Interface(), nil + default: + return nil, fmt.Errorf("cannot convert type %T to Tree", object) + } +} + +func sliceToTree(object interface{}) (interface{}, error) { + // arrays are a bit tricky, since they can represent either a + // collection of simple values, which is represented by one + // *tomlValue, or an array of tables, which is represented by an + // array of *Tree. + + // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice + value := reflect.ValueOf(object) + insideType := value.Type().Elem() + length := value.Len() + if length > 0 { + insideType = reflect.ValueOf(value.Index(0).Interface()).Type() + } + if insideType.Kind() == reflect.Map { + // this is considered as an array of tables + tablesArray := make([]*Tree, 0, length) + for i := 0; i < length; i++ { + table := value.Index(i) + tree, err := toTree(table.Interface()) + if err != nil { + return nil, err + } + tablesArray = append(tablesArray, tree.(*Tree)) + } + return tablesArray, nil + } + + sliceType := typeFor(insideType.Kind()) + if sliceType == nil { + sliceType = insideType + } + + arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) + + for i := 0; i < length; i++ { + val := value.Index(i).Interface() + simpleValue, err := simpleValueCoercion(val) + if err != nil { + return nil, err + } + arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) + } + return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil +} + +func toTree(object interface{}) (interface{}, error) { + value := reflect.ValueOf(object) + + if value.Kind() == reflect.Map { + values := map[string]interface{}{} + keys := value.MapKeys() + for _, key := range keys { + if key.Kind() != reflect.String { + if _, ok := key.Interface().(string); !ok { + return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) + } + } + + v := value.MapIndex(key) + newValue, err := toTree(v.Interface()) + if err != nil { + return nil, err + } + values[key.String()] = newValue + } + return &Tree{values: values, position: Position{}}, nil + } + + if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { + return sliceToTree(object) + } + + simpleValue, 
err := simpleValueCoercion(object) + if err != nil { + return nil, err + } + return &tomlValue{value: simpleValue, position: Position{}}, nil +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go new file mode 100644 index 000000000..ae6dac49d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go @@ -0,0 +1,517 @@ +package toml + +import ( + "bytes" + "fmt" + "io" + "math" + "math/big" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type valueComplexity int + +const ( + valueSimple valueComplexity = iota + 1 + valueComplex +) + +type sortNode struct { + key string + complexity valueComplexity +} + +// Encodes a string to a TOML-compliant multi-line string value +// This function is a clone of the existing encodeTomlString function, except that whitespace characters +// are preserved. Quotation marks and backslashes are also not escaped. +func encodeMultilineTomlString(value string, commented string) string { + var b bytes.Buffer + adjacentQuoteCount := 0 + + b.WriteString(commented) + for i, rr := range value { + if rr != '"' { + adjacentQuoteCount = 0 + } else { + adjacentQuoteCount++ + } + switch rr { + case '\b': + b.WriteString(`\b`) + case '\t': + b.WriteString("\t") + case '\n': + b.WriteString("\n" + commented) + case '\f': + b.WriteString(`\f`) + case '\r': + b.WriteString("\r") + case '"': + if adjacentQuoteCount >= 3 || i == len(value)-1 { + adjacentQuoteCount = 0 + b.WriteString(`\"`) + } else { + b.WriteString(`"`) + } + case '\\': + b.WriteString(`\`) + default: + intRr := uint16(rr) + if intRr < 0x001F { + b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) + } else { + b.WriteRune(rr) + } + } + } + return b.String() +} + +// Encodes a string to a TOML-compliant string value +func encodeTomlString(value string) string { + var b bytes.Buffer + + for _, rr := range value { + switch rr { + case '\b': + b.WriteString(`\b`) + case '\t': + b.WriteString(`\t`) + case '\n': + b.WriteString(`\n`) + case '\f': + b.WriteString(`\f`) + case '\r': + b.WriteString(`\r`) + case '"': + b.WriteString(`\"`) + case '\\': + b.WriteString(`\\`) + default: + intRr := uint16(rr) + if intRr < 0x001F { + b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) + } else { + b.WriteRune(rr) + } + } + } + return b.String() +} + +func tomlTreeStringRepresentation(t *Tree, ord marshalOrder) (string, error) { + var orderedVals []sortNode + switch ord { + case OrderPreserve: + orderedVals = sortByLines(t) + default: + orderedVals = sortAlphabetical(t) + } + + var values []string + for _, node := range orderedVals { + k := node.key + v := t.values[k] + + repr, err := tomlValueStringRepresentation(v, "", "", ord, false) + if err != nil { + return "", err + } + values = append(values, quoteKeyIfNeeded(k)+" = "+repr) + } + return "{ " + strings.Join(values, ", ") + " }", nil +} + +func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord marshalOrder, arraysOneElementPerLine bool) (string, error) { + // this interface check is added to dereference the change made in the writeTo function. + // That change was made to allow this function to see formatting options. 
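+	// Editor's note: values reaching this function from writeToOrdered arrive
+	// wrapped in a *tomlValue (carrying comment/multiline metadata), while
+	// elements of slices arrive as plain values; both shapes are handled here.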
+ tv, ok := v.(*tomlValue) + if ok { + v = tv.value + } else { + tv = &tomlValue{} + } + + switch value := v.(type) { + case uint64: + return strconv.FormatUint(value, 10), nil + case int64: + return strconv.FormatInt(value, 10), nil + case float64: + // Default bit length is full 64 + bits := 64 + // Float panics if nan is used + if !math.IsNaN(value) { + // if 32 bit accuracy is enough to exactly show, use 32 + _, acc := big.NewFloat(value).Float32() + if acc == big.Exact { + bits = 32 + } + } + if math.Trunc(value) == value { + return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil + } + return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil + case string: + if tv.multiline { + return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil + } + return "\"" + encodeTomlString(value) + "\"", nil + case []byte: + b, _ := v.([]byte) + return string(b), nil + case bool: + if value { + return "true", nil + } + return "false", nil + case time.Time: + return value.Format(time.RFC3339), nil + case LocalDate: + return value.String(), nil + case LocalDateTime: + return value.String(), nil + case LocalTime: + return value.String(), nil + case *Tree: + return tomlTreeStringRepresentation(value, ord) + case nil: + return "", nil + } + + rv := reflect.ValueOf(v) + + if rv.Kind() == reflect.Slice { + var values []string + for i := 0; i < rv.Len(); i++ { + item := rv.Index(i).Interface() + itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine) + if err != nil { + return "", err + } + values = append(values, itemRepr) + } + if arraysOneElementPerLine && len(values) > 1 { + stringBuffer := bytes.Buffer{} + valueIndent := indent + ` ` // TODO: move that to a shared encoder state + + stringBuffer.WriteString("[\n") + + for _, value := range values { + stringBuffer.WriteString(valueIndent) + stringBuffer.WriteString(commented + value) + stringBuffer.WriteString(`,`) + stringBuffer.WriteString("\n") + } + + stringBuffer.WriteString(indent + commented + "]") + + return stringBuffer.String(), nil + } + return "[" + strings.Join(values, ", ") + "]", nil + } + return "", fmt.Errorf("unsupported value type %T: %v", v, v) +} + +func getTreeArrayLine(trees []*Tree) (line int) { + // get lowest line number that is not 0 + for _, tv := range trees { + if tv.position.Line < line || line == 0 { + line = tv.position.Line + } + } + return +} + +func sortByLines(t *Tree) (vals []sortNode) { + var ( + line int + lines []int + tv *Tree + tom *tomlValue + node sortNode + ) + vals = make([]sortNode, 0) + m := make(map[int]sortNode) + + for k := range t.values { + v := t.values[k] + switch v.(type) { + case *Tree: + tv = v.(*Tree) + line = tv.position.Line + node = sortNode{key: k, complexity: valueComplex} + case []*Tree: + line = getTreeArrayLine(v.([]*Tree)) + node = sortNode{key: k, complexity: valueComplex} + default: + tom = v.(*tomlValue) + line = tom.position.Line + node = sortNode{key: k, complexity: valueSimple} + } + lines = append(lines, line) + vals = append(vals, node) + m[line] = node + } + sort.Ints(lines) + + for i, line := range lines { + vals[i] = m[line] + } + + return vals +} + +func sortAlphabetical(t *Tree) (vals []sortNode) { + var ( + node sortNode + simpVals []string + compVals []string + ) + vals = make([]sortNode, 0) + m := make(map[string]sortNode) + + for k := range t.values { + v := t.values[k] + switch v.(type) { + case *Tree, []*Tree: + node = sortNode{key: k, complexity: valueComplex} + compVals = 
append(compVals, node.key) + default: + node = sortNode{key: k, complexity: valueSimple} + simpVals = append(simpVals, node.key) + } + vals = append(vals, node) + m[node.key] = node + } + + // Simples first to match previous implementation + sort.Strings(simpVals) + i := 0 + for _, key := range simpVals { + vals[i] = m[key] + i++ + } + + sort.Strings(compVals) + for _, key := range compVals { + vals[i] = m[key] + i++ + } + + return vals +} + +func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { + return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false) +} + +func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord marshalOrder, indentString string, parentCommented bool) (int64, error) { + var orderedVals []sortNode + + switch ord { + case OrderPreserve: + orderedVals = sortByLines(t) + default: + orderedVals = sortAlphabetical(t) + } + + for _, node := range orderedVals { + switch node.complexity { + case valueComplex: + k := node.key + v := t.values[k] + + combinedKey := quoteKeyIfNeeded(k) + if keyspace != "" { + combinedKey = keyspace + "." + combinedKey + } + + switch node := v.(type) { + // node has to be of those two types given how keys are sorted above + case *Tree: + tv, ok := t.values[k].(*Tree) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + if tv.comment != "" { + comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + + var commented string + if parentCommented || t.commented || tv.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, parentCommented || t.commented || tv.commented) + if err != nil { + return bytesCount, err + } + case []*Tree: + for _, subTree := range node { + var commented string + if parentCommented || t.commented || subTree.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + + bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, parentCommented || t.commented || subTree.commented) + if err != nil { + return bytesCount, err + } + } + } + default: // Simple + k := node.key + v, ok := t.values[k].(*tomlValue) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + + var commented string + if parentCommented || t.commented || v.commented { + commented = "# " + } + repr, err := tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + + if v.comment != "" { + comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + 
writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n") + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + + quotedKey := quoteKeyIfNeeded(k) + writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + } + } + + return bytesCount, nil +} + +// quote a key if it does not fit the bare key format (A-Za-z0-9_-) +// quoted keys use the same rules as strings +func quoteKeyIfNeeded(k string) string { + // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain + // keys that have already been quoted. + // not an ideal situation, but good enough of a stop gap. + if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { + return k + } + isBare := true + for _, r := range k { + if !isValidBareChar(r) { + isBare = false + break + } + } + if isBare { + return k + } + return quoteKey(k) +} + +func quoteKey(k string) string { + return "\"" + encodeTomlString(k) + "\"" +} + +func writeStrings(w io.Writer, s ...string) (int, error) { + var n int + for i := range s { + b, err := io.WriteString(w, s[i]) + n += b + if err != nil { + return n, err + } + } + return n, nil +} + +// WriteTo encode the Tree as Toml and writes it to the writer w. +// Returns the number of bytes written in case of success, or an error if anything happened. +func (t *Tree) WriteTo(w io.Writer) (int64, error) { + return t.writeTo(w, "", "", 0, false) +} + +// ToTomlString generates a human-readable representation of the current tree. +// Output spans multiple lines, and is suitable for ingest by a TOML parser. +// If the conversion cannot be performed, ToString returns a non-nil error. +func (t *Tree) ToTomlString() (string, error) { + b, err := t.Marshal() + if err != nil { + return "", err + } + return string(b), nil +} + +// String generates a human-readable representation of the current tree. +// Alias of ToString. Present to implement the fmt.Stringer interface. +func (t *Tree) String() string { + result, _ := t.ToTomlString() + return result +} + +// ToMap recursively generates a representation of the tree using Go built-in structures. +// The following types are used: +// +// * bool +// * float64 +// * int64 +// * string +// * uint64 +// * time.Time +// * map[string]interface{} (where interface{} is any of this list) +// * []interface{} (where interface{} is any of this list) +func (t *Tree) ToMap() map[string]interface{} { + result := map[string]interface{}{} + + for k, v := range t.values { + switch node := v.(type) { + case []*Tree: + var array []interface{} + for _, item := range node { + array = append(array, item.ToMap()) + } + result[k] = array + case *Tree: + result[k] = node.ToMap() + case *tomlValue: + result[k] = node.value + } + } + return result +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go new file mode 100644 index 000000000..2741ee2c8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apimachinery/pkg/apis/meta/v1 + +package internalversion // import "k8s.io/apimachinery/pkg/apis/meta/internalversion" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go new file mode 100644 index 000000000..ae39b74eb --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go @@ -0,0 +1,89 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "meta.k8s.io" + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// addToGroupVersion registers common meta types into schemas. +func addToGroupVersion(scheme *runtime.Scheme) error { + if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { + return err + } + // ListOptions is the only options struct which needs conversion (it exposes labels and fields + // as selectors for convenience). The other types have only a single representation today. 
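+	// (Editor's note: the ListOptions conversion functions live in this
+	// package's zz_generated.conversion.go and are registered through the
+	// same localSchemeBuilder in that file's init.)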
+ scheme.AddKnownTypes(SchemeGroupVersion, + &ListOptions{}, + &metav1.GetOptions{}, + &metav1.ExportOptions{}, + &metav1.DeleteOptions{}, + &metav1.CreateOptions{}, + &metav1.UpdateOptions{}, + ) + scheme.AddKnownTypes(SchemeGroupVersion, + &metav1.Table{}, + &metav1.TableOptions{}, + &metav1beta1.PartialObjectMetadata{}, + &metav1beta1.PartialObjectMetadataList{}, + ) + if err := metav1beta1.AddMetaToScheme(scheme); err != nil { + return err + } + if err := metav1.AddMetaToScheme(scheme); err != nil { + return err + } + // Allow delete options to be decoded across all version in this scheme (we may want to be more clever than this) + scheme.AddUnversionedTypes(SchemeGroupVersion, + &metav1.DeleteOptions{}, + &metav1.CreateOptions{}, + &metav1.UpdateOptions{}) + + metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion) + if err := metav1beta1.RegisterConversions(scheme); err != nil { + return err + } + return nil +} + +// Unlike other API groups, meta internal knows about all meta external versions, but keeps +// the logic for conversion private. +func init() { + localSchemeBuilder.Register(addToGroupVersion) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go new file mode 100644 index 000000000..a45fa2a8a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme // import "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go new file mode 100644 index 000000000..472a9aeb2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + "k8s.io/apimachinery/pkg/apis/meta/internalversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// Scheme is the registry for any type that adheres to the meta API spec. +var scheme = runtime.NewScheme() + +// Codecs provides access to encoding and decoding for the scheme. 
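+//
+// Editor's sketch (illustrative only; `data` is a hypothetical byte slice
+// holding a serialized meta API object):
+//
+//	codec := Codecs.LegacyCodec(internalversion.SchemeGroupVersion)
+//	obj, err := runtime.Decode(codec, data)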
+var Codecs = serializer.NewCodecFactory(scheme) + +// ParameterCodec handles versioning of objects that are converted to query parameters. +var ParameterCodec = runtime.NewParameterCodec(scheme) + +// Unlike other API groups, meta internal knows about all meta external versions, but keeps +// the logic for conversion private. +func init() { + utilruntime.Must(internalversion.AddToScheme(scheme)) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go new file mode 100644 index 000000000..a49b5f2be --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go @@ -0,0 +1,80 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ListOptions is the query options to a standard REST list call. +type ListOptions struct { + metav1.TypeMeta + + // A selector based on labels + LabelSelector labels.Selector + // A selector based on fields + FieldSelector fields.Selector + // If true, watch for changes to this list + Watch bool + // allowWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // If this is not a watch, this field is ignored. + // If the feature gate WatchBookmarks is not enabled in apiserver, + // this field is ignored. + AllowWatchBookmarks bool + // resourceVersion sets a constraint on what resource versions a request may be served from. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for + // details. + ResourceVersion string + // resourceVersionMatch determines how resourceVersion is applied to list calls. + // It is highly recommended that resourceVersionMatch be set for list calls where + // resourceVersion is set. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for + // details. + ResourceVersionMatch metav1.ResourceVersionMatch + + // Timeout for the list/watch call. + TimeoutSeconds *int64 + // Limit specifies the maximum number of results to return from the server. The server may + // not support this field on all resource types, but if it does and more results remain it + // will set the continue field on the returned list object. + Limit int64 + // Continue is a token returned by the server that lets a client retrieve chunks of results + // from the server by specifying limit. 
The server may reject requests for continuation tokens + // it does not recognize and will return a 410 error if the token can no longer be used because + // it has expired. + Continue string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// List holds a list of objects, which may not be known by the server. +type List struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []runtime.Object +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go new file mode 100644 index 000000000..a9b28f244 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -0,0 +1,145 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package internalversion + +import ( + unsafe "unsafe" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*List)(nil), (*v1.List)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_internalversion_List_To_v1_List(a.(*List), b.(*v1.List), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.List)(nil), (*List)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_List_To_internalversion_List(a.(*v1.List), b.(*List), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ListOptions)(nil), (*v1.ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_internalversion_ListOptions_To_v1_ListOptions(a.(*ListOptions), b.(*v1.ListOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ListOptions)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ListOptions_To_internalversion_ListOptions(a.(*v1.ListOptions), b.(*ListOptions), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_internalversion_List_To_v1_List is an autogenerated conversion function. 
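+//
+// Editor's note (illustrative): generated conversions such as this one are
+// normally invoked through a *runtime.Scheme that has them registered (see
+// RegisterConversions above) rather than called directly:
+//
+//	var out v1.List
+//	err := s.Convert(&internalList, &out, nil) // s: hypothetical *runtime.Scheme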
+func Convert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error { + return autoConvert_internalversion_List_To_v1_List(in, out, s) +} + +func autoConvert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_List_To_internalversion_List is an autogenerated conversion function. +func Convert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error { + return autoConvert_v1_List_To_internalversion_List(in, out, s) +} + +func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error { + if err := v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.Watch = in.Watch + out.AllowWatchBookmarks = in.AllowWatchBookmarks + out.ResourceVersion = in.ResourceVersion + out.ResourceVersionMatch = v1.ResourceVersionMatch(in.ResourceVersionMatch) + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + out.Limit = in.Limit + out.Continue = in.Continue + return nil +} + +// Convert_internalversion_ListOptions_To_v1_ListOptions is an autogenerated conversion function. +func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error { + return autoConvert_internalversion_ListOptions_To_v1_ListOptions(in, out, s) +} + +func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error { + if err := v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.Watch = in.Watch + out.AllowWatchBookmarks = in.AllowWatchBookmarks + out.ResourceVersion = in.ResourceVersion + out.ResourceVersionMatch = v1.ResourceVersionMatch(in.ResourceVersionMatch) + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + out.Limit = in.Limit + out.Continue = in.Continue + return nil +} + +// Convert_v1_ListOptions_To_internalversion_ListOptions is an autogenerated conversion function. +func Convert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error { + return autoConvert_v1_ListOptions_To_internalversion_ListOptions(in, out, s) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go new file mode 100644 index 000000000..d5e4fc680 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -0,0 +1,96 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package internalversion + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *List) DeepCopyInto(out *List) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if (*in)[i] != nil { + (*out)[i] = (*in)[i].DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. +func (in *List) DeepCopy() *List { + if in == nil { + return nil + } + out := new(List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListOptions) DeepCopyInto(out *ListOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.LabelSelector != nil { + out.LabelSelector = in.LabelSelector.DeepCopySelector() + } + if in.FieldSelector != nil { + out.FieldSelector = in.FieldSelector.DeepCopySelector() + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions. +func (in *ListOptions) DeepCopy() *ListOptions { + if in == nil { + return nil + } + out := new(ListOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ListOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go new file mode 100644 index 000000000..5cac6fba5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "unsafe" + + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" +) + +// Convert_Slice_string_To_v1beta1_IncludeObjectPolicy allows converting a URL query parameter value +func Convert_Slice_string_To_v1beta1_IncludeObjectPolicy(in *[]string, out *IncludeObjectPolicy, s conversion.Scope) error { + if len(*in) > 0 { + *out = IncludeObjectPolicy((*in)[0]) + } + return nil +} + +// Convert_v1beta1_PartialObjectMetadataList_To_v1_PartialObjectMetadataList allows converting PartialObjectMetadataList between versions +func Convert_v1beta1_PartialObjectMetadataList_To_v1_PartialObjectMetadataList(in *PartialObjectMetadataList, out *v1.PartialObjectMetadataList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.PartialObjectMetadata)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_PartialObjectMetadataList_To_v1beta1_PartialObjectMetadataList allows converting PartialObjectMetadataList between versions +func Convert_v1_PartialObjectMetadataList_To_v1beta1_PartialObjectMetadataList(in *v1.PartialObjectMetadataList, out *PartialObjectMetadataList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.PartialObjectMetadata)(unsafe.Pointer(&in.Items)) + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go new file mode 100644 index 000000000..2b7e8ca0b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go new file mode 100644 index 000000000..20c9d2ec7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=meta.k8s.io + +package v1beta1 // import "k8s.io/apimachinery/pkg/apis/meta/v1beta1" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go new file mode 100644 index 000000000..cd5fc9026 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -0,0 +1,415 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } +func (*PartialObjectMetadataList) ProtoMessage() {} +func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { + return fileDescriptor_90ec10f86b91f9a8, []int{0} +} +func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadataList.Merge(m, src) +} +func (m *PartialObjectMetadataList) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadataList) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m) +} + +var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_90ec10f86b91f9a8) +} + +var fileDescriptor_90ec10f86b91f9a8 = []byte{ + // 317 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x4b, 0xf3, 0x30, + 0x1c, 0xc6, 0x9b, 0xf7, 0x65, 0x38, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xf3, 0xb0, + 0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0xa2, 0xec, 0x28, 0x1e, 0x4c, 0xbb, 0xbf, 0x5d, 0xac, + 0x69, 0x4a, 0xf2, 0xef, 0xc0, 0x9b, 0x1f, 0xc1, 0x8f, 0xb5, 0xe3, 0x8e, 0x03, 0x61, 0xb8, 0xf8, + 0x45, 0x24, 0x5d, 0x15, 0x19, 0x0a, 0xbb, 0xf5, 0x79, 0xca, 0xef, 0x97, 0x27, 0x24, 0x1c, 0xa7, + 0x67, 0x96, 0x49, 0xcd, 0xd3, 0x22, 0x02, 0x93, 0x01, 0x82, 0xe5, 0x33, 0xc8, 0x26, 0xda, 0xf0, + 0xea, 0x87, 0xc8, 0xa5, 0x12, 0xf1, 0x54, 0x66, 0x60, 0x9e, 0x79, 0x9e, 0x26, 0xbe, 0xb0, 0x5c, + 0x01, 0x0a, 0x3e, 0x1b, 0x44, 0x80, 0x62, 0xc0, 0x13, 0xc8, 0xc0, 0x08, 0x84, 0x09, 0xcb, 0x8d, + 0x46, 0xdd, 0x3c, 0xde, 0xa0, 0xec, 0x27, 0xca, 0xf2, 0x34, 0xf1, 0x85, 0x65, 0x1e, 0x65, 0x15, + 0xda, 0xee, 0x27, 0x12, 0xa7, 0x45, 0xc4, 0x62, 0xad, 0x78, 0xa2, 0x13, 0xcd, 0x4b, 0x43, 0x54, + 0x3c, 0x94, 0xa9, 0x0c, 0xe5, 0xd7, 0xc6, 0xdc, 0x3e, 0xd9, 0x65, 0xd4, 0xf6, 0x9e, 0xf6, 0xe9, + 0x5f, 0x94, 0x29, 0x32, 0x94, 0x0a, 0xb8, 0x8d, 0xa7, 0xa0, 0xc4, 0x36, 0x77, 0xf4, 0x46, 0xc2, + 0xc3, 0x1b, 0x61, 0x50, 0x8a, 0xa7, 0xeb, 0xe8, 0x11, 0x62, 0xbc, 0x02, 0x14, 0x13, 0x81, 0xe2, + 0x52, 0x5a, 0x6c, 0xde, 0x85, 0x75, 0x55, 0xe5, 0xd6, 0xbf, 0x2e, 0xe9, 0x35, 0x86, 0x8c, 0xed, + 0x72, 0x71, 0xe6, 0x69, 0x6f, 0x1a, 0x1d, 0xcc, 0x57, 0x9d, 0xc0, 0xad, 0x3a, 0xf5, 0xaf, 0x66, + 0xfc, 0x6d, 0x6c, 0xde, 0x87, 0x35, 0x89, 0xa0, 0x6c, 0x8b, 0x74, 0xff, 0xf7, 0x1a, 0xc3, 0xf3, + 0xdd, 0xd4, 0xbf, 0xae, 0x1d, 0xed, 0x57, 0xe7, 0xd4, 0x2e, 0xbc, 0x71, 0xbc, 0x11, 0x8f, 0xfa, + 0xf3, 0x35, 0x0d, 0x16, 0x6b, 0x1a, 0x2c, 0xd7, 0x34, 0x78, 0x71, 0x94, 0xcc, 0x1d, 0x25, 0x0b, + 0x47, 0xc9, 0xd2, 0x51, 0xf2, 0xee, 0x28, 0x79, 0xfd, 0xa0, 0xc1, 0xed, 0x5e, 0xf5, 0x52, 0x9f, + 0x01, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x82, 0x5b, 0x80, 0x29, 0x02, 0x00, 0x00, +} + +func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PartialObjectMetadataList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PartialObjectMetadataList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PartialObjectMetadata{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PartialObjectMetadataList{`, + `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, v1.PartialObjectMetadata{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto new file mode 100644 index 000000000..a209dd456 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -0,0 +1,41 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.apimachinery.pkg.apis.meta.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// PartialObjectMetadataList contains a list of objects containing only their metadata. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadataList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; + + // items contains each of the included items. + repeated k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go new file mode 100644 index 000000000..a4a412e33 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "meta.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// AddMetaToScheme registers base meta types into schemas. +func AddMetaToScheme(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Table{}, + &TableOptions{}, + &PartialObjectMetadata{}, + &PartialObjectMetadataList{}, + ) + + return nil +} + +// RegisterConversions adds conversion functions to the given scheme. 
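+// A minimal wiring sketch (illustrative only; the scheme variable below is a
+// caller-owned assumption, not part of this package):
+//
+//    scheme := runtime.NewScheme()
+//    if err := AddMetaToScheme(scheme); err != nil {
+//        panic(err)
+//    }
+//    if err := RegisterConversions(scheme); err != nil {
+//        panic(err)
+//    }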
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*PartialObjectMetadataList)(nil), (*v1.PartialObjectMetadataList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_PartialObjectMetadataList_To_v1_PartialObjectMetadataList(a.(*PartialObjectMetadataList), b.(*v1.PartialObjectMetadataList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.PartialObjectMetadataList)(nil), (*PartialObjectMetadataList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PartialObjectMetadataList_To_v1beta1_PartialObjectMetadataList(a.(*v1.PartialObjectMetadataList), b.(*PartialObjectMetadataList), scope) + }); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go new file mode 100644 index 000000000..f16170a37 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go @@ -0,0 +1,84 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package v1beta1 is alpha objects from meta that will be introduced. +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Table is a tabular representation of a set of API resources. The server transforms the +// object into a set of preferred columns for quickly reviewing the objects. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +protobuf=false +type Table = v1.Table + +// TableColumnDefinition contains information about a column returned in the Table. +// +protobuf=false +type TableColumnDefinition = v1.TableColumnDefinition + +// TableRow is an individual row in a table. +// +protobuf=false +type TableRow = v1.TableRow + +// TableRowCondition allows a row to be marked with additional information. +// +protobuf=false +type TableRowCondition = v1.TableRowCondition + +type RowConditionType = v1.RowConditionType + +type ConditionStatus = v1.ConditionStatus + +type IncludeObjectPolicy = v1.IncludeObjectPolicy + +// TableOptions are used when a Table is requested by the caller. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type TableOptions = v1.TableOptions + +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadata = v1.PartialObjectMetadata + +// IMPORTANT: PartialObjectMetadataList has different protobuf field ids in v1beta1 than +// v1 because ListMeta was accidentally omitted prior to 1.15. Therefore this type must +// remain independent of v1.PartialObjectMetadataList to preserve mappings. + +// PartialObjectMetadataList contains a list of objects containing only their metadata. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadataList struct { + v1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"` + + // items contains each of the included items. + Items []v1.PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` +} + +const ( + RowCompleted = v1.RowCompleted + + ConditionTrue = v1.ConditionTrue + ConditionFalse = v1.ConditionFalse + ConditionUnknown = v1.ConditionUnknown + + IncludeNone = v1.IncludeNone + IncludeMetadata = v1.IncludeMetadata + IncludeObject = v1.IncludeObject +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..ef7e7c1e9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_PartialObjectMetadataList = map[string]string{ + "": "PartialObjectMetadataList contains a list of objects containing only their metadata.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "items contains each of the included items.", +} + +func (PartialObjectMetadataList) SwaggerDoc() map[string]string { + return map_PartialObjectMetadataList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..89053b981 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,59 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.PartialObjectMetadata, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList. +func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList { + if in == nil { + return nil + } + out := new(PartialObjectMetadataList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go new file mode 100644 index 000000000..73e63fc11 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/doc.go b/vendor/k8s.io/apimachinery/pkg/util/version/doc.go new file mode 100644 index 000000000..5b2b22b6d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/version/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version provides utilities for version number comparisons +package version // import "k8s.io/apimachinery/pkg/util/version" diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go new file mode 100644 index 000000000..8c997ec45 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go @@ -0,0 +1,325 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "bytes" + "fmt" + "regexp" + "strconv" + "strings" +) + +// Version is an opaque representation of a version number +type Version struct { + components []uint + semver bool + preRelease string + buildMetadata string +} + +var ( + // versionMatchRE splits a version string into numeric and "extra" parts + versionMatchRE = regexp.MustCompile(`^\s*v?([0-9]+(?:\.[0-9]+)*)(.*)*$`) + // extraMatchRE splits the "extra" part of versionMatchRE into semver pre-release and build metadata; it does not validate the "no leading zeroes" constraint for pre-release + extraMatchRE = regexp.MustCompile(`^(?:-([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?\s*$`) +) + +func parse(str string, semver bool) (*Version, error) { + parts := versionMatchRE.FindStringSubmatch(str) + if parts == nil { + return nil, fmt.Errorf("could not parse %q as version", str) + } + numbers, extra := parts[1], parts[2] + + components := strings.Split(numbers, ".") + if (semver && len(components) != 3) || (!semver && len(components) < 2) { + return nil, fmt.Errorf("illegal version string %q", str) + } + + v := &Version{ + components: make([]uint, len(components)), + semver: semver, + } + for i, comp := range components { + if (i == 0 || semver) && strings.HasPrefix(comp, "0") && comp != "0" { + return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str) + } + num, err := strconv.ParseUint(comp, 10, 0) + if err != nil { + return nil, fmt.Errorf("illegal non-numeric version component %q in %q: %v", comp, str, err) + } + v.components[i] = uint(num) + } + + if semver && extra != "" { + extraParts := extraMatchRE.FindStringSubmatch(extra) + if extraParts == nil { + return nil, fmt.Errorf("could not parse pre-release/metadata (%s) in version %q", extra, str) + } + v.preRelease, v.buildMetadata = extraParts[1], extraParts[2] + + for _, comp := range strings.Split(v.preRelease, ".") { + if _, err := strconv.ParseUint(comp, 10, 0); err == nil { + if strings.HasPrefix(comp, "0") && comp != "0" { + return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str) + } + } + 
} + } + + return v, nil +} + +// ParseGeneric parses a "generic" version string. The version string must consist of two +// or more dot-separated numeric fields (the first of which can't have leading zeroes), +// followed by arbitrary uninterpreted data (which need not be separated from the final +// numeric field by punctuation). For convenience, leading and trailing whitespace is +// ignored, and the version can be preceded by the letter "v". See also ParseSemantic. +func ParseGeneric(str string) (*Version, error) { + return parse(str, false) +} + +// MustParseGeneric is like ParseGeneric except that it panics on error +func MustParseGeneric(str string) *Version { + v, err := ParseGeneric(str) + if err != nil { + panic(err) + } + return v +} + +// ParseSemantic parses a version string that exactly obeys the syntax and semantics of +// the "Semantic Versioning" specification (http://semver.org/) (although it ignores +// leading and trailing whitespace, and allows the version to be preceded by "v"). For +// version strings that are not guaranteed to obey the Semantic Versioning syntax, use +// ParseGeneric. +func ParseSemantic(str string) (*Version, error) { + return parse(str, true) +} + +// MustParseSemantic is like ParseSemantic except that it panics on error +func MustParseSemantic(str string) *Version { + v, err := ParseSemantic(str) + if err != nil { + panic(err) + } + return v +} + +// Major returns the major release number +func (v *Version) Major() uint { + return v.components[0] +} + +// Minor returns the minor release number +func (v *Version) Minor() uint { + return v.components[1] +} + +// Patch returns the patch release number if v is a Semantic Version, or 0 +func (v *Version) Patch() uint { + if len(v.components) < 3 { + return 0 + } + return v.components[2] +} + +// BuildMetadata returns the build metadata, if v is a Semantic Version, or "" +func (v *Version) BuildMetadata() string { + return v.buildMetadata +} + +// PreRelease returns the prerelease metadata, if v is a Semantic Version, or "" +func (v *Version) PreRelease() string { + return v.preRelease +} + +// Components returns the version number components +func (v *Version) Components() []uint { + return v.components +} + +// WithMajor returns copy of the version object with requested major number +func (v *Version) WithMajor(major uint) *Version { + result := *v + result.components = []uint{major, v.Minor(), v.Patch()} + return &result +} + +// WithMinor returns copy of the version object with requested minor number +func (v *Version) WithMinor(minor uint) *Version { + result := *v + result.components = []uint{v.Major(), minor, v.Patch()} + return &result +} + +// WithPatch returns copy of the version object with requested patch number +func (v *Version) WithPatch(patch uint) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), patch} + return &result +} + +// WithPreRelease returns copy of the version object with requested prerelease +func (v *Version) WithPreRelease(preRelease string) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), v.Patch()} + result.preRelease = preRelease + return &result +} + +// WithBuildMetadata returns copy of the version object with requested buildMetadata +func (v *Version) WithBuildMetadata(buildMetadata string) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), v.Patch()} + result.buildMetadata = buildMetadata + return &result +} + +// String converts a Version back to a string; note that for versions parsed 
with +// ParseGeneric, this will not include the trailing uninterpreted portion of the version +// number. +func (v *Version) String() string { + if v == nil { + return "<nil>" + } + var buffer bytes.Buffer + + for i, comp := range v.components { + if i > 0 { + buffer.WriteString(".") + } + buffer.WriteString(fmt.Sprintf("%d", comp)) + } + if v.preRelease != "" { + buffer.WriteString("-") + buffer.WriteString(v.preRelease) + } + if v.buildMetadata != "" { + buffer.WriteString("+") + buffer.WriteString(v.buildMetadata) + } + + return buffer.String() +} + +// compareInternal returns -1 if v is less than other, 1 if it is greater than other, or 0 +// if they are equal +func (v *Version) compareInternal(other *Version) int { + + vLen := len(v.components) + oLen := len(other.components) + for i := 0; i < vLen && i < oLen; i++ { + switch { + case other.components[i] < v.components[i]: + return 1 + case other.components[i] > v.components[i]: + return -1 + } + } + + // If components are common but one has more items and they are not zeros, it is bigger + switch { + case oLen < vLen && !onlyZeros(v.components[oLen:]): + return 1 + case oLen > vLen && !onlyZeros(other.components[vLen:]): + return -1 + } + + if !v.semver || !other.semver { + return 0 + } + + switch { + case v.preRelease == "" && other.preRelease != "": + return 1 + case v.preRelease != "" && other.preRelease == "": + return -1 + case v.preRelease == other.preRelease: // includes case where both are "" + return 0 + } + + vPR := strings.Split(v.preRelease, ".") + oPR := strings.Split(other.preRelease, ".") + for i := 0; i < len(vPR) && i < len(oPR); i++ { + vNum, err := strconv.ParseUint(vPR[i], 10, 0) + if err == nil { + oNum, err := strconv.ParseUint(oPR[i], 10, 0) + if err == nil { + switch { + case oNum < vNum: + return 1 + case oNum > vNum: + return -1 + default: + continue + } + } + } + if oPR[i] < vPR[i] { + return 1 + } else if oPR[i] > vPR[i] { + return -1 + } + } + + switch { + case len(oPR) < len(vPR): + return 1 + case len(oPR) > len(vPR): + return -1 + } + + return 0 +} + +// returns false if array contain any non-zero element +func onlyZeros(array []uint) bool { + for _, num := range array { + if num != 0 { + return false + } + } + return true +} + +// AtLeast tests if a version is at least equal to a given minimum version. If both +// Versions are Semantic Versions, this will use the Semantic Version comparison +// algorithm. Otherwise, it will compare only the numeric components, with non-present +// components being considered "0" (ie, "1.4" is equal to "1.4.0"). +func (v *Version) AtLeast(min *Version) bool { + return v.compareInternal(min) != -1 +} + +// LessThan tests if a version is less than a given version. (It is exactly the opposite +// of AtLeast, for situations where asking "is v too old?" makes more sense than asking +// "is v new enough?".) +func (v *Version) LessThan(other *Version) bool { + return v.compareInternal(other) == -1 +} + +// Compare compares v against a version string (which will be parsed as either Semantic +// or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if +// it is greater than other, or 0 if they are equal. 
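+//
+// A short usage sketch (illustrative only):
+//
+//    v := MustParseSemantic("1.17.3")
+//    cmp, err := v.Compare("1.18.0") // cmp == -1 when err == nil
+//    if err == nil && cmp < 0 && v.AtLeast(MustParseSemantic("1.16.0")) {
+//        // v is older than 1.18.0 but at least 1.16.0
+//    }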
+func (v *Version) Compare(other string) (int, error) { + ov, err := parse(other, v.semver) + if err != nil { + return 0, err + } + return v.compareInternal(ov), nil +} diff --git a/vendor/k8s.io/client-go/metadata/interface.go b/vendor/k8s.io/client-go/metadata/interface.go new file mode 100644 index 000000000..127c39501 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/interface.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadata + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" +) + +// Interface allows a caller to get the metadata (in the form of PartialObjectMetadata objects) +// from any Kubernetes compatible resource API. +type Interface interface { + Resource(resource schema.GroupVersionResource) Getter +} + +// ResourceInterface contains the set of methods that may be invoked on objects by their metadata. +// Update is not supported by the server, but Patch can be used for the actions Update would handle. +type ResourceInterface interface { + Delete(ctx context.Context, name string, options metav1.DeleteOptions, subresources ...string) error + DeleteCollection(ctx context.Context, options metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(ctx context.Context, name string, options metav1.GetOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) + List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) +} + +// Getter handles both namespaced and non-namespaced resource types consistently. +type Getter interface { + Namespace(string) ResourceInterface + ResourceInterface +} diff --git a/vendor/k8s.io/client-go/metadata/metadata.go b/vendor/k8s.io/client-go/metadata/metadata.go new file mode 100644 index 000000000..72b557991 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadata.go @@ -0,0 +1,307 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metadata + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "k8s.io/klog/v2" + + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" +) + +var deleteScheme = runtime.NewScheme() +var parameterScheme = runtime.NewScheme() +var deleteOptionsCodec = serializer.NewCodecFactory(deleteScheme) +var dynamicParameterCodec = runtime.NewParameterCodec(parameterScheme) + +var versionV1 = schema.GroupVersion{Version: "v1"} + +func init() { + metav1.AddToGroupVersion(parameterScheme, versionV1) + metav1.AddToGroupVersion(deleteScheme, versionV1) +} + +// Client allows callers to retrieve the object metadata for any +// Kubernetes-compatible API endpoint. The client uses the +// meta.k8s.io/v1 PartialObjectMetadata resource to more efficiently +// retrieve just the necessary metadata, but on older servers +// (Kubernetes 1.14 and before) will retrieve the object and then +// convert the metadata. +type Client struct { + client *rest.RESTClient +} + +var _ Interface = &Client{} + +// ConfigFor returns a copy of the provided config with the +// appropriate metadata client defaults set. +func ConfigFor(inConfig *rest.Config) *rest.Config { + config := rest.CopyConfig(inConfig) + config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + config.ContentType = "application/vnd.kubernetes.protobuf" + config.NegotiatedSerializer = metainternalversionscheme.Codecs.WithoutConversion() + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + return config +} + +// NewForConfigOrDie creates a new metadata client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) Interface { + ret, err := NewForConfig(c) + if err != nil { + panic(err) + } + return ret +} + +// NewForConfig creates a new metadata client that can retrieve object +// metadata details about any Kubernetes object (core, aggregated, or custom +// resource based) in the form of PartialObjectMetadata objects, or returns +// an error. +func NewForConfig(inConfig *rest.Config) (Interface, error) { + config := ConfigFor(inConfig) + // for serializing the options + config.GroupVersion = &schema.GroupVersion{} + config.APIPath = "/this-value-should-never-be-sent" + + restClient, err := rest.RESTClientFor(config) + if err != nil { + return nil, err + } + + return &Client{client: restClient}, nil +} + +type client struct { + client *Client + namespace string + resource schema.GroupVersionResource +} + +// Resource returns an interface that can access cluster or namespace +// scoped instances of resource. +func (c *Client) Resource(resource schema.GroupVersionResource) Getter { + return &client{client: c, resource: resource} +} + +// Namespace returns an interface that can access namespace-scoped instances of the +// provided resource. +func (c *client) Namespace(ns string) ResourceInterface { + ret := *c + ret.namespace = ns + return &ret +} + +// Delete removes the provided resource from the server. 
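+//
+// A minimal sketch (illustrative only; mc is assumed to be an Interface
+// returned by NewForConfig, and ctx a caller-supplied context.Context):
+//
+//    pods := mc.Resource(schema.GroupVersionResource{Version: "v1", Resource: "pods"})
+//    err := pods.Namespace("default").Delete(ctx, "example-pod", metav1.DeleteOptions{})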
+func (c *client) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error { + if len(name) == 0 { + return fmt.Errorf("name is required") + } + deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) + if err != nil { + return err + } + + result := c.client.client. + Delete(). + AbsPath(append(c.makeURLSegments(name), subresources...)...). + Body(deleteOptionsByte). + Do(ctx) + return result.Error() +} + +// DeleteCollection triggers deletion of all resources in the specified scope (namespace or cluster). +func (c *client) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error { + deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) + if err != nil { + return err + } + + result := c.client.client. + Delete(). + AbsPath(c.makeURLSegments("")...). + Body(deleteOptionsByte). + SpecificallyVersionedParams(&listOptions, dynamicParameterCodec, versionV1). + Do(ctx) + return result.Error() +} + +// Get returns the resource with name from the specified scope (namespace or cluster). +func (c *client) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) { + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } + result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + obj, err := result.Get() + if runtime.IsNotRegisteredError(err) { + klog.V(5).Infof("Unable to retrieve PartialObjectMetadata: %#v", err) + rawBytes, err := result.Raw() + if err != nil { + return nil, err + } + var partial metav1.PartialObjectMetadata + if err := json.Unmarshal(rawBytes, &partial); err != nil { + return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadata: %v", err) + } + if !isLikelyObjectMetadata(&partial) { + return nil, fmt.Errorf("object does not appear to match the ObjectMeta schema: %#v", partial) + } + partial.TypeMeta = metav1.TypeMeta{} + return &partial, nil + } + if err != nil { + return nil, err + } + partial, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj) + } + return partial, nil +} + +// List returns all resources within the specified scope (namespace or cluster). +func (c *client) List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) { + result := c.client.client.Get().AbsPath(c.makeURLSegments("")...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). 
+ Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + obj, err := result.Get() + if runtime.IsNotRegisteredError(err) { + klog.V(5).Infof("Unable to retrieve PartialObjectMetadataList: %#v", err) + rawBytes, err := result.Raw() + if err != nil { + return nil, err + } + var partial metav1.PartialObjectMetadataList + if err := json.Unmarshal(rawBytes, &partial); err != nil { + return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadataList: %v", err) + } + partial.TypeMeta = metav1.TypeMeta{} + return &partial, nil + } + if err != nil { + return nil, err + } + partial, ok := obj.(*metav1.PartialObjectMetadataList) + if !ok { + return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj) + } + return partial, nil +} + +// Watch finds all changes to the resources in the specified scope (namespace or cluster). +func (c *client) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.client.Get(). + AbsPath(c.makeURLSegments("")...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Timeout(timeout). + Watch(ctx) +} + +// Patch modifies the named resource in the specified scope (namespace or cluster). +func (c *client) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) { + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } + result := c.client.client. + Patch(pt). + AbsPath(append(c.makeURLSegments(name), subresources...)...). + Body(data). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). 
+ Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + obj, err := result.Get() + if runtime.IsNotRegisteredError(err) { + rawBytes, err := result.Raw() + if err != nil { + return nil, err + } + var partial metav1.PartialObjectMetadata + if err := json.Unmarshal(rawBytes, &partial); err != nil { + return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadata: %v", err) + } + if !isLikelyObjectMetadata(&partial) { + return nil, fmt.Errorf("object does not appear to match the ObjectMeta schema") + } + partial.TypeMeta = metav1.TypeMeta{} + return &partial, nil + } + if err != nil { + return nil, err + } + partial, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj) + } + return partial, nil +} + +func (c *client) makeURLSegments(name string) []string { + url := []string{} + if len(c.resource.Group) == 0 { + url = append(url, "api") + } else { + url = append(url, "apis", c.resource.Group) + } + url = append(url, c.resource.Version) + + if len(c.namespace) > 0 { + url = append(url, "namespaces", c.namespace) + } + url = append(url, c.resource.Resource) + + if len(name) > 0 { + url = append(url, name) + } + + return url +} + +func isLikelyObjectMetadata(meta *metav1.PartialObjectMetadata) bool { + return len(meta.UID) > 0 || !meta.CreationTimestamp.IsZero() || len(meta.Name) > 0 || len(meta.GenerateName) > 0 +} diff --git a/vendor/k8s.io/client-go/restmapper/category_expansion.go b/vendor/k8s.io/client-go/restmapper/category_expansion.go new file mode 100644 index 000000000..484e4c839 --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/category_expansion.go @@ -0,0 +1,119 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" +) + +// CategoryExpander maps category strings to GroupResources. +// Categories are classification or 'tag' of a group of resources. +type CategoryExpander interface { + Expand(category string) ([]schema.GroupResource, bool) +} + +// SimpleCategoryExpander implements CategoryExpander interface +// using a static mapping of categories to GroupResource mapping. +type SimpleCategoryExpander struct { + Expansions map[string][]schema.GroupResource +} + +// Expand fulfills CategoryExpander +func (e SimpleCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + ret, ok := e.Expansions[category] + return ret, ok +} + +// discoveryCategoryExpander struct lets a REST Client wrapper (discoveryClient) to retrieve list of APIResourceList, +// and then convert to fallbackExpander +type discoveryCategoryExpander struct { + discoveryClient discovery.DiscoveryInterface +} + +// NewDiscoveryCategoryExpander returns a category expander that makes use of the "categories" fields from +// the API, found through the discovery client. 
In case of any error or no category found (which likely +// means we're at a cluster prior to categories support, fallback to the expander provided. +func NewDiscoveryCategoryExpander(client discovery.DiscoveryInterface) CategoryExpander { + if client == nil { + panic("Please provide discovery client to shortcut expander") + } + return discoveryCategoryExpander{discoveryClient: client} +} + +// Expand fulfills CategoryExpander +func (e discoveryCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + // Get all supported resources for groups and versions from server, if no resource found, fallback anyway. + _, apiResourceLists, _ := e.discoveryClient.ServerGroupsAndResources() + if len(apiResourceLists) == 0 { + return nil, false + } + + discoveredExpansions := map[string][]schema.GroupResource{} + for _, apiResourceList := range apiResourceLists { + gv, err := schema.ParseGroupVersion(apiResourceList.GroupVersion) + if err != nil { + continue + } + // Collect GroupVersions by categories + for _, apiResource := range apiResourceList.APIResources { + if categories := apiResource.Categories; len(categories) > 0 { + for _, category := range categories { + groupResource := schema.GroupResource{ + Group: gv.Group, + Resource: apiResource.Name, + } + discoveredExpansions[category] = append(discoveredExpansions[category], groupResource) + } + } + } + } + + ret, ok := discoveredExpansions[category] + return ret, ok +} + +// UnionCategoryExpander implements CategoryExpander interface. +// It maps given category string to union of expansions returned by all the CategoryExpanders in the list. +type UnionCategoryExpander []CategoryExpander + +// Expand fulfills CategoryExpander +func (u UnionCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + ret := []schema.GroupResource{} + ok := false + + // Expand the category for each CategoryExpander in the list and merge/combine the results. + for _, expansion := range u { + curr, currOk := expansion.Expand(category) + + for _, currGR := range curr { + found := false + for _, existing := range ret { + if existing == currGR { + found = true + break + } + } + if !found { + ret = append(ret, currGR) + } + } + ok = ok || currOk + } + + return ret, ok +} diff --git a/vendor/k8s.io/client-go/restmapper/discovery.go b/vendor/k8s.io/client-go/restmapper/discovery.go new file mode 100644 index 000000000..19ae95e1b --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/discovery.go @@ -0,0 +1,338 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "fmt" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + + "k8s.io/klog/v2" +) + +// APIGroupResources is an API group with a mapping of versions to +// resources. 
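+//
+// Normally populated via GetAPIGroupResources below rather than constructed by
+// hand; a short sketch (illustrative only, assuming a
+// discovery.DiscoveryInterface named dc):
+//
+//    groupResources, err := GetAPIGroupResources(dc)
+//    if err != nil {
+//        panic(err)
+//    }
+//    mapper := NewDiscoveryRESTMapper(groupResources)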
+type APIGroupResources struct { + Group metav1.APIGroup + // A mapping of version string to a slice of APIResources for + // that version. + VersionedResources map[string][]metav1.APIResource +} + +// NewDiscoveryRESTMapper returns a PriorityRESTMapper based on the discovered +// groups and resources passed in. +func NewDiscoveryRESTMapper(groupResources []*APIGroupResources) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + + var groupPriority []string + // /v1 is special. It should always come first + resourcePriority := []schema.GroupVersionResource{{Group: "", Version: "v1", Resource: meta.AnyResource}} + kindPriority := []schema.GroupVersionKind{{Group: "", Version: "v1", Kind: meta.AnyKind}} + + for _, group := range groupResources { + groupPriority = append(groupPriority, group.Group.Name) + + // Make sure the preferred version comes first + if len(group.Group.PreferredVersion.Version) != 0 { + preferred := group.Group.PreferredVersion.Version + if _, ok := group.VersionedResources[preferred]; ok { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Kind: meta.AnyKind, + }) + } + } + + for _, discoveryVersion := range group.Group.Versions { + resources, ok := group.VersionedResources[discoveryVersion.Version] + if !ok { + continue + } + + // Add non-preferred versions after the preferred version, in case there are resources that only exist in those versions + if discoveryVersion.Version != group.Group.PreferredVersion.Version { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Kind: meta.AnyKind, + }) + } + + gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} + versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}) + + for _, resource := range resources { + scope := meta.RESTScopeNamespace + if !resource.Namespaced { + scope = meta.RESTScopeRoot + } + + // if we have a slash, then this is a subresource and we shouldn't create mappings for those. + if strings.Contains(resource.Name, "/") { + continue + } + + plural := gv.WithResource(resource.Name) + singular := gv.WithResource(resource.SingularName) + // this is for legacy resources and servers which don't list singular forms. For those we must still guess. 
+ if len(resource.SingularName) == 0 { + _, singular = meta.UnsafeGuessKindToResource(gv.WithKind(resource.Kind)) + } + + versionMapper.AddSpecific(gv.WithKind(strings.ToLower(resource.Kind)), plural, singular, scope) + versionMapper.AddSpecific(gv.WithKind(resource.Kind), plural, singular, scope) + // TODO this is producing unsafe guesses that don't actually work, but it matches previous behavior + versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope) + } + // TODO why is this type not in discovery (at least for "v1") + versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot) + unionMapper = append(unionMapper, versionMapper) + } + } + + for _, group := range groupPriority { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group, + Version: meta.AnyVersion, + Resource: meta.AnyResource, + }) + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group, + Version: meta.AnyVersion, + Kind: meta.AnyKind, + }) + } + + return meta.PriorityRESTMapper{ + Delegate: unionMapper, + ResourcePriority: resourcePriority, + KindPriority: kindPriority, + } +} + +// GetAPIGroupResources uses the provided discovery client to gather +// discovery information and populate a slice of APIGroupResources. +func GetAPIGroupResources(cl discovery.DiscoveryInterface) ([]*APIGroupResources, error) { + gs, rs, err := cl.ServerGroupsAndResources() + if rs == nil || gs == nil { + return nil, err + // TODO track the errors and update callers to handle partial errors. + } + rsm := map[string]*metav1.APIResourceList{} + for _, r := range rs { + rsm[r.GroupVersion] = r + } + + var result []*APIGroupResources + for _, group := range gs { + groupResources := &APIGroupResources{ + Group: *group, + VersionedResources: make(map[string][]metav1.APIResource), + } + for _, version := range group.Versions { + resources, ok := rsm[version.GroupVersion] + if !ok { + continue + } + groupResources.VersionedResources[version.Version] = resources.APIResources + } + result = append(result, groupResources) + } + return result, nil +} + +// DeferredDiscoveryRESTMapper is a RESTMapper that will defer +// initialization of the RESTMapper until the first mapping is +// requested. +type DeferredDiscoveryRESTMapper struct { + initMu sync.Mutex + delegate meta.RESTMapper + cl discovery.CachedDiscoveryInterface +} + +// NewDeferredDiscoveryRESTMapper returns a +// DeferredDiscoveryRESTMapper that will lazily query the provided +// client for discovery information to do REST mappings. +func NewDeferredDiscoveryRESTMapper(cl discovery.CachedDiscoveryInterface) *DeferredDiscoveryRESTMapper { + return &DeferredDiscoveryRESTMapper{ + cl: cl, + } +} + +func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { + d.initMu.Lock() + defer d.initMu.Unlock() + + if d.delegate != nil { + return d.delegate, nil + } + + groupResources, err := GetAPIGroupResources(d.cl) + if err != nil { + return nil, err + } + + d.delegate = NewDiscoveryRESTMapper(groupResources) + return d.delegate, err +} + +// Reset resets the internally cached Discovery information and will +// cause the next mapping request to re-discover. +func (d *DeferredDiscoveryRESTMapper) Reset() { + klog.V(5).Info("Invalidating discovery information") + + d.initMu.Lock() + defer d.initMu.Unlock() + + d.cl.Invalidate() + d.delegate = nil +} + +// KindFor takes a partial resource and returns back the single match. +// It returns an error if there are multiple matches. 
+func (d *DeferredDiscoveryRESTMapper) KindFor(resource schema.GroupVersionResource) (gvk schema.GroupVersionKind, err error) {
+	del, err := d.getDelegate()
+	if err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+	gvk, err = del.KindFor(resource)
+	if err != nil && !d.cl.Fresh() {
+		d.Reset()
+		gvk, err = d.KindFor(resource)
+	}
+	return
+}
+
+// KindsFor takes a partial resource and returns back the list of
+// potential kinds in priority order.
+func (d *DeferredDiscoveryRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvks []schema.GroupVersionKind, err error) {
+	del, err := d.getDelegate()
+	if err != nil {
+		return nil, err
+	}
+	gvks, err = del.KindsFor(resource)
+	if len(gvks) == 0 && !d.cl.Fresh() {
+		d.Reset()
+		gvks, err = d.KindsFor(resource)
+	}
+	return
+}
+
+// ResourceFor takes a partial resource and returns back the single
+// match. It returns an error if there are multiple matches.
+func (d *DeferredDiscoveryRESTMapper) ResourceFor(input schema.GroupVersionResource) (gvr schema.GroupVersionResource, err error) {
+	del, err := d.getDelegate()
+	if err != nil {
+		return schema.GroupVersionResource{}, err
+	}
+	gvr, err = del.ResourceFor(input)
+	if err != nil && !d.cl.Fresh() {
+		d.Reset()
+		gvr, err = d.ResourceFor(input)
+	}
+	return
+}
+
+// ResourcesFor takes a partial resource and returns back the list of
+// potential resources in priority order.
+func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input schema.GroupVersionResource) (gvrs []schema.GroupVersionResource, err error) {
+	del, err := d.getDelegate()
+	if err != nil {
+		return nil, err
+	}
+	gvrs, err = del.ResourcesFor(input)
+	if len(gvrs) == 0 && !d.cl.Fresh() {
+		d.Reset()
+		gvrs, err = d.ResourcesFor(input)
+	}
+	return
+}
+
+// RESTMapping identifies a preferred resource mapping for the
+// provided group kind.
+func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (m *meta.RESTMapping, err error) {
+	del, err := d.getDelegate()
+	if err != nil {
+		return nil, err
+	}
+	m, err = del.RESTMapping(gk, versions...)
+	if err != nil && !d.cl.Fresh() {
+		d.Reset()
+		m, err = d.RESTMapping(gk, versions...)
+	}
+	return
+}
+
+// RESTMappings returns the RESTMappings for the provided group kind
+// in a rough internal preferred order. If no kind is found, it will
+// return a NoResourceMatchError.
+func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) (ms []*meta.RESTMapping, err error) {
+	del, err := d.getDelegate()
+	if err != nil {
+		return nil, err
+	}
+	ms, err = del.RESTMappings(gk, versions...)
+	if len(ms) == 0 && !d.cl.Fresh() {
+		d.Reset()
+		ms, err = d.RESTMappings(gk, versions...)
+	}
+	return
+}
+
+// ResourceSingularizer converts a resource name from plural to
+// singular (e.g., from pods to pod).
+func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + del, err := d.getDelegate() + if err != nil { + return resource, err + } + singular, err = del.ResourceSingularizer(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + singular, err = d.ResourceSingularizer(resource) + } + return +} + +func (d *DeferredDiscoveryRESTMapper) String() string { + del, err := d.getDelegate() + if err != nil { + return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err) + } + return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del) +} + +// Make sure it satisfies the interface +var _ meta.RESTMapper = &DeferredDiscoveryRESTMapper{} diff --git a/vendor/k8s.io/client-go/restmapper/shortcut.go b/vendor/k8s.io/client-go/restmapper/shortcut.go new file mode 100644 index 000000000..73b317c1c --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/shortcut.go @@ -0,0 +1,172 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "strings" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" +) + +// shortcutExpander is a RESTMapper that can be used for Kubernetes resources. 
It expands the resource first, then invokes the wrapped RESTMapper.
+type shortcutExpander struct {
+	RESTMapper meta.RESTMapper
+
+	discoveryClient discovery.DiscoveryInterface
+}
+
+var _ meta.RESTMapper = &shortcutExpander{}
+
+// NewShortcutExpander wraps a restmapper in a layer that expands shortcuts found via discovery
+func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) meta.RESTMapper {
+	return shortcutExpander{RESTMapper: delegate, discoveryClient: client}
+}
+
+// KindFor fulfills meta.RESTMapper
+func (e shortcutExpander) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	return e.RESTMapper.KindFor(e.expandResourceShortcut(resource))
+}
+
+// KindsFor fulfills meta.RESTMapper
+func (e shortcutExpander) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
+	return e.RESTMapper.KindsFor(e.expandResourceShortcut(resource))
+}
+
+// ResourcesFor fulfills meta.RESTMapper
+func (e shortcutExpander) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+	return e.RESTMapper.ResourcesFor(e.expandResourceShortcut(resource))
+}
+
+// ResourceFor fulfills meta.RESTMapper
+func (e shortcutExpander) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	return e.RESTMapper.ResourceFor(e.expandResourceShortcut(resource))
+}
+
+// ResourceSingularizer fulfills meta.RESTMapper
+func (e shortcutExpander) ResourceSingularizer(resource string) (string, error) {
+	return e.RESTMapper.ResourceSingularizer(e.expandResourceShortcut(schema.GroupVersionResource{Resource: resource}).Resource)
+}
+
+// RESTMapping fulfills meta.RESTMapper
+func (e shortcutExpander) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
+	return e.RESTMapper.RESTMapping(gk, versions...)
+}
+
+// RESTMappings fulfills meta.RESTMapper
+func (e shortcutExpander) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) {
+	return e.RESTMapper.RESTMappings(gk, versions...)
+}
+
+// getShortcutMappings returns a set of tuples which holds short names for resources.
+// First the list of potential resources will be taken from the API server.
+// Next we will append the hardcoded list of resources - to be backward compatible with old servers.
+// NOTE that the list is ordered by group priority.
+func (e shortcutExpander) getShortcutMappings() ([]*metav1.APIResourceList, []resourceShortcuts, error) {
+	res := []resourceShortcuts{}
+	// get server resources
+	// This can return an error *and* the results it was able to find. We don't need to fail on the error.
+	_, apiResList, err := e.discoveryClient.ServerGroupsAndResources()
+	if err != nil {
+		klog.V(1).Infof("Error loading discovery information: %v", err)
+	}
+	for _, apiResources := range apiResList {
+		gv, err := schema.ParseGroupVersion(apiResources.GroupVersion)
+		if err != nil {
+			klog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error())
+			continue
+		}
+		for _, apiRes := range apiResources.APIResources {
+			for _, shortName := range apiRes.ShortNames {
+				rs := resourceShortcuts{
+					ShortForm: schema.GroupResource{Group: gv.Group, Resource: shortName},
+					LongForm:  schema.GroupResource{Group: gv.Group, Resource: apiRes.Name},
+				}
+				res = append(res, rs)
+			}
+		}
+	}
+
+	return apiResList, res, nil
+}
+
+// expandResourceShortcut will return the expanded version of resource
+// (something that a pkg/api/meta.RESTMapper can understand), if it is
+// indeed a shortcut. If no match has been found, we will match on group prefixing.
+// Lastly we will return resource unmodified.
+func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionResource) schema.GroupVersionResource {
+	// get the shortcut mappings and return on first match.
+	if allResources, shortcutResources, err := e.getShortcutMappings(); err == nil {
+		// avoid expanding if there's an exact match to a full resource name
+		for _, apiResources := range allResources {
+			gv, err := schema.ParseGroupVersion(apiResources.GroupVersion)
+			if err != nil {
+				continue
+			}
+			if len(resource.Group) != 0 && resource.Group != gv.Group {
+				continue
+			}
+			for _, apiRes := range apiResources.APIResources {
+				if resource.Resource == apiRes.Name {
+					return resource
+				}
+				if resource.Resource == apiRes.SingularName {
+					return resource
+				}
+			}
+		}
+
+		for _, item := range shortcutResources {
+			if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group {
+				continue
+			}
+			if resource.Resource == item.ShortForm.Resource {
+				resource.Resource = item.LongForm.Resource
+				resource.Group = item.LongForm.Group
+				return resource
+			}
+		}
+
+		// we didn't find an exact match, so match on group prefixing. This allows a group prefix like autoscal to match autoscaling
+		if len(resource.Group) == 0 {
+			return resource
+		}
+		for _, item := range shortcutResources {
+			if !strings.HasPrefix(item.ShortForm.Group, resource.Group) {
+				continue
+			}
+			if resource.Resource == item.ShortForm.Resource {
+				resource.Resource = item.LongForm.Resource
+				resource.Group = item.LongForm.Group
+				return resource
+			}
+		}
+	}
+
+	return resource
+}
+
+// resourceShortcuts represents a structure that holds the information on how to
+// transition from a resource's shortcut to its full name.
+type resourceShortcuts struct { + ShortForm schema.GroupResource + LongForm schema.GroupResource +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4173a1382..fe003674d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,6 +1,8 @@ # github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm +# github.com/BurntSushi/toml v0.3.1 +github.com/BurntSushi/toml # github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd github.com/MakeNowJust/heredoc # github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 @@ -33,6 +35,8 @@ github.com/PuerkitoBio/purell github.com/PuerkitoBio/urlesc # github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d github.com/Shopify/logrus-bugsnag +# github.com/alessio/shellescape v1.2.2 +github.com/alessio/shellescape # github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a github.com/asaskevich/govalidator # github.com/beorn7/perks v1.0.1 @@ -187,6 +191,10 @@ github.com/docker/libtrust # github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 github.com/docker/spdystream github.com/docker/spdystream/spdy +# github.com/evanphx/json-patch v4.9.0+incompatible +github.com/evanphx/json-patch +# github.com/evanphx/json-patch/v5 v5.1.0 +github.com/evanphx/json-patch/v5 # github.com/fsnotify/fsnotify v1.4.9 github.com/fsnotify/fsnotify # github.com/garyburd/redigo v1.6.0 @@ -286,6 +294,8 @@ github.com/konsorten/go-windows-terminal-sequences github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter +# github.com/mattn/go-isatty v0.0.12 +github.com/mattn/go-isatty # github.com/mattn/go-sqlite3 v1.10.0 ## explicit github.com/mattn/go-sqlite3 @@ -383,6 +393,8 @@ github.com/operator-framework/api/pkg/validation/internal # github.com/otiai10/copy v1.2.0 ## explicit github.com/otiai10/copy +# github.com/pelletier/go-toml v1.8.1 +github.com/pelletier/go-toml # github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 ## explicit github.com/phayes/freeport @@ -685,9 +697,12 @@ k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/api/validation k8s.io/apimachinery/pkg/api/validation/path +k8s.io/apimachinery/pkg/apis/meta/internalversion +k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured k8s.io/apimachinery/pkg/apis/meta/v1/validation +k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/conversion/queryparams k8s.io/apimachinery/pkg/fields @@ -719,6 +734,7 @@ k8s.io/apimachinery/pkg/util/sets k8s.io/apimachinery/pkg/util/strategicpatch k8s.io/apimachinery/pkg/util/validation k8s.io/apimachinery/pkg/util/validation/field +k8s.io/apimachinery/pkg/util/version k8s.io/apimachinery/pkg/util/wait k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/version @@ -783,6 +799,7 @@ k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 +k8s.io/client-go/metadata k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 @@ -790,6 +807,7 @@ k8s.io/client-go/pkg/version k8s.io/client-go/plugin/pkg/client/auth/exec k8s.io/client-go/rest k8s.io/client-go/rest/watch 
+k8s.io/client-go/restmapper k8s.io/client-go/tools/auth k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/clientcmd/api @@ -837,7 +855,48 @@ k8s.io/utils/trace sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client # sigs.k8s.io/controller-runtime v0.8.0 +## explicit +sigs.k8s.io/controller-runtime/pkg/client +sigs.k8s.io/controller-runtime/pkg/client/apiutil sigs.k8s.io/controller-runtime/pkg/scheme +# sigs.k8s.io/kind v0.10.0 +## explicit +sigs.k8s.io/kind/pkg/apis/config/defaults +sigs.k8s.io/kind/pkg/apis/config/v1alpha4 +sigs.k8s.io/kind/pkg/cluster +sigs.k8s.io/kind/pkg/cluster/constants +sigs.k8s.io/kind/pkg/cluster/internal/create +sigs.k8s.io/kind/pkg/cluster/internal/create/actions +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready +sigs.k8s.io/kind/pkg/cluster/internal/delete +sigs.k8s.io/kind/pkg/cluster/internal/kubeadm +sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig +sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig +sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer +sigs.k8s.io/kind/pkg/cluster/internal/logs +sigs.k8s.io/kind/pkg/cluster/internal/patch +sigs.k8s.io/kind/pkg/cluster/internal/providers +sigs.k8s.io/kind/pkg/cluster/internal/providers/common +sigs.k8s.io/kind/pkg/cluster/internal/providers/docker +sigs.k8s.io/kind/pkg/cluster/internal/providers/podman +sigs.k8s.io/kind/pkg/cluster/nodes +sigs.k8s.io/kind/pkg/cluster/nodeutils +sigs.k8s.io/kind/pkg/cmd +sigs.k8s.io/kind/pkg/cmd/kind/version +sigs.k8s.io/kind/pkg/errors +sigs.k8s.io/kind/pkg/exec +sigs.k8s.io/kind/pkg/fs +sigs.k8s.io/kind/pkg/internal/apis/config +sigs.k8s.io/kind/pkg/internal/apis/config/encoding +sigs.k8s.io/kind/pkg/internal/cli +sigs.k8s.io/kind/pkg/internal/env +sigs.k8s.io/kind/pkg/log # sigs.k8s.io/structured-merge-diff/v4 v4.0.2 sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.2.0 diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go new file mode 100644 index 000000000..b3464c655 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -0,0 +1,151 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package apiutil contains utilities for working with raw Kubernetes +// API machinery, such as creating RESTMappers and raw REST clients, +// and extracting the GVK of an object. 
+package apiutil
+
+import (
+	"fmt"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/discovery"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/restmapper"
+)
+
+var (
+	protobufScheme     = runtime.NewScheme()
+	protobufSchemeLock sync.RWMutex
+)
+
+func init() {
+	// Currently only enabled for built-in resources which are guaranteed to implement Protocol Buffers.
+	// For custom resources, CRDs cannot support Protocol Buffers but Aggregated API can.
+	// See doc: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility
+	if err := clientgoscheme.AddToScheme(protobufScheme); err != nil {
+		panic(err)
+	}
+}
+
+// AddToProtobufScheme adds the given SchemeBuilder into protobufScheme, which should
+// be additional types that do support protobuf.
+func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error {
+	protobufSchemeLock.Lock()
+	defer protobufSchemeLock.Unlock()
+	return addToScheme(protobufScheme)
+}
+
+// NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery
+// information fetched by a new client with the given config.
+func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) {
+	// Get a mapper
+	dc, err := discovery.NewDiscoveryClientForConfig(c)
+	if err != nil {
+		return nil, err
+	}
+	gr, err := restmapper.GetAPIGroupResources(dc)
+	if err != nil {
+		return nil, err
+	}
+	return restmapper.NewDiscoveryRESTMapper(gr), nil
+}
+
+// GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK.
+func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) {
+	// TODO(directxman12): do we want to generalize this to arbitrary container types?
+	// I think we'd need a generalized form of scheme or something.
It's a + // shame there's not a reliable "GetGVK" interface that works by default + // for unpopulated static types and populated "dynamic" types + // (unstructured, partial, etc) + + // check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds + _, isPartial := obj.(*metav1.PartialObjectMetadata) + _, isPartialList := obj.(*metav1.PartialObjectMetadataList) + if isPartial || isPartialList { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingVersionErr("unstructured object has no version") + } + return gvk, nil + } + + gvks, isUnversioned, err := scheme.ObjectKinds(obj) + if err != nil { + return schema.GroupVersionKind{}, err + } + if isUnversioned { + return schema.GroupVersionKind{}, fmt.Errorf("cannot create group-version-kind for unversioned type %T", obj) + } + + if len(gvks) < 1 { + return schema.GroupVersionKind{}, fmt.Errorf("no group-version-kinds associated with type %T", obj) + } + if len(gvks) > 1 { + // this should only trigger for things like metav1.XYZ -- + // normal versioned types should be fine + return schema.GroupVersionKind{}, fmt.Errorf( + "multiple group-version-kinds associated with type %T, refusing to guess at one", obj) + } + return gvks[0], nil +} + +// RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated +// with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from +// baseConfig, if set, otherwise a default serializer will be set. +func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { + cfg := createRestConfig(gvk, isUnstructured, baseConfig) + if cfg.NegotiatedSerializer == nil { + cfg.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: codecs} + } + return rest.RESTClientFor(cfg) +} + +//createRestConfig copies the base config and updates needed fields for a new rest config +func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config) *rest.Config { + gv := gvk.GroupVersion() + + cfg := rest.CopyConfig(baseConfig) + cfg.GroupVersion = &gv + if gvk.Group == "" { + cfg.APIPath = "/api" + } else { + cfg.APIPath = "/apis" + } + if cfg.UserAgent == "" { + cfg.UserAgent = rest.DefaultKubernetesUserAgent() + } + // TODO(FillZpp): In the long run, we want to check discovery or something to make sure that this is actually true. + if cfg.ContentType == "" && !isUnstructured { + protobufSchemeLock.RLock() + if protobufScheme.Recognizes(gvk) { + cfg.ContentType = runtime.ContentTypeProtobuf + } + protobufSchemeLock.RUnlock() + } + return cfg +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go new file mode 100644 index 000000000..5e9a7b5f5 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go @@ -0,0 +1,285 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiutil
+
+import (
+	"errors"
+	"sync"
+
+	"golang.org/x/time/rate"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/restmapper"
+)
+
+// dynamicRESTMapper is a RESTMapper that dynamically discovers resource
+// types at runtime.
+type dynamicRESTMapper struct {
+	mu           sync.RWMutex // protects the following fields
+	staticMapper meta.RESTMapper
+	limiter      *rate.Limiter
+	newMapper    func() (meta.RESTMapper, error)
+
+	lazy bool
+	// Used for lazy init.
+	initOnce sync.Once
+}
+
+// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper
+type DynamicRESTMapperOption func(*dynamicRESTMapper) error
+
+// WithLimiter sets the RESTMapper's underlying limiter to lim.
+func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption {
+	return func(drm *dynamicRESTMapper) error {
+		drm.limiter = lim
+		return nil
+	}
+}
+
+// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings
+// until an API call is made.
+var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error {
+	drm.lazy = true
+	return nil
+}
+
+// WithCustomMapper supports setting a custom RESTMapper refresher instead of
+// the default method, which uses a discovery client.
+//
+// This exists mainly for testing, but can be useful if you need tighter control
+// over how discovery is performed, which discovery endpoints are queried, etc.
+func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption {
+	return func(drm *dynamicRESTMapper) error {
+		drm.newMapper = newMapper
+		return nil
+	}
+}
+
+// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic
+// RESTMapper dynamically discovers resource types at runtime. opts
+// configure the RESTMapper.
+func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) {
+	client, err := discovery.NewDiscoveryClientForConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+	drm := &dynamicRESTMapper{
+		limiter: rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize),
+		newMapper: func() (meta.RESTMapper, error) {
+			groupResources, err := restmapper.GetAPIGroupResources(client)
+			if err != nil {
+				return nil, err
+			}
+			return restmapper.NewDiscoveryRESTMapper(groupResources), nil
+		},
+	}
+	for _, opt := range opts {
+		if err = opt(drm); err != nil {
+			return nil, err
+		}
+	}
+	if !drm.lazy {
+		if err := drm.setStaticMapper(); err != nil {
+			return nil, err
+		}
+	}
+	return drm, nil
+}
+
+var (
+	// defaultRefillRate is the default rate at which potential calls are
+	// added back to the "bucket" of allowed calls.
+	defaultRefillRate = 5
+	// defaultLimitSize is the default starting/max number of potential calls
+	// per second. Once a call is used, it's added back to the bucket at a rate
+	// of defaultRefillRate per second.
+	defaultLimitSize = 5
+)
+
+// setStaticMapper sets drm's staticMapper by querying its client, regardless
+// of reload backoff.
+func (drm *dynamicRESTMapper) setStaticMapper() error {
+	newMapper, err := drm.newMapper()
+	if err != nil {
+		return err
+	}
+	drm.staticMapper = newMapper
+	return nil
+}
+
+// init initializes drm only once if drm is lazy.
+func (drm *dynamicRESTMapper) init() (err error) {
+	drm.initOnce.Do(func() {
+		if drm.lazy {
+			err = drm.setStaticMapper()
+		}
+	})
+	return err
+}
+
+// checkAndReload attempts to call the given callback, which is assumed to be dependent
+// on the data in the restmapper.
+//
+// If the callback returns an error that matches the given error, it will attempt to reload
+// the RESTMapper's data and re-call the callback once that's occurred.
+// If the callback returns any other error, the function will return immediately regardless.
+//
+// It will take care of ensuring that reloads are rate-limited and that extraneous calls
+// aren't made. If a reload would exceed the limiter's rate, it returns the error returned by
+// the callback.
+// It's thread-safe, and handles thread-safety for the callback (so the callback does
+// not need to attempt to lock the restmapper).
+func (drm *dynamicRESTMapper) checkAndReload(needsReloadErr error, checkNeedsReload func() error) error {
+	// first, check the common path -- data is fresh enough
+	// (use an IIFE for the lock's defer)
+	err := func() error {
+		drm.mu.RLock()
+		defer drm.mu.RUnlock()
+
+		return checkNeedsReload()
+	}()
+
+	// NB(directxman12): `Is` and `As` have a confusing relationship --
+	// `Is` is like `== or does this implement .Is`, whereas `As` says
+	// `can I type-assert into`
+	needsReload := errors.As(err, &needsReloadErr)
+	if !needsReload {
+		return err
+	}
+
+	// if the data wasn't fresh, we'll need to try and update it, so grab the lock...
+	drm.mu.Lock()
+	defer drm.mu.Unlock()
+
+	// ... and double-check that we didn't reload in the meantime
+	err = checkNeedsReload()
+	needsReload = errors.As(err, &needsReloadErr)
+	if !needsReload {
+		return err
+	}
+
+	// we're still stale, so grab a rate-limit token if we can...
+	if !drm.limiter.Allow() {
+		// return the error from the static mapper here; we have refreshed often enough (exceeding the rate of the provided limiter)
+		// so that clients can handle this the same way as a "normal" NoResourceMatchError / NoKindMatchError
+		return err
+	}
+
+	// ...reload...
+	if err := drm.setStaticMapper(); err != nil {
+		return err
+	}
+
+	// ...and return the results of the closure regardless
+	return checkNeedsReload()
+}
+
+// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors.
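+
+// A minimal usage sketch (illustrative only, not part of the vendored API's
+// own documentation; cfg is an assumed *rest.Config supplied by the caller):
+//
+//	mapper, err := NewDynamicRESTMapper(cfg, WithLazyDiscovery)
+//	if err != nil {
+//		// handle error
+//	}
+//	// Discovery runs on first use; stale results trigger a rate-limited reload.
+//	gvk, err := mapper.KindFor(schema.GroupVersionResource{Resource: "deployments"})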
+ +func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionKind{}, err + } + var gvk schema.GroupVersionKind + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvk, err = drm.staticMapper.KindFor(resource) + return err + }) + return gvk, err +} + +func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvks []schema.GroupVersionKind + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvks, err = drm.staticMapper.KindsFor(resource) + return err + }) + return gvks, err +} + +func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionResource{}, err + } + + var gvr schema.GroupVersionResource + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvr, err = drm.staticMapper.ResourceFor(input) + return err + }) + return gvr, err +} + +func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvrs []schema.GroupVersionResource + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvrs, err = drm.staticMapper.ResourcesFor(input) + return err + }) + return gvrs, err +} + +func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + if err := drm.init(); err != nil { + return nil, err + } + var mapping *meta.RESTMapping + err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { + var err error + mapping, err = drm.staticMapper.RESTMapping(gk, versions...) + return err + }) + return mapping, err +} + +func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + if err := drm.init(); err != nil { + return nil, err + } + var mappings []*meta.RESTMapping + err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { + var err error + mappings, err = drm.staticMapper.RESTMappings(gk, versions...) + return err + }) + return mappings, err +} + +func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) { + if err := drm.init(); err != nil { + return "", err + } + var singular string + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + singular, err = drm.staticMapper.ResourceSingularizer(resource) + return err + }) + return singular, err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go new file mode 100644 index 000000000..0af814fdf --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -0,0 +1,264 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/metadata" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// Options are creation options for a Client +type Options struct { + // Scheme, if provided, will be used to map go structs to GroupVersionKinds + Scheme *runtime.Scheme + + // Mapper, if provided, will be used to map GroupVersionKinds to Resources + Mapper meta.RESTMapper +} + +// New returns a new Client using the provided config and Options. +// The returned client reads *and* writes directly from the server +// (it doesn't use object caches). It understands how to work with +// normal types (both custom resources and aggregated/built-in resources), +// as well as unstructured types. +// +// In the case of normal types, the scheme will be used to look up the +// corresponding group, version, and kind for the given type. In the +// case of unstructured types, the group, version, and kind will be extracted +// from the corresponding fields on the object. +func New(config *rest.Config, options Options) (Client, error) { + if config == nil { + return nil, fmt.Errorf("must provide non-nil rest.Config to client.New") + } + + // Init a scheme if none provided + if options.Scheme == nil { + options.Scheme = scheme.Scheme + } + + // Init a Mapper if none provided + if options.Mapper == nil { + var err error + options.Mapper, err = apiutil.NewDynamicRESTMapper(config) + if err != nil { + return nil, err + } + } + + clientcache := &clientCache{ + config: config, + scheme: options.Scheme, + mapper: options.Mapper, + codecs: serializer.NewCodecFactory(options.Scheme), + + structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + } + + rawMetaClient, err := metadata.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err) + } + + c := &client{ + typedClient: typedClient{ + cache: clientcache, + paramCodec: runtime.NewParameterCodec(options.Scheme), + }, + unstructuredClient: unstructuredClient{ + cache: clientcache, + paramCodec: noConversionParamCodec{}, + }, + metadataClient: metadataClient{ + client: rawMetaClient, + restMapper: options.Mapper, + }, + scheme: options.Scheme, + mapper: options.Mapper, + } + + return c, nil +} + +var _ Client = &client{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. +type client struct { + typedClient typedClient + unstructuredClient unstructuredClient + metadataClient metadataClient + scheme *runtime.Scheme + mapper meta.RESTMapper +} + +// resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. +// TODO(vincepri): Remove this function and its calls once controller-runtime dependencies are upgraded to 1.16? 
+func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersionKind) {
+	if gvk != schema.EmptyObjectKind.GroupVersionKind() {
+		if v, ok := obj.(schema.ObjectKind); ok {
+			v.SetGroupVersionKind(gvk)
+		}
+	}
+}
+
+// Scheme returns the scheme this client is using.
+func (c *client) Scheme() *runtime.Scheme {
+	return c.scheme
+}
+
+// RESTMapper returns the rest mapper this client is using.
+func (c *client) RESTMapper() meta.RESTMapper {
+	return c.mapper
+}
+
+// Create implements client.Client
+func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.Create(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return fmt.Errorf("cannot create using only metadata")
+	default:
+		return c.typedClient.Create(ctx, obj, opts...)
+	}
+}
+
+// Update implements client.Client
+func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+	defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.Update(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return fmt.Errorf("cannot update using only metadata -- did you mean to patch?")
+	default:
+		return c.typedClient.Update(ctx, obj, opts...)
+	}
+}
+
+// Delete implements client.Client
+func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.Delete(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return c.metadataClient.Delete(ctx, obj, opts...)
+	default:
+		return c.typedClient.Delete(ctx, obj, opts...)
+	}
+}
+
+// DeleteAllOf implements client.Client
+func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return c.metadataClient.DeleteAllOf(ctx, obj, opts...)
+	default:
+		return c.typedClient.DeleteAllOf(ctx, obj, opts...)
+	}
+}
+
+// Patch implements client.Client
+func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+	defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.Patch(ctx, obj, patch, opts...)
+	case *metav1.PartialObjectMetadata:
+		return c.metadataClient.Patch(ctx, obj, patch, opts...)
+	default:
+		return c.typedClient.Patch(ctx, obj, patch, opts...)
+	}
+}
+
+// Get implements client.Client
+func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.Get(ctx, key, obj)
+	case *metav1.PartialObjectMetadata:
+		return c.metadataClient.Get(ctx, key, obj)
+	default:
+		return c.typedClient.Get(ctx, key, obj)
+	}
+}
+
+// List implements client.Client
+func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
+	switch obj.(type) {
+	case *unstructured.UnstructuredList:
+		return c.unstructuredClient.List(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadataList:
+		return c.metadataClient.List(ctx, obj, opts...)
+	default:
+		return c.typedClient.List(ctx, obj, opts...)
+	}
+}
+
+// Status implements client.StatusClient
+func (c *client) Status() StatusWriter {
+	return &statusWriter{client: c}
+}
+
+// statusWriter is a client.StatusWriter that writes the status subresource
+type statusWriter struct {
+	client *client
+}
+
+// ensure statusWriter implements client.StatusWriter
+var _ StatusWriter = &statusWriter{}
+
+// Update implements client.StatusWriter
+func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+	defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?")
+	default:
+		return sw.client.typedClient.UpdateStatus(ctx, obj, opts...)
+	}
+}
+
+// Patch implements client.StatusWriter
+func (sw *statusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+	defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...)
+	case *metav1.PartialObjectMetadata:
+		return sw.client.metadataClient.PatchStatus(ctx, obj, patch, opts...)
+	default:
+		return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...)
+	}
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go
new file mode 100644
index 000000000..bf6ee882b
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go
@@ -0,0 +1,151 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package client + +import ( + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// clientCache creates and caches rest clients and metadata for Kubernetes types +type clientCache struct { + // config is the rest.Config to talk to an apiserver + config *rest.Config + + // scheme maps go structs to GroupVersionKinds + scheme *runtime.Scheme + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // codecs are used to create a REST client for a gvk + codecs serializer.CodecFactory + + // structuredResourceByType caches structured type metadata + structuredResourceByType map[schema.GroupVersionKind]*resourceMeta + // unstructuredResourceByType caches unstructured type metadata + unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta + mu sync.RWMutex +} + +// newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. +// If the object is a list, the resource represents the item's type instead. +func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { + if strings.HasSuffix(gvk.Kind, "List") && isList { + // if this was a list, treat it as a request for the item's resource + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs) + if err != nil { + return nil, err + } + mapping, err := c.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + return &resourceMeta{Interface: client, mapping: mapping, gvk: gvk}, nil +} + +// getResource returns the resource meta information for the given type of object. +// If the object is a list, the resource represents the item's type instead. +func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return nil, err + } + + _, isUnstructured := obj.(*unstructured.Unstructured) + _, isUnstructuredList := obj.(*unstructured.UnstructuredList) + isUnstructured = isUnstructured || isUnstructuredList + + // It's better to do creation work twice than to not let multiple + // people make requests at once + c.mu.RLock() + resourceByType := c.structuredResourceByType + if isUnstructured { + resourceByType = c.unstructuredResourceByType + } + r, known := resourceByType[gvk] + c.mu.RUnlock() + + if known { + return r, nil + } + + // Initialize a new Client + c.mu.Lock() + defer c.mu.Unlock() + r, err = c.newResource(gvk, meta.IsListType(obj), isUnstructured) + if err != nil { + return nil, err + } + resourceByType[gvk] = r + return r, err +} + +// getObjMeta returns objMeta containing both type and object metadata and state +func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { + r, err := c.getResource(obj) + if err != nil { + return nil, err + } + m, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + return &objMeta{resourceMeta: r, Object: m}, err +} + +// resourceMeta caches state for a Kubernetes type. 
+type resourceMeta struct {
+	// client is the rest client used to talk to the apiserver
+	rest.Interface
+	// gvk is the GroupVersionKind of the resourceMeta
+	gvk schema.GroupVersionKind
+	// mapping is the rest mapping
+	mapping *meta.RESTMapping
+}
+
+// isNamespaced returns true if the type is namespaced
+func (r *resourceMeta) isNamespaced() bool {
+	return r.mapping.Scope.Name() != meta.RESTScopeNameRoot
+}
+
+// resource returns the resource name of the type
+func (r *resourceMeta) resource() string {
+	return r.mapping.Resource.Resource
+}
+
+// objMeta stores type and object information about a Kubernetes type
+type objMeta struct {
+	// resourceMeta contains type information for the object
+	*resourceMeta
+
+	// Object contains meta data for the object instance
+	metav1.Object
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go
new file mode 100644
index 000000000..5789de204
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go
@@ -0,0 +1,24 @@
+package client
+
+import (
+	"errors"
+	"net/url"
+
+	"k8s.io/apimachinery/pkg/conversion/queryparams"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var _ runtime.ParameterCodec = noConversionParamCodec{}
+
+// noConversionParamCodec is a no-conversion codec for serializing parameters into URL query strings.
+// It's useful in scenarios with the unstructured client and arbitrary resources.
+type noConversionParamCodec struct{}
+
+func (noConversionParamCodec) EncodeParameters(obj runtime.Object, to schema.GroupVersion) (url.Values, error) {
+	return queryparams.Convert(obj)
+}
+
+func (noConversionParamCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into runtime.Object) error {
+	return errors.New("DecodeParameters not implemented on noConversionParamCodec")
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go
new file mode 100644
index 000000000..2965e5fa9
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package client contains functionality for interacting with Kubernetes API
+// servers.
+//
+// Clients
+//
+// Clients are split into two interfaces -- Readers and Writers. Readers
+// get and list, while writers create, update, and delete.
+//
+// The New function can be used to create a new client that talks directly
+// to the API server.
+//
+// A common pattern in Kubernetes is to read from a cache and write to the API
+// server. This pattern is covered by the DelegatingClient type, which can
+// be used to have a client whose Reader is different from the Writer.
+//
+// Options
+//
+// Many client operations in Kubernetes support options. These options are
+// represented as variadic arguments at the end of a given method call.
+// For instance, to use a label selector on list, you can call +// err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"}) +// +// Indexing +// +// Indexes may be added to caches using a FieldIndexer. This allows you to easily +// and efficiently look up objects with certain properties. You can then make +// use of the index by specifying a field selector on calls to List on the Reader +// corresponding to the given Cache. +// +// For instance, a Secret controller might have an index on the +// `.spec.volumes.secret.secretName` field in Pod objects, so that it could +// easily look up all pods that reference a given secret. +package client diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go new file mode 100644 index 000000000..67e80e055 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go @@ -0,0 +1,106 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +// NewDryRunClient wraps an existing client and enforces DryRun mode +// on all mutating api calls. +func NewDryRunClient(c Client) Client { + return &dryRunClient{client: c} +} + +var _ Client = &dryRunClient{} + +// dryRunClient is a Client that wraps another Client in order to enforce DryRun mode. +type dryRunClient struct { + client Client +} + +// Scheme returns the scheme this client is using. +func (c *dryRunClient) Scheme() *runtime.Scheme { + return c.client.Scheme() +} + +// RESTMapper returns the rest mapper this client is using. +func (c *dryRunClient) RESTMapper() meta.RESTMapper { + return c.client.RESTMapper() +} + +// Create implements client.Client +func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + return c.client.Create(ctx, obj, append(opts, DryRunAll)...) +} + +// Update implements client.Client +func (c *dryRunClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return c.client.Update(ctx, obj, append(opts, DryRunAll)...) +} + +// Delete implements client.Client +func (c *dryRunClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + return c.client.Delete(ctx, obj, append(opts, DryRunAll)...) +} + +// DeleteAllOf implements client.Client +func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.Client +func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) 
+} + +// Get implements client.Client +func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + return c.client.Get(ctx, key, obj) +} + +// List implements client.Client +func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + return c.client.List(ctx, obj, opts...) +} + +// Status implements client.StatusClient +func (c *dryRunClient) Status() StatusWriter { + return &dryRunStatusWriter{client: c.client.Status()} +} + +// ensure dryRunStatusWriter implements client.StatusWriter +var _ StatusWriter = &dryRunStatusWriter{} + +// dryRunStatusWriter is client.StatusWriter that writes status subresource with dryRun mode +// enforced. +type dryRunStatusWriter struct { + client StatusWriter +} + +// Update implements client.StatusWriter +func (sw *dryRunStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return sw.client.Update(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.StatusWriter +func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go new file mode 100644 index 000000000..09636968f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -0,0 +1,136 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// ObjectKey identifies a Kubernetes Object. +type ObjectKey = types.NamespacedName + +// ObjectKeyFromObject returns the ObjectKey given a runtime.Object +func ObjectKeyFromObject(obj Object) ObjectKey { + return ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()} +} + +// Patch is a patch that can be applied to a Kubernetes object. +type Patch interface { + // Type is the PatchType of the patch. + Type() types.PatchType + // Data is the raw data representing the patch. + Data(obj runtime.Object) ([]byte, error) +} + +// TODO(directxman12): is there a sane way to deal with get/delete options? + +// Reader knows how to read and list Kubernetes objects. +type Reader interface { + // Get retrieves an obj for the given object key from the Kubernetes Cluster. + // obj must be a struct pointer so that obj can be updated with the response + // returned by the Server. + Get(ctx context.Context, key ObjectKey, obj Object) error + + // List retrieves list of objects for a given namespace and list options. On a + // successful call, Items field in the list will be populated with the + // result returned from the server. 
+	List(ctx context.Context, list ObjectList, opts ...ListOption) error
+}
+
+// Writer knows how to create, delete, and update Kubernetes objects.
+type Writer interface {
+	// Create saves the object obj in the Kubernetes cluster.
+	Create(ctx context.Context, obj Object, opts ...CreateOption) error
+
+	// Delete deletes the given obj from Kubernetes cluster.
+	Delete(ctx context.Context, obj Object, opts ...DeleteOption) error
+
+	// Update updates the given obj in the Kubernetes cluster. obj must be a
+	// struct pointer so that obj can be updated with the content returned by the Server.
+	Update(ctx context.Context, obj Object, opts ...UpdateOption) error
+
+	// Patch patches the given obj in the Kubernetes cluster. obj must be a
+	// struct pointer so that obj can be updated with the content returned by the Server.
+	Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error
+
+	// DeleteAllOf deletes all objects of the given type matching the given options.
+	DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error
+}
+
+// StatusClient knows how to create a client which can update status subresource
+// for kubernetes objects.
+type StatusClient interface {
+	Status() StatusWriter
+}
+
+// StatusWriter knows how to update status subresource of a Kubernetes object.
+type StatusWriter interface {
+	// Update updates the fields corresponding to the status subresource for the
+	// given obj. obj must be a struct pointer so that obj can be updated
+	// with the content returned by the Server.
+	Update(ctx context.Context, obj Object, opts ...UpdateOption) error
+
+	// Patch patches the given object's subresource. obj must be a struct
+	// pointer so that obj can be updated with the content returned by the
+	// Server.
+	Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error
+}
+
+// Client knows how to perform CRUD operations on Kubernetes objects.
+type Client interface {
+	Reader
+	Writer
+	StatusClient
+
+	// Scheme returns the scheme this client is using.
+	Scheme() *runtime.Scheme
+	// RESTMapper returns the REST mapper this client is using.
+	RESTMapper() meta.RESTMapper
+}
+
+// IndexerFunc knows how to take an object and turn it into a series
+// of non-namespaced keys. Namespaced objects are automatically given
+// namespaced and non-namespaced variants, so keys do not need to include namespace.
+type IndexerFunc func(Object) []string
+
+// FieldIndexer knows how to index over a particular "field" such that it
+// can later be used by a field selector.
+type FieldIndexer interface {
+	// IndexField adds an index with the given field name on the given object type
+	// by using the given function to extract the value for that field. If you want
+	// compatibility with the Kubernetes API server, only return one key, and only use
+	// fields that the API server supports. Otherwise, you can return multiple keys,
+	// and "equality" in the field selector means that at least one key matches the value.
+	// The FieldIndexer will automatically take care of indexing over namespace
+	// and supporting efficient all-namespace queries.
+	IndexField(ctx context.Context, obj Object, field string, extractValue IndexerFunc) error
+}
+
+// IgnoreNotFound returns nil on NotFound errors.
+// All other values that are not NotFound errors or nil are returned unmodified.
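+//
+// A common usage sketch (assuming a Reader r, a context ctx, and an
+// ObjectKey key; names are illustrative only):
+//
+//	var pod corev1.Pod
+//	if err := r.Get(ctx, key, &pod); err != nil {
+//		// Tolerate objects that are already gone; report everything else.
+//		return client.IgnoreNotFound(err)
+//	}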
+func IgnoreNotFound(err error) error { + if apierrors.IsNotFound(err) { + return nil + } + return err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go new file mode 100644 index 000000000..6587a1940 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go @@ -0,0 +1,193 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/metadata" +) + +// TODO(directxman12): we could rewrite this on top of the low-level REST +// client to avoid the extra shallow copy at the end, but I'm not sure it's +// worth it -- the metadata client deals with falling back to loading the whole +// object on older API servers, etc, and we'd have to reproduce that. + +// metadataClient is a client that reads & writes metadata-only requests to/from the API server. +type metadataClient struct { + client metadata.Interface + restMapper meta.RESTMapper +} + +func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns string) (metadata.ResourceInterface, error) { + mapping, err := mc.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + if mapping.Scope.Name() == meta.RESTScopeNameRoot { + return mc.client.Resource(mapping.Resource), nil + } + return mc.client.Resource(mapping.Resource).Namespace(ns), nil +} + +// Delete implements client.Client +func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), metadata.Namespace) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return resInt.Delete(ctx, metadata.Name, *deleteOpts.AsDeleteOptions()) +} + +// DeleteAllOf implements client.Client +func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), deleteAllOfOpts.ListOptions.Namespace) + if err != nil { + return err + } + + return resInt.DeleteCollection(ctx, *deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions()) +} + +// Patch implements client.Client +func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return 
fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + resInt, err := mc.getResourceInterface(gvk, metadata.Namespace) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// Get implements client.Client +func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + + resInt, err := mc.getResourceInterface(gvk, key.Namespace) + if err != nil { + return err + } + + res, err := resInt.Get(ctx, key.Name, metav1.GetOptions{}) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// List implements client.Client +func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadataList) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + if strings.HasSuffix(gvk.Kind, "List") { + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(gvk, listOpts.Namespace) + if err != nil { + return err + } + + res, err := resInt.List(ctx, *listOpts.AsListOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +func (mc *metadataClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + resInt, err := mc.getResourceInterface(gvk, metadata.Namespace) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions(), "status") + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go new file mode 100644 index 000000000..5ed8baca9 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -0,0 +1,253 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+// NewNamespacedClient wraps an existing client enforcing the namespace value.
+// All functions using this client will have the same namespace declared here.
+func NewNamespacedClient(c Client, ns string) Client {
+	return &namespacedClient{
+		client:    c,
+		namespace: ns,
+	}
+}
+
+var _ Client = &namespacedClient{}
+
+// namespacedClient is a Client that wraps another Client in order to enforce the specified namespace value.
+type namespacedClient struct {
+	namespace string
+	client    Client
+}
+
+// Scheme returns the scheme this client is using.
+func (n *namespacedClient) Scheme() *runtime.Scheme {
+	return n.client.Scheme()
+}
+
+// RESTMapper returns the REST mapper this client is using.
+func (n *namespacedClient) RESTMapper() meta.RESTMapper {
+	return n.client.RESTMapper()
+}
+
+// isNamespaced returns true if the object is namespace scoped.
+// For unstructured objects the gvk is found from the object itself.
+func isNamespaced(c Client, obj runtime.Object) (bool, error) {
+	var gvk schema.GroupVersionKind
+	var err error
+
+	_, isUnstructured := obj.(*unstructured.Unstructured)
+	_, isUnstructuredList := obj.(*unstructured.UnstructuredList)
+
+	isUnstructured = isUnstructured || isUnstructuredList
+	if isUnstructured {
+		gvk = obj.GetObjectKind().GroupVersionKind()
+	} else {
+		gvk, err = apiutil.GVKForObject(obj, c.Scheme())
+		if err != nil {
+			return false, err
+		}
+	}
+
+	gk := schema.GroupKind{
+		Group: gvk.Group,
+		Kind:  gvk.Kind,
+	}
+	restmapping, err := c.RESTMapper().RESTMapping(gk)
+	if err != nil {
+		return false, fmt.Errorf("failed to get restmapping: %w", err)
+	}
+	scope := restmapping.Scope.Name()
+
+	if scope == "" {
+		return false, errors.New("Scope cannot be identified. Empty scope returned")
+	}
+
+	if scope != meta.RESTScopeNameRoot {
+		return true, nil
+	}
+	return false, nil
+}
+
+// Create implements client.Client
+func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
+	isNamespaceScoped, err := isNamespaced(n.client, obj)
+	if err != nil {
+		return fmt.Errorf("error finding the scope of the object: %v", err)
+	}
+
+	objectNamespace := obj.GetNamespace()
+	if objectNamespace != n.namespace && objectNamespace != "" {
+		return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
+	}
+
+	if isNamespaceScoped && objectNamespace == "" {
+		obj.SetNamespace(n.namespace)
+	}
+	return n.client.Create(ctx, obj, opts...)
+}
+
+// Update implements client.Client
+func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+	isNamespaceScoped, err := isNamespaced(n.client, obj)
+	if err != nil {
+		return fmt.Errorf("error finding the scope of the object: %v", err)
+	}
+
+	objectNamespace := obj.GetNamespace()
+	if objectNamespace != n.namespace && objectNamespace != "" {
+		return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
+	}
+
+	if isNamespaceScoped && objectNamespace == "" {
+		obj.SetNamespace(n.namespace)
+	}
+	return n.client.Update(ctx, obj, opts...)
+}
+
+// Delete implements client.Client
+func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
+	isNamespaceScoped, err := isNamespaced(n.client, obj)
+	if err != nil {
+		return fmt.Errorf("error finding the scope of the object: %v", err)
+	}
+
+	objectNamespace := obj.GetNamespace()
+	if objectNamespace != n.namespace && objectNamespace != "" {
+		return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
+	}
+
+	if isNamespaceScoped && objectNamespace == "" {
+		obj.SetNamespace(n.namespace)
+	}
+	return n.client.Delete(ctx, obj, opts...)
+}
+
+// DeleteAllOf implements client.Client
+func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
+	isNamespaceScoped, err := isNamespaced(n.client, obj)
+	if err != nil {
+		return fmt.Errorf("error finding the scope of the object: %v", err)
+	}
+
+	if isNamespaceScoped {
+		opts = append(opts, InNamespace(n.namespace))
+	}
+	return n.client.DeleteAllOf(ctx, obj, opts...)
+}
+
+// Patch implements client.Client
+func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+	isNamespaceScoped, err := isNamespaced(n.client, obj)
+	if err != nil {
+		return fmt.Errorf("error finding the scope of the object: %v", err)
+	}
+
+	objectNamespace := obj.GetNamespace()
+	if objectNamespace != n.namespace && objectNamespace != "" {
+		return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
+	}
+
+	if isNamespaceScoped && objectNamespace == "" {
+		obj.SetNamespace(n.namespace)
+	}
+	return n.client.Patch(ctx, obj, patch, opts...)
+}
+
+// Get implements client.Client
+func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object) error {
+	isNamespaceScoped, err := isNamespaced(n.client, obj)
+	if err != nil {
+		return fmt.Errorf("error finding the scope of the object: %v", err)
+	}
+	if isNamespaceScoped {
+		if key.Namespace != "" && key.Namespace != n.namespace {
+			return fmt.Errorf("Namespace %s provided for the object %s does not match the namespace %s on the client", key.Namespace, obj.GetName(), n.namespace)
+		}
+		key.Namespace = n.namespace
+	}
+	return n.client.Get(ctx, key, obj)
+}
+
+// List implements client.Client
+func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
+	if n.namespace != "" {
+		opts = append(opts, InNamespace(n.namespace))
+	}
+	return n.client.List(ctx, obj, opts...)
+} + +// Status implements client.StatusClient +func (n *namespacedClient) Status() StatusWriter { + return &namespacedClientStatusWriter{StatusClient: n.client.Status(), namespace: n.namespace, namespacedclient: n} +} + +// ensure namespacedClientStatusWriter implements client.StatusWriter +var _ StatusWriter = &namespacedClientStatusWriter{} + +type namespacedClientStatusWriter struct { + StatusClient StatusWriter + namespace string + namespacedclient Client +} + +// Update implements client.StatusWriter +func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + isNamespaceScoped, err := isNamespaced(nsw.namespacedclient, obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + return nsw.StatusClient.Update(ctx, obj, opts...) +} + +// Patch implements client.StatusWriter +func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + isNamespaceScoped, err := isNamespaced(nsw.namespacedclient, obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + return nsw.StatusClient.Patch(ctx, obj, patch, opts...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go new file mode 100644 index 000000000..31e334d6c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go @@ -0,0 +1,77 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Object is a Kubernetes object, allows functions to work indistinctly with +// any resource that implements both Object interfaces. +// +// Semantically, these are objects which are both serializable (runtime.Object) +// and identifiable (metav1.Object) -- think any object which you could write +// as YAML or JSON, and then `kubectl create`. +// +// Code-wise, this means that any object which embeds both ObjectMeta (which +// provides metav1.Object) and TypeMeta (which provides half of runtime.Object) +// and has a `DeepCopyObject` implementation (the other half of runtime.Object) +// will implement this by default. 
+//
+// For example, nearly all the built-in types are Objects, as well as all
+// KubeBuilder-generated CRDs (unless you do something real funky to them).
+//
+// By and large, most things that implement runtime.Object also implement
+// Object -- it's very rare to have *just* a runtime.Object implementation (the
+// cases tend to be funky built-in types like Webhook payloads that don't have
+// a `metadata` field).
+//
+// Notice that XYZList types are distinct: they implement ObjectList instead.
+type Object interface {
+	metav1.Object
+	runtime.Object
+}
+
+// ObjectList is a Kubernetes object list, allows functions to work
+// indistinctly with any resource that implements both runtime.Object and
+// metav1.ListInterface interfaces.
+//
+// Semantically, this is any object which may be serialized (ObjectMeta), and
+// is a kubernetes list wrapper (has items, pagination fields, etc) -- think
+// the wrapper used in a response from a `kubectl list --output yaml` call.
+//
+// Code-wise, this means that any object which embeds both ListMeta (which
+// provides metav1.ListInterface) and TypeMeta (which provides half of
+// runtime.Object) and has a `DeepCopyObject` implementation (the other half of
+// runtime.Object) will implement this by default.
+//
+// For example, nearly all the built-in XYZList types are ObjectLists, as well
+// as the XYZList types for all KubeBuilder-generated CRDs (unless you do
+// something real funky to them).
+//
+// By and large, most things that are XYZList and implement runtime.Object also
+// implement ObjectList -- it's very rare to have *just* a runtime.Object
+// implementation (the cases tend to be funky built-in types like Webhook
+// payloads that don't have a `metadata` field).
+//
+// This is similar to Object, which is almost always implemented by the items
+// in the list themselves.
+type ObjectList interface {
+	metav1.ListInterface
+	runtime.Object
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go
new file mode 100644
index 000000000..f25327646
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go
@@ -0,0 +1,697 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/selection"
+)
+
+// {{{ "Functional" Option Interfaces
+
+// CreateOption is some configuration that modifies options for a create request.
+type CreateOption interface {
+	// ApplyToCreate applies this configuration to the given create options.
+	ApplyToCreate(*CreateOptions)
+}
+
+// DeleteOption is some configuration that modifies options for a delete request.
+type DeleteOption interface {
+	// ApplyToDelete applies this configuration to the given delete options.
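+	//
+	// Concrete options defined later in this file, such as
+	// GracePeriodSeconds and PropagationPolicy, implement this interface.
+	// A sketch of passing one to Delete (c, ctx, and pod are illustrative
+	// names only):
+	//
+	//	_ = c.Delete(ctx, &pod, client.GracePeriodSeconds(5))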
+	ApplyToDelete(*DeleteOptions)
+}
+
+// ListOption is some configuration that modifies options for a list request.
+type ListOption interface {
+	// ApplyToList applies this configuration to the given list options.
+	ApplyToList(*ListOptions)
+}
+
+// UpdateOption is some configuration that modifies options for an update request.
+type UpdateOption interface {
+	// ApplyToUpdate applies this configuration to the given update options.
+	ApplyToUpdate(*UpdateOptions)
+}
+
+// PatchOption is some configuration that modifies options for a patch request.
+type PatchOption interface {
+	// ApplyToPatch applies this configuration to the given patch options.
+	ApplyToPatch(*PatchOptions)
+}
+
+// DeleteAllOfOption is some configuration that modifies options for a deletecollection request.
+type DeleteAllOfOption interface {
+	// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+	ApplyToDeleteAllOf(*DeleteAllOfOptions)
+}
+
+// }}}
+
+// {{{ Multi-Type Options
+
+// DryRunAll sets the "dry run" option to "all", executing all
+// validation, etc without persisting the change to storage.
+var DryRunAll = dryRunAll{}
+
+type dryRunAll struct{}
+
+// ApplyToCreate applies this configuration to the given create options.
+func (dryRunAll) ApplyToCreate(opts *CreateOptions) {
+	opts.DryRun = []string{metav1.DryRunAll}
+}
+
+// ApplyToUpdate applies this configuration to the given update options.
+func (dryRunAll) ApplyToUpdate(opts *UpdateOptions) {
+	opts.DryRun = []string{metav1.DryRunAll}
+}
+
+// ApplyToPatch applies this configuration to the given patch options.
+func (dryRunAll) ApplyToPatch(opts *PatchOptions) {
+	opts.DryRun = []string{metav1.DryRunAll}
+}
+
+// ApplyToDelete applies this configuration to the given delete options.
+func (dryRunAll) ApplyToDelete(opts *DeleteOptions) {
+	opts.DryRun = []string{metav1.DryRunAll}
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (dryRunAll) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+	opts.DryRun = []string{metav1.DryRunAll}
+}
+
+// FieldOwner sets the field manager name for the given server-side apply patch.
+type FieldOwner string
+
+// ApplyToPatch applies this configuration to the given patch options.
+func (f FieldOwner) ApplyToPatch(opts *PatchOptions) {
+	opts.FieldManager = string(f)
+}
+
+// ApplyToCreate applies this configuration to the given create options.
+func (f FieldOwner) ApplyToCreate(opts *CreateOptions) {
+	opts.FieldManager = string(f)
+}
+
+// ApplyToUpdate applies this configuration to the given update options.
+func (f FieldOwner) ApplyToUpdate(opts *UpdateOptions) {
+	opts.FieldManager = string(f)
+}
+
+// }}}
+
+// {{{ Create Options
+
+// CreateOptions contains options for create requests. It's generally a subset
+// of metav1.CreateOptions.
+type CreateOptions struct {
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	DryRun []string
+
+	// FieldManager is the name of the user or component submitting
+	// this request. It must be set with server-side apply.
+	FieldManager string
+
+	// Raw represents raw CreateOptions, as passed to the API server.
+	Raw *metav1.CreateOptions
+}
+
+// AsCreateOptions returns these options as a metav1.CreateOptions.
+// This may mutate the Raw field.
+func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions {
+	if o == nil {
+		return &metav1.CreateOptions{}
+	}
+	if o.Raw == nil {
+		o.Raw = &metav1.CreateOptions{}
+	}
+
+	o.Raw.DryRun = o.DryRun
+	o.Raw.FieldManager = o.FieldManager
+	return o.Raw
+}
+
+// ApplyOptions applies the given create options on these options,
+// and then returns itself (for convenient chaining).
+func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions {
+	for _, opt := range opts {
+		opt.ApplyToCreate(o)
+	}
+	return o
+}
+
+// ApplyToCreate implements CreateOption
+func (o *CreateOptions) ApplyToCreate(co *CreateOptions) {
+	if o.DryRun != nil {
+		co.DryRun = o.DryRun
+	}
+	if o.FieldManager != "" {
+		co.FieldManager = o.FieldManager
+	}
+	if o.Raw != nil {
+		co.Raw = o.Raw
+	}
+}
+
+var _ CreateOption = &CreateOptions{}
+
+// }}}
+
+// {{{ Delete Options
+
+// DeleteOptions contains options for delete requests. It's generally a subset
+// of metav1.DeleteOptions.
+type DeleteOptions struct {
+	// GracePeriodSeconds is the duration in seconds before the object should be
+	// deleted. Value must be a non-negative integer. The value zero indicates
+	// delete immediately. If this value is nil, the default grace period for the
+	// specified type will be used.
+	GracePeriodSeconds *int64
+
+	// Preconditions must be fulfilled before a deletion is carried out. If not
+	// possible, a 409 Conflict status will be returned.
+	Preconditions *metav1.Preconditions
+
+	// PropagationPolicy determines whether and how garbage collection will be
+	// performed. Either this field or OrphanDependents may be set, but not both.
+	// The default policy is decided by the existing finalizer set in the
+	// metadata.finalizers and the resource-specific default policy.
+	// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
+	// allow the garbage collector to delete the dependents in the background;
+	// 'Foreground' - a cascading policy that deletes all dependents in the
+	// foreground.
+	PropagationPolicy *metav1.DeletionPropagation
+
+	// Raw represents raw DeleteOptions, as passed to the API server.
+	Raw *metav1.DeleteOptions
+
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	DryRun []string
+}
+
+// AsDeleteOptions returns these options as a metav1.DeleteOptions.
+// This may mutate the Raw field.
+func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions {
+	if o == nil {
+		return &metav1.DeleteOptions{}
+	}
+	if o.Raw == nil {
+		o.Raw = &metav1.DeleteOptions{}
+	}
+
+	o.Raw.GracePeriodSeconds = o.GracePeriodSeconds
+	o.Raw.Preconditions = o.Preconditions
+	o.Raw.PropagationPolicy = o.PropagationPolicy
+	o.Raw.DryRun = o.DryRun
+	return o.Raw
+}
+
+// ApplyOptions applies the given delete options on these options,
+// and then returns itself (for convenient chaining).
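+//
+// A sketch of flattening several options into one DeleteOptions (names
+// are illustrative only):
+//
+//	do := (&DeleteOptions{}).ApplyOptions([]DeleteOption{
+//		GracePeriodSeconds(30),
+//		PropagationPolicy(metav1.DeletePropagationForeground),
+//	})
+//	raw := do.AsDeleteOptions() // *metav1.DeleteOptions for the API server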
+func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions {
+	for _, opt := range opts {
+		opt.ApplyToDelete(o)
+	}
+	return o
+}
+
+var _ DeleteOption = &DeleteOptions{}
+
+// ApplyToDelete implements DeleteOption
+func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) {
+	if o.GracePeriodSeconds != nil {
+		do.GracePeriodSeconds = o.GracePeriodSeconds
+	}
+	if o.Preconditions != nil {
+		do.Preconditions = o.Preconditions
+	}
+	if o.PropagationPolicy != nil {
+		do.PropagationPolicy = o.PropagationPolicy
+	}
+	if o.Raw != nil {
+		do.Raw = o.Raw
+	}
+	if o.DryRun != nil {
+		do.DryRun = o.DryRun
+	}
+}
+
+// GracePeriodSeconds sets the grace period for the deletion
+// to the given number of seconds.
+type GracePeriodSeconds int64
+
+// ApplyToDelete applies this configuration to the given delete options.
+func (s GracePeriodSeconds) ApplyToDelete(opts *DeleteOptions) {
+	secs := int64(s)
+	opts.GracePeriodSeconds = &secs
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (s GracePeriodSeconds) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+	s.ApplyToDelete(&opts.DeleteOptions)
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+type Preconditions metav1.Preconditions
+
+// ApplyToDelete applies this configuration to the given delete options.
+func (p Preconditions) ApplyToDelete(opts *DeleteOptions) {
+	preconds := metav1.Preconditions(p)
+	opts.Preconditions = &preconds
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (p Preconditions) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+	p.ApplyToDelete(&opts.DeleteOptions)
+}
+
+// PropagationPolicy determines whether and how garbage collection will be
+// performed. Either this field or OrphanDependents may be set, but not both.
+// The default policy is decided by the existing finalizer set in the
+// metadata.finalizers and the resource-specific default policy.
+// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
+// allow the garbage collector to delete the dependents in the background;
+// 'Foreground' - a cascading policy that deletes all dependents in the
+// foreground.
+type PropagationPolicy metav1.DeletionPropagation
+
+// ApplyToDelete applies this configuration to the given delete options.
+func (p PropagationPolicy) ApplyToDelete(opts *DeleteOptions) {
+	policy := metav1.DeletionPropagation(p)
+	opts.PropagationPolicy = &policy
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+	p.ApplyToDelete(&opts.DeleteOptions)
+}
+
+// }}}
+
+// {{{ List Options
+
+// ListOptions contains options for limiting or filtering results.
+// It's generally a subset of metav1.ListOptions, with support for
+// pre-parsed selectors (since generally, selectors will be executed
+// against the cache).
+type ListOptions struct {
+	// LabelSelector filters results by label. Use SetLabelSelector to
+	// set from raw string form.
+	LabelSelector labels.Selector
+	// FieldSelector filters results by a particular field. In order
+	// to use this with cache-based implementations, restrict usage to
+	// a single field-value pair that's been added to the indexers.
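+	// For example (sketch): fields.OneTermEqualSelector("spec.nodeName", "node-1").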
+	FieldSelector fields.Selector
+
+	// Namespace represents the namespace to list for, or empty for
+	// non-namespaced objects, or to list across all namespaces.
+	Namespace string
+
+	// Limit specifies the maximum number of results to return from the server. The server may
+	// not support this field on all resource types, but if it does and more results remain it
+	// will set the continue field on the returned list object. This field is not supported if watch
+	// is true in the Raw ListOptions.
+	Limit int64
+	// Continue is a token returned by the server that lets a client retrieve chunks of results
+	// from the server by specifying limit. The server may reject requests for continuation tokens
+	// it does not recognize and will return a 410 error if the token can no longer be used because
+	// it has expired. This field is not supported if watch is true in the Raw ListOptions.
+	Continue string
+
+	// Raw represents raw ListOptions, as passed to the API server. Note
+	// that these may not be respected by all implementations of this interface,
+	// and the LabelSelector, FieldSelector, Limit and Continue fields are ignored.
+	Raw *metav1.ListOptions
+}
+
+var _ ListOption = &ListOptions{}
+
+// ApplyToList implements ListOption for ListOptions
+func (o *ListOptions) ApplyToList(lo *ListOptions) {
+	if o.LabelSelector != nil {
+		lo.LabelSelector = o.LabelSelector
+	}
+	if o.FieldSelector != nil {
+		lo.FieldSelector = o.FieldSelector
+	}
+	if o.Namespace != "" {
+		lo.Namespace = o.Namespace
+	}
+	if o.Raw != nil {
+		lo.Raw = o.Raw
+	}
+	if o.Limit > 0 {
+		lo.Limit = o.Limit
+	}
+	if o.Continue != "" {
+		lo.Continue = o.Continue
+	}
+}
+
+// AsListOptions returns these options as a flattened metav1.ListOptions.
+// This may mutate the Raw field.
+func (o *ListOptions) AsListOptions() *metav1.ListOptions {
+	if o == nil {
+		return &metav1.ListOptions{}
+	}
+	if o.Raw == nil {
+		o.Raw = &metav1.ListOptions{}
+	}
+	if o.LabelSelector != nil {
+		o.Raw.LabelSelector = o.LabelSelector.String()
+	}
+	if o.FieldSelector != nil {
+		o.Raw.FieldSelector = o.FieldSelector.String()
+	}
+	if !o.Raw.Watch {
+		o.Raw.Limit = o.Limit
+		o.Raw.Continue = o.Continue
+	}
+	return o.Raw
+}
+
+// ApplyOptions applies the given list options on these options,
+// and then returns itself (for convenient chaining).
+func (o *ListOptions) ApplyOptions(opts []ListOption) *ListOptions {
+	for _, opt := range opts {
+		opt.ApplyToList(o)
+	}
+	return o
+}
+
+// MatchingLabels filters the list/delete operation on the given set of labels.
+type MatchingLabels map[string]string
+
+// ApplyToList applies this configuration to the given list options.
+func (m MatchingLabels) ApplyToList(opts *ListOptions) {
+	// TODO(directxman12): can we avoid reserializing this over and over?
+	sel := labels.SelectorFromValidatedSet(map[string]string(m))
+	opts.LabelSelector = sel
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (m MatchingLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+	m.ApplyToList(&opts.ListOptions)
+}
+
+// HasLabels filters the list/delete operation, checking if the set of labels exists
+// without checking their values.
+type HasLabels []string
+
+// ApplyToList applies this configuration to the given list options.
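+//
+// A sketch of listing everything that carries an "app" label, whatever its
+// value (c, ctx, and pods are illustrative names only):
+//
+//	_ = c.List(ctx, &pods, client.HasLabels{"app"})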
+func (m HasLabels) ApplyToList(opts *ListOptions) {
+	sel := labels.NewSelector()
+	for _, label := range m {
+		r, err := labels.NewRequirement(label, selection.Exists, nil)
+		if err == nil {
+			sel = sel.Add(*r)
+		}
+	}
+	opts.LabelSelector = sel
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (m HasLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+	m.ApplyToList(&opts.ListOptions)
+}
+
+// MatchingLabelsSelector filters the list/delete operation on the given label
+// selector (or index in the case of cached lists). A struct is used because
+// labels.Selector is an interface, which cannot be aliased.
+type MatchingLabelsSelector struct {
+	labels.Selector
+}
+
+// ApplyToList applies this configuration to the given list options.
+func (m MatchingLabelsSelector) ApplyToList(opts *ListOptions) {
+	opts.LabelSelector = m
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (m MatchingLabelsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+	m.ApplyToList(&opts.ListOptions)
+}
+
+// MatchingFields filters the list/delete operation on the given field Set
+// (or index in the case of cached lists).
+type MatchingFields fields.Set
+
+// ApplyToList applies this configuration to the given list options.
+func (m MatchingFields) ApplyToList(opts *ListOptions) {
+	// TODO(directxman12): can we avoid re-serializing this?
+	sel := fields.Set(m).AsSelector()
+	opts.FieldSelector = sel
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (m MatchingFields) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+	m.ApplyToList(&opts.ListOptions)
+}
+
+// MatchingFieldsSelector filters the list/delete operation on the given field
+// selector (or index in the case of cached lists). A struct is used because
+// fields.Selector is an interface, which cannot be aliased.
+type MatchingFieldsSelector struct {
+	fields.Selector
+}
+
+// ApplyToList applies this configuration to the given list options.
+func (m MatchingFieldsSelector) ApplyToList(opts *ListOptions) {
+	opts.FieldSelector = m
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (m MatchingFieldsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+	m.ApplyToList(&opts.ListOptions)
+}
+
+// InNamespace restricts the list/delete operation to the given namespace.
+type InNamespace string
+
+// ApplyToList applies this configuration to the given list options.
+func (n InNamespace) ApplyToList(opts *ListOptions) {
+	opts.Namespace = string(n)
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+	n.ApplyToList(&opts.ListOptions)
+}
+
+// Limit specifies the maximum number of results to return from the server.
+// Limit does not implement the DeleteAllOfOption interface because the server
+// does not support setting it for deletecollection operations.
+type Limit int64
+
+// ApplyToList applies this configuration to the given list options.
+func (l Limit) ApplyToList(opts *ListOptions) {
+	opts.Limit = int64(l)
+}
+
+// Continue sets a continuation token to retrieve chunks of results when using limit.
+// Continue does not implement the DeleteAllOfOption interface because the server
+// does not support setting it for deletecollection operations.
+type Continue string
+
+// ApplyToList applies this configuration to the given list options.
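+//
+// A sketch of paging through results with Limit and Continue (c, ctx, and
+// pods are illustrative names only):
+//
+//	var pods corev1.PodList
+//	_ = c.List(ctx, &pods, client.Limit(100))
+//	if token := pods.GetContinue(); token != "" {
+//		_ = c.List(ctx, &pods, client.Limit(100), client.Continue(token))
+//	}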
+func (c Continue) ApplyToList(opts *ListOptions) {
+	opts.Continue = string(c)
+}
+
+// }}}
+
+// {{{ Update Options
+
+// UpdateOptions contains options for update requests. It's generally a subset
+// of metav1.UpdateOptions.
+type UpdateOptions struct {
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	DryRun []string
+
+	// FieldManager is the name of the user or component submitting
+	// this request. It must be set with server-side apply.
+	FieldManager string
+
+	// Raw represents raw UpdateOptions, as passed to the API server.
+	Raw *metav1.UpdateOptions
+}
+
+// AsUpdateOptions returns these options as a metav1.UpdateOptions.
+// This may mutate the Raw field.
+func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions {
+	if o == nil {
+		return &metav1.UpdateOptions{}
+	}
+	if o.Raw == nil {
+		o.Raw = &metav1.UpdateOptions{}
+	}
+
+	o.Raw.DryRun = o.DryRun
+	o.Raw.FieldManager = o.FieldManager
+	return o.Raw
+}
+
+// ApplyOptions applies the given update options on these options,
+// and then returns itself (for convenient chaining).
+func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions {
+	for _, opt := range opts {
+		opt.ApplyToUpdate(o)
+	}
+	return o
+}
+
+var _ UpdateOption = &UpdateOptions{}
+
+// ApplyToUpdate implements UpdateOption
+func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) {
+	if o.DryRun != nil {
+		uo.DryRun = o.DryRun
+	}
+	if o.FieldManager != "" {
+		uo.FieldManager = o.FieldManager
+	}
+	if o.Raw != nil {
+		uo.Raw = o.Raw
+	}
+}
+
+// }}}
+
+// {{{ Patch Options
+
+// PatchOptions contains options for patch requests.
+type PatchOptions struct {
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	DryRun []string
+
+	// Force is going to "force" Apply requests. It means the user will
+	// re-acquire conflicting fields owned by other people. Force
+	// flag must be unset for non-apply patch requests.
+	// +optional
+	Force *bool
+
+	// FieldManager is the name of the user or component submitting
+	// this request. It must be set with server-side apply.
+	FieldManager string
+
+	// Raw represents raw PatchOptions, as passed to the API server.
+	Raw *metav1.PatchOptions
+}
+
+// ApplyOptions applies the given patch options on these options,
+// and then returns itself (for convenient chaining).
+func (o *PatchOptions) ApplyOptions(opts []PatchOption) *PatchOptions {
+	for _, opt := range opts {
+		opt.ApplyToPatch(o)
+	}
+	return o
+}
+
+// AsPatchOptions returns these options as a metav1.PatchOptions.
+// This may mutate the Raw field.
+func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions { + if o == nil { + return &metav1.PatchOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.PatchOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.Force = o.Force + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +var _ PatchOption = &PatchOptions{} + +// ApplyToPatch implements PatchOptions +func (o *PatchOptions) ApplyToPatch(po *PatchOptions) { + if o.DryRun != nil { + po.DryRun = o.DryRun + } + if o.Force != nil { + po.Force = o.Force + } + if o.FieldManager != "" { + po.FieldManager = o.FieldManager + } + if o.Raw != nil { + po.Raw = o.Raw + } +} + +// ForceOwnership indicates that in case of conflicts with server-side apply, +// the client should acquire ownership of the conflicting field. Most +// controllers should use this. +var ForceOwnership = forceOwnership{} + +type forceOwnership struct{} + +func (forceOwnership) ApplyToPatch(opts *PatchOptions) { + definitelyTrue := true + opts.Force = &definitelyTrue +} + +// }}} + +// {{{ DeleteAllOf Options + +// these are all just delete options and list options + +// DeleteAllOfOptions contains options for deletecollection (deleteallof) requests. +// It's just list and delete options smooshed together. +type DeleteAllOfOptions struct { + ListOptions + DeleteOptions +} + +// ApplyOptions applies the given deleteallof options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOfOptions { + for _, opt := range opts { + opt.ApplyToDeleteAllOf(o) + } + return o +} + +var _ DeleteAllOfOption = &DeleteAllOfOptions{} + +// ApplyToDeleteAllOf implements DeleteAllOfOption +func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) { + o.ApplyToList(&do.ListOptions) + o.ApplyToDelete(&do.DeleteOptions) +} + +// }}} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go new file mode 100644 index 000000000..c32a06c06 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go @@ -0,0 +1,186 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" +) + +var ( + // Apply uses server-side apply to patch the given object. + Apply = applyPatch{} + + // Merge uses the raw object as a merge patch, without modifications. + // Use MergeFrom if you wish to compute a diff instead. + Merge = mergePatch{} +) + +type patch struct { + patchType types.PatchType + data []byte +} + +// Type implements Patch. +func (s *patch) Type() types.PatchType { + return s.patchType +} + +// Data implements Patch. 
+func (s *patch) Data(obj runtime.Object) ([]byte, error) { + return s.data, nil +} + +// RawPatch constructs a new Patch with the given PatchType and data. +func RawPatch(patchType types.PatchType, data []byte) Patch { + return &patch{patchType, data} +} + +// MergeFromWithOptimisticLock can be used if clients want to make sure a patch +// is being applied to the latest resource version of an object. +// +// The behavior is similar to what an Update would do, without the need to send the +// whole object. Usually this method is useful if you might have multiple clients +// acting on the same object and the same API version, but with different versions of the Go structs. +// +// For example, an "older" copy of a Widget that has fields A and B, and a "newer" copy with A, B, and C. +// Sending an update using the older struct definition results in C being dropped, whereas using a patch does not. +type MergeFromWithOptimisticLock struct{} + +// ApplyToMergeFrom applies this configuration to the given patch options. +func (m MergeFromWithOptimisticLock) ApplyToMergeFrom(in *MergeFromOptions) { + in.OptimisticLock = true +} + +// MergeFromOption is some configuration that modifies options for a merge-from patch data. +type MergeFromOption interface { + // ApplyToMergeFrom applies this configuration to the given patch options. + ApplyToMergeFrom(*MergeFromOptions) +} + +// MergeFromOptions contains options to generate a merge-from patch data. +type MergeFromOptions struct { + // OptimisticLock, when true, includes `metadata.resourceVersion` into the final + // patch data. If the `resourceVersion` field doesn't match what's stored, + // the operation results in a conflict and clients will need to try again. + OptimisticLock bool +} + +type mergeFromPatch struct { + from runtime.Object + opts MergeFromOptions +} + +// Type implements patch. +func (s *mergeFromPatch) Type() types.PatchType { + return types.MergePatchType +} + +// Data implements Patch. +func (s *mergeFromPatch) Data(obj runtime.Object) ([]byte, error) { + originalJSON, err := json.Marshal(s.from) + if err != nil { + return nil, err + } + + modifiedJSON, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + data, err := jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) + if err != nil { + return nil, err + } + + if s.opts.OptimisticLock { + dataMap := map[string]interface{}{} + if err := json.Unmarshal(data, &dataMap); err != nil { + return nil, err + } + fromMeta, ok := s.from.(metav1.Object) + if !ok { + return nil, fmt.Errorf("cannot use OptimisticLock, from object %q is not a valid metav1.Object", s.from) + } + resourceVersion := fromMeta.GetResourceVersion() + if len(resourceVersion) == 0 { + return nil, fmt.Errorf("cannot use OptimisticLock, from object %q does not have any resource version we can use", s.from) + } + u := &unstructured.Unstructured{Object: dataMap} + u.SetResourceVersion(resourceVersion) + data, err = json.Marshal(u) + if err != nil { + return nil, err + } + } + + return data, nil +} + +// MergeFrom creates a Patch that patches using the merge-patch strategy with the given object as base. +func MergeFrom(obj runtime.Object) Patch { + return &mergeFromPatch{from: obj} +} + +// MergeFromWithOptions creates a Patch that patches using the merge-patch strategy with the given object as base. 
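+//
+// A sketch of the typical read-modify-patch flow (c, ctx, and pod are
+// illustrative names only):
+//
+//	base := pod.DeepCopy()
+//	pod.Labels["owner"] = "me"
+//	err := c.Patch(ctx, &pod, client.MergeFromWithOptions(base, client.MergeFromWithOptimisticLock{}))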
+func MergeFromWithOptions(obj runtime.Object, opts ...MergeFromOption) Patch {
+	options := &MergeFromOptions{}
+	for _, opt := range opts {
+		opt.ApplyToMergeFrom(options)
+	}
+	return &mergeFromPatch{from: obj, opts: *options}
+}
+
+// mergePatch uses a raw merge strategy to patch the object.
+type mergePatch struct{}
+
+// Type implements Patch.
+func (p mergePatch) Type() types.PatchType {
+	return types.MergePatchType
+}
+
+// Data implements Patch.
+func (p mergePatch) Data(obj runtime.Object) ([]byte, error) {
+	// NB(directxman12): we might technically want to be using an actual encoder
+	// here (in case some more performant encoder is introduced) but this is
+	// correct and sufficient for our uses (it's what the JSON serializer in
+	// client-go does, more-or-less).
+	return json.Marshal(obj)
+}
+
+// applyPatch uses server-side apply to patch the object.
+type applyPatch struct{}
+
+// Type implements Patch.
+func (p applyPatch) Type() types.PatchType {
+	return types.ApplyPatchType
+}
+
+// Data implements Patch.
+func (p applyPatch) Data(obj runtime.Object) ([]byte, error) {
+	// NB(directxman12): we might technically want to be using an actual encoder
+	// here (in case some more performant encoder is introduced) but this is
+	// correct and sufficient for our uses (it's what the JSON serializer in
+	// client-go does, more-or-less).
+	return json.Marshal(obj)
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go
new file mode 100644
index 000000000..bf4b861f3
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go
@@ -0,0 +1,141 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+	"context"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+// NewDelegatingClientInput encapsulates the input parameters to create a new delegating client.
+type NewDelegatingClientInput struct {
+	CacheReader       Reader
+	Client            Client
+	UncachedObjects   []Object
+	CacheUnstructured bool
+}
+
+// NewDelegatingClient creates a new delegating client.
+//
+// A delegating client forms a Client by composing separate reader, writer and
+// statusclient interfaces. This way, you can have a Client that reads from a
+// cache and writes to the API server.
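+//
+// A sketch of composing one from a manager's cache and a direct API client
+// (mgr and directClient are illustrative names only):
+//
+//	c, err := client.NewDelegatingClient(client.NewDelegatingClientInput{
+//		CacheReader: mgr.GetCache(),
+//		Client:      directClient,
+//	})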
+func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) {
+	uncachedGVKs := map[schema.GroupVersionKind]struct{}{}
+	for _, obj := range in.UncachedObjects {
+		gvk, err := apiutil.GVKForObject(obj, in.Client.Scheme())
+		if err != nil {
+			return nil, err
+		}
+		uncachedGVKs[gvk] = struct{}{}
+	}
+
+	return &delegatingClient{
+		scheme: in.Client.Scheme(),
+		mapper: in.Client.RESTMapper(),
+		Reader: &delegatingReader{
+			CacheReader:       in.CacheReader,
+			ClientReader:      in.Client,
+			scheme:            in.Client.Scheme(),
+			uncachedGVKs:      uncachedGVKs,
+			cacheUnstructured: in.CacheUnstructured,
+		},
+		Writer:       in.Client,
+		StatusClient: in.Client,
+	}, nil
+}
+
+type delegatingClient struct {
+	Reader
+	Writer
+	StatusClient
+
+	scheme *runtime.Scheme
+	mapper meta.RESTMapper
+}
+
+// Scheme returns the scheme this client is using.
+func (d *delegatingClient) Scheme() *runtime.Scheme {
+	return d.scheme
+}
+
+// RESTMapper returns the rest mapper this client is using.
+func (d *delegatingClient) RESTMapper() meta.RESTMapper {
+	return d.mapper
+}
+
+// delegatingReader forms a Reader that will cause Get and List requests for
+// unstructured types to use the ClientReader while requests for any other type
+// of object will use the CacheReader. This avoids accidentally caching the
+// entire cluster in the common case of loading arbitrary unstructured objects
+// (e.g. from OwnerReferences).
+type delegatingReader struct {
+	CacheReader  Reader
+	ClientReader Reader
+
+	uncachedGVKs      map[schema.GroupVersionKind]struct{}
+	scheme            *runtime.Scheme
+	cacheUnstructured bool
+}
+
+func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) {
+	gvk, err := apiutil.GVKForObject(obj, d.scheme)
+	if err != nil {
+		return false, err
+	}
+	// TODO: this is producing unsafe guesses that don't actually work,
+	// but it matches ~99% of the cases out there.
+	if meta.IsListType(obj) {
+		gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
+	}
+	if _, isUncached := d.uncachedGVKs[gvk]; isUncached {
+		return true, nil
+	}
+	if !d.cacheUnstructured {
+		_, isUnstructured := obj.(*unstructured.Unstructured)
+		_, isUnstructuredList := obj.(*unstructured.UnstructuredList)
+		return isUnstructured || isUnstructuredList, nil
+	}
+	return false, nil
+}
+
+// Get retrieves an obj for a given object key from the Kubernetes Cluster.
+func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object) error {
+	if isUncached, err := d.shouldBypassCache(obj); err != nil {
+		return err
+	} else if isUncached {
+		return d.ClientReader.Get(ctx, key, obj)
+	}
+	return d.CacheReader.Get(ctx, key, obj)
+}
+
+// List retrieves list of objects for a given namespace and list options.
+func (d *delegatingReader) List(ctx context.Context, list ObjectList, opts ...ListOption) error {
+	if isUncached, err := d.shouldBypassCache(list); err != nil {
+		return err
+	} else if isUncached {
+		return d.ClientReader.List(ctx, list, opts...)
+	}
+	return d.CacheReader.List(ctx, list, opts...)
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go
new file mode 100644
index 000000000..a1b32653c
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go
@@ -0,0 +1,205 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" +) + +var _ Reader = &typedClient{} +var _ Writer = &typedClient{} +var _ StatusWriter = &typedClient{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. +type typedClient struct { + cache *clientCache + paramCodec runtime.ParameterCodec +} + +// Create implements client.Client +func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + return o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// Update implements client.Client +func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := &UpdateOptions{} + updateOpts.ApplyOptions(opts) + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// Delete implements client.Client +func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// DeleteAllOf implements client.Client +func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), c.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// Patch implements client.Client +func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + Body(data). + Do(ctx). 
+		Into(obj)
+}
+
+// Get implements client.Client
+func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object) error {
+	r, err := c.cache.getResource(obj)
+	if err != nil {
+		return err
+	}
+	return r.Get().
+		NamespaceIfScoped(key.Namespace, r.isNamespaced()).
+		Resource(r.resource()).
+		Name(key.Name).Do(ctx).Into(obj)
+}
+
+// List implements client.Client
+func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
+	r, err := c.cache.getResource(obj)
+	if err != nil {
+		return err
+	}
+	listOpts := ListOptions{}
+	listOpts.ApplyOptions(opts)
+	return r.Get().
+		NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()).
+		Resource(r.resource()).
+		VersionedParams(listOpts.AsListOptions(), c.paramCodec).
+		Do(ctx).
+		Into(obj)
+}
+
+// UpdateStatus is used by StatusWriter to write status.
+func (c *typedClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error {
+	o, err := c.cache.getObjMeta(obj)
+	if err != nil {
+		return err
+	}
+	// TODO(droot): examine the returned error and check if the error needs to be
+	// wrapped to improve the UX?
+	// It would be nice to receive an error saying the object doesn't implement
+	// status subresource and check CRD definition
+	return o.Put().
+		NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+		Resource(o.resource()).
+		Name(o.GetName()).
+		SubResource("status").
+		Body(obj).
+		VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), c.paramCodec).
+		Do(ctx).
+		Into(obj)
+}
+
+// PatchStatus is used by StatusWriter to write status.
+func (c *typedClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+	o, err := c.cache.getObjMeta(obj)
+	if err != nil {
+		return err
+	}
+
+	data, err := patch.Data(obj)
+	if err != nil {
+		return err
+	}
+
+	patchOpts := &PatchOptions{}
+	return o.Patch(patch.Type()).
+		NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+		Resource(o.resource()).
+		Name(o.GetName()).
+		SubResource("status").
+		Body(data).
+		VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec).
+		Do(ctx).
+		Into(obj)
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go
new file mode 100644
index 000000000..f8fb3ccec
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go
@@ -0,0 +1,277 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+var _ Reader = &unstructuredClient{}
+var _ Writer = &unstructuredClient{}
+var _ StatusWriter = &unstructuredClient{}
+
+// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes
+// new clients at the time they are used, and caches the client.
+type unstructuredClient struct { + cache *clientCache + paramCodec runtime.ParameterCodec +} + +// Create implements client.Client +func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + result := o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + return result +} + +// Update implements client.Client +func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := UpdateOptions{} + updateOpts.ApplyOptions(opts) + result := o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + return result +} + +// Delete implements client.Client +func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + _, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + return o.Delete(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// DeleteAllOf implements client.Client +func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + _, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), uc.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// Patch implements client.Client +func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + _, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec). + Body(data). + Do(ctx). 
+ Into(obj) +} + +// Get implements client.Client +func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + r, err := uc.cache.getResource(obj) + if err != nil { + return err + } + + result := r.Get(). + NamespaceIfScoped(key.Namespace, r.isNamespaced()). + Resource(r.resource()). + Name(key.Name). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + + return result +} + +// List implements client.Client +func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + u, ok := obj.(*unstructured.UnstructuredList) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + if strings.HasSuffix(gvk.Kind, "List") { + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + + r, err := uc.cache.getResource(obj) + if err != nil { + return err + } + + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), uc.paramCodec). + Do(ctx). + Into(obj) +} + +func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error { + _, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(obj). + VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) +} + +func (uc *unstructuredClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + result := o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(data). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec). + Do(ctx). + Into(u) + + u.SetGroupVersionKind(gvk) + return result +} diff --git a/vendor/sigs.k8s.io/kind/LICENSE b/vendor/sigs.k8s.io/kind/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/sigs.k8s.io/kind/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
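The three controller-runtime client files vendored above fit together as follows: the typed and unstructured clients talk to the API server directly, while the delegating client routes reads through an informer cache and sends writes, reads of explicitly uncached types, and (by default) reads of unstructured objects straight to the API server. For orientation only, here is a minimal sketch of wiring that up by hand, roughly what controller-runtime's manager does internally; it assumes the v0.8-era API vendored in this diff, a cache that is already started, and the package and helper names (`e2eutil`, `buildDelegatingClient`) are illustrative, not part of this change set:

```go
package e2eutil

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// buildDelegatingClient composes a cache-backed reader with a direct API
// client: reads for most types are served from the informer cache, while
// writes, reads of the listed uncached types, and (by default) reads of
// unstructured objects go straight to the API server.
func buildDelegatingClient(cfg *rest.Config, c cache.Cache) (client.Client, error) {
	// Direct client, backed by the typed/unstructured clients above.
	directClient, err := client.New(cfg, client.Options{})
	if err != nil {
		return nil, err
	}
	return client.NewDelegatingClient(client.NewDelegatingClientInput{
		CacheReader: c, // cache.Cache satisfies client.Reader
		Client:      directClient,
		// Example: read Secrets directly from the API server rather
		// than caching them.
		UncachedObjects: []client.Object{&corev1.Secret{}},
	})
}
```

These vendored files land unmodified; the sketch is only to make the cache-vs-direct delegation split easier to follow when reading the code above.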
diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/defaults/image.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/defaults/image.go new file mode 100644 index 000000000..0d4559d4a --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/defaults/image.go @@ -0,0 +1,21 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package defaults contains cross-api-version configuration defaults +package defaults + +// Image is the default for the Config.Image field, aka the default node image. +const Image = "kindest/node:v1.20.2@sha256:8f7ea6e7642c0da54f04a7ee10431549c0257315b3a634f6ef2fecaaedb19bab" diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/default.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/default.go new file mode 100644 index 000000000..e06f1eeb1 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/default.go @@ -0,0 +1,84 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha4 + +import ( + "sigs.k8s.io/kind/pkg/apis/config/defaults" +) + +// SetDefaultsCluster sets uninitialized fields to their default value. 
+func SetDefaultsCluster(obj *Cluster) { + // default to a one node cluster + if len(obj.Nodes) == 0 { + obj.Nodes = []Node{ + { + Image: defaults.Image, + Role: ControlPlaneRole, + }, + } + } + // default the nodes + for i := range obj.Nodes { + a := &obj.Nodes[i] + SetDefaultsNode(a) + } + if obj.Networking.IPFamily == "" { + obj.Networking.IPFamily = "ipv4" + } + // default to listening on 127.0.0.1:randomPort on ipv4 + // and [::1]:randomPort on ipv6 + if obj.Networking.APIServerAddress == "" { + obj.Networking.APIServerAddress = "127.0.0.1" + if obj.Networking.IPFamily == "ipv6" { + obj.Networking.APIServerAddress = "::1" + } + } + // default the pod CIDR + if obj.Networking.PodSubnet == "" { + obj.Networking.PodSubnet = "10.244.0.0/16" + if obj.Networking.IPFamily == "ipv6" { + // node-mask cidr default is /64 so we need a larger subnet, we use /56 following best practices + // xref: https://www.ripe.net/publications/docs/ripe-690#4--size-of-end-user-prefix-assignment---48---56-or-something-else- + obj.Networking.PodSubnet = "fd00:10:244::/56" + } + } + // default the service CIDR using a different subnet than kubeadm default + // https://github.com/kubernetes/kubernetes/blob/746404f82a28e55e0b76ffa7e40306fb88eb3317/cmd/kubeadm/app/apis/kubeadm/v1beta2/defaults.go#L32 + // Note: kubeadm is using a /12 subnet, that may allocate a 2^20 bitmap in etcd + // we allocate a /16 subnet that allows 65535 services (current Kubernetes tested limit is O(10k) services) + if obj.Networking.ServiceSubnet == "" { + obj.Networking.ServiceSubnet = "10.96.0.0/16" + if obj.Networking.IPFamily == "ipv6" { + obj.Networking.ServiceSubnet = "fd00:10:96::/112" + } + } + // default the KubeProxyMode using iptables as it's already the default + if obj.Networking.KubeProxyMode == "" { + obj.Networking.KubeProxyMode = IPTablesMode + } +} + +// SetDefaultsNode sets uninitialized fields to their default value. +func SetDefaultsNode(obj *Node) { + if obj.Image == "" { + obj.Image = defaults.Image + } + + if obj.Role == "" { + obj.Role = ControlPlaneRole + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/doc.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/doc.go new file mode 100644 index 000000000..68c743e78 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha4 implements the v1alpha4 apiVersion of kind's cluster +// configuration +// +// +k8s:deepcopy-gen=package +package v1alpha4 diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/types.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/types.go new file mode 100644 index 000000000..cd831b3f1 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/types.go @@ -0,0 +1,307 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha4
+
+// Cluster contains kind cluster configuration
+type Cluster struct {
+	TypeMeta `yaml:",inline"`
+
+	// The cluster name.
+	// Optional, this will be overridden by --name / KIND_CLUSTER_NAME
+	Name string `yaml:"name,omitempty"`
+
+	// Nodes contains the list of nodes defined in the `kind` Cluster
+	// If unset this will default to a single control-plane node
+	// Note that if more than one control plane is specified, an external
+	// control plane load balancer will be provisioned implicitly
+	Nodes []Node `yaml:"nodes,omitempty"`
+
+	/* Advanced fields */
+
+	// Networking contains cluster wide network settings
+	Networking Networking `yaml:"networking,omitempty"`
+
+	// FeatureGates contains a map of Kubernetes feature gates to whether they
+	// are enabled. The feature gates specified here are passed to all Kubernetes components as flags or in config.
+	//
+	// https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/
+	FeatureGates map[string]bool `yaml:"featureGates,omitempty"`
+
+	// RuntimeConfig Keys and values are translated into --runtime-config values for kube-apiserver, separated by commas.
+	//
+	// Use this to enable alpha APIs.
+	RuntimeConfig map[string]string `yaml:"runtimeConfig,omitempty"`
+
+	// KubeadmConfigPatches are applied to the generated kubeadm config as
+	// merge patches. The `kind` field must match the target object, and
+	// if `apiVersion` is specified it will only be applied to matching objects.
+	//
+	// This should be an inline yaml blob-string
+	//
+	// https://tools.ietf.org/html/rfc7386
+	//
+	// The cluster-level patches are applied before the node-level patches.
+	KubeadmConfigPatches []string `yaml:"kubeadmConfigPatches,omitempty"`
+
+	// KubeadmConfigPatchesJSON6902 are applied to the generated kubeadm config
+	// as JSON 6902 patches. The `kind` field must match the target object, and
+	// if group or version are specified it will only be objects matching the
+	// apiVersion: group+"/"+version
+	//
+	// Name and Namespace are now ignored, but the fields continue to exist for
+	// backwards compatibility of parsing the config. The name of the generated
+	// config was/is always fixed as is the namespace so these fields have
+	// always been a no-op.
+	//
+	// https://tools.ietf.org/html/rfc6902
+	//
+	// The cluster-level patches are applied before the node-level patches.
+	KubeadmConfigPatchesJSON6902 []PatchJSON6902 `yaml:"kubeadmConfigPatchesJSON6902,omitempty"`
+
+	// ContainerdConfigPatches are applied to every node's containerd config
+	// in the order listed.
+	// These should be toml strings to be applied as merge patches
+	ContainerdConfigPatches []string `yaml:"containerdConfigPatches,omitempty"`
+
+	// ContainerdConfigPatchesJSON6902 are applied to every node's containerd config
+	// in the order listed.
+	// These should be YAML- or JSON-formatted RFC 6902 JSON patches
+	ContainerdConfigPatchesJSON6902 []string `yaml:"containerdConfigPatchesJSON6902,omitempty"`
+}
+
+// TypeMeta partially copies apimachinery/pkg/apis/meta/v1.TypeMeta
+// No need for a direct dependence; the fields are stable.
+type TypeMeta struct {
+	Kind       string `json:"kind,omitempty" yaml:"kind,omitempty"`
+	APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"`
+}
+
+// Node contains settings for a node in the `kind` Cluster.
+// A node in kind config represents a container that will be provisioned with all the components
+// required for the assigned role in the Kubernetes cluster
+type Node struct {
+	// Role defines the role of the node in the Kubernetes cluster
+	// created by kind
+	//
+	// Defaults to "control-plane"
+	Role NodeRole `yaml:"role,omitempty"`
+
+	// Image is the node image to use when creating this node
+	// If unset a default image will be used, see defaults.Image
+	Image string `yaml:"image,omitempty"`
+
+	/* Advanced fields */
+
+	// TODO: cri-like types should be inline instead
+	// ExtraMounts describes additional mount points for the node container
+	// These may be used to bind a hostPath
+	ExtraMounts []Mount `yaml:"extraMounts,omitempty"`
+
+	// ExtraPortMappings describes additional port mappings for the node container
+	// bound to a host port
+	ExtraPortMappings []PortMapping `yaml:"extraPortMappings,omitempty"`
+
+	// KubeadmConfigPatches are applied to the generated kubeadm config as
+	// merge patches. The `kind` field must match the target object, and
+	// if `apiVersion` is specified it will only be applied to matching objects.
+	//
+	// This should be an inline yaml blob-string
+	//
+	// https://tools.ietf.org/html/rfc7386
+	//
+	// The node-level patches will be applied after the cluster-level patches
+	// have been applied. (See Cluster.KubeadmConfigPatches)
+	KubeadmConfigPatches []string `yaml:"kubeadmConfigPatches,omitempty"`
+
+	// KubeadmConfigPatchesJSON6902 are applied to the generated kubeadm config
+	// as JSON 6902 patches. The `kind` field must match the target object, and
+	// if group or version are specified it will only be objects matching the
+	// apiVersion: group+"/"+version
+	//
+	// Name and Namespace are now ignored, but the fields continue to exist for
+	// backwards compatibility of parsing the config. The name of the generated
+	// config was/is always fixed as is the namespace so these fields have
+	// always been a no-op.
+	//
+	// https://tools.ietf.org/html/rfc6902
+	//
+	// The node-level patches will be applied after the cluster-level patches
+	// have been applied. (See Cluster.KubeadmConfigPatchesJSON6902)
+	KubeadmConfigPatchesJSON6902 []PatchJSON6902 `yaml:"kubeadmConfigPatchesJSON6902,omitempty"`
+}
+
+// NodeRole defines possible roles for nodes in a Kubernetes cluster managed by `kind`
+type NodeRole string
+
+const (
+	// ControlPlaneRole identifies a node that hosts a Kubernetes control-plane.
+	// NOTE: in single node clusters, control-plane nodes also act as worker
+	// nodes, in which case the taint will be removed. see:
+	// https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#control-plane-node-isolation
+	ControlPlaneRole NodeRole = "control-plane"
+	// WorkerRole identifies a node that hosts a Kubernetes worker
+	WorkerRole NodeRole = "worker"
+)
+
+// Networking contains cluster wide network settings
+type Networking struct {
+	// IPFamily is the network cluster model, currently it can be ipv4 or ipv6
+	IPFamily ClusterIPFamily `yaml:"ipFamily,omitempty"`
+	// APIServerPort is the listen port on the host for the Kubernetes API Server
+	// Defaults to a random port on the host obtained by kind
+	//
+	// NOTE: if you set the special value of `-1` then the node backend
+	// (docker, podman...) will be left to pick the port instead.
+	// This is potentially useful for remote hosts, BUT it means when the container
+	// is restarted it will be randomized. Leave this unset to allow kind to pick it.
+	APIServerPort int32 `yaml:"apiServerPort,omitempty"`
+	// APIServerAddress is the listen address on the host for the Kubernetes
+	// API Server. This should be an IP address.
+	//
+	// Defaults to 127.0.0.1
+	APIServerAddress string `yaml:"apiServerAddress,omitempty"`
+	// PodSubnet is the CIDR used for pod IPs
+	// kind will select a default if unspecified
+	PodSubnet string `yaml:"podSubnet,omitempty"`
+	// ServiceSubnet is the CIDR used for services VIPs
+	// kind will select a default if unspecified for IPv6
+	ServiceSubnet string `yaml:"serviceSubnet,omitempty"`
+	// If DisableDefaultCNI is true, kind will not install the default CNI setup.
+	// Instead the user should install their own CNI after creating the cluster.
+	DisableDefaultCNI bool `yaml:"disableDefaultCNI,omitempty"`
+	// KubeProxyMode defines if kube-proxy should operate in iptables or ipvs mode
+	// Defaults to 'iptables' mode
+	KubeProxyMode ProxyMode `yaml:"kubeProxyMode,omitempty"`
+}
+
+// ClusterIPFamily defines cluster network IP family
+type ClusterIPFamily string
+
+const (
+	// IPv4Family sets ClusterIPFamily to ipv4
+	IPv4Family ClusterIPFamily = "ipv4"
+	// IPv6Family sets ClusterIPFamily to ipv6
+	IPv6Family ClusterIPFamily = "ipv6"
+)
+
+// ProxyMode defines a proxy mode for kube-proxy
+type ProxyMode string
+
+const (
+	// IPTablesMode sets ProxyMode to iptables
+	IPTablesMode ProxyMode = "iptables"
+	// IPVSMode sets ProxyMode to ipvs
+	IPVSMode ProxyMode = "ipvs"
+)
+
+// PatchJSON6902 represents an inline kustomize json 6902 patch
+// https://tools.ietf.org/html/rfc6902
+type PatchJSON6902 struct {
+	// these fields specify the patch target resource
+	Group   string `yaml:"group"`
+	Version string `yaml:"version"`
+	Kind    string `yaml:"kind"`
+	// Patch should contain the contents of the json patch as a string
+	Patch string `yaml:"patch"`
+}
+
+/*
+These types are from
+https://github.com/kubernetes/kubernetes/blob/063e7ff358fdc8b0916e6f39beedc0d025734cb1/pkg/kubelet/apis/cri/runtime/v1alpha2/api.pb.go#L183
+*/
+
+// Mount specifies a host volume to mount into a container.
+// This is a close copy of the upstream cri Mount type
+// see: k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2
+// It additionally serializes the "propagation" field with the string enum
+// names on disk as opposed to the int32 values, and the serialized field names
+// have been made closer to core/v1 VolumeMount field names
+// In yaml this looks like:
+//  containerPath: /foo
+//  hostPath: /bar
+//  readOnly: true
+//  selinuxRelabel: false
+//  propagation: None
+// Propagation may be one of: None, HostToContainer, Bidirectional
+type Mount struct {
+	// Path of the mount within the container.
+	ContainerPath string `yaml:"containerPath,omitempty"`
+	// Path of the mount on the host. If the hostPath doesn't exist, then runtimes
+	// should report an error. If the hostpath is a symbolic link, runtimes should
+	// follow the symlink and mount the real destination into the container.
+	HostPath string `yaml:"hostPath,omitempty"`
+	// If set, the mount is read-only.
+	Readonly bool `yaml:"readOnly,omitempty"`
+	// If set, the mount needs SELinux relabeling.
+	SelinuxRelabel bool `yaml:"selinuxRelabel,omitempty"`
+	// Requested propagation mode.
+	Propagation MountPropagation `yaml:"propagation,omitempty"`
+}
+
+// PortMapping specifies a host port mapped into a container port.
+// In yaml this looks like:
+//  containerPort: 80
+//  hostPort: 8000
+//  listenAddress: 127.0.0.1
+//  protocol: TCP
+type PortMapping struct {
+	// Port within the container.
+	ContainerPort int32 `yaml:"containerPort,omitempty"`
+	// Port on the host.
+	//
+	// If unset, a random port will be selected.
+	//
+	// NOTE: if you set the special value of `-1` then the node backend
+	// (docker, podman...) will be left to pick the port instead.
+	// This is potentially useful for remote hosts, BUT it means when the container
+	// is restarted it will be randomized. Leave this unset to allow kind to pick it.
+	HostPort int32 `yaml:"hostPort,omitempty"`
+	// TODO: add protocol (tcp/udp) and port-ranges
+	ListenAddress string `yaml:"listenAddress,omitempty"`
+	// Protocol (TCP/UDP)
+	Protocol PortMappingProtocol `yaml:"protocol,omitempty"`
+}
+
+// MountPropagation represents an "enum" for mount propagation options,
+// see also Mount.
+type MountPropagation string
+
+const (
+	// MountPropagationNone specifies no mount propagation
+	// ("private" in Linux terminology).
+	MountPropagationNone MountPropagation = "None"
+	// MountPropagationHostToContainer specifies that mounts get propagated
+	// from the host to the container ("rslave" in Linux).
+	MountPropagationHostToContainer MountPropagation = "HostToContainer"
+	// MountPropagationBidirectional specifies that mounts get propagated from
+	// the host to the container and from the container to the host
+	// ("rshared" in Linux).
+	MountPropagationBidirectional MountPropagation = "Bidirectional"
+)
+
+// PortMappingProtocol represents an "enum" for port mapping protocol options,
+// see also PortMapping.
+type PortMappingProtocol string
+
+const (
+	// PortMappingProtocolTCP specifies TCP protocol
+	PortMappingProtocolTCP PortMappingProtocol = "TCP"
+	// PortMappingProtocolUDP specifies UDP protocol
+	PortMappingProtocolUDP PortMappingProtocol = "UDP"
+	// PortMappingProtocolSCTP specifies SCTP protocol
+	PortMappingProtocolSCTP PortMappingProtocol = "SCTP"
+)
diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/yaml.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/yaml.go
new file mode 100644
index 000000000..d34d4c8cc
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/yaml.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha4
+
+import (
+	"strings"
+
+	"sigs.k8s.io/kind/pkg/errors"
+)
+
+/*
+Custom YAML (de)serialization for these types
+*/
+
+// UnmarshalYAML implements custom YAML decoding
+// https://godoc.org/gopkg.in/yaml.v3
+func (m *Mount) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	// first unmarshal into the alias type (to avoid a recursion loop on unmarshal)
+	type MountAlias Mount
+	var a MountAlias
+	if err := unmarshal(&a); err != nil {
+		return err
+	}
+	// now handle propagation
+	switch a.Propagation {
+	case "": // unset, will be defaulted
+	case MountPropagationNone:
+	case MountPropagationHostToContainer:
+	case MountPropagationBidirectional:
+	default:
+		return errors.Errorf("Unknown MountPropagation: %q", a.Propagation)
+	}
+	// and copy over the fields
+	*m = Mount(a)
+	return nil
+}
+
+// UnmarshalYAML implements custom YAML decoding
+// https://godoc.org/gopkg.in/yaml.v3
+func (p *PortMapping) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	// first unmarshal into the alias type (to avoid a recursion loop on unmarshal)
+	type PortMappingAlias PortMapping
+	var a PortMappingAlias
+	if err := unmarshal(&a); err != nil {
+		return err
+	}
+	// now handle the protocol field
+	a.Protocol = PortMappingProtocol(strings.ToUpper(string(a.Protocol)))
+	switch a.Protocol {
+	case "": // unset, will be defaulted
+	case PortMappingProtocolTCP:
+	case PortMappingProtocolUDP:
+	case PortMappingProtocolSCTP:
+	default:
+		return errors.Errorf("Unknown PortMappingProtocol: %q", a.Protocol)
+	}
+	// and copy over the fields
+	*p = PortMapping(a)
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/zz_generated.deepcopy.go
new file mode 100644
index 000000000..6fa61d4e3
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/zz_generated.deepcopy.go
@@ -0,0 +1,196 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha4 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]Node, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.Networking = in.Networking + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.RuntimeConfig != nil { + in, out := &in.RuntimeConfig, &out.RuntimeConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.KubeadmConfigPatches != nil { + in, out := &in.KubeadmConfigPatches, &out.KubeadmConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatchesJSON6902 != nil { + in, out := &in.KubeadmConfigPatchesJSON6902, &out.KubeadmConfigPatchesJSON6902 + *out = make([]PatchJSON6902, len(*in)) + copy(*out, *in) + } + if in.ContainerdConfigPatches != nil { + in, out := &in.ContainerdConfigPatches, &out.ContainerdConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ContainerdConfigPatchesJSON6902 != nil { + in, out := &in.ContainerdConfigPatchesJSON6902, &out.ContainerdConfigPatchesJSON6902 + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mount) DeepCopyInto(out *Mount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mount. +func (in *Mount) DeepCopy() *Mount { + if in == nil { + return nil + } + out := new(Mount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Networking) DeepCopyInto(out *Networking) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking. +func (in *Networking) DeepCopy() *Networking { + if in == nil { + return nil + } + out := new(Networking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Node) DeepCopyInto(out *Node) { + *out = *in + if in.ExtraMounts != nil { + in, out := &in.ExtraMounts, &out.ExtraMounts + *out = make([]Mount, len(*in)) + copy(*out, *in) + } + if in.ExtraPortMappings != nil { + in, out := &in.ExtraPortMappings, &out.ExtraPortMappings + *out = make([]PortMapping, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatches != nil { + in, out := &in.KubeadmConfigPatches, &out.KubeadmConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatchesJSON6902 != nil { + in, out := &in.KubeadmConfigPatchesJSON6902, &out.KubeadmConfigPatchesJSON6902 + *out = make([]PatchJSON6902, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchJSON6902) DeepCopyInto(out *PatchJSON6902) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchJSON6902. +func (in *PatchJSON6902) DeepCopy() *PatchJSON6902 { + if in == nil { + return nil + } + out := new(PatchJSON6902) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortMapping) DeepCopyInto(out *PortMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortMapping. +func (in *PortMapping) DeepCopy() *PortMapping { + if in == nil { + return nil + } + out := new(PortMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypeMeta) DeepCopyInto(out *TypeMeta) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeMeta. +func (in *TypeMeta) DeepCopy() *TypeMeta { + if in == nil { + return nil + } + out := new(TypeMeta) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/constants/constants.go b/vendor/sigs.k8s.io/kind/pkg/cluster/constants/constants.go new file mode 100644 index 000000000..fb9c0734c --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/constants/constants.go @@ -0,0 +1,49 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package constants contains well known constants for kind clusters +package constants + +// DefaultClusterName is the default cluster Context name +const DefaultClusterName = "kind" + +/* node role value constants */ +const ( + // ControlPlaneNodeRoleValue identifies a node that hosts a Kubernetes + // control-plane. 
+ // + // NOTE: in single node clusters, control-plane nodes act as worker nodes + ControlPlaneNodeRoleValue string = "control-plane" + + // WorkerNodeRoleValue identifies a node that hosts a Kubernetes worker + WorkerNodeRoleValue string = "worker" + + // ExternalLoadBalancerNodeRoleValue identifies a node that hosts an + // external load balancer for the API server in HA configurations. + // + // Please note that `kind` nodes hosting external load balancer are not + // kubernetes nodes + ExternalLoadBalancerNodeRoleValue string = "external-load-balancer" + + // ExternalEtcdNodeRoleValue identifies a node that hosts an external-etcd + // instance. + // + // WARNING: this node type is not yet implemented! + // + // Please note that `kind` nodes hosting external etcd are not + // kubernetes nodes + ExternalEtcdNodeRoleValue string = "external-etcd" +) diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/createoption.go b/vendor/sigs.k8s.io/kind/pkg/cluster/createoption.go new file mode 100644 index 000000000..699bac510 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/createoption.go @@ -0,0 +1,126 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "time" + + "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + internalcreate "sigs.k8s.io/kind/pkg/cluster/internal/create" + internalencoding "sigs.k8s.io/kind/pkg/internal/apis/config/encoding" +) + +// CreateOption is a Provider.Create option +type CreateOption interface { + apply(*internalcreate.ClusterOptions) error +} + +type createOptionAdapter func(*internalcreate.ClusterOptions) error + +func (c createOptionAdapter) apply(o *internalcreate.ClusterOptions) error { + return c(o) +} + +// CreateWithConfigFile configures the config file path to use +func CreateWithConfigFile(path string) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + var err error + o.Config, err = internalencoding.Load(path) + return err + }) +} + +// CreateWithRawConfig configures the config to use from raw (yaml) bytes +func CreateWithRawConfig(raw []byte) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + var err error + o.Config, err = internalencoding.Parse(raw) + return err + }) +} + +// CreateWithV1Alpha4Config configures the cluster with a v1alpha4 config +func CreateWithV1Alpha4Config(config *v1alpha4.Cluster) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + o.Config = internalencoding.V1Alpha4ToInternal(config) + return nil + }) +} + +// CreateWithNodeImage overrides the image on all nodes in config +// as an easy way to change the Kubernetes version +func CreateWithNodeImage(nodeImage string) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + o.NodeImage = nodeImage + return nil + }) +} + +// CreateWithRetain disables deletion of nodes and any other cleanup +// that would normally occur after a failure to create +// This is mainly used for 
debugging purposes
+func CreateWithRetain(retain bool) CreateOption {
+	return createOptionAdapter(func(o *internalcreate.ClusterOptions) error {
+		o.Retain = retain
+		return nil
+	})
+}
+
+// CreateWithWaitForReady configures a maximum wait time for the control plane
+// node(s) to be ready. By default no waiting is performed
+func CreateWithWaitForReady(waitTime time.Duration) CreateOption {
+	return createOptionAdapter(func(o *internalcreate.ClusterOptions) error {
+		o.WaitForReady = waitTime
+		return nil
+	})
+}
+
+// CreateWithKubeconfigPath sets the explicit --kubeconfig path
+func CreateWithKubeconfigPath(explicitPath string) CreateOption {
+	return createOptionAdapter(func(o *internalcreate.ClusterOptions) error {
+		o.KubeconfigPath = explicitPath
+		return nil
+	})
+}
+
+// CreateWithStopBeforeSettingUpKubernetes enables skipping setting up
+// kubernetes (kubeadm init etc.) after creating node containers
+// This generally shouldn't be used and is only lightly supported, but allows
+// provisioning node containers for experimentation
+func CreateWithStopBeforeSettingUpKubernetes(stopBeforeSettingUpKubernetes bool) CreateOption {
+	return createOptionAdapter(func(o *internalcreate.ClusterOptions) error {
+		o.StopBeforeSettingUpKubernetes = stopBeforeSettingUpKubernetes
+		return nil
+	})
+}
+
+// CreateWithDisplayUsage enables displaying usage if displayUsage is true
+func CreateWithDisplayUsage(displayUsage bool) CreateOption {
+	return createOptionAdapter(func(o *internalcreate.ClusterOptions) error {
+		o.DisplayUsage = displayUsage
+		return nil
+	})
+}
+
+// CreateWithDisplaySalutation enables displaying a salutation at the end of
+// cluster creation if displaySalutation is true
+func CreateWithDisplaySalutation(displaySalutation bool) CreateOption {
+	return createOptionAdapter(func(o *internalcreate.ClusterOptions) error {
+		o.DisplaySalutation = displaySalutation
+		return nil
+	})
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/doc.go
new file mode 100644
index 000000000..af19392fd
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package cluster implements kind kubernetes-in-docker cluster management
+package cluster
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/action.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/action.go
new file mode 100644
index 000000000..61dd53cbf
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/action.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package actions + +import ( + "sync" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/cli" + "sigs.k8s.io/kind/pkg/log" + + "sigs.k8s.io/kind/pkg/cluster/internal/providers" +) + +// Action defines a step of bringing up a kind cluster after initial node +// container creation +type Action interface { + Execute(ctx *ActionContext) error +} + +// ActionContext is data supplied to all actions +type ActionContext struct { + Logger log.Logger + Status *cli.Status + Config *config.Cluster + Provider providers.Provider + cache *cachedData +} + +// NewActionContext returns a new ActionContext +func NewActionContext( + logger log.Logger, + status *cli.Status, + provider providers.Provider, + cfg *config.Cluster, +) *ActionContext { + return &ActionContext{ + Logger: logger, + Status: status, + Provider: provider, + Config: cfg, + cache: &cachedData{}, + } +} + +type cachedData struct { + mu sync.RWMutex + nodes []nodes.Node +} + +func (cd *cachedData) getNodes() []nodes.Node { + cd.mu.RLock() + defer cd.mu.RUnlock() + return cd.nodes +} + +func (cd *cachedData) setNodes(n []nodes.Node) { + cd.mu.Lock() + defer cd.mu.Unlock() + cd.nodes = n +} + +// Nodes returns the list of cluster nodes, this is a cached call +func (ac *ActionContext) Nodes() ([]nodes.Node, error) { + cachedNodes := ac.cache.getNodes() + if cachedNodes != nil { + return cachedNodes, nil + } + n, err := ac.Provider.ListNodes(ac.Config.Name) + if err != nil { + return nil, err + } + ac.cache.setNodes(n) + return n, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config/config.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config/config.go new file mode 100644 index 000000000..cfe970f38 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config/config.go @@ -0,0 +1,262 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package config implements the kubeadm config action +package config + +import ( + "bytes" + "fmt" + "net" + "strings" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" + + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions" + "sigs.k8s.io/kind/pkg/cluster/internal/kubeadm" + "sigs.k8s.io/kind/pkg/cluster/internal/patch" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// Action implements action for creating the node config files +type Action struct{} + +// NewAction returns a new action for creating the config files +func NewAction() actions.Action { + return &Action{} +} + +// Execute runs the action +func (a *Action) Execute(ctx *actions.ActionContext) error { + ctx.Status.Start("Writing configuration 📜") + defer ctx.Status.End(false) + + allNodes, err := ctx.Nodes() + if err != nil { + return err + } + + controlPlaneEndpoint, err := ctx.Provider.GetAPIServerInternalEndpoint(ctx.Config.Name) + if err != nil { + return err + } + + // create kubeadm init config + fns := []func() error{} + + provider := fmt.Sprintf("%s", ctx.Provider) + configData := kubeadm.ConfigData{ + NodeProvider: provider, + ClusterName: ctx.Config.Name, + ControlPlaneEndpoint: controlPlaneEndpoint, + APIBindPort: common.APIServerInternalPort, + APIServerAddress: ctx.Config.Networking.APIServerAddress, + Token: kubeadm.Token, + PodSubnet: ctx.Config.Networking.PodSubnet, + KubeProxyMode: string(ctx.Config.Networking.KubeProxyMode), + ServiceSubnet: ctx.Config.Networking.ServiceSubnet, + ControlPlane: true, + IPv6: ctx.Config.Networking.IPFamily == "ipv6", + FeatureGates: ctx.Config.FeatureGates, + RuntimeConfig: ctx.Config.RuntimeConfig, + } + + kubeadmConfigPlusPatches := func(node nodes.Node, data kubeadm.ConfigData) func() error { + return func() error { + data.NodeName = node.String() + kubeadmConfig, err := getKubeadmConfig(ctx.Config, data, node, provider) + if err != nil { + // TODO(bentheelder): logging here + return errors.Wrap(err, "failed to generate kubeadm config content") + } + + ctx.Logger.V(2).Infof("Using the following kubeadm config for node %s:\n%s", node.String(), kubeadmConfig) + return writeKubeadmConfig(kubeadmConfig, node) + } + } + + // create the kubeadm join configuration for control plane nodes + controlPlanes, err := nodeutils.ControlPlaneNodes(allNodes) + if err != nil { + return err + } + + for _, node := range controlPlanes { + node := node // capture loop variable + configData := configData // copy config data + fns = append(fns, kubeadmConfigPlusPatches(node, configData)) + } + + // then create the kubeadm join config for the worker nodes if any + workers, err := nodeutils.SelectNodesByRole(allNodes, constants.WorkerNodeRoleValue) + if err != nil { + return err + } + if len(workers) > 0 { + // create the workers concurrently + for _, node := range workers { + node := node // capture loop variable + configData := configData // copy config data + configData.ControlPlane = false + fns = append(fns, kubeadmConfigPlusPatches(node, configData)) + } + } + + // Create the kubeadm config in all nodes concurrently + if err := errors.UntilErrorConcurrent(fns); err != nil { + return err + } + + // if we have containerd config, patch all the nodes concurrently + if len(ctx.Config.ContainerdConfigPatches) > 0 || len(ctx.Config.ContainerdConfigPatchesJSON6902) > 0 { + // we only want to patch kubernetes nodes + // 
this is a cheap workaround to re-use the already listed
+		// workers + control planes
+		kubeNodes := append([]nodes.Node{}, controlPlanes...)
+		kubeNodes = append(kubeNodes, workers...)
+		fns := make([]func() error, len(kubeNodes))
+		for i, node := range kubeNodes {
+			node := node // capture loop variable
+			fns[i] = func() error {
+				// read and patch the config
+				const containerdConfigPath = "/etc/containerd/config.toml"
+				var buff bytes.Buffer
+				if err := node.Command("cat", containerdConfigPath).SetStdout(&buff).Run(); err != nil {
+					return errors.Wrap(err, "failed to read containerd config from node")
+				}
+				patched, err := patch.TOML(buff.String(), ctx.Config.ContainerdConfigPatches, ctx.Config.ContainerdConfigPatchesJSON6902)
+				if err != nil {
+					return errors.Wrap(err, "failed to patch containerd config")
+				}
+				if err := nodeutils.WriteFile(node, containerdConfigPath, patched); err != nil {
+					return errors.Wrap(err, "failed to write patched containerd config")
+				}
+				// restart containerd now that we've re-configured it
+				// skip if systemd (and with it containerd) is not running
+				if err := node.Command("bash", "-c", `! systemctl is-system-running || systemctl restart containerd`).Run(); err != nil {
+					return errors.Wrap(err, "failed to restart containerd after patching config")
+				}
+				return nil
+			}
+		}
+		if err := errors.UntilErrorConcurrent(fns); err != nil {
+			return err
+		}
+	}
+
+	// mark success
+	ctx.Status.End(true)
+	return nil
+}
+
+// getKubeadmConfig generates the kubeadm config contents for the cluster
+// by running data through the template and applying patches as needed.
+func getKubeadmConfig(cfg *config.Cluster, data kubeadm.ConfigData, node nodes.Node, provider string) (path string, err error) {
+	kubeVersion, err := nodeutils.KubeVersion(node)
+	if err != nil {
+		// TODO(bentheelder): logging here
+		return "", errors.Wrap(err, "failed to get kubernetes version from node")
+	}
+	data.KubernetesVersion = kubeVersion
+
+	// TODO: gross hack!
+	// identify node in config by matching name (since these are named in order)
+	// we should really just streamline the bootstrap code and maintain
+	// this mapping ... 
something for the next major refactor + var configNode *config.Node + namer := common.MakeNodeNamer("") + for i := range cfg.Nodes { + n := &cfg.Nodes[i] + nodeSuffix := namer(string(n.Role)) + if strings.HasSuffix(node.String(), nodeSuffix) { + configNode = n + } + } + if configNode == nil { + return "", errors.Errorf("failed to match node %q to config", node.String()) + } + + // get the node ip address + nodeAddress, nodeAddressIPv6, err := node.IP() + if err != nil { + return "", errors.Wrap(err, "failed to get IP for node") + } + + data.NodeAddress = nodeAddress + // configure the right protocol addresses + if cfg.Networking.IPFamily == "ipv6" { + if ip := net.ParseIP(nodeAddressIPv6); ip.To16() == nil { + return "", errors.Errorf("failed to get IPv6 address for node %s; is %s configured to use IPv6 correctly?", node.String(), provider) + } + data.NodeAddress = nodeAddressIPv6 + } + + // generate the config contents + cf, err := kubeadm.Config(data) + if err != nil { + return "", err + } + + clusterPatches, clusterJSONPatches := allPatchesFromConfig(cfg) + // apply cluster-level patches first + patchedConfig, err := patch.KubeYAML(cf, clusterPatches, clusterJSONPatches) + if err != nil { + return "", err + } + + // if needed, apply current node's patches + if len(configNode.KubeadmConfigPatches) > 0 || len(configNode.KubeadmConfigPatchesJSON6902) > 0 { + patchedConfig, err = patch.KubeYAML(patchedConfig, configNode.KubeadmConfigPatches, configNode.KubeadmConfigPatchesJSON6902) + if err != nil { + return "", err + } + } + + // fix all the patches to have name metadata matching the generated config + return removeMetadata(patchedConfig), nil +} + +// trims out the metadata.name we put in the config for kustomize matching, +// kubeadm will complain about this otherwise +func removeMetadata(kustomized string) string { + return strings.Replace( + kustomized, + `metadata: + name: config +`, + "", + -1, + ) +} + +func allPatchesFromConfig(cfg *config.Cluster) (patches []string, jsonPatches []config.PatchJSON6902) { + return cfg.KubeadmConfigPatches, cfg.KubeadmConfigPatchesJSON6902 +} + +// writeKubeadmConfig writes the kubeadm configuration in the specified node +func writeKubeadmConfig(kubeadmConfig string, node nodes.Node) error { + // copy the config to the node + if err := nodeutils.WriteFile(node, "/kind/kubeadm.conf", kubeadmConfig); err != nil { + // TODO(bentheelder): logging here + return errors.Wrap(err, "failed to copy kubeadm config to node") + } + + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni/cni.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni/cni.go new file mode 100644 index 000000000..94f9ed41c --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni/cni.go @@ -0,0 +1,97 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package installcni implements the install CNI action +package installcni + +import ( + "bytes" + "strings" + "text/template" + + "sigs.k8s.io/kind/pkg/errors" + + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" +) + +type action struct{} + +// NewAction returns a new action for installing default CNI +func NewAction() actions.Action { + return &action{} +} + +// Execute runs the action +func (a *action) Execute(ctx *actions.ActionContext) error { + ctx.Status.Start("Installing CNI 🔌") + defer ctx.Status.End(false) + + allNodes, err := ctx.Nodes() + if err != nil { + return err + } + + // get the target node for this task + controlPlanes, err := nodeutils.ControlPlaneNodes(allNodes) + if err != nil { + return err + } + node := controlPlanes[0] // kind expects at least one always + + // read the manifest from the node + var raw bytes.Buffer + if err := node.Command("cat", "/kind/manifests/default-cni.yaml").SetStdout(&raw).Run(); err != nil { + return errors.Wrap(err, "failed to read CNI manifest") + } + manifest := raw.String() + + // TODO: remove this check? + // backwards compatibility for mounting your own manifest file to the default + // location + // NOTE: this is intentionally undocumented, as an internal implementation + // detail. Going forward users should disable the default CNI and install + // their own, or use the default. The internal templating mechanism is + // not intended for external usage and is unstable. + if strings.Contains(manifest, "would you kindly template this file") { + t, err := template.New("cni-manifest").Parse(manifest) + if err != nil { + return errors.Wrap(err, "failed to parse CNI manifest template") + } + var out bytes.Buffer + err = t.Execute(&out, &struct { + PodSubnet string + }{ + PodSubnet: ctx.Config.Networking.PodSubnet, + }) + if err != nil { + return errors.Wrap(err, "failed to execute CNI manifest template") + } + manifest = out.String() + } + + // install the manifest + if err := node.Command( + "kubectl", "create", "--kubeconfig=/etc/kubernetes/admin.conf", + "-f", "-", + ).SetStdin(strings.NewReader(manifest)).Run(); err != nil { + return errors.Wrap(err, "failed to apply overlay network") + } + + // mark success + ctx.Status.End(true) + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage/storage.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage/storage.go new file mode 100644 index 000000000..fa9a095b0 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage/storage.go @@ -0,0 +1,99 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+// Package installstorage implements an action to install a default
+// storageclass
+package installstorage
+
+import (
+	"bytes"
+	"strings"
+
+	"sigs.k8s.io/kind/pkg/cluster/nodes"
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/log"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions"
+	"sigs.k8s.io/kind/pkg/cluster/nodeutils"
+)
+
+type action struct{}
+
+// NewAction returns a new action for installing storage
+func NewAction() actions.Action {
+	return &action{}
+}
+
+// Execute runs the action
+func (a *action) Execute(ctx *actions.ActionContext) error {
+	ctx.Status.Start("Installing StorageClass 💾")
+	defer ctx.Status.End(false)
+
+	allNodes, err := ctx.Nodes()
+	if err != nil {
+		return err
+	}
+
+	// get the target node for this task
+	controlPlanes, err := nodeutils.ControlPlaneNodes(allNodes)
+	if err != nil {
+		return err
+	}
+	node := controlPlanes[0] // kind expects at least one always
+
+	// add the default storage class
+	if err := addDefaultStorage(ctx.Logger, node); err != nil {
+		return errors.Wrap(err, "failed to add default storage class")
+	}
+
+	// mark success
+	ctx.Status.End(true)
+	return nil
+}
+
+// legacy default storage class
+// we need this for e2es (StatefulSet)
+// newer kind images ship a storage driver manifest
+const defaultStorageManifest = `# host-path based default storage class
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  namespace: kube-system
+  name: standard
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+provisioner: kubernetes.io/host-path`
+
+func addDefaultStorage(logger log.Logger, controlPlane nodes.Node) error {
+	// start with fallback default, and then try to get the newer kind node
+	// storage manifest if present
+	manifest := defaultStorageManifest
+	var raw bytes.Buffer
+	if err := controlPlane.Command("cat", "/kind/manifests/default-storage.yaml").SetStdout(&raw).Run(); err != nil {
+		logger.Warn("Could not read storage manifest, falling back on old k8s.io/host-path default ...")
+	} else {
+		manifest = raw.String()
+	}
+
+	// apply the manifest
+	in := strings.NewReader(manifest)
+	cmd := controlPlane.Command(
+		"kubectl",
+		"--kubeconfig=/etc/kubernetes/admin.conf", "apply", "-f", "-",
+	)
+	cmd.SetStdin(in)
+	return cmd.Run()
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit/init.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit/init.go
new file mode 100644
index 000000000..5ed3dfa12
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit/init.go
@@ -0,0 +1,115 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package kubeadminit implements the kubeadm init action
+package kubeadminit
+
+import (
+	"strings"
+
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/exec"
+
+	"sigs.k8s.io/kind/pkg/cluster/nodeutils"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions"
+)
+
+// action implements Action for executing kubeadm init,
+// plus a set of default post-init operations, e.g. installing the
+// CNI network plugin.
+type action struct{}
+
+// NewAction returns a new action for kubeadm init
+func NewAction() actions.Action {
+	return &action{}
+}
+
+// Execute runs the action
+func (a *action) Execute(ctx *actions.ActionContext) error {
+	ctx.Status.Start("Starting control-plane 🕹️")
+	defer ctx.Status.End(false)
+
+	allNodes, err := ctx.Nodes()
+	if err != nil {
+		return err
+	}
+
+	// get the target node for this task
+	// TODO: eliminate the concept of bootstrapcontrolplane node entirely
+	// outside this method
+	node, err := nodeutils.BootstrapControlPlaneNode(allNodes)
+	if err != nil {
+		return err
+	}
+
+	// run kubeadm
+	cmd := node.Command(
+		// init because this is the control plane node
+		"kubeadm", "init",
+		// skip preflight checks, as these have undesirable side effects
+		// and don't tell us much. requires kubeadm 1.13+
+		"--skip-phases=preflight",
+		// specify our generated config file
+		"--config=/kind/kubeadm.conf",
+		"--skip-token-print",
+		// increase verbosity for debugging
+		"--v=6",
+	)
+	lines, err := exec.CombinedOutputLines(cmd)
+	ctx.Logger.V(3).Info(strings.Join(lines, "\n"))
+	if err != nil {
+		return errors.Wrap(err, "failed to init node with kubeadm")
+	}
+
+	// copy some files to the other control plane nodes
+	otherControlPlanes, err := nodeutils.SecondaryControlPlaneNodes(allNodes)
+	if err != nil {
+		return err
+	}
+	for _, otherNode := range otherControlPlanes {
+		for _, file := range []string{
+			// copy over admin config so we can use any control plane to get it later
+			"/etc/kubernetes/admin.conf",
+			// copy over certs
+			"/etc/kubernetes/pki/ca.crt", "/etc/kubernetes/pki/ca.key",
+			"/etc/kubernetes/pki/front-proxy-ca.crt", "/etc/kubernetes/pki/front-proxy-ca.key",
+			"/etc/kubernetes/pki/sa.pub", "/etc/kubernetes/pki/sa.key",
+			// TODO: if we gain external etcd support these will be
+			// handled differently
+			"/etc/kubernetes/pki/etcd/ca.crt", "/etc/kubernetes/pki/etcd/ca.key",
+		} {
+			if err := nodeutils.CopyNodeToNode(node, otherNode, file); err != nil {
+				return errors.Wrap(err, "failed to copy admin kubeconfig")
+			}
+		}
+	}
+
+	// if we are only provisioning one node, remove the master taint
+	// https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#master-isolation
+	if len(allNodes) == 1 {
+		if err := node.Command(
+			"kubectl", "--kubeconfig=/etc/kubernetes/admin.conf",
+			"taint", "nodes", "--all", "node-role.kubernetes.io/master-",
+		).Run(); err != nil {
+			return errors.Wrap(err, "failed to remove master taint")
+		}
+	}
+
+	// mark success
+	ctx.Status.End(true)
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin/join.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin/join.go
new file mode 100644
index 000000000..fbd33555d
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin/join.go
@@ -0,0 +1,139 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package kubeadmjoin implements the kubeadm join action
+package kubeadmjoin
+
+import (
+	"strings"
+
+	"sigs.k8s.io/kind/pkg/cluster/constants"
+	"sigs.k8s.io/kind/pkg/cluster/nodes"
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/exec"
+	"sigs.k8s.io/kind/pkg/log"
+
+	"sigs.k8s.io/kind/pkg/cluster/nodeutils"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions"
+)
+
+// Action implements an action for running kubeadm join on the secondary
+// control-plane and worker nodes.
+type Action struct{}
+
+// NewAction returns a new action for running kubeadm join
+func NewAction() actions.Action {
+	return &Action{}
+}
+
+// Execute runs the action
+func (a *Action) Execute(ctx *actions.ActionContext) error {
+	allNodes, err := ctx.Nodes()
+	if err != nil {
+		return err
+	}
+
+	// join secondary control plane nodes if any
+	secondaryControlPlanes, err := nodeutils.SecondaryControlPlaneNodes(allNodes)
+	if err != nil {
+		return err
+	}
+	if len(secondaryControlPlanes) > 0 {
+		if err := joinSecondaryControlPlanes(ctx, secondaryControlPlanes); err != nil {
+			return err
+		}
+	}
+
+	// then join worker nodes if any
+	workers, err := nodeutils.SelectNodesByRole(allNodes, constants.WorkerNodeRoleValue)
+	if err != nil {
+		return err
+	}
+	if len(workers) > 0 {
+		if err := joinWorkers(ctx, workers); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func joinSecondaryControlPlanes(
+	ctx *actions.ActionContext,
+	secondaryControlPlanes []nodes.Node,
+) error {
+	ctx.Status.Start("Joining more control-plane nodes 🎮")
+	defer ctx.Status.End(false)
+
+	// TODO(bentheelder): it's too bad we can't do this concurrently
+	// (this is not safe currently)
+	for _, node := range secondaryControlPlanes {
+		node := node // capture loop variable
+		if err := runKubeadmJoin(ctx.Logger, node); err != nil {
+			return err
+		}
+	}
+
+	ctx.Status.End(true)
+	return nil
+}
+
+func joinWorkers(
+	ctx *actions.ActionContext,
+	workers []nodes.Node,
+) error {
+	ctx.Status.Start("Joining worker nodes 🚜")
+	defer ctx.Status.End(false)
+
+	// create the workers concurrently
+	fns := []func() error{}
+	for _, node := range workers {
+		node := node // capture loop variable
+		fns = append(fns, func() error {
+			return runKubeadmJoin(ctx.Logger, node)
+		})
+	}
+	if err := errors.UntilErrorConcurrent(fns); err != nil {
+		return err
+	}
+
+	ctx.Status.End(true)
+	return nil
+}
+
+// runKubeadmJoin executes the kubeadm join command
+func runKubeadmJoin(logger log.Logger, node nodes.Node) error {
+	// run kubeadm join
+	// TODO(bentheelder): this should be using the config file
+	cmd := node.Command(
+		"kubeadm", "join",
+		// the join command uses the config file generated in a well known location
+		"--config", "/kind/kubeadm.conf",
+		// skip preflight checks, as these have undesirable side effects
+		// and don't tell us much. requires kubeadm 1.13+
+		"--skip-phases=preflight",
+		// increase verbosity for debugging
+		"--v=6",
+	)
+	lines, err := exec.CombinedOutputLines(cmd)
+	logger.V(3).Info(strings.Join(lines, "\n"))
+	if err != nil {
+		return errors.Wrap(err, "failed to join node with kubeadm")
+	}
+
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer/loadbalancer.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer/loadbalancer.go
new file mode 100644
index 000000000..bcc2fd107
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer/loadbalancer.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package loadbalancer implements the load balancer configuration action
+package loadbalancer
+
+import (
+	"fmt"
+
+	"sigs.k8s.io/kind/pkg/cluster/constants"
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/internal/apis/config"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions"
+	"sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer"
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers/common"
+	"sigs.k8s.io/kind/pkg/cluster/nodeutils"
+)
+
+// Action implements an action for configuring and starting the
+// external load balancer in front of the control-plane nodes.
+type Action struct{}
+
+// NewAction returns a new Action for configuring the load balancer
+func NewAction() actions.Action {
+	return &Action{}
+}
+
+// Execute runs the action
+func (a *Action) Execute(ctx *actions.ActionContext) error {
+	allNodes, err := ctx.Nodes()
+	if err != nil {
+		return err
+	}
+
+	// identify external load balancer node
+	loadBalancerNode, err := nodeutils.ExternalLoadBalancerNode(allNodes)
+	if err != nil {
+		return err
+	}
+
+	// if there's no loadbalancer we're done
+	if loadBalancerNode == nil {
+		return nil
+	}
+
+	// otherwise notify the user
+	ctx.Status.Start("Configuring the external load balancer ⚖️")
+	defer ctx.Status.End(false)
+
+	// collect info about the existing control-plane nodes
+	var backendServers = map[string]string{}
+	controlPlaneNodes, err := nodeutils.SelectNodesByRole(
+		allNodes,
+		constants.ControlPlaneNodeRoleValue,
+	)
+	if err != nil {
+		return err
+	}
+	for _, n := range controlPlaneNodes {
+		backendServers[n.String()] = fmt.Sprintf("%s:%d", n.String(), common.APIServerInternalPort)
+	}
+
+	// create loadbalancer config data
+	loadbalancerConfig, err := loadbalancer.Config(&loadbalancer.ConfigData{
+		ControlPlanePort: common.APIServerInternalPort,
+		BackendServers:   backendServers,
+		IPv6:             ctx.Config.Networking.IPFamily == config.IPv6Family,
+	})
+	if err != nil {
+		return errors.Wrap(err, "failed to generate loadbalancer config data")
+	}
+
+	// create loadbalancer config on the node
+	if err := nodeutils.WriteFile(loadBalancerNode, loadbalancer.ConfigPath, loadbalancerConfig); err != nil {
+		// TODO: logging here
+		return errors.Wrap(err, "failed to copy loadbalancer config to node")
+	}
+
+	// reload the config. haproxy will reload on SIGHUP
+	if err := loadBalancerNode.Command("kill", "-s", "HUP", "1").Run(); err != nil {
+		return errors.Wrap(err, "failed to reload loadbalancer")
+	}
+
+	ctx.Status.End(true)
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready/waitforready.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready/waitforready.go
new file mode 100644
index 000000000..1dfbf6c29
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready/waitforready.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package waitforready implements the wait for ready action
+package waitforready
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions"
+	"sigs.k8s.io/kind/pkg/cluster/nodes"
+	"sigs.k8s.io/kind/pkg/cluster/nodeutils"
+	"sigs.k8s.io/kind/pkg/exec"
+)
+
+// Action implements an action for waiting for the cluster to be ready
+type Action struct {
+	waitTime time.Duration
+}
+
+// NewAction returns a new action for waiting for the cluster to be ready
+func NewAction(waitTime time.Duration) actions.Action {
+	return &Action{
+		waitTime: waitTime,
+	}
+}
+
+// Execute runs the action
+func (a *Action) Execute(ctx *actions.ActionContext) error {
+	// skip entirely if the wait time is 0
+	if a.waitTime == time.Duration(0) {
+		return nil
+	}
+	ctx.Status.Start(
+		fmt.Sprintf(
+			"Waiting ≤ %s for control-plane = Ready ⏳",
+			formatDuration(a.waitTime),
+		),
+	)
+
+	allNodes, err := ctx.Nodes()
+	if err != nil {
+		return err
+	}
+	// get a control plane node to use to check cluster status
+	controlPlanes, err := nodeutils.ControlPlaneNodes(allNodes)
+	if err != nil {
+		return err
+	}
+	node := controlPlanes[0] // kind expects at least one always
+
+	// Wait for the nodes to reach Ready status.
+	startTime := time.Now()
+	isReady := waitForReady(node, startTime.Add(a.waitTime))
+	if !isReady {
+		ctx.Status.End(false)
+		ctx.Logger.V(0).Info(" • WARNING: Timed out waiting for Ready ⚠️")
+		return nil
+	}
+
+	// mark success
+	ctx.Status.End(true)
+	ctx.Logger.V(0).Infof(" • Ready after %s 💚", formatDuration(time.Since(startTime)))
+	return nil
+}
+
+// waitForReady uses kubectl inside the "node" container to check if the
+// control plane nodes are "Ready".
+func waitForReady(node nodes.Node, until time.Time) bool {
+	return tryUntil(until, func() bool {
+		cmd := node.Command(
+			"kubectl",
+			"--kubeconfig=/etc/kubernetes/admin.conf",
+			"get",
+			"nodes",
+			"--selector=node-role.kubernetes.io/master",
+			// When the node reaches status ready, the status field will be set
+			// to true.
+			"-o=jsonpath='{.items..status.conditions[-1:].status}'",
+		)
+		lines, err := exec.OutputLines(cmd)
+		if err != nil {
+			return false
+		}
+
+		// 'lines' will return the status of all nodes labeled as master. For
+		// example, if we have three control plane nodes, and all are ready,
+		// then the status will have the following format: `True True True`.
+		status := strings.Fields(lines[0])
+		for _, s := range status {
+			// Check the node status. If the node is ready this will be 'True';
+			// otherwise it will be 'False' or 'Unknown'.
+			if !strings.Contains(s, "True") {
+				return false
+			}
+		}
+		return true
+	})
+}
+
+// helper that calls `try()` in a loop until the deadline `until`
+// has passed or `try()` returns true; returns whether try ever returned true
+func tryUntil(until time.Time, try func() bool) bool {
+	for until.After(time.Now()) {
+		if try() {
+			return true
+		}
+	}
+	return false
+}
+
+func formatDuration(duration time.Duration) string {
+	return duration.Round(time.Second).String()
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/create.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/create.go
new file mode 100644
index 000000000..9d96b7ebe
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/create.go
@@ -0,0 +1,251 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package create
+
+import (
+	"fmt"
+	"math/rand"
+	"regexp"
+	"time"
+
+	"github.com/alessio/shellescape"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/delete"
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers"
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/internal/apis/config"
+	"sigs.k8s.io/kind/pkg/internal/apis/config/encoding"
+	"sigs.k8s.io/kind/pkg/internal/cli"
+	"sigs.k8s.io/kind/pkg/log"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions"
+	configaction "sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config"
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni"
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage"
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit"
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin"
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer"
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready"
+	"sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig"
+)
+
+const (
+	// Typical host name max limit is 64 characters (https://linux.die.net/man/2/sethostname)
+	// We append -control-plane (14 characters) to the cluster name on the control plane container
+	clusterNameMax = 50
+)
+
+// similar to valid docker container names, but since we will prefix
+// and suffix this name, we can relax it a little
+// see NewContext() for usage
+// https://godoc.org/github.com/docker/docker/daemon/names#pkg-constants
+var validNameRE = regexp.MustCompile(`^[a-z0-9_.-]+$`)
+
+// ClusterOptions holds cluster creation options
+type ClusterOptions struct {
+	Config       *config.Cluster
+	NameOverride string // overrides config.Name
+	// NodeImage overrides the nodes' images in Config if non-zero
+	NodeImage      string
+	Retain         bool
+	WaitForReady   time.Duration
+	KubeconfigPath string
+	// see https://github.com/kubernetes-sigs/kind/issues/324
+	StopBeforeSettingUpKubernetes bool // if false kind should set up kubernetes after creating nodes
+	// Options to control output
+	DisplayUsage      bool
+	DisplaySalutation bool
+}
+
+// Cluster creates a cluster
+func Cluster(logger log.Logger, p providers.Provider, opts *ClusterOptions) error {
+	// default / process options (namely config)
+	if err := fixupOptions(opts); err != nil {
+		return err
+	}
+
+	// Check if the cluster name already exists
+	if err := alreadyExists(p, opts.Config.Name); err != nil {
+		return err
+	}
+
+	// TODO: move to config validation
+	// validate the name
+	if !validNameRE.MatchString(opts.Config.Name) {
+		return errors.Errorf(
+			"'%s' is not a valid cluster name, cluster names must match `%s`",
+			opts.Config.Name, validNameRE.String(),
+		)
+	}
+	// warn if cluster name might typically be too long
+	if len(opts.Config.Name) > clusterNameMax {
+		logger.Warnf("cluster name %q is probably too long, this might not work properly on some systems", opts.Config.Name)
+	}
+
+	// then validate
+	if err := opts.Config.Validate(); err != nil {
+		return err
+	}
+
+	// set up a status object to show progress to the user
+	status := cli.StatusForLogger(logger)
+
+	// we're going to start creating now, tell the user
+	logger.V(0).Infof("Creating cluster %q ...\n", opts.Config.Name)
+
+	// Create node containers implementing defined config Nodes
+	if err := p.Provision(status, opts.Config); err != nil {
+		// In case of errors nodes are deleted (except if retain is explicitly set)
+		if !opts.Retain {
+			_ = delete.Cluster(logger, p, opts.Config.Name, opts.KubeconfigPath)
+		}
+		return err
+	}
+
+	// TODO(bentheelder): make this controllable from the command line?
+	actionsToRun := []actions.Action{
+		loadbalancer.NewAction(), // setup external loadbalancer
+		configaction.NewAction(), // setup kubeadm config
+	}
+	if !opts.StopBeforeSettingUpKubernetes {
+		actionsToRun = append(actionsToRun,
+			kubeadminit.NewAction(), // run kubeadm init
+		)
+		// this step might be skipped, but is next after init
+		if !opts.Config.Networking.DisableDefaultCNI {
+			actionsToRun = append(actionsToRun,
+				installcni.NewAction(), // install CNI
+			)
+		}
+		// add remaining steps
+		actionsToRun = append(actionsToRun,
+			installstorage.NewAction(),                // install StorageClass
+			kubeadmjoin.NewAction(),                   // run kubeadm join
+			waitforready.NewAction(opts.WaitForReady), // wait for cluster readiness
+		)
+	}
+
+	// run all actions
+	actionsContext := actions.NewActionContext(logger, status, p, opts.Config)
+	for _, action := range actionsToRun {
+		if err := action.Execute(actionsContext); err != nil {
+			if !opts.Retain {
+				_ = delete.Cluster(logger, p, opts.Config.Name, opts.KubeconfigPath)
+			}
+			return err
+		}
+	}
+
+	// skip the rest if we're not setting up kubernetes
+	if opts.StopBeforeSettingUpKubernetes {
+		return nil
+	}
+
+	// try exporting kubeconfig with backoff for locking failures
+	// TODO: factor out into a public errors API w/ backoff handling?
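+	// (the kubeconfig is written under a client-go style ".lock" file, so a
+	// concurrent kind process can cause transient export failures)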
+ // for now this is easier than coming up with a good API + var err error + for _, b := range []time.Duration{0, time.Millisecond, time.Millisecond * 50, time.Millisecond * 100} { + time.Sleep(b) + if err = kubeconfig.Export(p, opts.Config.Name, opts.KubeconfigPath); err == nil { + break + } + } + if err != nil { + return err + } + + // optionally display usage + if opts.DisplayUsage { + logUsage(logger, opts.Config.Name, opts.KubeconfigPath) + } + // optionally give the user a friendly salutation + if opts.DisplaySalutation { + logger.V(0).Info("") + logSalutation(logger) + } + return nil +} + +// alreadyExists returns an error if the cluster name already exists +// or if we had an error checking +func alreadyExists(p providers.Provider, name string) error { + n, err := p.ListNodes(name) + if err != nil { + return err + } + if len(n) != 0 { + return errors.Errorf("node(s) already exist for a cluster with the name %q", name) + } + return nil +} + +func logUsage(logger log.Logger, name, explicitKubeconfigPath string) { + // construct a sample command for interacting with the cluster + kctx := kubeconfig.ContextForCluster(name) + sampleCommand := fmt.Sprintf("kubectl cluster-info --context %s", kctx) + if explicitKubeconfigPath != "" { + // explicit path, include this + sampleCommand += " --kubeconfig " + shellescape.Quote(explicitKubeconfigPath) + } + logger.V(0).Infof(`Set kubectl context to "%s"`, kctx) + logger.V(0).Infof("You can now use your cluster with:\n\n" + sampleCommand) +} + +func logSalutation(logger log.Logger) { + salutations := []string{ + "Have a nice day! 👋", + "Thanks for using kind! 😊", + "Not sure what to do next? 😅 Check out https://kind.sigs.k8s.io/docs/user/quick-start/", + "Have a question, bug, or feature request? Let us know! https://kind.sigs.k8s.io/#community 🙂", + } + r := rand.New(rand.NewSource(time.Now().UTC().UnixNano())) + s := salutations[r.Intn(len(salutations))] + logger.V(0).Info(s) +} + +func fixupOptions(opts *ClusterOptions) error { + // do post processing for options + // first ensure we at least have a default cluster config + if opts.Config == nil { + cfg, err := encoding.Load("") + if err != nil { + return err + } + opts.Config = cfg + } + + if opts.NameOverride != "" { + opts.Config.Name = opts.NameOverride + } + + // if NodeImage was set, override the image on all nodes + if opts.NodeImage != "" { + // Apply image override to all the Nodes defined in Config + // TODO(fabrizio pandini): this should be reconsidered when implementing + // https://github.com/kubernetes-sigs/kind/issues/133 + for i := range opts.Config.Nodes { + opts.Config.Nodes[i].Image = opts.NodeImage + } + } + + // default config fields (important for usage as a library, where the config + // may be constructed in memory rather than from disk) + config.SetDefaultsCluster(opts.Config) + + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/delete/delete.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/delete/delete.go new file mode 100644 index 000000000..550cc457e --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/delete/delete.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package delete
+
+import (
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/log"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig"
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers"
+)
+
+// Cluster deletes the cluster identified by name.
+// explicitKubeconfigPath is --kubeconfig, following the rules from
+// https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands
+func Cluster(logger log.Logger, p providers.Provider, name, explicitKubeconfigPath string) error {
+	n, err := p.ListNodes(name)
+	if err != nil {
+		return errors.Wrap(err, "error listing nodes")
+	}
+
+	kerr := kubeconfig.Remove(name, explicitKubeconfigPath)
+	if kerr != nil {
+		logger.Errorf("failed to update kubeconfig: %v", kerr)
+	}
+
+	err = p.DeleteNodes(n)
+	if err != nil {
+		return err
+	}
+	if kerr != nil {
+		return kerr
+	}
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/config.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/config.go
new file mode 100644
index 000000000..3e311d9f9
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/config.go
@@ -0,0 +1,411 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubeadm
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+	"text/template"
+
+	"k8s.io/apimachinery/pkg/util/version"
+	"sigs.k8s.io/kind/pkg/errors"
+)
+
+// ConfigData is supplied to the kubeadm config template, with values populated
+// by the cluster package
+type ConfigData struct {
+	ClusterName       string
+	KubernetesVersion string
+	// ControlPlaneEndpoint is the address of the external load balancer if
+	// defined, otherwise the address of the bootstrap node
+	ControlPlaneEndpoint string
+	// The Local API Server port
+	APIBindPort int
+	// The API server external listen IP (which we will port forward)
+	APIServerAddress string
+
+	// this should really be used for the --provider-id flag
+	// ideally cluster config should not depend on the node backend otherwise ...
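+	// (it currently feeds the synthetic provider-id rendered by the kubeadm
+	// config templates below, e.g. "kind://docker/<cluster>/<node>")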
+	NodeProvider string
+
+	// ControlPlane flag specifies the node belongs to the control plane
+	ControlPlane bool
+	// The main IP address of the node
+	NodeAddress string
+	// The name for the node (not the address)
+	NodeName string
+
+	// The Token for TLS bootstrap
+	Token string
+
+	// KubeProxyMode defines the kube-proxy mode, either iptables or ipvs
+	KubeProxyMode string
+	// The subnet used for pods
+	PodSubnet string
+	// The subnet used for services
+	ServiceSubnet string
+
+	// Kubernetes FeatureGates
+	FeatureGates map[string]bool
+
+	// Kubernetes API Server RuntimeConfig
+	RuntimeConfig map[string]string
+
+	// IPv4 values take precedence over IPv6 by default; if true, set IPv6
+	// default values instead
+	IPv6 bool
+
+	// DerivedConfigData is populated by Derive()
+	// These auto-generated fields are available to Config templates,
+	// but not meant to be set by hand
+	DerivedConfigData
+}
+
+// DerivedConfigData fields are automatically derived by
+// ConfigData.Derive if they are not specified / zero valued
+type DerivedConfigData struct {
+	// DockerStableTag is automatically derived from KubernetesVersion
+	DockerStableTag string
+	// SortedFeatureGateKeys allows us to iterate FeatureGates deterministically
+	SortedFeatureGateKeys []string
+	// FeatureGatesString is of the form `Foo=true,Baz=false`
+	FeatureGatesString string
+	// RuntimeConfigString is of the form `Foo=true,Baz=false`
+	RuntimeConfigString string
+}
+
+// Derive automatically derives DockerStableTag if not specified
+func (c *ConfigData) Derive() {
+	if c.DockerStableTag == "" {
+		c.DockerStableTag = strings.Replace(c.KubernetesVersion, "+", "_", -1)
+	}
+
+	// get sorted list of FeatureGate keys
+	featureGateKeys := make([]string, 0, len(c.FeatureGates))
+	for k := range c.FeatureGates {
+		featureGateKeys = append(featureGateKeys, k)
+	}
+	sort.Strings(featureGateKeys)
+	c.SortedFeatureGateKeys = featureGateKeys
+
+	// create a sorted key=value,... string of FeatureGates
+	var featureGates []string
+	for _, k := range featureGateKeys {
+		v := c.FeatureGates[k]
+		featureGates = append(featureGates, fmt.Sprintf("%s=%t", k, v))
+	}
+	c.FeatureGatesString = strings.Join(featureGates, ",")
+
+	// create a sorted key=value,... string of RuntimeConfig
+	// first get sorted list of RuntimeConfig keys
+	runtimeConfigKeys := make([]string, 0, len(c.RuntimeConfig))
+	for k := range c.RuntimeConfig {
+		runtimeConfigKeys = append(runtimeConfigKeys, k)
+	}
+	sort.Strings(runtimeConfigKeys)
+	// stringify
+	var runtimeConfig []string
+	for _, k := range runtimeConfigKeys {
+		v := c.RuntimeConfig[k]
+		// TODO: do we need to quote / escape these in the future?
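+		// (if we do, note the values are rendered inside double quotes in the
+		// kubeadm config templates)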
+		// Currently runtime config is in practice booleans, no special characters
+		runtimeConfig = append(runtimeConfig, fmt.Sprintf("%s=%s", k, v))
+	}
+	c.RuntimeConfigString = strings.Join(runtimeConfig, ",")
+}
+
+// See docs for these APIs at:
+// https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#pkg-subdirectories
+// e.g.:
+// https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1
+
+// ConfigTemplateBetaV1 is the kubeadm config template for API version v1beta1
+const ConfigTemplateBetaV1 = `# config generated by kind
+apiVersion: kubeadm.k8s.io/v1beta1
+kind: ClusterConfiguration
+metadata:
+  name: config
+kubernetesVersion: {{.KubernetesVersion}}
+clusterName: "{{.ClusterName}}"
+controlPlaneEndpoint: "{{ .ControlPlaneEndpoint }}"
+# on docker for mac we have to expose the api server via port forward,
+# so we need to ensure the cert is valid for localhost so we can talk
+# to the cluster after rewriting the kubeconfig to point to localhost
+apiServer:
+  certSANs: [localhost, "{{.APIServerAddress}}"]
+  extraArgs:
+    "runtime-config": "{{ .RuntimeConfigString }}"
+{{ if .FeatureGates }}
+    "feature-gates": "{{ .FeatureGatesString }}"
+{{ end}}
+controllerManager:
+  extraArgs:
+{{ if .FeatureGates }}
+    "feature-gates": "{{ .FeatureGatesString }}"
+{{ end}}
+    enable-hostpath-provisioner: "true"
+  # configure ipv6 default addresses for IPv6 clusters
+  {{ if .IPv6 -}}
+  bind-address: "::"
+  {{- end }}
+scheduler:
+  extraArgs:
+{{ if .FeatureGates }}
+    "feature-gates": "{{ .FeatureGatesString }}"
+{{ end }}
+  # configure ipv6 default addresses for IPv6 clusters
+  {{ if .IPv6 -}}
+  address: "::"
+  bind-address: "::1"
+  {{- end }}
+networking:
+  podSubnet: "{{ .PodSubnet }}"
+  serviceSubnet: "{{ .ServiceSubnet }}"
+---
+apiVersion: kubeadm.k8s.io/v1beta1
+kind: InitConfiguration
+metadata:
+  name: config
+# we use a well-known token for TLS bootstrap
+bootstrapTokens:
+- token: "{{ .Token }}"
+# we use a well-known port for making the API server discoverable inside docker network.
+# from the host machine such port will be accessible via a random local port instead.
+localAPIEndpoint:
+  advertiseAddress: "{{ .NodeAddress }}"
+  bindPort: {{.APIBindPort}}
+nodeRegistration:
+  criSocket: "/run/containerd/containerd.sock"
+  kubeletExtraArgs:
+    fail-swap-on: "false"
+    node-ip: "{{ .NodeAddress }}"
+    provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}"
+---
+# no-op entry that exists solely so it can be patched
+apiVersion: kubeadm.k8s.io/v1beta1
+kind: JoinConfiguration
+metadata:
+  name: config
+{{ if .ControlPlane -}}
+controlPlane:
+  localAPIEndpoint:
+    advertiseAddress: "{{ .NodeAddress }}"
+    bindPort: {{.APIBindPort}}
+{{- end }}
+nodeRegistration:
+  criSocket: "/run/containerd/containerd.sock"
+  kubeletExtraArgs:
+    fail-swap-on: "false"
+    node-ip: "{{ .NodeAddress }}"
+    provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}"
+discovery:
+  bootstrapToken:
+    apiServerEndpoint: "{{ .ControlPlaneEndpoint }}"
+    token: "{{ .Token }}"
+    unsafeSkipCAVerification: true
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+metadata:
+  name: config
+# configure ipv6 addresses in IPv6 mode
+{{ if .IPv6 -}}
+address: "::"
+healthzBindAddress: "::"
+{{- end }}
+# disable disk resource management by default
+# kubelet will see the host disk that the inner container runtime
+# is ultimately backed by and attempt to recover disk space. we don't want that.
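+# (the 100% threshold effectively disables image garbage collection, and the
+# "0%" eviction thresholds below disable disk-pressure eviction)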
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+{{if .FeatureGates}}featureGates:
+{{ range $key := .SortedFeatureGateKeys }}
+  "{{ $key }}": {{ index $.FeatureGates $key }}
+{{end}}{{end}}
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metadata:
+  name: config
+mode: "{{ .KubeProxyMode }}"
+{{if .FeatureGates}}featureGates:
+{{ range $key := .SortedFeatureGateKeys }}
+  "{{ $key }}": {{ index $.FeatureGates $key }}
+{{end}}{{end}}
+iptables:
+  minSyncPeriod: 1s
+`
+
+// ConfigTemplateBetaV2 is the kubeadm config template for API version v1beta2
+const ConfigTemplateBetaV2 = `# config generated by kind
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: ClusterConfiguration
+metadata:
+  name: config
+kubernetesVersion: {{.KubernetesVersion}}
+clusterName: "{{.ClusterName}}"
+controlPlaneEndpoint: "{{ .ControlPlaneEndpoint }}"
+# on docker for mac we have to expose the api server via port forward,
+# so we need to ensure the cert is valid for localhost so we can talk
+# to the cluster after rewriting the kubeconfig to point to localhost
+apiServer:
+  certSANs: [localhost, "{{.APIServerAddress}}"]
+  extraArgs:
+    "runtime-config": "{{ .RuntimeConfigString }}"
+{{ if .FeatureGates }}
+    "feature-gates": "{{ .FeatureGatesString }}"
+{{ end}}
+controllerManager:
+  extraArgs:
+{{ if .FeatureGates }}
+    "feature-gates": "{{ .FeatureGatesString }}"
+{{ end }}
+    enable-hostpath-provisioner: "true"
+  # configure ipv6 default addresses for IPv6 clusters
+  {{ if .IPv6 -}}
+  bind-address: "::"
+  {{- end }}
+scheduler:
+  extraArgs:
+{{ if .FeatureGates }}
+    "feature-gates": "{{ .FeatureGatesString }}"
+{{ end }}
+  # configure ipv6 default addresses for IPv6 clusters
+  {{ if .IPv6 -}}
+  address: "::"
+  bind-address: "::1"
+  {{- end }}
+networking:
+  podSubnet: "{{ .PodSubnet }}"
+  serviceSubnet: "{{ .ServiceSubnet }}"
+---
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: InitConfiguration
+metadata:
+  name: config
+# we use a well-known token for TLS bootstrap
+bootstrapTokens:
+- token: "{{ .Token }}"
+# we use a well-known port for making the API server discoverable inside docker network.
+# from the host machine such port will be accessible via a random local port instead.
+localAPIEndpoint: + advertiseAddress: "{{ .NodeAddress }}" + bindPort: {{.APIBindPort}} +nodeRegistration: + criSocket: "unix:///run/containerd/containerd.sock" + kubeletExtraArgs: + fail-swap-on: "false" + node-ip: "{{ .NodeAddress }}" + provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}" +--- +# no-op entry that exists solely so it can be patched +apiVersion: kubeadm.k8s.io/v1beta2 +kind: JoinConfiguration +metadata: + name: config +{{ if .ControlPlane -}} +controlPlane: + localAPIEndpoint: + advertiseAddress: "{{ .NodeAddress }}" + bindPort: {{.APIBindPort}} +{{- end }} +nodeRegistration: + criSocket: "unix:///run/containerd/containerd.sock" + kubeletExtraArgs: + fail-swap-on: "false" + node-ip: "{{ .NodeAddress }}" + provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}" +discovery: + bootstrapToken: + apiServerEndpoint: "{{ .ControlPlaneEndpoint }}" + token: "{{ .Token }}" + unsafeSkipCAVerification: true +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +metadata: + name: config +# configure ipv6 addresses in IPv6 mode +{{ if .IPv6 -}} +address: "::" +healthzBindAddress: "::" +{{- end }} +# disable disk resource management by default +# kubelet will see the host disk that the inner container runtime +# is ultimately backed by and attempt to recover disk space. we don't want that. +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +{{if .FeatureGates}}featureGates: +{{ range $key := .SortedFeatureGateKeys }} + "{{ $key }}": {{ index $.FeatureGates $key }} +{{end}}{{end}} +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metadata: + name: config +mode: "{{ .KubeProxyMode }}" +{{if .FeatureGates}}featureGates: +{{ range $key := .SortedFeatureGateKeys }} + "{{ $key }}": {{ index $.FeatureGates $key }} +{{end}}{{end}} +iptables: + minSyncPeriod: 1s +` + +// Config returns a kubeadm config generated from config data, in particular +// the kubernetes version +func Config(data ConfigData) (config string, err error) { + ver, err := version.ParseGeneric(data.KubernetesVersion) + if err != nil { + return "", err + } + + // ensure featureGates is non-nil, as we may add entries + if data.FeatureGates == nil { + data.FeatureGates = make(map[string]bool) + } + + // assume the latest API version, then fallback if the k8s version is too low + templateSource := ConfigTemplateBetaV2 + if ver.LessThan(version.MustParseSemantic("v1.15.0")) { + templateSource = ConfigTemplateBetaV1 + } + + t, err := template.New("kubeadm-config").Parse(templateSource) + if err != nil { + return "", errors.Wrap(err, "failed to parse config template") + } + + // derive any automatic fields if not supplied + data.Derive() + + // execute the template + var buff bytes.Buffer + err = t.Execute(&buff, data) + if err != nil { + return "", errors.Wrap(err, "error executing config template") + } + return buff.String(), nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/const.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/const.go new file mode 100644 index 000000000..f8a0e0df2 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/const.go @@ -0,0 +1,24 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubeadm
+
+// Token defines a dummy, well-known token for automating the TLS bootstrap process
+const Token = "abcdef.0123456789abcdef"
+
+// ObjectName is the name every generated object will have,
+// i.e. `metadata:\nname: config`
+const ObjectName = "config"
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/doc.go
new file mode 100644
index 000000000..f023db7df
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package kubeadm contains kubeadm related constants and configuration
+package kubeadm
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/encode.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/encode.go
new file mode 100644
index 000000000..336212eff
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/encode.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package kubeconfig + +import ( + "bytes" + + yaml "gopkg.in/yaml.v3" + kubeyaml "sigs.k8s.io/yaml" + + "sigs.k8s.io/kind/pkg/errors" +) + +// Encode encodes the cfg to yaml +func Encode(cfg *Config) ([]byte, error) { + // NOTE: kubernetes's yaml library doesn't handle inline fields very well + // so we're not using that to marshal + encoded, err := yaml.Marshal(cfg) + if err != nil { + return nil, errors.Wrap(err, "failed to encode KUBECONFIG") + } + + // normalize with kubernetes's yaml library + // this is not strictly necessary, but it ensures minimal diffs when + // modifying kubeconfig files, which is nice to have + encoded, err = normYaml(encoded) + if err != nil { + return nil, errors.Wrap(err, "failed to normalize KUBECONFIG encoding") + } + + return encoded, nil +} + +// normYaml round trips yaml bytes through sigs.k8s.io/yaml to normalize them +// versus other kubernetes ecosystem yaml output +func normYaml(y []byte) ([]byte, error) { + var unstructured interface{} + if err := kubeyaml.Unmarshal(y, &unstructured); err != nil { + return nil, err + } + encoded, err := kubeyaml.Marshal(&unstructured) + if err != nil { + return nil, err + } + // special case: don't write anything when empty + if bytes.Equal(encoded, []byte("{}\n")) { + return []byte{}, nil + } + return encoded, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/helpers.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/helpers.go new file mode 100644 index 000000000..b92393083 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/helpers.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "sigs.k8s.io/kind/pkg/errors" +) + +// KINDClusterKey identifies kind clusters in kubeconfig files +func KINDClusterKey(clusterName string) string { + return "kind-" + clusterName +} + +// checkKubeadmExpectations validates that a kubeadm created KUBECONFIG meets +// our expectations, namely on the number of entries +func checkKubeadmExpectations(cfg *Config) error { + if len(cfg.Clusters) != 1 { + return errors.Errorf("kubeadm KUBECONFIG should have one cluster, but read %d", len(cfg.Clusters)) + } + if len(cfg.Users) != 1 { + return errors.Errorf("kubeadm KUBECONFIG should have one user, but read %d", len(cfg.Users)) + } + if len(cfg.Contexts) != 1 { + return errors.Errorf("kubeadm KUBECONFIG should have one context, but read %d", len(cfg.Contexts)) + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/lock.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/lock.go new file mode 100644 index 000000000..41d1be27f --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/lock.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "os" + "path/filepath" +) + +// these are from +// https://github.com/kubernetes/client-go/blob/611184f7c43ae2d520727f01d49620c7ed33412d/tools/clientcmd/loader.go#L439-L440 + +func lockFile(filename string) error { + // Make sure the dir exists before we try to create a lock file. + dir := filepath.Dir(filename) + if _, err := os.Stat(dir); os.IsNotExist(err) { + if err = os.MkdirAll(dir, 0755); err != nil { + return err + } + } + f, err := os.OpenFile(lockName(filename), os.O_CREATE|os.O_EXCL, 0) + if err != nil { + return err + } + f.Close() + return nil +} + +func unlockFile(filename string) error { + return os.Remove(lockName(filename)) +} + +func lockName(filename string) string { + return filename + ".lock" +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/merge.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/merge.go new file mode 100644 index 000000000..ec4a2164d --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/merge.go @@ -0,0 +1,111 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "os" + + "sigs.k8s.io/kind/pkg/errors" +) + +// WriteMerged writes a kind kubeconfig (see KINDFromRawKubeadm) into configPath +// merging with the existing contents if any and setting the current context to +// the kind config's current context. 
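+//
+// A usage sketch (illustrative only, not part of the upstream docs): a caller
+// holding a kubeadm-derived *Config cfg that wants kubectl-style path
+// resolution would pass an empty explicit path:
+//
+//	err := WriteMerged(cfg, "") // merges into $KUBECONFIG or ~/.kube/config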
+func WriteMerged(kindConfig *Config, explicitConfigPath string) error { + // figure out what filepath we should use + configPath := pathForMerge(explicitConfigPath, os.Getenv) + + // lock config file the same as client-go + if err := lockFile(configPath); err != nil { + return errors.Wrap(err, "failed to lock config file") + } + defer func() { + _ = unlockFile(configPath) + }() + + // read in existing + existing, err := read(configPath) + if err != nil { + return errors.Wrap(err, "failed to get kubeconfig to merge") + } + + // merge with kind kubeconfig + if err := merge(existing, kindConfig); err != nil { + return err + } + + // write back out + return write(existing, configPath) +} + +// merge kind config into an existing config +func merge(existing, kind *Config) error { + // verify assumptions about kubeadm / kind kubeconfigs + if err := checkKubeadmExpectations(kind); err != nil { + return err + } + + // insert or append cluster entry + shouldAppend := true + for i := range existing.Clusters { + if existing.Clusters[i].Name == kind.Clusters[0].Name { + existing.Clusters[i] = kind.Clusters[0] + shouldAppend = false + } + } + if shouldAppend { + existing.Clusters = append(existing.Clusters, kind.Clusters[0]) + } + + // insert or append user entry + shouldAppend = true + for i := range existing.Users { + if existing.Users[i].Name == kind.Users[0].Name { + existing.Users[i] = kind.Users[0] + shouldAppend = false + } + } + if shouldAppend { + existing.Users = append(existing.Users, kind.Users[0]) + } + + // insert or append context entry + shouldAppend = true + for i := range existing.Contexts { + if existing.Contexts[i].Name == kind.Contexts[0].Name { + existing.Contexts[i] = kind.Contexts[0] + shouldAppend = false + } + } + if shouldAppend { + existing.Contexts = append(existing.Contexts, kind.Contexts[0]) + } + + // set the current context + existing.CurrentContext = kind.CurrentContext + + // TODO: We should not need this, but it allows broken clients that depend + // on apiVersion and kind to work. Notably the upstream javascript client. + // See: https://github.com/kubernetes-sigs/kind/issues/1242 + if len(existing.OtherFields) == 0 { + // TODO: Should we be deep-copying? for now we don't need to + // and doing so would be a pain (re and de-serialize maybe?) :shrug: + existing.OtherFields = kind.OtherFields + } + + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/paths.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/paths.go new file mode 100644 index 000000000..23514ed80 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/paths.go @@ -0,0 +1,167 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubeconfig + +import ( + "os" + "path" + "path/filepath" + "runtime" + + "k8s.io/apimachinery/pkg/util/sets" +) + +const kubeconfigEnv = "KUBECONFIG" + +/* +paths returns the list of paths to be considered for kubeconfig files +where explicitPath is the value of --kubeconfig + +Logic based on kubectl + +https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands + +- If the --kubeconfig flag is set, then only that file is loaded. The flag may only be set once and no merging takes place. + +- If $KUBECONFIG environment variable is set, then it is used as a list of paths (normal path delimiting rules for your system). These paths are merged. When a value is modified, it is modified in the file that defines the stanza. When a value is created, it is created in the first file that exists. - If no files in the chain exist, then it creates the last file in the list. + +- Otherwise, ${HOME}/.kube/config is used and no merging takes place. +*/ +func paths(explicitPath string, getEnv func(string) string) []string { + if explicitPath != "" { + return []string{explicitPath} + } + + paths := discardEmptyAndDuplicates( + filepath.SplitList(getEnv(kubeconfigEnv)), + ) + if len(paths) != 0 { + return paths + } + + return []string{path.Join(homeDir(runtime.GOOS, getEnv), ".kube", "config")} +} + +// pathForMerge returns the file that kubectl would merge into +func pathForMerge(explicitPath string, getEnv func(string) string) string { + // find the first file that exists + p := paths(explicitPath, getEnv) + if len(p) == 1 { + return p[0] + } + for _, filename := range p { + if fileExists(filename) { + return filename + } + } + // otherwise the last file + return p[len(p)-1] +} + +func fileExists(filename string) bool { + info, err := os.Stat(filename) + if os.IsNotExist(err) { + return false + } + return !info.IsDir() +} + +func discardEmptyAndDuplicates(paths []string) []string { + seen := sets.NewString() + kept := 0 + for _, p := range paths { + if p != "" && !seen.Has(p) { + paths[kept] = p + kept++ + seen.Insert(p) + } + } + return paths[:kept] +} + +// homeDir returns the home directory for the current user. +// On Windows: +// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned. +// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned. +// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned. +// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned. +// NOTE this is from client-go. Rather than pull in client-go for this one +// standalone method, we have a fork here. +// https://github.com/kubernetes/client-go/blob/6d7018244d72350e2e8c4a19ccdbe4c8083a9143/util/homedir/homedir.go +// We've modified this to require injecting os.Getenv and runtime.GOOS as a dependencies for testing purposes +func homeDir(GOOS string, getEnv func(string) string) string { + if GOOS == "windows" { + home := getEnv("HOME") + homeDriveHomePath := "" + if homeDrive, homePath := getEnv("HOMEDRIVE"), getEnv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 { + homeDriveHomePath = homeDrive + homePath + } + userProfile := getEnv("USERPROFILE") + + // Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file. 
+ // %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility. + for _, p := range []string{home, homeDriveHomePath, userProfile} { + if len(p) == 0 { + continue + } + if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil { + continue + } + return p + } + + firstSetPath := "" + firstExistingPath := "" + + // Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools + for _, p := range []string{home, userProfile, homeDriveHomePath} { + if len(p) == 0 { + continue + } + if len(firstSetPath) == 0 { + // remember the first path that is set + firstSetPath = p + } + info, err := os.Stat(p) + if err != nil { + continue + } + if len(firstExistingPath) == 0 { + // remember the first path that exists + firstExistingPath = p + } + if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 { + // return first path that is writeable + return p + } + } + + // If none are writeable, return first location that exists + if len(firstExistingPath) > 0 { + return firstExistingPath + } + + // If none exist, return first location that is set + if len(firstSetPath) > 0 { + return firstSetPath + } + + // We've got nothing + return "" + } + return getEnv("HOME") +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/read.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/read.go new file mode 100644 index 000000000..6ea570cc2 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/read.go @@ -0,0 +1,82 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "io/ioutil" + "os" + + yaml "gopkg.in/yaml.v3" + + "sigs.k8s.io/kind/pkg/errors" +) + +// KINDFromRawKubeadm returns a kind kubeconfig derived from the raw kubeadm kubeconfig, +// the kind clusterName, and the server. +// server is ignored if unset. 
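+//
+// For example (hypothetical values): with clusterName "dev", every named
+// reference in the returned config becomes "kind-dev", and a non-empty
+// server such as "https://127.0.0.1:6443" replaces the kubeadm-internal
+// API server endpoint.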
+func KINDFromRawKubeadm(rawKubeadmKubeConfig, clusterName, server string) (*Config, error) { + cfg := &Config{} + if err := yaml.Unmarshal([]byte(rawKubeadmKubeConfig), cfg); err != nil { + return nil, err + } + + // verify assumptions about kubeadm kubeconfigs + if err := checkKubeadmExpectations(cfg); err != nil { + return nil, err + } + + // compute unique kubeconfig key for this cluster + key := KINDClusterKey(clusterName) + + // use the unique key for all named references + cfg.Clusters[0].Name = key + cfg.Users[0].Name = key + cfg.Contexts[0].Name = key + cfg.Contexts[0].Context.User = key + cfg.Contexts[0].Context.Cluster = key + cfg.CurrentContext = key + + // patch server field if server was set + if server != "" { + cfg.Clusters[0].Cluster.Server = server + } + + return cfg, nil +} + +// read loads a KUBECONFIG file from configPath +func read(configPath string) (*Config, error) { + // try to open, return default if no such file + f, err := os.Open(configPath) + if os.IsNotExist(err) { + return &Config{}, nil + } else if err != nil { + return nil, errors.WithStack(err) + } + + // otherwise read in and deserialize + cfg := &Config{} + rawExisting, err := ioutil.ReadAll(f) + if err != nil { + return nil, errors.WithStack(err) + } + if err := yaml.Unmarshal(rawExisting, cfg); err != nil { + return nil, errors.WithStack(err) + } + + return cfg, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/remove.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/remove.go new file mode 100644 index 000000000..e5fed9556 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/remove.go @@ -0,0 +1,111 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubeconfig + +import ( + "os" + + "sigs.k8s.io/kind/pkg/errors" +) + +// RemoveKIND removes the kind cluster kindClusterName from the KUBECONFIG +// files at configPaths +func RemoveKIND(kindClusterName string, explicitPath string) error { + // remove kind from each if present + for _, configPath := range paths(explicitPath, os.Getenv) { + if err := func(configPath string) error { + // lock before modifying + if err := lockFile(configPath); err != nil { + return errors.Wrap(err, "failed to lock config file") + } + defer func(configPath string) { + _ = unlockFile(configPath) + }(configPath) + + // read in existing + existing, err := read(configPath) + if err != nil { + return errors.Wrap(err, "failed to read kubeconfig to remove KIND entry") + } + + // remove the kind cluster from the config + if remove(existing, kindClusterName) { + // write out the updated config if we modified anything + if err := write(existing, configPath); err != nil { + return err + } + } + + return nil + }(configPath); err != nil { + return err + } + } + return nil +} + +// remove drops kindClusterName entries from the cfg +func remove(cfg *Config, kindClusterName string) bool { + mutated := false + + // get kind cluster identifier + key := KINDClusterKey(kindClusterName) + + // filter out kind cluster from clusters + kept := 0 + for _, c := range cfg.Clusters { + if c.Name != key { + cfg.Clusters[kept] = c + kept++ + } else { + mutated = true + } + } + cfg.Clusters = cfg.Clusters[:kept] + + // filter out kind cluster from users + kept = 0 + for _, u := range cfg.Users { + if u.Name != key { + cfg.Users[kept] = u + kept++ + } else { + mutated = true + } + } + cfg.Users = cfg.Users[:kept] + + // filter out kind cluster from contexts + kept = 0 + for _, c := range cfg.Contexts { + if c.Name != key { + cfg.Contexts[kept] = c + kept++ + } else { + mutated = true + } + } + cfg.Contexts = cfg.Contexts[:kept] + + // unset current context if it points to this cluster + if cfg.CurrentContext == key { + cfg.CurrentContext = "" + mutated = true + } + + return mutated +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/types.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/types.go new file mode 100644 index 000000000..1da5df545 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/types.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubeconfig + +/* +NOTE: all of these types are based on the upstream v1 types from client-go +https://github.com/kubernetes/client-go/blob/0bdba2f9188006fc64057c2f6d82a0f9ee0ee422/tools/clientcmd/api/v1/types.go + +We've forked them to: +- remove types and fields kind does not need to inspect / modify +- generically support fields kind doesn't inspect / modify using yaml.v3 +- have clearer names (AuthInfo -> User) +*/ + +// Config represents a KUBECONFIG, with the fields kind is likely to use +// Other fields are handled as unstructured data purely read for writing back +// to disk via the OtherFields field +type Config struct { + // Clusters is a map of referenceable names to cluster configs + Clusters []NamedCluster `yaml:"clusters,omitempty"` + // Users is a map of referenceable names to user configs + Users []NamedUser `yaml:"users,omitempty"` + // Contexts is a map of referenceable names to context configs + Contexts []NamedContext `yaml:"contexts,omitempty"` + // CurrentContext is the name of the context that you would like to use by default + CurrentContext string `yaml:"current-context,omitempty"` + // OtherFields contains fields kind does not inspect or modify, these are + // read purely for writing back + OtherFields map[string]interface{} `yaml:",inline,omitempty"` +} + +// NamedCluster relates nicknames to cluster information +type NamedCluster struct { + // Name is the nickname for this Cluster + Name string `yaml:"name"` + // Cluster holds the cluster information + Cluster Cluster `yaml:"cluster"` +} + +// Cluster contains information about how to communicate with a kubernetes cluster +type Cluster struct { + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `yaml:"server,omitempty"` + // OtherFields contains fields kind does not inspect or modify, these are + // read purely for writing back + OtherFields map[string]interface{} `yaml:",inline,omitempty"` +} + +// NamedUser relates nicknames to user information +type NamedUser struct { + // Name is the nickname for this User + Name string `yaml:"name"` + // User holds the user information + // We do not touch this and merely write it back + User map[string]interface{} `yaml:"user"` +} + +// NamedContext relates nicknames to context information +type NamedContext struct { + // Name is the nickname for this Context + Name string `yaml:"name"` + // Context holds the context information + Context Context `yaml:"context"` +} + +// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) +type Context struct { + // Cluster is the name of the cluster for this context + Cluster string `yaml:"cluster"` + // User is the name of the User for this context + User string `yaml:"user"` + // OtherFields contains fields kind does not inspect or modify, these are + // read purely for writing back + OtherFields map[string]interface{} `yaml:",inline,omitempty"` +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/write.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/write.go new file mode 100644 index 000000000..85570ae4b --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/write.go @@ -0,0 +1,45 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "io/ioutil" + "os" + "path/filepath" + + "sigs.k8s.io/kind/pkg/errors" +) + +// write writes cfg to configPath +// it will ensure the directories in the path if necessary +func write(cfg *Config, configPath string) error { + encoded, err := Encode(cfg) + if err != nil { + return err + } + // NOTE: 0755 / 0600 are to match client-go + dir := filepath.Dir(configPath) + if _, err := os.Stat(dir); os.IsNotExist(err) { + if err = os.MkdirAll(dir, 0755); err != nil { + return errors.Wrap(err, "failed to create directory for KUBECONFIG") + } + } + if err := ioutil.WriteFile(configPath, encoded, 0600); err != nil { + return errors.Wrap(err, "failed to write KUBECONFIG") + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/kubeconfig.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/kubeconfig.go new file mode 100644 index 000000000..3763980f7 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/kubeconfig.go @@ -0,0 +1,104 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kubeconfig provides utilities kind uses internally to manage +// kind cluster kubeconfigs +package kubeconfig + +import ( + "bytes" + + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/errors" + + // this package has slightly more generic kubeconfig helpers + // and minimal dependencies on the rest of kind + "sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig" + "sigs.k8s.io/kind/pkg/cluster/internal/providers" +) + +// Export exports the kubeconfig given the cluster context and a path to write it to +// This will always be an external kubeconfig +func Export(p providers.Provider, name, explicitPath string) error { + cfg, err := get(p, name, true) + if err != nil { + return err + } + return kubeconfig.WriteMerged(cfg, explicitPath) +} + +// Remove removes clusterName from the kubeconfig paths detected based on +// either explicitPath being set or $KUBECONFIG or $HOME/.kube/config, following +// the rules set by kubectl +// clusterName must identify a kind cluster. 
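+//
+// For example (illustrative): Remove("dev", "") walks $KUBECONFIG (or
+// ~/.kube/config) and strips the cluster, user, and context entries named
+// "kind-dev" from each file in which they appear.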
+func Remove(clusterName, explicitPath string) error {
+	return kubeconfig.RemoveKIND(clusterName, explicitPath)
+}
+
+// Get returns the kubeconfig for the cluster
+// external controls if the internal IP address is used or the host endpoint
+func Get(p providers.Provider, name string, external bool) (string, error) {
+	cfg, err := get(p, name, external)
+	if err != nil {
+		return "", err
+	}
+	b, err := kubeconfig.Encode(cfg)
+	if err != nil {
+		return "", err
+	}
+	return string(b), err
+}
+
+// ContextForCluster returns the context name for a kind cluster based on
+// its name. This key is used for all list entries of kind clusters
+func ContextForCluster(kindClusterName string) string {
+	return kubeconfig.KINDClusterKey(kindClusterName)
+}
+
+func get(p providers.Provider, name string, external bool) (*kubeconfig.Config, error) {
+	// find a control plane node to get the kubeadm config from
+	n, err := p.ListNodes(name)
+	if err != nil {
+		return nil, err
+	}
+	var buff bytes.Buffer
+	nodes, err := nodeutils.ControlPlaneNodes(n)
+	if err != nil {
+		return nil, err
+	}
+	if len(nodes) < 1 {
+		return nil, errors.New("could not locate any control plane nodes")
+	}
+	node := nodes[0]
+
+	// grab kubeconfig version from the node
+	if err := node.Command("cat", "/etc/kubernetes/admin.conf").SetStdout(&buff).Run(); err != nil {
+		return nil, errors.Wrap(err, "failed to get cluster internal kubeconfig")
+	}
+
+	// if we're doing external we need to override the server endpoint
+	server := ""
+	if external {
+		endpoint, err := p.GetAPIServerEndpoint(name)
+		if err != nil {
+			return nil, err
+		}
+		server = "https://" + endpoint
+	}
+
+	// actually encode
+	return kubeconfig.KINDFromRawKubeadm(buff.String(), name, server)
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/config.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/config.go
new file mode 100644
index 000000000..71e17388b
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/config.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loadbalancer
+
+import (
+	"bytes"
+	"text/template"
+
+	"sigs.k8s.io/kind/pkg/errors"
+)
+
+// ConfigData is supplied to the loadbalancer config template
+type ConfigData struct {
+	ControlPlanePort int
+	BackendServers   map[string]string
+	IPv6             bool
+}
+
+// DefaultConfigTemplate is the loadbalancer config template
+const DefaultConfigTemplate = `# generated by kind
+global
+  log /dev/log local0
+  log /dev/log local1 notice
+  daemon
+
+resolvers docker
+  nameserver dns 127.0.0.11:53
+
+defaults
+  log global
+  mode tcp
+  option dontlognull
+  # TODO: tune these
+  timeout connect 5000
+  timeout client 50000
+  timeout server 50000
+  # allow to boot despite dns don't resolve backends
+  default-server init-addr none
+
+frontend control-plane
+  bind *:{{ .ControlPlanePort }}
+  {{ if .IPv6 -}}
+  bind :::{{ .ControlPlanePort }};
+  {{- end }}
+  default_backend kube-apiservers
+
+backend kube-apiservers
+  option httpchk GET /healthz
+  # TODO: we should be verifying (!)
+  {{range $server, $address := .BackendServers}}
+  server {{ $server }} {{ $address }} check check-ssl verify none resolvers docker resolve-prefer {{ if $.IPv6 -}} ipv6 {{- else -}} ipv4 {{- end }}
+  {{- end}}
+`
+
+// Config returns a loadbalancer config generated from config data
+func Config(data *ConfigData) (config string, err error) {
+	t, err := template.New("loadbalancer-config").Parse(DefaultConfigTemplate)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to parse config template")
+	}
+	// execute the template
+	var buff bytes.Buffer
+	err = t.Execute(&buff, data)
+	if err != nil {
+		return "", errors.Wrap(err, "error executing config template")
+	}
+	return buff.String(), nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/const.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/const.go
new file mode 100644
index 000000000..14df1565c
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/const.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loadbalancer
+
+// Image defines the loadbalancer image:tag
+const Image = "kindest/haproxy:v20200708-548e36db"
+
+// ConfigPath defines the path to the config file in the image
+const ConfigPath = "/usr/local/etc/haproxy/haproxy.cfg"
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/doc.go
new file mode 100644
index 000000000..6e53f388d
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package loadbalancer contains external loadbalancer related constants and configuration +package loadbalancer diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/doc.go new file mode 100644 index 000000000..000960370 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package logs contains tooling for obtaining cluster logs +package logs diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/logs.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/logs.go new file mode 100644 index 000000000..1f359aac2 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/logs.go @@ -0,0 +1,106 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + + "github.com/alessio/shellescape" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/log" +) + +// DumpDir dumps the dir nodeDir on the node to the dir hostDir on the host +func DumpDir(logger log.Logger, node nodes.Node, nodeDir, hostDir string) (err error) { + cmd := node.Command( + "sh", "-c", + // Tar will exit 1 if a file changed during the archival. + // We don't care about this, so we're invoking it in a shell + // And masking out 1 as a return value. + // Fatal errors will return exit code 2. + // http://man7.org/linux/man-pages/man1/tar.1.html#RETURN_VALUE + fmt.Sprintf( + `tar --hard-dereference -C %s -chf - . || (r=$?; [ $r -eq 1 ] || exit $r)`, + shellescape.Quote(path.Clean(nodeDir)+"/"), + ), + ) + + return exec.RunWithStdoutReader(cmd, func(outReader io.Reader) error { + if err := untar(logger, outReader, hostDir); err != nil { + return errors.Wrapf(err, "Untarring %q: %v", nodeDir, err) + } + return nil + }) +} + +// untar reads the tar file from r and writes it into dir. 
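+// Only regular files and directories are materialized: other tar entry types
+// are logged and skipped, and the remainder of the stream is drained at EOF
+// so the writing end of the pipe is not left blocked.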
+func untar(logger log.Logger, r io.Reader, dir string) (err error) { + tr := tar.NewReader(r) + for { + f, err := tr.Next() + + switch { + case err == io.EOF: + // drain the reader, which may have trailing null bytes + // we don't want to leave the writer hanging + _, err := io.Copy(ioutil.Discard, r) + return err + case err != nil: + return errors.Wrapf(err, "tar reading error: %v", err) + case f == nil: + continue + } + + rel := filepath.FromSlash(f.Name) + abs := filepath.Join(dir, rel) + + switch f.Typeflag { + case tar.TypeReg: + wf, err := os.OpenFile(abs, os.O_CREATE|os.O_RDWR, os.FileMode(f.Mode)) + if err != nil { + return err + } + n, err := io.Copy(wf, tr) + if closeErr := wf.Close(); closeErr != nil && err == nil { + err = closeErr + } + if err != nil { + return errors.Errorf("error writing to %s: %v", abs, err) + } + if n != f.Size { + return errors.Errorf("only wrote %d bytes to %s; expected %d", n, abs, f.Size) + } + case tar.TypeDir: + if _, err := os.Stat(abs); err != nil { + if err := os.MkdirAll(abs, 0755); err != nil { + return err + } + } + default: + logger.Warnf("tar file entry %s contained unsupported file type %v", f.Name, f.Typeflag) + } + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/doc.go new file mode 100644 index 000000000..d24d9ce39 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package patch contains helpers for applying patches +package patch diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/json6902patch.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/json6902patch.go new file mode 100644 index 000000000..e140c0e4b --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/json6902patch.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package patch
+
+import (
+	jsonpatch "github.com/evanphx/json-patch/v5"
+
+	"sigs.k8s.io/yaml"
+
+	"sigs.k8s.io/kind/pkg/errors"
+
+	"sigs.k8s.io/kind/pkg/internal/apis/config"
+)
+
+type json6902Patch struct {
+	raw       string          // raw original contents
+	patch     jsonpatch.Patch // processed JSON 6902 patch
+	matchInfo matchInfo       // used to match resources
+}
+
+func convertJSON6902Patches(patchesJSON6902 []config.PatchJSON6902) ([]json6902Patch, error) {
+	patches := []json6902Patch{}
+	for _, configPatch := range patchesJSON6902 {
+		patchJSON, err := yaml.YAMLToJSON([]byte(configPatch.Patch))
+		if err != nil {
+			return nil, errors.WithStack(err)
+		}
+		patch, err := jsonpatch.DecodePatch(patchJSON)
+		if err != nil {
+			return nil, errors.WithStack(err)
+		}
+		patches = append(patches, json6902Patch{
+			raw:       configPatch.Patch,
+			patch:     patch,
+			matchInfo: matchInfoForConfigJSON6902Patch(configPatch),
+		})
+	}
+	return patches, nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/kubeyaml.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/kubeyaml.go
new file mode 100644
index 000000000..9bdd1997f
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/kubeyaml.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package patch
+
+import (
+	"strings"
+
+	"sigs.k8s.io/kind/pkg/errors"
+
+	"sigs.k8s.io/kind/pkg/internal/apis/config"
+)
+
+// KubeYAML takes a Kubernetes object YAML document stream to patch,
+// merge patches, and JSON 6902 patches.
+//
+// It returns a patched YAML document stream.
+//
+// Matching is performed on Kubernetes style v1 TypeMeta fields
+// (kind and apiVersion), between the YAML documents and the patches.
+//
+// Patches match if their kind and apiVersion match a document, with the exception
+// that if the patch does not set apiVersion it will be ignored.
+func KubeYAML(toPatch string, patches []string, patches6902 []config.PatchJSON6902) (string, error) {
+	// pre-process, including splitting up documents etc.
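+	// (each input is parsed once up front: the document stream into
+	// per-resource JSON, the merge patches into JSON merge patches, and the
+	// 6902 patches into decoded jsonpatch operations)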
+ resources, err := parseResources(toPatch) + if err != nil { + return "", errors.Wrap(err, "failed to parse yaml to patch") + } + mergePatches, err := parseMergePatches(patches) + if err != nil { + return "", errors.Wrap(err, "failed to parse patches") + } + json6902patches, err := convertJSON6902Patches(patches6902) + if err != nil { + return "", errors.Wrap(err, "failed to parse JSON 6902 patches") + } + // apply patches and build result + builder := &strings.Builder{} + for i, r := range resources { + // apply merge patches + for _, p := range mergePatches { + if _, err := r.applyMergePatch(p); err != nil { + return "", errors.Wrap(err, "failed to apply patch") + } + } + // apply RFC 6902 JSON patches + for _, p := range json6902patches { + if _, err := r.apply6902Patch(p); err != nil { + return "", errors.Wrap(err, "failed to apply JSON 6902 patch") + } + } + // write out result + if err := r.encodeTo(builder); err != nil { + return "", errors.Wrap(err, "failed to write patched resource") + } + // write document separator + if i+1 < len(resources) { + if _, err := builder.WriteString("---\n"); err != nil { + return "", errors.Wrap(err, "failed to write document separator") + } + } + } + // verify that all patches were used + return builder.String(), nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/matchinfo.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/matchinfo.go new file mode 100644 index 000000000..7674ea5fb --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/matchinfo.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package patch + +import ( + "sigs.k8s.io/yaml" + + "sigs.k8s.io/kind/pkg/errors" + + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// we match resources and patches on their v1 TypeMeta +type matchInfo struct { + Kind string `json:"kind,omitempty"` + APIVersion string `json:"apiVersion,omitempty"` +} + +func parseYAMLMatchInfo(raw string) (matchInfo, error) { + m := matchInfo{} + if err := yaml.Unmarshal([]byte(raw), &m); err != nil { + return matchInfo{}, errors.Wrapf(err, "failed to parse type meta for %q", raw) + } + return m, nil +} + +func matchInfoForConfigJSON6902Patch(patch config.PatchJSON6902) matchInfo { + return matchInfo{ + Kind: patch.Kind, + APIVersion: groupVersionToAPIVersion(patch.Group, patch.Version), + } +} + +func groupVersionToAPIVersion(group, version string) string { + if group == "" { + return version + } + return group + "/" + version +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/mergepatch.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/mergepatch.go new file mode 100644 index 000000000..7b8679f73 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/mergepatch.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package patch + +import ( + "sigs.k8s.io/yaml" + + "sigs.k8s.io/kind/pkg/errors" +) + +type mergePatch struct { + raw string // the original raw data + json []byte // the processed data (in JSON form) + matchInfo matchInfo // for matching resources +} + +func parseMergePatches(rawPatches []string) ([]mergePatch, error) { + patches := []mergePatch{} + for _, raw := range rawPatches { + matchInfo, err := parseYAMLMatchInfo(raw) + if err != nil { + return nil, errors.WithStack(err) + } + json, err := yaml.YAMLToJSON([]byte(raw)) + if err != nil { + return nil, errors.WithStack(err) + } + patches = append(patches, mergePatch{ + raw: raw, + json: json, + matchInfo: matchInfo, + }) + } + return patches, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/resource.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/resource.go new file mode 100644 index 000000000..17f898ec9 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/resource.go @@ -0,0 +1,148 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package patch + +import ( + "bufio" + "bytes" + "io" + "strings" + + jsonpatch "github.com/evanphx/json-patch/v5" + + "sigs.k8s.io/yaml" + + "sigs.k8s.io/kind/pkg/errors" +) + +type resource struct { + raw string // the original raw data + json []byte // the processed data (in JSON form), may be mutated + matchInfo matchInfo // for matching patches +} + +func (r *resource) apply6902Patch(patch json6902Patch) (matches bool, err error) { + if !r.matches(patch.matchInfo) { + return false, nil + } + patched, err := patch.patch.Apply(r.json) + if err != nil { + return true, errors.WithStack(err) + } + r.json = patched + return true, nil +} + +func (r *resource) applyMergePatch(patch mergePatch) (matches bool, err error) { + if !r.matches(patch.matchInfo) { + return false, nil + } + patched, err := jsonpatch.MergePatch(r.json, patch.json) + if err != nil { + return true, errors.WithStack(err) + } + r.json = patched + return true, nil +} + +func (r resource) matches(o matchInfo) bool { + m := &r.matchInfo + // we require kind to match, but if the patch does not specify + // APIVersion we ignore it (eg to allow trivial patches across kubeadm versions) + return m.Kind == o.Kind && (o.APIVersion == "" || m.APIVersion == o.APIVersion) +} + +func (r *resource) encodeTo(w io.Writer) error { + encoded, err := yaml.JSONToYAML(r.json) + if err != nil { + return errors.WithStack(err) + } + if _, err := w.Write(encoded); err != nil { + return errors.WithStack(err) + } + return nil +} + +func parseResources(yamlDocumentStream string) ([]resource, error) { + resources := []resource{} + documents, err := splitYAMLDocuments(yamlDocumentStream) + if err != nil { + return nil, err + } + for _, raw := range documents { + matchInfo, err := parseYAMLMatchInfo(raw) + if err != nil { + return nil, errors.WithStack(err) + } + json, err := yaml.YAMLToJSON([]byte(raw)) + if err != nil { + return nil, errors.WithStack(err) + } + resources = append(resources, resource{ + raw: raw, + json: json, + matchInfo: matchInfo, + }) + } + return resources, nil +} + +func splitYAMLDocuments(yamlDocumentStream string) ([]string, error) { + documents := []string{} + scanner := bufio.NewScanner(strings.NewReader(yamlDocumentStream)) + scanner.Split(splitYAMLDocument) + for scanner.Scan() { + documents = append(documents, scanner.Text()) + } + if err := scanner.Err(); err != nil { + return nil, errors.Wrap(err, "error splitting documents") + } + return documents, nil +} + +const yamlSeparator = "\n---" + +// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents. +// this is borrowed from k8s.io/apimachinery/pkg/util/yaml/decoder.go +func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + sep := len([]byte(yamlSeparator)) + if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 { + // We have a potential document terminator + i += sep + after := data[i:] + if len(after) == 0 { + // we can't read any more characters + if atEOF { + return len(data), data[:len(data)-sep], nil + } + return 0, nil, nil + } + if j := bytes.IndexByte(after, '\n'); j >= 0 { + return i + j + 1, data[0 : i-sep], nil + } + return 0, nil, nil + } + // If we're at EOF, we have a final, non-terminated line. Return it. + if atEOF { + return len(data), data, nil + } + // Request more data. 
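+	// (for a bufio.SplitFunc, returning advance=0 with a nil token asks the
+	// Scanner to buffer more input and call the split function again)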
+	return 0, nil, nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/toml.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/toml.go
new file mode 100644
index 000000000..5923977a6
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/patch/toml.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package patch
+
+import (
+	"bytes"
+	"encoding/json"
+
+	burntoml "github.com/BurntSushi/toml"
+	jsonpatch "github.com/evanphx/json-patch/v5"
+	toml "github.com/pelletier/go-toml"
+	yaml "gopkg.in/yaml.v3"
+
+	"sigs.k8s.io/kind/pkg/errors"
+)
+
+// TOML patches toPatch with the patches (should be TOML merge patches) and patches6902 (should be JSON 6902 patches)
+func TOML(toPatch string, patches []string, patches6902 []string) (string, error) {
+	// convert to JSON for patching
+	j, err := tomlToJSON([]byte(toPatch))
+	if err != nil {
+		return "", err
+	}
+	// apply merge patches
+	for _, patch := range patches {
+		pj, err := tomlToJSON([]byte(patch))
+		if err != nil {
+			return "", err
+		}
+		patched, err := jsonpatch.MergePatch(j, pj)
+		if err != nil {
+			return "", errors.WithStack(err)
+		}
+		j = patched
+	}
+	// apply JSON 6902 patches
+	for _, patch6902 := range patches6902 {
+		patch, err := jsonpatch.DecodePatch([]byte(patch6902))
+		if err != nil {
+			return "", errors.WithStack(err)
+		}
+		patched, err := patch.Apply(j)
+		if err != nil {
+			return "", errors.WithStack(err)
+		}
+		j = patched
+	}
+	// convert result back to TOML
+	return jsonToTOMLString(j)
+}
+
+// tomlToJSON converts arbitrary TOML to JSON
+func tomlToJSON(t []byte) ([]byte, error) {
+	// we use github.com/pelletier/go-toml here to unmarshal arbitrary TOML to JSON
+	tree, err := toml.LoadBytes(t)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+	b, err := json.Marshal(tree.ToMap())
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+	return b, nil
+}
+
+// jsonToTOMLString converts arbitrary JSON to TOML
+func jsonToTOMLString(j []byte) (string, error) {
+	var unstruct interface{}
+	// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+	// Go JSON library doesn't try to pick the right number type (int, float,
+	// etc.) when unmarshalling to interface{}, it just picks float64
+	// universally. go-yaml does go through the effort of picking the right
+	// number type, so we can preserve number type throughout this process.
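+	// (e.g. a TOML integer that passed through JSON as 5 comes back as an
+	// integer here, rather than being re-encoded in TOML as 5.0)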
+	if err := yaml.Unmarshal(j, &unstruct); err != nil {
+		return "", errors.WithStack(err)
+	}
+	// we use github.com/BurntSushi/toml here because github.com/pelletier/go-toml
+	// can only marshal structs AND BurntSushi/toml is what containerd uses
+	// and has more canonically formatted output (we initially plan to use
+	// this package for patching containerd config)
+	var buff bytes.Buffer
+	if err := burntoml.NewEncoder(&buff).Encode(unstruct); err != nil {
+		return "", errors.WithStack(err)
+	}
+	return buff.String(), nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/constants.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/constants.go
new file mode 100644
index 000000000..131218f19
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/constants.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// APIServerInternalPort defines the port where the control plane is listening
+// _inside_ the node network
+const APIServerInternalPort = 6443
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/doc.go
new file mode 100644
index 000000000..dd4311626
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package common contains common code for implementing providers
+package common
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/getport.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/getport.go
new file mode 100644
index 000000000..5c50da94c
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/getport.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"net"
+)
+
+// PortOrGetFreePort is a helper that either returns the provided port
+// if valid or returns a new free port on listenAddr
+func PortOrGetFreePort(port int32, listenAddr string) (int32, error) {
+	// in the case of -1 we actually want to pass 0 to the backend to let it pick
+	if port == -1 {
+		return 0, nil
+	}
+	// in the case of 0 (unset) we want kind to pick one and supply it to the backend
+	if port == 0 {
+		return GetFreePort(listenAddr)
+	}
+	// otherwise keep the port
+	return port, nil
+}
+
+// GetFreePort is a helper used to get a free TCP port on the host
+func GetFreePort(listenAddr string) (int32, error) {
+	dummyListener, err := net.Listen("tcp", net.JoinHostPort(listenAddr, "0"))
+	if err != nil {
+		return 0, err
+	}
+	defer dummyListener.Close()
+	port := dummyListener.Addr().(*net.TCPAddr).Port
+	return int32(port), nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/images.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/images.go
new file mode 100644
index 000000000..f66155b38
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/images.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	"sigs.k8s.io/kind/pkg/internal/apis/config"
+)
+
+// RequiredNodeImages returns the set of _node_ images specified by the config
+// This does not include the loadbalancer image, and is only used to improve
+// the UX by explicitly pulling the node images prior to running
+func RequiredNodeImages(cfg *config.Cluster) sets.String {
+	images := sets.NewString()
+	for _, node := range cfg.Nodes {
+		images.Insert(node.Image)
+	}
+	return images
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/logs.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/logs.go
new file mode 100644
index 000000000..57fcb8cf1
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/logs.go
@@ -0,0 +1,71 @@
+package common
+
+import (
+	"os"
+	"path/filepath"
+
+	"sigs.k8s.io/kind/pkg/cluster/nodes"
+	"sigs.k8s.io/kind/pkg/cmd/kind/version"
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/exec"
+)
+
+// CollectLogs provides the common functionality
+// to get various debug info from the node
+func CollectLogs(n nodes.Node, dir string) error {
+	execToPathFn := func(cmd exec.Cmd, path string) func() error {
+		return func() error {
+			f, err := FileOnHost(filepath.Join(dir, path))
+			if err != nil {
+				return err
+			}
+			defer f.Close()
+			return cmd.SetStdout(f).SetStderr(f).Run()
+		}
+	}
+	writeToPathFn := func(s string, path string) func() error {
+		return func() error {
+			f, err := FileOnHost(path)
+			if err != nil {
+				return err
+			}
+			defer f.Close()
+			_, err = f.WriteString(s)
+			return err
+		}
+	}
+
+	return errors.AggregateConcurrent([]func() error{
+		// record info about the node container
+		writeToPathFn(
+			version.DisplayVersion(),
+			filepath.Join(dir, "kind-version.txt"),
+		),
+		execToPathFn(
+			n.Command("cat", "/kind/version"),
+			"kubernetes-version.txt",
+		),
+		execToPathFn(
+			n.Command("journalctl", "--no-pager"),
+			"journal.log",
+		),
+		execToPathFn(
+			n.Command("journalctl", "--no-pager", "-u", "kubelet.service"),
+			"kubelet.log",
+		),
+		execToPathFn(
+			n.Command("journalctl", "--no-pager", "-u", "containerd.service"),
+			"containerd.log",
+		),
+	})
+}
+
+// FileOnHost is a helper to create a file at path
+// even if the parent directory doesn't exist
+// in which case it will be created with ModePerm
+func FileOnHost(path string) (*os.File, error) {
+	if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
+		return nil, err
+	}
+	return os.Create(path)
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/namer.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/namer.go
new file mode 100644
index 000000000..9abade8ce
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/namer.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"fmt"
+)
+
+// MakeNodeNamer returns a func(role string)(nodeName string)
+// used to name nodes based on their role and the clusterName
+func MakeNodeNamer(clusterName string) func(string) string {
+	counter := make(map[string]int)
+	return func(role string) string {
+		count := 1
+		suffix := ""
+		if v, ok := counter[role]; ok {
+			count += v
+			suffix = fmt.Sprintf("%d", count)
+		}
+		counter[role] = count
+		return fmt.Sprintf("%s-%s%s", clusterName, role, suffix)
+	}
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/proxy.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/proxy.go
new file mode 100644
index 000000000..4e896b6e1
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/proxy.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"os"
+	"strings"
+
+	"sigs.k8s.io/kind/pkg/internal/apis/config"
+)
+
+const (
+	// HTTPProxy is the HTTP_PROXY environment variable key
+	HTTPProxy = "HTTP_PROXY"
+	// HTTPSProxy is the HTTPS_PROXY environment variable key
+	HTTPSProxy = "HTTPS_PROXY"
+	// NOProxy is the NO_PROXY environment variable key
+	NOProxy = "NO_PROXY"
+)
+
+// GetProxyEnvs returns a map of proxy environment variables to their values
+// If proxy settings are set, NO_PROXY is modified to include the cluster subnets
+func GetProxyEnvs(cfg *config.Cluster) map[string]string {
+	return getProxyEnvs(cfg, os.Getenv)
+}
+
+func getProxyEnvs(cfg *config.Cluster, getEnv func(string) string) map[string]string {
+	envs := make(map[string]string)
+	for _, name := range []string{HTTPProxy, HTTPSProxy, NOProxy} {
+		val := getEnv(name)
+		if val == "" {
+			val = getEnv(strings.ToLower(name))
+		}
+		if val != "" {
+			envs[name] = val
+			envs[strings.ToLower(name)] = val
+		}
+	}
+	// Specifically add the cluster subnets to NO_PROXY if we are using a proxy
+	if len(envs) > 0 {
+		noProxy := envs[NOProxy]
+		if noProxy != "" {
+			noProxy += ","
+		}
+		noProxy += cfg.Networking.ServiceSubnet + "," + cfg.Networking.PodSubnet
+		envs[NOProxy] = noProxy
+		envs[strings.ToLower(NOProxy)] = noProxy
+	}
+	return envs
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/OWNERS b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/OWNERS
new file mode 100644
index 000000000..33e190320
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/OWNERS
@@ -0,0 +1,2 @@
+labels:
+- area/provider/docker
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/constants.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/constants.go
new file mode 100644
index 000000000..40fd79d9d
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/constants.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package docker
+
+// clusterLabelKey is applied to each "node" docker container for identification
+const clusterLabelKey = "io.x-k8s.kind.cluster"
+
+// nodeRoleLabelKey is applied to each "node" docker container for categorization
+// of nodes by role
+const nodeRoleLabelKey = "io.x-k8s.kind.role"
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/images.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/images.go
new file mode 100644
index 000000000..8ad37d073
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/images.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package docker
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/exec"
+	"sigs.k8s.io/kind/pkg/log"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers/common"
+	"sigs.k8s.io/kind/pkg/internal/apis/config"
+	"sigs.k8s.io/kind/pkg/internal/cli"
+)
+
+// ensureNodeImages ensures that the node images used by the create
+// configuration are present
+func ensureNodeImages(logger log.Logger, status *cli.Status, cfg *config.Cluster) error {
+	// pull each required image
+	for _, image := range common.RequiredNodeImages(cfg).List() {
+		// prints user friendly message
+		friendlyImageName, image := sanitizeImage(image)
+		status.Start(fmt.Sprintf("Ensuring node image (%s) 🖼", friendlyImageName))
+		if _, err := pullIfNotPresent(logger, image, 4); err != nil {
+			status.End(false)
+			return err
+		}
+	}
+	return nil
+}
+
+// pullIfNotPresent will pull an image if it is not present locally
+// retrying up to retries times
+// it returns true if it attempted to pull, and any errors from pulling
+func pullIfNotPresent(logger log.Logger, image string, retries int) (pulled bool, err error) {
+	// TODO(bentheelder): switch most (all) of the logging here to debug level
+	// once we have configurable log levels
+	// if this did not return an error, then the image exists locally
+	cmd := exec.Command("docker", "inspect", "--type=image", image)
+	if err := cmd.Run(); err == nil {
+		logger.V(1).Infof("Image: %s present locally", image)
+		return false, nil
+	}
+	// otherwise try to pull it
+	return true, pull(logger, image, retries)
+}
+
+// pull pulls an image, retrying up to retries times
+func pull(logger log.Logger, image string, retries int) error {
+	logger.V(1).Infof("Pulling image: %s ...", image)
+	err := exec.Command("docker", "pull", image).Run()
+	// retry pulling up to retries times if necessary
+	if err != nil {
+		for i := 0; i < retries; i++ {
+			time.Sleep(time.Second * time.Duration(i+1))
+			logger.V(1).Infof("Trying again to pull image: %q ... %v", image, err)
+			// TODO(bentheelder): add some backoff / sleep?
+			err = exec.Command("docker", "pull", image).Run()
+			if err == nil {
+				break
+			}
+		}
+	}
+	return errors.Wrapf(err, "failed to pull image %q", image)
+}
+
+// sanitizeImage is a helper to return human readable image name and
+// the docker pullable image name from the provided image
+func sanitizeImage(image string) (string, string) {
+	if strings.Contains(image, "@sha256:") {
+		return strings.Split(image, "@sha256:")[0], image
+	}
+	return image, image
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/network.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/network.go
new file mode 100644
index 000000000..46636f289
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/network.go
@@ -0,0 +1,308 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package docker
+
+import (
+	"bytes"
+	"crypto/sha1"
+	"encoding/binary"
+	"encoding/json"
+	"io"
+	"net"
+	"regexp"
+	"sort"
+	"strings"
+
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/exec"
+)
+
+// This may be overridden by KIND_EXPERIMENTAL_DOCKER_NETWORK env,
+// experimentally...
+//
+// By default currently picking a single network is equivalent to the previous
+// behavior *except* that we moved from the default bridge to a user defined
+// network because the default bridge is actually special versus any other
+// docker network and lacks the embedded DNS
+//
+// For now this also makes it easier for apps to join the same network, and
+// leaves users with complex networking desires to create and manage their own
+// networks.
+const fixedNetworkName = "kind"
+
+// ensureNetwork checks if docker network by name exists, if not it creates it
+func ensureNetwork(name string) error {
+	// check if network exists already and remove any duplicate networks
+	exists, err := removeDuplicateNetworks(name)
+	if err != nil {
+		return err
+	}
+
+	// network already exists, we're good
+	// TODO: the network might already exist and not have ipv6 ... :|
+	// discussion: https://github.com/kubernetes-sigs/kind/pull/1508#discussion_r414594198
+	if exists {
+		return nil
+	}
+
+	// Generate unique subnet per network based on the name
+	// obtained from the ULA fc00::/8 range
+	// Make N attempts with "probing" in case we happen to collide
+	subnet := generateULASubnetFromName(name, 0)
+	err = createNetworkNoDuplicates(name, subnet)
+	if err == nil {
+		// Success!
+		return nil
+	}
+
+	// On the first try check if ipv6 fails entirely on this machine
+	// https://github.com/kubernetes-sigs/kind/issues/1544
+	// Otherwise if it's not a pool overlap error, fail
+	// If it is, make more attempts below
+	if isIPv6UnavailableError(err) {
+		// only one attempt, IPAM is automatic in ipv4 only
+		return createNetworkNoDuplicates(name, "")
+	}
+	if isPoolOverlapError(err) {
+		// pool overlap suggests perhaps another process created the network
+		// check if network exists already and remove any duplicate networks
+		exists, err := checkIfNetworkExists(name)
+		if err != nil {
+			return err
+		}
+		if exists {
+			return nil
+		}
+		// otherwise we'll start trying with different subnets
+	} else {
+		// unknown error ...
+		return err
+	}
+
+	// keep trying for ipv6 subnets
+	const maxAttempts = 5
+	for attempt := int32(1); attempt < maxAttempts; attempt++ {
+		subnet := generateULASubnetFromName(name, attempt)
+		err = createNetworkNoDuplicates(name, subnet)
+		if err == nil {
+			// success!
+			return nil
+		}
+		if isPoolOverlapError(err) {
+			// pool overlap suggests perhaps another process created the network
+			// check if network exists already and remove any duplicate networks
+			exists, err := checkIfNetworkExists(name)
+			if err != nil {
+				return err
+			}
+			if exists {
+				return nil
+			}
+			// otherwise we'll try again
+			continue
+		}
+		// unknown error ...
+		return err
+	}
+	return errors.New("exhausted attempts trying to find a non-overlapping subnet")
+}
+
+func createNetworkNoDuplicates(name, ipv6Subnet string) error {
+	if err := createNetwork(name, ipv6Subnet); err != nil && !isNetworkAlreadyExistsError(err) {
+		return err
+	}
+	_, err := removeDuplicateNetworks(name)
+	return err
+}
+
+func removeDuplicateNetworks(name string) (bool, error) {
+	networks, err := sortedNetworksWithName(name)
+	if err != nil {
+		return false, err
+	}
+	if len(networks) > 1 {
+		if err := deleteNetworks(networks[1:]...); err != nil && !isOnlyErrorNoSuchNetwork(err) {
+			return false, err
+		}
+	}
+	return len(networks) > 0, nil
+}
+
+func createNetwork(name, ipv6Subnet string) error {
+	if ipv6Subnet == "" {
+		return exec.Command("docker", "network", "create", "-d=bridge",
+			"-o", "com.docker.network.bridge.enable_ip_masquerade=true",
+			name).Run()
+	}
+	return exec.Command("docker", "network", "create", "-d=bridge",
+		"-o", "com.docker.network.bridge.enable_ip_masquerade=true",
+		"--ipv6", "--subnet", ipv6Subnet, name).Run()
+}
+
+func sortedNetworksWithName(name string) ([]string, error) {
+	// query which networks exist with the name
+	ids, err := networksWithName(name)
+	if err != nil {
+		return nil, err
+	}
+	// we can skip sorting if there are less than 2
+	if len(ids) < 2 {
+		return ids, nil
+	}
+	// inspect them to get more detail for sorting
+	networks, err := inspectNetworks(ids)
+	if err != nil {
+		return nil, err
+	}
+	// deterministically sort networks
+	// NOTE: THIS PART IS IMPORTANT!
+	sortNetworkInspectEntries(networks)
+	// return network IDs
+	sortedIDs := make([]string, 0, len(networks))
+	for i := range networks {
+		sortedIDs = append(sortedIDs, networks[i].ID)
+	}
+	return sortedIDs, nil
+}
+
+func sortNetworkInspectEntries(networks []networkInspectEntry) {
+	sort.Slice(networks, func(i, j int) bool {
+		// we want networks with active containers first
+		if len(networks[i].Containers) > len(networks[j].Containers) {
+			return true
+		}
+		return networks[i].ID < networks[j].ID
+	})
+}
+
+func inspectNetworks(networkIDs []string) ([]networkInspectEntry, error) {
+	inspectOut, err := exec.Output(exec.Command("docker", append([]string{"network", "inspect"}, networkIDs...)...))
+	// NOTE: the caller can detect if the network isn't present in the output anyhow
+	// we don't want to fail on this here.
+	if err != nil && !isOnlyErrorNoSuchNetwork(err) {
+		return nil, err
+	}
+	// parse
+	networks := []networkInspectEntry{}
+	if err := json.Unmarshal(inspectOut, &networks); err != nil {
+		return nil, errors.Wrap(err, "failed to decode networks list")
+	}
+	return networks, nil
+}
+
+type networkInspectEntry struct {
+	ID string `json:"Id"`
+	// NOTE: we don't care about the contents here but we need to parse
+	// how many entries exist in the containers map
+	Containers map[string]map[string]string `json:"Containers"`
+}
+
+// networksWithName returns a list of network IDs for networks with this name
+func networksWithName(name string) ([]string, error) {
+	lsOut, err := exec.Output(exec.Command(
+		"docker", "network", "ls",
+		"--filter=name=^"+regexp.QuoteMeta(name)+"$",
+		"--format={{.ID}}", // output as unambiguous IDs
+	))
+	if err != nil {
+		return nil, err
+	}
+	cleaned := strings.TrimSuffix(string(lsOut), "\n")
+	if cleaned == "" { // avoid returning []string{""}
+		return nil, nil
+	}
+	return strings.Split(cleaned, "\n"), nil
+}
+
+func checkIfNetworkExists(name string) (bool, error) {
+	out, err := exec.Output(exec.Command(
+		"docker", "network", "ls",
+		"--filter=name=^"+regexp.QuoteMeta(name)+"$",
+		"--format={{.Name}}",
+	))
+	return strings.HasPrefix(string(out), name), err
+}
+
+func isIPv6UnavailableError(err error) bool {
+	rerr := exec.RunErrorForError(err)
+	return rerr != nil && strings.HasPrefix(string(rerr.Output), "Error response from daemon: Cannot read IPv6 setup for bridge")
+}
+
+func isPoolOverlapError(err error) bool {
+	rerr := exec.RunErrorForError(err)
+	return rerr != nil && strings.HasPrefix(string(rerr.Output), "Error response from daemon: Pool overlaps with other one on this address space")
+}
+
+func isNetworkAlreadyExistsError(err error) bool {
+	rerr := exec.RunErrorForError(err)
+	return rerr != nil && strings.HasPrefix(string(rerr.Output), "Error response from daemon: network with name") && strings.Contains(string(rerr.Output), "already exists")
+}
+
+// returns true if:
+// - err is nil
+// - err only contains no such network errors
+func isOnlyErrorNoSuchNetwork(err error) bool {
+	rerr := exec.RunErrorForError(err)
+	if rerr == nil {
+		return false
+	}
+	// check all lines of output from errored command
+	b := bytes.NewBuffer(rerr.Output)
+	for {
+		l, err := b.ReadBytes('\n')
+		if err == io.EOF {
+			break
+		} else if err != nil {
+			return false
+		}
+		// if the line begins with Error: No such network: it's fine
+		s := string(l)
+		if strings.HasPrefix(s, "Error: No such network:") {
+			continue
+		}
+		// other errors are not fine
+		if strings.HasPrefix(s, "Error: ") {
+			return false
+		}
+		// other line contents should just be network references
+	}
+	return true
+}
+
+func deleteNetworks(networks ...string) error {
+	return exec.Command("docker", append([]string{"network", "rm"}, networks...)...).Run()
+}
+
+// generateULASubnetFromName generate an IPv6 subnet based on the
+// name and Nth probing attempt
+func generateULASubnetFromName(name string, attempt int32) string {
+	ip := make([]byte, 16)
+	ip[0] = 0xfc
+	ip[1] = 0x00
+	h := sha1.New()
+	_, _ = h.Write([]byte(name))
+	_ = binary.Write(h, binary.LittleEndian, attempt)
+	bs := h.Sum(nil)
+	for i := 2; i < 8; i++ {
+		ip[i] = bs[i]
+	}
+	subnet := &net.IPNet{
+		IP:   net.IP(ip),
+		Mask: net.CIDRMask(64, 128),
+	}
+	return subnet.String()
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/node.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/node.go
new file mode 100644
index 000000000..9bad7ed01
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/node.go
@@ -0,0 +1,171 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package docker
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"strings"
+
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/exec"
+)
+
+// nodes.Node implementation for the docker provider
+type node struct {
+	name string
+}
+
+func (n *node) String() string {
+	return n.name
+}
+
+func (n *node) Role() (string, error) {
+	cmd := exec.Command("docker", "inspect",
+		"--format", fmt.Sprintf(`{{ index .Config.Labels "%s"}}`, nodeRoleLabelKey),
+		n.name,
+	)
+	lines, err := exec.OutputLines(cmd)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to get role for node")
+	}
+	if len(lines) != 1 {
+		return "", errors.Errorf("failed to get role for node: output lines %d != 1", len(lines))
+	}
+	return lines[0], nil
+}
+
+func (n *node) IP() (ipv4 string, ipv6 string, err error) {
+	// retrieve the IP address of the node using docker inspect
+	cmd := exec.Command("docker", "inspect",
+		"-f", "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}",
+		n.name, // ... against the "node" container
+	)
+	lines, err := exec.OutputLines(cmd)
+	if err != nil {
+		return "", "", errors.Wrap(err, "failed to get container details")
+	}
+	if len(lines) != 1 {
+		return "", "", errors.Errorf("file should only be one line, got %d lines", len(lines))
+	}
+	ips := strings.Split(lines[0], ",")
+	if len(ips) != 2 {
+		return "", "", errors.Errorf("container addresses should have 2 values, got %d values", len(ips))
+	}
+	return ips[0], ips[1], nil
+}
+
+func (n *node) Command(command string, args ...string) exec.Cmd {
+	return &nodeCmd{
+		nameOrID: n.name,
+		command:  command,
+		args:     args,
+	}
+}
+
+func (n *node) CommandContext(ctx context.Context, command string, args ...string) exec.Cmd {
+	return &nodeCmd{
+		nameOrID: n.name,
+		command:  command,
+		args:     args,
+		ctx:      ctx,
+	}
+}
+
+// nodeCmd implements exec.Cmd for docker nodes
+type nodeCmd struct {
+	nameOrID string // the container name or ID
+	command  string
+	args     []string
+	env      []string
+	stdin    io.Reader
+	stdout   io.Writer
+	stderr   io.Writer
+	ctx      context.Context
+}
+
+func (c *nodeCmd) Run() error {
+	args := []string{
+		"exec",
+		// run with privileges so we can remount etc..
+		// this might not make sense in the most general sense, but it is
+		// important to many kind commands
+		"--privileged",
+	}
+	if c.stdin != nil {
+		args = append(args,
+			"-i", // interactive so we can supply input
+		)
+	}
+	// set env
+	for _, env := range c.env {
+		args = append(args, "-e", env)
+	}
+	// specify the container and command, after this everything will be
+	// args the command in the container rather than to docker
+	args = append(
+		args,
+		c.nameOrID, // ... against the container
+		c.command,  // with the command specified
+	)
+	args = append(
+		args,
+		// finally, with the caller args
+		c.args...,
+	)
+	var cmd exec.Cmd
+	if c.ctx != nil {
+		cmd = exec.CommandContext(c.ctx, "docker", args...)
+	} else {
+		cmd = exec.Command("docker", args...)
+	}
+	if c.stdin != nil {
+		cmd.SetStdin(c.stdin)
+	}
+	if c.stderr != nil {
+		cmd.SetStderr(c.stderr)
+	}
+	if c.stdout != nil {
+		cmd.SetStdout(c.stdout)
+	}
+	return cmd.Run()
+}
+
+func (c *nodeCmd) SetEnv(env ...string) exec.Cmd {
+	c.env = env
+	return c
+}
+
+func (c *nodeCmd) SetStdin(r io.Reader) exec.Cmd {
+	c.stdin = r
+	return c
+}
+
+func (c *nodeCmd) SetStdout(w io.Writer) exec.Cmd {
+	c.stdout = w
+	return c
+}
+
+func (c *nodeCmd) SetStderr(w io.Writer) exec.Cmd {
+	c.stderr = w
+	return c
+}
+
+func (n *node) SerialLogs(w io.Writer) error {
+	return exec.Command("docker", "logs", n.name).SetStdout(w).SetStderr(w).Run()
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provider.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provider.go
new file mode 100644
index 000000000..0a012a265
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provider.go
@@ -0,0 +1,283 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package docker
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	"sigs.k8s.io/kind/pkg/cluster/nodes"
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/exec"
+	"sigs.k8s.io/kind/pkg/log"
+
+	internallogs "sigs.k8s.io/kind/pkg/cluster/internal/logs"
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers"
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers/common"
+	"sigs.k8s.io/kind/pkg/cluster/nodeutils"
+	"sigs.k8s.io/kind/pkg/internal/apis/config"
+	"sigs.k8s.io/kind/pkg/internal/cli"
+)
+
+// NewProvider returns a new provider based on executing `docker ...`
+func NewProvider(logger log.Logger) providers.Provider {
+	return &provider{
+		logger: logger,
+	}
+}
+
+// Provider implements provider.Provider
+// see NewProvider
+type provider struct {
+	logger log.Logger
+}
+
+// String implements fmt.Stringer
+// NOTE: the value of this should not currently be relied upon for anything!
+// This is only used for setting the Node's providerID
+func (p *provider) String() string {
+	return "docker"
+}
+
+// Provision is part of the providers.Provider interface
+func (p *provider) Provision(status *cli.Status, cfg *config.Cluster) (err error) {
+	// TODO: validate cfg
+	// ensure node images are pulled before actually provisioning
+	if err := ensureNodeImages(p.logger, status, cfg); err != nil {
+		return err
+	}
+
+	// ensure the pre-requisite network exists
+	networkName := fixedNetworkName
+	if n := os.Getenv("KIND_EXPERIMENTAL_DOCKER_NETWORK"); n != "" {
+		p.logger.Warn("WARNING: Overriding docker network due to KIND_EXPERIMENTAL_DOCKER_NETWORK")
+		p.logger.Warn("WARNING: Here be dragons! This is not supported currently.")
+		networkName = n
+	}
+	if err := ensureNetwork(networkName); err != nil {
+		return errors.Wrap(err, "failed to ensure docker network")
+	}
+
+	// actually provision the cluster
+	icons := strings.Repeat("📦 ", len(cfg.Nodes))
+	status.Start(fmt.Sprintf("Preparing nodes %s", icons))
+	defer func() { status.End(err == nil) }()
+
+	// plan creating the containers
+	createContainerFuncs, err := planCreation(cfg, networkName)
+	if err != nil {
+		return err
+	}
+
+	// actually create nodes
+	return errors.UntilErrorConcurrent(createContainerFuncs)
+}
+
+// ListClusters is part of the providers.Provider interface
+func (p *provider) ListClusters() ([]string, error) {
+	cmd := exec.Command("docker",
+		"ps",
+		"-a", // show stopped nodes
+		// filter for nodes with the cluster label
+		"--filter", "label="+clusterLabelKey,
+		// format to include the cluster name
+		"--format", fmt.Sprintf(`{{.Label "%s"}}`, clusterLabelKey),
+	)
+	lines, err := exec.OutputLines(cmd)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to list clusters")
+	}
+	return sets.NewString(lines...).List(), nil
+}
+
+// ListNodes is part of the providers.Provider interface
+func (p *provider) ListNodes(cluster string) ([]nodes.Node, error) {
+	cmd := exec.Command("docker",
+		"ps",
+		"-a", // show stopped nodes
+		// filter for nodes with the cluster label
+		"--filter", fmt.Sprintf("label=%s=%s", clusterLabelKey, cluster),
+		// format to include the cluster name
+		"--format", `{{.Names}}`,
+	)
+	lines, err := exec.OutputLines(cmd)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to list clusters")
+	}
+	// convert names to node handles
+	ret := make([]nodes.Node, 0, len(lines))
+	for _, name := range lines {
+		ret = append(ret, p.node(name))
+	}
+	return ret, nil
+}
+
+// DeleteNodes is part of the providers.Provider interface
+func (p *provider) DeleteNodes(n []nodes.Node) error {
+	if len(n) == 0 {
+		return nil
+	}
+	const command = "docker"
+	args := make([]string, 0, len(n)+3) // allocate once
+	args = append(args,
+		"rm",
+		"-f", // force the container to be deleted now
+		"-v", // delete volumes
+	)
+	for _, node := range n {
+		args = append(args, node.String())
+	}
+	if err := exec.Command(command, args...).Run(); err != nil {
+		return errors.Wrap(err, "failed to delete nodes")
+	}
+	return nil
+}
+
+// GetAPIServerEndpoint is part of the providers.Provider interface
+func (p *provider) GetAPIServerEndpoint(cluster string) (string, error) {
+	// locate the node that hosts this
+	allNodes, err := p.ListNodes(cluster)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to list nodes")
+	}
+	n, err := nodeutils.APIServerEndpointNode(allNodes)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to get api server endpoint")
+	}
+
+	// if the 'desktop.docker.io/ports/<PORT>/tcp' label is present,
+	// defer to its value for the api server endpoint
+	//
+	// For example:
+	// "Labels": {
+	//	"desktop.docker.io/ports/6443/tcp": "10.0.1.7:6443",
+	// }
+	cmd := exec.Command(
+		"docker", "inspect",
+		"--format", fmt.Sprintf(
+			"{{ index .Config.Labels \"desktop.docker.io/ports/%d/tcp\" }}", common.APIServerInternalPort,
+		),
+		n.String(),
+	)
+	lines, err := exec.OutputLines(cmd)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to get api server port")
+	}
+	if len(lines) == 1 && lines[0] != "" {
+		return lines[0], nil
+	}
+
+	// else, retrieve the specific port mapping via NetworkSettings.Ports
+	cmd = exec.Command(
+		"docker", "inspect",
+		"--format", fmt.Sprintf(
+			"{{ with (index (index .NetworkSettings.Ports \"%d/tcp\") 0) }}{{ printf \"%%s\t%%s\" .HostIp .HostPort }}{{ end }}", common.APIServerInternalPort,
+		),
+		n.String(),
+	)
+	lines, err = exec.OutputLines(cmd)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to get api server port")
+	}
+	if len(lines) != 1 {
+		return "", errors.Errorf("network details should only be one line, got %d lines", len(lines))
+	}
+	parts := strings.Split(lines[0], "\t")
+	if len(parts) != 2 {
+		return "", errors.Errorf("network details should only be two parts, got %d", len(parts))
+	}
+
+	// join host and port
+	return net.JoinHostPort(parts[0], parts[1]), nil
+}
+
+// GetAPIServerInternalEndpoint is part of the providers.Provider interface
+func (p *provider) GetAPIServerInternalEndpoint(cluster string) (string, error) {
+	// locate the node that hosts this
+	allNodes, err := p.ListNodes(cluster)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to list nodes")
+	}
+	n, err := nodeutils.APIServerEndpointNode(allNodes)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to get api server endpoint")
+	}
+	// NOTE: we're using the nodes's hostnames which are their names
+	return net.JoinHostPort(n.String(), fmt.Sprintf("%d", common.APIServerInternalPort)), nil
+}
+
+// node returns a new node handle for this provider
+func (p *provider) node(name string) nodes.Node {
+	return &node{
+		name: name,
+	}
+}
+
+// CollectLogs will populate dir with cluster logs and other debug files
+func (p *provider) CollectLogs(dir string, nodes []nodes.Node) error {
+	execToPathFn := func(cmd exec.Cmd, path string) func() error {
+		return func() error {
+			f, err := common.FileOnHost(path)
+			if err != nil {
+				return err
+			}
+			defer f.Close()
+			return cmd.SetStdout(f).SetStderr(f).Run()
+		}
+	}
+	// construct a slice of methods to collect logs
+	fns := []func() error{
+		// record info about the host docker
+		execToPathFn(
+			exec.Command("docker", "info"),
+			filepath.Join(dir, "docker-info.txt"),
+		),
+	}
+
+	// collect /var/log for each node and plan collecting more logs
+	var errs []error
+	for _, n := range nodes {
+		node := n // https://golang.org/doc/faq#closures_and_goroutines
+		name := node.String()
+		path := filepath.Join(dir, name)
+		if err := internallogs.DumpDir(p.logger, node, "/var/log", path); err != nil {
+			errs = append(errs, err)
+		}
+
+		fns = append(fns,
+			func() error { return common.CollectLogs(node, path) },
+			execToPathFn(exec.Command("docker", "inspect", name), filepath.Join(path, "inspect.json")),
+			func() error {
+				f, err := common.FileOnHost(filepath.Join(path, "serial.log"))
+				if err != nil {
+					return err
+				}
+				defer f.Close()
+				return node.SerialLogs(f)
+			},
+		)
+	}
+
+	// run and collect up all errors
+	errs = append(errs, errors.AggregateConcurrent(fns))
+	return errors.NewAggregate(errs)
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provision.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provision.go
new file mode 100644
index 000000000..8bd62c048
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provision.go
@@ -0,0 +1,398 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package docker
+
+import (
+	"fmt"
+	"net"
+	"path/filepath"
+	"strings"
+
+	"sigs.k8s.io/kind/pkg/cluster/constants"
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/exec"
+	"sigs.k8s.io/kind/pkg/fs"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer"
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers/common"
+	"sigs.k8s.io/kind/pkg/internal/apis/config"
+)
+
+// planCreation creates a slice of funcs that will create the containers
+func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs []func() error, err error) {
+	// we need to know all the names for NO_PROXY
+	// compute the names first before any actual node details
+	nodeNamer := common.MakeNodeNamer(cfg.Name)
+	names := make([]string, len(cfg.Nodes))
+	for i, node := range cfg.Nodes {
+		name := nodeNamer(string(node.Role)) // name the node
+		names[i] = name
+	}
+	haveLoadbalancer := clusterHasImplicitLoadBalancer(cfg)
+	if haveLoadbalancer {
+		names = append(names, nodeNamer(constants.ExternalLoadBalancerNodeRoleValue))
+	}
+
+	// these apply to all container creation
+	genericArgs, err := commonArgs(cfg.Name, cfg, networkName, names)
+	if err != nil {
+		return nil, err
+	}
+
+	// only the external LB should reflect the port if we have multiple control planes
+	apiServerPort := cfg.Networking.APIServerPort
+	apiServerAddress := cfg.Networking.APIServerAddress
+	if haveLoadbalancer {
+		// TODO: picking ports locally is less than ideal with remote docker
+		// but this is supposed to be an implementation detail and NOT picking
+		// them breaks host reboot ...
+		// For now remote docker + multi control plane is not supported
+		apiServerPort = 0              // replaced with random ports
+		apiServerAddress = "127.0.0.1" // only the LB needs to be non-local
+		if clusterIsIPv6(cfg) {
+			apiServerAddress = "::1" // only the LB needs to be non-local
+		}
+		// plan loadbalancer node
+		name := names[len(names)-1]
+		createContainerFuncs = append(createContainerFuncs, func() error {
+			args, err := runArgsForLoadBalancer(cfg, name, genericArgs)
+			if err != nil {
+				return err
+			}
+			return createContainer(args)
+		})
+	}
+
+	// plan normal nodes
+	for i, node := range cfg.Nodes {
+		node := node.DeepCopy() // copy so we can modify
+		name := names[i]
+
+		// fixup relative paths, docker can only handle absolute paths
+		for m := range node.ExtraMounts {
+			hostPath := node.ExtraMounts[m].HostPath
+			if !fs.IsAbs(hostPath) {
+				absHostPath, err := filepath.Abs(hostPath)
+				if err != nil {
+					return nil, errors.Wrapf(err, "unable to resolve absolute path for hostPath: %q", hostPath)
+				}
+				node.ExtraMounts[m].HostPath = absHostPath
+			}
+		}
+
+		// plan actual creation based on role
+		switch node.Role {
+		case config.ControlPlaneRole:
+			createContainerFuncs = append(createContainerFuncs, func() error {
+				node.ExtraPortMappings = append(node.ExtraPortMappings,
+					config.PortMapping{
+						ListenAddress: apiServerAddress,
+						HostPort:      apiServerPort,
+						ContainerPort: common.APIServerInternalPort,
+					},
+				)
+				args, err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs)
+				if err != nil {
+					return err
+				}
+				return createContainer(args)
+			})
+		case config.WorkerRole:
+			createContainerFuncs = append(createContainerFuncs, func() error {
+				args, err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs)
+				if err != nil {
+					return err
+				}
+				return createContainer(args)
+			})
+		default:
+			return nil, errors.Errorf("unknown node role: %q", node.Role)
+		}
+	}
+	return createContainerFuncs, nil
+}
+
+func createContainer(args []string) error {
+	if err := exec.Command("docker", args...).Run(); err != nil {
+		return errors.Wrap(err, "docker run error")
+	}
+	return nil
+}
+
+func clusterIsIPv6(cfg *config.Cluster) bool {
+	return cfg.Networking.IPFamily == "ipv6"
+}
+
+func clusterHasImplicitLoadBalancer(cfg *config.Cluster) bool {
+	controlPlanes := 0
+	for _, configNode := range cfg.Nodes {
+		role := string(configNode.Role)
+		if role == constants.ControlPlaneNodeRoleValue {
+			controlPlanes++
+		}
+	}
+	return controlPlanes > 1
+}
+
+// commonArgs computes static arguments that apply to all containers
+func commonArgs(cluster string, cfg *config.Cluster, networkName string, nodeNames []string) ([]string, error) {
+	// standard arguments all nodes containers need, computed once
+	args := []string{
+		"--detach", // run the container detached
+		"--tty",    // allocate a tty for entrypoint logs
+		// label the node with the cluster ID
+		"--label", fmt.Sprintf("%s=%s", clusterLabelKey, cluster),
+		// use a user defined docker network so we get embedded DNS
+		"--net", networkName,
+		// Docker supports the following restart modes:
+		// - no
+		// - on-failure[:max-retries]
+		// - unless-stopped
+		// - always
+		// https://docs.docker.com/engine/reference/commandline/run/#restart-policies---restart
+		//
+		// What we desire is:
+		// - restart on host / dockerd reboot
+		// - don't restart for any other reason
+		//
+		// This means:
+		// - no is out of the question ... it never restarts
+		// - always is a poor choice, we'll keep trying to restart nodes that were
+		//   never going to work
+		// - unless-stopped will also retry failures indefinitely, similar to always
+		//   except that it won't restart when the container is `docker stop`ed
+		// - on-failure is not great, we're only interested in restarting on
+		//   reboots, not failures. *however* we can limit the number of retries
+		//   *and* it forgets all state on dockerd restart and retries anyhow.
+		// - on-failure:0 is what we want .. restart on failures, except max
+		//   retries is 0, so only restart on reboots.
+		//   however this _actually_ means the same thing as always
+		//   so the closest thing is on-failure:1, which will retry *once*
+		"--restart=on-failure:1",
+	}
+
+	// enable IPv6 if necessary
+	if clusterIsIPv6(cfg) {
+		args = append(args, "--sysctl=net.ipv6.conf.all.disable_ipv6=0", "--sysctl=net.ipv6.conf.all.forwarding=1")
+	}
+
+	// pass proxy environment variables
+	proxyEnv, err := getProxyEnv(cfg, networkName, nodeNames)
+	if err != nil {
+		return nil, errors.Wrap(err, "proxy setup error")
+	}
+	for key, val := range proxyEnv {
+		args = append(args, "-e", fmt.Sprintf("%s=%s", key, val))
+	}
+
+	// handle hosts that have user namespace remapping enabled
+	if usernsRemap() {
+		args = append(args, "--userns=host")
+	}
+
+	// handle Docker on Btrfs or ZFS
+	// https://github.com/kubernetes-sigs/kind/issues/1416#issuecomment-606514724
+	if mountDevMapper() {
+		args = append(args, "--volume", "/dev/mapper:/dev/mapper")
+	}
+
+	return args, nil
+}
+
+func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, name string, args []string) ([]string, error) {
+	args = append([]string{
+		"run",
+		"--hostname", name, // make hostname match container name
+		"--name", name, // ... and set the container name
+		// label the node with the role ID
+		"--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, node.Role),
+		// running containers in a container requires privileged
+		// NOTE: we could try to replicate this with --cap-add, and use less
+		// privileges, but this flag also changes some mounts that are necessary
+		// including some ones docker would otherwise do by default.
+		// for now this is what we want. in the future we may revisit this.
+		"--privileged",
+		"--security-opt", "seccomp=unconfined", // also ignore seccomp
+		"--security-opt", "apparmor=unconfined", // also ignore apparmor
+		// runtime temporary storage
+		"--tmpfs", "/tmp", // various things depend on working /tmp
+		"--tmpfs", "/run", // systemd wants a writable /run
+		// runtime persistent storage
+		// this ensures that E.G. pods, logs etc. are not on the container
+		// filesystem, which is not only better for performance, but allows
+		// running kind in kind for "party tricks"
+		// (please don't depend on doing this though!)
+		"--volume", "/var",
+		// some k8s things want to read /lib/modules
+		"--volume", "/lib/modules:/lib/modules:ro",
+	},
+		args...,
+	)
+
+	// convert mounts and port mappings to container run args
+	args = append(args, generateMountBindings(node.ExtraMounts...)...)
+	mappingArgs, err := generatePortMappings(clusterIPFamily, node.ExtraPortMappings...)
+	if err != nil {
+		return nil, err
+	}
+	args = append(args, mappingArgs...)
+
+	// finally, specify the image to run
+	return append(args, node.Image), nil
+}
+
+func runArgsForLoadBalancer(cfg *config.Cluster, name string, args []string) ([]string, error) {
+	args = append([]string{
+		"run",
+		"--hostname", name, // make hostname match container name
+		"--name", name, // ... and set the container name
+		// label the node with the role ID
+		"--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, constants.ExternalLoadBalancerNodeRoleValue),
+	},
+		args...,
+	)
+
+	// load balancer port mapping
+	mappingArgs, err := generatePortMappings(cfg.Networking.IPFamily,
+		config.PortMapping{
+			ListenAddress: cfg.Networking.APIServerAddress,
+			HostPort:      cfg.Networking.APIServerPort,
+			ContainerPort: common.APIServerInternalPort,
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+	args = append(args, mappingArgs...)
+
+	// finally, specify the image to run
+	return append(args, loadbalancer.Image), nil
+}
+
+func getProxyEnv(cfg *config.Cluster, networkName string, nodeNames []string) (map[string]string, error) {
+	envs := common.GetProxyEnvs(cfg)
+	// Specifically add the docker network subnets to NO_PROXY if we are using a proxy
+	if len(envs) > 0 {
+		subnets, err := getSubnets(networkName)
+		if err != nil {
+			return nil, err
+		}
+
+		noProxyList := append(subnets, envs[common.NOProxy])
+		noProxyList = append(noProxyList, nodeNames...)
+		// Add pod and service dns names to no_proxy to allow in cluster
+		// Note: this is best effort based on the default CoreDNS spec
+		// https://github.com/kubernetes/dns/blob/master/docs/specification.md
+		// Any user created pod/service hostnames, namespaces, custom DNS services
+		// are expected to be no-proxied by the user explicitly.
+		noProxyList = append(noProxyList, ".svc", ".svc.cluster", ".svc.cluster.local")
+		noProxyJoined := strings.Join(noProxyList, ",")
+		envs[common.NOProxy] = noProxyJoined
+		envs[strings.ToLower(common.NOProxy)] = noProxyJoined
+	}
+	return envs, nil
+}
+
+func getSubnets(networkName string) ([]string, error) {
+	format := `{{range (index (index . "IPAM") "Config")}}{{index . "Subnet"}} {{end}}`
+	cmd := exec.Command("docker", "network", "inspect", "-f", format, networkName)
+	lines, err := exec.OutputLines(cmd)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get subnets")
+	}
+	return strings.Split(strings.TrimSpace(lines[0]), " "), nil
+}
+
+// generateMountBindings converts the mount list to a list of args for docker
+// '<HostPath>:<ContainerPath>[:options]', where 'options'
+// is a comma-separated list of the following strings:
+// 'ro', if the path is read only
+// 'Z', if the volume requires SELinux relabeling
+func generateMountBindings(mounts ...config.Mount) []string {
+	args := make([]string, 0, len(mounts))
+	for _, m := range mounts {
+		bind := fmt.Sprintf("%s:%s", m.HostPath, m.ContainerPath)
+		var attrs []string
+		if m.Readonly {
+			attrs = append(attrs, "ro")
+		}
+		// Only request relabeling if the pod provides an SELinux context. If the pod
+		// does not provide an SELinux context relabeling will label the volume with
+		// the container's randomly allocated MCS label. This would restrict access
+		// to the volume to the container which mounts it first.
+		if m.SelinuxRelabel {
+			attrs = append(attrs, "Z")
+		}
+		switch m.Propagation {
+		case config.MountPropagationNone:
+			// noop, private is default
+		case config.MountPropagationBidirectional:
+			attrs = append(attrs, "rshared")
+		case config.MountPropagationHostToContainer:
+			attrs = append(attrs, "rslave")
+		default: // Falls back to "private"
+		}
+		if len(attrs) > 0 {
+			bind = fmt.Sprintf("%s:%s", bind, strings.Join(attrs, ","))
+		}
+		args = append(args, fmt.Sprintf("--volume=%s", bind))
+	}
+	return args
+}
+
+// generatePortMappings converts the portMappings list to a list of args for docker
+func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings ...config.PortMapping) ([]string, error) {
+	args := make([]string, 0, len(portMappings))
+	for _, pm := range portMappings {
+		// do provider internal defaulting
+		// in a future API revision we will handle this at the API level and remove this
+		if pm.ListenAddress == "" {
+			switch clusterIPFamily {
+			case config.IPv4Family:
+				pm.ListenAddress = "0.0.0.0" // this is the docker default anyhow
+			case config.IPv6Family:
+				pm.ListenAddress = "::"
+			default:
+				return nil, errors.Errorf("unknown cluster IP family: %v", clusterIPFamily)
+			}
+		}
+		if string(pm.Protocol) == "" {
+			pm.Protocol = config.PortMappingProtocolTCP // TCP is the default
+		}
+
+		// validate that the provider can handle this binding
+		switch pm.Protocol {
+		case config.PortMappingProtocolTCP:
+		case config.PortMappingProtocolUDP:
+		case config.PortMappingProtocolSCTP:
+		default:
+			return nil, errors.Errorf("unknown port mapping protocol: %v", pm.Protocol)
+		}
+
+		// get a random port if necessary (port = 0)
+		hostPort, err := common.PortOrGetFreePort(pm.HostPort, pm.ListenAddress)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to get random host port for port mapping")
+		}
+
+		// generate the actual mapping arg
+		protocol := string(pm.Protocol)
+		hostPortBinding := net.JoinHostPort(pm.ListenAddress, fmt.Sprintf("%d", hostPort))
+		args = append(args, fmt.Sprintf("--publish=%s:%d/%s", hostPortBinding, pm.ContainerPort, protocol))
+	}
+	return args, nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/util.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/util.go
new file mode 100644
index 000000000..ac2ceb6f7
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/util.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package docker
+
+import (
+	"strings"
+
+	"sigs.k8s.io/kind/pkg/exec"
+)
+
+// IsAvailable checks if docker is available in the system
+func IsAvailable() bool {
+	cmd := exec.Command("docker", "-v")
+	lines, err := exec.OutputLines(cmd)
+	if err != nil || len(lines) != 1 {
+		return false
+	}
+	return strings.HasPrefix(lines[0], "Docker version")
+}
+
+// usernsRemap checks if userns-remap is enabled in dockerd
+func usernsRemap() bool {
+	cmd := exec.Command("docker", "info", "--format", "'{{json .SecurityOptions}}'")
+	lines, err := exec.OutputLines(cmd)
+	if err != nil {
+		return false
+	}
+	if len(lines) > 0 {
+		if strings.Contains(lines[0], "name=userns") {
+			return true
+		}
+	}
+	return false
+}
+
+// mountDevMapper checks if the Docker storage driver is Btrfs or ZFS
+func mountDevMapper() bool {
+	storage := ""
+	cmd := exec.Command("docker", "info", "-f", "{{.Driver}}")
+	lines, err := exec.OutputLines(cmd)
+	if err != nil {
+		return false
+	}
+	if len(lines) > 0 {
+		storage = strings.ToLower(strings.TrimSpace(lines[0]))
+	}
+	return storage == "btrfs" || storage == "zfs"
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/OWNERS b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/OWNERS
new file mode 100644
index 000000000..4ee938664
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/OWNERS
@@ -0,0 +1,13 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+  - amwat
+  - aojea
+  - BenTheElder
+approvers:
+  - amwat
+  - aojea
+  - BenTheElder
+
+labels:
+  - area/provider/podman
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/constants.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/constants.go
new file mode 100644
index 000000000..e30f167d2
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/constants.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package podman
+
+// clusterLabelKey is applied to each "node" podman container for identification
+const clusterLabelKey = "io.x-k8s.kind.cluster"
+
+// nodeRoleLabelKey is applied to each "node" podman container for categorization
+// of nodes by role
+const nodeRoleLabelKey = "io.x-k8s.kind.role"
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/images.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/images.go
new file mode 100644
index 000000000..cf6f7c0f6
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/images.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package podman
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/exec"
+	"sigs.k8s.io/kind/pkg/log"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers/common"
+	"sigs.k8s.io/kind/pkg/internal/apis/config"
+	"sigs.k8s.io/kind/pkg/internal/cli"
+)
+
+// ensureNodeImages ensures that the node images used by the create
+// configuration are present
+func ensureNodeImages(logger log.Logger, status *cli.Status, cfg *config.Cluster) error {
+	// pull each required image
+	for _, image := range common.RequiredNodeImages(cfg).List() {
+		// prints user friendly message
+		friendlyImageName, image := sanitizeImage(image)
+		status.Start(fmt.Sprintf("Ensuring node image (%s) 🖼", friendlyImageName))
+		if _, err := pullIfNotPresent(logger, image, 4); err != nil {
+			status.End(false)
+			return err
+		}
+	}
+	return nil
+}
+
+// pullIfNotPresent will pull an image if it is not present locally
+// retrying up to retries times
+// it returns true if it attempted to pull, and any errors from pulling
+func pullIfNotPresent(logger log.Logger, image string, retries int) (pulled bool, err error) {
+	// TODO(bentheelder): switch most (all) of the logging here to debug level
+	// once we have configurable log levels
+	// if this did not return an error, then the image exists locally
+	cmd := exec.Command("podman", "inspect", "--type=image", image)
+	if err := cmd.Run(); err == nil {
+		logger.V(1).Infof("Image: %s present locally", image)
+		return false, nil
+	}
+	// otherwise try to pull it
+	return true, pull(logger, image, retries)
+}
+
+// pull pulls an image, retrying up to retries times
+func pull(logger log.Logger, image string, retries int) error {
+	logger.V(1).Infof("Pulling image: %s ...", image)
+	err := exec.Command("podman", "pull", image).Run()
+	// retry pulling up to retries times if necessary
+	if err != nil {
+		for i := 0; i < retries; i++ {
+			time.Sleep(time.Second * time.Duration(i+1))
+			logger.V(1).Infof("Trying again to pull image: %q ... %v", image, err)
+			// TODO(bentheelder): add some backoff / sleep?
+			err = exec.Command("podman", "pull", image).Run()
+			if err == nil {
+				break
+			}
+		}
+	}
+	return errors.Wrapf(err, "failed to pull image %q", image)
+}
+
+// sanitizeImage is a helper to return human readable image name and
+// the podman pullable image name from the provided image
+func sanitizeImage(image string) (string, string) {
+	if strings.Contains(image, "@sha256:") {
+		splits := strings.Split(image, "@sha256:")
+		return splits[0], strings.Split(splits[0], ":")[0] + "@sha256:" + splits[1]
+	}
+	return image, image
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/network.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/network.go
new file mode 100644
index 000000000..87d7e6679
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/network.go
@@ -0,0 +1,135 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package podman
+
+import (
+	"crypto/sha1"
+	"encoding/binary"
+	"net"
+	"regexp"
+	"strings"
+
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/exec"
+)
+
+// This may be overridden by KIND_EXPERIMENTAL_PODMAN_NETWORK env,
+// experimentally...
+//
+// By default currently picking a single network is equivalent to the previous
+// behavior *except* that we moved from the default bridge to a user defined
+// network because the default bridge is actually special versus any other
+// docker network and lacks the embedded DNS
+//
+// For now this also makes it easier for apps to join the same network, and
+// leaves users with complex networking desires to create and manage their own
+// networks.
+const fixedNetworkName = "kind"
+
+// ensureNetwork creates a new network
+// podman only creates IPv6 networks for versions >= 2.2.0
+func ensureNetwork(name string) error {
+	// network already exists
+	if checkIfNetworkExists(name) {
+		return nil
+	}
+
+	// generate unique subnet per network based on the name
+	// obtained from the ULA fc00::/8 range
+	// Make N attempts with "probing" in case we happen to collide
+	subnet := generateULASubnetFromName(name, 0)
+	err := createNetwork(name, subnet)
+	if err == nil {
+		// Success!
+		return nil
+	}
+
+	if isUnknownIPv6FlagError(err) {
+		return createNetwork(name, "")
+	}
+
+	// Only continue if the error is because of the subnet range
+	// is already allocated
+	if !isPoolOverlapError(err) {
+		return err
+	}
+
+	// keep trying for ipv6 subnets
+	const maxAttempts = 5
+	for attempt := int32(1); attempt < maxAttempts; attempt++ {
+		subnet := generateULASubnetFromName(name, attempt)
+		err = createNetwork(name, subnet)
+		if err == nil {
+			// success!
+			return nil
+		} else if !isPoolOverlapError(err) {
+			// unknown error ...
+ return err + } + } + return errors.New("exhausted attempts trying to find a non-overlapping subnet") + +} + +func createNetwork(name, ipv6Subnet string) error { + if ipv6Subnet == "" { + return exec.Command("podman", "network", "create", "-d=bridge", name).Run() + } + return exec.Command("podman", "network", "create", "-d=bridge", + "--ipv6", "--subnet", ipv6Subnet, name).Run() +} + +func checkIfNetworkExists(name string) bool { + _, err := exec.Output(exec.Command( + "podman", "network", "inspect", + regexp.QuoteMeta(name), + )) + return err == nil +} + +func isUnknownIPv6FlagError(err error) bool { + rerr := exec.RunErrorForError(err) + return rerr != nil && + strings.Contains(string(rerr.Output), "unknown flag: --ipv6") +} + +func isPoolOverlapError(err error) bool { + rerr := exec.RunErrorForError(err) + return rerr != nil && + (strings.Contains(string(rerr.Output), "is being used by a network interface") || + strings.Contains(string(rerr.Output), "is already being used by a cni configuration")) +} + +// generateULASubnetFromName generate an IPv6 subnet based on the +// name and Nth probing attempt +func generateULASubnetFromName(name string, attempt int32) string { + ip := make([]byte, 16) + ip[0] = 0xfc + ip[1] = 0x00 + h := sha1.New() + _, _ = h.Write([]byte(name)) + _ = binary.Write(h, binary.LittleEndian, attempt) + bs := h.Sum(nil) + for i := 2; i < 8; i++ { + ip[i] = bs[i] + } + subnet := &net.IPNet{ + IP: net.IP(ip), + Mask: net.CIDRMask(64, 128), + } + return subnet.String() +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/node.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/node.go new file mode 100644 index 000000000..5285dd224 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/node.go @@ -0,0 +1,171 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impliep. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podman + +import ( + "context" + "fmt" + "io" + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +// nodes.Node implementation for the podman provider +type node struct { + name string +} + +func (n *node) String() string { + return n.name +} + +func (n *node) Role() (string, error) { + cmd := exec.Command("podman", "inspect", + "--format", fmt.Sprintf(`{{ index .Config.Labels "%s"}}`, nodeRoleLabelKey), + n.name, + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get role for node") + } + if len(lines) != 1 { + return "", errors.Errorf("failed to get role for node: output lines %d != 1", len(lines)) + } + return lines[0], nil +} + +func (n *node) IP() (ipv4 string, ipv6 string, err error) { + // retrieve the IP address of the node using podman inspect + cmd := exec.Command("podman", "inspect", + "-f", "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}", + n.name, // ... 
against the "node" container + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", "", errors.Wrap(err, "failed to get container details") + } + if len(lines) != 1 { + return "", "", errors.Errorf("output should only be one line, got %d lines", len(lines)) + } + ips := strings.Split(lines[0], ",") + if len(ips) != 2 { + return "", "", errors.Errorf("container addresses should have 2 values, got %d values", len(ips)) + } + return ips[0], ips[1], nil +} + +func (n *node) Command(command string, args ...string) exec.Cmd { + return &nodeCmd{ + nameOrID: n.name, + command: command, + args: args, + } +} + +func (n *node) CommandContext(ctx context.Context, command string, args ...string) exec.Cmd { + return &nodeCmd{ + nameOrID: n.name, + command: command, + args: args, + ctx: ctx, + } +} + +// nodeCmd implements exec.Cmd for podman nodes +type nodeCmd struct { + nameOrID string // the container name or ID + command string + args []string + env []string + stdin io.Reader + stdout io.Writer + stderr io.Writer + ctx context.Context +} + +func (c *nodeCmd) Run() error { + args := []string{ + "exec", + // run with privileges so we can remount etc. + // this might not make sense in the most general sense, but it is + // important to many kind commands + "--privileged", + } + if c.stdin != nil { + args = append(args, + "-i", // interactive so we can supply input + ) + } + // set env + for _, env := range c.env { + args = append(args, "-e", env) + } + // specify the container and command, after this everything will be + // args to the command in the container rather than to podman + args = append( + args, + c.nameOrID, // ... against the container + c.command, // with the command specified + ) + args = append( + args, + // finally, with the caller args + c.args..., + ) + var cmd exec.Cmd + if c.ctx != nil { + cmd = exec.CommandContext(c.ctx, "podman", args...) + } else { + cmd = exec.Command("podman", args...) + } + if c.stdin != nil { + cmd.SetStdin(c.stdin) + } + if c.stderr != nil { + cmd.SetStderr(c.stderr) + } + if c.stdout != nil { + cmd.SetStdout(c.stdout) + } + return cmd.Run() +} + +func (c *nodeCmd) SetEnv(env ...string) exec.Cmd { + c.env = env + return c +} + +func (c *nodeCmd) SetStdin(r io.Reader) exec.Cmd { + c.stdin = r + return c +} + +func (c *nodeCmd) SetStdout(w io.Writer) exec.Cmd { + c.stdout = w + return c +} + +func (c *nodeCmd) SetStderr(w io.Writer) exec.Cmd { + c.stderr = w + return c +} + +func (n *node) SerialLogs(w io.Writer) error { + return exec.Command("podman", "logs", n.name).SetStdout(w).SetStderr(w).Run() +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provider.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provider.go new file mode 100644 index 000000000..f775ab52e --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provider.go @@ -0,0 +1,352 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package podman + +import ( + "encoding/json" + "fmt" + "net" + "os" + "path/filepath" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/version" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/log" + + internallogs "sigs.k8s.io/kind/pkg/cluster/internal/logs" + "sigs.k8s.io/kind/pkg/cluster/internal/providers" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/cli" +) + +// NewProvider returns a new provider based on executing `podman ...` +func NewProvider(logger log.Logger) providers.Provider { + logger.Warn("enabling experimental podman provider") + return &provider{ + logger: logger, + } +} + +// Provider implements provider.Provider +// see NewProvider +type provider struct { + logger log.Logger +} + +// String implements fmt.Stringer +// NOTE: the value of this should not currently be relied upon for anything! +// This is only used for setting the Node's providerID +func (p *provider) String() string { + return "podman" +} + +// Provision is part of the providers.Provider interface +func (p *provider) Provision(status *cli.Status, cfg *config.Cluster) (err error) { + if err := ensureMinVersion(); err != nil { + return err + } + + // kind doesn't work with podman rootless, surface an error + if os.Geteuid() != 0 { + p.logger.Errorf("podman provider does not work properly in rootless mode") + os.Exit(1) + } + + // TODO: validate cfg + // ensure node images are pulled before actually provisioning + if err := ensureNodeImages(p.logger, status, cfg); err != nil { + return err + } + + // ensure the pre-requisite network exists + networkName := fixedNetworkName + if n := os.Getenv("KIND_EXPERIMENTAL_PODMAN_NETWORK"); n != "" { + p.logger.Warn("WARNING: Overriding podman network due to KIND_EXPERIMENTAL_PODMAN_NETWORK") + p.logger.Warn("WARNING: Here be dragons! 
This is not supported currently.") + networkName = n + } + if err := ensureNetwork(networkName); err != nil { + return errors.Wrap(err, "failed to ensure podman network") + } + + // actually provision the cluster + icons := strings.Repeat("📦 ", len(cfg.Nodes)) + status.Start(fmt.Sprintf("Preparing nodes %s", icons)) + defer func() { status.End(err == nil) }() + + // plan creating the containers + createContainerFuncs, err := planCreation(cfg, networkName) + if err != nil { + return err + } + + // actually create nodes + return errors.UntilErrorConcurrent(createContainerFuncs) +} + +// ListClusters is part of the providers.Provider interface +func (p *provider) ListClusters() ([]string, error) { + cmd := exec.Command("podman", + "ps", + "-a", // show stopped nodes + // filter for nodes with the cluster label + "--filter", "label="+clusterLabelKey, + // format to include the cluster name + "--format", fmt.Sprintf(`{{index .Labels "%s"}}`, clusterLabelKey), + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to list clusters") + } + return sets.NewString(lines...).List(), nil +} + +// ListNodes is part of the providers.Provider interface +func (p *provider) ListNodes(cluster string) ([]nodes.Node, error) { + cmd := exec.Command("podman", + "ps", + "-a", // show stopped nodes + // filter for nodes with the cluster label + "--filter", fmt.Sprintf("label=%s=%s", clusterLabelKey, cluster), + // format to include only the node name + "--format", `{{.Names}}`, + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to list nodes") + } + // convert names to node handles + ret := make([]nodes.Node, 0, len(lines)) + for _, name := range lines { + ret = append(ret, p.node(name)) + } + return ret, nil +} + +// DeleteNodes is part of the providers.Provider interface +func (p *provider) DeleteNodes(n []nodes.Node) error { + if len(n) == 0 { + return nil + } + const command = "podman" + args := make([]string, 0, len(n)+3) // allocate once + args = append(args, + "rm", + "-f", // force the container to be deleted now + "-v", // delete volumes + ) + for _, node := range n { + args = append(args, node.String()) + } + if err := exec.Command(command, args...).Run(); err != nil { + return errors.Wrap(err, "failed to delete nodes") + } + var nodeVolumes []string + for _, node := range n { + volumes, err := getVolumes(node.String()) + if err != nil { + return err + } + nodeVolumes = append(nodeVolumes, volumes...)
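+ // (descriptive note, not upstream kind commentary: these are the labeled volumes pre-created by createAnonymousVolume in util.go; since they are named volumes from podman's perspective, `podman rm -v` does not remove them, so they are looked up by label and deleted explicitly below)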
+ } + return deleteVolumes(nodeVolumes) +} + +// GetAPIServerEndpoint is part of the providers.Provider interface +func (p *provider) GetAPIServerEndpoint(cluster string) (string, error) { + // locate the node that hosts this + allNodes, err := p.ListNodes(cluster) + if err != nil { + return "", errors.Wrap(err, "failed to list nodes") + } + n, err := nodeutils.APIServerEndpointNode(allNodes) + if err != nil { + return "", errors.Wrap(err, "failed to get api server endpoint") + } + + // TODO: get rid of this once podman settles on how to get the port mapping using podman inspect + // This is only used to get the Kubeconfig server field + v, err := getPodmanVersion() + if err != nil { + return "", errors.Wrap(err, "failed to check podman version") + } + if v.LessThan(version.MustParseSemantic("2.2.0")) { + cmd := exec.Command( + "podman", "inspect", + "--format", + "{{ json .NetworkSettings.Ports }}", + n.String(), + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get api server port") + } + if len(lines) != 1 { + return "", errors.Errorf("network details should only be one line, got %d lines", len(lines)) + } + + // portMapping19 maps to the standard CNI portmapping capability used in podman 1.9 + // see: https://github.com/containernetworking/cni/blob/spec-v0.4.0/CONVENTIONS.md + type portMapping19 struct { + HostPort int32 `json:"hostPort"` + ContainerPort int32 `json:"containerPort"` + Protocol string `json:"protocol"` + HostIP string `json:"hostIP"` + } + // portMapping20 maps to the podman 2.0 portmap type + // see: https://github.com/containers/podman/blob/05988fc74fc25f2ad2256d6e011dfb7ad0b9a4eb/libpod/define/container_inspect.go#L134-L143 + type portMapping20 struct { + HostPort string `json:"HostPort"` + HostIP string `json:"HostIp"` + } + + portMappings20 := make(map[string][]portMapping20) + if err := json.Unmarshal([]byte(lines[0]), &portMappings20); err == nil { + for k, v := range portMappings20 { + protocol := "tcp" + parts := strings.Split(k, "/") + if len(parts) == 2 { + protocol = strings.ToLower(parts[1]) + } + containerPort, err := strconv.Atoi(parts[0]) + if err != nil { + return "", err + } + for _, pm := range v { + if containerPort == common.APIServerInternalPort && protocol == "tcp" { + return net.JoinHostPort(pm.HostIP, pm.HostPort), nil + } + } + } + } + var portMappings19 []portMapping19 + if err := json.Unmarshal([]byte(lines[0]), &portMappings19); err != nil { + return "", errors.Errorf("invalid network details: %v", err) + } + for _, pm := range portMappings19 { + if pm.ContainerPort == common.APIServerInternalPort && pm.Protocol == "tcp" { + return net.JoinHostPort(pm.HostIP, strconv.Itoa(int(pm.HostPort))), nil + } + } + } + // TODO: hack until https://github.com/containers/podman/issues/8444 is resolved + cmd := exec.Command( + "podman", "inspect", + "--format", + "{{range .NetworkSettings.Ports }}{{range .}}{{.HostIP}}/{{.HostPort}}{{end}}{{end}}", + n.String(), + ) + + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get api server port") + } + if len(lines) != 1 { + return "", errors.Errorf("network details should only be one line, got %d lines", len(lines)) + } + // output is in the format IP/Port + parts := strings.Split(strings.TrimSpace(lines[0]), "/") + if len(parts) != 2 { + return "", errors.Errorf("network details should be in the format IP/Port, received: %s", parts) + } + host := parts[0] + port, err := strconv.Atoi(parts[1]) + if err != nil { + 
return "", errors.Errorf("network port not an integer: %v", err) + } + + return net.JoinHostPort(host, strconv.Itoa(port)), nil +} + +// GetAPIServerInternalEndpoint is part of the providers.Provider interface +func (p *provider) GetAPIServerInternalEndpoint(cluster string) (string, error) { + // locate the node that hosts this + allNodes, err := p.ListNodes(cluster) + if err != nil { + return "", errors.Wrap(err, "failed to list nodes") + } + n, err := nodeutils.APIServerEndpointNode(allNodes) + if err != nil { + return "", errors.Wrap(err, "failed to get apiserver endpoint") + } + // NOTE: we're using the nodes's hostnames which are their names + return net.JoinHostPort(n.String(), fmt.Sprintf("%d", common.APIServerInternalPort)), nil +} + +// node returns a new node handle for this provider +func (p *provider) node(name string) nodes.Node { + return &node{ + name: name, + } +} + +// CollectLogs will populate dir with cluster logs and other debug files +func (p *provider) CollectLogs(dir string, nodes []nodes.Node) error { + execToPathFn := func(cmd exec.Cmd, path string) func() error { + return func() error { + f, err := common.FileOnHost(path) + if err != nil { + return err + } + defer f.Close() + return cmd.SetStdout(f).SetStderr(f).Run() + } + } + // construct a slice of methods to collect logs + fns := []func() error{ + // record info about the host podman + execToPathFn( + exec.Command("podman", "info"), + filepath.Join(dir, "podman-info.txt"), + ), + } + + // collect /var/log for each node and plan collecting more logs + var errs []error + for _, n := range nodes { + node := n // https://golang.org/doc/faq#closures_and_goroutines + name := node.String() + path := filepath.Join(dir, name) + if err := internallogs.DumpDir(p.logger, node, "/var/log", path); err != nil { + errs = append(errs, err) + } + + fns = append(fns, + func() error { return common.CollectLogs(node, path) }, + execToPathFn(exec.Command("podman", "inspect", name), filepath.Join(path, "inspect.json")), + func() error { + f, err := common.FileOnHost(filepath.Join(path, "serial.log")) + if err != nil { + return err + } + return node.SerialLogs(f) + }, + ) + } + + // run and collect up all errors + errs = append(errs, errors.AggregateConcurrent(fns)) + return errors.NewAggregate(errs) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provision.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provision.go new file mode 100644 index 000000000..47577d3e5 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provision.go @@ -0,0 +1,362 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package podman + +import ( + "fmt" + "net" + "path/filepath" + "strings" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + + "sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// planCreation creates a slice of funcs that will create the containers +func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs []func() error, err error) { + // these apply to all container creation + nodeNamer := common.MakeNodeNamer(cfg.Name) + genericArgs, err := commonArgs(cfg, networkName) + if err != nil { + return nil, err + } + + // only the external LB should reflect the port if we have multiple control planes + apiServerPort := cfg.Networking.APIServerPort + apiServerAddress := cfg.Networking.APIServerAddress + if clusterHasImplicitLoadBalancer(cfg) { + // TODO: picking ports locally is less than ideal with a remote runtime + // (does podman have this?) + // but this is supposed to be an implementation detail and NOT picking + // them breaks host reboot ... + // For now remote podman + multi control plane is not supported + apiServerPort = 0 // replaced with random ports + apiServerAddress = "127.0.0.1" // only the LB needs to be non-local + if clusterIsIPv6(cfg) { + apiServerAddress = "::1" // only the LB needs to be non-local + } + // plan loadbalancer node + name := nodeNamer(constants.ExternalLoadBalancerNodeRoleValue) + createContainerFuncs = append(createContainerFuncs, func() error { + args, err := runArgsForLoadBalancer(cfg, name, genericArgs) + if err != nil { + return err + } + return createContainer(args) + }) + } + + // plan normal nodes + for _, node := range cfg.Nodes { + node := node.DeepCopy() // copy so we can modify + name := nodeNamer(string(node.Role)) // name the node + + // fixup relative paths, podman can only handle absolute paths + for i := range node.ExtraMounts { + hostPath := node.ExtraMounts[i].HostPath + absHostPath, err := filepath.Abs(hostPath) + if err != nil { + return nil, errors.Wrapf(err, "unable to resolve absolute path for hostPath: %q", hostPath) + } + node.ExtraMounts[i].HostPath = absHostPath + } + + // plan actual creation based on role + switch node.Role { + case config.ControlPlaneRole: + createContainerFuncs = append(createContainerFuncs, func() error { + node.ExtraPortMappings = append(node.ExtraPortMappings, + config.PortMapping{ + ListenAddress: apiServerAddress, + HostPort: apiServerPort, + ContainerPort: common.APIServerInternalPort, + }, + ) + args, err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs) + if err != nil { + return err + } + return createContainer(args) + }) + case config.WorkerRole: + createContainerFuncs = append(createContainerFuncs, func() error { + args, err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs) + if err != nil { + return err + } + return createContainer(args) + }) + default: + return nil, errors.Errorf("unknown node role: %q", node.Role) + } + } + return createContainerFuncs, nil +} + +func createContainer(args []string) error { + if err := exec.Command("podman", args...).Run(); err != nil { + return errors.Wrap(err, "podman run error") + } + return nil +} + +func clusterIsIPv6(cfg *config.Cluster) bool { + return cfg.Networking.IPFamily == "ipv6" +} + +func clusterHasImplicitLoadBalancer(cfg *config.Cluster) bool { + controlPlanes := 0 + for _, configNode := range cfg.Nodes { + role := 
string(configNode.Role) + if role == constants.ControlPlaneNodeRoleValue { + controlPlanes++ + } + } + return controlPlanes > 1 +} + +// commonArgs computes static arguments that apply to all containers +func commonArgs(cfg *config.Cluster, networkName string) ([]string, error) { + // standard arguments all node containers need, computed once + args := []string{ + "--detach", // run the container detached + "--tty", // allocate a tty for entrypoint logs + "--net", networkName, // attach to its own network + // label the node with the cluster ID + "--label", fmt.Sprintf("%s=%s", clusterLabelKey, cfg.Name), + } + + // enable IPv6 if necessary + if clusterIsIPv6(cfg) { + args = append(args, "--sysctl=net.ipv6.conf.all.disable_ipv6=0", "--sysctl=net.ipv6.conf.all.forwarding=1") + } + + // pass proxy environment variables + proxyEnv, err := getProxyEnv(cfg) + if err != nil { + return nil, errors.Wrap(err, "proxy setup error") + } + for key, val := range proxyEnv { + args = append(args, "-e", fmt.Sprintf("%s=%s", key, val)) + } + + return args, nil +} + +func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, name string, args []string) ([]string, error) { + // Pre-create anonymous volumes to enable specifying mount options + // during container run time + varVolume, err := createAnonymousVolume(name) + if err != nil { + return nil, err + } + + args = append([]string{ + "run", + "--hostname", name, // make hostname match container name + "--name", name, // ... and set the container name + // label the node with the role ID + "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, node.Role), + // running containers in a container requires privileged + // NOTE: we could try to replicate this with --cap-add, and use less + // privileges, but this flag also changes some mounts that are necessary + // including some ones podman would otherwise do by default. + // for now this is what we want. in the future we may revisit this. + "--privileged", + // runtime temporary storage + "--tmpfs", "/tmp", // various things depend on working /tmp + "--tmpfs", "/run", // systemd wants a writable /run + // runtime persistent storage + // this ensures that E.G. pods, logs etc. are not on the container + // filesystem, which is not only better for performance, but allows + // running kind in kind for "party tricks" + // (please don't depend on doing this though!) + // also enable default docker volume options + // suid: SUID applications on the volume will be able to change their privilege + // exec: executables on the volume will be able to be executed within the container + // dev: devices on the volume will be able to be used by processes within the container + "--volume", fmt.Sprintf("%s:/var:suid,exec,dev", varVolume), + // some k8s things want to read /lib/modules + "--volume", "/lib/modules:/lib/modules:ro", + }, + args..., + ) + + // convert mounts and port mappings to container run args + args = append(args, generateMountBindings(node.ExtraMounts...)...) + mappingArgs, err := generatePortMappings(clusterIPFamily, node.ExtraPortMappings...) + if err != nil { + return nil, err + } + args = append(args, mappingArgs...) + + // finally, specify the image to run + _, image := sanitizeImage(node.Image) + return append(args, image), nil +} + +func runArgsForLoadBalancer(cfg *config.Cluster, name string, args []string) ([]string, error) { + args = append([]string{ + "run", + "--hostname", name, // make hostname match container name + "--name", name, // ...
and set the container name + // label the node with the role ID + "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, constants.ExternalLoadBalancerNodeRoleValue), + }, + args..., + ) + + // load balancer port mapping + mappingArgs, err := generatePortMappings(cfg.Networking.IPFamily, + config.PortMapping{ + ListenAddress: cfg.Networking.APIServerAddress, + HostPort: cfg.Networking.APIServerPort, + ContainerPort: common.APIServerInternalPort, + }, + ) + if err != nil { + return nil, err + } + args = append(args, mappingArgs...) + + // finally, specify the image to run + _, image := sanitizeImage(loadbalancer.Image) + return append(args, image), nil +} + +func getProxyEnv(cfg *config.Cluster) (map[string]string, error) { + envs := common.GetProxyEnvs(cfg) + // Specifically add the podman network subnets to NO_PROXY if we are using a proxy + if len(envs) > 0 { + // podman default bridge network is named "bridge" (https://docs.podman.com/network/bridge/#use-the-default-bridge-network) + subnets, err := getSubnets("bridge") + if err != nil { + return nil, err + } + noProxyList := append(subnets, envs[common.NOProxy]) + // Add pod and service dns names to no_proxy to allow in cluster + // Note: this is best effort based on the default CoreDNS spec + // https://github.com/kubernetes/dns/blob/master/docs/specification.md + // Any user created pod/service hostnames, namespaces, custom DNS services + // are expected to be no-proxied by the user explicitly. + noProxyList = append(noProxyList, ".svc", ".svc.cluster", ".svc.cluster.local") + noProxyJoined := strings.Join(noProxyList, ",") + envs[common.NOProxy] = noProxyJoined + envs[strings.ToLower(common.NOProxy)] = noProxyJoined + } + return envs, nil +} + +func getSubnets(networkName string) ([]string, error) { + format := `{{range (index (index . "IPAM") "Config")}}{{index . "Subnet"}} {{end}}` + cmd := exec.Command("podman", "network", "inspect", "-f", format, networkName) + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to get subnets") + } + return strings.Split(strings.TrimSpace(lines[0]), " "), nil +} + +// generateMountBindings converts the mount list to a list of args for podman +// '<HostPath>:<ContainerPath>[:options]', where 'options' +// is a comma-separated list of the following strings: +// 'ro', if the path is read only +// 'Z', if the volume requires SELinux relabeling +func generateMountBindings(mounts ...config.Mount) []string { + args := make([]string, 0, len(mounts)) + for _, m := range mounts { + bind := fmt.Sprintf("%s:%s", m.HostPath, m.ContainerPath) + var attrs []string + if m.Readonly { + attrs = append(attrs, "ro") + } + // Only request relabeling if the pod provides an SELinux context. If the pod + // does not provide an SELinux context relabeling will label the volume with + // the container's randomly allocated MCS label. This would restrict access + // to the volume to the container which mounts it first. 
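+ // (illustrative example, not upstream kind code: given this rendering, a read-only mount that requests relabeling becomes --volume=/host/path:/container/path:ro,Z, and a bidirectional-propagation mount becomes --volume=/host/path:/container/path:rshared)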
+ if m.SelinuxRelabel { + attrs = append(attrs, "Z") + } + switch m.Propagation { + case config.MountPropagationNone: + // noop, private is default + case config.MountPropagationBidirectional: + attrs = append(attrs, "rshared") + case config.MountPropagationHostToContainer: + attrs = append(attrs, "rslave") + default: // Falls back to "private" + } + if len(attrs) > 0 { + bind = fmt.Sprintf("%s:%s", bind, strings.Join(attrs, ",")) + } + args = append(args, fmt.Sprintf("--volume=%s", bind)) + } + return args +} + +// generatePortMappings converts the portMappings list to a list of args for podman +func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings ...config.PortMapping) ([]string, error) { + args := make([]string, 0, len(portMappings)) + for _, pm := range portMappings { + // do provider internal defaulting + // in a future API revision we will handle this at the API level and remove this + if pm.ListenAddress == "" { + switch clusterIPFamily { + case config.IPv4Family: + pm.ListenAddress = "0.0.0.0" + case config.IPv6Family: + pm.ListenAddress = "::" + default: + return nil, errors.Errorf("unknown cluster IP family: %v", clusterIPFamily) + } + } + if string(pm.Protocol) == "" { + pm.Protocol = config.PortMappingProtocolTCP // TCP is the default + } + + // validate that the provider can handle this binding + switch pm.Protocol { + case config.PortMappingProtocolTCP: + case config.PortMappingProtocolUDP: + case config.PortMappingProtocolSCTP: + default: + return nil, errors.Errorf("unknown port mapping protocol: %v", pm.Protocol) + } + + // get a random port if necessary (port = 0) + hostPort, err := common.PortOrGetFreePort(pm.HostPort, pm.ListenAddress) + if err != nil { + return nil, errors.Wrap(err, "failed to get random host port for port mapping") + } + + // generate the actual mapping arg + protocol := string(pm.Protocol) + hostPortBinding := net.JoinHostPort(pm.ListenAddress, fmt.Sprintf("%d", hostPort)) + // Podman expects empty string instead of 0 to assign a random port + // https://github.com/containers/libpod/blob/master/pkg/spec/ports.go#L68-L69 + if strings.HasSuffix(hostPortBinding, ":0") { + hostPortBinding = strings.TrimSuffix(hostPortBinding, "0") + } + args = append(args, fmt.Sprintf("--publish=%s:%d/%s", hostPortBinding, pm.ContainerPort, strings.ToLower(protocol))) + } + return args, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/util.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/util.go new file mode 100644 index 000000000..853f9301a --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/util.go @@ -0,0 +1,117 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package podman + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/util/version" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +// IsAvailable checks if podman is available in the system +func IsAvailable() bool { + cmd := exec.Command("podman", "-v") + lines, err := exec.OutputLines(cmd) + if err != nil || len(lines) != 1 { + return false + } + return strings.HasPrefix(lines[0], "podman version") +} + +func getPodmanVersion() (*version.Version, error) { + cmd := exec.Command("podman", "--version") + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, err + } + + // output is like `podman version 1.7.1-dev` + if len(lines) != 1 { + return nil, errors.Errorf("podman version should only be one line, got %d", len(lines)) + } + parts := strings.Split(lines[0], " ") + if len(parts) != 3 { + return nil, errors.Errorf("podman --version contents should have 3 parts, got %q", lines[0]) + } + return version.ParseSemantic(parts[2]) +} + +const ( + minSupportedVersion = "1.8.0" +) + +func ensureMinVersion() error { + // ensure that podman version is a compatible version + v, err := getPodmanVersion() + if err != nil { + return errors.Wrap(err, "failed to check podman version") + } + if !v.AtLeast(version.MustParseSemantic(minSupportedVersion)) { + return errors.Errorf("podman version %q is too old, please upgrade to %q or later", v, minSupportedVersion) + } + return nil +} + +// createAnonymousVolume creates a new anonymous volume +// with the specified label=true +// returns the name of the volume created +func createAnonymousVolume(label string) (string, error) { + cmd := exec.Command("podman", + "volume", + "create", + // podman only support filter on key during list + // so we use the unique id as key + "--label", fmt.Sprintf("%s=true", label)) + name, err := exec.Output(cmd) + if err != nil { + return "", err + } + return strings.TrimSuffix(string(name), "\n"), nil +} + +// getVolumes gets volume names filtered on specified label +func getVolumes(label string) ([]string, error) { + cmd := exec.Command("podman", + "volume", + "ls", + "--filter", fmt.Sprintf("label=%s", label), + "--quiet") + // `output` from the above command is names of all volumes each followed by `\n`. + output, err := exec.Output(cmd) + if err != nil { + return nil, err + } + // Trim away the last `\n`. + trimmedOutput := strings.TrimSuffix(string(output), "\n") + // Get names of all volumes by splitting via `\n`. + return strings.Split(string(trimmedOutput), "\n"), nil +} + +func deleteVolumes(names []string) error { + args := []string{ + "volume", + "rm", + "--force", + } + args = append(args, names...) + cmd := exec.Command("podman", args...) + return cmd.Run() +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/provider.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/provider.go new file mode 100644 index 000000000..6e28c4dc7 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/provider.go @@ -0,0 +1,48 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package providers + +import ( + "sigs.k8s.io/kind/pkg/cluster/nodes" + + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/cli" +) + +// Provider represents a provider of cluster / node infrastructure +// This is an alpha-grade internal API +type Provider interface { + // Provision should create and start the nodes, just short of + // actually starting up Kubernetes, based on the given cluster config + Provision(status *cli.Status, cfg *config.Cluster) error + // ListClusters discovers the clusters that currently have resources + // under this provider + ListClusters() ([]string, error) + // ListNodes returns the nodes under this provider for the given + // cluster name; they may or may not be running correctly + ListNodes(cluster string) ([]nodes.Node, error) + // DeleteNodes deletes the provided list of nodes + // These should be from results previously returned by this provider + // E.G. by ListNodes() + DeleteNodes([]nodes.Node) error + // GetAPIServerEndpoint returns the host endpoint for the cluster's API server + GetAPIServerEndpoint(cluster string) (string, error) + // GetAPIServerInternalEndpoint returns the internal network endpoint for the cluster's API server + GetAPIServerInternalEndpoint(cluster string) (string, error) + // CollectLogs will populate dir with cluster logs and other debug files + CollectLogs(dir string, nodes []nodes.Node) error +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/doc.go new file mode 100644 index 000000000..4cd87b0ec --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package nodes provides a kind specific definition of a cluster node +package nodes diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/types.go b/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/types.go new file mode 100644 index 000000000..a2c1b33b4 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/types.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package nodes + +import ( + "io" + + "sigs.k8s.io/kind/pkg/exec" +) + +// Node represents a kind cluster node +type Node interface { + // The node should implement exec.Cmder for running commands against the node + // see: sigs.k8s.io/kind/pkg/exec + exec.Cmder + // String should return the node name + String() string // see also: fmt.Stringer + // Role should return the node's role + Role() (string, error) // see also: pkg/cluster/constants + // TODO(bentheelder): should return node addresses more generally + // Possibly remove this method in favor of obtaining this detail with + // exec or from the provider + IP() (ipv4 string, ipv6 string, err error) + // SerialLogs collects the "node" container logs + SerialLogs(writer io.Writer) error +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/doc.go new file mode 100644 index 000000000..b1e72e95a --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package nodeutils contains functionality for Kubernetes-in-Docker nodes +// It mostly exists to break up functionality from sigs.k8s.io/kind/pkg/cluster +package nodeutils diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/roles.go b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/roles.go new file mode 100644 index 000000000..27deb93cc --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/roles.go @@ -0,0 +1,153 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodeutils + +import ( + "sort" + "strings" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" +) + +// SelectNodesByRole returns a list of nodes with the matching role +// TODO(bentheelder): remove this in favor of specific role select methods +// and avoid the unnecessary error handling +func SelectNodesByRole(allNodes []nodes.Node, role string) ([]nodes.Node, error) { + out := []nodes.Node{} + for _, node := range allNodes { + nodeRole, err := node.Role() + if err != nil { + return nil, err + } + if nodeRole == role { + out = append(out, node) + } + } + return out, nil +} + +// InternalNodes returns the list of container IDs for the "nodes" in the cluster +// that are ~Kubernetes nodes, as opposed to e.g. 
the external loadbalancer for HA +func InternalNodes(allNodes []nodes.Node) ([]nodes.Node, error) { + selectedNodes := []nodes.Node{} + for _, node := range allNodes { + nodeRole, err := node.Role() + if err != nil { + return nil, err + } + if nodeRole == constants.WorkerNodeRoleValue || nodeRole == constants.ControlPlaneNodeRoleValue { + selectedNodes = append(selectedNodes, node) + } + } + return selectedNodes, nil +} + +// ExternalLoadBalancerNode returns a node handle for the external control plane +// loadbalancer node or nil if there isn't one +func ExternalLoadBalancerNode(allNodes []nodes.Node) (nodes.Node, error) { + // identify and validate external load balancer node + loadBalancerNodes, err := SelectNodesByRole( + allNodes, + constants.ExternalLoadBalancerNodeRoleValue, + ) + if err != nil { + return nil, err + } + if len(loadBalancerNodes) < 1 { + return nil, nil + } + if len(loadBalancerNodes) > 1 { + return nil, errors.Errorf( + "unexpected number of %s nodes %d", + constants.ExternalLoadBalancerNodeRoleValue, + len(loadBalancerNodes), + ) + } + return loadBalancerNodes[0], nil +} + +// APIServerEndpointNode selects the node from allNodes which hosts the API Server endpoint +// This should be the control plane node if there is one control plane node, or a LoadBalancer otherwise. +// It returns an error if the node list is invalid (E.G. two control planes and no load balancer) +func APIServerEndpointNode(allNodes []nodes.Node) (nodes.Node, error) { + if n, err := ExternalLoadBalancerNode(allNodes); err != nil { + return nil, errors.Wrap(err, "failed to find api-server endpoint node") + } else if n != nil { + return n, nil + } + n, err := ControlPlaneNodes(allNodes) + if err != nil { + return nil, errors.Wrap(err, "failed to find api-server endpoint node") + } + if len(n) != 1 { + return nil, errors.Errorf("expected one control plane node or a load balancer, not %d and none", len(n)) + } + return n[0], nil +} + +// ControlPlaneNodes returns all control plane nodes such that the first entry +// is the bootstrap control plane node +func ControlPlaneNodes(allNodes []nodes.Node) ([]nodes.Node, error) { + controlPlaneNodes, err := SelectNodesByRole( + allNodes, + constants.ControlPlaneNodeRoleValue, + ) + if err != nil { + return nil, err + } + // pick the first by sorting + // TODO(bentheelder): perhaps in the future we should mark this node + // specially at container creation time + sort.Slice(controlPlaneNodes, func(i, j int) bool { + return strings.Compare(controlPlaneNodes[i].String(), controlPlaneNodes[j].String()) < 0 + }) + return controlPlaneNodes, nil +} + +// BootstrapControlPlaneNode returns a handle to the bootstrap control plane node +// TODO(bentheelder): remove this. 
This node shouldn't be special (fix that first) +func BootstrapControlPlaneNode(allNodes []nodes.Node) (nodes.Node, error) { + controlPlaneNodes, err := ControlPlaneNodes(allNodes) + if err != nil { + return nil, err + } + if len(controlPlaneNodes) < 1 { + return nil, errors.Errorf( + "expected at least one %s node", + constants.ControlPlaneNodeRoleValue, + ) + } + return controlPlaneNodes[0], nil +} + +// SecondaryControlPlaneNodes returns handles to the secondary +// control plane nodes and NOT the bootstrap control plane node +func SecondaryControlPlaneNodes(allNodes []nodes.Node) ([]nodes.Node, error) { + controlPlaneNodes, err := ControlPlaneNodes(allNodes) + if err != nil { + return nil, err + } + if len(controlPlaneNodes) < 1 { + return nil, errors.Errorf( + "expected at least one %s node", + constants.ControlPlaneNodeRoleValue, + ) + } + return controlPlaneNodes[1:], nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/util.go b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/util.go new file mode 100644 index 000000000..df0af0c92 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/util.go @@ -0,0 +1,102 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodeutils + +import ( + "bytes" + "encoding/json" + "io" + "path" + "strings" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +// KubeVersion returns the Kubernetes version installed on the node +func KubeVersion(n nodes.Node) (version string, err error) { + // grab kubernetes version from the node image + cmd := n.Command("cat", "/kind/version") + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get file") + } + if len(lines) != 1 { + return "", errors.Errorf("file should only be one line, got %d lines", len(lines)) + } + return lines[0], nil +} + +// WriteFile writes content to dest on the node +func WriteFile(n nodes.Node, dest, content string) error { + // create destination directory + err := n.Command("mkdir", "-p", path.Dir(dest)).Run() + if err != nil { + return errors.Wrapf(err, "failed to create directory %s", path.Dir(dest)) + } + + return n.Command("cp", "/dev/stdin", dest).SetStdin(strings.NewReader(content)).Run() +} + +// CopyNodeToNode copies file from a to b +func CopyNodeToNode(a, b nodes.Node, file string) error { + // create destination directory + err := b.Command("mkdir", "-p", path.Dir(file)).Run() + if err != nil { + return errors.Wrapf(err, "failed to create directory %q", path.Dir(file)) + } + + // TODO: experiment with streaming instead to avoid the copy + // for now we only use this for small files so it's not worth the complexity + var buff bytes.Buffer + if err := a.Command("cat", file).SetStdout(&buff).Run(); err != nil { + return errors.Wrapf(err, "failed to read %q from node", file) + } + if err := b.Command("cp", "/dev/stdin", file).SetStdin(&buff).Run(); err != nil { + return errors.Wrapf(err, "failed to write %q to node", file) + } + 
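+ // (hypothetical usage, not upstream kind code: nodeutils.CopyNodeToNode(bootstrapCP, secondaryCP, "/etc/kubernetes/pki/ca.crt") would replicate a small certificate file between control plane nodes, the kind of small-file copy this helper is intended for)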
+ return nil +} + +// LoadImageArchive loads image onto the node, where image is a Reader over an image archive +func LoadImageArchive(n nodes.Node, image io.Reader) error { + cmd := n.Command("ctr", "--namespace=k8s.io", "images", "import", "-").SetStdin(image) + if err := cmd.Run(); err != nil { + return errors.Wrap(err, "failed to load image") + } + return nil +} + +// ImageID returns ID of image on the node with the given image name if present +func ImageID(n nodes.Node, image string) (string, error) { + var out bytes.Buffer + if err := n.Command("crictl", "inspecti", image).SetStdout(&out).Run(); err != nil { + return "", err + } + // we only care about the image ID + crictlOut := struct { + Status struct { + ID string `json:"id"` + } `json:"status"` + }{} + if err := json.Unmarshal(out.Bytes(), &crictlOut); err != nil { + return "", err + } + return crictlOut.Status.ID, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/provider.go b/vendor/sigs.k8s.io/kind/pkg/cluster/provider.go new file mode 100644 index 000000000..26d308ce4 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/provider.go @@ -0,0 +1,195 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "sort" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/log" + + internalcreate "sigs.k8s.io/kind/pkg/cluster/internal/create" + internaldelete "sigs.k8s.io/kind/pkg/cluster/internal/delete" + "sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig" + internalproviders "sigs.k8s.io/kind/pkg/cluster/internal/providers" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/docker" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/podman" +) + +// DefaultName is the default cluster name +const DefaultName = constants.DefaultClusterName + +// defaultName is a helper that given a name defaults it if unset +func defaultName(name string) string { + if name == "" { + name = DefaultName + } + return name +} + +// Provider is used to perform cluster operations +type Provider struct { + provider internalproviders.Provider + logger log.Logger +} + +// NewProvider returns a new provider based on the supplied options +func NewProvider(options ...ProviderOption) *Provider { + p := &Provider{ + logger: log.NoopLogger{}, + } + // Ensure we apply the logger options first, while maintaining the order + // otherwise. This way we can trivially init the internal provider with + // the logger. 
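+ // (descriptive note, not upstream kind commentary: a stable sort whose less function is simply "i is a logger option and j is not" partitions the slice so all logger options precede the rest while preserving the caller's relative order within each group, e.g. [runtime, logger, runtime] becomes [logger, runtime, runtime])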
+ sort.SliceStable(options, func(i, j int) bool { + _, iIsLogger := options[i].(providerLoggerOption) + _, jIsLogger := options[j].(providerLoggerOption) + return iIsLogger && !jIsLogger + }) + for _, o := range options { + if o != nil { + o.apply(p) + } + } + + if p.provider == nil { + // auto-detect based on each package IsAvailable() function + // default to docker for backwards compatibility + if docker.IsAvailable() { + p.provider = docker.NewProvider(p.logger) + } else if podman.IsAvailable() { + p.provider = podman.NewProvider(p.logger) + } else { + p.provider = docker.NewProvider(p.logger) + } + } + return p +} + +// ProviderOption is an option for configuring a provider +type ProviderOption interface { + apply(p *Provider) +} + +// providerLoggerOption is a trivial ProviderOption adapter +// we use a type specific to logging options so we can handle them first +type providerLoggerOption func(p *Provider) + +func (a providerLoggerOption) apply(p *Provider) { + a(p) +} + +var _ ProviderOption = providerLoggerOption(nil) + +// ProviderWithLogger configures the provider to use the given logger +func ProviderWithLogger(logger log.Logger) ProviderOption { + return providerLoggerOption(func(p *Provider) { + p.logger = logger + }) +} + +// providerRuntimeOption is a trivial ProviderOption adapter +// we use a type specific to runtime options so logger options can still be applied first +type providerRuntimeOption func(p *Provider) + +func (a providerRuntimeOption) apply(p *Provider) { + a(p) +} + +var _ ProviderOption = providerRuntimeOption(nil) + +// ProviderWithDocker configures the provider to use docker runtime +func ProviderWithDocker() ProviderOption { + return providerRuntimeOption(func(p *Provider) { + p.provider = docker.NewProvider(p.logger) + }) +} + +// ProviderWithPodman configures the provider to use podman runtime +func ProviderWithPodman() ProviderOption { + return providerRuntimeOption(func(p *Provider) { + p.provider = podman.NewProvider(p.logger) + }) +} + +// Create provisions and starts a kubernetes-in-docker cluster +// TODO: move name to an option to override config +func (p *Provider) Create(name string, options ...CreateOption) error { + // apply options + opts := &internalcreate.ClusterOptions{ + NameOverride: name, + } + for _, o := range options { + if err := o.apply(opts); err != nil { + return err + } + } + return internalcreate.Cluster(p.logger, p.provider, opts) +} + +// Delete tears down a kubernetes-in-docker cluster +func (p *Provider) Delete(name, explicitKubeconfigPath string) error { + return internaldelete.Cluster(p.logger, p.provider, defaultName(name), explicitKubeconfigPath) +} + +// List returns a list of clusters for which nodes exist +func (p *Provider) List() ([]string, error) { + return p.provider.ListClusters() +} + +// KubeConfig returns the KUBECONFIG for the cluster +// If internal is true, this will contain the internal IP etc. +// If internal is false, this will contain the host IP etc. +func (p *Provider) KubeConfig(name string, internal bool) (string, error) { + return kubeconfig.Get(p.provider, defaultName(name), !internal) +} + +// ExportKubeConfig exports the KUBECONFIG for the cluster, merging +// it into the selected file, following the rules from +// https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#config +// where explicitPath is the --kubeconfig value.
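+// (hypothetical example, not upstream kind code: cluster.NewProvider().ExportKubeConfig("kind", "") would merge the cluster's credentials into the default kubeconfig chain, since an empty explicitPath falls back to $KUBECONFIG and then ~/.kube/config per the rules linked above)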
+func (p *Provider) ExportKubeConfig(name string, explicitPath string) error { + return kubeconfig.Export(p.provider, defaultName(name), explicitPath) +} + +// ListNodes returns the list of container IDs for the "nodes" in the cluster +func (p *Provider) ListNodes(name string) ([]nodes.Node, error) { + return p.provider.ListNodes(defaultName(name)) +} + +// ListInternalNodes returns the list of container IDs for the "nodes" in the cluster +// that are not external +func (p *Provider) ListInternalNodes(name string) ([]nodes.Node, error) { + n, err := p.provider.ListNodes(name) + if err != nil { + return nil, err + } + return nodeutils.InternalNodes(n) +} + +// CollectLogs will populate dir with cluster logs and other debug files +func (p *Provider) CollectLogs(name, dir string) error { + // TODO: should use ListNodes and Collect should handle nodes differently + // based on role ... + n, err := p.ListInternalNodes(name) + if err != nil { + return err + } + return p.provider.CollectLogs(dir, n) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cmd/doc.go b/vendor/sigs.k8s.io/kind/pkg/cmd/doc.go new file mode 100644 index 000000000..ae20f68a7 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cmd/doc.go @@ -0,0 +1,15 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cmd provides helpers used by kind's commands / cli +package cmd diff --git a/vendor/sigs.k8s.io/kind/pkg/cmd/iostreams.go b/vendor/sigs.k8s.io/kind/pkg/cmd/iostreams.go new file mode 100644 index 000000000..2903235de --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cmd/iostreams.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "io" + "os" +) + +// IOStreams provides the standard names for iostreams. +// This is useful for embedding and for unit testing. 
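+// (hypothetical test usage, not upstream kind code: with bytes and strings imported, streams := cmd.IOStreams{In: strings.NewReader(""), Out: new(bytes.Buffer), ErrOut: new(bytes.Buffer)} lets a test capture command output instead of writing to the real terminal.)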
+// Inconsistent and different names make it hard to read and review code +// This is based on cli-runtime, but just the nice type without the dependency +type IOStreams struct { + // In think, os.Stdin + In io.Reader + // Out think, os.Stdout + Out io.Writer + // ErrOut think, os.Stderr + ErrOut io.Writer +} + +// StandardIOStreams returns an IOStreams from os.Stdin, os.Stdout +func StandardIOStreams() IOStreams { + return IOStreams{ + In: os.Stdin, + Out: os.Stdout, + ErrOut: os.Stderr, + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cmd/kind/version/version.go b/vendor/sigs.k8s.io/kind/pkg/cmd/kind/version/version.go new file mode 100644 index 000000000..1074c479e --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cmd/kind/version/version.go @@ -0,0 +1,89 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version implements the `version` command +package version + +import ( + "fmt" + "runtime" + + "github.com/spf13/cobra" + + "sigs.k8s.io/kind/pkg/cmd" + "sigs.k8s.io/kind/pkg/log" +) + +// Version returns the kind CLI Semantic Version +func Version() string { + v := VersionCore + // add pre-release version info if we have it + if VersionPreRelease != "" { + v += "-" + VersionPreRelease + // if commit was set, add the + <build> + // we only do this for pre-release versions + if GitCommit != "" { + // NOTE: use 14 character short hash, like Kubernetes + v += "+" + truncate(GitCommit, 14) + } + } + return v +} + +// DisplayVersion is Version() display formatted, this is what the version +// subcommand prints +func DisplayVersion() string { + return "kind v" + Version() + " " + runtime.Version() + " " + runtime.GOOS + "/" + runtime.GOARCH +} + +// VersionCore is the core portion of the kind CLI version per Semantic Versioning 2.0.0 +const VersionCore = "0.10.0" + +// VersionPreRelease is the pre-release portion of the kind CLI version per +// Semantic Versioning 2.0.0 +const VersionPreRelease = "" + +// GitCommit is the commit used to build the kind binary, if available. +// It is injected at build time. 
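+// (hypothetical example, not upstream kind docs: a release build might inject it with go build -ldflags "-X sigs.k8s.io/kind/pkg/cmd/kind/version.GitCommit=$(git rev-parse HEAD)")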
+var GitCommit = "" + +// NewCommand returns a new cobra.Command for version +func NewCommand(logger log.Logger, streams cmd.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Args: cobra.NoArgs, + Use: "version", + Short: "Prints the kind CLI version", + Long: "Prints the kind CLI version", + RunE: func(cmd *cobra.Command, args []string) error { + if logger.V(0).Enabled() { + // if not -q / --quiet, show lots of info + fmt.Fprintln(streams.Out, DisplayVersion()) + } else { + // otherwise only show semver + fmt.Fprintln(streams.Out, Version()) + } + return nil + }, + } + return cmd +} + +func truncate(s string, maxLen int) string { + if len(s) < maxLen { + return s + } + return s[:maxLen] +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cmd/logger.go b/vendor/sigs.k8s.io/kind/pkg/cmd/logger.go new file mode 100644 index 000000000..eaf0f944c --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cmd/logger.go @@ -0,0 +1,47 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "io" + "os" + + "sigs.k8s.io/kind/pkg/log" + + "sigs.k8s.io/kind/pkg/internal/cli" + "sigs.k8s.io/kind/pkg/internal/env" +) + +// NewLogger returns the standard logger used by the kind CLI +// This logger writes to os.Stderr +func NewLogger() log.Logger { + var writer io.Writer = os.Stderr + if env.IsSmartTerminal(writer) { + writer = cli.NewSpinner(writer) + } + return cli.NewLogger(writer, 0) +} + +// ColorEnabled returns true if color is enabled for the logger +// this should be used to control output +func ColorEnabled(logger log.Logger) bool { + type maybeColorer interface { + ColorEnabled() bool + } + v, ok := logger.(maybeColorer) + return ok && v.ColorEnabled() +} diff --git a/vendor/sigs.k8s.io/kind/pkg/errors/aggregate.go b/vendor/sigs.k8s.io/kind/pkg/errors/aggregate.go new file mode 100644 index 000000000..d8ed79f07 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/errors/aggregate.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package errors + +import ( + k8serrors "k8s.io/apimachinery/pkg/util/errors" +) + +// NewAggregate is a k8s.io/apimachinery/pkg/util/errors.NewAggregate wrapper +// note that while it returns a StackTrace wrapped Aggregate +// That has been Flattened and Reduced +func NewAggregate(errlist []error) error { + return WithStack( + k8serrors.Reduce( + k8serrors.Flatten( + k8serrors.NewAggregate(errlist), + ), + ), + ) +} + +// Errors returns the deepest Aggregate in a Cause chain +func Errors(err error) []error { + var errors k8serrors.Aggregate + for { + if v, ok := err.(k8serrors.Aggregate); ok { + errors = v + } + if causerErr, ok := err.(Causer); ok { + err = causerErr.Cause() + } else { + break + } + } + if errors != nil { + return errors.Errors() + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/errors/concurrent.go b/vendor/sigs.k8s.io/kind/pkg/errors/concurrent.go new file mode 100644 index 000000000..1d90ad197 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/errors/concurrent.go @@ -0,0 +1,69 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "sync" +) + +// UntilErrorConcurrent runs all funcs in separate goroutines, returning the +// first non-nil error returned from funcs, or nil if all funcs return nil +func UntilErrorConcurrent(funcs []func() error) error { + errCh := make(chan error, len(funcs)) + for _, f := range funcs { + f := f // capture f + go func() { + errCh <- f() + }() + } + for i := 0; i < len(funcs); i++ { + if err := <-errCh; err != nil { + return err + } + } + return nil +} + +// AggregateConcurrent runs fns concurrently, returning a NewAggregate if there are > 1 errors +func AggregateConcurrent(funcs []func() error) error { + // run all fns concurrently + ch := make(chan error, len(funcs)) + var wg sync.WaitGroup + for _, f := range funcs { + f := f // capture f + wg.Add(1) + go func() { + defer wg.Done() + ch <- f() + }() + } + wg.Wait() + close(ch) + // collect up and return errors + errs := []error{} + for err := range ch { + if err != nil { + errs = append(errs, err) + } + } + if len(errs) > 1 { + return NewAggregate(errs) + } else if len(errs) == 1 { + return errs[0] + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/errors/doc.go b/vendor/sigs.k8s.io/kind/pkg/errors/doc.go new file mode 100644 index 000000000..c93a68db3 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package errors provides common utilities for dealing with errors +package errors diff --git a/vendor/sigs.k8s.io/kind/pkg/errors/errors.go b/vendor/sigs.k8s.io/kind/pkg/errors/errors.go new file mode 100644 index 000000000..3e2d1d714 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/errors/errors.go @@ -0,0 +1,86 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + pkgerrors "github.com/pkg/errors" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return pkgerrors.New(message) +} + +// Errorf formats according to a format specifier and returns the string as a +// value that satisfies error. Errorf also records the stack trace at the +// point it was called. +func Errorf(format string, args ...interface{}) error { + return pkgerrors.Errorf(format, args...) +} + +// Wrap returns an error annotating err with a stack trace at the point Wrap +// is called, and the supplied message. If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + return pkgerrors.Wrap(err, message) +} + +// Wrapf returns an error annotating err with a stack trace at the point Wrapf +// is called, and the format specifier. If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + return pkgerrors.Wrapf(err, format, args...) +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. +func WithStack(err error) error { + return pkgerrors.WithStack(err) +} + +// Causer is an interface to github.com/pkg/errors error's Cause() wrapping +type Causer interface { + // Cause returns the underlying error + Cause() error +} + +// StackTracer is an interface to github.com/pkg/errors error's StackTrace() +type StackTracer interface { + // StackTrace returns the StackTrace ... + // TODO: return our own type instead? + // https://github.com/pkg/errors#roadmap + StackTrace() pkgerrors.StackTrace +} + +// StackTrace returns the deepest StackTrace in a Cause chain +// https://github.com/pkg/errors/issues/173 +func StackTrace(err error) pkgerrors.StackTrace { + var stackErr error + for { + if _, ok := err.(StackTracer); ok { + stackErr = err + } + if causerErr, ok := err.(Causer); ok { + err = causerErr.Cause() + } else { + break + } + } + if stackErr != nil { + return stackErr.(StackTracer).StackTrace() + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/exec/default.go b/vendor/sigs.k8s.io/kind/pkg/exec/default.go new file mode 100644 index 000000000..98a215cdf --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/exec/default.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import "context" + +// DefaultCmder is a LocalCmder instance used for convenience, packages +// originally using os/exec.Command can instead use pkg/kind/exec.Command +// which forwards to this instance +// TODO(bentheelder): swap this for testing +// TODO(bentheelder): consider not using a global for this :^) +var DefaultCmder = &LocalCmder{} + +// Command is a convenience wrapper over DefaultCmder.Command +func Command(command string, args ...string) Cmd { + return DefaultCmder.Command(command, args...) +} + +// CommandContext is a convenience wrapper over DefaultCmder.CommandContext +func CommandContext(ctx context.Context, command string, args ...string) Cmd { + return DefaultCmder.CommandContext(ctx, command, args...) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/exec/doc.go b/vendor/sigs.k8s.io/kind/pkg/exec/doc.go new file mode 100644 index 000000000..68214779b --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/exec/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package exec contains an interface for executing commands, along with helpers +// TODO(bentheelder): add standardized timeout functionality & a default timeout +// so that commands cannot hang indefinitely (!) +package exec diff --git a/vendor/sigs.k8s.io/kind/pkg/exec/helpers.go b/vendor/sigs.k8s.io/kind/pkg/exec/helpers.go new file mode 100644 index 000000000..e579589e8 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/exec/helpers.go @@ -0,0 +1,142 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package exec + +import ( + "bufio" + "bytes" + "io" + "os" + "strings" + + "github.com/alessio/shellescape" + + "sigs.k8s.io/kind/pkg/errors" +) + +// PrettyCommand takes arguments identical to Cmder.Command, +// it returns a pretty printed command that could be pasted into a shell +func PrettyCommand(name string, args ...string) string { + var out strings.Builder + out.WriteString(shellescape.Quote(name)) + for _, arg := range args { + out.WriteByte(' ') + out.WriteString(shellescape.Quote(arg)) + } + return out.String() +} + +// RunErrorForError returns a RunError if the error contains a RunError. +// Otherwise it returns nil +func RunErrorForError(err error) *RunError { + var runError *RunError + for { + if rErr, ok := err.(*RunError); ok { + runError = rErr + } + if causerErr, ok := err.(errors.Causer); ok { + err = causerErr.Cause() + } else { + break + } + } + return runError +} + +// CombinedOutputLines is like os/exec's cmd.CombinedOutput(), +// but over our Cmd interface, and instead of returning the byte buffer of +// stderr + stdout, it scans these for lines and returns a slice of output lines +func CombinedOutputLines(cmd Cmd) (lines []string, err error) { + var buff bytes.Buffer + cmd.SetStdout(&buff) + cmd.SetStderr(&buff) + err = cmd.Run() + scanner := bufio.NewScanner(&buff) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + return lines, err +} + +// OutputLines is like os/exec's cmd.Output(), +// but over our Cmd interface, and instead of returning the byte buffer of +// stdout, it scans these for lines and returns a slice of output lines +func OutputLines(cmd Cmd) (lines []string, err error) { + var buff bytes.Buffer + cmd.SetStdout(&buff) + err = cmd.Run() + scanner := bufio.NewScanner(&buff) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + return lines, err +} + +// Output is like os/exec's cmd.Output, but over our Cmd interface +func Output(cmd Cmd) ([]byte, error) { + var buff bytes.Buffer + cmd.SetStdout(&buff) + err := cmd.Run() + return buff.Bytes(), err +} + +// InheritOutput sets cmd's output to write to the current process's stdout and stderr +func InheritOutput(cmd Cmd) Cmd { + cmd.SetStderr(os.Stderr) + cmd.SetStdout(os.Stdout) + return cmd +} + +// RunWithStdoutReader runs cmd with stdout piped to readerFunc +func RunWithStdoutReader(cmd Cmd, readerFunc func(io.Reader) error) error { + pr, pw, err := os.Pipe() + if err != nil { + return err + } + cmd.SetStdout(pw) + + return errors.AggregateConcurrent([]func() error{ + func() error { + defer pr.Close() + return readerFunc(pr) + }, + func() error { + defer pw.Close() + return cmd.Run() + }, + }) +} + +// RunWithStdinWriter runs cmd with writerFunc piped to stdin +func RunWithStdinWriter(cmd Cmd, writerFunc func(io.Writer) error) error { + pr, pw, err := os.Pipe() + if err != nil { + return err + } + cmd.SetStdin(pr) + + return errors.AggregateConcurrent([]func() error{ + func() error { + defer pw.Close() + return writerFunc(pw) + }, + func() error { + defer pr.Close() + return cmd.Run() + }, + }) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/exec/local.go b/vendor/sigs.k8s.io/kind/pkg/exec/local.go new file mode 100644 index 000000000..65bb73c9a --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/exec/local.go @@ -0,0 +1,157 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	"bytes"
+	"context"
+	"io"
+	osexec "os/exec"
+	"sync"
+
+	"sigs.k8s.io/kind/pkg/errors"
+)
+
+// LocalCmd wraps os/exec.Cmd, implementing the kind/pkg/exec.Cmd interface
+type LocalCmd struct {
+	*osexec.Cmd
+}
+
+var _ Cmd = &LocalCmd{}
+
+// LocalCmder is a factory for LocalCmd, implementing Cmder
+type LocalCmder struct{}
+
+var _ Cmder = &LocalCmder{}
+
+// Command returns a new exec.Cmd backed by Cmd
+func (c *LocalCmder) Command(name string, arg ...string) Cmd {
+	return &LocalCmd{
+		Cmd: osexec.Command(name, arg...),
+	}
+}
+
+// CommandContext is like Command but includes a context
+func (c *LocalCmder) CommandContext(ctx context.Context, name string, arg ...string) Cmd {
+	return &LocalCmd{
+		Cmd: osexec.CommandContext(ctx, name, arg...),
+	}
+}
+
+// SetEnv sets env
+func (cmd *LocalCmd) SetEnv(env ...string) Cmd {
+	cmd.Env = env
+	return cmd
+}
+
+// SetStdin sets stdin
+func (cmd *LocalCmd) SetStdin(r io.Reader) Cmd {
+	cmd.Stdin = r
+	return cmd
+}
+
+// SetStdout sets stdout
+func (cmd *LocalCmd) SetStdout(w io.Writer) Cmd {
+	cmd.Stdout = w
+	return cmd
+}
+
+// SetStderr sets stderr
+func (cmd *LocalCmd) SetStderr(w io.Writer) Cmd {
+	cmd.Stderr = w
+	return cmd
+}
+
+// Run runs the command
+// If the returned error is non-nil, it should be of type *RunError
+func (cmd *LocalCmd) Run() error {
+	// Background:
+	// Go's stdlib will set up and use a shared fd when cmd.Stderr == cmd.Stdout
+	// In any other case, it will use different fds, which will involve
+	// two different io.Copy goroutines writing to cmd.Stderr and cmd.Stdout
+	//
+	// Given this, we must synchronize capturing the output to a buffer
+	// IFF ! interfaceEqual(cmd.Stderr, cmd.Stdout)
+	var combinedOutput bytes.Buffer
+	var combinedOutputWriter io.Writer = &combinedOutput
+	if cmd.Stdout == nil && cmd.Stderr == nil {
+		// Case 1: If stdout and stderr are nil, we can just use the buffer
+		// The buffer will be == and Go will use one fd / goroutine
+		cmd.Stdout = combinedOutputWriter
+		cmd.Stderr = combinedOutputWriter
+	} else if interfaceEqual(cmd.Stdout, cmd.Stderr) {
+		// Case 2: If cmd.Stdout == cmd.Stderr go will still share the fd,
+		// but we need to wrap with a MultiWriter to respect the other writer
+		// and our buffer.
+		// The MultiWriter will be == and Go will use one fd / goroutine
+		cmd.Stdout = io.MultiWriter(cmd.Stdout, combinedOutputWriter)
+		cmd.Stderr = cmd.Stdout
+	} else {
+		// Case 3: If cmd.Stdout != cmd.Stderr, we need to synchronize the
+		// combined output writer.
+		// Go will use different fds / write routines for stdout and stderr
+		combinedOutputWriter = &mutexWriter{
+			writer: &combinedOutput,
+		}
+		// wrap writers if non-nil
+		if cmd.Stdout != nil {
+			cmd.Stdout = io.MultiWriter(cmd.Stdout, combinedOutputWriter)
+		} else {
+			cmd.Stdout = combinedOutputWriter
+		}
+		if cmd.Stderr != nil {
+			cmd.Stderr = io.MultiWriter(cmd.Stderr, combinedOutputWriter)
+		} else {
+			cmd.Stderr = combinedOutputWriter
+		}
+	}
+	// TODO: should be in the caller or logger should be injected somehow ...
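+	// run the wrapped command; any failure is wrapped in a *RunError that
+	// carries the argv and the captured combined output for diagnostics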
+	if err := cmd.Cmd.Run(); err != nil {
+		return errors.WithStack(&RunError{
+			Command: cmd.Args,
+			Output:  combinedOutput.Bytes(),
+			Inner:   err,
+		})
+	}
+	return nil
+}
+
+// interfaceEqual protects against panics from doing equality tests on
+// two interfaces with non-comparable underlying types.
+// This trick is borrowed from the Go stdlib's os/exec
+// Note that the recover will only happen if a is not comparable to b,
+// in which case we'll return false
+// We've lightly modified this to pass errcheck (explicitly ignoring recover)
+func interfaceEqual(a, b interface{}) bool {
+	defer func() {
+		_ = recover()
+	}()
+	return a == b
+}
+
+// mutexWriter is a simple synchronized wrapper around an io.Writer
+type mutexWriter struct {
+	writer io.Writer
+	mu     sync.Mutex
+}
+
+func (m *mutexWriter) Write(b []byte) (int, error) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	n, err := m.writer.Write(b)
+	return n, err
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/exec/types.go b/vendor/sigs.k8s.io/kind/pkg/exec/types.go
new file mode 100644
index 000000000..4ce60431e
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/exec/types.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	"context"
+	"fmt"
+	"io"
+)
+
+// Cmd abstracts over running a command somewhere; this is useful for testing
+type Cmd interface {
+	// Run executes the command (like os/exec.Cmd.Run); it should return
+	// a *RunError if there is any error
+	Run() error
+	// Each entry should be of the form "key=value"
+	SetEnv(...string) Cmd
+	SetStdin(io.Reader) Cmd
+	SetStdout(io.Writer) Cmd
+	SetStderr(io.Writer) Cmd
+}
+
+// Cmder abstracts over creating commands
+type Cmder interface {
+	// command, args..., just like os/exec.Cmd
+	Command(string, ...string) Cmd
+	CommandContext(context.Context, string, ...string) Cmd
+}
+
+// RunError represents an error running a Cmd
+type RunError struct {
+	Command []string // [Name Args...]
+	Output  []byte   // Captured Stdout / Stderr of the command
+	Inner   error    // Underlying error if any
+}
+
+var _ error = &RunError{}
+
+func (e *RunError) Error() string {
+	// TODO(BenTheElder): implement formatter, and show output for %+v ?
+	return fmt.Sprintf("command \"%s\" failed with error: %v", e.PrettyCommand(), e.Inner)
+}
+
+// PrettyCommand pretty prints the command in a way that could be pasted
+// into a shell
+func (e *RunError) PrettyCommand() string {
+	return PrettyCommand(e.Command[0], e.Command[1:]...)
+}
+
+// Cause mimics github.com/pkg/errors's Cause pattern for errors
+func (e *RunError) Cause() error {
+	if e.Inner != nil {
+		return e.Inner
+	}
+	return e
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/fs/fs.go b/vendor/sigs.k8s.io/kind/pkg/fs/fs.go
new file mode 100644
index 000000000..0ccdef9a5
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/fs/fs.go
@@ -0,0 +1,153 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fs contains utilities for interacting with the host filesystem
+// in a docker friendly way
+// TODO(bentheelder): this should be internal
+package fs
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"runtime"
+	"strings"
+)
+
+// TempDir is like ioutil.TempDir, but more docker friendly
+func TempDir(dir, prefix string) (name string, err error) {
+	// create a tempdir as normal
+	name, err = ioutil.TempDir(dir, prefix)
+	if err != nil {
+		return "", err
+	}
+	// on macOS $TMPDIR is typically /var/..., which is not mountable
+	// /private/var/... is the mountable equivalent
+	if runtime.GOOS == "darwin" && strings.HasPrefix(name, "/var/") {
+		name = filepath.Join("/private", name)
+	}
+	return name, nil
+}
+
+// IsAbs is like filepath.IsAbs but also considers posix absolute paths
+// to be absolute even if filepath.IsAbs would not
+// This fixes the case of Posix paths on Windows
+func IsAbs(hostPath string) bool {
+	return path.IsAbs(hostPath) || filepath.IsAbs(hostPath)
+}
+
+// Copy recursively copies directories, symlinks, and files from src to dst
+// Copy will make dirs as necessary, and keep file modes
+// Symlinks will be dereferenced, similar to `cp -r src dst`
+func Copy(src, dst string) error {
+	// get source info
+	info, err := os.Lstat(src)
+	if err != nil {
+		return err
+	}
+	// make sure dest dir exists
+	if err := os.MkdirAll(filepath.Dir(dst), os.ModePerm); err != nil {
+		return err
+	}
+	// do real copy work
+	return copy(src, dst, info)
+}
+
+func copy(src, dst string, info os.FileInfo) error {
+	if info.Mode()&os.ModeSymlink != 0 {
+		return copySymlink(src, dst)
+	}
+	if info.IsDir() {
+		return copyDir(src, dst, info)
+	}
+	return copyFile(src, dst, info)
+}
+
+// CopyFile copies a file from src to dst
+func CopyFile(src, dst string) (err error) {
+	// get source information
+	info, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+	return copyFile(src, dst, info)
+}
+
+// copyFile uses a named error return so the deferred Close can propagate
+// a close error when the copy itself succeeded
+func copyFile(src, dst string, info os.FileInfo) (err error) {
+	// open src for reading
+	in, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer in.Close()
+	// create dst file
+	// this is like f, err := os.Create(dst); os.Chmod(f.Name(), src.Mode())
+	out, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, info.Mode())
+	if err != nil {
+		return err
+	}
+	// make sure we close the file
+	defer func() {
+		closeErr := out.Close()
+		// if we weren't returning an error
+		if err == nil {
+			err = closeErr
+		}
+	}()
+	// actually copy
+	if _, err = io.Copy(out, in); err != nil {
+		return err
+	}
+	err = out.Sync()
+	return err
+}
+
+// copySymlink dereferences and then copies a symlink
+func copySymlink(src, dst string) error {
+	// read through the symlink
+	realSrc, err := filepath.EvalSymlinks(src)
+	if err != nil {
+		return err
+	}
+	info, err := os.Lstat(realSrc)
+	if err != nil {
+		return err
+	}
+	// copy the underlying contents
+	return copy(realSrc, dst, info)
+}
+
+func copyDir(src, dst string, info os.FileInfo) error {
+	
// make sure the target dir exists + if err := os.MkdirAll(dst, info.Mode()); err != nil { + return err + } + // copy every source dir entry + entries, err := ioutil.ReadDir(src) + if err != nil { + return err + } + for _, entry := range entries { + entrySrc := filepath.Join(src, entry.Name()) + entryDst := filepath.Join(dst, entry.Name()) + if err := copy(entrySrc, entryDst, entry); err != nil { + return err + } + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/convert_v1alpha4.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/convert_v1alpha4.go new file mode 100644 index 000000000..96e7aa8d4 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/convert_v1alpha4.go @@ -0,0 +1,102 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + v1alpha4 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" +) + +// Convertv1alpha4 converts a v1alpha4 cluster to a cluster at the internal API version +func Convertv1alpha4(in *v1alpha4.Cluster) *Cluster { + in = in.DeepCopy() // deep copy first to avoid touching the original + out := &Cluster{ + Name: in.Name, + Nodes: make([]Node, len(in.Nodes)), + FeatureGates: in.FeatureGates, + RuntimeConfig: in.RuntimeConfig, + KubeadmConfigPatches: in.KubeadmConfigPatches, + KubeadmConfigPatchesJSON6902: make([]PatchJSON6902, len(in.KubeadmConfigPatchesJSON6902)), + ContainerdConfigPatches: in.ContainerdConfigPatches, + ContainerdConfigPatchesJSON6902: in.ContainerdConfigPatchesJSON6902, + } + + for i := range in.Nodes { + convertv1alpha4Node(&in.Nodes[i], &out.Nodes[i]) + } + + convertv1alpha4Networking(&in.Networking, &out.Networking) + + for i := range in.KubeadmConfigPatchesJSON6902 { + convertv1alpha4PatchJSON6902(&in.KubeadmConfigPatchesJSON6902[i], &out.KubeadmConfigPatchesJSON6902[i]) + } + + return out +} + +func convertv1alpha4Node(in *v1alpha4.Node, out *Node) { + out.Role = NodeRole(in.Role) + out.Image = in.Image + + out.KubeadmConfigPatches = in.KubeadmConfigPatches + out.ExtraMounts = make([]Mount, len(in.ExtraMounts)) + out.ExtraPortMappings = make([]PortMapping, len(in.ExtraPortMappings)) + out.KubeadmConfigPatchesJSON6902 = make([]PatchJSON6902, len(in.KubeadmConfigPatchesJSON6902)) + + for i := range in.ExtraMounts { + convertv1alpha4Mount(&in.ExtraMounts[i], &out.ExtraMounts[i]) + } + + for i := range in.ExtraPortMappings { + convertv1alpha4PortMapping(&in.ExtraPortMappings[i], &out.ExtraPortMappings[i]) + } + + for i := range in.KubeadmConfigPatchesJSON6902 { + convertv1alpha4PatchJSON6902(&in.KubeadmConfigPatchesJSON6902[i], &out.KubeadmConfigPatchesJSON6902[i]) + } +} + +func convertv1alpha4PatchJSON6902(in *v1alpha4.PatchJSON6902, out *PatchJSON6902) { + out.Group = in.Group + out.Version = in.Version + out.Kind = in.Kind + out.Patch = in.Patch +} + +func convertv1alpha4Networking(in *v1alpha4.Networking, out *Networking) { + out.IPFamily = ClusterIPFamily(in.IPFamily) + out.APIServerPort = in.APIServerPort + out.APIServerAddress = 
in.APIServerAddress + out.PodSubnet = in.PodSubnet + out.KubeProxyMode = ProxyMode(in.KubeProxyMode) + out.ServiceSubnet = in.ServiceSubnet + out.DisableDefaultCNI = in.DisableDefaultCNI +} + +func convertv1alpha4Mount(in *v1alpha4.Mount, out *Mount) { + out.ContainerPath = in.ContainerPath + out.HostPath = in.HostPath + out.Readonly = in.Readonly + out.SelinuxRelabel = in.SelinuxRelabel + out.Propagation = MountPropagation(in.Propagation) +} + +func convertv1alpha4PortMapping(in *v1alpha4.PortMapping, out *PortMapping) { + out.ContainerPort = in.ContainerPort + out.HostPort = in.HostPort + out.ListenAddress = in.ListenAddress + out.Protocol = PortMappingProtocol(in.Protocol) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/default.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/default.go new file mode 100644 index 000000000..b4486e929 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/default.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this comment makes golint ignore this file, feel free to edit the file. +// Code generated by not-actually-generated-but-go-away-golint. DO NOT EDIT. +// https://github.com/kubernetes/code-generator/issues/30 + +package config + +import ( + "sigs.k8s.io/kind/pkg/apis/config/defaults" + "sigs.k8s.io/kind/pkg/cluster/constants" +) + +// SetDefaultsCluster sets uninitialized fields to their default value. 
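// Defaulting only fills empty fields, so calling it on an already-defaulted
// Cluster is a no-op (it is safe to call more than once).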
+func SetDefaultsCluster(obj *Cluster) { + // default cluster name + if obj.Name == "" { + obj.Name = constants.DefaultClusterName + } + + // default to a one node cluster + if len(obj.Nodes) == 0 { + obj.Nodes = []Node{ + { + Image: defaults.Image, + Role: ControlPlaneRole, + }, + } + } + + // default nodes + for i := range obj.Nodes { + a := &obj.Nodes[i] + SetDefaultsNode(a) + } + if obj.Networking.IPFamily == "" { + obj.Networking.IPFamily = "ipv4" + } + + // default to listening on 127.0.0.1:randomPort on ipv4 + // and [::1]:randomPort on ipv6 + if obj.Networking.APIServerAddress == "" { + obj.Networking.APIServerAddress = "127.0.0.1" + if obj.Networking.IPFamily == "ipv6" { + obj.Networking.APIServerAddress = "::1" + } + } + + // default the pod CIDR + if obj.Networking.PodSubnet == "" { + obj.Networking.PodSubnet = "10.244.0.0/16" + if obj.Networking.IPFamily == "ipv6" { + // node-mask cidr default is /64 so we need a larger subnet, we use /56 following best practices + // xref: https://www.ripe.net/publications/docs/ripe-690#4--size-of-end-user-prefix-assignment---48---56-or-something-else- + obj.Networking.PodSubnet = "fd00:10:244::/56" + } + } + + // default the service CIDR using the kubeadm default + // https://github.com/kubernetes/kubernetes/blob/746404f82a28e55e0b76ffa7e40306fb88eb3317/cmd/kubeadm/app/apis/kubeadm/v1beta2/defaults.go#L32 + // Note: kubeadm is using a /12 subnet, that may allocate a 2^20 bitmap in etcd + // we allocate a /16 subnet that allows 65535 services (current Kubernetes tested limit is O(10k) services) + if obj.Networking.ServiceSubnet == "" { + obj.Networking.ServiceSubnet = "10.96.0.0/16" + if obj.Networking.IPFamily == "ipv6" { + obj.Networking.ServiceSubnet = "fd00:10:96::/112" + } + } + // default the KubeProxyMode using iptables as it's already the default + if obj.Networking.KubeProxyMode == "" { + obj.Networking.KubeProxyMode = IPTablesMode + } +} + +// SetDefaultsNode sets uninitialized fields to their default value. +func SetDefaultsNode(obj *Node) { + if obj.Image == "" { + obj.Image = defaults.Image + } + + if obj.Role == "" { + obj.Role = ControlPlaneRole + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/doc.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/doc.go new file mode 100644 index 000000000..520aac207 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package config implements the current apiVersion of the `kind` Config +// along with some common abstractions +// +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=sigs.k8s.io/kind/pkg/internal/apis/config +// +k8s:defaulter-gen=TypeMeta +package config diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/convert.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/convert.go new file mode 100644 index 000000000..eded931b3 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/convert.go @@ -0,0 +1,29 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package encoding + +import ( + "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// V1Alpha4ToInternal converts to the internal API version +func V1Alpha4ToInternal(cluster *v1alpha4.Cluster) *config.Cluster { + v1alpha4.SetDefaultsCluster(cluster) + return config.Convertv1alpha4(cluster) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/doc.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/doc.go new file mode 100644 index 000000000..f6f90a9ef --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package encoding implements utilities for decoding from yaml the `kind` Config +package encoding diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/load.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/load.go new file mode 100644 index 000000000..c891564ba --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/load.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package encoding + +import ( + "bytes" + "io/ioutil" + + yaml "gopkg.in/yaml.v3" + + "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + "sigs.k8s.io/kind/pkg/errors" + + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// Load reads the file at path and attempts to convert into a `kind` Config; the file +// can be one of the different API versions defined in scheme. +// If path == "" then the default config is returned +// If path == "-" then reads from stdin +func Load(path string) (*config.Cluster, error) { + // special case: empty path -> default config + // TODO(bentheelder): consider removing this + if path == "" { + out := &config.Cluster{} + config.SetDefaultsCluster(out) + return out, nil + } + + // read in file + raw, err := ioutil.ReadFile(path) + if err != nil { + return nil, errors.Wrap(err, "error reading file") + } + + return Parse(raw) +} + +// Parse parses a cluster config from raw (yaml) bytes +// It will always return the current internal version after defaulting and +// conversion from the read version +func Parse(raw []byte) (*config.Cluster, error) { + // get kind & apiVersion + tm := typeMeta{} + if err := yaml.Unmarshal(raw, &tm); err != nil { + return nil, errors.Wrap(err, "could not determine kind / apiVersion for config") + } + + // decode specific (apiVersion, kind) + switch tm.APIVersion { + // handle v1alpha4 + case "kind.x-k8s.io/v1alpha4": + if tm.Kind != "Cluster" { + return nil, errors.Errorf("unknown kind %s for apiVersion: %s", tm.Kind, tm.APIVersion) + } + // load version + cfg := &v1alpha4.Cluster{} + if err := yamlUnmarshalStrict(raw, cfg); err != nil { + return nil, errors.Wrap(err, "unable to decode config") + } + // apply defaults for version and convert + return V1Alpha4ToInternal(cfg), nil + } + + // unknown apiVersion if we haven't already returned ... + return nil, errors.Errorf("unknown apiVersion: %s", tm.APIVersion) +} + +// basically metav1.TypeMeta, but with yaml tags +type typeMeta struct { + Kind string `yaml:"kind,omitempty"` + APIVersion string `yaml:"apiVersion,omitempty"` +} + +func yamlUnmarshalStrict(raw []byte, v interface{}) error { + d := yaml.NewDecoder(bytes.NewReader(raw)) + d.KnownFields(true) + return d.Decode(v) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/types.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/types.go new file mode 100644 index 000000000..49b3625cc --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/types.go @@ -0,0 +1,263 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +/* +NOTE: unlike the public types these should not have serialization tags and +should stay 100% internal. These are used to pass around the processed public +config for internal usage. +*/ + +// Cluster contains kind cluster configuration +type Cluster struct { + // The cluster name. 
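+	// (kind uses it to derive the node container names and the kubeconfig
+	// context for the cluster)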
+	// Optional, this will be overridden by --name / KIND_CLUSTER_NAME
+	Name string
+
+	// Nodes contains the list of nodes defined in the `kind` Cluster
+	// If unset this will default to a single control-plane node
+	// Note that if more than one control plane is specified, an external
+	// control plane load balancer will be provisioned implicitly
+	Nodes []Node
+
+	/* Advanced fields */
+
+	// Networking contains cluster wide network settings
+	Networking Networking
+
+	// FeatureGates contains a map of Kubernetes feature gates to whether they
+	// are enabled. The feature gates specified here are passed to all Kubernetes components as flags or in config.
+	//
+	// https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/
+	FeatureGates map[string]bool
+
+	// RuntimeConfig Keys and values are translated into --runtime-config values for kube-apiserver, separated by commas.
+	//
+	// Use this to enable alpha APIs.
+	RuntimeConfig map[string]string
+
+	// KubeadmConfigPatches are applied to the generated kubeadm config as
+	// strategic merge patches to `kustomize build` internally
+	// https://github.com/kubernetes/community/blob/a9cf5c8f3380bb52ebe57b1e2dbdec136d8dd484/contributors/devel/sig-api-machinery/strategic-merge-patch.md
+	// This should be an inline yaml blob-string
+	KubeadmConfigPatches []string
+
+	// KubeadmConfigPatchesJSON6902 are applied to the generated kubeadm config
+	// as patchesJson6902 to `kustomize build`
+	KubeadmConfigPatchesJSON6902 []PatchJSON6902
+
+	// ContainerdConfigPatches are applied to every node's containerd config
+	// in the order listed.
+	// These should be toml strings to be applied as merge patches
+	ContainerdConfigPatches []string
+
+	// ContainerdConfigPatchesJSON6902 are applied to every node's containerd config
+	// in the order listed.
+	// These should be YAML or JSON formatted RFC 6902 JSON patches
+	ContainerdConfigPatchesJSON6902 []string
+}
+
+// Node contains settings for a node in the `kind` Cluster.
+// A node in kind config represents a container that will be provisioned with all the components
+// required for the assigned role in the Kubernetes cluster
+type Node struct {
+	// Role defines the role of the node in the Kubernetes cluster
+	// created by kind
+	//
+	// Defaults to "control-plane"
+	Role NodeRole
+
+	// Image is the node image to use when creating this node
+	// If unset a default image will be used, see defaults.Image
+	Image string
+
+	/* Advanced fields */
+
+	// ExtraMounts describes additional mount points for the node container
+	// These may be used to bind a hostPath
+	ExtraMounts []Mount
+
+	// ExtraPortMappings describes additional port mappings for the node container
+	// bound to a host port
+	ExtraPortMappings []PortMapping
+
+	// KubeadmConfigPatches are applied to the generated kubeadm config as
+	// strategic merge patches to `kustomize build` internally
+	// https://github.com/kubernetes/community/blob/a9cf5c8f3380bb52ebe57b1e2dbdec136d8dd484/contributors/devel/sig-api-machinery/strategic-merge-patch.md
+	// This should be an inline yaml blob-string
+	KubeadmConfigPatches []string
+
+	// KubeadmConfigPatchesJSON6902 are applied to the generated kubeadm config
+	// as patchesJson6902 to `kustomize build`
+	KubeadmConfigPatchesJSON6902 []PatchJSON6902
+}
+
+// NodeRole defines the possible roles for nodes in a Kubernetes cluster managed by `kind`
+type NodeRole string
+
+const (
+	// ControlPlaneRole identifies a node that hosts a Kubernetes control-plane.
+	// NOTE: in single node clusters, control-plane nodes also act as worker
+	// nodes, in which case the taint will be removed. see:
+	// https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#control-plane-node-isolation
+	ControlPlaneRole NodeRole = "control-plane"
+	// WorkerRole identifies a node that hosts a Kubernetes worker
+	WorkerRole NodeRole = "worker"
+)
+
+// Networking contains cluster wide network settings
+type Networking struct {
+	// IPFamily is the network cluster model, currently it can be ipv4 or ipv6
+	IPFamily ClusterIPFamily
+	// APIServerPort is the listen port on the host for the Kubernetes API Server
+	// Defaults to a random port on the host obtained by kind
+	//
+	// NOTE: if you set the special value of `-1` then the node backend
+	// (docker, podman...) will be left to pick the port instead.
+	// This is potentially useful for remote hosts, BUT it means when the container
+	// is restarted it will be randomized. Leave this unset to allow kind to pick it.
+	APIServerPort int32
+	// APIServerAddress is the listen address on the host for the Kubernetes
+	// API Server. This should be an IP address.
+	//
+	// Defaults to 127.0.0.1
+	APIServerAddress string
+	// PodSubnet is the CIDR used for pod IPs
+	// kind will select a default if unspecified
+	PodSubnet string
+	// ServiceSubnet is the CIDR used for services VIPs
+	// kind will select a default if unspecified
+	ServiceSubnet string
+	// If DisableDefaultCNI is true, kind will not install the default CNI setup.
+	// Instead the user should install their own CNI after creating the cluster.
+	DisableDefaultCNI bool
+	// KubeProxyMode defines if kube-proxy should operate in iptables or ipvs mode
+	KubeProxyMode ProxyMode
+}
+
+// ClusterIPFamily defines cluster network IP family
+type ClusterIPFamily string
+
+const (
+	// IPv4Family sets ClusterIPFamily to ipv4
+	IPv4Family ClusterIPFamily = "ipv4"
+	// IPv6Family sets ClusterIPFamily to ipv6
+	IPv6Family ClusterIPFamily = "ipv6"
+)
+
+// ProxyMode defines a proxy mode for kube-proxy
+type ProxyMode string
+
+const (
+	// IPTablesMode sets ProxyMode to iptables
+	IPTablesMode ProxyMode = "iptables"
+	// IPVSMode sets ProxyMode to ipvs
+	IPVSMode ProxyMode = "ipvs"
+)
+
+// PatchJSON6902 represents an inline kustomize json 6902 patch
+// https://tools.ietf.org/html/rfc6902
+type PatchJSON6902 struct {
+	// these fields specify the patch target resource
+	Group   string
+	Version string
+	Kind    string
+	// Patch should contain the contents of the json patch as a string
+	Patch string
+}
+
+// Mount specifies a host volume to mount into a container.
+// This is a close copy of the upstream cri Mount type
+// see: k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2
+// It additionally serializes the "propagation" field with the string enum
+// names on disk as opposed to the int32 values, and the serialized field names
+// have been made closer to core/v1 VolumeMount field names
+// In yaml this looks like:
+// containerPath: /foo
+// hostPath: /bar
+// readOnly: true
+// selinuxRelabel: false
+// propagation: None
+// Propagation may be one of: None, HostToContainer, Bidirectional
+type Mount struct {
+	// Path of the mount within the container.
+	ContainerPath string
+	// Path of the mount on the host. If the hostPath doesn't exist, then runtimes
+	// should report an error. If the hostpath is a symbolic link, runtimes should
+	// follow the symlink and mount the real destination into the container.
+ HostPath string + // If set, the mount is read-only. + Readonly bool + // If set, the mount needs SELinux relabeling. + SelinuxRelabel bool + // Requested propagation mode. + Propagation MountPropagation +} + +// PortMapping specifies a host port mapped into a container port. +// In yaml this looks like: +// containerPort: 80 +// hostPort: 8000 +// listenAddress: 127.0.0.1 +// protocol: TCP +type PortMapping struct { + // Port within the container. + ContainerPort int32 + // Port on the host. + // + // If unset, a random port will be selected. + // + // NOTE: if you set the special value of `-1` then the node backend + // (docker, podman...) will be left to pick the port instead. + // This is potentially useful for remote hosts, BUT it means when the container + // is restarted it will be randomized. Leave this unset to allow kind to pick it. + HostPort int32 + // TODO: add protocol (tcp/udp) and port-ranges + ListenAddress string + // Protocol (TCP/UDP) + Protocol PortMappingProtocol +} + +// MountPropagation represents an "enum" for mount propagation options, +// see also Mount. +type MountPropagation string + +const ( + // MountPropagationNone specifies that no mount propagation + // ("private" in Linux terminology). + MountPropagationNone MountPropagation = "None" + // MountPropagationHostToContainer specifies that mounts get propagated + // from the host to the container ("rslave" in Linux). + MountPropagationHostToContainer MountPropagation = "HostToContainer" + // MountPropagationBidirectional specifies that mounts get propagated from + // the host to the container and from the container to the host + // ("rshared" in Linux). + MountPropagationBidirectional MountPropagation = "Bidirectional" +) + +// PortMappingProtocol represents an "enum" for port mapping protocol options, +// see also PortMapping. +type PortMappingProtocol string + +const ( + // PortMappingProtocolTCP specifies TCP protocol + PortMappingProtocolTCP PortMappingProtocol = "TCP" + // PortMappingProtocolUDP specifies UDP protocol + PortMappingProtocolUDP PortMappingProtocol = "UDP" + // PortMappingProtocolSCTP specifies SCTP protocol + PortMappingProtocolSCTP PortMappingProtocol = "SCTP" +) diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/validate.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/validate.go new file mode 100644 index 000000000..62671621b --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/validate.go @@ -0,0 +1,123 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + "net" + + "sigs.k8s.io/kind/pkg/errors" +) + +// Validate returns a ConfigErrors with an entry for each problem +// with the config, or nil if there are none +func (c *Cluster) Validate() error { + errs := []error{} + + // the api server port only needs checking if we aren't picking a random one + // at runtime + if c.Networking.APIServerPort != 0 { + // validate api server listen port + if err := validatePort(c.Networking.APIServerPort); err != nil { + errs = append(errs, errors.Wrapf(err, "invalid apiServerPort")) + } + } + + // podSubnet should be a valid CIDR + if _, _, err := net.ParseCIDR(c.Networking.PodSubnet); err != nil { + errs = append(errs, errors.Wrapf(err, "invalid podSubnet")) + } + // serviceSubnet should be a valid CIDR + if _, _, err := net.ParseCIDR(c.Networking.ServiceSubnet); err != nil { + errs = append(errs, errors.Wrapf(err, "invalid serviceSubnet")) + } + + // KubeProxyMode should be iptables or ipvs + if c.Networking.KubeProxyMode != IPTablesMode && c.Networking.KubeProxyMode != IPVSMode { + errs = append(errs, errors.Errorf("invalid kubeProxyMode: %s", c.Networking.KubeProxyMode)) + } + + // validate nodes + numByRole := make(map[NodeRole]int32) + // All nodes in the config should be valid + for i, n := range c.Nodes { + // validate the node + if err := n.Validate(); err != nil { + errs = append(errs, errors.Errorf("invalid configuration for node %d: %v", i, err)) + } + // update role count + if num, ok := numByRole[n.Role]; ok { + numByRole[n.Role] = 1 + num + } else { + numByRole[n.Role] = 1 + } + } + + // there must be at least one control plane node + numControlPlane, anyControlPlane := numByRole[ControlPlaneRole] + if !anyControlPlane || numControlPlane < 1 { + errs = append(errs, errors.Errorf("must have at least one %s node", string(ControlPlaneRole))) + } + + if len(errs) > 0 { + return errors.NewAggregate(errs) + } + return nil +} + +// Validate returns a ConfigErrors with an entry for each problem +// with the Node, or nil if there are none +func (n *Node) Validate() error { + errs := []error{} + + // validate node role should be one of the expected values + switch n.Role { + case ControlPlaneRole, + WorkerRole: + default: + errs = append(errs, errors.Errorf("%q is not a valid node role", n.Role)) + } + + // image should be defined + if n.Image == "" { + errs = append(errs, errors.New("image is a required field")) + } + + // validate extra port forwards + for _, mapping := range n.ExtraPortMappings { + if err := validatePort(mapping.HostPort); err != nil { + errs = append(errs, errors.Wrapf(err, "invalid hostPort")) + } + if err := validatePort(mapping.ContainerPort); err != nil { + errs = append(errs, errors.Wrapf(err, "invalid containerPort")) + } + } + + if len(errs) > 0 { + return errors.NewAggregate(errs) + } + + return nil +} + +func validatePort(port int32) error { + // NOTE: -1 is a special value for auto-selecting the port in the container + // backend where possible as opposed to in kind itself. + if port < -1 || port > 65535 { + return errors.Errorf("invalid port number: %d", port) + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/zz_generated.deepcopy.go new file mode 100644 index 000000000..74e0c9d5f --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/zz_generated.deepcopy.go @@ -0,0 +1,179 @@ +// +build !ignore_autogenerated + +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]Node, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.Networking = in.Networking + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.RuntimeConfig != nil { + in, out := &in.RuntimeConfig, &out.RuntimeConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.KubeadmConfigPatches != nil { + in, out := &in.KubeadmConfigPatches, &out.KubeadmConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatchesJSON6902 != nil { + in, out := &in.KubeadmConfigPatchesJSON6902, &out.KubeadmConfigPatchesJSON6902 + *out = make([]PatchJSON6902, len(*in)) + copy(*out, *in) + } + if in.ContainerdConfigPatches != nil { + in, out := &in.ContainerdConfigPatches, &out.ContainerdConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ContainerdConfigPatchesJSON6902 != nil { + in, out := &in.ContainerdConfigPatchesJSON6902, &out.ContainerdConfigPatchesJSON6902 + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mount) DeepCopyInto(out *Mount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mount. +func (in *Mount) DeepCopy() *Mount { + if in == nil { + return nil + } + out := new(Mount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Networking) DeepCopyInto(out *Networking) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking. +func (in *Networking) DeepCopy() *Networking { + if in == nil { + return nil + } + out := new(Networking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Node) DeepCopyInto(out *Node) { + *out = *in + if in.ExtraMounts != nil { + in, out := &in.ExtraMounts, &out.ExtraMounts + *out = make([]Mount, len(*in)) + copy(*out, *in) + } + if in.ExtraPortMappings != nil { + in, out := &in.ExtraPortMappings, &out.ExtraPortMappings + *out = make([]PortMapping, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatches != nil { + in, out := &in.KubeadmConfigPatches, &out.KubeadmConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatchesJSON6902 != nil { + in, out := &in.KubeadmConfigPatchesJSON6902, &out.KubeadmConfigPatchesJSON6902 + *out = make([]PatchJSON6902, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchJSON6902) DeepCopyInto(out *PatchJSON6902) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchJSON6902. +func (in *PatchJSON6902) DeepCopy() *PatchJSON6902 { + if in == nil { + return nil + } + out := new(PatchJSON6902) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortMapping) DeepCopyInto(out *PortMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortMapping. +func (in *PortMapping) DeepCopy() *PortMapping { + if in == nil { + return nil + } + out := new(PortMapping) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/cli/logger.go b/vendor/sigs.k8s.io/kind/pkg/internal/cli/logger.go new file mode 100644 index 000000000..73eff7a1d --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/cli/logger.go @@ -0,0 +1,255 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package cli
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "sigs.k8s.io/kind/pkg/log"
+
+ "sigs.k8s.io/kind/pkg/internal/env"
+)
+
+// Logger is the kind cli's log.Logger implementation
+type Logger struct {
+ writer io.Writer
+ writerMu sync.Mutex
+ verbosity log.Level
+ bufferPool *bufferPool
+ // kind special additions
+ isSmartWriter bool
+}
+
+var _ log.Logger = &Logger{}
+
+// NewLogger returns a new Logger with the given verbosity
+func NewLogger(writer io.Writer, verbosity log.Level) *Logger {
+ l := &Logger{
+ verbosity: verbosity,
+ bufferPool: newBufferPool(),
+ }
+ l.SetWriter(writer)
+ return l
+}
+
+// SetWriter sets the output writer
+func (l *Logger) SetWriter(w io.Writer) {
+ l.writerMu.Lock()
+ defer l.writerMu.Unlock()
+ l.writer = w
+ _, isSpinner := w.(*Spinner)
+ l.isSmartWriter = isSpinner || env.IsSmartTerminal(w)
+}
+
+// ColorEnabled returns true if the caller is OK to write colored output
+func (l *Logger) ColorEnabled() bool {
+ l.writerMu.Lock()
+ defer l.writerMu.Unlock()
+ return l.isSmartWriter
+}
+
+func (l *Logger) getVerbosity() log.Level {
+ return log.Level(atomic.LoadInt32((*int32)(&l.verbosity)))
+}
+
+// SetVerbosity sets the logger's verbosity
+func (l *Logger) SetVerbosity(verbosity log.Level) {
+ atomic.StoreInt32((*int32)(&l.verbosity), int32(verbosity))
+}
+
+// synchronized write to the inner writer
+func (l *Logger) write(p []byte) (n int, err error) {
+ l.writerMu.Lock()
+ defer l.writerMu.Unlock()
+ return l.writer.Write(p)
+}
+
+// writeBuffer writes buf with write, ensuring there is a trailing newline
+func (l *Logger) writeBuffer(buf *bytes.Buffer) {
+ // ensure trailing newline
+ if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ // TODO: should we handle this somehow??
+ // Who logs for the logger? 🤔
+ _, _ = l.write(buf.Bytes())
+}
+
+// print writes a simple string to the log writer
+func (l *Logger) print(message string) {
+ buf := bytes.NewBufferString(message)
+ l.writeBuffer(buf)
+}
+
+// printf is roughly fmt.Fprintf against the log writer
+func (l *Logger) printf(format string, args ...interface{}) {
+ buf := l.bufferPool.Get()
+ fmt.Fprintf(buf, format, args...)
+ l.writeBuffer(buf)
+ l.bufferPool.Put(buf)
+}
+
+// addDebugHeader inserts the debug line header to buf
+func addDebugHeader(buf *bytes.Buffer) {
+ _, file, line, ok := runtime.Caller(3)
+ // lifted from klog
+ if !ok {
+ file = "???"
+ line = 1
+ } else {
+ if slash := strings.LastIndex(file, "/"); slash >= 0 {
+ path := file
+ file = path[slash+1:]
+ if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 {
+ file = path[dirsep+1:]
+ }
+ }
+ }
+ buf.Grow(len(file) + 11) // we know at least this many bytes are needed
+ buf.WriteString("DEBUG: ")
+ buf.WriteString(file)
+ buf.WriteByte(':')
+ fmt.Fprintf(buf, "%d", line)
+ buf.WriteByte(']')
+ buf.WriteByte(' ')
+}
+
+// debug is like print but with a debug log header
+func (l *Logger) debug(message string) {
+ buf := l.bufferPool.Get()
+ addDebugHeader(buf)
+ buf.WriteString(message)
+ l.writeBuffer(buf)
+ l.bufferPool.Put(buf)
+}
+
+// debugf is like printf but with a debug log header
+func (l *Logger) debugf(format string, args ...interface{}) {
+ buf := l.bufferPool.Get()
+ addDebugHeader(buf)
+ fmt.Fprintf(buf, format, args...)
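+ // hand the formatted line to writeBuffer, which appends a trailing
+ // newline if needed; the pooled buffer is returned via Put below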
+ l.writeBuffer(buf)
+ l.bufferPool.Put(buf)
+}
+
+// Warn is part of the log.Logger interface
+func (l *Logger) Warn(message string) {
+ l.print(message)
+}
+
+// Warnf is part of the log.Logger interface
+func (l *Logger) Warnf(format string, args ...interface{}) {
+ l.printf(format, args...)
+}
+
+// Error is part of the log.Logger interface
+func (l *Logger) Error(message string) {
+ l.print(message)
+}
+
+// Errorf is part of the log.Logger interface
+func (l *Logger) Errorf(format string, args ...interface{}) {
+ l.printf(format, args...)
+}
+
+// V is part of the log.Logger interface
+func (l *Logger) V(level log.Level) log.InfoLogger {
+ return infoLogger{
+ logger: l,
+ level: level,
+ enabled: level <= l.getVerbosity(),
+ }
+}
+
+// infoLogger implements log.InfoLogger for Logger
+type infoLogger struct {
+ logger *Logger
+ level log.Level
+ enabled bool
+}
+
+// Enabled is part of the log.InfoLogger interface
+func (i infoLogger) Enabled() bool {
+ return i.enabled
+}
+
+// Info is part of the log.InfoLogger interface
+func (i infoLogger) Info(message string) {
+ if !i.enabled {
+ return
+ }
+ // for > 0, we are writing debug messages, include extra info
+ if i.level > 0 {
+ i.logger.debug(message)
+ } else {
+ i.logger.print(message)
+ }
+}
+
+// Infof is part of the log.InfoLogger interface
+func (i infoLogger) Infof(format string, args ...interface{}) {
+ if !i.enabled {
+ return
+ }
+ // for > 0, we are writing debug messages, include extra info
+ if i.level > 0 {
+ i.logger.debugf(format, args...)
+ } else {
+ i.logger.printf(format, args...)
+ }
+}
+
+// bufferPool is a type safe sync.Pool of *bytes.Buffer, guaranteed to be Reset
+type bufferPool struct {
+ sync.Pool
+}
+
+// newBufferPool returns a new bufferPool
+func newBufferPool() *bufferPool {
+ return &bufferPool{
+ sync.Pool{
+ New: func() interface{} {
+ // The Pool's New function should generally only return pointer
+ // types, since a pointer can be put into the return interface
+ // value without an allocation:
+ return new(bytes.Buffer)
+ },
+ },
+ }
+}
+
+// Get obtains a buffer from the pool
+func (b *bufferPool) Get() *bytes.Buffer {
+ return b.Pool.Get().(*bytes.Buffer)
+}
+
+// Put returns a buffer to the pool, resetting it first
+func (b *bufferPool) Put(x *bytes.Buffer) {
+ // only store small buffers to avoid pointless allocation
+ // avoid keeping arbitrarily large buffers
+ if x.Len() > 256 {
+ return
+ }
+ x.Reset()
+ b.Pool.Put(x)
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/cli/override.go b/vendor/sigs.k8s.io/kind/pkg/internal/cli/override.go
new file mode 100644
index 000000000..695106522
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/internal/cli/override.go
@@ -0,0 +1,18 @@
+package cli
+
+import (
+ "os"
+
+ "github.com/spf13/pflag"
+)
+
+// OverrideDefaultName conditionally allows overriding the default cluster name
+// by setting the KIND_CLUSTER_NAME environment variable
+// only if --name wasn't set explicitly
+func OverrideDefaultName(fs *pflag.FlagSet) {
+ if !fs.Changed("name") {
+ if name := os.Getenv("KIND_CLUSTER_NAME"); name != "" {
+ _ = fs.Set("name", name)
+ }
+ }
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/cli/spinner.go b/vendor/sigs.k8s.io/kind/pkg/internal/cli/spinner.go
new file mode 100644
index 000000000..2af86a250
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/internal/cli/spinner.go
@@ -0,0 +1,168 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "fmt" + "io" + "runtime" + "sync" + "time" +) + +// custom CLI loading spinner for kind +var spinnerFrames = []string{ + "⠈⠁", + "⠈⠑", + "⠈⠱", + "⠈⡱", + "⢀⡱", + "⢄⡱", + "⢄⡱", + "⢆⡱", + "⢎⡱", + "⢎⡰", + "⢎⡠", + "⢎⡀", + "⢎⠁", + "⠎⠁", + "⠊⠁", +} + +// Spinner is a simple and efficient CLI loading spinner used by kind +// It is simplistic and assumes that the line length will not change. +type Spinner struct { + stop chan struct{} // signals writer goroutine to stop from Stop() + stopped chan struct{} // signals Stop() that the writer goroutine stopped + mu *sync.Mutex // protects the mutable bits + // below are protected by mu + running bool + writer io.Writer + ticker *time.Ticker // signals that it is time to write a frame + prefix string + suffix string + // format string used to write a frame, depends on the host OS / terminal + frameFormat string +} + +// spinner implements writer +var _ io.Writer = &Spinner{} + +// NewSpinner initializes and returns a new Spinner that will write to w +// NOTE: w should be os.Stderr or similar, and it should be a Terminal +func NewSpinner(w io.Writer) *Spinner { + frameFormat := "\x1b[?7l\r%s%s%s\x1b[?7h" + // toggling wrapping seems to behave poorly on windows + // in general only the simplest escape codes behave well at the moment, + // and only in newer shells + if runtime.GOOS == "windows" { + frameFormat = "\r%s%s%s" + } + return &Spinner{ + stop: make(chan struct{}, 1), + stopped: make(chan struct{}), + mu: &sync.Mutex{}, + writer: w, + frameFormat: frameFormat, + } +} + +// SetPrefix sets the prefix to print before the spinner +func (s *Spinner) SetPrefix(prefix string) { + s.mu.Lock() + defer s.mu.Unlock() + s.prefix = prefix +} + +// SetSuffix sets the suffix to print after the spinner +func (s *Spinner) SetSuffix(suffix string) { + s.mu.Lock() + defer s.mu.Unlock() + s.suffix = suffix +} + +// Start starts the spinner running +func (s *Spinner) Start() { + s.mu.Lock() + defer s.mu.Unlock() + // don't start if we've already started + if s.running { + return + } + // flag that we've started + s.running = true + // start / create a frame ticker + s.ticker = time.NewTicker(time.Millisecond * 100) + // spin in the background + go func() { + // write frames forever (until signaled to stop) + for { + for _, frame := range spinnerFrames { + select { + // prefer stopping, select this signal first + case <-s.stop: + func() { + s.mu.Lock() + defer s.mu.Unlock() + s.ticker.Stop() // free up the ticker + s.running = false // mark as stopped (it's fine to start now) + s.stopped <- struct{}{} // tell Stop() that we're done + }() + return // ... 
and stop
+ // otherwise continue and write one frame
+ case <-s.ticker.C:
+ func() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ fmt.Fprintf(s.writer, s.frameFormat, s.prefix, frame, s.suffix)
+ }()
+ }
+ }
+ }()
+}
+
+// Stop signals the spinner to stop
+func (s *Spinner) Stop() {
+ s.mu.Lock()
+ if !s.running {
+ s.mu.Unlock()
+ return
+ }
+ // try to stop, do nothing if channel is full (i.e. already busy stopping)
+ s.stop <- struct{}{}
+ s.mu.Unlock()
+ // wait for stop to be finished
+ <-s.stopped
+}
+
+// Write implements io.Writer, interrupting the spinner and writing to
+// the inner writer
+func (s *Spinner) Write(p []byte) (n int, err error) {
+ // lock first, so nothing else can start writing until we are done
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ // if the spinner is not running, just write directly
+ if !s.running {
+ return s.writer.Write(p)
+ }
+ // otherwise: we will rewrite the line first
+ if _, err := s.writer.Write([]byte("\r")); err != nil {
+ return 0, err
+ }
+ return s.writer.Write(p)
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/cli/status.go b/vendor/sigs.k8s.io/kind/pkg/internal/cli/status.go
new file mode 100644
index 000000000..d7ef7eb7a
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/internal/cli/status.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package cli + +import ( + "fmt" + + "sigs.k8s.io/kind/pkg/log" +) + +// Status is used to track ongoing status in a CLI, with a nice loading spinner +// when attached to a terminal +type Status struct { + spinner *Spinner + status string + logger log.Logger + // for controlling coloring etc + successFormat string + failureFormat string +} + +// StatusForLogger returns a new status object for the logger l, +// if l is the kind cli logger and the writer is a Spinner, that spinner +// will be used for the status +func StatusForLogger(l log.Logger) *Status { + s := &Status{ + logger: l, + successFormat: " ✓ %s\n", + failureFormat: " ✗ %s\n", + } + // if we're using the CLI logger, check for if it has a spinner setup + // and wire the status to that + if v, ok := l.(*Logger); ok { + if v2, ok := v.writer.(*Spinner); ok { + s.spinner = v2 + // use colored success / failure messages + s.successFormat = " \x1b[32m✓\x1b[0m %s\n" + s.failureFormat = " \x1b[31m✗\x1b[0m %s\n" + } + } + return s +} + +// Start starts a new phase of the status, if attached to a terminal +// there will be a loading spinner with this status +func (s *Status) Start(status string) { + s.End(true) + // set new status + s.status = status + if s.spinner != nil { + s.spinner.SetSuffix(fmt.Sprintf(" %s ", s.status)) + s.spinner.Start() + } else { + s.logger.V(0).Infof(" • %s ...\n", s.status) + } +} + +// End completes the current status, ending any previous spinning and +// marking the status as success or failure +func (s *Status) End(success bool) { + if s.status == "" { + return + } + + if s.spinner != nil { + s.spinner.Stop() + fmt.Fprint(s.spinner.writer, "\r") + } + if success { + s.logger.V(0).Infof(s.successFormat, s.status) + } else { + s.logger.V(0).Infof(s.failureFormat, s.status) + } + + s.status = "" +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/env/term.go b/vendor/sigs.k8s.io/kind/pkg/internal/env/term.go new file mode 100644 index 000000000..5f809dd9f --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/env/term.go @@ -0,0 +1,107 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package env + +import ( + "io" + "os" + "runtime" + + isatty "github.com/mattn/go-isatty" +) + +// a fake TTY type for testing that can only be implemented within this package +type isTestFakeTTY interface { + isTestFakeTTY() +} + +// IsTerminal returns true if the writer w is a terminal +func IsTerminal(w io.Writer) bool { + // check for internal fake type we can use for testing. + if _, ok := (w).(isTestFakeTTY); ok { + return true + } + // check for real terminals + if v, ok := (w).(*os.File); ok { + return isatty.IsTerminal(v.Fd()) + } + return false +} + +// IsSmartTerminal returns true if the writer w is a terminal AND +// we think that the terminal is smart enough to use VT escape codes etc. 
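+// Concretely: w must be a real TTY, TERM must not be "dumb" or the buggy
+// "st-256color", NO_COLOR must be unset, and known-bad fake TTYs (older
+// Windows terminals, Travis CI) are ruled out; see isSmartTerminal below.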
+func IsSmartTerminal(w io.Writer) bool { + return isSmartTerminal(w, runtime.GOOS, os.LookupEnv) +} + +func isSmartTerminal(w io.Writer, GOOS string, lookupEnv func(string) (string, bool)) bool { + // Not smart if it's not a tty + if !IsTerminal(w) { + return false + } + + // getenv helper for when we only care about the value + getenv := func(e string) string { + v, _ := lookupEnv(e) + return v + } + + // Explicit request for no ANSI escape codes + // https://no-color.org/ + if _, set := lookupEnv("NO_COLOR"); set { + return false + } + + // Explicitly dumb terminals are not smart + // https://en.wikipedia.org/wiki/Computer_terminal#Dumb_terminals + term := getenv("TERM") + if term == "dumb" { + return false + } + // st has some bug 🤷♂️ + // https://github.com/kubernetes-sigs/kind/issues/1892 + if term == "st-256color" { + return false + } + + // On Windows WT_SESSION is set by the modern terminal component. + // Older terminals have poor support for UTF-8, VT escape codes, etc. + if GOOS == "windows" && getenv("WT_SESSION") == "" { + return false + } + + /* CI Systems with bad Fake TTYs */ + // Travis CI + // https://github.com/kubernetes-sigs/kind/issues/1478 + // We can detect it with documented magical environment variables + // https://docs.travis-ci.com/user/environment-variables/#default-environment-variables + if getenv("HAS_JOSH_K_SEAL_OF_APPROVAL") == "true" && getenv("TRAVIS") == "true" { + return false + } + + // OK, we'll assume it's smart now, given no evidence otherwise. + return true +} + +// trivial fake TTY writer for testing +type testFakeTTY struct{} + +func (t *testFakeTTY) Write(p []byte) (int, error) { + return len(p), nil +} + +func (t *testFakeTTY) isTestFakeTTY() {} diff --git a/vendor/sigs.k8s.io/kind/pkg/log/doc.go b/vendor/sigs.k8s.io/kind/pkg/log/doc.go new file mode 100644 index 000000000..ffda513b9 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/log/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package log defines a logging interface that kind uses +// This is roughly a minimal subset of klog github.com/kubernetes/klog +package log diff --git a/vendor/sigs.k8s.io/kind/pkg/log/noop.go b/vendor/sigs.k8s.io/kind/pkg/log/noop.go new file mode 100644 index 000000000..f5a20ee69 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/log/noop.go @@ -0,0 +1,47 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package log + +// NoopLogger implements the Logger interface and never logs anything +type NoopLogger struct{} + +// Warn meets the Logger interface but does nothing +func (n NoopLogger) Warn(message string) {} + +// Warnf meets the Logger interface but does nothing +func (n NoopLogger) Warnf(format string, args ...interface{}) {} + +// Error meets the Logger interface but does nothing +func (n NoopLogger) Error(message string) {} + +// Errorf meets the Logger interface but does nothing +func (n NoopLogger) Errorf(format string, args ...interface{}) {} + +// V meets the Logger interface but does nothing +func (n NoopLogger) V(level Level) InfoLogger { return NoopInfoLogger{} } + +// NoopInfoLogger implements the InfoLogger interface and never logs anything +type NoopInfoLogger struct{} + +// Enabled meets the InfoLogger interface but always returns false +func (n NoopInfoLogger) Enabled() bool { return false } + +// Info meets the InfoLogger interface but does nothing +func (n NoopInfoLogger) Info(message string) {} + +// Infof meets the InfoLogger interface but does nothing +func (n NoopInfoLogger) Infof(format string, args ...interface{}) {} diff --git a/vendor/sigs.k8s.io/kind/pkg/log/types.go b/vendor/sigs.k8s.io/kind/pkg/log/types.go new file mode 100644 index 000000000..875eabdf4 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/log/types.go @@ -0,0 +1,66 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +// Level is a verbosity logging level for Info logs +// See also https://github.com/kubernetes/klog +type Level int32 + +// Logger defines the logging interface kind uses +// It is roughly a subset of github.com/kubernetes/klog +type Logger interface { + // Warn should be used to write user facing warnings + Warn(message string) + // Warnf should be used to write Printf style user facing warnings + Warnf(format string, args ...interface{}) + // Error may be used to write an error message when it occurs + // Prefer returning an error instead in most cases + Error(message string) + // Errorf may be used to write a Printf style error message when it occurs + // Prefer returning an error instead in most cases + Errorf(format string, args ...interface{}) + // V() returns an InfoLogger for a given verbosity Level + // + // Normal verbosity levels: + // V(0): normal user facing messages go to V(0) + // V(1): debug messages start when V(N > 0), these should be high level + // V(2): more detailed log messages + // V(3+): trace level logging, in increasing "noisiness" ... allowing + // arbitrarily detailed logging at extremely low cost unless the + // logger has actually been configured to display these (E.G. 
via the -v + // command line flag) + // + // It is expected that the returned InfoLogger will be extremely cheap + // to interact with for a Level greater than the enabled level + V(Level) InfoLogger +} + +// InfoLogger defines the info logging interface kind uses +// It is roughly a subset of Verbose from github.com/kubernetes/klog +type InfoLogger interface { + // Info is used to write a user facing status message + // + // See: Logger.V + Info(message string) + // Infof is used to write a Printf style user facing status message + Infof(format string, args ...interface{}) + // Enabled should return true if this verbosity level is enabled + // on the Logger + // + // See: Logger.V + Enabled() bool +}
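
For orientation, the `log.Logger` and `log.InfoLogger` interfaces above are the integration point for everything the vendored kind code logs. A minimal sketch of wiring them up from consuming code; this assumes kind's public `pkg/cmd` and `pkg/cluster` packages (which are not part of this diff), and the cluster name is purely illustrative:

```go
package main

import (
	"sigs.k8s.io/kind/pkg/cluster"
	kindcmd "sigs.k8s.io/kind/pkg/cmd"
)

func main() {
	// kindcmd.NewLogger returns kind's stock log.Logger implementation
	// (the internal cli.Logger above), attaching the Spinner when stderr
	// is a smart terminal and falling back to plain output otherwise.
	logger := kindcmd.NewLogger()

	// ProviderWithLogger threads the logger through kind's cluster API,
	// so create/delete progress is reported via the Status/Spinner types.
	provider := cluster.NewProvider(cluster.ProviderWithLogger(logger))

	// "opm-test" is a hypothetical cluster name used only for illustration.
	if err := provider.Create("opm-test"); err != nil {
		logger.Errorf("failed to create kind cluster: %v", err)
	}
}
```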