diff --git a/.circleci/config.yml b/.circleci/config.yml index b675a0d8e..4b6e3bd51 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,20 +1,62 @@ version: 2 jobs: build: - working_directory: /go/src/github.com/weaveworks/flux - docker: - - image: circleci/golang:1.10 - - image: memcached + working_directory: ~/flux + machine: true + environment: + GO_VERSION: 1.12.5 + # We don't need a GOPATH but CircleCI defines it, so we override it + GOPATH: /home/circleci/go + PATH: /bin:/usr/bin:/usr/local/go/bin:/home/circleci/go/bin steps: - checkout - - setup_remote_docker - - - run: curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh - - run: dep ensure -vendor-only + - run: + name: Install Golang + command: | + curl -OL https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz + tar -xf go${GO_VERSION}.linux-amd64.tar.gz + sudo rm -rf /usr/local/go + sudo mv go /usr/local + mkdir -p "$HOME/go/bin" + go version + - run: + name: Update packages and Start Memcached + command: | + # These repos fail and we don't need them: + sudo rm /etc/apt/sources.list.d/circleci_trusty.list /etc/apt/sources.list.d/google-chrome.list + sudo apt-get update + sudo apt-get install -y git rng-tools docker-ce memcached + git version + docker version + - restore_cache: + keys: + - cache-{{ checksum "Makefile" }} + - cache- + - restore_cache: + keys: + - go-build-{{ .Branch }}-{{ .Revision }} + - go-build-{{ .Branch }}- + - go-build- + - restore_cache: + keys: + - go-mod-{{ checksum "go.mod" }} + - go-mod- - run: make check-generated - run: make test TEST_FLAGS="-race -tags integration -timeout 60s" - run: make all - + - run: make e2e + - save_cache: + key: cache-{{ checksum "Makefile" }} + paths: + - "cache" + - save_cache: + key: go-build-{{ .Branch }}-{{ .Revision }} + paths: + - "~/.cache/go-build/" + - save_cache: + key: go-mod-{{ checksum "go.mod" }} + paths: + - "~/go/pkg/mod/" - deploy: name: Maybe push prerelease images command: | @@ 
-27,7 +69,6 @@ jobs: docker tag "docker.io/weaveworks/helm-operator:$(docker/image-tag)" "docker.io/weaveworks/helm-operator-prerelease:$(docker/image-tag)" docker push "docker.io/weaveworks/helm-operator-prerelease:$(docker/image-tag)" fi - - deploy: name: Maybe push release image and upload binaries command: | @@ -43,17 +84,6 @@ jobs: RELEASE_TAG=$(echo "$CIRCLE_TAG" | cut -c 6-) docker push "docker.io/weaveworks/helm-operator:${RELEASE_TAG}" fi - e2e-testing: - machine: true - working_directory: ~/go/src/github.com/weaveworks/flux - steps: - - checkout - - run: test/e2e/e2e-golang.sh - - run: test/e2e/e2e-flux-build.sh - - run: test/e2e/e2e-kind.sh - - run: test/e2e/e2e-helm.sh - - run: test/e2e/e2e-git.sh - - run: test/e2e/e2e-flux-chart.sh workflows: version: 2 @@ -63,6 +93,4 @@ workflows: filters: tags: only: /(helm-)?[0-9]+(\.[0-9]+)*(-[a-z]+)?/ - - e2e-testing: - requires: - - build + diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 1352d54ca..e9ff42cbb 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -2,7 +2,7 @@ name: Bug report about: Create a report to help us improve Weave Flux title: '' -labels: [blocked-needs-validation, bug] +labels: blocked-needs-validation, bug assignees: '' --- diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index e6d76287e..66345527c 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -2,7 +2,7 @@ name: Feature request about: Suggest a new feature for Weave Flux title: '' -labels: [blocked-needs-validation, enhancement] +labels: blocked-needs-validation, enhancement assignees: '' --- diff --git a/.gitignore b/.gitignore index 7e72e279e..1b9eddf4b 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,14 @@ _testmain.go .ackrc .envrc +### Package builds (e.g. 
snap of fluxctl) +fluxctl_*_*.snap +fluxctl_*_*.snap.xdelta3 +parts +prime +stage + + # Specific to this project vendor/* !vendor/manifest @@ -39,4 +47,6 @@ testdata/sidecar/.sidecar docker/fluxy-dumbconf.priv test/profiles test/bin/kubectl +test/bin/kustomize test/bin/helm +test/bin/kind diff --git a/CHANGELOG-helmop.md b/CHANGELOG-helmop.md index 92e0f1b5a..714e67d5d 100644 --- a/CHANGELOG-helmop.md +++ b/CHANGELOG-helmop.md @@ -1,3 +1,36 @@ +## 0.9.2 (2019-06-13) + +### Bug fixes + + - Ensure releases are enqueued on clone change only + [weaveworks/flux#2081][#2081] + - Reorder start of processes on boot and verify informer cache sync + early, to prevent the operator from hanging on boot + [weaveworks/flux#2103][#2103] + - Use openssh-client rather than openssh in container image + [weaveworks/flux#2142][#2142] + +### Improvements + + - Enable pprof to ease profiling + [weaveworks/flux#2095][#2095] + +### Maintenance and documentation + + - Add notes about production setup Tiller + [weaveworks/flux#2146][#2146] + +### Thanks + +Thanks @2opremio, @willholley ,@runningman84, @stefanprodan, @squaremo, +@rossf7, @hiddeco for contributing. + +[#2081]: https://github.com/weaveworks/flux/pull/2081 +[#2095]: https://github.com/weaveworks/flux/pull/2095 +[#2103]: https://github.com/weaveworks/flux/pull/2103 +[#2142]: https://github.com/weaveworks/flux/pull/2142 +[#2146]: https://github.com/weaveworks/flux/pull/2146 + ## 0.9.1 (2019-05-09) ### Bug fixes diff --git a/CHANGELOG.md b/CHANGELOG.md index 259bc5097..f5fcea3e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,46 @@ This is the changelog for the Flux daemon; the changelog for the Helm operator is in [./CHANGELOG-helmop.md](./CHANGELOG-helmop.md). +## 1.12.3 (2019-05-22) + +This is a patch release. 
+ +### Fixes + +- Show tag image for workload in list-images + [weaveworks/flux#2024][] +- Log warning when not applying resource by namespace + [weaveworks/flux#2034][] +- Always list the status of a workload in `fluxctl` + [weaveworks/flux#2035][] +- Ensure Flux installs gnutls >=3.6.7, to resolve security scan issues + [weaveworks/flux#2044][] +- Rename controller to workload in `fluxctl release` + [weaveworks/flux#2048][] +- Give full output of git command on errors + [weaveworks/flux#2054][] + +### Maintenance and documentation + +- Warn about Flux only supporting YAML and not JSON + [weaveworks/flux#2010][] +- Fix and refactor end-to-end tests + [weaveworks/flux#2050][] [weaveworks/flux#2058][] + +### Thanks + +Thanks to @2opremio, @hiddeco, @squaremo and @xtellurian for contributions. + +[weaveworks/flux#2010]: https://github.com/weaveworks/flux/pull/2010 +[weaveworks/flux#2024]: https://github.com/weaveworks/flux/pull/2024 +[weaveworks/flux#2034]: https://github.com/weaveworks/flux/pull/2034 +[weaveworks/flux#2035]: https://github.com/weaveworks/flux/pull/2035 +[weaveworks/flux#2044]: https://github.com/weaveworks/flux/pull/2044 +[weaveworks/flux#2048]: https://github.com/weaveworks/flux/pull/2048 +[weaveworks/flux#2050]: https://github.com/weaveworks/flux/pull/2050 +[weaveworks/flux#2054]: https://github.com/weaveworks/flux/pull/2054 +[weaveworks/flux#2058]: https://github.com/weaveworks/flux/pull/2058 + ## 1.12.2 (2019-05-08) This is a patch release. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1a3040961..f9b02fb39 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,7 +5,24 @@ pull requests. This document outlines some of the conventions on development workflow, commit message formatting, contact points and other resources to make it easier to get your contribution accepted. -We gratefully welcome improvements to documentation as well as to code. +We gratefully welcome improvements to issues and documentation as well as to code. 
+ +## Working on issues + +If you like Flux and want to get involved in the project, a great way to get started +is reviewing our [blocked-needs-validation](https://github.com/weaveworks/flux/issues?q=is%3Aissue+is%3Aopen+label%3Ablocked-needs-validation) issues. + +The idea here is that new issues are confirmed, which might require asking +for more information, testing with a fresh Flux environment. Once confirmed, +the `blocked-needs-validation` label is removed, and the issue can be worked +on. + +To set up Flux to test things, there's documentation about setting up a +[standalone install](site/get-started.md) and a [Helm +install](site/helm-get-started.md), which might be helpful. + +Please talk to us on Slack, if you should get stuck anywhere. We appreciate +any help and look forward to talking to you soon! ## Certificate of Origin @@ -24,7 +41,8 @@ The project uses Slack: To join the conversation, simply join the The Flux developers use a mailing list to discuss development as well. Simply subscribe to [flux-dev on Google Groups](https://groups.google.com/forum/#!forum/flux-dev) to join the -conversation. +conversation (this will also add an invitation to your Google calendar +for our [Flux meeting](https://github.com/weaveworks/flux/wiki/Meeting)). ## Getting Started diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index b7c2e4bd6..000000000 --- a/Gopkg.lock +++ /dev/null @@ -1,1425 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - digest = "1:dd11641415f7854e5ee0c83b3d865bf8dced203f6b050b4ad7bd16707dc25cc6" - name = "cloud.google.com/go" - packages = ["compute/metadata"] - pruneopts = "" - revision = "fcb9a2d5f791d07be64506ab54434de65989d370" - version = "v0.37.4" - -[[projects]] - digest = "1:6158256042564abf0da300ea7cb016f79ddaf24fdda2cc06c9712b0c2e06dd2a" - name = "contrib.go.opencensus.io/exporter/ocagent" - packages = ["."] - pruneopts = "" - revision = "dcb33c7f3b7cfe67e8a2cea10207ede1b7c40764" - version = "v0.4.12" - -[[projects]] - branch = "master" - digest = "1:99a3f886f2b4b7a527cdcd92892c91d22c5c7494f1bc6a4c75f263bdacf3d4aa" - name = "github.com/2opremio/go-k8s-portforward" - packages = ["."] - pruneopts = "" - revision = "65d7d2deb269dfb7f143ddb9eff5d4e642c653b3" - -[[projects]] - digest = "1:1ad46af12dc747317e6f0af297028af2e125e2ccd37fbe837a184c493a217dad" - name = "github.com/Azure/go-autorest" - packages = [ - "autorest", - "autorest/adal", - "autorest/azure", - "autorest/date", - "logger", - "tracing", - ] - pruneopts = "" - revision = "d10011c9c6fd34ca45f1e15ee330e3af8ed8b96a" - version = "v11.7.1" - -[[projects]] - digest = "1:e4b30804a381d7603b8a344009987c1ba351c26043501b23b8c7ce21f0b67474" - name = "github.com/BurntSushi/toml" - packages = ["."] - pruneopts = "" - revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005" - version = "v0.3.1" - -[[projects]] - digest = "1:debe400440582cfe5580591afd0f7fdef5b17a25945f06bd3087bc67b4b8fd03" - name = "github.com/Masterminds/goutils" - packages = ["."] - pruneopts = "" - revision = "41ac8693c5c10a92ea1ff5ac3a7f95646f6123b0" - version = "v1.1.0" - -[[projects]] - digest = "1:b856d8248663c39265a764561c1a1a149783f6cc815feb54a1f3a591b91f6eca" - name = "github.com/Masterminds/semver" - packages = ["."] - pruneopts = "" - revision = "c7af12943936e8c39859482e61f0574c2fd7fc75" - version = "v1.4.2" - -[[projects]] - digest = "1:844a29d20675e6187639c578b6c690d198aac895ede377ebcb5545405d0ef80d" - name = 
"github.com/Masterminds/sprig" - packages = ["."] - pruneopts = "" - revision = "9f8fceff796fb9f4e992cd2bece016be0121ab74" - version = "2.19.0" - -[[projects]] - digest = "1:e1fc4e86ce21dca811fb1a59438f55e442cfcbdfc3346637ad87a448dca6a657" - name = "github.com/aws/aws-sdk-go" - packages = [ - "aws", - "aws/awserr", - "aws/awsutil", - "aws/client", - "aws/client/metadata", - "aws/corehandlers", - "aws/credentials", - "aws/credentials/ec2rolecreds", - "aws/credentials/endpointcreds", - "aws/credentials/processcreds", - "aws/credentials/stscreds", - "aws/csm", - "aws/defaults", - "aws/ec2metadata", - "aws/endpoints", - "aws/request", - "aws/session", - "aws/signer/v4", - "internal/ini", - "internal/sdkio", - "internal/sdkrand", - "internal/sdkuri", - "internal/shareddefaults", - "private/protocol", - "private/protocol/json/jsonutil", - "private/protocol/jsonrpc", - "private/protocol/query", - "private/protocol/query/queryutil", - "private/protocol/rest", - "private/protocol/xml/xmlutil", - "service/ecr", - "service/sts", - ] - pruneopts = "" - revision = "56c1def75689cceec1fa6f14c2eedb4b798827f9" - version = "v1.19.11" - -[[projects]] - branch = "master" - digest = "1:c0bec5f9b98d0bc872ff5e834fac186b807b656683bd29cb82fb207a1513fabb" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - branch = "master" - digest = "1:f3ae35eb2933cb4f4e26b67e53fa5e09a444f2c91e5611336b2c9be50fd4b161" - name = "github.com/bradfitz/gomemcache" - packages = ["memcache"] - pruneopts = "" - revision = "551aad21a6682b95329c1f5bd62ee5060d64f7e8" - -[[projects]] - digest = "1:10139b9c841acd336ceff02e8a85e0c558dee738b2277bb24d61d9d3c90ef1f2" - name = "github.com/census-instrumentation/opencensus-proto" - packages = [ - "gen-go/agent/common/v1", - "gen-go/agent/metrics/v1", - "gen-go/agent/trace/v1", - "gen-go/metrics/v1", - "gen-go/resource/v1", - "gen-go/trace/v1", - ] - pruneopts = "" - revision = 
"a105b96453fe85139acc07b68de48f2cbdd71249" - version = "v0.2.0" - -[[projects]] - digest = "1:a9854984bc40330dde2125537b7f46d0a8d7860b3750de2e7cd0a6f904506212" - name = "github.com/cyphar/filepath-securejoin" - packages = ["."] - pruneopts = "" - revision = "a261ee33d7a517f054effbf451841abaafe3e0fd" - version = "v0.2.2" - -[[projects]] - digest = "1:0deddd908b6b4b768cfc272c16ee61e7088a60f7fe2f06c547bd3d8e1f8b8e77" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:6098222470fe0172157ce9bbef5d2200df4edde17ee649c5d6e48330e4afa4c6" - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - pruneopts = "" - revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" - version = "v3.2.0" - -[[projects]] - branch = "master" - digest = "1:44fa5dc9f471d5285648d966808fe191cde2c80d337683886d9b7aa5091b39dc" - name = "github.com/docker/distribution" - packages = [ - ".", - "digestset", - "manifest", - "manifest/manifestlist", - "manifest/schema1", - "manifest/schema2", - "metrics", - "reference", - "registry/api/errcode", - "registry/api/v2", - "registry/client", - "registry/client/auth", - "registry/client/auth/challenge", - "registry/client/transport", - "registry/storage/cache", - "registry/storage/cache/memory", - ] - pruneopts = "" - revision = "6c9727e5e5ded4589f2d653a2e523b41619af05c" - source = "github.com/2opremio/distribution" - -[[projects]] - branch = "master" - digest = "1:2b126e77be4ab4b92cdb3924c87894dd76bf365ba282f358a13133e848aa0059" - name = "github.com/docker/go-metrics" - packages = ["."] - pruneopts = "" - revision = "b84716841b82eab644a0c64fc8b42d480e49add5" - -[[projects]] - branch = "master" - digest = "1:1d0c4a7f7d52eb8da75993ab0526467360795e9dfdd559020a15bfb53a028809" - name = "github.com/docker/libtrust" - packages = ["."] - pruneopts = "" - revision = "aabc10ec26b754e797f9028f4589c5b7bd90dc20" - -[[projects]] - branch = 
"master" - digest = "1:d6c13a378213e3de60445e49084b8a0a9ce582776dfc77927775dbeb3ff72a35" - name = "github.com/docker/spdystream" - packages = [ - ".", - "spdy", - ] - pruneopts = "" - revision = "6480d4af844c189cf5dd913db24ddd339d3a4f85" - -[[projects]] - digest = "1:4216202f4088a73e2982df875e2f0d1401137bbc248e57391e70547af167a18a" - name = "github.com/evanphx/json-patch" - packages = ["."] - pruneopts = "" - revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5" - version = "v4.1.0" - -[[projects]] - digest = "1:b13707423743d41665fd23f0c36b2f37bb49c30e94adb813319c44188a51ba22" - name = "github.com/ghodss/yaml" - packages = ["."] - pruneopts = "" - revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" - version = "v1.0.0" - -[[projects]] - digest = "1:48e65aaf8ce34ffb3e8d56daa9417826db162afbc2040705db331e9a2e9eebe3" - name = "github.com/go-kit/kit" - packages = [ - "log", - "log/level", - "metrics", - "metrics/internal/lv", - "metrics/prometheus", - ] - pruneopts = "" - revision = "12210fb6ace19e0496167bb3e667dcd91fa9f69b" - version = "v0.8.0" - -[[projects]] - digest = "1:df89444601379b2e1ee82bf8e6b72af9901cbeed4b469fa380a519c89c339310" - name = "github.com/go-logfmt/logfmt" - packages = ["."] - pruneopts = "" - revision = "07c9b44f60d7ffdfb7d8efe1ad539965737836dc" - version = "v0.4.0" - -[[projects]] - digest = "1:9ab1b1c637d7c8f49e39d8538a650d7eb2137b076790cff69d160823b505964c" - name = "github.com/gobwas/glob" - packages = [ - ".", - "compiler", - "match", - "syntax", - "syntax/ast", - "syntax/lexer", - "util/runes", - "util/strings", - ] - pruneopts = "" - revision = "5ccd90ef52e1e632236f7326478d4faa74f99438" - version = "v0.2.3" - -[[projects]] - digest = "1:8b49904d2ff610bb1c7414c7f976ed46450c053633274fafb61a66589da056a2" - name = "github.com/gogo/googleapis" - packages = ["google/rpc"] - pruneopts = "" - revision = "d31c731455cb061f42baff3bda55bad0118b126b" - version = "v1.2.0" - -[[projects]] - digest = 
"1:fd53b471edb4c28c7d297f617f4da0d33402755f58d6301e7ca1197ef0a90937" - name = "github.com/gogo/protobuf" - packages = [ - "proto", - "sortkeys", - "types", - ] - pruneopts = "" - revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c" - version = "v1.2.1" - -[[projects]] - digest = "1:9cc85fd9c6beff7b4ef0f16077d6f667d49e69224b00b82b1372d33bf4991415" - name = "github.com/gogo/status" - packages = ["."] - pruneopts = "" - revision = "935308aef7372e7685e8fbee162aae8f7a7e515a" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:f91cac5a5ab507a267224eda8ad9dcf52717fddd15ddde4935bd00fa41d418d1" - name = "github.com/golang/gddo" - packages = ["httputil/header"] - pruneopts = "" - revision = "5a2505f3dbf049b47a091040906b7b2856339099" - -[[projects]] - branch = "master" - digest = "1:f9714c0c017f2b821bccceeec2c7a93d29638346bb546c36ca5f90e751f91b9e" - name = "github.com/golang/groupcache" - packages = ["lru"] - pruneopts = "" - revision = "5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b" - -[[projects]] - digest = "1:529d738b7976c3848cae5cf3a8036440166835e389c1f617af701eeb12a0518d" - name = "github.com/golang/protobuf" - packages = [ - "jsonpb", - "proto", - "protoc-gen-go/descriptor", - "protoc-gen-go/generator", - "protoc-gen-go/generator/internal/remap", - "protoc-gen-go/plugin", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/struct", - "ptypes/timestamp", - "ptypes/wrappers", - ] - pruneopts = "" - revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30" - version = "v1.3.1" - -[[projects]] - digest = "1:f9f45f75f332e03fc7e9fe9188ea4e1ce4d14779ef34fa1b023da67518e36327" - name = "github.com/google/go-cmp" - packages = [ - "cmp", - "cmp/internal/diff", - "cmp/internal/function", - "cmp/internal/value", - ] - pruneopts = "" - revision = "3af367b6b30c263d47e8895973edcca9a49cf029" - version = "v0.2.0" - -[[projects]] - digest = "1:8d4a577a9643f713c25a32151c0f26af7228b4b97a219b5ddb7fd38d16f6e673" - name = "github.com/google/gofuzz" - packages = ["."] - 
pruneopts = "" - revision = "f140a6486e521aad38f5917de355cbf147cc0496" - version = "v1.0.0" - -[[projects]] - digest = "1:ad92aa49f34cbc3546063c7eb2cabb55ee2278b72842eda80e2a20a8a06a8d73" - name = "github.com/google/uuid" - packages = ["."] - pruneopts = "" - revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4" - version = "v1.1.1" - -[[projects]] - digest = "1:16b2837c8b3cf045fa2cdc82af0cf78b19582701394484ae76b2c3bc3c99ad73" - name = "github.com/googleapis/gnostic" - packages = [ - "OpenAPIv2", - "compiler", - "extensions", - ] - pruneopts = "" - revision = "7c663266750e7d82587642f65e60bc4083f1f84e" - version = "v0.2.0" - -[[projects]] - branch = "master" - digest = "1:3c9133db4e9f297da9eccd7e318a5964c8251c3fa5ccf9a382954d7f2b3ed6a9" - name = "github.com/gophercloud/gophercloud" - packages = [ - ".", - "openstack", - "openstack/identity/v2/tenants", - "openstack/identity/v2/tokens", - "openstack/identity/v3/tokens", - "openstack/utils", - "pagination", - ] - pruneopts = "" - revision = "2c55d17f707cc8333ca4f49690cb2970d12a25f6" - -[[projects]] - digest = "1:65c7ed49d9f36dd4752e43013323fa9229db60b29aa4f5a75aaecda3130c74e2" - name = "github.com/gorilla/mux" - packages = ["."] - pruneopts = "" - revision = "c5c6c98bc25355028a63748a498942a6398ccd22" - version = "v1.7.1" - -[[projects]] - digest = "1:09aa5dd1332b93c96bde671bafb053249dc813febf7d5ca84e8f382ba255d67d" - name = "github.com/gorilla/websocket" - packages = ["."] - pruneopts = "" - revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d" - version = "v1.4.0" - -[[projects]] - digest = "1:6e87163a02e172de60dc68c263b375079f6b08fe81d32611fe12ffadd673b1b8" - name = "github.com/grpc-ecosystem/grpc-gateway" - packages = [ - "internal", - "runtime", - "utilities", - ] - pruneopts = "" - revision = "20f268a412e5b342ebfb1a0eef7c3b7bd6c260ea" - version = "v1.8.5" - -[[projects]] - digest = "1:984b627a3c838daa9f4c949ec8e6f049a7021b1156eb4db0337c3a5afe07aada" - name = "github.com/hashicorp/go-cleanhttp" - packages = ["."] 
- pruneopts = "" - revision = "eda1e5db218aad1db63ca4642c8906b26bcf2744" - version = "v0.5.1" - -[[projects]] - digest = "1:85f8f8d390a03287a563e215ea6bd0610c858042731a8b42062435a0dcbc485f" - name = "github.com/hashicorp/golang-lru" - packages = [ - ".", - "simplelru", - ] - pruneopts = "" - revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c" - version = "v0.5.1" - -[[projects]] - digest = "1:35979179658d20a73693589e67bdc3baf4dc0ef9f524b1dfd3cc55fb5f6ae384" - name = "github.com/huandu/xstrings" - packages = ["."] - pruneopts = "" - revision = "f02667b379e2fb5916c3cda2cf31e0eb885d79f8" - version = "v1.2.0" - -[[projects]] - digest = "1:31bfd110d31505e9ffbc9478e31773bf05bf02adcaeb9b139af42684f9294c13" - name = "github.com/imdario/mergo" - packages = ["."] - pruneopts = "" - revision = "7c29201646fa3de8506f701213473dd407f19646" - version = "v0.3.7" - -[[projects]] - digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" - name = "github.com/inconshreveable/mousetrap" - packages = ["."] - pruneopts = "" - revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" - version = "v1.0" - -[[projects]] - digest = "1:13fe471d0ed891e8544eddfeeb0471fd3c9f2015609a1c000aefdedf52a19d40" - name = "github.com/jmespath/go-jmespath" - packages = ["."] - pruneopts = "" - revision = "c2b33e84" - -[[projects]] - digest = "1:31c6f3c4f1e15fcc24fcfc9f5f24603ff3963c56d6fa162116493b4025fb6acc" - name = "github.com/json-iterator/go" - packages = ["."] - pruneopts = "" - revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682" - -[[projects]] - digest = "1:0f51cee70b0d254dbc93c22666ea2abf211af81c1701a96d04e2284b408621db" - name = "github.com/konsorten/go-windows-terminal-sequences" - packages = ["."] - pruneopts = "" - revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e" - version = "v1.0.2" - -[[projects]] - branch = "master" - digest = "1:1ed9eeebdf24aadfbca57eb50e6455bd1d2474525e0f0d4454de8c8e9bc7ee9a" - name = "github.com/kr/logfmt" - packages = ["."] - pruneopts = "" - 
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" - -[[projects]] - digest = "1:63722a4b1e1717be7b98fc686e0b30d5e7f734b9e93d7dee86293b6deab7ea28" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:0c0ff2a89c1bb0d01887e1dac043ad7efbf3ec77482ef058ac423d13497e16fd" - name = "github.com/modern-go/concurrent" - packages = ["."] - pruneopts = "" - revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" - version = "1.0.3" - -[[projects]] - digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855" - name = "github.com/modern-go/reflect2" - packages = ["."] - pruneopts = "" - revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" - version = "1.0.1" - -[[projects]] - branch = "master" - digest = "1:ead67482454251fc15dd2158803ddd12adacc5b75cdcbc9241e0c377175d2d5b" - name = "github.com/ncabatoff/go-seq" - packages = ["seq"] - pruneopts = "" - revision = "b08ef85ed83364cba413c98a94bbd4169a0ce70b" - -[[projects]] - digest = "1:5d9b668b0b4581a978f07e7d2e3314af18eb27b3fb5d19b70185b7c575723d11" - name = "github.com/opencontainers/go-digest" - packages = ["."] - pruneopts = "" - revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf" - version = "v1.0.0-rc1" - -[[projects]] - digest = "1:f26c8670b11e29a49c8e45f7ec7f2d5bac62e8fd4e3c0ae1662baa4a697f984a" - name = "github.com/opencontainers/image-spec" - packages = [ - "specs-go", - "specs-go/v1", - ] - pruneopts = "" - revision = "d60099175f88c47cd379c4738d158884749ed235" - version = "v1.0.1" - -[[projects]] - branch = "master" - digest = "1:db8323f83e495a11f468bb2e68314f86bd7304ed4ab937f095aa86e9afea8969" - name = "github.com/opentracing-contrib/go-stdlib" - packages = ["nethttp"] - pruneopts = "" - revision = "3020fec0e66bdb65fd42cb346cb65d58deb92e0d" - -[[projects]] - digest = "1:1fc4897d3cc482d070651563c16a51489296cd9150e6d53fb7ff4d59a24334bc" - name = 
"github.com/opentracing/opentracing-go" - packages = [ - ".", - "ext", - "log", - ] - pruneopts = "" - revision = "659c90643e714681897ec2521c60567dd21da733" - version = "v1.1.0" - -[[projects]] - digest = "1:1d7e1867c49a6dd9856598ef7c3123604ea3daabf5b83f303ff457bcbc410b1d" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "" - revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" - version = "v0.8.1" - -[[projects]] - branch = "master" - digest = "1:4f9447b6733d5fce8181fa332dca4ab72f615d7020c1cc67567f4ccd96a62436" - name = "github.com/pkg/term" - packages = [ - ".", - "termios", - ] - pruneopts = "" - revision = "aa71e9d9e942418fbb97d80895dcea70efed297c" - -[[projects]] - digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - branch = "master" - digest = "1:b8aebc4a3b8e6e71ad3be8faa0cf071287c7bf6dc67aca5b5d1c79afeb512d40" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/internal", - "prometheus/promhttp", - ] - pruneopts = "" - revision = "5a3ec6a883d301737d83860ed604021342ff2144" - -[[projects]] - branch = "master" - digest = "1:cd67319ee7536399990c4b00fae07c3413035a53193c644549a676091507cadc" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "" - revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8" - -[[projects]] - digest = "1:cf0d0ad53268d7c0e6cbe9a037059a6583ffd58d25f1cd6d48920f8c3ccc537f" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "" - revision = "a82f4c12f983cc2649298185f296632953e50d3e" - version = "v0.3.0" - -[[projects]] - branch = "master" - digest = "1:a256e5de73d8ecd20a7e2e41fbe17864e60b6dc396e5e6ec3e9576b0fb1e9742" - name = "github.com/prometheus/procfs" - packages = ["."] 
- pruneopts = "" - revision = "e22ddced71425e65e388b17b7d0289c5ea77d06e" - -[[projects]] - digest = "1:4244255905cb95c3c98894d671367f84a6292608ae528936fe46ba9c86f68393" - name = "github.com/ryanuber/go-glob" - packages = ["."] - pruneopts = "" - revision = "51a8f68e6c24dc43f1e371749c89a267de4ebc53" - version = "v1.0.0" - -[[projects]] - digest = "1:631ea4a52a20ca54eceb1077e8c7e553a4f86a58639824825d9259374f7c362f" - name = "github.com/sirupsen/logrus" - packages = ["."] - pruneopts = "" - revision = "8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f" - version = "v1.4.1" - -[[projects]] - digest = "1:a1403cc8a94b8d7956ee5e9694badef0e7b051af289caad1cf668331e3ffa4f6" - name = "github.com/spf13/cobra" - packages = ["."] - pruneopts = "" - revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" - version = "v0.0.3" - -[[projects]] - digest = "1:cbaf13cdbfef0e4734ed8a7504f57fe893d471d62a35b982bf6fb3f036449a66" - name = "github.com/spf13/pflag" - packages = ["."] - pruneopts = "" - revision = "298182f68c66c05229eb03ac171abe6e309ee79a" - version = "v1.0.3" - -[[projects]] - digest = "1:381bcbeb112a51493d9d998bbba207a529c73dbb49b3fd789e48c63fac1f192c" - name = "github.com/stretchr/testify" - packages = ["assert"] - pruneopts = "" - revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053" - version = "v1.3.0" - -[[projects]] - digest = "1:92c0006b9a06518452bad6275babc396a0b9afcdfe870d5494fe67a2beb6c2be" - name = "github.com/uber/jaeger-client-go" - packages = [ - ".", - "internal/baggage", - "internal/spanlog", - "internal/throttler", - "log", - "thrift", - "thrift-gen/agent", - "thrift-gen/jaeger", - "thrift-gen/sampling", - "thrift-gen/zipkincore", - "utils", - ] - pruneopts = "" - revision = "2f47546e3facd43297739439600bcf43f44cce5d" - version = "v2.16.0" - -[[projects]] - digest = "1:2e2e1bf63381476d203354c0c3c4692d103e8d02ea2bfafe8e3f80a04c925b87" - name = "github.com/uber/jaeger-lib" - packages = ["metrics"] - pruneopts = "" - revision = "0e30338a695636fe5bcf7301e8030ce8dd2a8530" - 
version = "v2.0.0" - -[[projects]] - branch = "master" - digest = "1:da57805987e05805c1dad6d2a0d5b123ae1e3a40ed862b7e85d045bcc6b03a06" - name = "github.com/weaveworks/common" - packages = [ - "errors", - "httpgrpc", - "logging", - "middleware", - "user", - ] - pruneopts = "" - revision = "87611edc252e21e7de58c08270384450b126b48a" - -[[projects]] - branch = "master" - digest = "1:df053b63f516ff71ba2d6baa921e43152bb03d661da92f697cb8765710e40d25" - name = "github.com/weaveworks/go-checkpoint" - packages = ["."] - pruneopts = "" - revision = "ebbb8b0518ab326368631225cabc9435fe580dcc" - -[[projects]] - digest = "1:d676526dd24c92bfe86269f9e6a70c4d340f6017d96915a01c23fef98db4db4f" - name = "github.com/weaveworks/promrus" - packages = ["."] - pruneopts = "" - revision = "0599d764e054d4e983bb120e30759179fafe3942" - version = "v1.2.0" - -[[projects]] - branch = "master" - digest = "1:2e43242796ee48ff0256eaf784ffaca015614ea5cb284cbc7b6e0fb65c219887" - name = "github.com/whilp/git-urls" - packages = ["."] - pruneopts = "" - revision = "31bac0d230fa29f36ed1b3279c2343752e7196c0" - -[[projects]] - digest = "1:785b4eb9fa77cc5290dff2c72c00fb6759c2c82767895f472582f1aae7421cad" - name = "go.opencensus.io" - packages = [ - ".", - "internal", - "internal/tagencoding", - "metric/metricdata", - "metric/metricproducer", - "plugin/ocgrpc", - "plugin/ochttp", - "plugin/ochttp/propagation/b3", - "plugin/ochttp/propagation/tracecontext", - "resource", - "stats", - "stats/internal", - "stats/view", - "tag", - "trace", - "trace/internal", - "trace/propagation", - "trace/tracestate", - ] - pruneopts = "" - revision = "43463a80402d8447b7fce0d2c58edf1687ff0b58" - version = "v0.19.3" - -[[projects]] - branch = "master" - digest = "1:958ad9932fc5ac9fb5c794f97580ed123ddfed1d965e1de0f98e2a590d6e9e3e" - name = "golang.org/x/crypto" - packages = [ - "cast5", - "openpgp", - "openpgp/armor", - "openpgp/clearsign", - "openpgp/elgamal", - "openpgp/errors", - "openpgp/packet", - "openpgp/s2k", - "pbkdf2", - 
"scrypt", - "ssh/terminal", - ] - pruneopts = "" - revision = "88737f569e3a9c7ab309cdc09a07fe7fc87233c3" - -[[projects]] - branch = "master" - digest = "1:09972eaa1645553c1cf5b0d2b471aa3aef8d9ab88ca45528e131cd32e8572fb9" - name = "golang.org/x/net" - packages = [ - "context", - "context/ctxhttp", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "trace", - ] - pruneopts = "" - revision = "eb5bcb51f2a31c7d5141d810b70815c05d9c9146" - -[[projects]] - branch = "master" - digest = "1:348696484a568aa816b0aa29d4924afa1a4e5492e29a003eaf365f650a53c7b4" - name = "golang.org/x/oauth2" - packages = [ - ".", - "google", - "internal", - "jws", - "jwt", - ] - pruneopts = "" - revision = "9f3314589c9a9136388751d9adae6b0ed400978a" - -[[projects]] - branch = "master" - digest = "1:0142c968b74c157abbb0220c05fa2bdde8a3a4509d6134b35ef75d5b58afb721" - name = "golang.org/x/sync" - packages = ["semaphore"] - pruneopts = "" - revision = "e225da77a7e68af35c70ccbf71af2b83e6acac3c" - -[[projects]] - branch = "master" - digest = "1:227fd48f127e252f68fecb328ee448020d26c93f9429356da9235b2845a04be8" - name = "golang.org/x/sys" - packages = [ - "unix", - "windows", - ] - pruneopts = "" - revision = "b44545bcd369ef9ff9bebcfebf5f3370ef9b1932" - -[[projects]] - digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - digest = "1:9522af4be529c108010f95b05f1022cb872f2b9ff8b101080f554245673466e1" - name = "golang.org/x/time" - packages = ["rate"] - pruneopts = "" - revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef" - 
-[[projects]] - branch = "master" - digest = "1:8eb8d3e3ac8104f933168344d8961d774cfeecc8340a65a4b0ca01b273fcda9d" - name = "golang.org/x/tools" - packages = [ - "go/ast/astutil", - "go/gcexportdata", - "go/internal/gcimporter", - "go/internal/packagesdriver", - "go/packages", - "go/types/typeutil", - "imports", - "internal/fastwalk", - "internal/gopathwalk", - "internal/module", - "internal/semver", - ] - pruneopts = "" - revision = "681f9ce8ac52c4ba431539a515ecb7f2ab72eca0" - -[[projects]] - digest = "1:5da26cd5580705cdb4082571febd49693717070d6d3543926bd20b8093482b1c" - name = "google.golang.org/api" - packages = ["support/bundler"] - pruneopts = "" - revision = "0cbcb99a9ea0c8023c794b2693cbe1def82ed4d7" - version = "v0.3.2" - -[[projects]] - digest = "1:0a6cbf5be24f00105d33c9f6d2f40b8149e0316537a92be1b0d4c761b7ae39fb" - name = "google.golang.org/appengine" - packages = [ - ".", - "internal", - "internal/app_identity", - "internal/base", - "internal/datastore", - "internal/log", - "internal/modules", - "internal/remote_api", - "internal/urlfetch", - "urlfetch", - ] - pruneopts = "" - revision = "54a98f90d1c46b7731eb8fb305d2a321c30ef610" - version = "v1.5.0" - -[[projects]] - branch = "master" - digest = "1:6cbc03e8d4c5724d6228c88f1402d8cbd0a515561f73f0cecbb72f3d6576ff28" - name = "google.golang.org/genproto" - packages = [ - "googleapis/api/httpbody", - "googleapis/rpc/status", - "protobuf/field_mask", - ] - pruneopts = "" - revision = "64821d5d210748c883cd2b809589555ae4654203" - -[[projects]] - digest = "1:c4e8733914b7b1b535988fb5d5bd3de60845fb4553227b7a1ce1b7180204e462" - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "binarylog/grpc_binarylog_v1", - "codes", - "connectivity", - "credentials", - "credentials/internal", - "encoding", - "encoding/proto", - "grpclog", - "health/grpc_health_v1", - "internal", - "internal/backoff", - "internal/balancerload", - "internal/binarylog", - 
"internal/channelz", - "internal/envconfig", - "internal/grpcrand", - "internal/grpcsync", - "internal/syscall", - "internal/transport", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "resolver/dns", - "resolver/passthrough", - "stats", - "status", - "tap", - ] - pruneopts = "" - revision = "236199dd5f8031d698fb64091194aecd1c3895b2" - version = "v1.20.0" - -[[projects]] - digest = "1:75fb3fcfc73a8c723efde7777b40e8e8ff9babf30d8c56160d01beffea8a95a6" - name = "gopkg.in/inf.v0" - packages = ["."] - pruneopts = "" - revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" - version = "v0.9.1" - -[[projects]] - digest = "1:cedccf16b71e86db87a24f8d4c70b0a855872eb967cb906a66b95de56aefbd0d" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "" - revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" - version = "v2.2.2" - -[[projects]] - digest = "1:d8a6f1ec98713e685346a2e4b46c6ec4a1792a5535f8b0dffe3b1c08c9d69b12" - name = "k8s.io/api" - packages = [ - "admissionregistration/v1beta1", - "apps/v1", - "apps/v1beta1", - "apps/v1beta2", - "auditregistration/v1alpha1", - "authentication/v1", - "authentication/v1beta1", - "authorization/v1", - "authorization/v1beta1", - "autoscaling/v1", - "autoscaling/v2beta1", - "autoscaling/v2beta2", - "batch/v1", - "batch/v1beta1", - "batch/v2alpha1", - "certificates/v1beta1", - "coordination/v1", - "coordination/v1beta1", - "core/v1", - "events/v1beta1", - "extensions/v1beta1", - "networking/v1", - "networking/v1beta1", - "node/v1alpha1", - "node/v1beta1", - "policy/v1beta1", - "rbac/v1", - "rbac/v1alpha1", - "rbac/v1beta1", - "scheduling/v1", - "scheduling/v1alpha1", - "scheduling/v1beta1", - "settings/v1alpha1", - "storage/v1", - "storage/v1alpha1", - "storage/v1beta1", - ] - pruneopts = "" - revision = "40a48860b5abbba9aa891b02b32da429b08d96a0" - version = "kubernetes-1.14.0" - -[[projects]] - digest = "1:32c3bbb0278417aa36d7600a0ef701f5d5eb3148830895c2634872bc55400be7" - name = "k8s.io/apiextensions-apiserver" 
- packages = [ - "pkg/apis/apiextensions", - "pkg/apis/apiextensions/v1beta1", - "pkg/client/clientset/clientset", - "pkg/client/clientset/clientset/fake", - "pkg/client/clientset/clientset/scheme", - "pkg/client/clientset/clientset/typed/apiextensions/v1beta1", - "pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake", - ] - pruneopts = "" - revision = "53c4693659ed354d76121458fb819202dd1635fa" - version = "kubernetes-1.14.0" - -[[projects]] - digest = "1:002f84e9f3a08359c968075f8effc76b898dd901459a63817f9d9c568e3a5a57" - name = "k8s.io/apimachinery" - packages = [ - "pkg/api/errors", - "pkg/api/meta", - "pkg/api/resource", - "pkg/apis/meta/internalversion", - "pkg/apis/meta/v1", - "pkg/apis/meta/v1/unstructured", - "pkg/apis/meta/v1beta1", - "pkg/conversion", - "pkg/conversion/queryparams", - "pkg/fields", - "pkg/labels", - "pkg/runtime", - "pkg/runtime/schema", - "pkg/runtime/serializer", - "pkg/runtime/serializer/json", - "pkg/runtime/serializer/protobuf", - "pkg/runtime/serializer/recognizer", - "pkg/runtime/serializer/streaming", - "pkg/runtime/serializer/versioning", - "pkg/selection", - "pkg/types", - "pkg/util/cache", - "pkg/util/clock", - "pkg/util/diff", - "pkg/util/errors", - "pkg/util/framer", - "pkg/util/httpstream", - "pkg/util/httpstream/spdy", - "pkg/util/intstr", - "pkg/util/json", - "pkg/util/mergepatch", - "pkg/util/naming", - "pkg/util/net", - "pkg/util/runtime", - "pkg/util/sets", - "pkg/util/strategicpatch", - "pkg/util/validation", - "pkg/util/validation/field", - "pkg/util/wait", - "pkg/util/yaml", - "pkg/version", - "pkg/watch", - "third_party/forked/golang/json", - "third_party/forked/golang/netutil", - "third_party/forked/golang/reflect", - ] - pruneopts = "" - revision = "d7deff9243b165ee192f5551710ea4285dcfd615" - version = "kubernetes-1.14.0" - -[[projects]] - digest = "1:c2944ab044242534ad7c4b20de548b1df8a6e06ad739ab53b01042a59c4b6991" - name = "k8s.io/client-go" - packages = [ - "discovery", - "discovery/cached", - 
"discovery/cached/memory", - "discovery/fake", - "dynamic", - "dynamic/fake", - "kubernetes", - "kubernetes/fake", - "kubernetes/scheme", - "kubernetes/typed/admissionregistration/v1beta1", - "kubernetes/typed/admissionregistration/v1beta1/fake", - "kubernetes/typed/apps/v1", - "kubernetes/typed/apps/v1/fake", - "kubernetes/typed/apps/v1beta1", - "kubernetes/typed/apps/v1beta1/fake", - "kubernetes/typed/apps/v1beta2", - "kubernetes/typed/apps/v1beta2/fake", - "kubernetes/typed/auditregistration/v1alpha1", - "kubernetes/typed/auditregistration/v1alpha1/fake", - "kubernetes/typed/authentication/v1", - "kubernetes/typed/authentication/v1/fake", - "kubernetes/typed/authentication/v1beta1", - "kubernetes/typed/authentication/v1beta1/fake", - "kubernetes/typed/authorization/v1", - "kubernetes/typed/authorization/v1/fake", - "kubernetes/typed/authorization/v1beta1", - "kubernetes/typed/authorization/v1beta1/fake", - "kubernetes/typed/autoscaling/v1", - "kubernetes/typed/autoscaling/v1/fake", - "kubernetes/typed/autoscaling/v2beta1", - "kubernetes/typed/autoscaling/v2beta1/fake", - "kubernetes/typed/autoscaling/v2beta2", - "kubernetes/typed/autoscaling/v2beta2/fake", - "kubernetes/typed/batch/v1", - "kubernetes/typed/batch/v1/fake", - "kubernetes/typed/batch/v1beta1", - "kubernetes/typed/batch/v1beta1/fake", - "kubernetes/typed/batch/v2alpha1", - "kubernetes/typed/batch/v2alpha1/fake", - "kubernetes/typed/certificates/v1beta1", - "kubernetes/typed/certificates/v1beta1/fake", - "kubernetes/typed/coordination/v1", - "kubernetes/typed/coordination/v1/fake", - "kubernetes/typed/coordination/v1beta1", - "kubernetes/typed/coordination/v1beta1/fake", - "kubernetes/typed/core/v1", - "kubernetes/typed/core/v1/fake", - "kubernetes/typed/events/v1beta1", - "kubernetes/typed/events/v1beta1/fake", - "kubernetes/typed/extensions/v1beta1", - "kubernetes/typed/extensions/v1beta1/fake", - "kubernetes/typed/networking/v1", - "kubernetes/typed/networking/v1/fake", - 
"kubernetes/typed/networking/v1beta1", - "kubernetes/typed/networking/v1beta1/fake", - "kubernetes/typed/node/v1alpha1", - "kubernetes/typed/node/v1alpha1/fake", - "kubernetes/typed/node/v1beta1", - "kubernetes/typed/node/v1beta1/fake", - "kubernetes/typed/policy/v1beta1", - "kubernetes/typed/policy/v1beta1/fake", - "kubernetes/typed/rbac/v1", - "kubernetes/typed/rbac/v1/fake", - "kubernetes/typed/rbac/v1alpha1", - "kubernetes/typed/rbac/v1alpha1/fake", - "kubernetes/typed/rbac/v1beta1", - "kubernetes/typed/rbac/v1beta1/fake", - "kubernetes/typed/scheduling/v1", - "kubernetes/typed/scheduling/v1/fake", - "kubernetes/typed/scheduling/v1alpha1", - "kubernetes/typed/scheduling/v1alpha1/fake", - "kubernetes/typed/scheduling/v1beta1", - "kubernetes/typed/scheduling/v1beta1/fake", - "kubernetes/typed/settings/v1alpha1", - "kubernetes/typed/settings/v1alpha1/fake", - "kubernetes/typed/storage/v1", - "kubernetes/typed/storage/v1/fake", - "kubernetes/typed/storage/v1alpha1", - "kubernetes/typed/storage/v1alpha1/fake", - "kubernetes/typed/storage/v1beta1", - "kubernetes/typed/storage/v1beta1/fake", - "pkg/apis/clientauthentication", - "pkg/apis/clientauthentication/v1alpha1", - "pkg/apis/clientauthentication/v1beta1", - "pkg/version", - "plugin/pkg/client/auth", - "plugin/pkg/client/auth/azure", - "plugin/pkg/client/auth/exec", - "plugin/pkg/client/auth/gcp", - "plugin/pkg/client/auth/oidc", - "plugin/pkg/client/auth/openstack", - "rest", - "rest/watch", - "testing", - "third_party/forked/golang/template", - "tools/auth", - "tools/cache", - "tools/clientcmd", - "tools/clientcmd/api", - "tools/clientcmd/api/latest", - "tools/clientcmd/api/v1", - "tools/metrics", - "tools/pager", - "tools/portforward", - "tools/record", - "tools/record/util", - "tools/reference", - "transport", - "transport/spdy", - "util/cert", - "util/connrotation", - "util/flowcontrol", - "util/homedir", - "util/jsonpath", - "util/keyutil", - "util/retry", - "util/workqueue", - ] - pruneopts = "" - revision 
= "6ee68ca5fd8355d024d02f9db0b3b667e8357a0f" - version = "v11.0.0" - -[[projects]] - branch = "release-1.14" - digest = "1:742ce70d2c6de0f02b5331a25d4d549f55de6b214af22044455fd6e6b451cad9" - name = "k8s.io/code-generator" - packages = [ - "cmd/client-gen", - "cmd/client-gen/args", - "cmd/client-gen/generators", - "cmd/client-gen/generators/fake", - "cmd/client-gen/generators/scheme", - "cmd/client-gen/generators/util", - "cmd/client-gen/path", - "cmd/client-gen/types", - "pkg/namer", - "pkg/util", - ] - pruneopts = "" - revision = "50b561225d70b3eb79a1faafd3dfe7b1a62cbe73" - -[[projects]] - branch = "master" - digest = "1:6a2a63e09a59caff3fd2d36d69b7b92c2fe7cf783390f0b7349fb330820f9a8e" - name = "k8s.io/gengo" - packages = [ - "args", - "generator", - "namer", - "parser", - "types", - ] - pruneopts = "" - revision = "e17681d19d3ac4837a019ece36c2a0ec31ffe985" - -[[projects]] - digest = "1:93dacf333c11ff29b3a336c1bbff8f2f1dc689a47a49f9e58a183202eaeae184" - name = "k8s.io/helm" - packages = [ - "pkg/chartutil", - "pkg/engine", - "pkg/getter", - "pkg/helm", - "pkg/helm/environment", - "pkg/helm/helmpath", - "pkg/ignore", - "pkg/manifest", - "pkg/plugin", - "pkg/proto/hapi/chart", - "pkg/proto/hapi/release", - "pkg/proto/hapi/services", - "pkg/proto/hapi/version", - "pkg/provenance", - "pkg/releaseutil", - "pkg/renderutil", - "pkg/repo", - "pkg/storage/errors", - "pkg/sympath", - "pkg/tlsutil", - "pkg/urlutil", - "pkg/version", - ] - pruneopts = "" - revision = "618447cbf203d147601b4b9bd7f8c37a5d39fbb4" - version = "v2.13.1" - -[[projects]] - digest = "1:4b78eccecdf36f29cacc19ca79411f2235e0387af52b11f1d77328d7ad5d84a2" - name = "k8s.io/klog" - packages = ["."] - pruneopts = "" - revision = "e531227889390a39d9533dde61f590fe9f4b0035" - version = "v0.3.0" - -[[projects]] - branch = "master" - digest = "1:e5e6165f043c38f641355912f5bf94134531e82abceaa5f086a9008f1bea6655" - name = "k8s.io/kube-openapi" - packages = ["pkg/util/proto"] - pruneopts = "" - revision = 
"94e1e7b7574c44c4c0f2007de6fe617e259191f3" - -[[projects]] - branch = "master" - digest = "1:f6c19347011ba9a072aa55f5c7fa630c0b88303ac4ca83008454aef95b0c2078" - name = "k8s.io/utils" - packages = [ - "buffer", - "integer", - "trace", - ] - pruneopts = "" - revision = "21c4ce38f2a793ec01e925ddc31216500183b773" - -[[projects]] - digest = "1:321081b4a44256715f2b68411d8eda9a17f17ebfe6f0cc61d2cc52d11c08acfa" - name = "sigs.k8s.io/yaml" - packages = ["."] - pruneopts = "" - revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" - version = "v1.1.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/2opremio/go-k8s-portforward", - "github.com/Masterminds/semver", - "github.com/aws/aws-sdk-go/aws", - "github.com/aws/aws-sdk-go/aws/ec2metadata", - "github.com/aws/aws-sdk-go/aws/session", - "github.com/aws/aws-sdk-go/service/ecr", - "github.com/bradfitz/gomemcache/memcache", - "github.com/docker/distribution", - "github.com/docker/distribution/manifest/manifestlist", - "github.com/docker/distribution/manifest/schema1", - "github.com/docker/distribution/manifest/schema2", - "github.com/docker/distribution/registry/api/errcode", - "github.com/docker/distribution/registry/client", - "github.com/docker/distribution/registry/client/auth", - "github.com/docker/distribution/registry/client/auth/challenge", - "github.com/docker/distribution/registry/client/transport", - "github.com/ghodss/yaml", - "github.com/go-kit/kit/log", - "github.com/go-kit/kit/metrics", - "github.com/go-kit/kit/metrics/prometheus", - "github.com/golang/gddo/httputil/header", - "github.com/golang/protobuf/ptypes/any", - "github.com/google/go-cmp/cmp", - "github.com/gorilla/mux", - "github.com/gorilla/websocket", - "github.com/imdario/mergo", - "github.com/ncabatoff/go-seq/seq", - "github.com/opencontainers/go-digest", - "github.com/pkg/errors", - "github.com/pkg/term", - "github.com/prometheus/client_golang/prometheus", - 
"github.com/prometheus/client_golang/prometheus/promhttp", - "github.com/ryanuber/go-glob", - "github.com/spf13/cobra", - "github.com/spf13/pflag", - "github.com/stretchr/testify/assert", - "github.com/weaveworks/common/middleware", - "github.com/weaveworks/go-checkpoint", - "github.com/whilp/git-urls", - "golang.org/x/sys/unix", - "golang.org/x/time/rate", - "gopkg.in/yaml.v2", - "k8s.io/api/apps/v1", - "k8s.io/api/batch/v1beta1", - "k8s.io/api/core/v1", - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", - "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", - "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake", - "k8s.io/apimachinery/pkg/api/errors", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "k8s.io/apimachinery/pkg/labels", - "k8s.io/apimachinery/pkg/runtime", - "k8s.io/apimachinery/pkg/runtime/schema", - "k8s.io/apimachinery/pkg/runtime/serializer", - "k8s.io/apimachinery/pkg/types", - "k8s.io/apimachinery/pkg/util/runtime", - "k8s.io/apimachinery/pkg/util/wait", - "k8s.io/apimachinery/pkg/watch", - "k8s.io/client-go/discovery", - "k8s.io/client-go/discovery/cached", - "k8s.io/client-go/discovery/fake", - "k8s.io/client-go/dynamic", - "k8s.io/client-go/dynamic/fake", - "k8s.io/client-go/kubernetes", - "k8s.io/client-go/kubernetes/fake", - "k8s.io/client-go/kubernetes/scheme", - "k8s.io/client-go/kubernetes/typed/core/v1", - "k8s.io/client-go/rest", - "k8s.io/client-go/testing", - "k8s.io/client-go/tools/cache", - "k8s.io/client-go/tools/clientcmd", - "k8s.io/client-go/tools/record", - "k8s.io/client-go/util/flowcontrol", - "k8s.io/client-go/util/workqueue", - "k8s.io/code-generator/cmd/client-gen", - "k8s.io/helm/pkg/chartutil", - "k8s.io/helm/pkg/getter", - "k8s.io/helm/pkg/helm", - "k8s.io/helm/pkg/helm/environment", - "k8s.io/helm/pkg/proto/hapi/chart", - "k8s.io/helm/pkg/proto/hapi/release", - "k8s.io/helm/pkg/proto/hapi/services", - 
"k8s.io/helm/pkg/releaseutil", - "k8s.io/helm/pkg/repo", - "k8s.io/helm/pkg/tlsutil", - "k8s.io/klog", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index 582b6d713..000000000 --- a/Gopkg.toml +++ /dev/null @@ -1,68 +0,0 @@ -# See usage of https://github.com/kubernetes/code-generator if regeneration of the following is required: -# integrations/apis/flux.weave.works/*/zz_generated.deepcopy.go -# integrations/client/* -# -required = ["k8s.io/code-generator/cmd/client-gen"] - -[[override]] - name = "github.com/ugorji/go" - revision = "8c0409fcbb70099c748d71f714529204975f6c3f" - -[[constraint]] - name = "github.com/docker/distribution" - branch = "master" - source = "github.com/2opremio/distribution" - -# Pin to master branch until there is a more recent stable release: -# https://github.com/prometheus/client_golang/issues/375 -[[constraint]] - name = "github.com/prometheus/client_golang" - branch = "master" - -[[constraint]] - name = "k8s.io/api" - version = "kubernetes-1.14.0" - -[[constraint]] - name = "k8s.io/apimachinery" - version = "kubernetes-1.14.0" - -[[constraint]] - name = "k8s.io/apiextensions-apiserver" - version = "kubernetes-1.14.0" - -[[constraint]] - name = "k8s.io/client-go" - version = "11.0.0" - -[[constraint]] - name = "k8s.io/code-generator" - branch = "release-1.14" - -[[override]] - name = "github.com/json-iterator/go" - revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682" - -[[constraint]] - name = "k8s.io/helm" - version = "~v2.13.0" - -[[constraint]] - name = "github.com/2opremio/go-k8s-portforward" - branch = "master" - -[[constraint]] - branch = "master" - name = "github.com/pkg/term" - -[[constraint]] - name = "github.com/Masterminds/semver" - version = "1.4.0" - -[[override]] - name = "github.com/BurntSushi/toml" - version = "v0.3.1" - -[[constraint]] - name = "github.com/imdario/mergo" - version = "0.3.7" diff --git a/Makefile b/Makefile index 1df81e4bb..b44de0b31 
100644 --- a/Makefile +++ b/Makefile @@ -6,6 +6,7 @@ SUDO := $(shell docker info > /dev/null 2> /dev/null || echo "sudo") TEST_FLAGS?= include docker/kubectl.version +include docker/kustomize.version include docker/helm.version # NB default target architecture is amd64. If you would like to try the @@ -15,6 +16,7 @@ ifeq ($(ARCH),) ARCH=amd64 endif CURRENT_OS_ARCH=$(shell echo `go env GOOS`-`go env GOARCH`) +GOBIN?=$(shell echo `go env GOPATH`/bin) # NB because this outputs absolute file names, you have to be careful # if you're testing out the Makefile with `-W` (pretend a file is @@ -30,7 +32,7 @@ IMAGE_TAG:=$(shell ./docker/image-tag) VCS_REF:=$(shell git rev-parse HEAD) BUILD_DATE:=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ') -all: $(GOPATH)/bin/fluxctl $(GOPATH)/bin/fluxd $(GOPATH)/bin/helm-operator build/.flux.done build/.helm-operator.done +all: $(GOBIN)/fluxctl $(GOBIN)/fluxd $(GOBIN)/helm-operator build/.flux.done build/.helm-operator.done release-bins: for arch in amd64; do \ @@ -47,14 +49,17 @@ release-bins: clean: go clean rm -rf ./build - rm -f test/bin/kubectl test/bin/helm + rm -f test/bin/kubectl test/bin/helm test/bin/kind test/bin/kustomize realclean: clean rm -rf ./cache -test: test/bin/helm test/bin/kubectl +test: test/bin/helm test/bin/kubectl test/bin/kustomize PATH="${PWD}/bin:${PWD}/test/bin:${PATH}" go test ${TEST_FLAGS} $(shell go list ./... 
| grep -v "^github.com/weaveworks/flux/vendor" | sort -u) +e2e: test/bin/helm test/bin/kubectl build/.flux.done build/.helm-operator.done + PATH="${PWD}/test/bin:${PATH}" CURRENT_OS_ARCH=$(CURRENT_OS_ARCH) test/e2e/run.sh + build/.%.done: docker/Dockerfile.% mkdir -p ./build/docker/$* cp $^ ./build/docker/$*/ @@ -64,7 +69,7 @@ build/.%.done: docker/Dockerfile.% -f build/docker/$*/Dockerfile.$* ./build/docker/$* touch $@ -build/.flux.done: build/fluxd build/kubectl docker/ssh_config docker/kubeconfig docker/known_hosts.sh +build/.flux.done: build/fluxd build/kubectl build/kustomize docker/ssh_config docker/kubeconfig docker/known_hosts.sh build/.helm-operator.done: build/helm-operator build/kubectl build/helm docker/ssh_config docker/known_hosts.sh docker/helm-repositories.yaml build/fluxd: $(FLUXD_DEPS) @@ -79,7 +84,10 @@ build/kubectl: cache/linux-$(ARCH)/kubectl-$(KUBECTL_VERSION) test/bin/kubectl: cache/$(CURRENT_OS_ARCH)/kubectl-$(KUBECTL_VERSION) build/helm: cache/linux-$(ARCH)/helm-$(HELM_VERSION) test/bin/helm: cache/$(CURRENT_OS_ARCH)/helm-$(HELM_VERSION) -build/kubectl test/bin/kubectl build/helm test/bin/helm: +build/kustomize: cache/linux-amd64/kustomize-$(KUSTOMIZE_VERSION) +test/bin/kustomize: cache/$(CURRENT_OS_ARCH)/kustomize-$(KUSTOMIZE_VERSION) + +build/kubectl test/bin/kubectl build/kustomize test/bin/kustomize build/helm test/bin/helm: mkdir -p build cp $< $@ if [ `basename $@` = "build" -a $(CURRENT_OS_ARCH) = "linux-$(ARCH)" ]; then strip $@; fi @@ -87,28 +95,33 @@ build/kubectl test/bin/kubectl build/helm test/bin/helm: cache/%/kubectl-$(KUBECTL_VERSION): docker/kubectl.version mkdir -p cache/$* - curl -L -o cache/$*/kubectl-$(KUBECTL_VERSION).tar.gz "https://dl.k8s.io/$(KUBECTL_VERSION)/kubernetes-client-$*.tar.gz" + curl --fail -L -o cache/$*/kubectl-$(KUBECTL_VERSION).tar.gz "https://dl.k8s.io/$(KUBECTL_VERSION)/kubernetes-client-$*.tar.gz" [ $* != "linux-$(ARCH)" ] || echo "$(KUBECTL_CHECKSUM_$(ARCH)) 
cache/$*/kubectl-$(KUBECTL_VERSION).tar.gz" | shasum -a 256 -c tar -m --strip-components 3 -C ./cache/$* -xzf cache/$*/kubectl-$(KUBECTL_VERSION).tar.gz kubernetes/client/bin/kubectl mv ./cache/$*/kubectl $@ +cache/%/kustomize-$(KUSTOMIZE_VERSION): docker/kustomize.version + mkdir -p cache/$* + curl --fail -L -o $@ "https://github.com/kubernetes-sigs/kustomize/releases/download/v$(KUSTOMIZE_VERSION)/kustomize_$(KUSTOMIZE_VERSION)_`echo $* | tr - _`" + [ $* != "linux-amd64" ] || echo "$(KUSTOMIZE_CHECKSUM) $@" | shasum -a 256 -c + cache/%/helm-$(HELM_VERSION): docker/helm.version mkdir -p cache/$* - curl -L -o cache/$*/helm-$(HELM_VERSION).tar.gz "https://storage.googleapis.com/kubernetes-helm/helm-v$(HELM_VERSION)-$*.tar.gz" + curl --fail -L -o cache/$*/helm-$(HELM_VERSION).tar.gz "https://storage.googleapis.com/kubernetes-helm/helm-v$(HELM_VERSION)-$*.tar.gz" [ $* != "linux-$(ARCH)" ] || echo "$(HELM_CHECKSUM_$(ARCH)) cache/$*/helm-$(HELM_VERSION).tar.gz" | shasum -a 256 -c tar -m -C ./cache -xzf cache/$*/helm-$(HELM_VERSION).tar.gz $*/helm mv cache/$*/helm $@ -$(GOPATH)/bin/fluxctl: $(FLUXCTL_DEPS) -$(GOPATH)/bin/fluxctl: ./cmd/fluxctl/*.go +$(GOBIN)/fluxctl: $(FLUXCTL_DEPS) +$(GOBIN)/fluxctl: ./cmd/fluxctl/*.go go install ./cmd/fluxctl -$(GOPATH)/bin/fluxd: $(FLUXD_DEPS) -$(GOPATH)/bin/fluxd: cmd/fluxd/*.go +$(GOBIN)/fluxd: $(FLUXD_DEPS) +$(GOBIN)/fluxd: cmd/fluxd/*.go go install ./cmd/fluxd -$(GOPATH)/bin/helm-operator: $(HELM_OPERATOR_DEPS) -$(GOPATH)/bin/help-operator: cmd/helm-operator/*.go +$(GOBIN)/helm-operator: $(HELM_OPERATOR_DEPS) +$(GOBIN)/help-operator: cmd/helm-operator/*.go go install ./cmd/helm-operator integration-test: all diff --git a/README.md b/README.md index ed7cef3a2..9d9d70c4d 100644 --- a/README.md +++ b/README.md @@ -57,20 +57,26 @@ Its major features are: ### Who is using Flux in production +- [ABA English](https://www.abaenglish.com) - [APPUiO](https://appuio.ch) +- [Avisi](https://avisi.nl) +- [Babylon 
Health](https://www.babylonhealth.com/) +- [College of William & Mary](https://www.wm.edu) - [Control Plane](https://control-plane.io) - [Crowd Riff](https://crowdriff.com) -- [College of William & Mary](https://www.wm.edu) - [Gini](https://gini.net) +- [Improwised Technologies](https://www.improwised.com/) - [iQmetrix](https://www.iqmetrix.com) +- [karriere tutor GmbH](https://www.karrieretutor.de) +- [loveholidays](https://www.loveholidays.com/) - [Mettle](https://mettle.co.uk) - [Payout](https://payout.one) - [Qordoba](https://qordoba.com) +- [Rungway](https://rungway.com) - [Troii](https://troii.com/) - [Under Armour](https://www.underarmour.com) - [VSHN](https://vshn.ch) - [Weave Cloud](https://cloud.weave.works) -- [Babylon Health](https://www.babylonhealth.com/) If you too are using Flux in production; please submit a PR to add your organization to the list! diff --git a/bin/helm/update_codegen.sh b/bin/helm/update_codegen.sh index 6c7c1ac10..8f0b9f2fa 100755 --- a/bin/helm/update_codegen.sh +++ b/bin/helm/update_codegen.sh @@ -5,10 +5,11 @@ set -o nounset set -o pipefail SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/../.. 
-CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ${GOPATH}/src/k8s.io/code-generator)} +CODEGEN_PKG=${CODEGEN_PKG:-$(echo `go env GOPATH`'/pkg/mod/k8s.io/code-generator@v0.0.0-20190511023357-639c964206c2')} -${CODEGEN_PKG}/generate-groups.sh all github.com/weaveworks/flux/integrations/client \ +go mod download # make sure the code-generator is downloaded +bash ${CODEGEN_PKG}/generate-groups.sh all github.com/weaveworks/flux/integrations/client \ github.com/weaveworks/flux/integrations/apis \ "flux.weave.works:v1beta1 helm.integrations.flux.weave.works:v1alpha2" \ - --go-header-file ${SCRIPT_ROOT}/bin/helm/custom-boilerplate.go.txt + --go-header-file "${SCRIPT_ROOT}/bin/helm/custom-boilerplate.go.txt" diff --git a/chart/flux/CHANGELOG.md b/chart/flux/CHANGELOG.md index f1d17b31f..b970ecaa0 100644 --- a/chart/flux/CHANGELOG.md +++ b/chart/flux/CHANGELOG.md @@ -1,3 +1,13 @@ +## 0.9.5 (2019-05-22) + + - Updated Flux to `1.12.3` + [weaveworks/flux#2076](https://github.com/weaveworks/flux/pull/2076) + +## 0.9.4 (2019-05-09) + + - Updated Helm operator to `0.9.1` + [weaveworks/flux#2032](https://github.com/weaveworks/flux/pull/2032) + ## 0.9.3 (2019-05-08) ### Improvements diff --git a/chart/flux/Chart.yaml b/chart/flux/Chart.yaml index eaaa52fd5..57f85a7e3 100644 --- a/chart/flux/Chart.yaml +++ b/chart/flux/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v1 -appVersion: "1.12.2" -version: 0.9.3 +appVersion: "1.12.3" +version: 0.9.5 kubeVersion: ">=1.9.0-0" name: flux description: Flux is a tool that automatically ensures that the state of a cluster matches what is specified in version control diff --git a/chart/flux/README.md b/chart/flux/README.md index aa6467435..fc281dc4d 100755 --- a/chart/flux/README.md +++ b/chart/flux/README.md @@ -188,12 +188,17 @@ The following tables lists the configurable parameters of the Weave Flux chart a | `replicaCount` | `1` | Number of Flux pods to deploy, more than one is not 
desirable. | `image.pullPolicy` | `IfNotPresent` | Image pull policy | `image.pullSecret` | `None` | Image pull secret +| `logFormat` | `fmt` | Log format (fmt or json) | `resources.requests.cpu` | `50m` | CPU resource requests for the Flux deployment | `resources.requests.memory` | `64Mi` | Memory resource requests for the Flux deployment | `resources.limits` | `None` | CPU/memory resource limits for the Flux deployment | `nodeSelector` | `{}` | Node Selector properties for the Flux deployment | `tolerations` | `[]` | Tolerations properties for the Flux deployment | `affinity` | `{}` | Affinity properties for the Flux deployment +| `extraVolumeMounts` | `[]` | Extra volumes mounts +| `extraVolumes` | `[]` | Extra volumes +| `dnsPolicy` | `` | Pod DNS policy +| `dnsConfig` | `` | Pod DNS config | `token` | `None` | Weave Cloud service token | `extraEnvs` | `[]` | Extra environment variables for the Flux pod(s) | `rbac.create` | `true` | If `true`, create and use RBAC resources @@ -208,20 +213,22 @@ The following tables lists the configurable parameters of the Weave Flux chart a | `git.email` | `support@weave.works` | Email to use as git committer | `git.setAuthor` | `false` | If set, the author of git commits will reflect the user who initiated the commit and will differ from the git committer. | `git.signingKey` | `None` | If set, commits will be signed with this GPG key +| `git.verifySignatures` | `false` | If set, the signatures of the sync tag and commits will be verified | `git.label` | `flux-sync` | Label to keep track of sync progress, used to tag the Git branch | `git.ciSkip` | `false` | Append "[ci skip]" to commit messages so that CI will skip builds | `git.pollInterval` | `5m` | Period at which to poll git repo for new commits | `git.timeout` | `20s` | Duration after which git operations time out -| `git.secretName` | `None` | Kubernetes secret with the SSH private key. Superceded by `helmOperator.git.secretName` if set. 
+| `git.secretName` | `None` | Kubernetes secret with the SSH private key. Superseded by `helmOperator.git.secretName` if set. | `git.config.enabled` | `false` | Mount `$HOME/.gitconfig` via Secret into the Flux and HelmOperator Pods, allowing for custom global Git configuration | `git.config.secretName` | `Computed` | Kubernetes secret with the global Git configuration | `git.config.data` | `None` | Global Git configuration per [git-config](https://git-scm.com/docs/git-config) | `gpgKeys.secretName` | `None` | Kubernetes secret with GPG keys the Flux daemon should import +| `gpgKeys.configMapName` | `None` | Kubernetes config map with public GPG keys the Flux daemon should import | `ssh.known_hosts` | `None` | The contents of an SSH `known_hosts` file, if you need to supply host key(s) | `registry.pollInterval` | `5m` | Period at which to check for updated images | `registry.rps` | `200` | Maximum registry requests per second per host | `registry.burst` | `125` | Maximum number of warmer connections to remote and memcache -| `registry.trace` | `false` | Output trace of image registry requests to log +| `registry.trace` | `false` | Output trace of image registry requests to log | `registry.insecureHosts` | `None` | Use HTTP rather than HTTPS for the image registry domains | `registry.cacheExpiry` | `None` | Duration to keep cached image info (deprecated) | `registry.excludeImage` | `None` | Do not scan images that match these glob expressions; if empty, 'k8s.gcr.io/*' images are excluded @@ -240,6 +247,7 @@ The following tables lists the configurable parameters of the Weave Flux chart a | `memcached.pullSecret` | `None` | Image pull secret | `memcached.repository` | `memcached` | Image repository | `memcached.resources` | `None` | CPU/memory resource requests/limits for memcached +| `memcached.securityContext` | [See values.yaml](/chart/flux/values.yaml#L192-L195) | Container security context for memcached | `helmOperator.create` | `false` | If `true`, install the 
Helm operator | `helmOperator.createCRD` | `true` | Create the `v1beta1` and `v1alpha2` Flux CRDs. Dependent on `helmOperator.create=true` | `helmOperator.repository` | `docker.io/weaveworks/helm-operator` | Helm operator image repository @@ -277,6 +285,7 @@ The following tables lists the configurable parameters of the Weave Flux chart a | `kube.config` | [See values.yaml](/chart/flux/values.yaml#L151-L165) | Override for kubectl default config in the Flux pod(s). | `prometheus.enabled` | `false` | If enabled, adds prometheus annotations to Flux and helmOperator pod(s) | `syncGarbageCollection.enabled` | `false` | If enabled, fluxd will delete resources that it created, but are no longer present in git (experimental, see [garbage collection](/site/garbagecollection.md)) +| `syncGarbageCollection.dry` | `false` | If enabled, fluxd won't delete any resources, but log the garbage collection output (experimental, see [garbage collection](/site/garbagecollection.md)) Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example: diff --git a/chart/flux/templates/deployment.yaml b/chart/flux/templates/deployment.yaml index b08e77803..e292dbabe 100644 --- a/chart/flux/templates/deployment.yaml +++ b/chart/flux/templates/deployment.yaml @@ -75,8 +75,14 @@ spec: secretName: {{ .Values.gpgKeys.secretName }} defaultMode: 0400 {{- end }} + {{- if .Values.gpgKeys.configMapName }} + - name: gpg-public-keys + configMap: + name: {{ .Values.gpgKeys.configMapName }} + defaultMode: 0400 + {{- end }} {{- if .Values.extraVolumes }} -{{ toYaml .Values.extraVolumes | indent 8 }} +{{ toYaml .Values.extraVolumes | indent 6 }} {{- end }} {{- if .Values.initContainers }} initContainers: @@ -124,7 +130,12 @@ spec: {{- end }} {{- if .Values.gpgKeys.secretName }} - name: gpg-keys - mountPath: /root/gpg-import + mountPath: /root/gpg-import/private + readOnly: true + {{- end }} + {{- if .Values.gpgKeys.configMapName }} + - name: gpg-public-keys + mountPath: /root/gpg-import/public readOnly: true {{- end }} {{- if .Values.extraVolumeMounts }} @@ -137,6 +148,9 @@ spec: {{ toYaml .Values.extraEnvs | indent 10 }} {{- end }} args: + {{- if .Values.logFormat }} + - --log-format={{ .Values.logFormat }} + {{end}} - --ssh-keygen-dir=/var/fluxd/keygen - --k8s-secret-name={{ .Values.git.secretName | default (printf "%s-git-deploy" (include "flux.fullname" .)) }} - --memcached-hostname={{ template "flux.fullname" . 
}}-memcached @@ -148,12 +162,19 @@ spec: - --git-path={{ .Values.git.path }} - --git-user={{ .Values.git.user }} - --git-email={{ .Values.git.email }} - {{- if .Values.gpgKeys.secretName }} - - --git-gpg-key-import=/root/gpg-import - {{- end }} + {{- if (and .Values.gpgKeys.secretName .Values.gpgKeys.configMapName) }} + - --git-gpg-key-import=/root/gpg-import/private,/root/gpg-import/public + {{- else if .Values.gpgKeys.secretName }} + - --git-gpg-key-import=/root/gpg-import/private + {{- else if .Values.gpgKeys.configMapName }} + - --git-gpg-key-import=/root/gpg-import/public + {{- end -}} {{- if .Values.git.signingKey }} - --git-signing-key={{ .Values.git.signingKey }} {{- end }} + {{- if .Values.git.verifySignatures }} + - --git-verify-signatures + {{- end }} - --git-set-author={{ .Values.git.setAuthor }} - --git-poll-interval={{ .Values.git.pollInterval }} - --git-timeout={{ .Values.git.timeout }} @@ -191,14 +212,26 @@ spec: - --connect=wss://cloud.weave.works/api/flux - --token={{ .Values.token }} {{- end }} - {{- if .Values.syncGarbageCollection.enabled }} + {{- if and .Values.syncGarbageCollection.enabled (not .Values.syncGarbageCollection.dry) }} - --sync-garbage-collection={{ .Values.syncGarbageCollection.enabled }} + {{- else if .Values.syncGarbageCollection.dry }} + - --sync-garbage-collection-dry={{ .Values.syncGarbageCollection.dry }} {{- end }} {{- if .Values.additionalArgs }} {{ toYaml .Values.additionalArgs | indent 10 }} {{- end }} resources: {{ toYaml .Values.resources | indent 12 }} +{{- if .Values.extraContainers }} +{{ toYaml .Values.extraContainers | indent 8}} +{{- end }} +{{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy }} +{{- end }} +{{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} +{{- end }} {{- with .Values.nodeSelector }} nodeSelector: {{ toYaml . 
| indent 8 }} diff --git a/chart/flux/templates/helm-operator-deployment.yaml b/chart/flux/templates/helm-operator-deployment.yaml index c11da139a..8308635eb 100644 --- a/chart/flux/templates/helm-operator-deployment.yaml +++ b/chart/flux/templates/helm-operator-deployment.yaml @@ -118,6 +118,9 @@ spec: mountPath: /var/fluxd/helm/repository/cache {{- end }} args: + {{- if .Values.logFormat }} + - --log-format={{ .Values.logFormat }} + {{end}} - --git-timeout={{ $gitTimeout }} - --git-poll-interval={{ $gitPollInterval }} - --charts-sync-interval={{ .Values.helmOperator.chartsSyncInterval }} diff --git a/chart/flux/templates/memcached.yaml b/chart/flux/templates/memcached.yaml index f3a6458c1..a23c14a76 100755 --- a/chart/flux/templates/memcached.yaml +++ b/chart/flux/templates/memcached.yaml @@ -41,6 +41,8 @@ spec: containerPort: 11211 resources: {{ toYaml .Values.memcached.resources | indent 10 }} + securityContext: +{{ toYaml .Values.memcached.securityContext | indent 10 }} {{- with .Values.memcached.nodeSelector }} nodeSelector: {{ toYaml . | indent 8 }} diff --git a/chart/flux/values.yaml b/chart/flux/values.yaml index e6ae01740..9e0750b3a 100644 --- a/chart/flux/values.yaml +++ b/chart/flux/values.yaml @@ -5,9 +5,11 @@ token: "" replicaCount: 1 +logFormat: fmt + image: repository: docker.io/weaveworks/flux - tag: 1.12.2 + tag: 1.12.3 pullPolicy: IfNotPresent pullSecret: @@ -20,7 +22,7 @@ helmOperator: create: false createCRD: true repository: docker.io/weaveworks/helm-operator - tag: 0.9.0 + tag: 0.9.2 pullPolicy: IfNotPresent pullSecret: # Limit the operator scope to a single namespace @@ -104,9 +106,21 @@ extraVolumeMounts: [] extraVolumes: [] +# Optional DNS settings, configuring the ndots option may resolve +# nslookup issues on some Kubernetes setups. +# dnsPolicy: "None" +# dnsConfig: +# options: +# - name: ndots +# value: "1" + gpgKeys: # These keys will be imported into GPG in the Flux container. 
secretName: "" + # These keys will be imported into GPG in the Flux container. + # NB: should only be used for public keys used to verify the + # signatures of commits. + configMapName: "" git: # URL of git repo with Kubernetes manifests; e.g. git.url=ssh://git@github.com/weaveworks/flux-get-started @@ -121,6 +135,8 @@ git: email: "support@weave.works" # If set, commits will be signed with this GPG key. signingKey: "" + # If set, the signatures of the sync tag and commits will be verified. + verifySignatures: false # If set, the author of git commits will reflect the user who initiated the commit and will differ from the git committer. setAuthor: false # Label to keep track of sync progress @@ -177,12 +193,16 @@ registry: memcached: repository: memcached - tag: 1.4.25 + tag: 1.5.15 pullSecret: createClusterIP: true verbose: false maxItemSize: 5m maxMemory: 512 + securityContext: + runAsUser: 11211 + runAsGroup: 11211 + allowPrivilegeEscalation: false nodeSelector: {} tolerations: [] affinity: {} @@ -231,6 +251,7 @@ prometheus: syncGarbageCollection: enabled: false + dry: false # Add your own init container or uncomment and modify the given example. initContainers: {} @@ -241,3 +262,6 @@ initContainers: {} # volumeMounts: # - mountPath: /tmp/flux-deploy-key # name: flux-deploy-key + +# Additional containers to be added to the flux pod. +extraContainers: [] diff --git a/cluster/kubernetes/cached_disco.go b/cluster/kubernetes/cached_disco.go index 60420e370..21963d029 100644 --- a/cluster/kubernetes/cached_disco.go +++ b/cluster/kubernetes/cached_disco.go @@ -60,33 +60,6 @@ func (d *cachedDiscovery) ServerResourcesForGroupVersion(groupVersion string) (* // the only avenue of a change to the API resources in a running // system is CRDs being added, updated or deleted. 
func MakeCachedDiscovery(d discovery.DiscoveryInterface, c crd.Interface, shutdown <-chan struct{}) discovery.CachedDiscoveryInterface { - result, _, _ := makeCachedDiscovery(d, c, shutdown, makeInvalidatingHandler) - return result -} - -// --- - -func makeInvalidatingHandler(cached discovery.CachedDiscoveryInterface) toolscache.ResourceEventHandler { - var handler toolscache.ResourceEventHandler = toolscache.ResourceEventHandlerFuncs{ - AddFunc: func(_ interface{}) { - cached.Invalidate() - }, - UpdateFunc: func(_, _ interface{}) { - cached.Invalidate() - }, - DeleteFunc: func(_ interface{}) { - cached.Invalidate() - }, - } - return handler -} - -type makeHandle func(discovery.CachedDiscoveryInterface) toolscache.ResourceEventHandler - -// makeCachedDiscovery constructs a cached discovery client, with more -// flexibility than MakeCachedDiscovery; e.g., with extra handlers for -// testing. -func makeCachedDiscovery(d discovery.DiscoveryInterface, c crd.Interface, shutdown <-chan struct{}, handlerFn makeHandle) (*cachedDiscovery, toolscache.Store, toolscache.Controller) { cachedDisco := &cachedDiscovery{CachedDiscoveryInterface: memory.NewMemCacheClient(d)} // We have an empty cache, so it's _a priori_ invalid. 
(Yes, that's the zero value, but better safe than sorry) cachedDisco.Invalidate() @@ -100,12 +73,21 @@ func makeCachedDiscovery(d discovery.DiscoveryInterface, c crd.Interface, shutdo return crdClient.Watch(options) }, } - - handler := handlerFn(cachedDisco) - store, controller := toolscache.NewInformer(lw, &crdv1beta1.CustomResourceDefinition{}, 0, handler) + handle := toolscache.ResourceEventHandlerFuncs{ + AddFunc: func(_ interface{}) { + cachedDisco.Invalidate() + }, + UpdateFunc: func(_, _ interface{}) { + cachedDisco.Invalidate() + }, + DeleteFunc: func(_ interface{}) { + cachedDisco.Invalidate() + }, + } + _, controller := toolscache.NewInformer(lw, &crdv1beta1.CustomResourceDefinition{}, 0, handle) go cachedDisco.invalidatePeriodically(shutdown) go controller.Run(shutdown) - return cachedDisco, store, controller + return cachedDisco } func (d *cachedDiscovery) invalidatePeriodically(shutdown <-chan struct{}) { diff --git a/cluster/kubernetes/cached_disco_test.go b/cluster/kubernetes/cached_disco_test.go index 6c56cb2dd..f0d9eb1f4 100644 --- a/cluster/kubernetes/cached_disco_test.go +++ b/cluster/kubernetes/cached_disco_test.go @@ -4,33 +4,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" crdv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" crdfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/discovery" - toolscache "k8s.io/client-go/tools/cache" ) -type chainHandler struct { - first toolscache.ResourceEventHandler - next toolscache.ResourceEventHandler -} - -func (h chainHandler) OnAdd(obj interface{}) { - h.first.OnAdd(obj) - h.next.OnAdd(obj) -} - -func (h chainHandler) OnUpdate(old, new interface{}) { - h.first.OnUpdate(old, new) - h.next.OnUpdate(old, new) -} - -func (h chainHandler) OnDelete(old interface{}) { - h.first.OnDelete(old) - h.next.OnDelete(old) -} - func TestCachedDiscovery(t *testing.T) { coreClient := 
makeFakeClient() @@ -55,20 +34,7 @@ func TestCachedDiscovery(t *testing.T) { shutdown := make(chan struct{}) defer close(shutdown) - // this extra handler means we can synchronise on the add later - // being processed - allowAdd := make(chan interface{}) - - addHandler := toolscache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - allowAdd <- obj - }, - } - makeHandler := func(d discovery.CachedDiscoveryInterface) toolscache.ResourceEventHandler { - return chainHandler{first: makeInvalidatingHandler(d), next: addHandler} - } - - cachedDisco, store, _ := makeCachedDiscovery(coreClient.Discovery(), crdClient, shutdown, makeHandler) + cachedDisco := MakeCachedDiscovery(coreClient.Discovery(), crdClient, shutdown) saved := getDefaultNamespace getDefaultNamespace = func() (string, error) { return "bar-ns", nil } @@ -109,26 +75,19 @@ func TestCachedDiscovery(t *testing.T) { } // Wait for the update to "go through" - select { - case <-allowAdd: - break - case <-time.After(time.Second): - t.Fatal("timed out waiting for Add to happen") - } - - _, exists, err := store.Get(myCRD) - if err != nil { - t.Error(err) - } - if !exists { - t.Error("does not exist") - } - - namespaced, err = namespacer.lookupNamespaced("foo/v1", "Custom", nil) - if err != nil { - t.Fatal(err) - } - if namespaced { - t.Error("got true from lookupNamespaced, expecting false (after changing it)") + c := time.After(time.Second) +loop: + for { + select { + default: + namespaced, err = namespacer.lookupNamespaced("foo/v1", "Custom", nil) + assert.NoError(t, err) + if !namespaced { + break loop + } + time.Sleep(10 * time.Millisecond) + case <-c: + t.Fatal("timed out waiting for Update to happen") + } } } diff --git a/cluster/kubernetes/kubernetes.go b/cluster/kubernetes/kubernetes.go index dbbd4d715..5de47558a 100644 --- a/cluster/kubernetes/kubernetes.go +++ b/cluster/kubernetes/kubernetes.go @@ -85,6 +85,8 @@ func isAddon(obj k8sObject) bool { type Cluster struct { // Do garbage collection 
when syncing resources GC bool + // dry run garbage collection without syncing + DryGC bool client ExtendedClient applier Applier @@ -135,7 +137,8 @@ func (c *Cluster) SomeWorkloads(ids []flux.ResourceID) (res []cluster.Workload, resourceKind, ok := resourceKinds[kind] if !ok { - return nil, fmt.Errorf("Unsupported kind %v", kind) + c.logger.Log("warning", "unsupported kind", "resource", id) + continue } workload, err := resourceKind.getWorkload(c, ns, name) diff --git a/cluster/kubernetes/manifests.go b/cluster/kubernetes/manifests.go index f02ad309d..0dcf28237 100644 --- a/cluster/kubernetes/manifests.go +++ b/cluster/kubernetes/manifests.go @@ -1,6 +1,7 @@ package kubernetes import ( + "bytes" "fmt" "strings" @@ -107,16 +108,60 @@ func (m *manifests) setEffectiveNamespaces(manifests map[string]kresource.KubeMa return result, nil } -func (m *manifests) LoadManifests(base string, paths []string) (map[string]resource.Resource, error) { - manifests, err := kresource.Load(base, paths) +func (m *manifests) LoadManifests(baseDir string, paths []string) (map[string]resource.Resource, error) { + manifests, err := kresource.Load(baseDir, paths) if err != nil { return nil, err } return m.setEffectiveNamespaces(manifests) } -func (m *manifests) UpdateImage(def []byte, id flux.ResourceID, container string, image image.Ref) ([]byte, error) { +func (m *manifests) ParseManifest(def []byte, source string) (map[string]resource.Resource, error) { + resources, err := kresource.ParseMultidoc(def, source) + if err != nil { + return nil, err + } + // Note: setEffectiveNamespaces() won't work for CRD instances whose CRD is yet to be created + // (due to the CRD not being present in kresources). + // We could get out of our way to fix this (or give a better error) but: + // 1. With the exception of HelmReleases CRD instances are not workloads anyways. + // 2. The problem is eventually fixed by the first successful sync. 
+ result, err := m.setEffectiveNamespaces(resources) + if err != nil { + return nil, err + } + return result, nil +} + +func (m *manifests) SetWorkloadContainerImage(def []byte, id flux.ResourceID, container string, image image.Ref) ([]byte, error) { return updateWorkload(def, id, container, image) } -// UpdatePolicies and ServicesWithPolicies in policies.go +func (m *manifests) CreateManifestPatch(originalManifests, modifiedManifests []byte, originalSource, modifiedSource string) ([]byte, error) { + return createManifestPatch(originalManifests, modifiedManifests, originalSource, modifiedSource) +} + +func (m *manifests) ApplyManifestPatch(originalManifests, patchManifests []byte, originalSource, patchSource string) ([]byte, error) { + return applyManifestPatch(originalManifests, patchManifests, originalSource, patchSource) +} + +func (m *manifests) AppendManifestToBuffer(manifest []byte, buffer *bytes.Buffer) error { + return appendYAMLToBuffer(manifest, buffer) +} + +func appendYAMLToBuffer(manifest []byte, buffer *bytes.Buffer) error { + separator := "---\n" + bytes := buffer.Bytes() + if len(bytes) > 0 && bytes[len(bytes)-1] != '\n' { + separator = "\n---\n" + } + if _, err := buffer.WriteString(separator); err != nil { + return fmt.Errorf("cannot write to internal buffer: %s", err) + } + if _, err := buffer.Write(manifest); err != nil { + return fmt.Errorf("cannot write to internal buffer: %s", err) + } + return nil +} + +// UpdateWorkloadPolicies in policies.go diff --git a/cluster/kubernetes/patch.go b/cluster/kubernetes/patch.go new file mode 100644 index 000000000..808f795f5 --- /dev/null +++ b/cluster/kubernetes/patch.go @@ -0,0 +1,234 @@ +package kubernetes + +import ( + "bytes" + "fmt" + "sort" + + "github.com/evanphx/json-patch" + jsonyaml "github.com/ghodss/yaml" + "github.com/imdario/mergo" + "gopkg.in/yaml.v2" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + 
"k8s.io/apimachinery/pkg/util/strategicpatch" + k8sscheme "k8s.io/client-go/kubernetes/scheme" + + "github.com/weaveworks/flux" + "github.com/weaveworks/flux/cluster/kubernetes/resource" +) + +func createManifestPatch(originalManifests, modifiedManifests []byte, originalSource, modifiedSource string) ([]byte, error) { + originalResources, err := resource.ParseMultidoc(originalManifests, originalSource) + if err != nil { + fmt.Errorf("cannot parse %s: %s", originalSource, err) + } + + modifiedResources, err := resource.ParseMultidoc(modifiedManifests, modifiedSource) + if err != nil { + fmt.Errorf("cannot parse %s: %s", modifiedSource, err) + } + // Sort output by resource identifiers + var originalIDs []string + for id, _ := range originalResources { + originalIDs = append(originalIDs, id) + } + sort.Strings(originalIDs) + + buf := bytes.NewBuffer(nil) + scheme := getFullScheme() + for _, id := range originalIDs { + originalResource := originalResources[id] + modifiedResource, ok := modifiedResources[id] + if !ok { + // Only generate patches for resources present in both files + continue + } + patch, err := getPatch(originalResource, modifiedResource, scheme) + if err != nil { + return nil, fmt.Errorf("cannot obtain patch for resource %s: %s", id, err) + } + if bytes.Equal(patch, []byte("{}\n")) { + // Avoid outputting empty patches + continue + } + if err := appendYAMLToBuffer(patch, buf); err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} + +func applyManifestPatch(originalManifests, patchManifests []byte, originalSource, patchSource string) ([]byte, error) { + originalResources, err := resource.ParseMultidoc(originalManifests, originalSource) + if err != nil { + return nil, fmt.Errorf("cannot parse %s: %s", originalSource, err) + } + + patchResources, err := resource.ParseMultidoc(patchManifests, patchSource) + if err != nil { + return nil, fmt.Errorf("cannot parse %s: %s", patchSource, err) + } + + // Make sure all patch resources have a 
matching resource + for id, patchResource := range patchResources { + if _, ok := originalResources[id]; !ok { + return nil, fmt.Errorf("missing resource (%s) for patch", resourceID(patchResource)) + } + } + + // Sort output by resource identifiers + var originalIDs []string + for id, _ := range originalResources { + originalIDs = append(originalIDs, id) + } + sort.Strings(originalIDs) + + buf := bytes.NewBuffer(nil) + scheme := getFullScheme() + for _, id := range originalIDs { + originalResource := originalResources[id] + resourceBytes := originalResource.Bytes() + if patchedResource, ok := patchResources[id]; ok { + // There was a patch, apply it + patched, err := applyPatch(originalResource, patchedResource, scheme) + if err != nil { + return nil, fmt.Errorf("cannot obtain patch for resource %s: %s", id, err) + } + resourceBytes = patched + } + if err := appendYAMLToBuffer(resourceBytes, buf); err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} + +func getFullScheme() *runtime.Scheme { + fullScheme := runtime.NewScheme() + utilruntime.Must(k8sscheme.AddToScheme(fullScheme)) + // HelmRelease and FluxHelmRelease are intentionally not added to the scheme. + // This is done for two reasons: + // 1. The kubernetes strategic merge patcher chokes on the freeform + // values under `values:`. + // 2. External tools like kustomize won't be able to apply SMPs + // on Custom Resources, thus we use a normal jsonmerge instead. 
+ // + // utilruntime.Must(fluxscheme.AddToScheme(fullScheme)) + return fullScheme +} + +func getPatch(originalManifest resource.KubeManifest, modifiedManifest resource.KubeManifest, scheme *runtime.Scheme) ([]byte, error) { + groupVersion, err := schema.ParseGroupVersion(originalManifest.GroupVersion()) + if err != nil { + return nil, fmt.Errorf("cannot parse groupVersion %q: %s", originalManifest.GroupVersion(), err) + } + manifest1JSON, err := jsonyaml.YAMLToJSON(originalManifest.Bytes()) + if err != nil { + return nil, fmt.Errorf("cannot transform original resource (%s) to JSON: %s", + resourceID(originalManifest), err) + } + manifest2JSON, err := jsonyaml.YAMLToJSON(modifiedManifest.Bytes()) + if err != nil { + return nil, fmt.Errorf("cannot transform modified resource (%s) to JSON: %s", + resourceID(modifiedManifest), err) + } + gvk := groupVersion.WithKind(originalManifest.GetKind()) + obj, err := scheme.New(gvk) + var patchJSON []byte + switch { + case runtime.IsNotRegisteredError(err): + // try a normal JSON merge patch + patchJSON, err = jsonpatch.CreateMergePatch(manifest1JSON, manifest2JSON) + case err != nil: + err = fmt.Errorf("cannot obtain scheme for GroupVersionKind %q: %s", gvk, err) + default: + patchJSON, err = strategicpatch.CreateTwoWayMergePatch(manifest1JSON, manifest2JSON, obj) + } + if err != nil { + return nil, err + } + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) 
when unmarshalling to interface{} + err = yaml.Unmarshal(patchJSON, &jsonObj) + if err != nil { + return nil, fmt.Errorf("cannot parse patch (resource %s): %s", + resourceID(originalManifest), err) + } + // Make sure the non-empty patches come with metadata so that they can be matched in multidoc yaml context + if m, ok := jsonObj.(map[interface{}]interface{}); ok && len(m) > 0 { + jsonObj, err = addIdentifyingData(originalManifest.GroupVersion(), + originalManifest.GetKind(), originalManifest.GetName(), originalManifest.GetNamespace(), m) + } + if err != nil { + return nil, fmt.Errorf("cannot add metadata to patch (resource %s): %s", resourceID(originalManifest), err) + } + patch, err := yaml.Marshal(jsonObj) + if err != nil { + return nil, fmt.Errorf("cannot transform updated patch (resource %s) to YAML: %s", + resourceID(originalManifest), err) + } + return patch, nil +} + +func addIdentifyingData(apiVersion string, kind string, name string, namespace string, + obj map[interface{}]interface{}) (map[interface{}]interface{}, error) { + + toMerge := map[interface{}]interface{}{} + toMerge["apiVersion"] = apiVersion + toMerge["kind"] = kind + metadata := map[string]string{ + "name": name, + } + if len(namespace) > 0 { + metadata["namespace"] = namespace + } + toMerge["metadata"] = metadata + err := mergo.Merge(&obj, toMerge) + return obj, err +} + +func applyPatch(originalManifest, patchManifest resource.KubeManifest, scheme *runtime.Scheme) ([]byte, error) { + groupVersion, err := schema.ParseGroupVersion(originalManifest.GroupVersion()) + if err != nil { + return nil, fmt.Errorf("cannot parse groupVersion %q: %s", originalManifest.GroupVersion(), err) + } + originalJSON, err := jsonyaml.YAMLToJSON(originalManifest.Bytes()) + if err != nil { + return nil, fmt.Errorf("cannot transform original resource (%s) to JSON: %s", + resourceID(originalManifest), err) + } + patchJSON, err := jsonyaml.YAMLToJSON(patchManifest.Bytes()) + if err != nil { + return nil, 
fmt.Errorf("cannot transform patch resource (%s) to JSON: %s", + resourceID(patchManifest), err) + } + obj, err := scheme.New(groupVersion.WithKind(originalManifest.GetKind())) + var patchedJSON []byte + switch { + case runtime.IsNotRegisteredError(err): + // try a normal JSON merging + patchedJSON, err = jsonpatch.MergePatch(originalJSON, patchJSON) + default: + patchedJSON, err = strategicpatch.StrategicMergePatch(originalJSON, patchJSON, obj) + } + if err != nil { + return nil, fmt.Errorf("cannot patch resource %s: %s", resourceID(originalManifest), err) + } + patched, err := jsonyaml.JSONToYAML(patchedJSON) + if err != nil { + return nil, fmt.Errorf("cannot transform patched resource (%s) to YAML: %s", + resourceID(originalManifest), err) + } + return patched, nil +} + +// resourceID works like Resource.ResourceID() but avoids namespaces, +// since they may be incorrect +func resourceID(manifest resource.KubeManifest) flux.ResourceID { + return flux.MakeResourceID(manifest.GetNamespace(), manifest.GetKind(), manifest.GetKind()) +} diff --git a/cluster/kubernetes/patch_test.go b/cluster/kubernetes/patch_test.go new file mode 100644 index 000000000..da05802b3 --- /dev/null +++ b/cluster/kubernetes/patch_test.go @@ -0,0 +1,264 @@ +package kubernetes + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "gopkg.in/yaml.v2" + + "github.com/weaveworks/flux/cluster/kubernetes/resource" +) + +func TestPatchAndApply(t *testing.T) { + for _, entry := range []struct { + original string + modified string + }{ + { // unmodified + original: `apiVersion: v1 +kind: Namespace +metadata: + name: namespace +`, + modified: `apiVersion: v1 +kind: Namespace +metadata: + name: namespace +`, + }, + { + original: `apiVersion: flux.weave.works/v1beta1 +kind: HelmRelease +metadata: + name: ghost + namespace: demo + annotations: + flux.weave.works/automated: "false" + flux.weave.works/tag.chart-image: glob:1.21.* +spec: + values: + image: bitnami/ghost + tag: 1.21.5-r0 +`, + 
modified: `apiVersion: flux.weave.works/v1beta1 +kind: HelmRelease +metadata: + name: ghost + namespace: demo + annotations: + flux.weave.works/automated: "false" + flux.weave.works/tag.chart-image: glob:1.21.* +spec: + values: + image: bitnami/ghost + tag: 1.21.6 +`, + }, + { + original: `apiVersion: flux.weave.works/v1beta1 +kind: HelmRelease +metadata: + name: name + namespace: namespace + annotations: + flux.weave.works/tag.container: glob:1.4.* +spec: + values: + container: + image: + repository: stefanprodan/podinfo + tag: 1.4.4 +`, + modified: `apiVersion: flux.weave.works/v1beta1 +kind: HelmRelease +metadata: + name: name + namespace: namespace + annotations: + flux.weave.works/tag.container: glob:1.4.* +spec: + values: + container: + image: + repository: stefanprodan/podinfo + tag: 1.6 +`, + }, + { + original: `apiVersion: apps/v1 +kind: Deployment +metadata: + name: name +spec: + template: + spec: + containers: + - name: one + image: one:one + - name: two + image: two:two + initContainers: + - name: one + image: one:one +`, + modified: `apiVersion: apps/v1 +kind: Deployment +metadata: + name: name +spec: + template: + spec: + containers: + - name: one + image: oneplus:oneplus + - name: two + image: two:two + initContainers: + - name: one + image: one:one +`, + }, + { + original: `apiVersion: apps/v1 +kind: Deployment +metadata: + name: name +spec: + template: + spec: + containers: + - name: one + image: one:one +`, + modified: `apiVersion: apps/v1 +kind: Deployment +metadata: + name: name + annotations: + flux.weave.works/locked: "true" +spec: + template: + spec: + containers: + - name: one + image: oneplus:oneplus +`, + }, + { + original: `apiVersion: flux.weave.works/v1beta1 +kind: HelmRelease +metadata: + name: ghost + namespace: demo + annotations: + flux.weave.works/automated: "false" + flux.weave.works/tag.chart-image: glob:1.21.* +spec: + values: + image: bitnami/ghost + tag: 1.21.5-r0 +`, + modified: `apiVersion: flux.weave.works/v1beta1 +kind: 
HelmRelease +metadata: + name: ghost + namespace: demo + annotations: + flux.weave.works/automated: "true" +spec: + values: + image: bitnami/ghost + tag: 1.21.6 +`, + }, + { // multiple documents + original: `apiVersion: v1 +kind: Namespace +metadata: + name: namespace +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: name +spec: + template: + spec: + containers: + - name: one + image: one:one +--- +apiVersion: flux.weave.works/v1beta1 +kind: HelmRelease +metadata: + name: ghost + namespace: demo + annotations: + flux.weave.works/automated: "false" + flux.weave.works/tag.chart-image: glob:1.21.* +spec: + values: + image: bitnami/ghost + tag: 1.21.5-r0 +`, + + modified: `apiVersion: flux.weave.works/v1beta1 +kind: HelmRelease +metadata: + name: ghost + namespace: demo + annotations: + flux.weave.works/automated: "true" +spec: + values: + image: bitnami/ghost + tag: 1.21.6 +--- +apiVersion: v1 +kind: Namespace +metadata: + name: namespace +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: name + annotations: + flux.weave.works/locked: "true" +spec: + template: + spec: + containers: + - name: one + image: oneplus:oneplus +`, + }, + } { + // Make sure creating the patch works + patch, err := createManifestPatch([]byte(entry.original), []byte(entry.modified), "original", "updated") + assert.NoError(t, err, "original:\n%s\n\nupdated:\n%s", entry.original, entry.modified) + + // Make sure that when applying the patch to the original manifest, we obtain the updated manifest + patched, err := applyManifestPatch([]byte(entry.original), patch, "original", "patch") + assert.NoError(t, err) + expected, err := resource.ParseMultidoc([]byte(entry.modified), "updated") + assert.NoError(t, err) + actual, err := resource.ParseMultidoc(patched, "patched") + assert.NoError(t, err) + assert.Equal(t, len(actual), len(expected), "updated:\n%s\n\npatched:\n%s", entry.modified, string(patched)) + for id, expectedManifest := range expected { + actualManifest, ok := 
actual[id] + assert.True(t, ok, "resource %s missing in patched document stream", id) + equalYAML(t, string(expectedManifest.Bytes()), string(actualManifest.Bytes())) + } + } +} + +func equalYAML(t *testing.T, expected, actual string) { + var obj1, obj2 interface{} + err := yaml.Unmarshal([]byte(expected), &obj1) + assert.NoError(t, err) + err = yaml.Unmarshal([]byte(actual), &obj2) + assert.NoError(t, err) + assert.Equal(t, obj1, obj2, "expected:\n%s\n\nactual:\n%s", expected, actual) +} diff --git a/cluster/kubernetes/policies.go b/cluster/kubernetes/policies.go index f475d9f47..0136ee072 100644 --- a/cluster/kubernetes/policies.go +++ b/cluster/kubernetes/policies.go @@ -11,54 +11,8 @@ import ( "github.com/weaveworks/flux/resource" ) -func (m *manifests) UpdatePolicies(def []byte, id flux.ResourceID, update policy.Update) ([]byte, error) { - ns, kind, name := id.Components() - add, del := update.Add, update.Remove - - // We may be sent the pseudo-policy `policy.TagAll`, which means - // apply this filter to all containers. To do so, we need to know - // what all the containers are. - if tagAll, ok := update.Add.Get(policy.TagAll); ok { - add = add.Without(policy.TagAll) - containers, err := m.extractWorkloadContainers(def, id) - if err != nil { - return nil, err - } - - for _, container := range containers { - if tagAll == policy.PatternAll.String() { - del = del.Add(policy.TagPrefix(container.Name)) - } else { - add = add.Set(policy.TagPrefix(container.Name), tagAll) - } - } - } - - var args []string - for pol, val := range add { - if policy.Tag(pol) && !policy.NewPattern(val).Valid() { - return nil, fmt.Errorf("invalid tag pattern: %q", val) - } - args = append(args, fmt.Sprintf("%s%s=%s", kresource.PolicyPrefix, pol, val)) - } - for pol, _ := range del { - args = append(args, fmt.Sprintf("%s%s=", kresource.PolicyPrefix, pol)) - } - - return (KubeYAML{}).Annotate(def, ns, kind, name, args...) 
-} - -func (m *manifests) extractWorkloadContainers(def []byte, id flux.ResourceID) ([]resource.Container, error) { - kresources, err := kresource.ParseMultidoc(def, "stdin") - if err != nil { - return nil, err - } - // Note: setEffectiveNamespaces() won't work for CRD instances whose CRD is yet to be created - // (due to the CRD not being present in kresources). - // We could get out of our way to fix this (or give a better error) but: - // 1. With the exception of HelmReleases CRD instances are not workloads anyways. - // 2. The problem is eventually fixed by the first successful sync. - resources, err := m.setEffectiveNamespaces(kresources) +func (m *manifests) UpdateWorkloadPolicies(def []byte, id flux.ResourceID, update policy.Update) ([]byte, error) { + resources, err := m.ParseManifest(def, "stdin") if err != nil { return nil, err } @@ -70,5 +24,20 @@ func (m *manifests) extractWorkloadContainers(def []byte, id flux.ResourceID) ([ if !ok { return nil, errors.New("resource " + id.String() + " does not have containers") } - return workload.Containers(), nil + if err != nil { + return nil, err + } + + changes, err := resource.ChangesForPolicyUpdate(workload, update) + if err != nil { + return nil, err + } + + var args []string + for k, v := range changes { + args = append(args, fmt.Sprintf("%s%s=%s", kresource.PolicyPrefix, k, v)) + } + + ns, kind, name := id.Components() + return (KubeYAML{}).Annotate(def, ns, kind, name, args...) 
} diff --git a/cluster/kubernetes/policies_test.go b/cluster/kubernetes/policies_test.go index 4c37ed571..2438a21ec 100644 --- a/cluster/kubernetes/policies_test.go +++ b/cluster/kubernetes/policies_test.go @@ -182,7 +182,7 @@ func TestUpdatePolicies(t *testing.T) { caseOut := templToString(t, annotationsTemplate, c.out) resourceID := flux.MustParseResourceID("default:deployment/nginx") manifests := NewManifests(ConstNamespacer("default"), log.NewLogfmtLogger(os.Stdout)) - out, err := manifests.UpdatePolicies([]byte(caseIn), resourceID, c.update) + out, err := manifests.UpdateWorkloadPolicies([]byte(caseIn), resourceID, c.update) assert.Equal(t, c.wantErr, err != nil, "unexpected error value: %s", err) if !c.wantErr { assert.Equal(t, string(out), caseOut) @@ -196,7 +196,7 @@ func TestUpdatePolicies_invalidTagPattern(t *testing.T) { update := policy.Update{ Add: policy.Set{policy.TagPrefix("nginx"): "semver:invalid"}, } - _, err := (&manifests{}).UpdatePolicies(nil, resourceID, update) + _, err := (&manifests{}).UpdateWorkloadPolicies(nil, resourceID, update) assert.Error(t, err) } diff --git a/cluster/kubernetes/resource/resource.go b/cluster/kubernetes/resource/resource.go index 13b8376f2..4599a9a1b 100644 --- a/cluster/kubernetes/resource/resource.go +++ b/cluster/kubernetes/resource/resource.go @@ -23,6 +23,7 @@ type KubeManifest interface { resource.Resource GroupVersion() string GetKind() string + GetName() string GetNamespace() string SetNamespace(string) } @@ -59,6 +60,11 @@ func (o baseObject) GetKind() string { return o.Kind } +// GetName implements KubeManifest.GetName +func (o baseObject) GetName() string { + return o.Meta.Name +} + func (o baseObject) ResourceID() flux.ResourceID { ns := o.Meta.Namespace if ns == "" { diff --git a/cluster/kubernetes/sync.go b/cluster/kubernetes/sync.go index e6f7f67a7..acc858be4 100644 --- a/cluster/kubernetes/sync.go +++ b/cluster/kubernetes/sync.go @@ -62,12 +62,14 @@ func (c *Cluster) Sync(syncSet cluster.SyncSet) 
error { cs := makeChangeSet() var errs cluster.SyncError + var excluded []string for _, res := range syncSet.Resources { resID := res.ResourceID() + id := resID.String() if !c.IsAllowedResource(resID) { + excluded = append(excluded, id) continue } - id := resID.String() // make a record of the checksum, whether we stage it to // be applied or not, so that we don't delete it later. csum := sha1.Sum(res.Bytes()) @@ -93,6 +95,10 @@ func (c *Cluster) Sync(syncSet cluster.SyncSet) error { } } + if len(excluded) > 0 { + logger.Log("warning", "not applying resources; excluded by namespace constraints", "resources", strings.Join(excluded, ",")) + } + c.mu.Lock() defer c.mu.Unlock() c.muSyncErrors.RLock() @@ -101,8 +107,8 @@ func (c *Cluster) Sync(syncSet cluster.SyncSet) error { } c.muSyncErrors.RUnlock() - if c.GC { - deleteErrs, gcFailure := c.collectGarbage(syncSet, checksums, logger) + if c.GC || c.DryGC { + deleteErrs, gcFailure := c.collectGarbage(syncSet, checksums, logger, c.DryGC) if gcFailure != nil { return gcFailure } @@ -123,7 +129,8 @@ func (c *Cluster) Sync(syncSet cluster.SyncSet) error { func (c *Cluster) collectGarbage( syncSet cluster.SyncSet, checksums map[string]string, - logger log.Logger) (cluster.SyncError, error) { + logger log.Logger, + dryRun bool) (cluster.SyncError, error) { orphanedResources := makeChangeSet() @@ -138,10 +145,12 @@ func (c *Cluster) collectGarbage( switch { case !ok: // was not recorded as having been staged for application - c.logger.Log("info", "cluster resource not in resources to be synced; deleting", "resource", resourceID) - orphanedResources.stage("delete", res.ResourceID(), "", res.IdentifyingBytes()) + c.logger.Log("info", "cluster resource not in resources to be synced; deleting", "dry-run", dryRun, "resource", resourceID) + if !dryRun { + orphanedResources.stage("delete", res.ResourceID(), "", res.IdentifyingBytes()) + } case actual != expected: - c.logger.Log("warning", "resource to be synced has not been updated; 
skipping", "resource", resourceID) + c.logger.Log("warning", "resource to be synced has not been updated; skipping", "dry-run", dryRun, "resource", resourceID) continue default: // The checksum is the same, indicating that it was @@ -206,7 +215,7 @@ func (c *Cluster) getAllowedResourcesBySelector(selector string) (map[string]*ku return nil, err } for gv, e := range discErr.Groups { - if strings.HasSuffix(gv.Group, "metrics.k8s.io") { + if gv.Group == "metrics" || strings.HasSuffix(gv.Group, "metrics.k8s.io") { // The Metrics API tends to be misconfigured, causing errors. // We just ignore them, since it doesn't make sense to sync metrics anyways. continue @@ -540,8 +549,7 @@ func (c *Kubectl) doCommand(logger log.Logger, r io.Reader, args ...string) erro func makeMultidoc(objs []applyObject) *bytes.Buffer { buf := &bytes.Buffer{} for _, obj := range objs { - buf.WriteString("\n---\n") - buf.Write(obj.Payload) + appendYAMLToBuffer(obj.Payload, buf) } return buf } diff --git a/cluster/kubernetes/sync_test.go b/cluster/kubernetes/sync_test.go index 3bd780387..e83e297fd 100644 --- a/cluster/kubernetes/sync_test.go +++ b/cluster/kubernetes/sync_test.go @@ -32,6 +32,7 @@ import ( "github.com/weaveworks/flux/cluster" kresource "github.com/weaveworks/flux/cluster/kubernetes/resource" fluxfake "github.com/weaveworks/flux/integrations/client/clientset/versioned/fake" + "github.com/weaveworks/flux/resource" "github.com/weaveworks/flux/sync" ) @@ -383,8 +384,11 @@ metadata: if err != nil { t.Fatal(err) } - - err = sync.Sync("testset", resources, kube) + resourcesByID := map[string]resource.Resource{} + for _, r := range resources { + resourcesByID[r.ResourceID().String()] = r + } + err = sync.Sync("testset", resourcesByID, kube) if !expectErrors && err != nil { t.Error(err) } @@ -436,6 +440,23 @@ metadata: test(t, kube, "", "", false) }) + t.Run("sync adds and GCs dry run", func(t *testing.T) { + kube, _, cancel := setup(t) + defer cancel() + + // without GC on, resources 
persist if they are not mentioned in subsequent syncs. + test(t, kube, "", "", false) + test(t, kube, ns1+defs1, ns1+defs1, false) + test(t, kube, ns1+defs1+defs2, ns1+defs1+defs2, false) + test(t, kube, ns3+defs3, ns1+defs1+defs2+ns3+defs3, false) + + // with GC dry run the collect garbage routine is running but only logging results without collecting any resources + kube.DryGC = true + test(t, kube, ns1+defs2+ns3+defs3, ns1+defs1+defs2+ns3+defs3, false) + test(t, kube, ns1+defs1+defs2, ns1+defs1+defs2+ns3+defs3, false) + test(t, kube, "", ns1+defs1+defs2+ns3+defs3, false) + }) + t.Run("sync won't incorrectly delete non-namespaced resources", func(t *testing.T) { kube, _, cancel := setup(t) defer cancel() diff --git a/cluster/manifests.go b/cluster/manifests.go deleted file mode 100644 index fe74a4863..000000000 --- a/cluster/manifests.go +++ /dev/null @@ -1,68 +0,0 @@ -package cluster - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/weaveworks/flux" - "github.com/weaveworks/flux/image" - "github.com/weaveworks/flux/policy" - "github.com/weaveworks/flux/resource" -) - -type ManifestError struct { - error -} - -func ErrResourceNotFound(name string) error { - return ManifestError{fmt.Errorf("manifest for resource %s not found under manifests path", name)} -} - -// Manifests represents how a set of files are used as definitions of -// resources, e.g., in Kubernetes, YAML files describing Kubernetes -// resources. -type Manifests interface { - // Update the image in a manifest's bytes to that given - UpdateImage(def []byte, resourceID flux.ResourceID, container string, newImageID image.Ref) ([]byte, error) - // Load all the resource manifests under the paths - // given. `baseDir` is used to relativise the paths, which are - // supplied as absolute paths to directories or files; at least - // one path should be supplied, even if it is the same as `baseDir`. 
- LoadManifests(baseDir string, paths []string) (map[string]resource.Resource, error) - // UpdatePolicies modifies a manifest to apply the policy update specified - UpdatePolicies([]byte, flux.ResourceID, policy.Update) ([]byte, error) -} - -// UpdateManifest looks for the manifest for the identified resource, -// reads its contents, applies f(contents), and writes the results -// back to the file. -func UpdateManifest(m Manifests, root string, paths []string, id flux.ResourceID, f func(manifest []byte) ([]byte, error)) error { - resources, err := m.LoadManifests(root, paths) - if err != nil { - return err - } - - resource, ok := resources[id.String()] - if !ok { - return ErrResourceNotFound(id.String()) - } - - path := filepath.Join(root, resource.Source()) - def, err := ioutil.ReadFile(path) - if err != nil { - return err - } - - newDef, err := f(def) - if err != nil { - return err - } - - fi, err := os.Stat(path) - if err != nil { - return err - } - return ioutil.WriteFile(path, newDef, fi.Mode()) -} diff --git a/cluster/mock.go b/cluster/mock.go deleted file mode 100644 index bba43c792..000000000 --- a/cluster/mock.go +++ /dev/null @@ -1,63 +0,0 @@ -package cluster - -import ( - "github.com/weaveworks/flux" - "github.com/weaveworks/flux/image" - "github.com/weaveworks/flux/policy" - "github.com/weaveworks/flux/resource" - "github.com/weaveworks/flux/ssh" -) - -// Doubles as a cluster.Cluster and cluster.Manifests implementation -type Mock struct { - AllWorkloadsFunc func(maybeNamespace string) ([]Workload, error) - SomeWorkloadsFunc func([]flux.ResourceID) ([]Workload, error) - IsAllowedResourceFunc func(flux.ResourceID) bool - PingFunc func() error - ExportFunc func() ([]byte, error) - SyncFunc func(SyncSet) error - PublicSSHKeyFunc func(regenerate bool) (ssh.PublicKey, error) - UpdateImageFunc func(def []byte, id flux.ResourceID, container string, newImageID image.Ref) ([]byte, error) - LoadManifestsFunc func(base string, paths []string) 
(map[string]resource.Resource, error) - UpdatePoliciesFunc func([]byte, flux.ResourceID, policy.Update) ([]byte, error) -} - -func (m *Mock) AllWorkloads(maybeNamespace string) ([]Workload, error) { - return m.AllWorkloadsFunc(maybeNamespace) -} - -func (m *Mock) SomeWorkloads(s []flux.ResourceID) ([]Workload, error) { - return m.SomeWorkloadsFunc(s) -} - -func (m *Mock) IsAllowedResource(id flux.ResourceID) bool { - return m.IsAllowedResourceFunc(id) -} - -func (m *Mock) Ping() error { - return m.PingFunc() -} - -func (m *Mock) Export() ([]byte, error) { - return m.ExportFunc() -} - -func (m *Mock) Sync(c SyncSet) error { - return m.SyncFunc(c) -} - -func (m *Mock) PublicSSHKey(regenerate bool) (ssh.PublicKey, error) { - return m.PublicSSHKeyFunc(regenerate) -} - -func (m *Mock) UpdateImage(def []byte, id flux.ResourceID, container string, newImageID image.Ref) ([]byte, error) { - return m.UpdateImageFunc(def, id, container, newImageID) -} - -func (m *Mock) LoadManifests(base string, paths []string) (map[string]resource.Resource, error) { - return m.LoadManifestsFunc(base, paths) -} - -func (m *Mock) UpdatePolicies(def []byte, id flux.ResourceID, p policy.Update) ([]byte, error) { - return m.UpdatePoliciesFunc(def, id, p) -} diff --git a/cluster/mock/mock.go b/cluster/mock/mock.go new file mode 100644 index 000000000..ae6c47bef --- /dev/null +++ b/cluster/mock/mock.go @@ -0,0 +1,90 @@ +package mock + +import ( + "bytes" + + "github.com/weaveworks/flux" + "github.com/weaveworks/flux/cluster" + "github.com/weaveworks/flux/image" + "github.com/weaveworks/flux/manifests" + "github.com/weaveworks/flux/policy" + "github.com/weaveworks/flux/resource" + "github.com/weaveworks/flux/ssh" +) + +// Doubles as a cluster.Cluster and cluster.Manifests implementation +type Mock struct { + AllWorkloadsFunc func(maybeNamespace string) ([]cluster.Workload, error) + SomeWorkloadsFunc func([]flux.ResourceID) ([]cluster.Workload, error) + IsAllowedResourceFunc func(flux.ResourceID) 
bool + PingFunc func() error + ExportFunc func() ([]byte, error) + SyncFunc func(cluster.SyncSet) error + PublicSSHKeyFunc func(regenerate bool) (ssh.PublicKey, error) + SetWorkloadContainerImageFunc func(def []byte, id flux.ResourceID, container string, newImageID image.Ref) ([]byte, error) + LoadManifestsFunc func(base string, paths []string) (map[string]resource.Resource, error) + ParseManifestFunc func(def []byte, source string) (map[string]resource.Resource, error) + UpdateWorkloadPoliciesFunc func([]byte, flux.ResourceID, policy.Update) ([]byte, error) + CreateManifestPatchFunc func(originalManifests, modifiedManifests []byte, originalSource, modifiedSource string) ([]byte, error) + ApplyManifestPatchFunc func(originalManifests, patch []byte, originalSource, patchSource string) ([]byte, error) + AppendManifestToBufferFunc func([]byte, *bytes.Buffer) error +} + +var _ cluster.Cluster = &Mock{} +var _ manifests.Manifests = &Mock{} + +func (m *Mock) AllWorkloads(maybeNamespace string) ([]cluster.Workload, error) { + return m.AllWorkloadsFunc(maybeNamespace) +} + +func (m *Mock) SomeWorkloads(s []flux.ResourceID) ([]cluster.Workload, error) { + return m.SomeWorkloadsFunc(s) +} + +func (m *Mock) IsAllowedResource(id flux.ResourceID) bool { + return m.IsAllowedResourceFunc(id) +} + +func (m *Mock) Ping() error { + return m.PingFunc() +} + +func (m *Mock) Export() ([]byte, error) { + return m.ExportFunc() +} + +func (m *Mock) Sync(c cluster.SyncSet) error { + return m.SyncFunc(c) +} + +func (m *Mock) PublicSSHKey(regenerate bool) (ssh.PublicKey, error) { + return m.PublicSSHKeyFunc(regenerate) +} + +func (m *Mock) SetWorkloadContainerImage(def []byte, id flux.ResourceID, container string, newImageID image.Ref) ([]byte, error) { + return m.SetWorkloadContainerImageFunc(def, id, container, newImageID) +} + +func (m *Mock) LoadManifests(baseDir string, paths []string) (map[string]resource.Resource, error) { + return m.LoadManifestsFunc(baseDir, paths) +} + +func (m 
*Mock) ParseManifest(def []byte, source string) (map[string]resource.Resource, error) { + return m.ParseManifestFunc(def, source) +} + +func (m *Mock) UpdateWorkloadPolicies(def []byte, id flux.ResourceID, p policy.Update) ([]byte, error) { + return m.UpdateWorkloadPoliciesFunc(def, id, p) +} + +func (m *Mock) CreateManifestPatch(originalManifests, modifiedManifests []byte, originalSource, modifiedSource string) ([]byte, error) { + return m.CreateManifestPatchFunc(originalManifests, modifiedManifests, originalSource, modifiedSource) +} + +func (m *Mock) ApplyManifestPatch(originalManifests, patch []byte, originalSource, patchSource string) ([]byte, error) { + return m.ApplyManifestPatchFunc(originalManifests, patch, originalSource, patchSource) +} + +func (m *Mock) AppendManifestToBuffer(b []byte, buf *bytes.Buffer) error { + return m.AppendManifestToBufferFunc(b, buf) +} diff --git a/cmd/fluxctl/await.go b/cmd/fluxctl/await.go index 0885c641e..681e34691 100644 --- a/cmd/fluxctl/await.go +++ b/cmd/fluxctl/await.go @@ -70,6 +70,7 @@ func awaitJob(ctx context.Context, client api.Server, jobID job.ID) (job.Result, } switch j.StatusString { case job.StatusFailed: + result = j.Result return false, j case job.StatusSucceeded: if j.Err != "" { diff --git a/cmd/fluxctl/list_images_cmd.go b/cmd/fluxctl/list_images_cmd.go index 416bab759..5fbbba7d6 100644 --- a/cmd/fluxctl/list_images_cmd.go +++ b/cmd/fluxctl/list_images_cmd.go @@ -130,12 +130,20 @@ func (opts *imageListOpts) RunE(cmd *cobra.Command, args []string) error { } if printLine { createdAt := "" - if !available.CreatedAt.IsZero() { - createdAt = available.CreatedAt.Format(time.RFC822) + if !available.CreatedTS().IsZero() { + createdAt = available.CreatedTS().Format(time.RFC822) } fmt.Fprintf(out, "\t\t%s %s\t%s\n", running, tag, createdAt) } } + if !foundRunning { + running := "'->" + if currentTag == "" { + currentTag = "(untagged)" + } + fmt.Fprintf(out, "\t\t%s %s\t%s\n", running, currentTag, "?") + + } workloadName = 
"" } } diff --git a/cmd/fluxctl/list_workloads_cmd.go b/cmd/fluxctl/list_workloads_cmd.go index 8e8af38ef..715439531 100644 --- a/cmd/fluxctl/list_workloads_cmd.go +++ b/cmd/fluxctl/list_workloads_cmd.go @@ -63,7 +63,7 @@ func (opts *workloadListOpts) RunE(cmd *cobra.Command, args []string) error { fmt.Fprintf(w, "\t%s\t%s\t\t\n", c.Name, c.Current.ID) } } else { - fmt.Fprintf(w, "%s\t\t\t\t\n", workload.ID) + fmt.Fprintf(w, "%s\t\t\t%s\t%s\n", workload.ID, workload.Status, policies(workload)) } } w.Flush() diff --git a/cmd/fluxctl/portforward.go b/cmd/fluxctl/portforward.go index 272cf9bfd..3b3d2db3b 100644 --- a/cmd/fluxctl/portforward.go +++ b/cmd/fluxctl/portforward.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/2opremio/go-k8s-portforward" + "github.com/justinbarrick/go-k8s-portforward" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/cmd/fluxctl/release_cmd.go b/cmd/fluxctl/release_cmd.go index 05b565b59..4be590747 100644 --- a/cmd/fluxctl/release_cmd.go +++ b/cmd/fluxctl/release_cmd.go @@ -42,7 +42,7 @@ func newWorkloadRelease(parent *rootOpts) *workloadReleaseOpts { func (opts *workloadReleaseOpts) Command() *cobra.Command { cmd := &cobra.Command{ Use: "release", - Short: "Release a new version of a controller.", + Short: "Release a new version of a workload.", Example: makeExample( "fluxctl release -n default --workload=deployment/foo --update-image=library/hello:v2", "fluxctl release --all --update-image=library/hello:v2", @@ -56,10 +56,10 @@ func (opts *workloadReleaseOpts) Command() *cobra.Command { cmd.Flags().StringVarP(&opts.namespace, "namespace", "n", "default", "Workload namespace") // Note: we cannot define a shorthand for --workload since it clashes with the shorthand of --watch cmd.Flags().StringSliceVarP(&opts.workloads, "workload", "", []string{}, "List of workloads to release :/") - cmd.Flags().BoolVar(&opts.allWorkloads, "all", false, "Release all controllers") + 
cmd.Flags().BoolVar(&opts.allWorkloads, "all", false, "Release all workloads") cmd.Flags().StringVarP(&opts.image, "update-image", "i", "", "Update a specific image") cmd.Flags().BoolVar(&opts.allImages, "update-all-images", false, "Update all images to latest versions") - cmd.Flags().StringSliceVar(&opts.exclude, "exclude", []string{}, "List of controllers to exclude") + cmd.Flags().StringSliceVar(&opts.exclude, "exclude", []string{}, "List of workloads to exclude") cmd.Flags().BoolVar(&opts.dryRun, "dry-run", false, "Do not release anything; just report back what would have been done") cmd.Flags().BoolVar(&opts.interactive, "interactive", false, "Select interactively which containers to update") cmd.Flags().BoolVarP(&opts.force, "force", "f", false, "Disregard locks and container image filters (has no effect when used with --all or --update-all-images)") diff --git a/cmd/fluxctl/sync_cmd.go b/cmd/fluxctl/sync_cmd.go index f909735ec..f13fccf9b 100644 --- a/cmd/fluxctl/sync_cmd.go +++ b/cmd/fluxctl/sync_cmd.go @@ -3,6 +3,7 @@ package main import ( "context" "fmt" + "strings" "github.com/spf13/cobra" @@ -59,13 +60,15 @@ func (opts *syncOpts) RunE(cmd *cobra.Command, args []string) error { return err } result, err := awaitJob(ctx, opts.API, jobID) - if err != nil { + if isUnverifiedHead(err) { + fmt.Fprintf(cmd.OutOrStderr(), "Warning: %s\n", err) + } else if err != nil { fmt.Fprintf(cmd.OutOrStderr(), "Failed to complete sync job (ID %q)\n", jobID) return err } rev := result.Revision[:7] - fmt.Fprintf(cmd.OutOrStderr(), "HEAD of %s is %s\n", gitConfig.Remote.Branch, rev) + fmt.Fprintf(cmd.OutOrStderr(), "Revision of %s to apply is %s\n", gitConfig.Remote.Branch, rev) fmt.Fprintf(cmd.OutOrStderr(), "Waiting for %s to be applied ...\n", rev) err = awaitSync(ctx, opts.API, rev) if err != nil { @@ -74,3 +77,9 @@ func (opts *syncOpts) RunE(cmd *cobra.Command, args []string) error { fmt.Fprintln(cmd.OutOrStderr(), "Done.") return nil } + +func isUnverifiedHead(err error) 
bool { + return err != nil && + (strings.Contains(err.Error(), "branch HEAD in the git repo is not verified") && + strings.Contains(err.Error(), "last verified commit was")) +} diff --git a/cmd/fluxd/main.go b/cmd/fluxd/main.go index 999601d39..1e4dd210d 100644 --- a/cmd/fluxd/main.go +++ b/cmd/fluxd/main.go @@ -8,6 +8,7 @@ import ( "os" "os/exec" "os/signal" + "path/filepath" "runtime" "strconv" "strings" @@ -24,6 +25,7 @@ import ( k8sclientdynamic "k8s.io/client-go/dynamic" k8sclient "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" "k8s.io/klog" "github.com/weaveworks/flux/checkpoint" @@ -38,6 +40,7 @@ import ( "github.com/weaveworks/flux/image" integrations "github.com/weaveworks/flux/integrations/client/clientset/versioned" "github.com/weaveworks/flux/job" + "github.com/weaveworks/flux/manifests" "github.com/weaveworks/flux/registry" "github.com/weaveworks/flux/registry/cache" registryMemcache "github.com/weaveworks/flux/registry/cache/memcached" @@ -98,6 +101,7 @@ func main() { } // This mirrors how kubectl extracts information from the environment. 
var ( + logFormat = fs.String("log-format", "fmt", "change the log format.") listenAddr = fs.StringP("listen", "l", ":3030", "listen address where /metrics and API will be served") listenMetricsAddr = fs.String("listen-metrics", "", "listen address for /metrics endpoint") kubernetesKubectl = fs.String("kubernetes-kubectl", "", "optional, explicit path to kubectl tool") @@ -120,15 +124,18 @@ func main() { gitTimeout = fs.Duration("git-timeout", 20*time.Second, "duration after which git operations time out") // GPG commit signing - gitImportGPG = fs.String("git-gpg-key-import", "", "keys at the path given (either a file or a directory) will be imported for use in signing commits") - gitSigningKey = fs.String("git-signing-key", "", "if set, commits will be signed with this GPG key") + gitImportGPG = fs.StringSlice("git-gpg-key-import", []string{}, "keys at the paths given will be imported for use of signing and verifying commits") + gitSigningKey = fs.String("git-signing-key", "", "if set, commits Flux makes will be signed with this GPG key") + gitVerifySignatures = fs.Bool("git-verify-signatures", false, "if set, the signature of commits will be verified before Flux applies them") // syncing syncInterval = fs.Duration("sync-interval", 5*time.Minute, "apply config in git to cluster at least this often, even if there are no new commits") syncGC = fs.Bool("sync-garbage-collection", false, "experimental; delete resources that were created by fluxd, but are no longer in the git repo") + dryGC = fs.Bool("sync-garbage-collection-dry", false, "experimental; only log what would be garbage collected, rather than deleting. 
Implies --sync-garbage-collection") // registry memcachedHostname = fs.String("memcached-hostname", "memcached", "hostname for memcached service.") + memcachedPort = fs.Int("memcached-port", 11211, "memcached service port.") memcachedTimeout = fs.Duration("memcached-timeout", time.Second, "maximum time to wait before giving up on memcached requests.") memcachedService = fs.String("memcached-service", "memcached", "SRV service used to discover memcache servers.") @@ -147,16 +154,21 @@ func main() { registryRequire = fs.StringSlice("registry-require", nil, fmt.Sprintf(`exit with an error if auto-authentication with any of the given registries is not possible (possible values: {%s})`, strings.Join(RequireValues, ","))) // k8s-secret backed ssh keyring configuration + k8sInCluster = fs.Bool("k8s-in-cluster", true, "set this to true if fluxd is deployed as a container inside Kubernetes") k8sSecretName = fs.String("k8s-secret-name", "flux-git-deploy", "name of the k8s secret used to store the private SSH key") k8sSecretVolumeMountPath = fs.String("k8s-secret-volume-mount-path", "/etc/fluxd/ssh", "mount location of the k8s secret storing the private SSH key") k8sSecretDataKey = fs.String("k8s-secret-data-key", "identity", "data key holding the private SSH key within the k8s secret") k8sNamespaceWhitelist = fs.StringSlice("k8s-namespace-whitelist", []string{}, "experimental, optional: restrict the view of the cluster to the namespaces listed. 
All namespaces are included if this is not set") k8sAllowNamespace = fs.StringSlice("k8s-allow-namespace", []string{}, "experimental: restrict all operations to the provided namespaces") + // SSH key generation sshKeyBits = optionalVar(fs, &ssh.KeyBitsValue{}, "ssh-keygen-bits", "-b argument to ssh-keygen (default unspecified)") sshKeyType = optionalVar(fs, &ssh.KeyTypeValue{}, "ssh-keygen-type", "-t argument to ssh-keygen (default unspecified)") sshKeygenDir = fs.String("ssh-keygen-dir", "", "directory, ideally on a tmpfs volume, in which to generate new SSH keys when necessary") + // manifest generation + manifestGeneration = fs.Bool("manifest-generation", false, "experimental; search for .flux.yaml files to generate manifests") + upstreamURL = fs.String("connect", "", "connect to an upstream service e.g., Weave Cloud, at this base address") token = fs.String("token", "", "authentication token for upstream service") @@ -167,6 +179,16 @@ func main() { fs.MarkDeprecated("registry-cache-expiry", "no longer used; cache entries are expired adaptively according to how often they change") fs.MarkDeprecated("k8s-namespace-whitelist", "changed to --k8s-allow-namespace, use that instead") + var kubeConfig *string + { + // Set the default kube config + if home := homeDir(); home != "" { + kubeConfig = fs.String("kube-config", filepath.Join(home, ".kube", "config"), "the absolute path of the k8s config file.") + } else { + kubeConfig = fs.String("kube-config", "", "the absolute path of the k8s config file.") + } + } + // Explicitly initialize klog to enable stderr logging, // and parse our own flags. klog.InitFlags(nil) @@ -186,7 +208,14 @@ func main() { // Logger component. 
var logger log.Logger { - logger = log.NewLogfmtLogger(os.Stderr) + switch *logFormat { + case "json": + logger = log.NewJSONLogger(log.NewSyncWriter(os.Stderr)) + case "fmt": + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + default: + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + } logger = log.With(logger, "ts", log.DefaultTimestampUTC) logger = log.With(logger, "caller", log.DefaultCaller) } @@ -197,7 +226,7 @@ func main() { "type", "internal kubernetes error", "kubernetes_caller", log.Valuer(func() interface{} { _, file, line, _ := runtime.Caller(5) // we want to log one level deeper than k8sruntime.HandleError - idx := strings.Index(file, "/vendor/") + idx := strings.Index(file, "/k8s.io/") return file[idx+1:] + ":" + strconv.Itoa(line) })) logErrorUnlessAccessRelated := func(err error) { @@ -242,13 +271,13 @@ func main() { } // Import GPG keys, if we've been told where to look for them - if *gitImportGPG != "" { - keyfiles, err := gpg.ImportKeys(*gitImportGPG) + for _, p := range *gitImportGPG { + keyfiles, err := gpg.ImportKeys(p, *gitVerifySignatures) if err != nil { - logger.Log("error", "failed to import GPG keys", "err", err.Error()) + logger.Log("error", fmt.Sprintf("failed to import GPG key(s) from %s", p), "err", err.Error()) } if keyfiles != nil { - logger.Log("info", "imported GPG keys", "files", fmt.Sprintf("%v", keyfiles)) + logger.Log("info", fmt.Sprintf("imported GPG key(s) from %s", p), "files", fmt.Sprintf("%v", keyfiles)) } } @@ -278,21 +307,34 @@ func main() { }() // Cluster component. 
- var clusterVersion string - var sshKeyRing ssh.KeyRing - var k8s cluster.Cluster - var k8sManifests cluster.Manifests - var imageCreds func() registry.ImageCreds + + var restClientConfig *rest.Config { - restClientConfig, err := rest.InClusterConfig() - if err != nil { - logger.Log("err", err) - os.Exit(1) + if *k8sInCluster { + logger.Log("msg", "using in cluster config to connect to the cluster") + restClientConfig, err = rest.InClusterConfig() + if err != nil { + logger.Log("err", err) + os.Exit(1) + } + } else { + logger.Log("msg", fmt.Sprintf("using kube config: %q to connect to the cluster", *kubeConfig)) + restClientConfig, err = clientcmd.BuildConfigFromFlags("", *kubeConfig) + if err != nil { + logger.Log("err", err) + os.Exit(1) + } } - restClientConfig.QPS = 50.0 restClientConfig.Burst = 100 + } + var clusterVersion string + var sshKeyRing ssh.KeyRing + var k8s cluster.Cluster + var k8sManifests manifests.Manifests + var imageCreds func() registry.ImageCreds + { clientset, err := k8sclient.NewForConfig(restClientConfig) if err != nil { logger.Log("err", err) @@ -324,31 +366,36 @@ func main() { } clusterVersion = "kubernetes-" + serverVersion.GitVersion - namespace, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") - if err != nil { - logger.Log("err", err) - os.Exit(1) - } + if *k8sInCluster { + namespace, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + if err != nil { + logger.Log("err", err) + os.Exit(1) + } - sshKeyRing, err = kubernetes.NewSSHKeyRing(kubernetes.SSHKeyRingConfig{ - SecretAPI: clientset.CoreV1().Secrets(string(namespace)), - SecretName: *k8sSecretName, - SecretVolumeMountPath: *k8sSecretVolumeMountPath, - SecretDataKey: *k8sSecretDataKey, - KeyBits: sshKeyBits, - KeyType: sshKeyType, - KeyGenDir: *sshKeygenDir, - }) - if err != nil { - logger.Log("err", err) - os.Exit(1) - } + sshKeyRing, err = kubernetes.NewSSHKeyRing(kubernetes.SSHKeyRingConfig{ + SecretAPI: 
clientset.CoreV1().Secrets(string(namespace)), + SecretName: *k8sSecretName, + SecretVolumeMountPath: *k8sSecretVolumeMountPath, + SecretDataKey: *k8sSecretDataKey, + KeyBits: sshKeyBits, + KeyType: sshKeyType, + KeyGenDir: *sshKeygenDir, + }) + if err != nil { + logger.Log("err", err) + os.Exit(1) + } - publicKey, privateKeyPath := sshKeyRing.KeyPair() + publicKey, privateKeyPath := sshKeyRing.KeyPair() + + logger := log.With(logger, "component", "cluster") + logger.Log("identity", privateKeyPath) + logger.Log("identity.pub", strings.TrimSpace(publicKey.Key)) + } else { + sshKeyRing = ssh.NewNopSSHKeyRing() + } - logger := log.With(logger, "component", "cluster") - logger.Log("identity", privateKeyPath) - logger.Log("identity.pub", strings.TrimSpace(publicKey.Key)) logger.Log("host", restClientConfig.Host, "version", clusterVersion) kubectl := *kubernetesKubectl @@ -368,6 +415,7 @@ func main() { allowedNamespaces := append(*k8sNamespaceWhitelist, *k8sAllowNamespace...) k8sInst := kubernetes.NewCluster(client, kubectlApplier, sshKeyRing, logger, allowedNamespaces, *registryExcludeImage) k8sInst.GC = *syncGC + k8sInst.DryGC = *dryGC if err := k8sInst.Ping(); err != nil { logger.Log("ping", err) @@ -433,7 +481,7 @@ func main() { // if no memcached service is specified use the ClusterIP name instead of SRV records if *memcachedService == "" { memcacheClient = registryMemcache.NewFixedServerMemcacheClient(memcacheConfig, - fmt.Sprintf("%s:11211", *memcachedHostname)) + fmt.Sprintf("%s:%d", *memcachedHostname, *memcachedPort)) } else { memcacheClient = registryMemcache.NewMemcacheClient(memcacheConfig) } @@ -510,6 +558,7 @@ func main() { "user", *gitUser, "email", *gitEmail, "signing-key", *gitSigningKey, + "verify-signatures", *gitVerifySignatures, "sync-tag", *gitSyncTag, "notes-ref", *gitNotesRef, "set-author", *gitSetAuthor, @@ -521,20 +570,22 @@ func main() { } daemon := &daemon.Daemon{ - V: version, - Cluster: k8s, - Manifests: k8sManifests, - Registry: 
cacheRegistry, - ImageRefresh: make(chan image.Name, 100), // size chosen by fair dice roll - Repo: repo, - GitConfig: gitConfig, - Jobs: jobs, - JobStatusCache: &job.StatusCache{Size: 100}, - Logger: log.With(logger, "component", "daemon"), + V: version, + Cluster: k8s, + Manifests: k8sManifests, + Registry: cacheRegistry, + ImageRefresh: make(chan image.Name, 100), // size chosen by fair dice roll + Repo: repo, + GitConfig: gitConfig, + Jobs: jobs, + JobStatusCache: &job.StatusCache{Size: 100}, + Logger: log.With(logger, "component", "daemon"), + ManifestGenerationEnabled: *manifestGeneration, LoopVars: &daemon.LoopVars{ SyncInterval: *syncInterval, RegistryPollInterval: *registryPollInterval, - GitOpTimeout: *gitTimeout, + GitTimeout: *gitTimeout, + GitVerifySignatures: *gitVerifySignatures, }, } @@ -601,3 +652,15 @@ func main() { close(shutdown) shutdownWg.Wait() } + +func homeDir() string { + // nix + if h := os.Getenv("HOME"); h != "" { + return h + } + // windows + if h := os.Getenv("USERPROFILE"); h != "" { + return h + } + return "" +} diff --git a/cmd/helm-operator/main.go b/cmd/helm-operator/main.go index f2f80729b..78fca0d90 100644 --- a/cmd/helm-operator/main.go +++ b/cmd/helm-operator/main.go @@ -11,6 +11,7 @@ import ( "github.com/go-kit/kit/log" "github.com/spf13/pflag" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/util/workqueue" "k8s.io/klog" @@ -32,6 +33,8 @@ var ( versionFlag *bool + logFormat *string + kubeconfig *string master *string namespace *string @@ -77,6 +80,8 @@ func init() { versionFlag = fs.Bool("version", false, "print version and exit") + logFormat = fs.String("log-format", "fmt", "change the log format.") + kubeconfig = fs.String("kubeconfig", "", "path to a kubeconfig; required if out-of-cluster") master = fs.String("master", "", "address of the Kubernetes API server; overrides any value in kubeconfig; required if out-of-cluster") namespace = 
fs.String("allow-namespace", "", "if set, this limits the scope to a single namespace; if not specified, all namespaces will be watched") @@ -115,7 +120,14 @@ func main() { // init go-kit log { - logger = log.NewLogfmtLogger(os.Stderr) + switch *logFormat { + case "json": + logger = log.NewJSONLogger(log.NewSyncWriter(os.Stderr)) + case "fmt": + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + default: + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + } logger = log.With(logger, "ts", log.DefaultTimestampUTC) logger = log.With(logger, "caller", log.DefaultCaller) } @@ -166,19 +178,16 @@ func main() { TLSHostname: *tillerTLSHostname, }) - // The status updater, to keep track the release status for each - // HelmRelease. It runs as a separate loop for now. - statusUpdater := status.New(ifClient, kubeClient, helmClient, *namespace) - go statusUpdater.Loop(shutdown, log.With(logger, "component", "annotator")) - + // setup shared informer for HelmReleases nsOpt := ifinformers.WithNamespace(*namespace) ifInformerFactory := ifinformers.NewSharedInformerFactoryWithOptions(ifClient, *chartsSyncInterval, nsOpt) fhrInformer := ifInformerFactory.Flux().V1beta1().HelmReleases() - go ifInformerFactory.Start(shutdown) + // setup workqueue for HelmReleases queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ChartRelease") - // release instance is needed during the sync of git chart changes and during the sync of HelmRelease changes + // release instance is needed during the sync of git chart changes + // and during the sync of HelmRelease changes rel := release.New(log.With(logger, "component", "release"), helmClient) chartSync := chartsync.New( log.With(logger, "component", "chartsync"), @@ -188,21 +197,37 @@ func main() { chartsync.Config{LogDiffs: *logReleaseDiffs, UpdateDeps: *updateDependencies, GitTimeout: *gitTimeout, GitPollInterval: *gitPollInterval}, *namespace, ) - chartSync.Run(shutdown, errc, shutdownWg) - 
// start FluxRelease informer + // prepare operator and start FluxRelease informer + // NB: the operator needs to do its magic with the informer + // _before_ starting it or else the cache sync seems to hang at + // random opr := operator.New(log.With(logger, "component", "operator"), *logReleaseDiffs, kubeClient, fhrInformer, queue, chartSync) - checkpoint.CheckForUpdates(product, version, nil, log.With(logger, "component", "checkpoint")) + go ifInformerFactory.Start(shutdown) + + // wait for the caches to be synced before starting _any_ workers + mainLogger.Log("info", "waiting for informer caches to sync") + if ok := cache.WaitForCacheSync(shutdown, fhrInformer.Informer().HasSynced); !ok { + mainLogger.Log("error", "failed to wait for caches to sync") + os.Exit(1) + } + mainLogger.Log("info", "informer caches synced") + + // start operator + go opr.Run(1, shutdown, shutdownWg) + + // start git sync loop + go chartSync.Run(shutdown, errc, shutdownWg) + + // the status updater, to keep track of the release status for + // every HelmRelease + statusUpdater := status.New(ifClient, kubeClient, helmClient, *namespace) + go statusUpdater.Loop(shutdown, log.With(logger, "component", "annotator")) // start HTTP server go daemonhttp.ListenAndServe(*listenAddr, chartSync, log.With(logger, "component", "daemonhttp"), shutdown) - // start operator - go func() { - if err = opr.Run(1, shutdown, shutdownWg); err != nil { - errc <- fmt.Errorf(ErrOperatorFailure, err) - } - }() + checkpoint.CheckForUpdates(product, version, nil, log.With(logger, "component", "checkpoint")) shutdownErr := <-errc logger.Log("exiting...", shutdownErr) diff --git a/daemon/daemon.go b/daemon/daemon.go index a0c6ea5be..f7be8b008 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -23,6 +23,7 @@ import ( "github.com/weaveworks/flux/guid" "github.com/weaveworks/flux/image" "github.com/weaveworks/flux/job" + "github.com/weaveworks/flux/manifests" "github.com/weaveworks/flux/policy" 
"github.com/weaveworks/flux/registry" "github.com/weaveworks/flux/release" @@ -42,17 +43,18 @@ const ( // Daemon is the fully-functional state of a daemon (compare to // `NotReadyDaemon`). type Daemon struct { - V string - Cluster cluster.Cluster - Manifests cluster.Manifests - Registry registry.Registry - ImageRefresh chan image.Name - Repo *git.Repo - GitConfig git.Config - Jobs *job.Queue - JobStatusCache *job.StatusCache - EventWriter event.EventWriter - Logger log.Logger + V string + Cluster cluster.Cluster + Manifests manifests.Manifests + Registry registry.Registry + ImageRefresh chan image.Name + Repo *git.Repo + GitConfig git.Config + Jobs *job.Queue + JobStatusCache *job.StatusCache + EventWriter event.EventWriter + Logger log.Logger + ManifestGenerationEnabled bool // bookkeeping *LoopVars } @@ -72,12 +74,22 @@ func (d *Daemon) Export(ctx context.Context) ([]byte, error) { return d.Cluster.Export() } +func (d *Daemon) getManifestStore(checkout *git.Checkout) (manifests.Store, error) { + if d.ManifestGenerationEnabled { + return manifests.NewConfigAware(checkout.Dir(), checkout.ManifestDirs(), d.Manifests) + } + return manifests.NewRawFiles(checkout.Dir(), checkout.ManifestDirs(), d.Manifests), nil +} + func (d *Daemon) getResources(ctx context.Context) (map[string]resource.Resource, v6.ReadOnlyReason, error) { var resources map[string]resource.Resource var globalReadOnly v6.ReadOnlyReason err := d.WithClone(ctx, func(checkout *git.Checkout) error { - var err error - resources, err = d.Manifests.LoadManifests(checkout.Dir(), checkout.ManifestDirs()) + cm, err := d.getManifestStore(checkout) + if err != nil { + return err + } + resources, err = cm.GetAllResourcesByID(ctx) return err }) @@ -236,6 +248,9 @@ func (d *Daemon) makeJobFromUpdate(update updateFunc) jobFunc { var result job.Result err := d.WithClone(ctx, func(working *git.Checkout) error { var err error + if err = verifyWorkingRepo(ctx, d.Repo, working, d.GitConfig); d.GitVerifySignatures && err 
!= nil { + return err + } result, err = update(ctx, jobID, working, logger) if err != nil { return err @@ -257,7 +272,7 @@ func (d *Daemon) executeJob(id job.ID, do jobFunc, logger log.Logger) (job.Resul d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusRunning}) result, err := do(ctx, id, logger) if err != nil { - d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusFailed, Err: err.Error()}) + d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusFailed, Err: err.Error(), Result: result}) return result, err } d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusSucceeded, Result: result}) @@ -336,7 +351,7 @@ func (d *Daemon) UpdateManifests(ctx context.Context, spec update.Spec) (job.ID, } return d.queueJob(d.makeLoggingJobFunc(d.makeJobFromUpdate(d.release(spec, s)))), nil case policy.Updates: - return d.queueJob(d.makeLoggingJobFunc(d.makeJobFromUpdate(d.updatePolicy(spec, s)))), nil + return d.queueJob(d.makeLoggingJobFunc(d.makeJobFromUpdate(d.updatePolicies(spec, s)))), nil case update.ManualSync: return d.queueJob(d.sync()), nil default: @@ -353,16 +368,29 @@ func (d *Daemon) sync() jobFunc { if err != nil { return result, err } - head, err := d.Repo.Revision(ctx, d.GitConfig.Branch) + head, err := d.Repo.BranchHead(ctx) if err != nil { return result, err } + if d.GitVerifySignatures { + var latestValidRev string + if latestValidRev, _, err = latestValidRevision(ctx, d.Repo, d.GitConfig); err != nil { + return result, err + } else if head != latestValidRev { + result.Revision = latestValidRev + return result, fmt.Errorf( + "The branch HEAD in the git repo is not verified, and fluxd is unable to sync to it. The last verified commit was %.8s. 
HEAD is %.8s.", + latestValidRev, + head, + ) + } + } result.Revision = head - return result, nil + return result, err } } -func (d *Daemon) updatePolicy(spec update.Spec, updates policy.Updates) updateFunc { +func (d *Daemon) updatePolicies(spec update.Spec, updates policy.Updates) updateFunc { return func(ctx context.Context, jobID job.ID, working *git.Checkout, logger log.Logger) (job.Result, error) { // For each update var workloadIDs []flux.ResourceID @@ -385,31 +413,18 @@ func (d *Daemon) updatePolicy(spec update.Spec, updates policy.Updates) updateFu if policy.Set(u.Add).Has(policy.Automated) { anythingAutomated = true } - // find the workload manifest - err := cluster.UpdateManifest(d.Manifests, working.Dir(), working.ManifestDirs(), workloadID, func(def []byte) ([]byte, error) { - newDef, err := d.Manifests.UpdatePolicies(def, workloadID, u) - if err != nil { - result.Result[workloadID] = update.WorkloadResult{ - Status: update.ReleaseStatusFailed, - Error: err.Error(), - } - return nil, err - } - if string(newDef) == string(def) { - result.Result[workloadID] = update.WorkloadResult{ - Status: update.ReleaseStatusSkipped, - } - } else { - workloadIDs = append(workloadIDs, workloadID) - result.Result[workloadID] = update.WorkloadResult{ - Status: update.ReleaseStatusSuccess, - } - } - return newDef, nil - }) + cm, err := d.getManifestStore(working) + if err != nil { + return result, err + } + updated, err := cm.UpdateWorkloadPolicies(ctx, workloadID, u) if err != nil { + result.Result[workloadID] = update.WorkloadResult{ + Status: update.ReleaseStatusFailed, + Error: err.Error(), + } switch err := err.(type) { - case cluster.ManifestError: + case manifests.StoreError: result.Result[workloadID] = update.WorkloadResult{ Status: update.ReleaseStatusFailed, Error: err.Error(), @@ -418,6 +433,16 @@ func (d *Daemon) updatePolicy(spec update.Spec, updates policy.Updates) updateFu return result, err } } + if !updated { + result.Result[workloadID] = 
update.WorkloadResult{ + Status: update.ReleaseStatusSkipped, + } + } else { + workloadIDs = append(workloadIDs, workloadID) + result.Result[workloadID] = update.WorkloadResult{ + Status: update.ReleaseStatusSuccess, + } + } } if len(workloadIDs) == 0 { return result, nil @@ -431,7 +456,7 @@ func (d *Daemon) updatePolicy(spec update.Spec, updates policy.Updates) updateFu Author: commitAuthor, Message: policyCommitMessage(updates, spec.Cause), } - if err := working.CommitAndPush(ctx, commitAction, ¬e{JobID: jobID, Spec: spec}); err != nil { + if err := working.CommitAndPush(ctx, commitAction, ¬e{JobID: jobID, Spec: spec}, d.ManifestGenerationEnabled); err != nil { // On the chance pushing failed because it was not // possible to fast-forward, ask for a sync so the // next attempt is more likely to succeed. @@ -453,10 +478,13 @@ func (d *Daemon) updatePolicy(spec update.Spec, updates policy.Updates) updateFu func (d *Daemon) release(spec update.Spec, c release.Changes) updateFunc { return func(ctx context.Context, jobID job.ID, working *git.Checkout, logger log.Logger) (job.Result, error) { - rc := release.NewReleaseContext(d.Cluster, d.Manifests, d.Registry, working) - result, err := release.Release(rc, c, logger) - var zero job.Result + rs, err := d.getManifestStore(working) + if err != nil { + return zero, err + } + rc := release.NewReleaseContext(d.Cluster, rs, d.Registry) + result, err := release.Release(ctx, rc, c, logger) if err != nil { return zero, err } @@ -476,7 +504,7 @@ func (d *Daemon) release(spec update.Spec, c release.Changes) updateFunc { Author: commitAuthor, Message: commitMsg, } - if err := working.CommitAndPush(ctx, commitAction, ¬e{JobID: jobID, Spec: spec, Result: result}); err != nil { + if err := working.CommitAndPush(ctx, commitAction, ¬e{JobID: jobID, Spec: spec, Result: result}, d.ManifestGenerationEnabled); err != nil { // On the chance pushing failed because it was not // possible to fast-forward, ask the repo to fetch // from upstream 
ASAP, so the next attempt is more @@ -751,3 +779,76 @@ func policyEventTypes(u policy.Update) []string { sort.Strings(result) return result } + +// latestValidRevision returns the HEAD of the configured branch if it +// has a valid signature, or the SHA of the latest valid commit it +// could find plus the invalid commit thereafter. +// +// Signature validation happens for commits between the revision of the +// sync tag and the HEAD, after the signature of the sync tag itself +// has been validated, as the branch can not be trusted when the tag +// originates from an unknown source. +// +// In case the signature of the tag can not be verified, or it points +// towards a revision we can not get a commit range for, it returns an +// error. +func latestValidRevision(ctx context.Context, repo *git.Repo, gitConfig git.Config) (string, git.Commit, error) { + var invalidCommit = git.Commit{} + newRevision, err := repo.BranchHead(ctx) + if err != nil { + return "", invalidCommit, err + } + + // Validate tag and retrieve the revision it points to + tagRevision, err := repo.VerifyTag(ctx, gitConfig.SyncTag) + if err != nil && !strings.Contains(err.Error(), "not found.") { + return "", invalidCommit, errors.Wrap(err, "failed to verify signature of sync tag") + } + + var commits []git.Commit + if tagRevision == "" { + commits, err = repo.CommitsBefore(ctx, newRevision) + } else { + // Assure the revision from the tag is a signed and valid commit + if err = repo.VerifyCommit(ctx, tagRevision); err != nil { + return "", invalidCommit, errors.Wrap(err, "failed to verify signature of sync tag revision") + } + commits, err = repo.CommitsBetween(ctx, tagRevision, newRevision) + } + + if err != nil { + return tagRevision, invalidCommit, err + } + + // Loop through commits in ascending order, validating the + // signature of each commit. In case we hit an invalid commit, we + // return the revision of the commit before that, as that one is + // valid. 
+ for i := len(commits) - 1; i >= 0; i-- { + if !commits[i].Signature.Valid() { + if i+1 < len(commits) { + return commits[i+1].Revision, commits[i], nil + } + return tagRevision, commits[i], nil + } + } + + return newRevision, invalidCommit, nil +} + +func verifyWorkingRepo(ctx context.Context, repo *git.Repo, working *git.Checkout, gitConfig git.Config) error { + if latestVerifiedRev, _, err := latestValidRevision(ctx, repo, gitConfig); err != nil { + return err + } else if headRev, err := working.HeadRevision(ctx); err != nil { + return err + } else if headRev != latestVerifiedRev { + return unsignedHeadRevisionError(latestVerifiedRev, headRev) + } + return nil +} + +func isUnknownRevision(err error) bool { + return err != nil && + (strings.Contains(err.Error(), "unknown revision or path not in the working tree.") || + strings.Contains(err.Error(), "bad revision")) +} diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go index 5c1e1bfac..6600c5df3 100644 --- a/daemon/daemon_test.go +++ b/daemon/daemon_test.go @@ -13,6 +13,7 @@ import ( "github.com/go-kit/kit/log" "github.com/stretchr/testify/assert" + "github.com/weaveworks/flux" "github.com/weaveworks/flux/api/v10" "github.com/weaveworks/flux/api/v11" @@ -20,12 +21,13 @@ import ( "github.com/weaveworks/flux/api/v9" "github.com/weaveworks/flux/cluster" "github.com/weaveworks/flux/cluster/kubernetes" - kresource "github.com/weaveworks/flux/cluster/kubernetes/resource" + "github.com/weaveworks/flux/cluster/mock" "github.com/weaveworks/flux/event" "github.com/weaveworks/flux/git" "github.com/weaveworks/flux/git/gittest" "github.com/weaveworks/flux/image" "github.com/weaveworks/flux/job" + "github.com/weaveworks/flux/manifests" "github.com/weaveworks/flux/policy" "github.com/weaveworks/flux/registry" registryMock "github.com/weaveworks/flux/registry/mock" @@ -514,8 +516,8 @@ func TestDaemon_PolicyUpdate(t *testing.T) { return false } defer co.Clean() - dirs := co.ManifestDirs() - m, err := 
d.Manifests.LoadManifests(co.Dir(), dirs) + cm := manifests.NewRawFiles(co.Dir(), co.ManifestDirs(), d.Manifests) + m, err := cm.GetAllResourcesByID(context.TODO()) if err != nil { t.Fatalf("Error: %s", err.Error()) } @@ -632,17 +634,7 @@ func mustParseImageRef(ref string) image.Ref { return r } -type anonNamespacer func(kresource.KubeManifest) string - -func (fn anonNamespacer) EffectiveNamespace(m kresource.KubeManifest, _ kubernetes.ResourceScopes) (string, error) { - return fn(m), nil -} - -var alwaysDefault anonNamespacer = func(kresource.KubeManifest) string { - return "default" -} - -func mockDaemon(t *testing.T) (*Daemon, func(), func(), *cluster.Mock, *mockEventWriter, func(func())) { +func mockDaemon(t *testing.T) (*Daemon, func(), func(), *mock.Mock, *mockEventWriter, func(func())) { logger := log.NewNopLogger() singleService := cluster.Workload{ @@ -680,9 +672,9 @@ func mockDaemon(t *testing.T) (*Daemon, func(), func(), *cluster.Mock, *mockEven NotesRef: "fluxtest", } - var k8s *cluster.Mock + var k8s *mock.Mock { - k8s = &cluster.Mock{} + k8s = &mock.Mock{} k8s.AllWorkloadsFunc = func(maybeNamespace string) ([]cluster.Workload, error) { if maybeNamespace == ns { return []cluster.Workload{ @@ -731,7 +723,7 @@ func mockDaemon(t *testing.T) (*Daemon, func(), func(), *cluster.Mock, *mockEven // Jobs queue (starts itself) jobs := job.NewQueue(jshutdown, jwg) - manifests := kubernetes.NewManifests(alwaysDefault, log.NewLogfmtLogger(os.Stdout)) + manifests := kubernetes.NewManifests(kubernetes.ConstNamespacer("default"), log.NewLogfmtLogger(os.Stdout)) // Finally, the daemon d := &Daemon{ @@ -745,7 +737,7 @@ func mockDaemon(t *testing.T) (*Daemon, func(), func(), *cluster.Mock, *mockEven JobStatusCache: &job.StatusCache{Size: 100}, EventWriter: events, Logger: logger, - LoopVars: &LoopVars{GitOpTimeout: timeout}, + LoopVars: &LoopVars{GitTimeout: timeout}, } start := func() { @@ -865,9 +857,8 @@ func (w *wait) ForImageTag(t *testing.T, d *Daemon, workload, 
container, tag str return false } defer co.Clean() - - dirs := co.ManifestDirs() - resources, err := d.Manifests.LoadManifests(co.Dir(), dirs) + cm := manifests.NewRawFiles(co.Dir(), co.ManifestDirs(), d.Manifests) + resources, err := cm.GetAllResourcesByID(context.TODO()) assert.NoError(t, err) workload, ok := resources[workload].(resource.Workload) diff --git a/daemon/errors.go b/daemon/errors.go index 7291cbb26..32e095d0e 100644 --- a/daemon/errors.go +++ b/daemon/errors.go @@ -67,3 +67,21 @@ daemon if possible: `, } } + +func unsignedHeadRevisionError(latestValidRevision, headRevision string) error { + return &fluxerr.Error{ + Type: fluxerr.User, + Err: fmt.Errorf("HEAD revision is unsigned"), + Help: `HEAD is not a verified commit. + +The branch HEAD in the git repo is not verified, and fluxd is unable to +make a change on top of it. The last verified commit was + + ` + latestValidRevision + ` + +HEAD is + + ` + headRevision + `. +`, + } +} diff --git a/daemon/images.go b/daemon/images.go index 4e9ca1b62..6fca631d5 100644 --- a/daemon/images.go +++ b/daemon/images.go @@ -103,13 +103,13 @@ func calculateChanges(logger log.Logger, candidateWorkloads resources, workloads continue containers } current := repoMetadata.FindImageWithRef(currentImageID) - if current.CreatedAt.IsZero() || latest.CreatedAt.IsZero() { - logger.Log("warning", "image with zero created timestamp", "current", fmt.Sprintf("%s (%s)", current.ID, current.CreatedAt), "latest", fmt.Sprintf("%s (%s)", latest.ID, latest.CreatedAt), "action", "skip container") + if current.CreatedTS().IsZero() || latest.CreatedTS().IsZero() { + logger.Log("warning", "image with zero created timestamp", "current", fmt.Sprintf("%s (%s)", current.ID, current.CreatedTS()), "latest", fmt.Sprintf("%s (%s)", latest.ID, latest.CreatedTS()), "action", "skip container") continue containers } newImage := currentImageID.WithNewTag(latest.ID.Tag) changes.Add(workload.ID, container, newImage) - logger.Log("info", "added update to 
automation run", "new", newImage, "reason", fmt.Sprintf("latest %s (%s) > current %s (%s)", latest.ID.Tag, latest.CreatedAt, currentImageID.Tag, current.CreatedAt)) + logger.Log("info", "added update to automation run", "new", newImage, "reason", fmt.Sprintf("latest %s (%s) > current %s (%s)", latest.ID.Tag, latest.CreatedTS(), currentImageID.Tag, current.CreatedTS())) } } } diff --git a/daemon/loop.go b/daemon/loop.go index b8256c086..2eee8c266 100644 --- a/daemon/loop.go +++ b/daemon/loop.go @@ -2,30 +2,20 @@ package daemon import ( "context" - "crypto/sha256" - "encoding/base64" "fmt" - "strings" + "github.com/weaveworks/flux/git" "sync" "time" "github.com/go-kit/kit/log" - "github.com/pkg/errors" - - "github.com/weaveworks/flux" - "github.com/weaveworks/flux/cluster" - "github.com/weaveworks/flux/event" - "github.com/weaveworks/flux/git" fluxmetrics "github.com/weaveworks/flux/metrics" - "github.com/weaveworks/flux/resource" - fluxsync "github.com/weaveworks/flux/sync" - "github.com/weaveworks/flux/update" ) type LoopVars struct { SyncInterval time.Duration RegistryPollInterval time.Duration - GitOpTimeout time.Duration + GitTimeout time.Duration + GitVerifySignatures bool initOnce sync.Once syncSoon chan struct{} @@ -50,7 +40,8 @@ func (d *Daemon) Loop(stop chan struct{}, wg *sync.WaitGroup, logger log.Logger) // available. imagePollTimer := time.NewTimer(d.RegistryPollInterval) - // Keep track of current HEAD, so we can know when to treat a repo + // Keep track of current, verified (if signature verification is + // enabled), HEAD, so we can know when to treat a repo // mirror notification as a change. Otherwise, we'll just sync // every timer tick as well as every mirror refresh. 
syncHead := "" @@ -61,8 +52,7 @@ func (d *Daemon) Loop(stop chan struct{}, wg *sync.WaitGroup, logger log.Logger) for { var ( - lastKnownSyncTagRev string - warnedAboutSyncTagChange bool + lastKnownSyncTag = &lastKnownSyncTag{logger: logger, syncTag: d.GitConfig.SyncTag} ) select { case <-stop: @@ -86,20 +76,38 @@ func (d *Daemon) Loop(stop chan struct{}, wg *sync.WaitGroup, logger log.Logger) default: } } - if err := d.doSync(logger, &lastKnownSyncTagRev, &warnedAboutSyncTagChange); err != nil { + started := time.Now().UTC() + err := d.Sync(context.Background(), started, syncHead, lastKnownSyncTag) + syncDuration.With( + fluxmetrics.LabelSuccess, fmt.Sprint(err == nil), + ).Observe(time.Since(started).Seconds()) + if err != nil { logger.Log("err", err) } syncTimer.Reset(d.SyncInterval) case <-syncTimer.C: d.AskForSync() case <-d.Repo.C: - ctx, cancel := context.WithTimeout(context.Background(), d.GitOpTimeout) - newSyncHead, err := d.Repo.Revision(ctx, d.GitConfig.Branch) + var newSyncHead string + var invalidCommit git.Commit + var err error + + ctx, cancel := context.WithTimeout(context.Background(), d.GitTimeout) + if d.GitVerifySignatures { + newSyncHead, invalidCommit, err = latestValidRevision(ctx, d.Repo, d.GitConfig) + } else { + newSyncHead, err = d.Repo.BranchHead(ctx) + } cancel() + if err != nil { logger.Log("url", d.Repo.Origin().URL, "err", err) continue } + if invalidCommit.Revision != "" { + logger.Log("err", "found invalid GPG signature for commit", "revision", invalidCommit.Revision, "key", invalidCommit.Signature.Key) + } + logger.Log("event", "refreshed", "url", d.Repo.Origin().URL, "branch", d.GitConfig.Branch, "HEAD", newSyncHead) if newSyncHead != syncHead { syncHead = newSyncHead @@ -121,7 +129,7 @@ func (d *Daemon) Loop(stop chan struct{}, wg *sync.WaitGroup, logger log.Logger) jobLogger.Log("state", "done", "success", "false", "err", err) } else { jobLogger.Log("state", "done", "success", "true") - ctx, cancel := 
context.WithTimeout(context.Background(), d.GitOpTimeout) + ctx, cancel := context.WithTimeout(context.Background(), d.GitTimeout) err := d.Repo.Refresh(ctx) if err != nil { logger.Log("err", err) @@ -150,318 +158,51 @@ func (d *LoopVars) AskForImagePoll() { } } -// -- extra bits the loop needs - -func (d *Daemon) doSync(logger log.Logger, lastKnownSyncTagRev *string, warnedAboutSyncTagChange *bool) (retErr error) { - started := time.Now().UTC() - defer func() { - syncDuration.With( - fluxmetrics.LabelSuccess, fmt.Sprint(retErr == nil), - ).Observe(time.Since(started).Seconds()) - }() - - syncSetName := makeGitConfigHash(d.Repo.Origin(), d.GitConfig) - - // We don't care how long this takes overall, only about not - // getting bogged down in certain operations, so use an - // undeadlined context in general. - ctx := context.Background() - - // checkout a working clone so we can mess around with tags later - var working *git.Checkout - { - var err error - ctx, cancel := context.WithTimeout(ctx, d.GitOpTimeout) - defer cancel() - working, err = d.Repo.Clone(ctx, d.GitConfig) - if err != nil { - return err - } - defer working.Clean() - } - - // For comparison later. - oldTagRev, err := working.SyncRevision(ctx) - if err != nil && !isUnknownRevision(err) { - return err - } - // Check if something other than the current instance of fluxd changed the sync tag. - // This is likely to be caused by another fluxd instance using the same tag. - // Having multiple instances fighting for the same tag can lead to fluxd missing manifest changes. 
- if *lastKnownSyncTagRev != "" && oldTagRev != *lastKnownSyncTagRev && !*warnedAboutSyncTagChange { - logger.Log("warning", - "detected external change in git sync tag; the sync tag should not be shared by fluxd instances") - *warnedAboutSyncTagChange = true - } - - newTagRev, err := working.HeadRevision(ctx) - if err != nil { - return err - } - - // Get a map of all resources defined in the repo - allResources, err := d.Manifests.LoadManifests(working.Dir(), working.ManifestDirs()) - if err != nil { - return errors.Wrap(err, "loading resources from repo") - } - - var resourceErrors []event.ResourceError - if err := fluxsync.Sync(syncSetName, allResources, d.Cluster); err != nil { - switch syncerr := err.(type) { - case cluster.SyncError: - logger.Log("err", err) - for _, e := range syncerr { - resourceErrors = append(resourceErrors, event.ResourceError{ - ID: e.ResourceID, - Path: e.Source, - Error: e.Error.Error(), - }) - } - default: - return err - } - } - - // update notes and emit events for applied commits - - var initialSync bool - var commits []git.Commit - { - var err error - ctx, cancel := context.WithTimeout(ctx, d.GitOpTimeout) - if oldTagRev != "" { - commits, err = d.Repo.CommitsBetween(ctx, oldTagRev, newTagRev, d.GitConfig.Paths...) - } else { - initialSync = true - commits, err = d.Repo.CommitsBefore(ctx, newTagRev, d.GitConfig.Paths...) 
- } - cancel() - if err != nil { - return err - } - } - - // Figure out which workload IDs changed in this release - changedResources := map[string]resource.Resource{} - - if initialSync { - // no synctag, We are syncing everything from scratch - changedResources = allResources - } else { - ctx, cancel := context.WithTimeout(ctx, d.GitOpTimeout) - changedFiles, err := working.ChangedFiles(ctx, oldTagRev) - if err == nil && len(changedFiles) > 0 { - // We had some changed files, we're syncing a diff - // FIXME(michael): this won't be accurate when a file can have more than one resource - changedResources, err = d.Manifests.LoadManifests(working.Dir(), changedFiles) - } - cancel() - if err != nil { - return errors.Wrap(err, "loading resources from repo") - } - } - - workloadIDs := flux.ResourceIDSet{} - for _, r := range changedResources { - workloadIDs.Add([]flux.ResourceID{r.ResourceID()}) - } - - var notes map[string]struct{} - { - ctx, cancel := context.WithTimeout(ctx, d.GitOpTimeout) - notes, err = working.NoteRevList(ctx) - cancel() - if err != nil { - return errors.Wrap(err, "loading notes from repo") - } - } - - // Collect any events that come from notes attached to the commits - // we just synced. While we're doing this, keep track of what - // other things this sync includes e.g., releases and - // autoreleases, that we're already posting as events, so upstream - // can skip the sync event if it wants to. - includes := make(map[string]bool) - if len(commits) > 0 { - var noteEvents []event.Event - - // Find notes in revisions. 
- for i := len(commits) - 1; i >= 0; i-- { - if _, ok := notes[commits[i].Revision]; !ok { - includes[event.NoneOfTheAbove] = true - continue - } - ctx, cancel := context.WithTimeout(ctx, d.GitOpTimeout) - var n note - ok, err := working.GetNote(ctx, commits[i].Revision, &n) - cancel() - if err != nil { - return errors.Wrap(err, "loading notes from repo") - } - if !ok { - includes[event.NoneOfTheAbove] = true - continue - } - - // If this is the first sync, we should expect no notes, - // since this is supposedly the first time we're seeing - // the repo. But there are circumstances in which we can - // nonetheless see notes -- if the tag was deleted from - // the upstream repo, or if this accidentally has the same - // notes ref as another daemon using the same repo (but a - // different tag). Either way, we don't want to report any - // notes on an initial sync, since they (most likely) - // don't belong to us. - if initialSync { - logger.Log("warning", "no notes expected on initial sync; this repo may be in use by another fluxd") - break - } - - // Interpret some notes as events to send to the upstream - switch n.Spec.Type { - case update.Containers: - spec := n.Spec.Spec.(update.ReleaseContainersSpec) - noteEvents = append(noteEvents, event.Event{ - ServiceIDs: n.Result.AffectedResources(), - Type: event.EventRelease, - StartedAt: started, - EndedAt: time.Now().UTC(), - LogLevel: event.LogLevelInfo, - Metadata: &event.ReleaseEventMetadata{ - ReleaseEventCommon: event.ReleaseEventCommon{ - Revision: commits[i].Revision, - Result: n.Result, - Error: n.Result.Error(), - }, - Spec: event.ReleaseSpec{ - Type: event.ReleaseContainersSpecType, - ReleaseContainersSpec: &spec, - }, - Cause: n.Spec.Cause, - }, - }) - includes[event.EventRelease] = true - case update.Images: - spec := n.Spec.Spec.(update.ReleaseImageSpec) - noteEvents = append(noteEvents, event.Event{ - ServiceIDs: n.Result.AffectedResources(), - Type: event.EventRelease, - StartedAt: started, - EndedAt: 
time.Now().UTC(), - LogLevel: event.LogLevelInfo, - Metadata: &event.ReleaseEventMetadata{ - ReleaseEventCommon: event.ReleaseEventCommon{ - Revision: commits[i].Revision, - Result: n.Result, - Error: n.Result.Error(), - }, - Spec: event.ReleaseSpec{ - Type: event.ReleaseImageSpecType, - ReleaseImageSpec: &spec, - }, - Cause: n.Spec.Cause, - }, - }) - includes[event.EventRelease] = true - case update.Auto: - spec := n.Spec.Spec.(update.Automated) - noteEvents = append(noteEvents, event.Event{ - ServiceIDs: n.Result.AffectedResources(), - Type: event.EventAutoRelease, - StartedAt: started, - EndedAt: time.Now().UTC(), - LogLevel: event.LogLevelInfo, - Metadata: &event.AutoReleaseEventMetadata{ - ReleaseEventCommon: event.ReleaseEventCommon{ - Revision: commits[i].Revision, - Result: n.Result, - Error: n.Result.Error(), - }, - Spec: spec, - }, - }) - includes[event.EventAutoRelease] = true - case update.Policy: - // Use this to mean any change to policy - includes[event.EventUpdatePolicy] = true - default: - // Presume it's not something we're otherwise sending - // as an event - includes[event.NoneOfTheAbove] = true - } - } - - cs := make([]event.Commit, len(commits)) - for i, c := range commits { - cs[i].Revision = c.Revision - cs[i].Message = c.Message - } - if err = d.LogEvent(event.Event{ - ServiceIDs: workloadIDs.ToSlice(), - Type: event.EventSync, - StartedAt: started, - EndedAt: started, - LogLevel: event.LogLevelInfo, - Metadata: &event.SyncEventMetadata{ - Commits: cs, - InitialSync: initialSync, - Includes: includes, - Errors: resourceErrors, - }, - }); err != nil { - logger.Log("err", err) - // Abort early to ensure at least once delivery of events - return err - } - - for _, event := range noteEvents { - if err = d.LogEvent(event); err != nil { - logger.Log("err", err) - // Abort early to ensure at least once delivery of events - return err - } - } - } - - // Move the tag and push it so we know how far we've gotten. 
- if oldTagRev != newTagRev { - { - ctx, cancel := context.WithTimeout(ctx, d.GitOpTimeout) - tagAction := git.TagAction{ - Revision: newTagRev, - Message: "Sync pointer", - } - err := working.MoveSyncTagAndPush(ctx, tagAction) - cancel() - if err != nil { - return err - } - *lastKnownSyncTagRev = newTagRev - } - logger.Log("tag", d.GitConfig.SyncTag, "old", oldTagRev, "new", newTagRev) - { - ctx, cancel := context.WithTimeout(ctx, d.GitOpTimeout) - err := d.Repo.Refresh(ctx) - cancel() - return err - } - } - return nil -} - -func isUnknownRevision(err error) bool { - return err != nil && - (strings.Contains(err.Error(), "unknown revision or path not in the working tree.") || - strings.Contains(err.Error(), "bad revision")) +// -- internals to keep track of sync tag state +type lastKnownSyncTag struct { + logger log.Logger + syncTag string + revision string + warnedAboutChange bool } -func makeGitConfigHash(remote git.Remote, conf git.Config) string { - urlbit := remote.SafeURL() - pathshash := sha256.New() - pathshash.Write([]byte(urlbit)) - pathshash.Write([]byte(conf.Branch)) - for _, path := range conf.Paths { - pathshash.Write([]byte(path)) - } - return base64.RawURLEncoding.EncodeToString(pathshash.Sum(nil)) +// SetRevision updates the sync tag revision in git _and_ the +// in-memory revision, if it has changed. In addition, it validates +// if the in-memory revision matches the old revision from git before +// making the update, to notify a user about multiple Flux daemons +// using the same tag. +func (s *lastKnownSyncTag) SetRevision(ctx context.Context, working *git.Checkout, timeout time.Duration, + oldRev, newRev string) (bool, error) { + // Check if something other than the current instance of fluxd + // changed the sync tag. This is likely caused by another instance + // using the same tag. Having multiple instances fight for the same + // tag can lead to fluxd missing manifest changes. 
+ if s.revision != "" && oldRev != s.revision && !s.warnedAboutChange { + s.logger.Log("warning", + "detected external change in git sync tag; the sync tag should not be shared by fluxd instances", + "tag", s.syncTag) + s.warnedAboutChange = true + } + + // Did it actually change? + if s.revision == newRev { + return false, nil + } + + // Update the sync tag revision in git + tagAction := git.TagAction{ + Revision: newRev, + Message: "Sync pointer", + } + ctx, cancel := context.WithTimeout(ctx, timeout) + if err := working.MoveSyncTagAndPush(ctx, tagAction); err != nil { + return false, err + } + cancel() + + // Update in-memory revision + s.revision = newRev + + s.logger.Log("tag", s.syncTag, "old", oldRev, "new", newRev) + return true, nil } diff --git a/daemon/sync.go b/daemon/sync.go new file mode 100644 index 000000000..64a92681d --- /dev/null +++ b/daemon/sync.go @@ -0,0 +1,397 @@ +package daemon + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "github.com/go-kit/kit/log" + "github.com/pkg/errors" + "path/filepath" + "time" + + "github.com/weaveworks/flux" + "github.com/weaveworks/flux/cluster" + "github.com/weaveworks/flux/event" + "github.com/weaveworks/flux/git" + "github.com/weaveworks/flux/manifests" + "github.com/weaveworks/flux/resource" + fluxsync "github.com/weaveworks/flux/sync" + "github.com/weaveworks/flux/update" +) + +type syncTag interface { + SetRevision(ctx context.Context, working *git.Checkout, timeout time.Duration, oldRev, newRev string) (bool, error) +} + +type eventLogger interface { + LogEvent(e event.Event) error +} + +type changeSet struct { + commits []git.Commit + oldTagRev string + newTagRev string + initialSync bool +} + +// Sync starts the synchronization of the cluster with git. 
+func (d *Daemon) Sync(ctx context.Context, started time.Time, revision string, syncTag syncTag) error { + // Checkout a working clone used for this sync + ctxt, cancel := context.WithTimeout(ctx, d.GitTimeout) + working, err := d.Repo.Clone(ctxt, d.GitConfig) + if err != nil { + return err + } + cancel() + defer working.Clean() + + // Ensure we are syncing the given revision + if err := working.Checkout(ctx, revision); err != nil { + return err + } + + // Retrieve change set of commits we need to sync + c, err := getChangeSet(ctx, working, d.Repo, d.GitTimeout, d.GitConfig.Paths) + if err != nil { + return err + } + + // Run actual sync of resources on cluster + syncSetName := makeGitConfigHash(d.Repo.Origin(), d.GitConfig) + resourceStore, err := d.getManifestStore(working) + if err != nil { + return errors.Wrap(err, "reading the respository checkout") + } + resources, resourceErrors, err := doSync(ctx, resourceStore, d.Cluster, syncSetName, d.Logger) + if err != nil { + return err + } + + // Determine what resources changed during the sync + changedResources, err := getChangedResources(ctx, c, d.GitTimeout, working, resourceStore, resources) + serviceIDs := flux.ResourceIDSet{} + for _, r := range changedResources { + serviceIDs.Add([]flux.ResourceID{r.ResourceID()}) + } + + // Retrieve git notes and collect events from them + notes, err := getNotes(ctx, d.GitTimeout, working) + if err != nil { + return err + } + noteEvents, includesEvents, err := collectNoteEvents(ctx, c, notes, d.GitTimeout, working, started, d.Logger) + if err != nil { + return err + } + + // Report all synced commits + if err := logCommitEvent(d, c, serviceIDs, started, includesEvents, resourceErrors, d.Logger); err != nil { + return err + } + + // Report all collected events + for _, event := range noteEvents { + if err = d.LogEvent(event); err != nil { + d.Logger.Log("err", err) + // Abort early to ensure at least once delivery of events + return err + } + } + + // Move sync tag + if ok, 
err := syncTag.SetRevision(ctx, working, d.GitTimeout, c.oldTagRev, c.newTagRev); err != nil { + return err + } else if !ok { + return nil + } + + err = refresh(ctx, d.GitTimeout, d.Repo) + return err +} + +// getChangeSet returns the change set of commits for this sync, +// including the revision range and if it is an initial sync. +func getChangeSet(ctx context.Context, working *git.Checkout, repo *git.Repo, timeout time.Duration, + paths []string) (changeSet, error) { + var c changeSet + var err error + + c.oldTagRev, err = working.SyncRevision(ctx) + if err != nil && !isUnknownRevision(err) { + return c, err + } + c.newTagRev, err = working.HeadRevision(ctx) + if err != nil { + return c, err + } + + ctx, cancel := context.WithTimeout(ctx, timeout) + if c.oldTagRev != "" { + c.commits, err = repo.CommitsBetween(ctx, c.oldTagRev, c.newTagRev, paths...) + } else { + c.initialSync = true + c.commits, err = repo.CommitsBefore(ctx, c.newTagRev, paths...) + } + cancel() + + return c, err +} + +// doSync runs the actual sync of workloads on the cluster. It returns +// a map with all resources it applied and sync errors it encountered. 
+func doSync(ctx context.Context, manifestsStore manifests.Store, clus cluster.Cluster, syncSetName string, + logger log.Logger) (map[string]resource.Resource, []event.ResourceError, error) { + resources, err := manifestsStore.GetAllResourcesByID(ctx) + if err != nil { + return nil, nil, errors.Wrap(err, "loading resources from repo") + } + + var resourceErrors []event.ResourceError + if err := fluxsync.Sync(syncSetName, resources, clus); err != nil { + switch syncerr := err.(type) { + case cluster.SyncError: + logger.Log("err", err) + for _, e := range syncerr { + resourceErrors = append(resourceErrors, event.ResourceError{ + ID: e.ResourceID, + Path: e.Source, + Error: e.Error.Error(), + }) + } + default: + return nil, nil, err + } + } + return resources, resourceErrors, nil +} + +// getChangedResources calculates what resources are modified during +// this sync. +func getChangedResources(ctx context.Context, c changeSet, timeout time.Duration, working *git.Checkout, + manifestsStore manifests.Store, resources map[string]resource.Resource) (map[string]resource.Resource, error) { + if c.initialSync { + return resources, nil + } + + errorf := func(err error) error { return errors.Wrap(err, "loading resources from repo") } + ctx, cancel := context.WithTimeout(ctx, timeout) + changedFiles, err := working.ChangedFiles(ctx, c.oldTagRev) + if err != nil { + return nil, errorf(err) + } + cancel() + // Get the resources by source + resourcesByID, err := manifestsStore.GetAllResourcesByID(ctx) + if err != nil { + return nil, errorf(err) + } + resourcesBySource := make(map[string]resource.Resource, len(resourcesByID)) + for _, r := range resourcesByID { + resourcesBySource[r.Source()] = r + } + + changedResources := map[string]resource.Resource{} + // FIXME(michael): this won't be accurate when a file can have more than one resource + for _, absolutePath := range changedFiles { + relPath, err := filepath.Rel(working.Dir(), absolutePath) + if err != nil { + return nil, 
errorf(err) + } + if r, ok := resourcesBySource[relPath]; ok { + changedResources[r.ResourceID().String()] = r + } + } + // All resources generated from .flux.yaml files need to be considered as changed + // (even if the .flux.yaml file itself didn't) since external dependencies of the file + // (e.g. scripts invoked), which we cannot track, may have changed + for sourcePath, r := range resourcesBySource { + _, sourceFilename := filepath.Split(sourcePath) + if sourceFilename == manifests.ConfigFilename { + changedResources[r.ResourceID().String()] = r + } + } + return changedResources, nil +} + +// getNotes retrieves the git notes from the working clone. +func getNotes(ctx context.Context, timeout time.Duration, working *git.Checkout) (map[string]struct{}, error) { + ctx, cancel := context.WithTimeout(ctx, timeout) + notes, err := working.NoteRevList(ctx) + cancel() + if err != nil { + return nil, errors.Wrap(err, "loading notes from repo") + } + return notes, nil +} + +// collectNoteEvents collects any events that come from notes attached +// to the commits we just synced. While we're doing this, keep track +// of what other things this sync includes e.g., releases and +// autoreleases, that we're already posting as events, so upstream +// can skip the sync event if it wants to. +func collectNoteEvents(ctx context.Context, c changeSet, notes map[string]struct{}, timeout time.Duration, + working *git.Checkout, started time.Time, logger log.Logger) ([]event.Event, map[string]bool, error) { + if len(c.commits) == 0 { + return nil, nil, nil + } + + var noteEvents []event.Event + var eventTypes = make(map[string]bool) + + // Find notes in revisions. 
+ for i := len(c.commits) - 1; i >= 0; i-- { + if _, ok := notes[c.commits[i].Revision]; !ok { + eventTypes[event.NoneOfTheAbove] = true + continue + } + var n note + ctx, cancel := context.WithTimeout(ctx, timeout) + ok, err := working.GetNote(ctx, c.commits[i].Revision, &n) + cancel() + if err != nil { + return nil, nil, errors.Wrap(err, "loading notes from repo") + } + if !ok { + eventTypes[event.NoneOfTheAbove] = true + continue + } + + // If this is the first sync, we should expect no notes, + // since this is supposedly the first time we're seeing + // the repo. But there are circumstances in which we can + // nonetheless see notes -- if the tag was deleted from + // the upstream repo, or if this accidentally has the same + // notes ref as another daemon using the same repo (but a + // different tag). Either way, we don't want to report any + // notes on an initial sync, since they (most likely) + // don't belong to us. + if c.initialSync { + logger.Log("warning", "no notes expected on initial sync; this repo may be in use by another fluxd") + return noteEvents, eventTypes, nil + } + + // Interpret some notes as events to send to the upstream + switch n.Spec.Type { + case update.Containers: + spec := n.Spec.Spec.(update.ReleaseContainersSpec) + noteEvents = append(noteEvents, event.Event{ + ServiceIDs: n.Result.AffectedResources(), + Type: event.EventRelease, + StartedAt: started, + EndedAt: time.Now().UTC(), + LogLevel: event.LogLevelInfo, + Metadata: &event.ReleaseEventMetadata{ + ReleaseEventCommon: event.ReleaseEventCommon{ + Revision: c.commits[i].Revision, + Result: n.Result, + Error: n.Result.Error(), + }, + Spec: event.ReleaseSpec{ + Type: event.ReleaseContainersSpecType, + ReleaseContainersSpec: &spec, + }, + Cause: n.Spec.Cause, + }, + }) + eventTypes[event.EventRelease] = true + case update.Images: + spec := n.Spec.Spec.(update.ReleaseImageSpec) + noteEvents = append(noteEvents, event.Event{ + ServiceIDs: n.Result.AffectedResources(), + Type: 
event.EventRelease, + StartedAt: started, + EndedAt: time.Now().UTC(), + LogLevel: event.LogLevelInfo, + Metadata: &event.ReleaseEventMetadata{ + ReleaseEventCommon: event.ReleaseEventCommon{ + Revision: c.commits[i].Revision, + Result: n.Result, + Error: n.Result.Error(), + }, + Spec: event.ReleaseSpec{ + Type: event.ReleaseImageSpecType, + ReleaseImageSpec: &spec, + }, + Cause: n.Spec.Cause, + }, + }) + eventTypes[event.EventRelease] = true + case update.Auto: + spec := n.Spec.Spec.(update.Automated) + noteEvents = append(noteEvents, event.Event{ + ServiceIDs: n.Result.AffectedResources(), + Type: event.EventAutoRelease, + StartedAt: started, + EndedAt: time.Now().UTC(), + LogLevel: event.LogLevelInfo, + Metadata: &event.AutoReleaseEventMetadata{ + ReleaseEventCommon: event.ReleaseEventCommon{ + Revision: c.commits[i].Revision, + Result: n.Result, + Error: n.Result.Error(), + }, + Spec: spec, + }, + }) + eventTypes[event.EventAutoRelease] = true + case update.Policy: + // Use this to mean any change to policy + eventTypes[event.EventUpdatePolicy] = true + default: + // Presume it's not something we're otherwise sending + // as an event + eventTypes[event.NoneOfTheAbove] = true + } + } + return noteEvents, eventTypes, nil +} + +// logCommitEvent reports all synced commits to the upstream. 
+func logCommitEvent(el eventLogger, c changeSet, serviceIDs flux.ResourceIDSet, started time.Time, + includesEvents map[string]bool, resourceErrors []event.ResourceError, logger log.Logger) error { + if len(c.commits) == 0 { + return nil + } + cs := make([]event.Commit, len(c.commits)) + for i, ci := range c.commits { + cs[i].Revision = ci.Revision + cs[i].Message = ci.Message + } + if err := el.LogEvent(event.Event{ + ServiceIDs: serviceIDs.ToSlice(), + Type: event.EventSync, + StartedAt: started, + EndedAt: started, + LogLevel: event.LogLevelInfo, + Metadata: &event.SyncEventMetadata{ + Commits: cs, + InitialSync: c.initialSync, + Includes: includesEvents, + Errors: resourceErrors, + }, + }); err != nil { + logger.Log("err", err) + return err + } + return nil +} + +// refresh refreshes the repository, notifying the daemon we have a new +// sync head. +func refresh(ctx context.Context, timeout time.Duration, repo *git.Repo) error { + ctx, cancel := context.WithTimeout(ctx, timeout) + err := repo.Refresh(ctx) + cancel() + return err +} + +func makeGitConfigHash(remote git.Remote, conf git.Config) string { + urlbit := remote.SafeURL() + pathshash := sha256.New() + pathshash.Write([]byte(urlbit)) + pathshash.Write([]byte(conf.Branch)) + for _, path := range conf.Paths { + pathshash.Write([]byte(path)) + } + return base64.RawURLEncoding.EncodeToString(pathshash.Sum(nil)) +} diff --git a/daemon/loop_test.go b/daemon/sync_test.go similarity index 81% rename from daemon/loop_test.go rename to daemon/sync_test.go index 0a7db386f..7e65bc3bc 100644 --- a/daemon/loop_test.go +++ b/daemon/sync_test.go @@ -1,26 +1,29 @@ package daemon import ( + "bytes" + "context" + "fmt" "io/ioutil" "os" + "path" "reflect" - "strings" "sync" "testing" "time" "github.com/go-kit/kit/log" - "context" - "github.com/weaveworks/flux" "github.com/weaveworks/flux/cluster" "github.com/weaveworks/flux/cluster/kubernetes" "github.com/weaveworks/flux/cluster/kubernetes/testfiles" + 
"github.com/weaveworks/flux/cluster/mock" "github.com/weaveworks/flux/event" "github.com/weaveworks/flux/git" "github.com/weaveworks/flux/git/gittest" "github.com/weaveworks/flux/job" + "github.com/weaveworks/flux/manifests" registryMock "github.com/weaveworks/flux/registry/mock" ) @@ -33,14 +36,14 @@ const ( ) var ( - k8s *cluster.Mock + k8s *mock.Mock events *mockEventWriter ) func daemon(t *testing.T) (*Daemon, func()) { repo, repoCleanup := gittest.Repo(t) - k8s = &cluster.Mock{} + k8s = &mock.Mock{} k8s.ExportFunc = func() ([]byte, error) { return nil, nil } events = &mockEventWriter{} @@ -60,7 +63,7 @@ func daemon(t *testing.T) (*Daemon, func()) { UserEmail: gitEmail, } - manifests := kubernetes.NewManifests(alwaysDefault, log.NewLogfmtLogger(os.Stdout)) + manifests := kubernetes.NewManifests(kubernetes.ConstNamespacer("default"), log.NewLogfmtLogger(os.Stdout)) jobs := job.NewQueue(shutdown, wg) d := &Daemon{ @@ -73,7 +76,7 @@ func daemon(t *testing.T) (*Daemon, func()) { JobStatusCache: &job.StatusCache{Size: 100}, EventWriter: events, Logger: log.NewLogfmtLogger(os.Stdout), - LoopVars: &LoopVars{GitOpTimeout: 5 * time.Second}, + LoopVars: &LoopVars{GitTimeout: timeout}, } return d, func() { close(shutdown) @@ -85,8 +88,6 @@ func daemon(t *testing.T) (*Daemon, func()) { } func TestPullAndSync_InitialSync(t *testing.T) { - // No tag - // No notes d, cleanup := daemon(t) defer cleanup() @@ -102,12 +103,17 @@ func TestPullAndSync_InitialSync(t *testing.T) { syncDef = &def return nil } - var ( - logger = log.NewLogfmtLogger(ioutil.Discard) - lastKnownSyncTagRev string - warnedAboutSyncTagChange bool - ) - d.doSync(logger, &lastKnownSyncTagRev, &warnedAboutSyncTagChange) + + ctx := context.Background() + head, err := d.Repo.BranchHead(ctx) + if err != nil { + t.Fatal(err) + } + syncTag := lastKnownSyncTag{logger: d.Logger, syncTag: d.GitConfig.SyncTag} + + if err := d.Sync(ctx, time.Now().UTC(), head, &syncTag); err != nil { + t.Error(err) + } // It applies 
everything if syncCalled != 1 { @@ -131,6 +137,7 @@ func TestPullAndSync_InitialSync(t *testing.T) { t.Errorf("Unexpected event workload ids: %#v, expected: %#v", gotResourceIDs, expectedResourceIDs) } } + // It creates the tag at HEAD if err := d.Repo.Refresh(context.Background()); err != nil { t.Errorf("pulling sync tag: %v", err) @@ -177,12 +184,14 @@ func TestDoSync_NoNewCommits(t *testing.T) { syncDef = &def return nil } - var ( - logger = log.NewLogfmtLogger(ioutil.Discard) - lastKnownSyncTagRev string - warnedAboutSyncTagChange bool - ) - if err := d.doSync(logger, &lastKnownSyncTagRev, &warnedAboutSyncTagChange); err != nil { + + head, err := d.Repo.BranchHead(ctx) + if err != nil { + t.Fatal(err) + } + syncTag := lastKnownSyncTag{logger: d.Logger, syncTag: d.GitConfig.SyncTag} + + if err := d.Sync(ctx, time.Now().UTC(), head, &syncTag); err != nil { t.Error(err) } @@ -239,17 +248,29 @@ func TestDoSync_WithNewCommit(t *testing.T) { return err } // Push some new changes - dirs := checkout.ManifestDirs() - err = cluster.UpdateManifest(d.Manifests, checkout.Dir(), dirs, flux.MustParseResourceID("default:deployment/helloworld"), func(def []byte) ([]byte, error) { - // A simple modification so we have changes to push - return []byte(strings.Replace(string(def), "replicas: 5", "replicas: 4", -1)), nil - }) + cm := manifests.NewRawFiles(checkout.Dir(), checkout.ManifestDirs(), d.Manifests) + resourcesByID, err := cm.GetAllResourcesByID(context.TODO()) + if err != nil { + return err + } + targetResource := "default:deployment/helloworld" + res, ok := resourcesByID[targetResource] + if !ok { + return fmt.Errorf("resource not found: %q", targetResource) + + } + absolutePath := path.Join(checkout.Dir(), res.Source()) + def, err := ioutil.ReadFile(absolutePath) if err != nil { return err } + newDef := bytes.Replace(def, []byte("replicas: 5"), []byte("replicas: 4"), -1) + if err := ioutil.WriteFile(absolutePath, newDef, 0600); err != nil { + return err + } commitAction 
:= git.CommitAction{Author: "", Message: "test commit"} - err = checkout.CommitAndPush(ctx, commitAction, nil) + err = checkout.CommitAndPush(ctx, commitAction, nil, false) if err != nil { return err } @@ -277,12 +298,16 @@ func TestDoSync_WithNewCommit(t *testing.T) { syncDef = &def return nil } - var ( - logger = log.NewLogfmtLogger(ioutil.Discard) - lastKnownSyncTagRev string - warnedAboutSyncTagChange bool - ) - d.doSync(logger, &lastKnownSyncTagRev, &warnedAboutSyncTagChange) + + head, err := d.Repo.BranchHead(ctx) + if err != nil { + t.Fatal(err) + } + syncTag := lastKnownSyncTag{logger: d.Logger, syncTag: d.GitConfig.SyncTag} + + if err := d.Sync(ctx, time.Now().UTC(), head, &syncTag); err != nil { + t.Error(err) + } // It applies everything if syncCalled != 1 { diff --git a/deploy-helm/helm-operator-deployment.yaml b/deploy-helm/helm-operator-deployment.yaml index 317e55f10..1f868ef66 100644 --- a/deploy-helm/helm-operator-deployment.yaml +++ b/deploy-helm/helm-operator-deployment.yaml @@ -62,7 +62,7 @@ spec: # There are no ":latest" images for helm-operator. Find the most recent # release or image version at https://hub.docker.com/r/weaveworks/helm-operator/tags # and replace the tag here. 
- image: docker.io/weaveworks/helm-operator:0.9.1 + image: docker.io/weaveworks/helm-operator:0.9.2 imagePullPolicy: IfNotPresent ports: - name: http diff --git a/deploy-helm/weave-cloud-helm-operator-deployment.yaml b/deploy-helm/weave-cloud-helm-operator-deployment.yaml index cb62131e7..415c7a358 100644 --- a/deploy-helm/weave-cloud-helm-operator-deployment.yaml +++ b/deploy-helm/weave-cloud-helm-operator-deployment.yaml @@ -27,7 +27,7 @@ spec: secretName: flux-git-deploy containers: - name: flux-helm-operator - image: docker.io/weaveworks/helm-operator:0.9.1 + image: docker.io/weaveworks/helm-operator:0.9.2 imagePullPolicy: IfNotPresent args: - --git-timeout=20s diff --git a/deploy/flux-deployment.yaml b/deploy/flux-deployment.yaml index 7bfd7cdbb..06b2615b1 100644 --- a/deploy/flux-deployment.yaml +++ b/deploy/flux-deployment.yaml @@ -49,12 +49,21 @@ spec: # configMap: # name: flux-kubeconfig + # The following volume is used to import GPG keys (for signing + # and verification purposes). You will also need to provide the + # secret with the keys, and uncomment the volumeMount and args + # below. + # - name: gpg-keys + # secret: + # secretName: flux-gpg-keys + # defaultMode: 0400 + containers: - name: flux # There are no ":latest" images for flux. Find the most recent # release or image version at https://hub.docker.com/r/weaveworks/flux/tags # and replace the tag here. - image: docker.io/weaveworks/flux:1.12.2 + image: docker.io/weaveworks/flux:1.12.3 imagePullPolicy: IfNotPresent resources: requests: @@ -87,33 +96,54 @@ spec: # - name: KUBECONFIG # value: /etc/fluxd/kube/config + # Include this and the volume "gpg-keys" above, and the + # args below. 
+ # - name: gpg-keys + # mountPath: /root/gpg-import + # readOnly: true + args: - # if you deployed memcached in a different namespace to flux, + # If you deployed memcached in a different namespace to flux, # or with a different service name, you can supply these # following two arguments to tell fluxd how to connect to it. # - --memcached-hostname=memcached.default.svc.cluster.local - # use the memcached ClusterIP service name by setting the + # Use the memcached ClusterIP service name by setting the # memcached-service to string empty - --memcached-service= - # this must be supplied, and be in the tmpfs (emptyDir) + # This must be supplied, and be in the tmpfs (emptyDir) # mounted above, for K8s >= 1.10 - --ssh-keygen-dir=/var/fluxd/keygen - # replace or remove the following URL + # Replace or remove the following URL. - --git-url=git@github.com:weaveworks/flux-get-started - --git-branch=master # include this if you want to restrict the manifests considered by flux # to those under the following relative paths in the git repository # - --git-path=subdir1,subdir2 - # include these next two to connect to an "upstream" service + # Include these two to enable git commit signing + # - --git-gpg-key-import=/root/gpg-import + # - --git-signing-key= + + # Include this to enable git signature verification + # - --git-verify-signatures + + # Include these next two to connect to an "upstream" service # (e.g., Weave Cloud). The token is particular to the service. # - --connect=wss://cloud.weave.works/api/flux # - --token=abc123abc123abc123abc123 - # serve /metrics endpoint at different port. + # Serve /metrics endpoint at different port; # make sure to set prometheus' annotation to scrape the port value. - --listen-metrics=:3031 + + # Optional DNS settings, configuring the ndots option may resolve + # nslookup issues on some Kubernetes setups. 
+ # dnsPolicy: "None" + # dnsConfig: + # options: + # - name: ndots + # value: "1" diff --git a/deploy/memcache-dep.yaml b/deploy/memcache-dep.yaml index 2c5de3259..1e0728bc7 100644 --- a/deploy/memcache-dep.yaml +++ b/deploy/memcache-dep.yaml @@ -17,7 +17,7 @@ spec: spec: containers: - name: memcached - image: memcached:1.4.25 + image: memcached:1.5.15 imagePullPolicy: IfNotPresent args: - -m 512 # Maximum memory to use, in megabytes @@ -27,3 +27,7 @@ spec: ports: - name: clients containerPort: 11211 + securityContext: + runAsUser: 11211 + runAsGroup: 11211 + allowPrivilegeEscalation: false diff --git a/docker/Dockerfile.flux b/docker/Dockerfile.flux index 6c4d5bb4e..0fe3d29d2 100644 --- a/docker/Dockerfile.flux +++ b/docker/Dockerfile.flux @@ -2,7 +2,7 @@ FROM alpine:3.9 WORKDIR /home/flux -RUN apk add --no-cache openssh ca-certificates tini 'git>=2.3.0' gnupg +RUN apk add --no-cache openssh-client ca-certificates tini 'git>=2.12.0' 'gnutls>=3.6.7' gnupg # Add git hosts to known hosts file so we can use # StrickHostKeyChecking with git+ssh @@ -14,6 +14,7 @@ RUN sh /home/flux/known_hosts.sh /etc/ssh/ssh_known_hosts && \ COPY ./ssh_config /etc/ssh/ssh_config COPY ./kubectl /usr/local/bin/ +COPY ./kustomize /usr/local/bin # These are pretty static LABEL maintainer="Weaveworks " \ diff --git a/docker/Dockerfile.helm-operator b/docker/Dockerfile.helm-operator index fca75fc0a..9b70a9e62 100644 --- a/docker/Dockerfile.helm-operator +++ b/docker/Dockerfile.helm-operator @@ -2,7 +2,7 @@ FROM alpine:3.9 WORKDIR /home/flux -RUN apk add --no-cache openssh ca-certificates tini 'git>=2.3.0' +RUN apk add --no-cache openssh-client ca-certificates tini 'git>=2.12.0' # Add git hosts to known hosts file so we can use # StrickHostKeyChecking with git+ssh diff --git a/docker/known_hosts.sh b/docker/known_hosts.sh index 379f6a9d3..476621121 100755 --- a/docker/known_hosts.sh +++ b/docker/known_hosts.sh @@ -5,6 +5,7 @@ set -eu known_hosts_file=${1} 
known_hosts_file=${known_hosts_file:-/etc/ssh/ssh_known_hosts} hosts="github.com gitlab.com bitbucket.org ssh.dev.azure.com vs-ssh.visualstudio.com" +hosts_2022="source.developers.google.com" # The heredoc below was generated by constructing a known_hosts using # @@ -20,6 +21,7 @@ hosts="github.com gitlab.com bitbucket.org ssh.dev.azure.com vs-ssh.visualstudio # - bitbucket.org: https://confluence.atlassian.com/bitbucket/ssh-keys-935365775.html # - ssh.dev.azure.com & vs-ssh.visualstudio.com: sign in, then go to User settings -> SSH Public Keys # (this is where the public key fingerprint is shown; it's not a setting) +# - source.developers.google.com: https://cloud.google.com/source-repositories/docs/cloning-repositories fingerprints=$(mktemp -t) cleanup() { @@ -32,6 +34,7 @@ export LC_ALL=C generate() { ssh-keyscan ${hosts} > ${known_hosts_file} + ssh-keyscan -p 2022 ${hosts_2022} >> ${known_hosts_file} } validate() { @@ -43,6 +46,7 @@ diff - "$fingerprints" < 0 { args = append(args, subdirs...) 
@@ -229,19 +233,22 @@ func splitLog(s string) ([]Commit, error) { lines := splitList(s) commits := make([]Commit, len(lines)) for i, m := range lines { - parts := strings.SplitN(m, "|", 3) - commits[i].SigningKey = parts[0] - commits[i].Revision = parts[1] - commits[i].Message = parts[2] + parts := strings.SplitN(m, "|", 4) + commits[i].Signature = Signature{ + Key: parts[0], + Status: parts[1], + } + commits[i].Revision = parts[2] + commits[i].Message = parts[3] } return commits, nil } func splitList(s string) []string { - outStr := strings.TrimSpace(s) - if outStr == "" { + if strings.TrimSpace(s) == "" { return []string{} } + outStr := strings.TrimSuffix(s, "\n") return strings.Split(outStr, "\n") } @@ -263,11 +270,21 @@ func moveTagAndPush(ctx context.Context, workingDir, tag, upstream string, tagAc return nil } -func verifyTag(ctx context.Context, workingDir, tag string) error { - var env []string - args := []string{"verify-tag", tag} - if err := execGitCmd(ctx, args, gitCmdConfig{dir: workingDir, env: env}); err != nil { - return errors.Wrap(err, "verifying tag "+tag) +// Verify tag signature and return the revision it points to +func verifyTag(ctx context.Context, workingDir, tag string) (string, error) { + out := &bytes.Buffer{} + args := []string{"verify-tag", "--format", "%(object)", tag} + if err := execGitCmd(ctx, args, gitCmdConfig{dir: workingDir, out: out}); err != nil { + return "", errors.Wrap(err, "verifying tag "+tag) + } + return strings.TrimSpace(out.String()), nil +} + +// Verify commit signature +func verifyCommit(ctx context.Context, workingDir, commit string) error { + args := []string{"verify-commit", commit} + if err := execGitCmd(ctx, args, gitCmdConfig{dir: workingDir}); err != nil { + return fmt.Errorf("failed to verify commit %s", commit) } return nil } @@ -290,7 +307,7 @@ func changed(ctx context.Context, workingDir, ref string, subPaths []string) ([] } // traceGitCommand returns a log line that can be useful when debugging and 
developing git activity -func traceGitCommand(args []string, config gitCmdConfig, stdout string, stderr string) string { +func traceGitCommand(args []string, config gitCmdConfig, stdOutAndStdErr string) string { for _, exemptedCommand := range exemptedTraceCommands { if exemptedCommand == args[0] { return "" @@ -305,19 +322,46 @@ func traceGitCommand(args []string, config gitCmdConfig, stdout string, stderr s } command := `git ` + strings.Join(args, " ") - out := prepare(stdout) - err := prepare(stderr) + out := prepare(stdOutAndStdErr) return fmt.Sprintf( - "TRACE: command=%q out=%q err=%q dir=%q env=%q", + "TRACE: command=%q out=%q dir=%q env=%q", command, out, - err, config.dir, strings.Join(config.env, ","), ) } +type threadSafeBuffer struct { + buf bytes.Buffer + mu sync.Mutex +} + +func (b *threadSafeBuffer) Write(p []byte) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.buf.Write(p) +} + +func (b *threadSafeBuffer) Read(p []byte) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.buf.Read(p) +} + +func (b *threadSafeBuffer) Bytes() []byte { + b.mu.Lock() + defer b.mu.Unlock() + return b.buf.Bytes() +} + +func (b *threadSafeBuffer) String() string { + b.mu.Lock() + defer b.mu.Unlock() + return b.buf.String() +} + // execGitCmd runs a `git` command with the supplied arguments. func execGitCmd(ctx context.Context, args []string, config gitCmdConfig) error { c := exec.CommandContext(ctx, "git", args...) @@ -326,30 +370,26 @@ func execGitCmd(ctx context.Context, args []string, config gitCmdConfig) error { c.Dir = config.dir } c.Env = append(env(), config.env...) 
- c.Stdout = ioutil.Discard + stdOutAndStdErr := &threadSafeBuffer{} + c.Stdout = stdOutAndStdErr + c.Stderr = stdOutAndStdErr if config.out != nil { - c.Stdout = config.out - } - errOut := &bytes.Buffer{} - c.Stderr = errOut - - traceStdout := &bytes.Buffer{} - traceStderr := &bytes.Buffer{} - if trace { - c.Stdout = io.MultiWriter(c.Stdout, traceStdout) - c.Stderr = io.MultiWriter(c.Stderr, traceStderr) + c.Stdout = io.MultiWriter(c.Stdout, config.out) } err := c.Run() if err != nil { - msg := findErrorMessage(errOut) - if msg != "" { - err = errors.New(msg) + if len(stdOutAndStdErr.Bytes()) > 0 { + err = errors.New(stdOutAndStdErr.String()) + msg := findErrorMessage(stdOutAndStdErr) + if msg != "" { + err = fmt.Errorf("%s, full output:\n %s", msg, err.Error()) + } } } if trace { - if traceCommand := traceGitCommand(args, config, traceStdout.String(), traceStderr.String()); traceCommand != "" { + if traceCommand := traceGitCommand(args, config, stdOutAndStdErr.String()); traceCommand != "" { println(traceCommand) } } @@ -375,14 +415,20 @@ func env() []string { return env } -// check returns true if there are changes locally. -func check(ctx context.Context, workingDir string, subdirs []string) bool { +// check returns true if there are any local changes. +func check(ctx context.Context, workingDir string, subdirs []string, checkFullRepo bool) bool { // `--quiet` means "exit with 1 if there are changes" args := []string{"diff", "--quiet"} - args = append(args, "--") - if len(subdirs) > 0 { - args = append(args, subdirs...) + + if checkFullRepo { + args = append(args, "HEAD", "--") + } else { + args = append(args, "--") + if len(subdirs) > 0 { + args = append(args, subdirs...) 
+ } } + return execGitCmd(ctx, args, gitCmdConfig{dir: workingDir}) != nil } diff --git a/git/operations_test.go b/git/operations_test.go index 74fcedae7..17e1c6316 100644 --- a/git/operations_test.go +++ b/git/operations_test.go @@ -1,6 +1,7 @@ package git import ( + "bytes" "context" "fmt" "io/ioutil" @@ -8,6 +9,7 @@ import ( "path" "path/filepath" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/weaveworks/flux/cluster/kubernetes/testfiles" @@ -144,6 +146,35 @@ func TestChangedFiles_NoPath(t *testing.T) { } } +func TestChangedFiles_LeadingSpace(t *testing.T) { + newDir, cleanup := testfiles.TempDir(t) + defer cleanup() + + err := createRepo(newDir, []string{}) + if err != nil { + t.Fatal(err) + } + + filename := " space.yaml" + + if err = updateDirAndCommit(newDir, "", map[string]string{filename: "foo"}); err != nil { + t.Fatal(err) + } + + files, err := changed(context.Background(), newDir, "HEAD~1", []string{}) + if err != nil { + t.Fatal(err) + } + + if len(files) != 1 { + t.Fatal("expected 1 changed file") + } + + if actualFilename := files[0]; actualFilename != filename { + t.Fatalf("expected changed filename to equal: '%s', got '%s'", filename, actualFilename) + } +} + func TestOnelinelog_NoGitpath(t *testing.T) { newDir, cleanup := testfiles.TempDir(t) defer cleanup() @@ -316,7 +347,7 @@ func TestTraceGitCommand(t *testing.T) { dir: "/tmp/flux-working628880789", }, }, - expected: `TRACE: command="git clone --branch master /tmp/flux-gitclone239583443 /tmp/flux-working628880789" out="" err="" dir="/tmp/flux-working628880789" env=""`, + expected: `TRACE: command="git clone --branch master /tmp/flux-gitclone239583443 /tmp/flux-working628880789" out="" dir="/tmp/flux-working628880789" env=""`, }, { name: "git rev-list", @@ -333,7 +364,7 @@ func TestTraceGitCommand(t *testing.T) { dir: "/tmp/flux-gitclone239583443", }, }, - expected: `TRACE: command="git rev-list --max-count 1 flux-sync --" out="b9d6a543acf8085ff6bed23fac17f8dc71bfcb66" 
err="" dir="/tmp/flux-gitclone239583443" env=""`, + expected: `TRACE: command="git rev-list --max-count 1 flux-sync --" out="b9d6a543acf8085ff6bed23fac17f8dc71bfcb66" dir="/tmp/flux-gitclone239583443" env=""`, }, { name: "git config email", @@ -347,7 +378,7 @@ func TestTraceGitCommand(t *testing.T) { dir: "/tmp/flux-working056923691", }, }, - expected: `TRACE: command="git config user.email support@weave.works" out="" err="" dir="/tmp/flux-working056923691" env=""`, + expected: `TRACE: command="git config user.email support@weave.works" out="" dir="/tmp/flux-working056923691" env=""`, }, { name: "git notes", @@ -363,7 +394,7 @@ func TestTraceGitCommand(t *testing.T) { }, out: "refs/notes/flux", }, - expected: `TRACE: command="git notes --ref flux get-ref" out="refs/notes/flux" err="" dir="/tmp/flux-working647148942" env=""`, + expected: `TRACE: command="git notes --ref flux get-ref" out="refs/notes/flux" dir="/tmp/flux-working647148942" env=""`, }, } for _, example := range examples { @@ -371,8 +402,23 @@ func TestTraceGitCommand(t *testing.T) { example.input.args, example.input.config, example.input.out, - example.input.err, ) assert.Equal(t, example.expected, actual) } } + +// TestMutexBuffer tests that the threadsafe buffer used to capture +// stdout and stderr does not give rise to races or deadlocks. In +// particular, this test guards against reverting to a situation in +// which copying into the buffer from two goroutines can deadlock it, +// if one of them uses `ReadFrom`. 
+func TestMutexBuffer(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + out := &bytes.Buffer{} + err := execGitCmd(ctx, []string{"log", "--oneline"}, gitCmdConfig{out: out}) + if err != nil { + t.Fatal(err) + } +} diff --git a/git/repo.go b/git/repo.go index f9ac0f611..51b837ba5 100644 --- a/git/repo.go +++ b/git/repo.go @@ -208,6 +208,16 @@ func (r *Repo) Revision(ctx context.Context, ref string) (string, error) { return refRevision(ctx, r.dir, ref) } +// BranchHead returns the HEAD revision (SHA1) of the configured branch +func (r *Repo) BranchHead(ctx context.Context) (string, error) { + r.mu.RLock() + defer r.mu.RUnlock() + if err := r.errorIfNotReady(); err != nil { + return "", err + } + return refRevision(ctx, r.dir, "heads/"+r.branch) +} + func (r *Repo) CommitsBefore(ctx context.Context, ref string, paths ...string) ([]Commit, error) { r.mu.RLock() defer r.mu.RUnlock() @@ -226,6 +236,24 @@ func (r *Repo) CommitsBetween(ctx context.Context, ref1, ref2 string, paths ...s return onelinelog(ctx, r.dir, ref1+".."+ref2, paths) } +func (r *Repo) VerifyTag(ctx context.Context, tag string) (string, error) { + r.mu.RLock() + defer r.mu.RUnlock() + if err := r.errorIfNotReady(); err != nil { + return "", err + } + return verifyTag(ctx, r.dir, tag) +} + +func (r *Repo) VerifyCommit(ctx context.Context, commit string) error { + r.mu.RLock() + defer r.mu.RUnlock() + if err := r.errorIfNotReady(); err != nil { + return err + } + return verifyCommit(ctx, r.dir, commit) +} + // step attempts to advance the repo state machine, and returns `true` // if it has made progress, `false` otherwise. func (r *Repo) step(bg context.Context) bool { diff --git a/git/signature.go b/git/signature.go new file mode 100644 index 000000000..40a7ce7f8 --- /dev/null +++ b/git/signature.go @@ -0,0 +1,13 @@ +package git + +// Signature holds information about a GPG signature. 
+type Signature struct { + Key string + Status string +} + +// Valid returns true if the signature is _G_ood (valid). +// https://github.com/git/git/blob/56d268bafff7538f82c01d3c9c07bdc54b2993b1/Documentation/pretty-formats.txt#L146-L153 +func (s *Signature) Valid() bool { + return s.Status == "G" +} diff --git a/git/working.go b/git/working.go index c0162af5d..326d1bca0 100644 --- a/git/working.go +++ b/git/working.go @@ -36,9 +36,9 @@ type Checkout struct { } type Commit struct { - SigningKey string - Revision string - Message string + Signature Signature + Revision string + Message string } // CommitAction - struct holding commit information @@ -126,8 +126,14 @@ func (c *Checkout) ManifestDirs() []string { // CommitAndPush commits changes made in this checkout, along with any // extra data as a note, and pushes the commit and note to the remote repo. -func (c *Checkout) CommitAndPush(ctx context.Context, commitAction CommitAction, note interface{}) error { - if !check(ctx, c.dir, c.config.Paths) { +func (c *Checkout) CommitAndPush(ctx context.Context, commitAction CommitAction, note interface{}, addUntracked bool) error { + if addUntracked { + if err := add(ctx, c.dir, "."); err != nil { + return err + } + } + + if !check(ctx, c.dir, c.config.Paths, addUntracked) { return ErrNoChanges } @@ -184,7 +190,7 @@ func (c *Checkout) MoveSyncTagAndPush(ctx context.Context, tagAction TagAction) return moveTagAndPush(ctx, c.dir, c.config.SyncTag, c.upstream.URL, tagAction) } -func (c *Checkout) VerifySyncTag(ctx context.Context) error { +func (c *Checkout) VerifySyncTag(ctx context.Context) (string, error) { return verifyTag(ctx, c.dir, c.config.SyncTag) } @@ -202,3 +208,11 @@ func (c *Checkout) ChangedFiles(ctx context.Context, ref string) ([]string, erro func (c *Checkout) NoteRevList(ctx context.Context) (map[string]struct{}, error) { return noteRevList(ctx, c.dir, c.realNotesRef) } + +func (c *Checkout) Checkout(ctx context.Context, rev string) error { + return 
checkout(ctx, c.dir, rev) +} + +func (c *Checkout) Add(ctx context.Context, path string) error { + return add(ctx, c.dir, path) +} diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..62fcae930 --- /dev/null +++ b/go.mod @@ -0,0 +1,83 @@ +module github.com/weaveworks/flux + +go 1.12 + +require ( + cloud.google.com/go v0.37.4 // indirect + github.com/Masterminds/goutils v1.1.0 // indirect + github.com/Masterminds/semver v1.4.2 + github.com/Masterminds/sprig v0.0.0-20190301161902-9f8fceff796f // indirect + github.com/VividCortex/gohistogram v1.0.0 // indirect + github.com/aws/aws-sdk-go v1.19.11 + github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 + github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect + github.com/cyphar/filepath-securejoin v0.2.2 // indirect + github.com/docker/distribution v0.0.0-00010101000000-000000000000 + github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect + github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect + github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect + github.com/evanphx/json-patch v4.1.0+incompatible + github.com/ghodss/yaml v1.0.0 + github.com/go-kit/kit v0.8.0 + github.com/go-logfmt/logfmt v0.4.0 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gogo/googleapis v1.2.0 // indirect + github.com/gogo/status v1.1.0 // indirect + github.com/golang/gddo v0.0.0-20190312205958-5a2505f3dbf0 + github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect + github.com/golang/protobuf v1.3.1 + github.com/google/go-cmp v0.2.0 + github.com/google/gofuzz v1.0.0 // indirect + github.com/google/uuid v1.1.1 // indirect + github.com/gophercloud/gophercloud v0.0.0-20190410012400-2c55d17f707c // indirect + github.com/gorilla/mux v1.7.1 + github.com/gorilla/websocket v1.4.0 + github.com/hashicorp/go-cleanhttp v0.5.1 // indirect + github.com/hashicorp/golang-lru v0.5.1 // indirect + 
github.com/huandu/xstrings v1.2.0 // indirect + github.com/imdario/mergo v0.3.7 + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/justinbarrick/go-k8s-portforward v1.0.3 + github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect + github.com/ncabatoff/go-seq v0.0.0-20180805175032-b08ef85ed833 + github.com/opencontainers/go-digest v1.0.0-rc1 + github.com/opencontainers/image-spec v1.0.1 // indirect + github.com/opentracing-contrib/go-stdlib v0.0.0-20190324214902-3020fec0e66b // indirect + github.com/opentracing/opentracing-go v1.1.0 // indirect + github.com/pkg/errors v0.8.1 + github.com/pkg/term v0.0.0-20190109203006-aa71e9d9e942 + github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 + github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect + github.com/prometheus/common v0.3.0 // indirect + github.com/prometheus/procfs v0.0.0-20190412120340-e22ddced7142 // indirect + github.com/ryanuber/go-glob v1.0.0 + github.com/sirupsen/logrus v1.4.1 // indirect + github.com/spf13/cobra v0.0.3 + github.com/spf13/pflag v1.0.3 + github.com/stretchr/testify v1.3.0 + github.com/uber/jaeger-client-go v2.16.0+incompatible // indirect + github.com/uber/jaeger-lib v2.0.0+incompatible // indirect + github.com/weaveworks/common v0.0.0-20190410110702-87611edc252e + github.com/weaveworks/go-checkpoint v0.0.0-20170503165305-ebbb8b0518ab + github.com/weaveworks/promrus v1.2.0 // indirect + github.com/whilp/git-urls v0.0.0-20160530060445-31bac0d230fa + golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a // indirect + golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 // indirect + golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a // indirect + golang.org/x/sys v0.0.0-20190411185658-b44545bcd369 + golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 + google.golang.org/api v0.3.2 // indirect + google.golang.org/appengine v1.5.0 // indirect + google.golang.org/grpc v1.20.0 // indirect + gopkg.in/yaml.v2 
v2.2.2 + k8s.io/api v0.0.0-20190313235455-40a48860b5ab + k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed + k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d + k8s.io/client-go v11.0.0+incompatible + k8s.io/code-generator v0.0.0-20190511023357-639c964206c2 + k8s.io/helm v2.13.1+incompatible + k8s.io/klog v0.3.0 + k8s.io/kube-openapi v0.0.0-20190401085232-94e1e7b7574c // indirect +) + +replace github.com/docker/distribution => github.com/2opremio/distribution v0.0.0-20190419185413-6c9727e5e5de diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..7d23cd107 --- /dev/null +++ b/go.sum @@ -0,0 +1,370 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU= +cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= +contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +github.com/2opremio/distribution v0.0.0-20190419185413-6c9727e5e5de h1:BNSXHiWNaMNhx2g1bbIubySvhdKyNF+0bepwZVa1Q6k= +github.com/2opremio/distribution v0.0.0-20190419185413-6c9727e5e5de/go.mod h1:QHT6cqKT8fLkQMioAxx43yuZxuzwV655sKt60H8N17Q= +github.com/Azure/go-autorest v11.7.1+incompatible h1:M2YZIajBBVekV86x0rr1443Lc1F/Ylxb9w+5EtSyX3Q= +github.com/Azure/go-autorest v11.7.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= 
+github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/sprig v0.0.0-20190301161902-9f8fceff796f h1:lGvI8+dm9Y/Qr6BfsbmjAz3iC3iq9+vUQLSKCDROE6s= +github.com/Masterminds/sprig v0.0.0-20190301161902-9f8fceff796f/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/aws/aws-sdk-go v1.19.11 h1:tqaTGER6Byw3QvsjGW0p018U2UOqaJPeJuzoaF7jjoQ= +github.com/aws/aws-sdk-go v1.19.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK+hNk4tyD+nuGjpVLPEHuJSFXMw11/HPA= +github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= 
+github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 h1:X0fj836zx99zFu83v/M79DuBn84IL/Syx1SY6Y5ZEMA= +github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20170912183627-bc6354cbbc29/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c h1:ZfSZ3P3BedhKGUhzj7BQlPSU4OvT6tfOKe3DVHzOA7s= +github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/eapache/go-resiliency v1.1.0/go.mod 
h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f h1:8GDPb0tCY8LQ+OJ3dbHb5sA6YZWXFORQYZx5sdsTlMs= +github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy/ext v0.0.0-20190421051319-9d40249d3c2f h1:AUj1VoZUfhPhOPHULCQQDnGhRelpFWHMLhQVWDsS0v4= +github.com/elazarl/goproxy/ext v0.0.0-20190421051319-9d40249d3c2f/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc= +github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= 
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.2.0 h1:Z0v3OJDotX9ZBpdz2V+AI7F4fITSZhVE5mg6GQppwMM= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang/gddo v0.0.0-20190312205958-5a2505f3dbf0 h1:CfaPdCDbZu8jSwjq0flJv2u+WreQM0KqytUQahZ6Xf4= +github.com/golang/gddo v0.0.0-20190312205958-5a2505f3dbf0/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gophercloud/gophercloud v0.0.0-20180807015416-4ea085781bae/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= +github.com/gophercloud/gophercloud v0.0.0-20190410012400-2c55d17f707c h1:vGQ5eWkG5WkBdfGR+7J5yF2a6clwcUMM1r9fmRHPBVI= +github.com/gophercloud/gophercloud v0.0.0-20190410012400-2c55d17f707c/go.mod 
h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.1 h1:Dw4jY2nghMMRsh1ol8dv1axHkDwMQK2DHerMNJsIpJU= +github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.2.0 h1:yPeWdRnmynF7p+lLYz0H2tthW9lqhMJrQV/U7yy4wX0= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod 
h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE= +github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/justinbarrick/go-k8s-portforward v1.0.2/go.mod h1:klMOboLnC1/UlkyJnYFjcMcbOtwAcKop+LkIZ4r428o= +github.com/justinbarrick/go-k8s-portforward v1.0.3 h1:FPvJqHjIKb0xlA8FuFYSzlsyQWeqNzXbCDQMyfbmCpI= +github.com/justinbarrick/go-k8s-portforward v1.0.3/go.mod h1:GkvGI25j2iHpJVINl/hZC+sbf9IJ1XkY1MtjSh3Usuk= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/ncabatoff/go-seq v0.0.0-20180805175032-b08ef85ed833 h1:t4WWQ9I797y7QUgeEjeXnVb+oYuEDQc6gLvrZJTYo94= +github.com/ncabatoff/go-seq v0.0.0-20180805175032-b08ef85ed833/go.mod h1:0CznHmXSjMEqs5Tezj/w2emQoM41wzYM9KpDKUHPYag= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= 
+github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opentracing-contrib/go-stdlib v0.0.0-20190324214902-3020fec0e66b h1:N/+vVH19UEwFml23XATspYXdbpBU/oPvXy3CnkODjc0= +github.com/opentracing-contrib/go-stdlib v0.0.0-20190324214902-3020fec0e66b/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/term v0.0.0-20190109203006-aa71e9d9e942 h1:A7GG7zcGjl3jqAqGPmcNjd/D9hzL95SuoOQAaFNdLU0= +github.com/pkg/term v0.0.0-20190109203006-aa71e9d9e942/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model 
v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.3.0 h1:taZ4h8Tkxv2kNyoSctBvfXEHmBmxrwmIidZTIaHons4= +github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190412120340-e22ddced7142 h1:JO6VBMEDSBX/LT4GKwSdvuFigZNwVD4lkPyUE4BDCKE= +github.com/prometheus/procfs v0.0.0-20190412120340-e22ddced7142/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= +github.com/sirupsen/logrus v1.4.1/go.mod 
h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQGFt7E53bPYqEgug/AoBtY= +github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.0.0+incompatible h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw= +github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/weaveworks/common v0.0.0-20190410110702-87611edc252e h1:Y5z9Bu95l0laB3xOF3BT6LWkcR7VcRxN0n+nf2S72xA= +github.com/weaveworks/common v0.0.0-20190410110702-87611edc252e/go.mod h1:pSm+0KR57BG3pvGoJWFXJSAC7+sEPewcvdt5StevL3A= +github.com/weaveworks/go-checkpoint v0.0.0-20170503165305-ebbb8b0518ab h1:mW+hgchD9qUUBqnuaDBj7BkcpFPk/FxeFcUFI5lvvUw= +github.com/weaveworks/go-checkpoint v0.0.0-20170503165305-ebbb8b0518ab/go.mod h1:qkbvw5GPibQ/Nf7IZJL0UoLwmJ6858b4S/hUWRd+cH4= +github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= +github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= 
+github.com/whilp/git-urls v0.0.0-20160530060445-31bac0d230fa h1:rW+Lu6281ed/4XGuVIa4/YebTRNvoUJlfJ44ktEVwZk= +github.com/whilp/git-urls v0.0.0-20160530060445-31bac0d230fa/go.mod h1:2rx5KE5FLD0HRfkkpyn8JwbVLBdhgeiOb2D2D9LLKM4= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a h1:Igim7XhdOpBnWPuYJ70XcNpq8q3BCACtVgNfoJxOV7g= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369 h1:aBlRBZoCuZNRDClvfkDoklQqdLzBaA3uViASg2z2p24= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138 h1:H3uGjxCR/6Ds0Mjgyp7LMK81+LvmbvWWEnJhzk1Pi9E= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= +gonum.org/v1/netlib 
v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.3.2 h1:iTp+3yyl/KOtxa/d1/JUE0GGSoR6FuW5udver22iwpw= +google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0 h1:DlsSIrgEBuZAUFJcta2B5i/lzeHHbnfkNFAfFXLVFYQ= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.0.0-20190313235455-40a48860b5ab h1:DG9A67baNpoeweOy2spF1OWHhnVY5KR7/Ek/+U1lVZc= +k8s.io/api v0.0.0-20190313235455-40a48860b5ab/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed h1:rCteec//ELIjZMfjIGQbVtZooyaofqDJwsmWwWKItNs= +k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= +k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d 
h1:Jmdtdt1ZnoGfWWIIik61Z7nKYgO3J+swQJtPYsP9wHA= +k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= +k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/code-generator v0.0.0-20190511023357-639c964206c2 h1:wfF2JZb8Bl68FNMg/BAkIkkE29Z/bXWBYTtoQh/Cbo0= +k8s.io/code-generator v0.0.0-20190511023357-639c964206c2/go.mod h1:YMQ7Lt97nW/I6nHACDccgS/sPAyrHQNans96RwPaSb8= +k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk= +k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/helm v2.13.1+incompatible h1:qt0LBsHQ7uxCtS3F2r3XI0DNm8ml0xQeSJixUorDyn0= +k8s.io/helm v2.13.1+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/kube-openapi v0.0.0-20180509051136-39cb288412c4/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/kube-openapi v0.0.0-20190401085232-94e1e7b7574c h1:kJCzg2vGCzah5icgkKR7O1Dzn0NA2iGlym27sb0ZfGE= +k8s.io/kube-openapi v0.0.0-20190401085232-94e1e7b7574c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 h1:8r+l4bNWjRlsFYlQJnKJ2p7s1YQPj4XyXiJVqDHRx7c= +k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod 
h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/gpg/gpg.go b/gpg/gpg.go index b5ccb4138..e5a5ad24d 100644 --- a/gpg/gpg.go +++ b/gpg/gpg.go @@ -16,7 +16,7 @@ import ( // directory will be imported, but not subdirectories (i.e., no // recursion). It returns the basenames of the succesfully imported // keys. -func ImportKeys(src string) ([]string, error) { +func ImportKeys(src string, trustImportedKeys bool) ([]string, error) { info, err := os.Stat(src) var files []string switch { @@ -54,6 +54,12 @@ func ImportKeys(src string) ([]string, error) { return imported, fmt.Errorf("errored importing keys: %v", failed) } + if trustImportedKeys { + if err = gpgTrustImportedKeys(); err != nil { + return imported, err + } + } + return imported, nil } @@ -65,3 +71,16 @@ func gpgImport(path string) error { } return nil } + +func gpgTrustImportedKeys() error { + // List imported keys and their fingerprints, grep the fingerprints, + // transform them into a format gpg understands, and pipe the output + // into --import-ownertrust. 
+ arg := `gpg --list-keys --fingerprint | grep pub -A 1 | egrep -Ev "pub|--"|tr -d ' ' | awk 'BEGIN { FS = "\n" } ; { print $1":6:" }' | gpg --import-ownertrust` + cmd := exec.Command("sh", "-c", arg) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("error trusting imported keys: %s", string(out)) + } + return nil +} diff --git a/gpg/gpgtest/gpg.go b/gpg/gpgtest/gpg.go index aecbb2ac3..d1b574ed3 100644 --- a/gpg/gpgtest/gpg.go +++ b/gpg/gpgtest/gpg.go @@ -39,7 +39,7 @@ func GPGKey(t *testing.T) (string, string, func()) { t.Fatal(err) } - gpgCmd := exec.Command("gpg", "--homedir", newDir, "--list-keys", "--with-colons") + gpgCmd := exec.Command("gpg", "--homedir", newDir, "--list-keys", "--with-colons", "--with-fingerprint") grepCmd := exec.Command("grep", "^fpr") cutCmd := exec.Command("cut", "-d:", "-f10") diff --git a/image/image.go b/image/image.go index c7d8f605b..d580c5ed5 100644 --- a/image/image.go +++ b/image/image.go @@ -223,6 +223,74 @@ func (i Ref) WithNewTag(t string) Ref { return img } +type LabelTimestampFormatError struct { + Labels []string +} + +func (e *LabelTimestampFormatError) Error() string { + return fmt.Sprintf( + "failed to parse %d timestamp label(s) as RFC3339 (%s); ask the repository administrator to correct this as it conflicts with the spec", + len(e.Labels), + strings.Join(e.Labels, ",")) +} + +// Labels has all the image labels we are interested in for an image +// ref, the JSON struct tag keys should be equal to the label. 
+type Labels struct { + // BuildDate holds the Label Schema spec 'build date' label + // Ref: http://label-schema.org/rc1/#build-time-labels + BuildDate time.Time `json:"org.label-schema.build-date,omitempty"` + // Created holds the Open Container Image spec 'created' label + // Ref: https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys + Created time.Time `json:"org.opencontainers.image.created,omitempty"` +} + +// MarshalJSON returns the Labels value in JSON (as bytes). It is +// implemented so that we can omit the time values when they are +// zero, which would otherwise be tricky for e.g., JavaScript to +// detect. +func (l Labels) MarshalJSON() ([]byte, error) { + var bd, c string + if !l.BuildDate.IsZero() { + bd = l.BuildDate.UTC().Format(time.RFC3339Nano) + } + if !l.Created.IsZero() { + c = l.Created.UTC().Format(time.RFC3339Nano) + } + encode := struct { + BuildDate string `json:"org.label-schema.build-date,omitempty"` + Created string `json:"org.opencontainers.image.created,omitempty"` + }{BuildDate: bd, Created: c} + return json.Marshal(encode) +} + +// UnmarshalJSON populates Labels from JSON (as bytes). It's the +// companion to MarshalJSON above. 
+func (l *Labels) UnmarshalJSON(b []byte) error { + unencode := struct { + BuildDate string `json:"org.label-schema.build-date,omitempty"` + Created string `json:"org.opencontainers.image.created,omitempty"` + }{} + json.Unmarshal(b, &unencode) + labelErr := LabelTimestampFormatError{} + if err := decodeTime(unencode.BuildDate, &l.BuildDate); err != nil { + if _, ok := err.(*time.ParseError); !ok { + return err + } + labelErr.Labels = append(labelErr.Labels, "org.label-schema.build-date") + } + if err := decodeTime(unencode.Created, &l.Created); err != nil { + if _, ok := err.(*time.ParseError); !ok { + return err + } + labelErr.Labels = append(labelErr.Labels, "org.opencontainers.image.created") + } + if len(labelErr.Labels) >= 1 { + return &labelErr + } + return nil +} + // Info has the metadata we are able to determine about an image ref, // from its registry. type Info struct { @@ -235,6 +303,8 @@ type Info struct { // will be the same for references that point at the same image // (but does not necessarily equal Docker's image ID) ImageID string `json:",omitempty"` + // all labels we are interested in and could find for the image ref + Labels Labels `json:",omitempty"` // the time at which the image pointed at was created CreatedAt time.Time `json:",omitempty"` // the last time this image manifest was fetched @@ -281,6 +351,27 @@ func (im *Info) UnmarshalJSON(b []byte) error { return err } +// CreatedTS returns the created at timestamp for an image, +// prioritizing user defined timestamps from labels over the ones we +// receive from a Docker registry API. +// +// The reason for this is registry vendors have different +// interpretations of what a creation date is, and we want the user to +// be in control when required. +// +// In addition we prioritize the `Created` label over the `BuildDate`, +// as the Label Schema Spec has been deprecated in favour of the OCI +// Spec (but is still well known and widely used). 
+func (im Info) CreatedTS() time.Time { + if !im.Labels.Created.IsZero() { + return im.Labels.Created + } + if !im.Labels.BuildDate.IsZero() { + return im.Labels.BuildDate + } + return im.CreatedAt +} + // RepositoryMetadata contains the image metadata information found in an // image repository. // @@ -334,10 +425,10 @@ func decodeTime(s string, t *time.Time) error { // NewerByCreated returns true if lhs image should be sorted // before rhs with regard to their creation date descending. func NewerByCreated(lhs, rhs *Info) bool { - if lhs.CreatedAt.Equal(rhs.CreatedAt) { + if lhs.CreatedTS().Equal(rhs.CreatedTS()) { return lhs.ID.String() < rhs.ID.String() } - return lhs.CreatedAt.After(rhs.CreatedAt) + return lhs.CreatedTS().After(rhs.CreatedTS()) } // NewerBySemver returns true if lhs image should be sorted diff --git a/image/image_test.go b/image/image_test.go index 79a4cec55..818ed124a 100644 --- a/image/image_test.go +++ b/image/image_test.go @@ -138,6 +138,53 @@ func TestRefSerialization(t *testing.T) { } } +func TestImageLabelsSerialisation(t *testing.T) { + t0 := time.Now().UTC() // UTC so it has nil location, otherwise it won't compare + t1 := time.Now().Add(5 * time.Minute).UTC() + labels := Labels{Created: t0, BuildDate: t1} + bytes, err := json.Marshal(labels) + if err != nil { + t.Fatal(err) + } + var labels1 Labels + if err = json.Unmarshal(bytes, &labels1); err != nil { + t.Fatal(err) + } + assert.Equal(t, labels, labels1) +} + +func TestNonRFC3339ImageLabelsUnmarshal(t *testing.T) { + str := `{ + "org.label-schema.build-date": "20190523", + "org.opencontainers.image.created": "20190523" +}` + + var labels Labels + err := json.Unmarshal([]byte(str), &labels) + lpe, ok := err.(*LabelTimestampFormatError) + if !ok { + t.Fatalf("Got %v, but expected LabelTimestampFormatError", err) + } + if lc := len(lpe.Labels); lc != 2 { + t.Errorf("Got error for %v labels, expected 2", lc) + } +} + +func TestImageLabelsZeroTime(t *testing.T) { + labels := Labels{} + 
bytes, err := json.Marshal(labels) + if err != nil { + t.Fatal(err) + } + var labels1 map[string]interface{} + if err = json.Unmarshal(bytes, &labels1); err != nil { + t.Fatal(err) + } + if lc := len(labels1); lc >= 1 { + t.Errorf("serialised Labels contains %v fields; expected it to contain none\n%v", lc, labels1) + } +} + func mustMakeInfo(ref string, created time.Time) Info { r, err := ParseRef(ref) if err != nil { @@ -146,6 +193,11 @@ func mustMakeInfo(ref string, created time.Time) Info { return Info{ID: r, CreatedAt: created} } +func (im Info) setLabels(labels Labels) Info { + im.Labels = labels + return im +} + func TestImageInfoSerialisation(t *testing.T) { t0 := time.Now().UTC() // UTC so it has nil location, otherwise it won't compare t1 := time.Now().Add(5 * time.Minute).UTC() @@ -184,10 +236,10 @@ func TestImage_OrderByCreationDate(t *testing.T) { time0 := testTime.Add(time.Second) time2 := testTime.Add(-time.Second) imA := mustMakeInfo("my/Image:2", testTime) - imB := mustMakeInfo("my/Image:0", time0) - imC := mustMakeInfo("my/Image:3", time2) + imB := mustMakeInfo("my/Image:0", time.Time{}).setLabels(Labels{Created: time0}) + imC := mustMakeInfo("my/Image:3", time.Time{}).setLabels(Labels{BuildDate: time2}) imD := mustMakeInfo("my/Image:4", time.Time{}) // test nil - imE := mustMakeInfo("my/Image:1", testTime) // test equal + imE := mustMakeInfo("my/Image:1", time.Time{}).setLabels(Labels{Created: testTime}) // test equal imF := mustMakeInfo("my/Image:5", time.Time{}) // test nil equal imgs := []Info{imA, imB, imC, imD, imE, imF} Sort(imgs, NewerByCreated) @@ -204,7 +256,7 @@ func checkSorted(t *testing.T, imgs []Info) { for i, im := range imgs { if strconv.Itoa(i) != im.ID.Tag { for j, jim := range imgs { - t.Logf("%v: %v %s", j, jim.ID.String(), jim.CreatedAt) + t.Logf("%v: %v %s", j, jim.ID.String(), jim.CreatedTS()) } t.Fatalf("Not sorted in expected order: %#v", imgs) } diff --git a/integrations/helm/chartsync/chartsync.go 
b/integrations/helm/chartsync/chartsync.go index d474f671a..df1348eec 100644 --- a/integrations/helm/chartsync/chartsync.go +++ b/integrations/helm/chartsync/chartsync.go @@ -243,15 +243,15 @@ func (chs *ChartChangeSync) Run(stopCh <-chan struct{}, errc chan error, wg *syn if cloneForChart.export != nil { cloneForChart.export.Clean() } - } - // Enqueue release - cacheKey, err := cache.MetaNamespaceKeyFunc(fhr.GetObjectMeta()) - if err != nil { - continue + // we have a (new) clone, enqueue a release + cacheKey, err := cache.MetaNamespaceKeyFunc(fhr.GetObjectMeta()) + if err != nil { + continue + } + chs.logger.Log("info", "enqueing release upgrade due to change in git chart source", "resource", fhr.ResourceID().String()) + chs.releaseQueue.AddRateLimited(cacheKey) } - chs.logger.Log("info", "enqueing release upgrade due to change in git chart source", "resource", fhr.ResourceID().String()) - chs.releaseQueue.AddRateLimited(cacheKey) } } case <-stopCh: diff --git a/integrations/helm/http/daemon/server.go b/integrations/helm/http/daemon/server.go index d7a1cedc7..a0fb574c2 100644 --- a/integrations/helm/http/daemon/server.go +++ b/integrations/helm/http/daemon/server.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + _ "net/http/pprof" "sync/atomic" "time" diff --git a/integrations/helm/operator/operator.go b/integrations/helm/operator/operator.go index 9c3c300c3..c5fe1908d 100644 --- a/integrations/helm/operator/operator.go +++ b/integrations/helm/operator/operator.go @@ -1,7 +1,6 @@ package operator import ( - "errors" "fmt" "sync" "time" @@ -117,22 +116,14 @@ func New( return controller } -// Run sets up the event handlers for our Custom Resource, as well -// as syncing informer caches and starting workers. It will block until stopCh -// is closed, at which point it will shutdown the workqueue and wait for -// workers to finish processing their current work items. 
-func (c *Controller) Run(threadiness int, stopCh <-chan struct{}, wg *sync.WaitGroup) error { +// Run starts workers handling the enqueued events. It will block until +// stopCh is closed, at which point it will shutdown the workqueue and +// wait for workers to finish processing their current work items. +func (c *Controller) Run(threadiness int, stopCh <-chan struct{}, wg *sync.WaitGroup) { defer runtime.HandleCrash() defer c.releaseWorkqueue.ShutDown() c.logger.Log("info", "starting operator") - // Wait for the caches to be synced before starting workers - c.logger.Log("info", "waiting for informer caches to sync") - - if ok := cache.WaitForCacheSync(stopCh, c.fhrSynced); !ok { - return errors.New("failed to wait for caches to sync") - } - c.logger.Log("info", "unformer caches synced") c.logger.Log("info", "starting workers") for i := 0; i < threadiness; i++ { @@ -145,8 +136,6 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}, wg *sync.WaitG wg.Done() } c.logger.Log("info", "stopping workers") - - return nil } // runWorker is a long-running function calling the diff --git a/integrations/helm/release/release.go b/integrations/helm/release/release.go index 2ccb50da3..c32065c13 100644 --- a/integrations/helm/release/release.go +++ b/integrations/helm/release/release.go @@ -164,7 +164,7 @@ func (r *Release) Install(chartPath, releaseName string, fhr flux_v1beta1.HelmRe return nil, fmt.Errorf("error statting path given for chart %s: %s", chartPath, err.Error()) } - r.logger.Log("info", fmt.Sprintf("processing release %s (as %s)", fhr.Spec.ReleaseName, releaseName), + r.logger.Log("info", fmt.Sprintf("processing release %s (as %s)", GetReleaseName(fhr), releaseName), "action", fmt.Sprintf("%v", action), "options", fmt.Sprintf("%+v", opts), "timeout", fmt.Sprintf("%vs", fhr.GetTimeout())) diff --git a/internal_docs/releasing.md b/internal_docs/releasing.md index 2d2b67bbb..5b5435860 100644 --- a/internal_docs/releasing.md +++ 
b/internal_docs/releasing.md @@ -6,6 +6,7 @@ The release process needs to do these things: - push Docker image(s) to Docker Hub - possibly upload the [`fluxctl` binaries](/site/fluxctl.md#binary-releases) to the GitHub release - make sure the version is entered into the checkpoint database so that up-to-date checks report back accurate information + - close out the GitHub milestone that was used to track the release Much of this is automated, but it needs a human to turn the wheel. @@ -88,6 +89,10 @@ change. You can do these as additional PRs. Read on, for how to publish a new Helm chart version. +**Bookkeeping** + +11. Close the GitHub milestone relating to the release. If there are open issues or unmerged PRs in the milestone, they will need to be either reassigned to the next milestone, or (if unclear where they belong), unassigned. + ## Helm chart release process 1. Create a new branch as in `chart-bump` diff --git a/manifests/configaware.go b/manifests/configaware.go new file mode 100644 index 000000000..a740f3ccf --- /dev/null +++ b/manifests/configaware.go @@ -0,0 +1,434 @@ +package manifests + +import ( + "bytes" + "context" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/weaveworks/flux" + "github.com/weaveworks/flux/image" + "github.com/weaveworks/flux/policy" + "github.com/weaveworks/flux/resource" +) + +type resourceWithOrigin struct { + resource resource.Resource + configFile *ConfigFile // only set if the resource came from a configuration file +} + +type configAware struct { + // rawFiles will do everything for the paths that have no config file + rawFiles *rawFiles + + // to maintain encapsulation, we don't rely on the rawFiles values + baseDir string + manifests Manifests + configFiles []*ConfigFile + + // a cache of the loaded resources, since the pattern is to update + // a few things at a time, and the update operations all need to + // have a set of resources + mu sync.RWMutex + resourcesByID 
map[string]resourceWithOrigin +} + +func NewConfigAware(baseDir string, targetPaths []string, manifests Manifests) (*configAware, error) { + configFiles, rawManifestDirs, err := splitConfigFilesAndRawManifestPaths(baseDir, targetPaths) + if err != nil { + return nil, err + } + + result := &configAware{ + rawFiles: &rawFiles{ + manifests: manifests, + baseDir: baseDir, + paths: rawManifestDirs, + }, + manifests: manifests, + baseDir: baseDir, + configFiles: configFiles, + } + return result, nil +} + +func splitConfigFilesAndRawManifestPaths(baseDir string, paths []string) ([]*ConfigFile, []string, error) { + var ( + configFiles []*ConfigFile + rawManifestPaths []string + ) + + for _, path := range paths { + relPath, err := filepath.Rel(baseDir, path) + if err != nil { + return nil, nil, err + } + configFilePath, workingDirPath, err := findConfigFilePaths(baseDir, path) + if err != nil { + if err == configFileNotFoundErr { + rawManifestPaths = append(rawManifestPaths, path) + continue + } + return nil, nil, fmt.Errorf("error when searching config files for path %q: %s", relPath, err) + } + cf, err := NewConfigFile(configFilePath, workingDirPath) + if err != nil { + relConfigFilePath, relErr := filepath.Rel(baseDir, configFilePath) + if err != nil { + return nil, nil, relErr + } + return nil, nil, fmt.Errorf("cannot parse config file %q: %s", relConfigFilePath, err) + } + configFiles = append(configFiles, cf) + } + + return configFiles, rawManifestPaths, nil +} + +var configFileNotFoundErr = fmt.Errorf("config file not found") + +func findConfigFilePaths(baseDir string, initialPath string) (string, string, error) { + // The path can directly be a .flux.yaml config file + fileStat, err := os.Stat(initialPath) + if err != nil { + return "", "", err + } + if !fileStat.IsDir() { + workingDir, filename := filepath.Split(initialPath) + if filename == ConfigFilename { + return initialPath, filepath.Clean(workingDir), nil + } + return "", "", configFileNotFoundErr + } + + // 
Make paths canonical and remove potential ending slash, + // for filepath.Dir() to work as we expect. + // Also, the initial path must be contained in baseDir + // (to make sure we don't escape the git checkout when + // moving upwards in the directory hierarchy) + _, cleanInitialPath, err := cleanAndEnsureParentPath(baseDir, initialPath) + if err != nil { + return "", "", err + } + + for path := cleanInitialPath; ; { + potentialConfigFilePath := filepath.Join(path, ConfigFilename) + if _, err := os.Stat(potentialConfigFilePath); err == nil { + return potentialConfigFilePath, initialPath, nil + } + if path == baseDir { + break + } + // check the parent directory + path = filepath.Dir(path) + } + + return "", "", configFileNotFoundErr +} + +func (ca *configAware) SetWorkloadContainerImage(ctx context.Context, resourceID flux.ResourceID, container string, + newImageID image.Ref) error { + resourcesByID, err := ca.getResourcesByID(ctx) + if err != nil { + return err + } + resWithOrigin, ok := resourcesByID[resourceID.String()] + if !ok { + return ErrResourceNotFound(resourceID.String()) + } + if resWithOrigin.configFile == nil { + if err := ca.rawFiles.setManifestWorkloadContainerImage(resWithOrigin.resource, container, newImageID); err != nil { + return err + } + } else if err := ca.setConfigFileWorkloadContainerImage(ctx, resWithOrigin.configFile, resWithOrigin.resource, container, newImageID); err != nil { + return err + } + // Reset resources, since we have modified one + ca.resetResources() + return nil +} + +func (ca *configAware) setConfigFileWorkloadContainerImage(ctx context.Context, cf *ConfigFile, r resource.Resource, + container string, newImageID image.Ref) error { + if cf.PatchUpdated != nil { + return ca.updatePatchFile(ctx, cf, func(previousManifests []byte) ([]byte, error) { + return ca.manifests.SetWorkloadContainerImage(previousManifests, r.ResourceID(), container, newImageID) + }) + } + + // Command-updated + result := 
cf.ExecContainerImageUpdaters(ctx, + r.ResourceID(), + container, + newImageID.Name.String(), newImageID.Tag, + ) + if len(result) > 0 && result[len(result)-1].Error != nil { + updaters := cf.CommandUpdated.Updaters + return fmt.Errorf("error executing image updater command %q from file %q: %s\noutput:\n%s", + updaters[len(result)-1].ContainerImage.Command, + result[len(result)-1].Error, + r.Source(), + result[len(result)-1].Output, + ) + } + return nil +} + +func (ca *configAware) updatePatchFile(ctx context.Context, cf *ConfigFile, + updateF func(previousManifests []byte) ([]byte, error)) error { + + patchUpdated := *cf.PatchUpdated + generatedManifests, patchedManifests, patchFilePath, err := ca.getGeneratedAndPatchedManifests(ctx, cf, patchUpdated) + if err != nil { + relConfigFilePath, err := filepath.Rel(ca.baseDir, cf.Path) + if err != nil { + return err + } + return fmt.Errorf("error parsing generated, patched output from file %s: %s", relConfigFilePath, err) + } + finalManifests, err := updateF(patchedManifests) + if err != nil { + return err + } + newPatch, err := ca.manifests.CreateManifestPatch(generatedManifests, finalManifests, + "generated manifests", "patched and updated manifests") + if err != nil { + return err + } + return ioutil.WriteFile(patchFilePath, newPatch, 0600) +} + +func (ca *configAware) getGeneratedAndPatchedManifests(ctx context.Context, cf *ConfigFile, patchUpdated PatchUpdated) ([]byte, []byte, string, error) { + generatedManifests, err := ca.getGeneratedManifests(ctx, cf, patchUpdated.Generators) + if err != nil { + return nil, nil, "", err + } + + // The patch file is expressed relatively to the configuration file's working directory + explicitPatchFilePath := patchUpdated.PatchFile + patchFilePath := filepath.Join(cf.WorkingDir, explicitPatchFilePath) + + // Make sure that the patch file doesn't fall out of the Git repository checkout + _, _, err = cleanAndEnsureParentPath(ca.baseDir, patchFilePath) + if err != nil { + return 
nil, nil, "", err
+	}
+	patch, err := ioutil.ReadFile(patchFilePath)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return nil, nil, "", err
+		}
+		// Tolerate a missing patch file, since it may not have been created yet.
+		// However, its base path must exist.
+		patchBaseDir := filepath.Dir(patchFilePath)
+		if stat, err := os.Stat(patchBaseDir); err != nil || !stat.IsDir() {
+			err := fmt.Errorf("base directory (%q) of patchFile (%q) does not exist",
+				filepath.Dir(explicitPatchFilePath), explicitPatchFilePath)
+			return nil, nil, "", err
+		}
+		patch = nil
+	}
+	relConfigFilePath, err := filepath.Rel(ca.baseDir, cf.Path)
+	if err != nil {
+		return nil, nil, "", err
+	}
+	patchedManifests, err := ca.manifests.ApplyManifestPatch(generatedManifests, patch, relConfigFilePath, explicitPatchFilePath)
+	if err != nil {
+		return nil, nil, "", fmt.Errorf("cannot patch generated resources: %s", err)
+	}
+	return generatedManifests, patchedManifests, patchFilePath, nil
+}
+
+func (ca *configAware) getGeneratedManifests(ctx context.Context, cf *ConfigFile, generators []Generator) ([]byte, error) {
+	buf := bytes.NewBuffer(nil)
+	for i, cmdResult := range cf.ExecGenerators(ctx, generators) {
+		relConfigFilePath, err := filepath.Rel(ca.baseDir, cf.Path)
+		if err != nil {
+			return nil, err
+		}
+		if cmdResult.Error != nil {
+			// "generated output" is what the command wrote to stdout, not stderr
+			err := fmt.Errorf("error executing generator command %q from file %q: %s\nerror output:\n%s\ngenerated output:\n%s",
+				generators[i].Command,
+				relConfigFilePath,
+				cmdResult.Error,
+				string(cmdResult.Stderr),
+				string(cmdResult.Stdout),
+			)
+			return nil, err
+		}
+		if err := ca.manifests.AppendManifestToBuffer(cmdResult.Stdout, buf); err != nil {
+			return nil, err
+		}
+	}
+	return buf.Bytes(), nil
+}
+
+func (ca *configAware) UpdateWorkloadPolicies(ctx context.Context, resourceID flux.ResourceID,
+	update policy.Update) (bool, error) {
+	resourcesByID, err := ca.getResourcesByID(ctx)
+	if err != nil {
+		return false, err
+	}
+	resWithOrigin, ok :=
resourcesByID[resourceID.String()] + if !ok { + return false, ErrResourceNotFound(resourceID.String()) + } + var changed bool + if resWithOrigin.configFile == nil { + changed, err = ca.rawFiles.updateManifestWorkloadPolicies(resWithOrigin.resource, update) + } else { + changed, err = ca.updateConfigFileWorkloadPolicies(ctx, resWithOrigin.configFile, resWithOrigin.resource, update) + } + if err != nil { + return false, err + } + // Reset resources, since we have modified one + ca.resetResources() + return changed, nil +} + +func (ca *configAware) updateConfigFileWorkloadPolicies(ctx context.Context, cf *ConfigFile, r resource.Resource, + update policy.Update) (bool, error) { + if cf.PatchUpdated != nil { + var changed bool + err := ca.updatePatchFile(ctx, cf, func(previousManifests []byte) ([]byte, error) { + updatedManifests, err := ca.manifests.UpdateWorkloadPolicies(previousManifests, r.ResourceID(), update) + if err == nil { + changed = bytes.Compare(previousManifests, updatedManifests) != 0 + } + return updatedManifests, err + }) + return changed, err + } + + // Command-updated + workload, ok := r.(resource.Workload) + if !ok { + return false, errors.New("resource " + r.ResourceID().String() + " does not have containers") + } + changes, err := resource.ChangesForPolicyUpdate(workload, update) + if err != nil { + return false, err + } + + for key, value := range changes { + result := cf.ExecPolicyUpdaters(ctx, r.ResourceID(), key, value) + if len(result) > 0 && result[len(result)-1].Error != nil { + updaters := cf.CommandUpdated.Updaters + err := fmt.Errorf("error executing annotation updater command %q from file %q: %s\noutput:\n%s", + updaters[len(result)-1].Policy.Command, + result[len(result)-1].Error, + r.Source(), + result[len(result)-1].Output, + ) + return false, err + } + } + // We assume that the update changed the resource. Alternatively, we could generate the resources + // again and compare the output, but that's expensive. 
+ return true, nil +} + +func (ca *configAware) GetAllResourcesByID(ctx context.Context) (map[string]resource.Resource, error) { + resourcesByID, err := ca.getResourcesByID(ctx) + if err != nil { + return nil, err + } + result := make(map[string]resource.Resource, len(resourcesByID)) + for id, resourceWithOrigin := range resourcesByID { + result[id] = resourceWithOrigin.resource + } + return result, nil +} + +func (ca *configAware) getResourcesByID(ctx context.Context) (map[string]resourceWithOrigin, error) { + ca.mu.RLock() + if ca.resourcesByID != nil { + toReturn := ca.resourcesByID + ca.mu.RUnlock() + return toReturn, nil + } + ca.mu.RUnlock() + + resourcesByID := map[string]resourceWithOrigin{} + + rawResourcesByID, err := ca.rawFiles.GetAllResourcesByID(ctx) + if err != nil { + return nil, err + } + for id, res := range rawResourcesByID { + resourcesByID[id] = resourceWithOrigin{resource: res} + } + + for _, cf := range ca.configFiles { + var ( + resourceManifests []byte + err error + ) + if cf.CommandUpdated != nil { + var err error + resourceManifests, err = ca.getGeneratedManifests(ctx, cf, cf.CommandUpdated.Generators) + if err != nil { + return nil, err + } + } else { + _, resourceManifests, _, err = ca.getGeneratedAndPatchedManifests(ctx, cf, *cf.PatchUpdated) + } + if err != nil { + return nil, err + } + relConfigFilePath, err := filepath.Rel(ca.baseDir, cf.Path) + if err != nil { + return nil, err + } + resources, err := ca.manifests.ParseManifest(resourceManifests, relConfigFilePath) + if err != nil { + return nil, err + } + for id, r := range resources { + if _, ok := resourcesByID[id]; ok { + return nil, fmt.Errorf("duplicate resource from %s and %s", + r.Source(), resourcesByID[id].resource.Source()) + } + resourcesByID[id] = resourceWithOrigin{resource: r, configFile: cf} + } + } + ca.mu.Lock() + ca.resourcesByID = resourcesByID + ca.mu.Unlock() + return resourcesByID, nil +} + +func (ca *configAware) resetResources() { + ca.mu.Lock() + 
ca.resourcesByID = nil
+	ca.mu.Unlock()
+}
+
+func cleanAndEnsureParentPath(basePath string, childPath string) (string, string, error) {
+	// Make paths canonical and remove potential ending slash,
+	// for filepath.Dir() to work as we expect
+	cleanBasePath, err := filepath.Abs(basePath)
+	if err != nil {
+		return "", "", err
+	}
+	cleanChildPath, err := filepath.Abs(childPath)
+	if err != nil {
+		return "", "", err
+	}
+	cleanBasePath = filepath.Clean(cleanBasePath)
+	cleanChildPath = filepath.Clean(cleanChildPath)
+
+	// The initial path must be contained in baseDir: compare on a path-segment
+	// boundary so a sibling like "/base-evil" cannot pass for base "/base"
+	// (this is what keeps us from escaping the git checkout)
+	if cleanChildPath != cleanBasePath && !strings.HasPrefix(cleanChildPath, cleanBasePath+string(filepath.Separator)) {
+		return "", "", fmt.Errorf("path %q is outside of base directory %s", childPath, basePath)
+	}
+	return cleanBasePath, cleanChildPath, nil
+}
diff --git a/manifests/configaware_test.go b/manifests/configaware_test.go
new file mode 100644
index 000000000..6e4ee5edb
--- /dev/null
+++ b/manifests/configaware_test.go
@@ -0,0 +1,226 @@
+package manifests
+
+import (
+	"context"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/go-kit/kit/log"
+	"github.com/stretchr/testify/assert"
+
+	"github.com/weaveworks/flux"
+	"github.com/weaveworks/flux/cluster/kubernetes"
+	"github.com/weaveworks/flux/cluster/kubernetes/testfiles"
+	"github.com/weaveworks/flux/image"
+	"github.com/weaveworks/flux/policy"
+	"github.com/weaveworks/flux/resource"
+)
+
+func TestFindConfigFilePaths(t *testing.T) {
+	baseDir, clean := testfiles.TempDir(t)
+	defer clean()
+	targetPath := filepath.Join(baseDir, "one/two/three")
+
+	// create file structure
+	err := os.MkdirAll(targetPath, 0777)
+	assert.NoError(t, err)
+
+	// no file should be found in the bottom dir before adding any
+	_, _, err = findConfigFilePaths(baseDir, targetPath)
+	assert.Equal(t, configFileNotFoundErr, err)
+
+	// a file should be found in the base directory when added
+
baseConfigFilePath := filepath.Join(baseDir, ConfigFilename) + f, err := os.Create(baseConfigFilePath) + assert.NoError(t, err) + f.Close() + configFilePath, workingDir, err := findConfigFilePaths(baseDir, targetPath) + assert.NoError(t, err) + assert.Equal(t, baseConfigFilePath, configFilePath) + assert.Equal(t, targetPath, workingDir) + + // a file should be found in the target directory when added, + // and preferred over any files in parent directories + targetConfigFilePath := filepath.Join(targetPath, ConfigFilename) + f, err = os.Create(targetConfigFilePath) + assert.NoError(t, err) + f.Close() + configFilePath, workingDir, err = findConfigFilePaths(baseDir, targetPath) + assert.NoError(t, err) + assert.Equal(t, targetConfigFilePath, configFilePath) + assert.Equal(t, targetPath, workingDir) + + // we can use the config file itself as a target path + configFilePath, workingDir, err = findConfigFilePaths(baseDir, targetConfigFilePath) + assert.NoError(t, err) + assert.Equal(t, targetConfigFilePath, configFilePath) + assert.Equal(t, targetPath, workingDir) +} + +func TestSplitConfigFilesAndRawManifestPaths(t *testing.T) { + baseDir, clean := testfiles.TempDir(t) + defer clean() + + targets := []string{ + filepath.Join(baseDir, "envs/staging"), + filepath.Join(baseDir, "envs/production"), + filepath.Join(baseDir, "commonresources"), + } + for _, target := range targets { + err := os.MkdirAll(target, 0777) + assert.NoError(t, err) + } + + // create common config file for the environments + configFile := `--- +version: 1 +commandUpdated: + generators: + - command: echo g1 +` + err := ioutil.WriteFile(filepath.Join(baseDir, "envs", ConfigFilename), []byte(configFile), 0700) + assert.NoError(t, err) + + configFiles, rawManifestFiles, err := splitConfigFilesAndRawManifestPaths(baseDir, targets) + assert.NoError(t, err) + + assert.Len(t, rawManifestFiles, 1) + assert.Equal(t, filepath.Join(baseDir, "commonresources"), rawManifestFiles[0]) + + assert.Len(t, 
configFiles, 2) + // We assume config files are processed in order to simplify the checks + assert.Equal(t, filepath.Join(baseDir, "envs/staging"), configFiles[0].WorkingDir) + assert.Equal(t, filepath.Join(baseDir, "envs/production"), configFiles[1].WorkingDir) + assert.NotNil(t, configFiles[0].CommandUpdated) + assert.Len(t, configFiles[0].CommandUpdated.Generators, 1) + assert.Equal(t, "echo g1", configFiles[0].CommandUpdated.Generators[0].Command) + assert.NotNil(t, configFiles[1].CommandUpdated) + assert.Equal(t, configFiles[0].CommandUpdated.Generators, configFiles[0].CommandUpdated.Generators) +} + +func setup(t *testing.T, configFileBody string) (*configAware, func()) { + manifests := kubernetes.NewManifests(kubernetes.ConstNamespacer("default"), log.NewLogfmtLogger(os.Stdout)) + baseDir, cleanup := testfiles.TempDir(t) + if len(configFileBody) > 0 { + ioutil.WriteFile(filepath.Join(baseDir, ConfigFilename), []byte(configFileBody), 0600) + } + frs, err := NewConfigAware(baseDir, []string{baseDir}, manifests) + assert.NoError(t, err) + return frs, cleanup +} + +const commandUpdatedEchoConfigFile = `--- +version: 1 +commandUpdated: + generators: + - command: | + echo "apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + name: helloworld + spec: + template: + metadata: + labels: + name: helloworld + spec: + containers: + - name: greeter + image: quay.io/weaveworks/helloworld:master-a000001" + updaters: + - containerImage: + command: echo uci $FLUX_WORKLOAD + policy: + command: echo ua $FLUX_WORKLOAD +` + +func TestCommandUpdatedConfigFile(t *testing.T) { + frs, cleanup := setup(t, commandUpdatedEchoConfigFile) + defer cleanup() + ctx := context.Background() + resources, err := frs.GetAllResourcesByID(ctx) + assert.NoError(t, err) + assert.Equal(t, 1, len(resources)) + deploymentID := flux.MustParseResourceID("default:deployment/helloworld") + assert.Contains(t, resources, deploymentID.String()) + ref, err := image.ParseRef("repo/image:tag") + 
assert.NoError(t, err) + err = frs.SetWorkloadContainerImage(ctx, deploymentID, "greeter", ref) + assert.NoError(t, err) + _, err = frs.UpdateWorkloadPolicies(ctx, deploymentID, policy.Update{ + Add: policy.Set{policy.TagPrefix("greeter"): "glob:master-*"}, + }) + assert.NoError(t, err) +} + +const patchUpdatedEchoConfigFile = `--- +version: 1 +patchUpdated: + generators: + - command: | + echo "apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + name: helloworld + spec: + template: + metadata: + labels: + name: helloworld + spec: + containers: + - name: greeter + image: quay.io/weaveworks/helloworld:master-a000001 + --- + apiVersion: v1 + kind: Namespace + metadata: + name: demo" + patchFile: patchfile.yaml +` + +func TestPatchUpdatedConfigFile(t *testing.T) { + frs, cleanup := setup(t, patchUpdatedEchoConfigFile) + defer cleanup() + ctx := context.Background() + resources, err := frs.GetAllResourcesByID(ctx) + assert.NoError(t, err) + assert.Equal(t, 2, len(resources)) + var deployment resource.Resource + deploymentID := flux.MustParseResourceID("default:deployment/helloworld") + for id, res := range resources { + if id == deploymentID.String() { + deployment = res + } + } + assert.NotNil(t, deployment) + ref, err := image.ParseRef("repo/image:tag") + assert.NoError(t, err) + err = frs.SetWorkloadContainerImage(ctx, deploymentID, "greeter", ref) + assert.NoError(t, err) + _, err = frs.UpdateWorkloadPolicies(ctx, deploymentID, policy.Update{ + Add: policy.Set{policy.TagPrefix("greeter"): "glob:master-*"}, + }) + expectedPatch := `--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + annotations: + flux.weave.works/tag.greeter: glob:master-* + name: helloworld +spec: + template: + spec: + $setElementOrder/containers: + - name: greeter + containers: + - image: repo/image:tag + name: greeter +` + patchFilePath := filepath.Join(frs.baseDir, "patchfile.yaml") + patch, err := ioutil.ReadFile(patchFilePath) + assert.NoError(t, err) + 
assert.Equal(t, expectedPatch, string(patch)) +} diff --git a/manifests/configfile.go b/manifests/configfile.go new file mode 100644 index 000000000..2fd896454 --- /dev/null +++ b/manifests/configfile.go @@ -0,0 +1,201 @@ +package manifests + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "time" + + "github.com/pkg/errors" + "gopkg.in/yaml.v2" + + "github.com/weaveworks/flux" +) + +const ( + ConfigFilename = ".flux.yaml" + CommandTimeout = time.Minute +) + +type ConfigFile struct { + Path string + WorkingDir string + Version int + // Only one of the following should be set simultaneously + CommandUpdated *CommandUpdated `yaml:"commandUpdated"` + PatchUpdated *PatchUpdated `yaml:"patchUpdated"` +} + +type CommandUpdated struct { + Generators []Generator + Updaters []Updater +} + +type Generator struct { + Command string +} + +type Updater struct { + ContainerImage ContainerImageUpdater `yaml:"containerImage"` + Policy PolicyUpdater +} + +type ContainerImageUpdater struct { + Command string +} + +type PolicyUpdater struct { + Command string +} + +type PatchUpdated struct { + Generators []Generator + PatchFile string `yaml:"patchFile"` +} + +func NewConfigFile(path, workingDir string) (*ConfigFile, error) { + var result ConfigFile + fileBytes, err := ioutil.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("cannot read: %s", err) + } + if err := yaml.Unmarshal(fileBytes, &result); err != nil { + return nil, fmt.Errorf("cannot parse: %s", err) + } + result.Path = path + result.WorkingDir = workingDir + switch { + case (result.CommandUpdated != nil && result.PatchUpdated != nil) || + (result.CommandUpdated == nil && result.PatchUpdated == nil): + return nil, errors.New("a single commandUpdated or patchUpdated entry must be defined") + case result.PatchUpdated != nil && result.PatchUpdated.PatchFile == "": + return nil, errors.New("patchUpdated's patchFile cannot be empty") + case result.Version != 1: + return nil, 
errors.New("incorrect version, only version 1 is supported for now") + } + return &result, nil +} + +type ConfigFileExecResult struct { + Error error + Stderr []byte + Stdout []byte +} + +type ConfigFileCombinedExecResult struct { + Error error + Output []byte +} + +func (cf *ConfigFile) ExecGenerators(ctx context.Context, generators []Generator) []ConfigFileExecResult { + result := []ConfigFileExecResult{} + for _, g := range generators { + stdErr := bytes.NewBuffer(nil) + stdOut := bytes.NewBuffer(nil) + err := cf.execCommand(ctx, nil, stdOut, stdErr, g.Command) + r := ConfigFileExecResult{ + Stdout: stdOut.Bytes(), + Stderr: stdErr.Bytes(), + Error: err, + } + result = append(result, r) + // Stop exectuing on the first command error + if err != nil { + break + } + } + return result +} + +// ExecContainerImageUpdaters executes all the image updates in the configuration file. +// It will stop at the first error, in which case the returned error will be non-nil +func (cf *ConfigFile) ExecContainerImageUpdaters(ctx context.Context, + workload flux.ResourceID, container string, image, imageTag string) []ConfigFileCombinedExecResult { + env := makeEnvFromResourceID(workload) + env = append(env, + "FLUX_CONTAINER="+container, + "FLUX_IMG="+image, + "FLUX_TAG="+imageTag, + ) + commands := []string{} + var updaters []Updater + if cf.CommandUpdated != nil { + updaters = cf.CommandUpdated.Updaters + } + for _, u := range updaters { + commands = append(commands, u.ContainerImage.Command) + } + return cf.execCommandsWithCombinedOutput(ctx, env, commands) +} + +// ExecPolicyUpdaters executes all the policy update commands given in +// the configuration file. An empty policyValue means remove the +// policy. 
It will stop at the first error, in which case the returned
+// error will be non-nil
+func (cf *ConfigFile) ExecPolicyUpdaters(ctx context.Context,
+	workload flux.ResourceID, policyName, policyValue string) []ConfigFileCombinedExecResult {
+	env := makeEnvFromResourceID(workload)
+	env = append(env, "FLUX_POLICY="+policyName)
+	if policyValue != "" {
+		env = append(env, "FLUX_POLICY_VALUE="+policyValue)
+	}
+	commands := []string{}
+	var updaters []Updater
+	if cf.CommandUpdated != nil {
+		updaters = cf.CommandUpdated.Updaters
+	}
+	for _, u := range updaters {
+		commands = append(commands, u.Policy.Command)
+	}
+	return cf.execCommandsWithCombinedOutput(ctx, env, commands)
+}
+
+func (cf *ConfigFile) execCommandsWithCombinedOutput(ctx context.Context, env []string, commands []string) []ConfigFileCombinedExecResult {
+	env = append(env, "PATH="+os.Getenv("PATH"))
+	result := []ConfigFileCombinedExecResult{}
+	for _, c := range commands {
+		stdOutAndErr := bytes.NewBuffer(nil)
+		err := cf.execCommand(ctx, env, stdOutAndErr, stdOutAndErr, c)
+		r := ConfigFileCombinedExecResult{
+			Output: stdOutAndErr.Bytes(),
+			Error:  err,
+		}
+		result = append(result, r)
+		// Stop executing on the first command error
+		if err != nil {
+			break
+		}
+	}
+	return result
+}
+
+func (cf *ConfigFile) execCommand(ctx context.Context, env []string, stdOut, stdErr io.Writer, command string) error {
+	cmdCtx, cancel := context.WithTimeout(ctx, CommandTimeout)
+	defer cancel()
+	// Run under cmdCtx so the CommandTimeout actually kills the command
+	cmd := exec.CommandContext(cmdCtx, "/bin/sh", "-c", command)
+	cmd.Env = env
+	cmd.Dir = cf.WorkingDir
+	cmd.Stdout = stdOut
+	cmd.Stderr = stdErr
+	err := cmd.Run()
+	if cmdCtx.Err() == context.DeadlineExceeded {
+		err = cmdCtx.Err()
+	} else if cmdCtx.Err() == context.Canceled {
+		err = errors.Wrap(ctx.Err(), "context was unexpectedly cancelled")
+	}
+	return err
+}
+
+func makeEnvFromResourceID(id flux.ResourceID) []string {
+	ns, kind, name := id.Components()
+	return []string{
+		"FLUX_WORKLOAD=" +
id.String(), + "FLUX_WL_NS=" + ns, + "FLUX_WL_KIND=" + kind, + "FLUX_WL_NAME=" + name, + } +} diff --git a/manifests/configfile_test.go b/manifests/configfile_test.go new file mode 100644 index 000000000..fcef9e121 --- /dev/null +++ b/manifests/configfile_test.go @@ -0,0 +1,124 @@ +package manifests + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "gopkg.in/yaml.v2" + + "github.com/weaveworks/flux" +) + +const patchUpdatedConfigFile = `--- +version: 1 +patchUpdated: + generators: + - command: foo + - command: bar + patchFile: baz.yaml +` + +func TestParsePatchUpdatedConfigFile(t *testing.T) { + var cf ConfigFile + if err := yaml.Unmarshal([]byte(patchUpdatedConfigFile), &cf); err != nil { + t.Fatal(err) + } + assert.NotNil(t, cf.PatchUpdated) + assert.Nil(t, cf.CommandUpdated) + assert.Equal(t, 1, cf.Version) + assert.Equal(t, 2, len(cf.PatchUpdated.Generators)) + assert.Equal(t, "bar", cf.PatchUpdated.Generators[1].Command) + assert.Equal(t, "baz.yaml", cf.PatchUpdated.PatchFile) +} + +const echoCmdUpdatedConfigFile = `--- +version: 1 +commandUpdated: + generators: + - command: echo g1 + - command: echo g2 + updaters: + - containerImage: + command: echo uci1 $FLUX_WORKLOAD $FLUX_WL_NS $FLUX_WL_KIND $FLUX_WL_NAME $FLUX_CONTAINER $FLUX_IMG $FLUX_TAG + policy: + command: echo ua1 $FLUX_WORKLOAD $FLUX_WL_NS $FLUX_WL_KIND $FLUX_WL_NAME $FLUX_POLICY ${FLUX_POLICY_VALUE:-delete} + - containerImage: + command: echo uci2 $FLUX_WORKLOAD $FLUX_WL_NS $FLUX_WL_KIND $FLUX_WL_NAME $FLUX_CONTAINER $FLUX_IMG $FLUX_TAG + policy: + command: echo ua2 $FLUX_WORKLOAD $FLUX_WL_NS $FLUX_WL_KIND $FLUX_WL_NAME $FLUX_POLICY ${FLUX_POLICY_VALUE:-delete} +` + +func TestParseCmdUpdatedConfigFile(t *testing.T) { + var cf ConfigFile + if err := yaml.Unmarshal([]byte(echoCmdUpdatedConfigFile), &cf); err != nil { + t.Fatal(err) + } + assert.NotNil(t, cf.CommandUpdated) + assert.Nil(t, cf.PatchUpdated) + assert.Equal(t, 1, cf.Version) + assert.Equal(t, 2, 
len(cf.CommandUpdated.Generators)) + assert.Equal(t, 2, len(cf.CommandUpdated.Updaters)) + assert.Equal(t, + "echo uci1 $FLUX_WORKLOAD $FLUX_WL_NS $FLUX_WL_KIND $FLUX_WL_NAME $FLUX_CONTAINER $FLUX_IMG $FLUX_TAG", + cf.CommandUpdated.Updaters[0].ContainerImage.Command, + ) + assert.Equal(t, + "echo ua2 $FLUX_WORKLOAD $FLUX_WL_NS $FLUX_WL_KIND $FLUX_WL_NAME $FLUX_POLICY ${FLUX_POLICY_VALUE:-delete}", + cf.CommandUpdated.Updaters[1].Policy.Command, + ) +} + +func TestExecGenerators(t *testing.T) { + var cf ConfigFile + err := yaml.Unmarshal([]byte(echoCmdUpdatedConfigFile), &cf) + assert.NoError(t, err) + result := cf.ExecGenerators(context.Background(), cf.CommandUpdated.Generators) + assert.Equal(t, 2, len(result), "result: %s", result) + assert.Equal(t, "g1\n", string(result[0].Stdout)) + assert.Equal(t, "g2\n", string(result[1].Stdout)) +} + +func TestExecContainerImageUpdaters(t *testing.T) { + var cf ConfigFile + err := yaml.Unmarshal([]byte(echoCmdUpdatedConfigFile), &cf) + assert.NoError(t, err) + resourceID := flux.MustParseResourceID("default:deployment/foo") + result := cf.ExecContainerImageUpdaters(context.Background(), resourceID, "bar", "repo/image", "latest") + assert.Equal(t, 2, len(result), "result: %s", result) + assert.Equal(t, + "uci1 default:deployment/foo default deployment foo bar repo/image latest\n", + string(result[0].Output)) + assert.Equal(t, + "uci2 default:deployment/foo default deployment foo bar repo/image latest\n", + string(result[1].Output)) +} + +func TestExecAnnotationUpdaters(t *testing.T) { + var cf ConfigFile + err := yaml.Unmarshal([]byte(echoCmdUpdatedConfigFile), &cf) + assert.NoError(t, err) + resourceID := flux.MustParseResourceID("default:deployment/foo") + + // Test the update/addition of annotations + annotationValue := "value" + result := cf.ExecPolicyUpdaters(context.Background(), resourceID, "key", annotationValue) + assert.Equal(t, 2, len(result), "result: %s", result) + assert.Equal(t, + "ua1 default:deployment/foo 
default deployment foo key value\n", + string(result[0].Output)) + assert.Equal(t, + "ua2 default:deployment/foo default deployment foo key value\n", + string(result[1].Output)) + + // Test the deletion of annotations " + result = cf.ExecPolicyUpdaters(context.Background(), resourceID, "key", "") + assert.Equal(t, 2, len(result), "result: %s", result) + assert.Equal(t, + "ua1 default:deployment/foo default deployment foo key delete\n", + string(result[0].Output)) + assert.Equal(t, + "ua2 default:deployment/foo default deployment foo key delete\n", + string(result[1].Output)) + +} diff --git a/manifests/manifests.go b/manifests/manifests.go new file mode 100644 index 000000000..4c049e8fb --- /dev/null +++ b/manifests/manifests.go @@ -0,0 +1,36 @@ +package manifests + +import ( + "bytes" + + "github.com/weaveworks/flux" + "github.com/weaveworks/flux/image" + "github.com/weaveworks/flux/policy" + "github.com/weaveworks/flux/resource" +) + +// Manifests represents a format for files or chunks of bytes +// containing definitions of resources, e.g., in Kubernetes, YAML +// files defining Kubernetes resources. +type Manifests interface { + // Load all the resource manifests under the paths + // given. `baseDir` is used to relativise the paths, which are + // supplied as absolute paths to directories or files; at least + // one path should be supplied, even if it is the same as `baseDir`. 
+ LoadManifests(baseDir string, paths []string) (map[string]resource.Resource, error) + // ParseManifest parses the content of a collection of manifests, into resources + ParseManifest(def []byte, source string) (map[string]resource.Resource, error) + // Set the image of a container in a manifest's bytes to that given + SetWorkloadContainerImage(def []byte, resourceID flux.ResourceID, container string, newImageID image.Ref) ([]byte, error) + // UpdateWorkloadPolicies modifies a manifest to apply the policy update specified + UpdateWorkloadPolicies(def []byte, id flux.ResourceID, update policy.Update) ([]byte, error) + // CreateManifestPatch obtains a patch between the original and modified manifests + CreateManifestPatch(originalManifests, modifiedManifests []byte, originalSource, modifiedSource string) ([]byte, error) + // ApplyManifestPatch applies a manifest patch (obtained with CreateManifestDiff) returned the patched manifests + ApplyManifestPatch(originalManifests, patchManifests []byte, originalSource, patchSource string) ([]byte, error) + // AppendManifestToBuffer concatentates manifest bytes to a + // (possibly empty) buffer of manifest bytes; the resulting bytes + // should be parsable by `ParseManifest`. + // TODO(michael) should really be an interface rather than `*bytes.Buffer`. 
+ AppendManifestToBuffer(manifest []byte, buffer *bytes.Buffer) error +} diff --git a/manifests/rawfiles.go b/manifests/rawfiles.go new file mode 100644 index 000000000..ac202d19e --- /dev/null +++ b/manifests/rawfiles.go @@ -0,0 +1,97 @@ +package manifests + +import ( + "bytes" + "context" + "io/ioutil" + "os" + "path/filepath" + + "github.com/weaveworks/flux" + "github.com/weaveworks/flux/image" + "github.com/weaveworks/flux/policy" + "github.com/weaveworks/flux/resource" +) + +type rawFiles struct { + baseDir string + paths []string + manifests Manifests +} + +func NewRawFiles(baseDir string, paths []string, manifests Manifests) *rawFiles { + return &rawFiles{ + baseDir: baseDir, + paths: paths, + manifests: manifests, + } +} + +// Set the container image of a resource in the store +func (f *rawFiles) SetWorkloadContainerImage(ctx context.Context, id flux.ResourceID, container string, newImageID image.Ref) error { + resourcesByID, err := f.GetAllResourcesByID(ctx) + if err != nil { + return err + } + r, ok := resourcesByID[id.String()] + if !ok { + return ErrResourceNotFound(id.String()) + } + return f.setManifestWorkloadContainerImage(r, container, newImageID) +} + +func (f *rawFiles) setManifestWorkloadContainerImage(r resource.Resource, container string, newImageID image.Ref) error { + fullFilePath := filepath.Join(f.baseDir, r.Source()) + def, err := ioutil.ReadFile(fullFilePath) + if err != nil { + return err + } + newDef, err := f.manifests.SetWorkloadContainerImage(def, r.ResourceID(), container, newImageID) + if err != nil { + return err + } + fi, err := os.Stat(fullFilePath) + if err != nil { + return err + } + return ioutil.WriteFile(fullFilePath, newDef, fi.Mode()) +} + +// UpdateWorkloadPolicies modifies a resource in the store to apply the policy-update specified. 
+// It returns whether a change in the resource was actually made as a result of the change +func (f *rawFiles) UpdateWorkloadPolicies(ctx context.Context, id flux.ResourceID, update policy.Update) (bool, error) { + resourcesByID, err := f.GetAllResourcesByID(ctx) + if err != nil { + return false, err + } + r, ok := resourcesByID[id.String()] + if !ok { + return false, ErrResourceNotFound(id.String()) + } + return f.updateManifestWorkloadPolicies(r, update) +} + +func (f *rawFiles) updateManifestWorkloadPolicies(r resource.Resource, update policy.Update) (bool, error) { + fullFilePath := filepath.Join(f.baseDir, r.Source()) + def, err := ioutil.ReadFile(fullFilePath) + if err != nil { + return false, err + } + newDef, err := f.manifests.UpdateWorkloadPolicies(def, r.ResourceID(), update) + if err != nil { + return false, err + } + fi, err := os.Stat(fullFilePath) + if err != nil { + return false, err + } + if err := ioutil.WriteFile(fullFilePath, newDef, fi.Mode()); err != nil { + return false, err + } + return bytes.Compare(def, newDef) != 0, nil +} + +// Load all the resources in the store. 
The returned map is indexed by the resource IDs +func (f *rawFiles) GetAllResourcesByID(_ context.Context) (map[string]resource.Resource, error) { + return f.manifests.LoadManifests(f.baseDir, f.paths) +} diff --git a/manifests/store.go b/manifests/store.go new file mode 100644 index 000000000..3c26576af --- /dev/null +++ b/manifests/store.go @@ -0,0 +1,31 @@ +package manifests + +import ( + "context" + "fmt" + + "github.com/weaveworks/flux" + "github.com/weaveworks/flux/image" + "github.com/weaveworks/flux/policy" + "github.com/weaveworks/flux/resource" +) + +type StoreError struct { + error +} + +func ErrResourceNotFound(name string) error { + return StoreError{fmt.Errorf("resource %s not found", name)} +} + +// Store manages all the cluster resources defined in a checked out repository, explicitly declared +// in a file or not e.g., generated and updated by a .flux.yaml file, explicit Kubernetes .yaml manifests files ... +type Store interface { + // Set the container image of a resource in the store + SetWorkloadContainerImage(ctx context.Context, resourceID flux.ResourceID, container string, newImageID image.Ref) error + // UpdateWorkloadPolicies modifies a resource in the store to apply the policy-update specified. + // It returns whether a change in the resource was actually made as a result of the change + UpdateWorkloadPolicies(ctx context.Context, resourceID flux.ResourceID, update policy.Update) (bool, error) + // Load all the resources in the store. 
The returned map is indexed by the resource IDs + GetAllResourcesByID(ctx context.Context) (map[string]resource.Resource, error) +} diff --git a/registry/cache/repocachemanager.go b/registry/cache/repocachemanager.go index bade231d4..416bae284 100644 --- a/registry/cache/repocachemanager.go +++ b/registry/cache/repocachemanager.go @@ -254,7 +254,10 @@ func (c *repoCacheManager) updateImage(ctx context.Context, update imageToUpdate if ctx.Err() == context.DeadlineExceeded { return registry.ImageEntry{}, c.clientTimeoutError() } - return registry.ImageEntry{}, err + if _, ok := err.(*image.LabelTimestampFormatError); !ok { + return registry.ImageEntry{}, err + } + c.logger.Log("err", err, "ref", imageID) } refresh := update.previousRefresh diff --git a/registry/cache/repocachemanager_test.go b/registry/cache/repocachemanager_test.go index 62c0f8522..f0cb03fcd 100644 --- a/registry/cache/repocachemanager_test.go +++ b/registry/cache/repocachemanager_test.go @@ -14,11 +14,10 @@ import ( "github.com/weaveworks/flux/image" "github.com/weaveworks/flux/registry" - "github.com/weaveworks/flux/registry/middleware" ) func Test_ClientTimeouts(t *testing.T) { - timeout := 2 * time.Millisecond + timeout := 1 * time.Millisecond server := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) { // make sure we exceed the timeout time.Sleep(timeout * 10) @@ -28,12 +27,8 @@ func Test_ClientTimeouts(t *testing.T) { assert.NoError(t, err) logger := log.NewLogfmtLogger(os.Stdout) cf := ®istry.RemoteClientFactory{ - Logger: log.NewLogfmtLogger(os.Stdout), - Limiters: &middleware.RateLimiters{ - RPS: 100, - Burst: 100, - Logger: logger, - }, + Logger: log.NewLogfmtLogger(os.Stdout), + Limiters: nil, Trace: false, InsecureHosts: []string{url.Host}, } @@ -47,7 +42,7 @@ func Test_ClientTimeouts(t *testing.T) { cf, registry.NoCredentials(), timeout, - 100, + 1, false, logger, nil, @@ -55,5 +50,5 @@ func Test_ClientTimeouts(t *testing.T) { assert.NoError(t, err) _, err = 
rcm.getTags(context.Background()) assert.Error(t, err) - assert.Equal(t, "client timeout (2ms) exceeded", err.Error()) + assert.Equal(t, "client timeout (1ms) exceeded", err.Error()) } diff --git a/registry/client.go b/registry/client.go index e4f6cdce6..4434a4ef4 100644 --- a/registry/client.go +++ b/registry/client.go @@ -120,6 +120,7 @@ interpret: return ImageEntry{}, fetchErr } + var labelErr error info := image.Info{ID: a.repo.ToRef(ref), Digest: manifestDigest.String()} // TODO(michael): can we type switch? Not sure how dependable the @@ -133,15 +134,22 @@ interpret: Created time.Time `json:"created"` OS string `json:"os"` Arch string `json:"architecture"` + Config struct { + Labels image.Labels `json:"labels"` + } `json:"config"` } if err = json.Unmarshal([]byte(man.History[0].V1Compatibility), &v1); err != nil { - return ImageEntry{}, err + if _, ok := err.(*image.LabelTimestampFormatError); !ok { + return ImageEntry{}, err + } + labelErr = err } // This is not the ImageID that Docker uses, but assumed to // identify the image as it's the topmost layer. info.ImageID = v1.ID info.CreatedAt = v1.Created + info.Labels = v1.Config.Labels case *schema2.DeserializedManifest: var man schema2.Manifest = deserialised.Manifest configBytes, err := repository.Blobs(ctx).Get(ctx, man.Config.Digest) @@ -153,13 +161,20 @@ interpret: Arch string `json:"architecture"` Created time.Time `json:"created"` OS string `json:"os"` + ContainerConfig struct { + Labels image.Labels `json:"labels"` + } `json:"container_config"` } if err = json.Unmarshal(configBytes, &config); err != nil { - return ImageEntry{}, err + if _, ok := err.(*image.LabelTimestampFormatError); !ok { + return ImageEntry{}, err + } + labelErr = err } // This _is_ what Docker uses as its Image ID. 
info.ImageID = man.Config.Digest.String() info.CreatedAt = config.Created + info.Labels = config.ContainerConfig.Labels case *manifestlist.DeserializedManifestList: var list manifestlist.ManifestList = deserialised.ManifestList // TODO(michael): is it valid to just pick the first one that matches? @@ -176,5 +191,5 @@ interpret: t := reflect.TypeOf(manifest) return ImageEntry{}, errors.New("unknown manifest type: " + t.String()) } - return ImageEntry{Info: info}, nil + return ImageEntry{Info: info}, labelErr } diff --git a/registry/client_factory.go b/registry/client_factory.go index 12265298d..ee7cee89b 100644 --- a/registry/client_factory.go +++ b/registry/client_factory.go @@ -116,13 +116,15 @@ insecureCheckLoop: } // Since we construct one of these per scan, be fairly ruthless // about throttling the number, and closing of, idle connections. - baseTx := &http.Transport{ + var tx http.RoundTripper = &http.Transport{ TLSClientConfig: tlsConfig, MaxIdleConns: 10, IdleConnTimeout: 10 * time.Second, Proxy: http.ProxyFromEnvironment, } - tx := f.Limiters.RoundTripper(baseTx, repo.Domain) + if f.Limiters != nil { + tx = f.Limiters.RoundTripper(tx, repo.Domain) + } if f.Trace { tx = &logging{f.Logger, tx} } @@ -160,7 +162,9 @@ insecureCheckLoop: // bump rate limits up if a repo's metadata has successfully been // fetched. 
func (f *RemoteClientFactory) Succeed(repo image.CanonicalName) { - f.Limiters.Recover(repo.Domain) + if f.Limiters != nil { + f.Limiters.Recover(repo.Domain) + } } // store adapts a set of pre-selected creds to be an diff --git a/registry/middleware/rate_limiter.go b/registry/middleware/rate_limiter.go index ef8b22418..5ec52620f 100644 --- a/registry/middleware/rate_limiter.go +++ b/registry/middleware/rate_limiter.go @@ -46,11 +46,11 @@ func (limiters *RateLimiters) clip(limit float64) float64 { return limit } -// BackOff can be called to explicitly reduce the limit for a +// backOff can be called to explicitly reduce the limit for a // particular host. Usually this isn't necessary since a RoundTripper // obtained for a host will respond to `HTTP 429` by doing this for // you. -func (limiters *RateLimiters) BackOff(host string) { +func (limiters *RateLimiters) backOff(host string) { limiters.mu.Lock() defer limiters.mu.Unlock() @@ -105,22 +105,22 @@ func (limiters *RateLimiters) RoundTripper(rt http.RoundTripper, host string) ht limiters.perHost[host] = rl } var reduceOnce sync.Once - return &RoundTripRateLimiter{ + return &roundTripRateLimiter{ rl: limiters.perHost[host], tx: rt, slowDown: func() { - reduceOnce.Do(func() { limiters.BackOff(host) }) + reduceOnce.Do(func() { limiters.backOff(host) }) }, } } -type RoundTripRateLimiter struct { +type roundTripRateLimiter struct { rl *rate.Limiter tx http.RoundTripper slowDown func() } -func (t *RoundTripRateLimiter) RoundTrip(r *http.Request) (*http.Response, error) { +func (t *roundTripRateLimiter) RoundTrip(r *http.Request) (*http.Response, error) { // Wait errors out if the request cannot be processed within // the deadline. This is pre-emptive, instead of waiting the // entire duration. 
diff --git a/release/context.go b/release/context.go index c8f59b7ec..c307e951f 100644 --- a/release/context.go +++ b/release/context.go @@ -1,34 +1,30 @@ package release import ( + "context" "fmt" - "io/ioutil" - "os" - "path/filepath" "github.com/pkg/errors" "github.com/weaveworks/flux" "github.com/weaveworks/flux/cluster" - "github.com/weaveworks/flux/git" + "github.com/weaveworks/flux/manifests" "github.com/weaveworks/flux/registry" "github.com/weaveworks/flux/resource" "github.com/weaveworks/flux/update" ) type ReleaseContext struct { - cluster cluster.Cluster - manifests cluster.Manifests - repo *git.Checkout - registry registry.Registry + cluster cluster.Cluster + resourceStore manifests.Store + registry registry.Registry } -func NewReleaseContext(c cluster.Cluster, m cluster.Manifests, reg registry.Registry, repo *git.Checkout) *ReleaseContext { +func NewReleaseContext(cluster cluster.Cluster, resourceStore manifests.Store, registry registry.Registry) *ReleaseContext { return &ReleaseContext{ - cluster: c, - manifests: m, - repo: repo, - registry: reg, + cluster: cluster, + resourceStore: resourceStore, + registry: registry, } } @@ -36,26 +32,19 @@ func (rc *ReleaseContext) Registry() registry.Registry { return rc.registry } -func (rc *ReleaseContext) LoadManifests() (map[string]resource.Resource, error) { - return rc.manifests.LoadManifests(rc.repo.Dir(), rc.repo.ManifestDirs()) +func (rc *ReleaseContext) GetAllResources(ctx context.Context) (map[string]resource.Resource, error) { + return rc.resourceStore.GetAllResourcesByID(ctx) } -func (rc *ReleaseContext) WriteUpdates(updates []*update.WorkloadUpdate) error { +func (rc *ReleaseContext) WriteUpdates(ctx context.Context, updates []*update.WorkloadUpdate) error { err := func() error { for _, update := range updates { - manifestBytes, err := ioutil.ReadFile(update.ManifestPath) - if err != nil { - return err - } for _, container := range update.Updates { - manifestBytes, err = 
rc.manifests.UpdateImage(manifestBytes, update.ResourceID, container.Container, container.Target) + err := rc.resourceStore.SetWorkloadContainerImage(ctx, update.ResourceID, container.Container, container.Target) if err != nil { return errors.Wrapf(err, "updating resource %s in %s", update.ResourceID.String(), update.Resource.Source()) } } - if err = ioutil.WriteFile(update.ManifestPath, manifestBytes, os.FileMode(0600)); err != nil { - return errors.Wrapf(err, "writing updated file %s", update.Resource.Source()) - } } return nil }() @@ -68,10 +57,11 @@ func (rc *ReleaseContext) WriteUpdates(updates []*update.WorkloadUpdate) error { // files and the running cluster. `WorkloadFilter`s can be provided // to filter the controllers so found, either before (`prefilters`) or // after (`postfilters`) consulting the cluster. -func (rc *ReleaseContext) SelectWorkloads(results update.Result, prefilters, postfilters []update.WorkloadFilter) ([]*update.WorkloadUpdate, error) { +func (rc *ReleaseContext) SelectWorkloads(ctx context.Context, results update.Result, prefilters, + postfilters []update.WorkloadFilter) ([]*update.WorkloadUpdate, error) { // Start with all the workloads that are defined in the repo. - allDefined, err := rc.WorkloadsForUpdate() + allDefined, err := rc.WorkloadsForUpdate(ctx) if err != nil { return nil, err } @@ -106,9 +96,9 @@ func (rc *ReleaseContext) SelectWorkloads(results update.Result, prefilters, pos update, ok := allDefined[s.ID] if !ok { // A contradiction: we asked only about defined - // controllers, and got a controller that is not + // workloads, and got a workload that is not // defined. 
- return nil, fmt.Errorf("controller %s was requested and is running, but is not defined", s.ID) + return nil, fmt.Errorf("workload %s was requested and is running, but is not defined", s.ID) } update.Workload = s forPostFiltering = append(forPostFiltering, update) @@ -127,9 +117,9 @@ func (rc *ReleaseContext) SelectWorkloads(results update.Result, prefilters, pos } // WorkloadsForUpdate collects all workloads defined in manifests and prepares a list of -// controller updates for each of them. It does not consider updatability. -func (rc *ReleaseContext) WorkloadsForUpdate() (map[flux.ResourceID]*update.WorkloadUpdate, error) { - resources, err := rc.LoadManifests() +// workload updates for each of them. It does not consider updatability. +func (rc *ReleaseContext) WorkloadsForUpdate(ctx context.Context) (map[flux.ResourceID]*update.WorkloadUpdate, error) { + resources, err := rc.GetAllResources(ctx) if err != nil { return nil, err } @@ -138,9 +128,8 @@ func (rc *ReleaseContext) WorkloadsForUpdate() (map[flux.ResourceID]*update.Work for _, res := range resources { if wl, ok := res.(resource.Workload); ok { defined[res.ResourceID()] = &update.WorkloadUpdate{ - ResourceID: res.ResourceID(), - Resource: wl, - ManifestPath: filepath.Join(rc.repo.Dir(), res.Source()), + ResourceID: res.ResourceID(), + Resource: wl, } } } diff --git a/release/releaser.go b/release/releaser.go index 922856b38..5e72b5089 100644 --- a/release/releaser.go +++ b/release/releaser.go @@ -1,6 +1,7 @@ package release import ( + "context" "fmt" "strings" "time" @@ -13,13 +14,13 @@ import ( ) type Changes interface { - CalculateRelease(update.ReleaseContext, log.Logger) ([]*update.WorkloadUpdate, update.Result, error) + CalculateRelease(context.Context, update.ReleaseContext, log.Logger) ([]*update.WorkloadUpdate, update.Result, error) ReleaseKind() update.ReleaseKind ReleaseType() update.ReleaseType CommitMessage(update.Result) string } -func Release(rc *ReleaseContext, changes Changes, logger 
log.Logger) (results update.Result, err error) { +func Release(ctx context.Context, rc *ReleaseContext, changes Changes, logger log.Logger) (results update.Result, err error) { defer func(start time.Time) { update.ObserveRelease( start, @@ -31,18 +32,18 @@ func Release(rc *ReleaseContext, changes Changes, logger log.Logger) (results up logger = log.With(logger, "type", "release") - before, err := rc.LoadManifests() - updates, results, err := changes.CalculateRelease(rc, logger) + before, err := rc.GetAllResources(ctx) + updates, results, err := changes.CalculateRelease(ctx, rc, logger) if err != nil { return nil, err } - err = ApplyChanges(rc, updates, logger) + err = ApplyChanges(ctx, rc, updates, logger) if err != nil { return nil, MakeReleaseError(errors.Wrap(err, "applying changes")) } - after, err := rc.LoadManifests() + after, err := rc.GetAllResources(ctx) if err != nil { return nil, MakeReleaseError(errors.Wrap(err, "loading resources after updates")) } @@ -54,7 +55,7 @@ func Release(rc *ReleaseContext, changes Changes, logger log.Logger) (results up return results, nil } -func ApplyChanges(rc *ReleaseContext, updates []*update.WorkloadUpdate, logger log.Logger) error { +func ApplyChanges(ctx context.Context, rc *ReleaseContext, updates []*update.WorkloadUpdate, logger log.Logger) error { logger.Log("updates", len(updates)) if len(updates) == 0 { logger.Log("exit", "no images to update for services given") @@ -62,7 +63,7 @@ func ApplyChanges(rc *ReleaseContext, updates []*update.WorkloadUpdate, logger l } timer := update.NewStageTimer("write_changes") - err := rc.WriteUpdates(updates) + err := rc.WriteUpdates(ctx, updates) timer.ObserveDuration() return err } diff --git a/release/releaser_test.go b/release/releaser_test.go index 712686ab5..8b0505624 100644 --- a/release/releaser_test.go +++ b/release/releaser_test.go @@ -1,6 +1,7 @@ package release import ( + "context" "errors" "fmt" "os" @@ -11,12 +12,15 @@ import ( "github.com/go-kit/kit/log" 
"github.com/stretchr/testify/assert" + "github.com/weaveworks/flux" "github.com/weaveworks/flux/cluster" "github.com/weaveworks/flux/cluster/kubernetes" + "github.com/weaveworks/flux/cluster/mock" "github.com/weaveworks/flux/git" "github.com/weaveworks/flux/git/gittest" "github.com/weaveworks/flux/image" + "github.com/weaveworks/flux/manifests" registryMock "github.com/weaveworks/flux/registry/mock" "github.com/weaveworks/flux/resource" "github.com/weaveworks/flux/update" @@ -139,8 +143,8 @@ var ( mockManifests = kubernetes.NewManifests(kubernetes.ConstNamespacer("default"), log.NewLogfmtLogger(os.Stdout)) ) -func mockCluster(running ...cluster.Workload) *cluster.Mock { - return &cluster.Mock{ +func mockCluster(running ...cluster.Workload) *mock.Mock { + return &mock.Mock{ AllWorkloadsFunc: func(string) ([]cluster.Workload, error) { return running, nil }, @@ -158,6 +162,11 @@ func mockCluster(running ...cluster.Workload) *cluster.Mock { } } +func NewManifestStoreOrFail(t *testing.T, parser manifests.Manifests, checkout *git.Checkout) manifests.Store { + cm := manifests.NewRawFiles(checkout.Dir(), checkout.ManifestDirs(), parser) + return cm +} + func setup(t *testing.T) (*git.Checkout, func()) { return gittest.Checkout(t) } @@ -225,7 +234,7 @@ func Test_InitContainer(t *testing.T) { }, } - cluster := mockCluster(hwSvc, lockedSvc, initSvc) + mCluster := mockCluster(hwSvc, lockedSvc, initSvc) expect := expected{ Specific: update.Result{ @@ -254,16 +263,15 @@ func Test_InitContainer(t *testing.T) { defer clean() testRelease(t, &ReleaseContext{ - cluster: cluster, - manifests: mockManifests, - registry: mockRegistry, - repo: checkout, + cluster: mCluster, + resourceStore: NewManifestStoreOrFail(t, mockManifests, checkout), + registry: mockRegistry, }, spec, expect.Result()) } func Test_FilterLogic(t *testing.T) { - cluster := mockCluster(hwSvc, lockedSvc) // no testsvc in cluster, but it _is_ in repo + mCluster := mockCluster(hwSvc, lockedSvc) // no testsvc in cluster, 
but it _is_ in repo notInRepoService := "default:deployment/notInRepo" notInRepoSpec, _ := update.ParseResourceSpec(notInRepoService) @@ -446,17 +454,16 @@ func Test_FilterLogic(t *testing.T) { checkout, cleanup := setup(t) defer cleanup() testRelease(t, &ReleaseContext{ - cluster: cluster, - manifests: mockManifests, - registry: mockRegistry, - repo: checkout, + cluster: mCluster, + resourceStore: NewManifestStoreOrFail(t, mockManifests, checkout), + registry: mockRegistry, }, tst.Spec, tst.Expected.Result()) }) } } func Test_Force_lockedWorkload(t *testing.T) { - cluster := mockCluster(lockedSvc) + mCluster := mockCluster(lockedSvc) success := update.WorkloadResult{ Status: update.ReleaseStatusSuccess, PerContainer: []update.ContainerUpdate{ @@ -539,17 +546,16 @@ func Test_Force_lockedWorkload(t *testing.T) { checkout, cleanup := setup(t) defer cleanup() testRelease(t, &ReleaseContext{ - cluster: cluster, - manifests: mockManifests, - registry: mockRegistry, - repo: checkout, + cluster: mCluster, + resourceStore: NewManifestStoreOrFail(t, mockManifests, checkout), + registry: mockRegistry, }, tst.Spec, tst.Expected.Result()) }) } } func Test_Force_filteredContainer(t *testing.T) { - cluster := mockCluster(semverSvc) + mCluster := mockCluster(semverSvc) successNew := update.WorkloadResult{ Status: update.ReleaseStatusSuccess, PerContainer: []update.ContainerUpdate{ @@ -644,17 +650,16 @@ func Test_Force_filteredContainer(t *testing.T) { checkout, cleanup := setup(t) defer cleanup() testRelease(t, &ReleaseContext{ - cluster: cluster, - manifests: mockManifests, - registry: mockRegistry, - repo: checkout, + cluster: mCluster, + resourceStore: NewManifestStoreOrFail(t, mockManifests, checkout), + registry: mockRegistry, }, tst.Spec, tst.Expected.Result()) }) } } func Test_ImageStatus(t *testing.T) { - cluster := mockCluster(hwSvc, lockedSvc, testSvc) + mCluster := mockCluster(hwSvc, lockedSvc, testSvc) upToDateRegistry := ®istryMock.Registry{ Images: []image.Info{ { 
@@ -713,13 +718,12 @@ func Test_ImageStatus(t *testing.T) { t.Run(tst.Name, func(t *testing.T) { checkout, cleanup := setup(t) defer cleanup() - ctx := &ReleaseContext{ - cluster: cluster, - manifests: mockManifests, - repo: checkout, - registry: upToDateRegistry, + rc := &ReleaseContext{ + cluster: mCluster, + resourceStore: NewManifestStoreOrFail(t, mockManifests, checkout), + registry: upToDateRegistry, } - testRelease(t, ctx, tst.Spec, tst.Expected.Result()) + testRelease(t, rc, tst.Spec, tst.Expected.Result()) }) } } @@ -738,21 +742,20 @@ func Test_UpdateMultidoc(t *testing.T) { }, } - cluster := mockCluster(hwSvc, lockedSvc, egSvc) // no testsvc in cluster, but it _is_ in repo + mCluster := mockCluster(hwSvc, lockedSvc, egSvc) // no testsvc in cluster, but it _is_ in repo checkout, cleanup := setup(t) defer cleanup() - ctx := &ReleaseContext{ - cluster: cluster, - manifests: mockManifests, - repo: checkout, - registry: mockRegistry, + rc := &ReleaseContext{ + cluster: mCluster, + resourceStore: NewManifestStoreOrFail(t, mockManifests, checkout), + registry: mockRegistry, } spec := update.ReleaseImageSpec{ ServiceSpecs: []update.ResourceSpec{"default:deployment/multi-deploy"}, ImageSpec: update.ImageSpecLatest, Kind: update.ReleaseKindExecute, } - results, err := Release(ctx, spec, log.NewNopLogger()) + results, err := Release(context.Background(), rc, spec, log.NewNopLogger()) if err != nil { t.Error(err) } @@ -786,21 +789,20 @@ func Test_UpdateList(t *testing.T) { }, } - cluster := mockCluster(hwSvc, lockedSvc, egSvc) // no testsvc in cluster, but it _is_ in repo + mCluster := mockCluster(hwSvc, lockedSvc, egSvc) // no testsvc in cluster, but it _is_ in repo checkout, cleanup := setup(t) defer cleanup() - ctx := &ReleaseContext{ - cluster: cluster, - manifests: mockManifests, - repo: checkout, - registry: mockRegistry, + rc := &ReleaseContext{ + cluster: mCluster, + resourceStore: NewManifestStoreOrFail(t, mockManifests, checkout), + registry: mockRegistry, 
} spec := update.ReleaseImageSpec{ ServiceSpecs: []update.ResourceSpec{"default:deployment/list-deploy"}, ImageSpec: update.ImageSpecLatest, Kind: update.ReleaseKindExecute, } - results, err := Release(ctx, spec, log.NewNopLogger()) + results, err := Release(context.Background(), rc, spec, log.NewNopLogger()) if err != nil { t.Error(err) } @@ -821,14 +823,14 @@ func Test_UpdateList(t *testing.T) { } func Test_UpdateContainers(t *testing.T) { - cluster := mockCluster(hwSvc, lockedSvc) + mCluster := mockCluster(hwSvc, lockedSvc) checkout, cleanup := setup(t) defer cleanup() - ctx := &ReleaseContext{ - cluster: cluster, - manifests: mockManifests, - repo: checkout, - registry: mockRegistry, + ctx := context.Background() + rc := &ReleaseContext{ + cluster: mCluster, + resourceStore: NewManifestStoreOrFail(t, mockManifests, checkout), + registry: mockRegistry, } type expected struct { Err error @@ -1029,7 +1031,7 @@ func Test_UpdateContainers(t *testing.T) { specs.SkipMismatches = ignoreMismatches specs.Force = tst.Force - results, err := Release(ctx, specs, log.NewNopLogger()) + results, err := Release(ctx, rc, specs, log.NewNopLogger()) assert.Equal(t, expected.Err, err) if expected.Err == nil { @@ -1041,8 +1043,8 @@ func Test_UpdateContainers(t *testing.T) { } } -func testRelease(t *testing.T, ctx *ReleaseContext, spec update.ReleaseImageSpec, expected update.Result) { - results, err := Release(ctx, spec, log.NewNopLogger()) +func testRelease(t *testing.T, rc *ReleaseContext, spec update.ReleaseImageSpec, expected update.Result) { + results, err := Release(context.Background(), rc, spec, log.NewNopLogger()) assert.NoError(t, err) assert.Equal(t, expected, results) } @@ -1051,15 +1053,15 @@ func testRelease(t *testing.T, ctx *ReleaseContext, spec update.ReleaseImageSpec // A manifests implementation that does updates incorrectly, so they should fail verification. 
type badManifests struct { - cluster.Manifests + manifests.Manifests } -func (m *badManifests) UpdateImage(def []byte, resourceID flux.ResourceID, container string, newImageID image.Ref) ([]byte, error) { +func (m *badManifests) SetWorkloadContainerImage(def []byte, id flux.ResourceID, container string, image image.Ref) ([]byte, error) { return def, nil } func Test_BadRelease(t *testing.T) { - cluster := mockCluster(hwSvc) + mCluster := mockCluster(hwSvc) spec := update.ReleaseImageSpec{ ServiceSpecs: []update.ResourceSpec{update.ResourceSpecAll}, ImageSpec: update.ImageSpecFromRef(newHwRef), @@ -1070,13 +1072,13 @@ func Test_BadRelease(t *testing.T) { defer cleanup1() manifests := kubernetes.NewManifests(kubernetes.ConstNamespacer("default"), log.NewLogfmtLogger(os.Stdout)) - ctx := &ReleaseContext{ - cluster: cluster, - manifests: manifests, - repo: checkout1, - registry: mockRegistry, + ctx := context.Background() + rc := &ReleaseContext{ + cluster: mCluster, + resourceStore: NewManifestStoreOrFail(t, manifests, checkout1), + registry: mockRegistry, } - _, err := Release(ctx, spec, log.NewNopLogger()) + _, err := Release(ctx, rc, spec, log.NewNopLogger()) if err != nil { t.Fatal("release with 'good' manifests should succeed, but errored:", err) } @@ -1084,13 +1086,12 @@ func Test_BadRelease(t *testing.T) { checkout2, cleanup2 := setup(t) defer cleanup2() - ctx = &ReleaseContext{ - cluster: cluster, - manifests: &badManifests{manifests}, - repo: checkout2, - registry: mockRegistry, + rc = &ReleaseContext{ + cluster: mCluster, + resourceStore: NewManifestStoreOrFail(t, &badManifests{manifests}, checkout2), + registry: mockRegistry, } - _, err = Release(ctx, spec, log.NewNopLogger()) + _, err = Release(ctx, rc, spec, log.NewNopLogger()) if err == nil { t.Fatal("did not return an error, but was expected to fail verification") } diff --git a/resource/policy.go b/resource/policy.go new file mode 100644 index 000000000..b3910c337 --- /dev/null +++ b/resource/policy.go 
@@ -0,0 +1,47 @@ +package resource + +import ( + "fmt" + + "github.com/weaveworks/flux/policy" +) + +// ChangeForPolicyUpdate evaluates a policy update with respect to a +// workload. The reason this exists at all is that an `Update` can +// include qualified policies, for example "tag all containers"; and +// to make actual changes, we need to examine the workload to which +// it's to be applied. +// +// This also translates policy deletion to empty values (i.e., `""`), +// to make it easy to use as command-line arguments or environment +// variables. When represented in manifests, policies are expected to +// have a non-empty value when present, even if it's `"true"`; so an +// empty value can safely denote deletion. +func ChangesForPolicyUpdate(workload Workload, update policy.Update) (map[string]string, error) { + add, del := update.Add, update.Remove + // We may be sent the pseudo-policy `policy.TagAll`, which means + // apply this filter to all containers. To do so, we need to know + // what all the containers are. 
+ if tagAll, ok := update.Add.Get(policy.TagAll); ok { + add = add.Without(policy.TagAll) + for _, container := range workload.Containers() { + if tagAll == policy.PatternAll.String() { + del = del.Add(policy.TagPrefix(container.Name)) + } else { + add = add.Set(policy.TagPrefix(container.Name), tagAll) + } + } + } + + result := map[string]string{} + for pol, val := range add { + if policy.Tag(pol) && !policy.NewPattern(val).Valid() { + return nil, fmt.Errorf("invalid tag pattern: %q", val) + } + result[string(pol)] = val + } + for pol, _ := range del { + result[string(pol)] = "" + } + return result, nil +} diff --git a/site/annotations-tutorial.md b/site/annotations-tutorial.md index 34a100475..e4f684c77 100644 --- a/site/annotations-tutorial.md +++ b/site/annotations-tutorial.md @@ -62,9 +62,14 @@ If you have never used Helm, you first need to Deploy Tiller in the `kube-system` namespace: ```sh - helm init --skip-refresh --upgrade --service-account tiller + helm init --skip-refresh --upgrade --service-account tiller --history-max 10 ``` + > **Note:** This is a quick guide and by no means a production ready + > Tiller setup, please look into ['Securing your Helm installation'](https://helm.sh/docs/using_helm/#securing-your-helm-installation) + > and be aware of the `--history-max` flag before promoting to + > production. + Now you can take care of the actual installation. First add the Flux repository of Weaveworks: @@ -102,7 +107,7 @@ The first step is done. Flux is now and up running (you can confirm by running `kubectl get pods --all-namespaces`). In the second step we will use fluxctl to talk to Flux in the cluster and -interact with the deployments. First, please [install fluxctl](https://github.com/weaveworks/flux/blob/master/site/fluxctl.md#installing-fluxctl). +interact with the deployments. First, please [install fluxctl](fluxctl.md#installing-fluxctl). 
(It enables you to drive all of Weave Flux, so have a look at the output of `fluxctl -h` to get a better idea.) @@ -204,10 +209,33 @@ In our case this is `1.4.2` (it could be a later image too). Let's say an engineer found that `1.4.2` was faulty and we have to go back to `1.4.1`. That's easy. -Rollback to `1.4.1`: +Lock deployment with a message describing why: + +```sh +fluxctl lock -w demo:deployment/podinfo -m "1.4.2 does not work for us" +``` + +The resulting diff should look like this + +```diff +--- a/workloads/podinfo-dep.yaml ++++ b/workloads/podinfo-dep.yaml +@@ -10,6 +10,7 @@ metadata: + app: podinfo + annotations: + flux.weave.works/automated: "true" + flux.weave.works/tag.init: glob:1.4.* + flux.weave.works/tag.podinfod: glob:1.4.* ++ flux.weave.works/locked: 'true' + spec: + strategy: + rollingUpdate: +``` + +Rollback to `1.4.1`. Flag `--force` is needed because the workload is locked: ```sh -fluxctl release --workload demo:deployment/podinfo -i stefanprodan/podinfo:1.4.1 +fluxctl release --force --workload demo:deployment/podinfo -i stefanprodan/podinfo:1.4.1 ``` The response should be @@ -236,29 +264,6 @@ and the diff for this is going to look like this: - containerPort: 9898 ``` -Lock to `1.4.1` with a message describing why: - -```sh -fluxctl lock -w demo:deployment/podinfo -m "1.4.2 does not work for us" -``` - -The resulting diff should look like this - -```diff ---- a/workloads/podinfo-dep.yaml -+++ b/workloads/podinfo-dep.yaml -@@ -10,6 +10,7 @@ metadata: - app: podinfo - annotations: - flux.weave.works/automated: "true" - flux.weave.works/tag.init: glob:1.4.* - flux.weave.works/tag.podinfod: glob:1.4.* -+ flux.weave.works/locked: 'true' - spec: - strategy: - rollingUpdate: -``` - And that's it. At the end of this tutorial, you have automated, locked and annotated deployments with Weave Flux. 
diff --git a/site/building.md b/site/building.md index 7bab2dd7a..10f3d051b 100644 --- a/site/building.md +++ b/site/building.md @@ -5,21 +5,19 @@ menu_order: 80 # Build -You'll need a working `go` environment (official releases are build against `1.10`), -including the [`dep`](https://github.com/golang/dep#installation) tool. - +You'll need a working `go` environment version >= 1.11 (official releases are built against `1.12`). It's also expected that you have a Docker daemon for building images. -Ensure the repository is checked out into $GOPATH/src/github.com/weaveworks/flux. -Then, from the root, +Clone the respository. The project uses Go Modules, so if you explicitly define `$GOPATH` you should +clone somewhere else + +Then, from the root directory, ```sh -$ dep ensure -# .. time passes .. $ make ``` -This makes Docker images, and installs binaries to $GOPATH/bin. +This makes Docker images, and installs binaries to `$GOBIN` (if you define it) or `$(go env GOPATH)/bin`. Note: The default target architecture is amd64. If you would like to try to build Docker images and binaries for a different architecture you will have to set ARCH variable, @@ -34,16 +32,3 @@ $ make ARCH= $ make test ``` -# Dependency management - -We use [dep](https://github.com/golang/dep) to manage vendored dependencies. -Note that **we do not check in the dependencies**. - -To get all the dependencies put in the `vendor/` folder, use - -```sh -$ dep ensure -``` - -If you see a big diff or other unexpected output after running `dep ensure`, -make sure you're using the latest official release of `dep`. diff --git a/site/daemon.md b/site/daemon.md index 792993594..15544f2a0 100644 --- a/site/daemon.md +++ b/site/daemon.md @@ -94,3 +94,5 @@ fluxd requires setup and offers customization though a multitude of flags. 
| **SSH key generation** | --ssh-keygen-bits | | -b argument to ssh-keygen (default unspecified) | --ssh-keygen-type | | -t argument to ssh-keygen (default unspecified) +| **manifest generation** +| --manifest-generation | false | experimental; search for .flux.yaml files to generate manifests diff --git a/site/faq.md b/site/faq.md index e7e8e8393..1343faea5 100644 --- a/site/faq.md +++ b/site/faq.md @@ -22,13 +22,13 @@ menu_order: 60 * [How often does Flux check for new git commits (and can I make it sync faster)?](#how-often-does-flux-check-for-new-git-commits-and-can-i-make-it-sync-faster) * [How do I use my own deploy key?](#how-do-i-use-my-own-deploy-key) * [How do I use a private git host (or one that's not github.com, gitlab.com, bitbucket.org, dev.azure.com, or vs-ssh.visualstudio.com)?](#how-do-i-use-a-private-git-host-or-one-thats-not-githubcom-gitlabcom-bitbucketorg-devazurecom-or-vs-sshvisualstudiocom) - * [Will Flux delete resources that are no longer in the git repository?](#will-flux-delete-resources-that-are-no-longer-in-the-git-repository) * [Why does my CI pipeline keep getting triggered?](#why-does-my-ci-pipeline-keep-getting-triggered) * [Can I restrict the namespaces that Flux can see or operate on?](#can-i-restrict-the-namespaces-that-flux-can-see-or-operate-on) * [Can I change the namespace Flux puts things in by default?](#can-i-change-the-namespace-flux-puts-things-in-by-default) * [Can I temporarily make Flux ignore a deployment?](#can-i-temporarily-make-flux-ignore-a-deployment) * [How can I prevent Flux overriding the replicas when using HPA?](#how-can-i-prevent-flux-overriding-the-replicas-when-using-hpa) * [Can I disable Flux registry scanning?](#can-i-disable-flux-registry-scanning) + * [Does Flux support Kustomize/My favorite manifest factorization technology?](#does-flux-support-kustomizetemplatingmy-favorite-manifest-factorization-technology) - [Flux Helm Operator questions](#flux-helm-operator-questions) * [I'm using SSL between 
Helm and Tiller. How can I configure Flux to use the certificate?](#im-using-ssl-between-helm-and-tiller-how-can-i-configure-flux-to-use-the-certificate) * [I've deleted a HelmRelease file from Git. Why is the Helm release still running on my cluster?](#ive-deleted-a-helmrelease-file-from-git-why-is-the-helm-release-still-running-on-my-cluster) @@ -183,8 +183,7 @@ There are exceptions: To work around exceptional cases, you can mount a docker config into the Flux container. See the argument `--docker-config` in [the daemon -arguments -reference](https://github.com/weaveworks/flux/blob/master/site/daemon.md#flags). +arguments reference](daemon.md#flags). See also [Why are my images not showing up in the list of images?](#why-are-my-images-not-showing-up-in-the-list-of-images) @@ -243,7 +242,7 @@ First delete the secret (if it exists): `kubectl delete secret flux-git-deploy` -Then create a new secret named `flux-git-deploy`, using your private key as the content of the secret: +Then create a new secret named `flux-git-deploy`, using your private key as the content of the secret (you can generate the key with `ssh-keygen -q -N "" -f /full/path/to/private_key`): `kubectl create secret generic flux-git-deploy --from-file=identity=/full/path/to/private_key` @@ -251,6 +250,9 @@ Now restart fluxd to re-read the k8s secret (if it is running): `kubectl delete $(kubectl get pod -o name -l name=flux)` +If you have installed flux through Helm, make sure to pass +`--set git.secretName=flux-git-deploy` when installing/upgrading the chart. + ### How do I use a private git host (or one that's not github.com, gitlab.com, bitbucket.org, dev.azure.com, or vs-ssh.visualstudio.com)? As part of using git+ssh securely from the Flux daemon, we make sure @@ -266,12 +268,6 @@ host key(s). How to do this is documented in [standalone-setup.md](/site/standalone-setup.md#using-a-private-git-host). -### Will Flux delete resources that are no longer in the git repository? - -Not at present. 
It's tricky to come up with a safe and unsurprising -way for this to work. There's discussion of some possibilities in -[weaveworks/flux#738](https://github.com/weaveworks/flux/issues/738). - ### Why does my CI pipeline keep getting triggered? There's a couple of reasons this can happen. @@ -313,7 +309,7 @@ to experiment to find the most restrictive permissions that work for your case. You will need to use the (experimental) command-line flag -`--k8s-namespace-whitelist` to enumerate the namespaces that Flux +`--k8s-allow-namespace` to enumerate the namespaces that Flux attempts to scan for workloads. ### Can I change the namespace Flux puts things in by default? @@ -417,6 +413,17 @@ Disable image scanning for all images: --registry-exclude-image=* ``` +### Does Flux support Kustomize/Templating/My favorite manifest factorization technology? + +Yes! + +Flux experimentally supports technology-agnostic manifest factorization through +`.flux.yaml` configuration files placed in the Git repository. To enable this +feature please supply `fluxd` with flag `--manifest-generation=true`. + +See [`.flux.yaml` configuration files documentation](/site/fluxyaml-config-files.md) for +further details. + ## Flux Helm Operator questions ### I'm using SSL between Helm and Tiller. How can I configure Flux to use the certificate? diff --git a/site/fluxctl.md b/site/fluxctl.md index 68fa6d19b..9100d95f0 100644 --- a/site/fluxctl.md +++ b/site/fluxctl.md @@ -6,6 +6,7 @@ menu_order: 40 - [Installing fluxctl](#installing-fluxctl) * [Mac OS](#mac-os) * [Linux](#linux) + + [Ubuntu (and others): snaps](#ubuntu-and-others-snaps) + [Arch Linux](#arch-linux) * [Binary releases](#binary-releases) - [Connecting fluxctl to the daemon](#connecting-fluxctl-to-the-daemon) @@ -15,7 +16,7 @@ menu_order: 40 + [2. 
Specify a key to use](#2-specify-a-key-to-use) - [Workloads](#workloads) * [What is a Workload?](#what-is-a-workload) - * [Viewing Workloads](#viewing-workloads) + * [Viewing Workloads](#viewing-workloads) * [Inspecting the Version of a Container](#inspecting-the-version-of-a-container) * [Releasing a Workload](#releasing-a-workload) * [Turning on Automation](#turning-on-automation) @@ -27,9 +28,11 @@ menu_order: 40 * [Recording user and message with the triggered action](#recording-user-and-message-with-the-triggered-action) - [Image Tag Filtering](#image-tag-filtering) * [Filter pattern types](#filter-pattern-types) - * [Glob](#glob) - * [Semver](#semver) - * [Regexp](#regexp) + + [Glob](#glob) + + [Semver](#semver) + + [Regexp](#regexp) + * [Controlling image timestamps with labels](#controlling-image-timestamps-with-labels) + + [Supported label formats](#supported-label-formats) - [Actions triggered through `fluxctl`](#actions-triggered-through-fluxctl) * [Errors due to author customization](#errors-due-to-author-customization) - [Using Annotations](#using-annotations) @@ -54,6 +57,26 @@ brew install fluxctl ## Linux +### Ubuntu (and others): snaps + +[Many Linux distributions](https://docs.snapcraft.io/installing-snapd) support +snaps these days, which makes it very easy to install `fluxctl` and stay up to +date. + +To install it, simply run: + +```sh +sudo snap install fluxctl +``` + +If you would prefer to track builds from master, run + +```sh +sudo snap install fluxctl --edge +``` + +instead. + ### Arch Linux Install the `fluxctl-bin` package [from the @@ -73,7 +96,7 @@ page](https://github.com/weaveworks/flux/releases). # Connecting fluxctl to the daemon -By default, fluxctl will attempt to port-forward to your Flux +By default, `fluxctl` will attempt to port-forward to your Flux instance, assuming it runs in the `"default"` namespace. You can specify a different namespace with the `--k8s-fwd-ns` flag: @@ -118,7 +141,7 @@ options: ### 1. 
Allow Flux to generate a key for you If you don't specify a key to use, Flux will create one for you. Obtain -the public key through fluxctl: +the public key through `fluxctl`: ```sh $ fluxctl identity @@ -497,11 +520,11 @@ fluxctl release --workload=default:deployment/helloworld --update-all-images --f Please note that automation might immediately undo this. -# Filter pattern types +## Filter pattern types Flux currently offers support for `glob`, `semver` and `regexp` based filtering. -## Glob +### Glob The glob (`*`) filter is the simplest filter Flux supports, a filter can contain multiple globs: @@ -510,7 +533,7 @@ multiple globs: fluxctl policy --workload=default:deployment/helloworld --tag-all='glob:master-v1.*.*' ``` -## Semver +### Semver If your images use [semantic versioning](https://semver.org) you can filter by image tags that adhere to certain constraints: @@ -528,7 +551,7 @@ fluxctl policy --workload=default:deployment/helloworld --tag-all='semver:*' Using a semver filter will also affect how Flux sorts images, so that the higher versions will be considered newer. -## Regexp +### Regexp If your images have complex tags you can filter by regular expression: @@ -540,6 +563,23 @@ Instead of `regexp` it is also possible to use its alias `regex`. Please bear in mind that if you want to match the whole tag, you must bookend your pattern with `^` and `$`. +## Controlling image timestamps with labels + +Some image registries do not expose a reliable creation timestamp for +image tags, which could pose a problem for the automated roll-out of +images. + +To overcome this problem you can define one of the supported labels in +your `Dockerfile`. Flux will prioritize labels over the timestamp it +retrieves from the registry. 
+ +### Supported label formats + +- [`org.opencontainers.image.created`](https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys) + date and time on which the image was built (string, date-time as defined by RFC 3339). +- [`org.label-schema.build-date`](http://label-schema.org/rc1/#build-time-labels) + date and time on which the image was built (string, date-time as defined by RFC 3339). + # Actions triggered through `fluxctl` `fluxctl` provides the following flags for the message and author customization: @@ -615,7 +655,7 @@ configured using fluxctl. Here's a simple but complete deployment file with annotations: -``` +```yaml apiVersion: apps/v1 kind: Deployment metadata: diff --git a/site/fluxyaml-config-files.md b/site/fluxyaml-config-files.md new file mode 100644 index 000000000..c836730d4 --- /dev/null +++ b/site/fluxyaml-config-files.md @@ -0,0 +1,238 @@ +# Manifest factorization through `.flux.yaml` configuration files + +## Enabling search of `.flux.yaml` files + +This feature is still experimental. To enable it please supply `fluxd` with flag `--manifest-generation=true`. + +## Goal + +It is a common pattern to run very similar resources in separate clusters. There are various scenarios in which this is +required, the two main ones being: + +* Having a staging/canary cluster and production cluster. The resources from the staging cluster are regularly promoted + to the production cluster. In addition, there are long-term differences between the cluster resources (e.g. different + security keys, different database endpoints etc ...). +* Federation. Different clusters run in separate regions, usually with very similar resources but different + configurations. 
+ +The main goal of `.flux.yaml` configuration files is to help deploying similar resources/clusters: + +* with minimal replication of resource definitions +* while keeping Flux neutral about the factorization technology used + + +## File-access behaviour in Flux + +Flux performs two types of actions on raw manifest files from the Git repository: + +1. Read manifest files when performing a sync operation (i.e making sure that the status of the cluster reflects what's + in the manifest files, adjusting it if necessary) +2. Update the manifest files of [workload](https://github.com/weaveworks/flux/blob/master/site/fluxctl.md#what-is-a-workload). + Specifically, flux can update: + * container images, when releasing a new image version. A release can happen manually or automatically, when a new + container image is pushed to a repository. + * annotations, which establish the release policy of a workload (e.g. whether it should be automatically released, + whether it should be locked from releasing, what image tags should be considered for automated releases …) + +Flux can be configured to confine the scope of (1) and (2): + +* To specific (sub)directories (flag` --git-path`) +* To a Git branch other than `master` (flag` --git-branch`) + + +## Abstracting out file-access: generators and updaters + +Flux allows you to declare configuration files to override file operations (1) and (2), by declaring commands which +perform equivalent actions. + +The configuration files are formatted in `YAML` and named `.flux.yaml`. They must be located on the Git repository +(more on this later). + +A `commandUpdated` `.flux.yaml` file has the following loosely specified format: + + +``` +version: X +commandUpdated: + generators: + - command: generator_command1 g1arg1 g1arg2 ... + - command: generator_command2 g2arg2 g2arg2 ... + updaters: + - containerImage: + command: containerImage_updater_command1 ciu1arg1 ciu1arg2 ... + policy: + command: policy_updater_command1 pu1arg1 pu1arg2 ... 
+ - containerImage: + command: containerImage_updater_command2 ciu2arg1 ciu2arg2 ... + policy: + command: policy_updater_command2 pu2arg1 pu2arg2 ... +``` + + +> **Note:** For a simpler approach to updates, Flux provides a `patchUpdated` configuration file variant. + + +The file above is versioned (in order to account for future file format changes). Current version is `1`, +which is enforced. + +Also, the file contains two generators (declared in the `generators `entry), used to generate manifests and two updaters +(declared in the `updaters `entry), used to update resources in the Git repository. + +The generators are meant as an alternative to Flux manifest reads (1). Each updater is split into a `containerImage` +command and a `policy` command, covering the corresponding two types of workload manifest updates mentioned in (2). + +> **Note** Update commands operate on policies, rather than annotations. That is for two reasons: +> +> * It is an implementation detail for Kubernetes manifests specifically that policies are represented as annotations. +> * Some configurations (even those for Kubernetes clusters) may encode policies symbolically. + +Here is a specific `.flux.yaml` example, declaring a generator and an updater using [Kustomize](https://github.com/kubernetes-sigs/kustomize) +(see [https://github.com/weaveworks/flux-kustomize-example](https://github.com/weaveworks/flux-kustomize-example) +for a complete example). + + +``` +version: 1 +commandUpdated: + generators: + - command: kustomize build . 
+ updaters: + # use https://github.com/squaremo/kubeyaml on flux-patch.yaml + - containerImage: + command: >- + cat flux-patch.yaml | + kubeyaml image --namespace $FLUX_WL_NS --kind $FLUX_WL_KIND --name $FLUX_WL_NAME --container $FLUX_CONTAINER --image "$FLUX_IMG:$FLUX_TAG" + > new-flux-patch.yaml && + mv new-flux-patch.yaml flux-patch.yaml + policy: + command: >- + cat flux-patch.yaml | + kubeyaml annotate --namespace $FLUX_WL_NS --kind $FLUX_WL_KIND --name $FLUX_WL_NAME "flux.weave.works/$FLUX_POLICY=$FLUX_POLICY_VALUE" + > new-flux-patch.yaml && + mv new-flux-patch.yaml flux-patch.yaml +``` + + +For every flux target path, Flux will look for a `.flux.yaml` file in the target path and all its parent directories. +If a `.flux.yaml` is found: + +1. When syncing, `fluxd` will run each of the `generators`, collecting yaml manifests printed to `stdout` and applying + them to the cluster. +2. When making a release or updating a policy, `fluxd` will run the `updaters`, which are in charge of updating the Git + repository to reflect the required changes in workloads. + * The `containerImage `updaters are invoked once for every container whose image requires updating. + * The `policy` updaters are invoked once for every workload annotation which needs to be added or updated. + * Updaters are supplied with environment variables indicating what image should be updated and what annotation to + update (more on this later). + * Updaters expected to modify the Git working tree in-place + + After invoking the updaters, `fluxd` will then commit and push the resulting modifications to the git repository. + +3. `fluxd `will ignore any other yaml files under that path (e.g. resource manifests). + +Generators and updaters are intentionally independent, in case a matching updater cannot be provided. It is hard to +create updaters for some factorization technologies (particularly Configuration-As-Code). 
To improve the situation, +a separate configuration file variant (`patchedUpdated`) is provided, which will be described later on. + + +### Execution context of commands + +`generators` and `updaters` are run in a POSIX shell inside the Flux container. This means that the `command`s supplied +should be available in the [Flux container image](../docker/Dockerfile.flux). Flux currently includes `Kustomize` and +basic Unix shell tools. If the tools in the Flux image are not sufficient for your use case, you can include new tools +in your own Flux-based image or, if the tools are popular enough, Flux maintainers can add them to the Flux image +(please create an issue). In the future (once [Ephemeral containers](https://github.com/kubernetes/kubernetes/pull/59416) +are available), you will be able to specify an container image for each command. + +The working directory (also known as CWD) of the `command`s executed from a `.fluxctl.yaml` file will be set to the +target path (`--git-path` entry) used when finding that `.fluxctl.yaml` file. + +For example, when using flux with `--git-path=staging` on a git repository with this structure: + + +``` +├── .flux.yaml +├── staging/ +├──── [...] +├── production/ +└──── [...] +``` + +The commands in `.flux.yaml `will be executed with their working directory set to `staging.` + +In addition, `updaters` are provided with some environment variables: + +* `FLUX_WORKLOAD`: Workload to be updated. Its format is `:/` (e.g. `default:deployment/foo`). + For convenience (to circumvent parsing) `FLUX_WORKLOAD `is also broken down into the following environment variables: + * `FLUX_WL_NS` + * `FLUX_WL_KIND` + * `FLUX_WL_NAME` +* `containerImage` updaters are provided with: + * `FLUX_CONTAINER`: Name of the container within the workload whose image needs to be updated. + * `FLUX_IMG`: Image name which the container needs to be updated to (e.g. `nginx`). + * `FLUX_TAG`: Image tag which the container needs to be updated to (e.g. `1.15`). 
+* `policy` updaters are provided with: + * `FLUX_POLICY`: the name of the policy to be added or updated in the workload. To make into an annotation name, + prefix with `flux.weave.works/` + * `FLUX_POLICY_VALUE`: value of the policy to be added or updated in the controller. If the `FLUX_POLICY_VALUE` + environment variable is not set, it means the policy should be removed. + + +### Combining generators, updaters and raw manifest files + +The `.flux.yaml` files support including multiple generators and updaters. Here is an example combining multiple +generators: + + +``` +version: 1 +commandUpdated: + generators: + - command: kustomize build . + - command: helm template ../charts/mychart -f overrides.yaml +``` + + +The generators/updaters will simply be executed in the presented order (top down). Flux will merge their output. + +Flux supports both generated manifests and raw manifests tracked in the same repository. If Flux doesn't find a +configuration file associated to a target directory, Flux will inspect it in search for raw YAML manifest files. + + +### The `patchUpdated` configuration variant + +We mentioned before that, while it is simple for users to provide generator commands, matching updater commands are +harder to construct. To improve the situation, Flux provides a different configuration variant: `patchUpdated`. +`patchUpdated` configurations store the modifications from Flux into a +[YAML merge patch file](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md) +and implicitly apply them to the resources printed by the `generators`. 
+ +Here is an example, allowing to deploy a [Helm chart without a Tiller installation](https://jenkins-x.io/news/helm-without-tiller/) + + +``` +version: 1 +patchUpdated: + generators: + - command: helm template ../charts/mychart -f overrides.yaml + patchFile: flux-patch.yaml +``` + +The `mergePatchUpdater` will store the modifications made by Flux in file `flux-patch.yaml` and will apply the patch to +the output of `helm template ../charts/mychart -f overrides.yaml`. + +The patch file path should be relative to Flux target which matched the configuration file. + +Note that the patch file will need to be kept consistent with any changes made in the generated manifests. In particular, +the patch file will be sensitive to changes in workload names, workload namespaces or workload kinds. + +Lastly, here is another example using Kustomize which is much simpler than the `commandUpdated`-based example presented +earlier. + +``` +version: 1 +commandUpdated: + generators: + - command: helm template ../charts/mychart -f overrides.yaml + patchFile: flux-patch.yaml +``` diff --git a/site/get-started-developing.md b/site/get-started-developing.md index e40415b77..93d7a7648 100644 --- a/site/get-started-developing.md +++ b/site/get-started-developing.md @@ -17,6 +17,19 @@ This guide shows a workflow for making a small (actually, tiny) change to Flux, > 1. make a change to the code > 1. see your code changes have been deployed > 1. repeat +> 1. Remote cluster development approach: +> 1. ensure local kubectl access to a remote kubernetes cluster. +> 1. have an available local memcached instance. +> 1. make a change to the code +> 1. ```bash +> go run cmd/fluxd/main.go \ +> --memcached-hostname localhost \ +> --memcached-port 11211 \ +> --memcached-service "" \ +> --git-url git@github.com:weaveworks/flux-get-started \ +> --k8s-in-cluster=false +> ``` +> 1. repeat > 1. Use `helm` and `skaffold` together to deploy changes to the Flux helm chart. > 1. `make` > 1. 
make a change to the code @@ -82,7 +95,7 @@ Now that we know everything is working with `flux-getting-started`, we're going 1. Clone `git@github.com:/flux.git` replacing `` with your GitHub username. - In the same terminal you ran `eval $(minikube docker-env)`, run `dep ensure` followed by `make` from the root directory of the Flux repo. You'll see docker's usual output as it builds the image layers. Once it's done, you should see something like this in the middle of the output: + In the same terminal you ran `eval $(minikube docker-env)`, run `GO111MODULE=on go mod download` followed by `make` from the root directory of the Flux repo. You'll see docker's usual output as it builds the image layers. Once it's done, you should see something like this in the middle of the output: ``` Successfully built 606610e0f4ef Successfully tagged docker.io/weaveworks/flux:latest @@ -183,4 +196,4 @@ Now that we know everything is working with `flux-getting-started`, we're going ## Congratulations! -You have now modified Flux and deployed that change locally. From here on out, you simply need to run `make` after you save your changes and wait a few seconds for your new pod to be deployed to minikube. Keep in mind, that (as in the situation where you run `make` without saving any changes) if the docker image you pointed to in the Kubernetes deployment for Flux is not Successfully tagged, `freshpod` won't have anything new to deploy. Other than that, you should be good to go! \ No newline at end of file +You have now modified Flux and deployed that change locally. From here on out, you simply need to run `make` after you save your changes and wait a few seconds for your new pod to be deployed to minikube. Keep in mind, that (as in the situation where you run `make` without saving any changes) if the docker image you pointed to in the Kubernetes deployment for Flux is not Successfully tagged, `freshpod` won't have anything new to deploy. Other than that, you should be good to go! 
diff --git a/site/get-started.md b/site/get-started.md index 9fe48647d..e98a5ff09 100644 --- a/site/get-started.md +++ b/site/get-started.md @@ -36,6 +36,7 @@ will work as well though. > ```sh > kubectl create clusterrolebinding "cluster-admin-$(whoami)" --clusterrole=cluster-admin --user="$(gcloud config get-value core/account)" > ``` +> > to avoid an error along the lines of > > `Error from server (Forbidden): error when creating "deploy/flux-account.yaml": @@ -63,9 +64,9 @@ $EDITOR deploy/flux-deployment.yaml In our example we are going to use [flux-get-started](https://github.com/weaveworks/flux-get-started). If you want to use that too, be sure to create a fork of it on GitHub and -add the git URL to the config file above. After that, set the `--git-path` -flag to `--git-path=namespaces,workloads`, this is meant to exclude Helm -manifests. Again, if you want to get started with Helm, please refer to the +add the git URL to the config file above. After that, set the `--git-path` +flag to `--git-path=namespaces,workloads`, this is meant to exclude Helm +manifests. Again, if you want to get started with Helm, please refer to the [Helm section](./helm-get-started.md). ## Deploying Flux to the cluster @@ -150,5 +151,5 @@ very straight-forward and are a quite natural work-flow. As a next step, you might want to dive deeper into [how to control Flux](./fluxctl.md), check out [more sophisticated setups](./standalone-setup.md) or go through our hands-on -tutorial about driving Flux, e.g. +tutorial about driving Flux, e.g. [automations, annotations and locks](annotations-tutorial.md). 
\ No newline at end of file diff --git a/site/git-commit-signing.md b/site/git-commit-signing.md deleted file mode 100644 index 139469fbb..000000000 --- a/site/git-commit-signing.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Git commit signing -menu_order: 90 ---- - -# Summary - -Flux can be configured to sign commits that it makes to the user git -repo when, for example, it detects an updated Docker image is available -for a release with automatic deployments enabled. Enabling this feature -requires the configuration of two flags: - -1. `--git-gpg-key-import` should be set to the path Flux should look - for GPG key(s) to import, this can be a direct path to a key or the - path to a folder Flux should scan for files. -2. `--git-signing-key` should be set to the ID of the key Flux should - use to sign commits, for example: `649C056644DBB17D123D699B42532AEA4FFBFC0B` - -# Importing GPG key(s) - -Any file found in the configured `--git-gpg-key-import` path will be -imported into GPG; therefore, by volume-mounting a key into that -directory it will be made available for use by Flux. - -> **Note:** Flux *does not* recursively scan a given directory but does -understand symbolic links to files. - -# Using the `--git-signing-key` flag - -Once a key has been imported, all that needs to be done is to specify -that git commit signing should be performed by providing the -`--git-signing-key` flag and the ID of the key to use. 
For example: - -`--git-signing-key 649C056644DBB17D123D699B42532AEA4FFBFC0B` diff --git a/site/git-gpg.md b/site/git-gpg.md new file mode 100644 index 000000000..c89d987b2 --- /dev/null +++ b/site/git-gpg.md @@ -0,0 +1,253 @@ +--- +title: Git commit signing and verification +menu_order: 90 +--- + +- [Summary](#summary) +- [Commit signing](#commit-signing) + * [Creating a GPG signing key](#creating-a-gpg-signing-key) + * [Importing a GPG signing key](#importing-a-gpg-signing-key) +- [Signature verification](#signature-verification) + * [Importing trusted GPG keys and enabling verification](#importing-trusted-gpg-keys-and-enabling-verification) + * [Enabling verification for existing repositories, disaster recovery, and deleted sync tags](#enabling-verification-for-existing-repositories-disaster-recovery-and-deleted-sync-tags) + +# Summary + +Flux can be configured to sign commits that it makes to the user git +repo when, for example, it detects an updated Docker image is available +for a release with automatic deployments enabled. To complete this +functionality it is also able to verify signatures of commits (and the +sync tag in git) to prevent Flux from applying unauthorized changes on +the cluster. + +# Commit signing + +The signing of commits (and the sync tag) requires two flags to be set: + +1. `--git-gpg-key-import` should be set to the path(s) Flux should look + for GPG key(s) to import, this can be direct paths to keys and/or + the paths to folders Flux should scan for files. +2. `--git-signing-key` should be set to the ID of the key Flux should + use to sign commits, this can be the full fingerprint or the long + ID, for example: `700D397C988079BFF0DDAFED6A7436E8790F8689` (or + `6A7436E8790F8689`) + +Once enabled Flux will sign both commits and the sync tag with given +`--git-signing-key`. + +## Creating a GPG signing key + +> **Note:** This requires [gnupg](https://www.gnupg.org) to be +installed on your system. + +1. 
Enter the following shell command to start the key generation dialog: + + ```sh + $ gpg --full-generate-key + ``` + +2. The dialog will guide you through the process of generating a key. + Pressing the `Enter` key will assign the default value, please note + that when in doubt, in almost all cases, the default value is + recommended. + + Select what kind of key you want and press `Enter`: + + ```sh + Please select what kind of key you want: + (1) RSA and RSA (default) + (2) DSA and Elgamal + (3) DSA (sign only) + (4) RSA (sign only) + Your selection? 1 + ``` + +3. Enter the desired key size (or simply press `Enter` as the default + will be secure for almost any setup): + + ```sh + RSA keys may be between 1024 and 4096 bits long. + What keysize do you want? (2048) + ``` + +4. Specify how long the key should be valid (or simply press `Enter`): + + ```sh + Please specify how long the key should be valid. + 0 = key does not expire + = key expires in n days + w = key expires in n weeks + m = key expires in n months + y = key expires in n years + Key is valid for? (0) + ``` + +5. Verify your selection of choices and accept (`y` and `Enter`) + +6. Enter your user ID information, it is recommended to set the email + address to the same address as the daemon uses for Git operations. + +7. **Do not enter a passphrase**, as Flux will be unable to sign with a + passphrase protected private key, instead, keep it in a secure place. + +8. You can validate the public and private keypair were created with + success by running: + + ```sh + $ gpg --list-secret-keys --keyid-format long + sec rsa2048/6A7436E8790F8689 2019-03-28 [SC] + 700D397C988079BFF0DDAFED6A7436E8790F8689 + uid [ultimate] Weaveworks Flux + ssb rsa2048/ECA4FF5BD988B8E9 2019-03-28 [E] + ``` + +## Importing a GPG signing key + +Any file found in the configured `--git-gpg-key-import` path(s) will be +imported into GPG; therefore, by volume-mounting a key into that +directory it will be made available for use by Flux. 
+ +1. Retrieve the key ID (second row of the `sec` column): + + ```sh + $ gpg --list-secret-keys --keyid-format long + sec rsa2048/6A7436E8790F8689 2019-03-28 [SC] + 700D397C988079BFF0DDAFED6A7436E8790F8689 + uid [ultimate] Weaveworks Flux + ssb rsa2048/ECA4FF5BD988B8E9 2019-03-28 [E] + ``` + +2. Export the public and private keypair from your local GPG keyring + to a Kubernetes secret with `--export-secret-keys `: + + ```sh + $ gpg --export-secret-keys 700D397C988079BFF0DDAFED6A7436E8790F8689 | + kubectl create secret generic flux-gpg-signing-key --from-file=flux.asc=/dev/stdin --dry-run -o yaml + apiVersion: v1 + data: + flux.asc: + kind: Secret + metadata: + creationTimestamp: null + name: flux-gpg-signing-key + ``` + +3. Adapt your Flux deployment to mount the secret and enable the + signing of commits: + + ```yaml + spec: + template: + spec: + volumes: + - name: gpg-signing-key + secret: + secretName: flux-gpg-signing-key + defaultMode: 0400 + containers: + - name: flux + volumeMounts: + - name: gpg-signing-key + mountPath: /root/gpg-signing-key/ + readOnly: true + args: + - --git-gpg-key-import=/root/gpg-signing-key + - --git-signing-key=700D397C988079BFF0DDAFED6A7436E8790F8689 # key id + ``` + + or set the `gpgKeys.secretName` in your Helm `values.yaml` to + `gpg-keys`, and `signingKey` to your ``. + +4. To validate your setup is working, run `git log --show-signature` or + `git verify-tag ` to assure Flux signs its git + actions. + + ```sh + $ git verify-tag + gpg: Signature made vr 29 mrt 2019 15:28:34 CET + gpg: using RSA key 700D397C988079BFF0DDAFED6A7436E8790F8689 + gpg: Good signature from "Weaveworks Flux " [ultimate] + ``` + +> **Note:** Flux *does not* recursively scan a given directory but does +understand symbolic links to files. + +> **Note:** Flux will automatically add any imported key to the GnuPG + trustdb. This is required as git will otherwise not trust signatures + made with the imported keys. 
+ +# Signature verification + +The verification of commit signatures is enabled by importing all +trusted public keys (`--git-gpg-key-import=,`), and by +setting the `--gpg-verify-signatures` flag. Once enabled Flux will +verify all commit signatures, and the signature from the sync tag it is +comparing revisions with. + +In case a signature can not be verified, Flux will sync state up to the +last valid revision it can find _before_ the unverified commit was +made, and lock on this revision. + +## Importing trusted GPG keys and enabling verification + +1. Collect the public keys from all trusted git authors. + +2. Create a `ConfigMap` with all trusted public keys: + + ```sh + $ kubectl create configmap generic flux-gpg-public-keys \ + --from-file=author.asc --from-file=author2.asc --dry-run -o yaml + apiVersion: v1 + data: + author.asc: + author2.asc: + kind: ConfigMap + metadata: + creationTimestamp: null + name: flux-gpg-public-keys + ``` + +3. Mount the config map in your Flux deployment, add the mount path to + `--git-gpg-key-import`, and enable the verification of commits: + + ```yaml + spec: + template: + spec: + volumes: + - name: gpg-public-keys + configMap: + name: flux-gpg-public-keys + defaultMode: 0400 + containers: + - name: flux + volumeMounts: + - name: gpg-public-keys + mountPath: /root/gpg-public-keys + readOnly: true + args: + - --git-gpg-key-import=/root/gpg-public-keys + - --git-verify-signatures + ``` + +> **Note:** Flux *does not* recursively scan a given directory but does +understand symbolic links to files. + +## Enabling verification for existing repositories, disaster recovery, and deleted sync tags + +In case you have existing commits in your repository without a +signature you may want to: + +a. First enable signing by setting the `--git-gpg-key-import` and + `--git-signing-key`, after Flux has synchronized the first commit + with a signature, enable verification. + +b. 
Sign the sync tag by yourself, with a key that is imported, to point + towards the first commit with a signature (or the current `HEAD`). + Flux will then start synchronizing the changes between the sync tag + revision and `HEAD`. + + ```sh + $ git tag --force --local-user= -a -m "Sync pointer" + $ git push --force origin + ``` diff --git a/site/helm-get-started.md b/site/helm-get-started.md index f382f73c7..76033fca7 100644 --- a/site/helm-get-started.md +++ b/site/helm-get-started.md @@ -52,9 +52,14 @@ kubectl create clusterrolebinding tiller-cluster-rule \ Deploy Tiller in `kube-system` namespace: ```sh -helm init --skip-refresh --upgrade --service-account tiller +helm init --skip-refresh --upgrade --service-account tiller --history-max 10 ``` +> **Note:** This is a quick guide and by no means a production ready +> Tiller setup, please look into ['Securing your Helm installation'](https://helm.sh/docs/using_helm/#securing-your-helm-installation) +> and be aware of the `--history-max` flag before promoting to +> production. + ## Install Weave Flux Add the Flux repository of Weaveworks: diff --git a/site/helm-integration.md b/site/helm-integration.md index 0f64da5ce..efe642331 100644 --- a/site/helm-integration.md +++ b/site/helm-integration.md @@ -249,6 +249,10 @@ If the chart you're using in a `HelmRelease` lets you specify the particular images to run, you will usually be able to update them with Flux, the same way you can with Deployments and so on. +> **Note:** for automation to work, the repository _and_ tag should be +> defined, as Flux determines image updates based on what it reads in +> the `.spec.values` of the `HelmRelease`. + Flux interprets certain commonly used structures in the `values` section of a `HelmRelease` as referring to images. The following are understood (showing just the `values` section): @@ -381,7 +385,7 @@ the filename. ``` cp ~/.helm/repository/repositories.yaml . 
-sed -i -e 's/^\( *cache: \).*\/\(.*\.yaml\)/\1\2/g' +sed -i -e 's/^\( *cache: \).*\/\(.*\.yaml\)/\1\2/g' repositories.yaml ``` Now you can create a secret in the same namespace as you're running diff --git a/site/helm-operator.md b/site/helm-operator.md index 2ef947ddb..cf1135ae4 100644 --- a/site/helm-operator.md +++ b/site/helm-operator.md @@ -138,8 +138,12 @@ Deploy Tiller: ```bash kubectl apply -f helm-rbac.yaml -# Deploy helm with mutual TLS enabled -helm init --upgrade --service-account tiller \ +# Deploy helm with mutual TLS enabled. +# --history-max limits the maximum number of revisions Tiller stores; +# leaving it to the default (0) may result in request timeouts after N +# releases, due to the excessive amount of ConfigMaps Tiller will +# attempt to retrieve. +helm init --upgrade --service-account tiller --history-max 10 \ --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}' \ --tiller-tls \ --tiller-tls-cert ./tls/server.pem \ diff --git a/snap/local/fluxctl-launch b/snap/local/fluxctl-launch new file mode 100755 index 000000000..e541bb652 --- /dev/null +++ b/snap/local/fluxctl-launch @@ -0,0 +1,3 @@ +#!/bin/sh +real_home=$(getent passwd "$(id -u)" | cut -d ':' -f 6) +HOME=$real_home fluxctl.real "$@" diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml new file mode 100644 index 000000000..4923289bd --- /dev/null +++ b/snap/snapcraft.yaml @@ -0,0 +1,59 @@ +name: fluxctl +version-script: | + FLUX_TAG="$(git tag -l | egrep -v '^(chart-|helm-|master-|pre-split)' | sort --version-sort | tail -n1)" + if [ "$SNAPCRAFT_PROJECT_GRADE" = "stable" ] + then + echo "$FLUX_TAG" + else + GIT_REV="$(git rev-parse --short HEAD)" + echo "$FLUX_TAG+$GIT_REV" + fi +version: git +summary: fluxctl talks to Weave Flux and helps you deploy your code +description: | + fluxctl talks to your Weave Flux instance and exposes all its + functionality to an easy to use command line interface. 
+grade: devel # must be 'stable' to release into candidate/stable channels +confinement: strict +base: core18 + +parts: + launcher: + source: snap/local + source-type: local + plugin: dump + organize: + '*': bin/ + override-stage: | + cd $SNAPCRAFT_PART_INSTALL + chmod +x bin/fluxctl-launch + snapcraftctl stage + fluxctl: + source: . + plugin: go + go-importpath: github.com/weaveworks/flux + build-packages: + - gcc + stage: + - -bin/fluxd + - -bin/helm-operator + organize: + bin/fluxctl: bin/fluxctl.real + after: [launcher] + +plugs: + kube-config: + interface: personal-files + read: + - $HOME/.kube/config + - $HOME/.minikube/client.key + - $HOME/.minikube/client.crt + - $HOME/.minikube/ca.crt + +apps: + fluxctl: + command: bin/fluxctl-launch + plugs: + - kube-config + - network + - network-bind diff --git a/ssh/keyring.go b/ssh/keyring.go index 317b3f115..878a14003 100644 --- a/ssh/keyring.go +++ b/ssh/keyring.go @@ -7,3 +7,19 @@ type KeyRing interface { KeyPair() (publicKey PublicKey, privateKeyPath string) Regenerate() error } + +type sshKeyRing struct{} + +// NewNopSSHKeyRing returns a KeyRing that doesn't do anything. +// It is meant for local development purposes when running fluxd outside a Kubernetes container. +func NewNopSSHKeyRing() KeyRing { + return &sshKeyRing{} +} + +func (skr *sshKeyRing) KeyPair() (PublicKey, string) { + return PublicKey{}, "" +} + +func (skr *sshKeyRing) Regenerate() error { + return nil +} diff --git a/sync/sync_test.go b/sync/sync_test.go index 41da55a8b..580023680 100644 --- a/sync/sync_test.go +++ b/sync/sync_test.go @@ -1,6 +1,7 @@ package sync import ( + "context" "os" "testing" @@ -11,6 +12,7 @@ import ( "github.com/weaveworks/flux/cluster/kubernetes" "github.com/weaveworks/flux/git" "github.com/weaveworks/flux/git/gittest" + "github.com/weaveworks/flux/manifests" "github.com/weaveworks/flux/resource" ) @@ -21,11 +23,12 @@ func TestSync(t *testing.T) { defer cleanup() // Start with nothing running. 
We should be told to apply all the things. - manifests := kubernetes.NewManifests(kubernetes.ConstNamespacer("default"), log.NewLogfmtLogger(os.Stdout)) + parser := kubernetes.NewManifests(kubernetes.ConstNamespacer("default"), log.NewLogfmtLogger(os.Stdout)) clus := &syncCluster{map[string]string{}} dirs := checkout.ManifestDirs() - resources, err := manifests.LoadManifests(checkout.Dir(), dirs) + rs := manifests.NewRawFiles(checkout.Dir(), checkout.ManifestDirs(), parser) + resources, err := rs.GetAllResourcesByID(context.TODO()) if err != nil { t.Fatal(err) } @@ -33,7 +36,7 @@ func TestSync(t *testing.T) { if err := Sync("synctest", resources, clus); err != nil { t.Fatal(err) } - checkClusterMatchesFiles(t, manifests, clus.resources, checkout.Dir(), dirs) + checkClusterMatchesFiles(t, rs, clus.resources, checkout.Dir(), dirs) } // --- @@ -75,8 +78,8 @@ func resourcesToStrings(resources map[string]resource.Resource) map[string]strin // Our invariant is that the model we can export from the cluster // should always reflect what's in git. So, let's check that. 
-func checkClusterMatchesFiles(t *testing.T, m cluster.Manifests, resources map[string]string, base string, dirs []string) { - files, err := m.LoadManifests(base, dirs) +func checkClusterMatchesFiles(t *testing.T, ms manifests.Store, resources map[string]string, base string, dirs []string) { + files, err := ms.GetAllResourcesByID(context.Background()) if err != nil { t.Fatal(err) } diff --git a/test/e2e/e2e-flux-build.sh b/test/e2e/e2e-flux-build.sh deleted file mode 100755 index 709500e43..000000000 --- a/test/e2e/e2e-flux-build.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit - -export GOPATH=$HOME/go -export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin - -echo ">>> Installing go dep" -curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh -dep ensure -vendor-only - -echo ">>> Building docker images" -make all diff --git a/test/e2e/e2e-flux-chart.sh b/test/e2e/e2e-flux-chart.sh deleted file mode 100755 index 42713e340..000000000 --- a/test/e2e/e2e-flux-chart.sh +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit - -export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" -REPO_ROOT=$(git rev-parse --show-toplevel) -KNOWN_HOSTS=$(cat ${REPO_ROOT}/test/e2e/known_hosts) -GITCONFIG=$(cat ${REPO_ROOT}/test/e2e/gitconfig) - -echo ">>> Loading $(docker/image-tag) into the cluster" -kind load docker-image "docker.io/weaveworks/flux:$(docker/image-tag)" -kind load docker-image "docker.io/weaveworks/helm-operator:$(docker/image-tag)" - -echo ">>> Installing Flux with Helm" -helm install --name flux --wait \ ---namespace flux \ ---set image.tag=$(docker/image-tag) \ ---set git.url=ssh://git@gitsrv/git-server/repos/cluster.git \ ---set git.secretName=ssh-git \ ---set git.pollInterval=30s \ ---set git.config.secretName=gitconfig \ ---set git.config.enabled=true \ ---set-string git.config.data="${GITCONFIG}" \ ---set helmOperator.tag=$(docker/image-tag) \ ---set helmOperator.create=true \ ---set 
helmOperator.createCRD=true \ ---set helmOperator.git.secretName=ssh-git \ ---set registry.excludeImage=* \ ---set-string ssh.known_hosts="${KNOWN_HOSTS}" \ -${REPO_ROOT}/chart/flux - -echo '>>> Waiting for gitconfig secret' -retries=12 -count=0 -ok=false -until ${ok}; do - actual=$(kubectl get secrets -n flux gitconfig -ojsonpath={..data.gitconfig} | base64 -d) - if [ "${actual}" == "${GITCONFIG}" ]; then - echo -e "Expected Git configuration deployed\n" - kubectl get secrets -n flux gitconfig && echo - ok=true - else - ok=false - sleep 10 - fi - count=$(($count + 1)) - if [[ ${count} -eq ${retries} ]]; then - kubectl -n flux get secrets - echo "No more retries left" - exit 1 - fi -done - -echo '>>> Waiting for namespace demo' -retries=12 -count=1 -ok=false -until ${ok}; do - kubectl describe ns/demo && ok=true || ok=false - sleep 10 - count=$(($count + 1)) - if [[ ${count} -eq ${retries} ]]; then - kubectl -n flux logs deployment/flux - echo "No more retries left" - exit 1 - fi -done - -echo '>>> Waiting for workload podinfo' -retries=12 -count=0 -ok=false -until ${ok}; do - kubectl -n demo describe deployment/podinfo && ok=true || ok=false - sleep 10 - count=$(($count + 1)) - if [[ ${count} -eq ${retries} ]]; then - kubectl -n flux logs deployment/flux - echo "No more retries left" - exit 1 - fi -done - -echo '>>> Waiting for Helm release mongodb' -retries=12 -count=0 -ok=false -until ${ok}; do - kubectl -n demo describe deployment/mongodb && ok=true || ok=false - sleep 10 - count=$(($count + 1)) - if [[ ${count} -eq ${retries} ]]; then - kubectl -n flux logs deployment/flux - kubectl -n flux logs deployment/flux-helm-operator - echo "No more retries left" - exit 1 - fi -done - -echo ">>> Flux logs" -kubectl -n flux logs deployment/flux - -echo ">>> Helm Operator logs" -kubectl -n flux logs deployment/flux-helm-operator - -echo ">>> List pods" -kubectl -n demo get pods - -echo ">>> Check workload" -kubectl -n demo rollout status deployment/podinfo - -echo ">>> 
Check Helm release" -kubectl -n demo rollout status deployment/mongodb diff --git a/test/e2e/e2e-git.sh b/test/e2e/e2e-git.sh deleted file mode 100755 index 725c50742..000000000 --- a/test/e2e/e2e-git.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit - -REPO_ROOT=$(git rev-parse --show-toplevel) -SCRIPT_DIR="${REPO_ROOT}/test/e2e" -export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" - -echo ">>> Installing git" -kubectl create namespace flux -ssh-keygen -t rsa -N "" -f "${SCRIPT_DIR}/id_rsa" -kubectl create secret generic ssh-git --namespace=flux --from-file="${SCRIPT_DIR}/known_hosts" --from-file="${SCRIPT_DIR}/id_rsa" --from-file=identity="${SCRIPT_DIR}/id_rsa" --from-file="${SCRIPT_DIR}/id_rsa.pub" -rm "${SCRIPT_DIR}/id_rsa" "${SCRIPT_DIR}/id_rsa.pub" -kubectl apply -f "${SCRIPT_DIR}/git-dep.yaml" - -kubectl -n flux rollout status deployment/gitsrv - diff --git a/test/e2e/e2e-golang.sh b/test/e2e/e2e-golang.sh deleted file mode 100755 index dad56278a..000000000 --- a/test/e2e/e2e-golang.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit - -GO_VERSION=1.12.5 - -echo ">>> Installing go ${GO_VERSION}" -curl -O https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz -tar -xf go${GO_VERSION}.linux-amd64.tar.gz -sudo rm -rf /usr/local/go -sudo mv go /usr/local - -export GOPATH=$HOME/go -export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin - -mkdir -p $HOME/go/bin -mkdir -p $HOME/go/src - -go version diff --git a/test/e2e/e2e-helm.sh b/test/e2e/e2e-helm.sh deleted file mode 100755 index 85f12a898..000000000 --- a/test/e2e/e2e-helm.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit - -REPO_ROOT=$(git rev-parse --show-toplevel) -export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" - -echo ">>> Installing Helm" -curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash - -echo '>>> Installing Tiller' -kubectl --namespace kube-system create sa tiller 
-kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller -helm init --service-account tiller --upgrade --wait - diff --git a/test/e2e/e2e-kind.sh b/test/e2e/e2e-kind.sh deleted file mode 100755 index 99b845f84..000000000 --- a/test/e2e/e2e-kind.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit - -export GOPATH=$HOME/go -export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin -REPO_ROOT=$(git rev-parse --show-toplevel) - -echo ">>> Installing kubectl" -curl -sLO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \ -chmod +x kubectl && \ -sudo mv kubectl /usr/local/bin/ - -echo ">>> Building sigs.k8s.io/kind" -cd $HOME -GO111MODULE="on" go get -u sigs.k8s.io/kind@master -cd $REPO_ROOT - -echo ">>> Installing kind" -sudo cp $GOPATH/bin/kind /usr/local/bin/ -kind create cluster --wait 5m - -export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" -kubectl get pods --all-namespaces diff --git a/test/e2e/git-dep.yaml b/test/e2e/gitsrv.yaml similarity index 93% rename from test/e2e/git-dep.yaml rename to test/e2e/gitsrv.yaml index 0a263f68a..812bd4721 100644 --- a/test/e2e/git-dep.yaml +++ b/test/e2e/gitsrv.yaml @@ -5,7 +5,6 @@ metadata: labels: name: gitsrv name: gitsrv - namespace: flux spec: replicas: 1 selector: @@ -17,7 +16,7 @@ spec: name: gitsrv spec: containers: - - image: stefanprodan/gitsrv:0.0.5 + - image: stefanprodan/gitsrv:0.0.12 name: git env: - name: REPO @@ -46,7 +45,6 @@ metadata: labels: name: gitsrv name: gitsrv - namespace: flux spec: ports: - name: ssh diff --git a/test/e2e/known_hosts b/test/e2e/known_hosts index 48c28ec97..27da08378 100644 --- a/test/e2e/known_hosts +++ b/test/e2e/known_hosts @@ -1,3 +1,4 @@ -gitsrv ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDt65M0saFA6Ewsjc8mQ405bI406BsFSJrbGz9GFrbWBn0Rs3Loi4S6ADWgTWlcspHvbfmf7vXW9oiTjxeke8oHPCfFYrHFdPK26Big2J1kE2DzRONizZWcdw8dpqRhvxlt2+/EJuuay8CGXz3VLCV8NgJc5AYmAwNPknUhWKxaAzjwvRd/0cyXr4vvcTDcmwR63oiWcRPkHCZ5LClFuZE065Ulm2o6CgItl+e6M5ouDSJWZDpYWWmmJJJv1DQoRruNbaf5f4bgWVkKjrQ/0cA4iWWlkwJMlAWQgp9saD0G2F83hrf2XapM/clWTFybkzPqPqaw2BEwXP6vWp6A1iUR -gitsrv ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBM6WyUxQRPtZhYLxjG6YRQOHD/SX/+uI4XBo44UTSu21uqmf/lG8cLWTdMVzDlUDY9/Dx4tFz96LT97kUC1pLJs= -gitsrv ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAMv1z2YnDug5M4Klp3FMbBvw9NdFrx7Om5uEKFQs07t +# generated with "ssh-keyscan gitsrv" +gitsrv ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2WoJ2k+WA54pdxw5EGhg9CQBHKDVjHzNNlgRfTGrQBpgQT3/HEBi6BGi2ZmS6o6W9EJfzYzl3PvC+JY6BqcdM8XqbDazC1rkGtlycHd+dFT/TmWvBqJ2Oh+oJNL7IgpjBPJJMdAEc9nzUTTYa7V2A9SeaAyQJKGaftZhHEXTxkxxbWP2an7bzyw9QNCiF/ogQ79DPsp7ly4v4KgeGLSm9AoT/HO5+kJwXX3yQ1hKrFZyhzhaYiwzdApc3iUJtUEz1lKVX+63+WN6qhkbCUjlhfOGyT3qk18sMU6raqKt8uuQeR9f4/xkMXGWQuULhjGwOkju+8Dma8GvnhKKwHf5V +gitsrv ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFhuyD3SzMaTye/OX51Jb3fgZDxhGnXgJQ6oFvSSwqDGDm4fcueHE979xEPolNe9hn6jGg/2DS3xkU8boPKv8mo= +gitsrv ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAbLc9veRHa/l/kK6hmRWMA+QoWd8vLtLHbm4v6wj8XU diff --git a/test/e2e/run.sh b/test/e2e/run.sh new file mode 100755 index 000000000..b5f400a3b --- /dev/null +++ b/test/e2e/run.sh @@ -0,0 +1,212 @@ +#!/usr/bin/env bash + +set -o errexit + +declare -a on_exit_items + +function on_exit() { + if [ "${#on_exit_items[@]}" -gt 0 ]; then + echo -e '\nRunning deferred items, please do not interrupt until they are done:' + fi + for I in "${on_exit_items[@]}"; do + echo "deferred: ${I}" + eval "${I}" + done +} + +# Cleaning up only makes sense in a local environment +# it just wastes time in CircleCI +if [ "${CI}" != 'true' ]; then + trap on_exit EXIT +fi + +function defer() { + on_exit_items=("$*" "${on_exit_items[@]}") +} + 
+REPO_ROOT=$(git rev-parse --show-toplevel) +SCRIPT_DIR="${REPO_ROOT}/test/e2e" +KIND_VERSION=0.2.1 +CACHE_DIR="${REPO_ROOT}/cache/$CURRENT_OS_ARCH" +KIND_CACHE_PATH="${CACHE_DIR}/kind-$KIND_VERSION" +KIND_CLUSTER=flux-e2e +USING_KIND=false +FLUX_NAMESPACE=flux-e2e +DEMO_NAMESPACE=demo + + +# Check if there is a kubernetes cluster running, otherwise use Kind +if ! kubectl version > /dev/null 2>&1 ; then + if [ ! -f "${KIND_CACHE_PATH}" ]; then + echo '>>> Downloading Kind' + mkdir -p "${CACHE_DIR}" + curl -sL "https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-${CURRENT_OS_ARCH}" -o "${KIND_CACHE_PATH}" + fi + echo '>>> Creating Kind Kubernetes cluster' + cp "${KIND_CACHE_PATH}" "${REPO_ROOT}/test/bin/kind" + chmod +x "${REPO_ROOT}/test/bin/kind" + defer kind --name "${KIND_CLUSTER}" delete cluster > /dev/null 2>&1 + kind create cluster --name "${KIND_CLUSTER}" --wait 5m + export KUBECONFIG="$(kind --name="${KIND_CLUSTER}" get kubeconfig-path)" + USING_KIND=true + kubectl get pods --all-namespaces +fi + + +if ! 
helm version > /dev/null 2>&1; then + echo '>>> Installing Tiller' + kubectl --namespace kube-system create sa tiller + defer kubectl --namespace kube-system delete sa tiller + kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller + defer kubectl delete clusterrolebinding tiller-cluster-rule + helm init --service-account tiller --upgrade --wait + defer helm reset --force +fi + +kubectl create namespace "$FLUX_NAMESPACE" +defer kubectl delete namespace "$FLUX_NAMESPACE" + +echo '>>> Installing mock git server' +ssh-keygen -t rsa -N "" -f "${SCRIPT_DIR}/id_rsa" +defer rm -f "${SCRIPT_DIR}/id_rsa" "${SCRIPT_DIR}/id_rsa.pub" +kubectl create secret generic ssh-git --namespace="${FLUX_NAMESPACE}" --from-file="${SCRIPT_DIR}/known_hosts" --from-file="${SCRIPT_DIR}/id_rsa" --from-file=identity="${SCRIPT_DIR}/id_rsa" --from-file="${SCRIPT_DIR}/id_rsa.pub" +kubectl apply -n "${FLUX_NAMESPACE}" -f "${SCRIPT_DIR}/gitsrv.yaml" +kubectl -n "${FLUX_NAMESPACE}" rollout status deployment/gitsrv + + +if [ "${USING_KIND}" = 'true' ]; then + echo '>>> Loading images into the Kind cluster' + kind --name "${KIND_CLUSTER}" load docker-image 'docker.io/weaveworks/flux:latest' + kind --name "${KIND_CLUSTER}" load docker-image 'docker.io/weaveworks/helm-operator:latest' +fi + +echo '>>> Installing Flux with Helm' + +CREATE_CRDS='true' +if kubectl get crd fluxhelmreleases.helm.integrations.flux.weave.works helmreleases.flux.weave.works > /dev/null 2>&1; then + # CRDs existed, don't try to create them + echo 'CRDs existed, setting helmOperator.createCRD=false' + CREATE_CRDS='false' +else + # Schedule CRD deletion before calling helm, since it may fail and create them anyways + defer kubectl delete crd fluxhelmreleases.helm.integrations.flux.weave.works helmreleases.flux.weave.works > /dev/null 2>&1 +fi + +KNOWN_HOSTS=$(cat "${REPO_ROOT}/test/e2e/known_hosts") +GITCONFIG=$(cat "${REPO_ROOT}/test/e2e/gitconfig") + + +defer helm 
delete --purge flux > /dev/null 2>&1 + +helm install --name flux --wait \ +--namespace "${FLUX_NAMESPACE}" \ +--set image.tag=latest \ +--set git.url=ssh://git@gitsrv/git-server/repos/cluster.git \ +--set git.secretName=ssh-git \ +--set git.pollInterval=30s \ +--set git.config.secretName=gitconfig \ +--set git.config.enabled=true \ +--set-string git.config.data="${GITCONFIG}" \ +--set helmOperator.tag=latest \ +--set helmOperator.create=true \ +--set helmOperator.createCRD=true \ +--set helmOperator.git.secretName=ssh-git \ +--set registry.excludeImage=* \ +--set-string ssh.known_hosts="${KNOWN_HOSTS}" \ +--set helmOperator.createCRD="${CREATE_CRDS}" \ +"${REPO_ROOT}/chart/flux" + + + + +echo -n '>>> Waiting for gitconfig secret ' +retries=24 +count=0 +ok=false +until ${ok}; do + actual=$(kubectl get secrets -n "${FLUX_NAMESPACE}" gitconfig -ojsonpath={..data.gitconfig} | base64 --decode) + if [ "${actual}" = "${GITCONFIG}" ]; then + echo ' Expected Git configuration deployed' + kubectl get secrets -n "${FLUX_NAMESPACE}" gitconfig && echo + ok=true + else + echo -n '.' + ok=false + sleep 5 + fi + count=$(($count + 1)) + if [[ ${count} -eq ${retries} ]]; then + echo ' No more retries left' + kubectl -n "${FLUX_NAMESPACE}" get secrets + exit 1 + fi +done + +echo -n ">>> Waiting for namespace ${DEMO_NAMESPACE} " +retries=24 +count=1 +ok=false +until ${ok}; do + kubectl describe "ns/${DEMO_NAMESPACE}" && ok=true || ok=false + echo -n '.' + sleep 5 + count=$(($count + 1)) + if [[ ${count} -eq ${retries} ]]; then + kubectl -n "${FLUX_NAMESPACE}" logs deployment/flux + echo ' No more retries left' + exit 1 + fi +done +echo ' done' + +echo -n '>>> Waiting for workload podinfo ' +retries=24 +count=0 +ok=false +until ${ok}; do + kubectl -n "${DEMO_NAMESPACE}" describe deployment/podinfo && ok=true || ok=false + echo -n '.' 
+ sleep 5 + count=$(($count + 1)) + if [[ ${count} -eq ${retries} ]]; then + kubectl -n "${FLUX_NAMESPACE}" logs deployment/flux + echo ' No more retries left' + exit 1 + fi +done +echo ' done' + +echo -n '>>> Waiting for Helm release mongodb ' +retries=24 +count=0 +ok=false +until ${ok}; do + kubectl -n $DEMO_NAMESPACE describe deployment/mongodb && ok=true || ok=false + echo -n '.' + sleep 5 + count=$(($count + 1)) + if [[ ${count} -eq ${retries} ]]; then + kubectl -n "${FLUX_NAMESPACE}" logs deployment/flux + kubectl -n "${FLUX_NAMESPACE}" logs deployment/flux-helm-operator + echo ' No more retries left' + exit 1 + fi +done +echo ' done' + +echo '>>> Flux logs' +kubectl -n "${FLUX_NAMESPACE}" logs deployment/flux + +echo '>>> Helm Operator logs' +kubectl -n "${FLUX_NAMESPACE}" logs deployment/flux-helm-operator + +echo '>>> List pods' +kubectl -n "${DEMO_NAMESPACE}" get pods + +echo '>>> Check workload' +kubectl -n "${DEMO_NAMESPACE}" rollout status deployment/podinfo + +echo '>>> Check Helm release' +kubectl -n "${DEMO_NAMESPACE}" rollout status deployment/mongodb + +echo -e '\nEnd to end test was successful!!\n' diff --git a/tools.go b/tools.go new file mode 100644 index 000000000..f358383b5 --- /dev/null +++ b/tools.go @@ -0,0 +1,10 @@ +// +build tools + +// This file just exists to ensure we download the tools we need for building +// See https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module + +package flux + +import ( + _ "k8s.io/code-generator" +) diff --git a/update/automated.go b/update/automated.go index f2e6797b5..0c32575ac 100644 --- a/update/automated.go +++ b/update/automated.go @@ -2,6 +2,7 @@ package update import ( "bytes" + "context" "fmt" "github.com/go-kit/kit/log" @@ -24,7 +25,7 @@ func (a *Automated) Add(service flux.ResourceID, container resource.Container, i a.Changes = append(a.Changes, Change{service, container, image}) } -func (a *Automated) CalculateRelease(rc ReleaseContext, logger log.Logger) 
([]*WorkloadUpdate, Result, error) { +func (a *Automated) CalculateRelease(ctx context.Context, rc ReleaseContext, logger log.Logger) ([]*WorkloadUpdate, Result, error) { prefilters := []WorkloadFilter{ &IncludeFilter{a.workloadIDs()}, } @@ -34,7 +35,7 @@ func (a *Automated) CalculateRelease(rc ReleaseContext, logger log.Logger) ([]*W } result := Result{} - updates, err := rc.SelectWorkloads(result, prefilters, postfilters) + updates, err := rc.SelectWorkloads(ctx, result, prefilters, postfilters) if err != nil { return nil, nil, err } diff --git a/update/release_containers.go b/update/release_containers.go index 7855b43cc..7231ab5db 100644 --- a/update/release_containers.go +++ b/update/release_containers.go @@ -2,6 +2,7 @@ package update import ( "bytes" + "context" "errors" "fmt" "strings" @@ -25,10 +26,11 @@ type ReleaseContainersSpec struct { // CalculateRelease computes required controller updates to satisfy this specification. // It returns an error if any spec calculation fails unless `SkipMismatches` is true. 
-func (s ReleaseContainersSpec) CalculateRelease(rc ReleaseContext, logger log.Logger) ([]*WorkloadUpdate, Result, error) { +func (s ReleaseContainersSpec) CalculateRelease(ctx context.Context, rc ReleaseContext, + logger log.Logger) ([]*WorkloadUpdate, Result, error) { results := Result{} prefilter, postfilter := s.filters() - all, err := rc.SelectWorkloads(results, prefilter, postfilter) + all, err := rc.SelectWorkloads(ctx, results, prefilter, postfilter) if err != nil { return nil, results, err } diff --git a/update/release_image.go b/update/release_image.go index 9502eeb5d..5a0add724 100644 --- a/update/release_image.go +++ b/update/release_image.go @@ -1,6 +1,7 @@ package update import ( + "context" "fmt" "strings" @@ -45,7 +46,7 @@ func ParseReleaseKind(s string) (ReleaseKind, error) { const UserAutomated = "" type ReleaseContext interface { - SelectWorkloads(Result, []WorkloadFilter, []WorkloadFilter) ([]*WorkloadUpdate, error) + SelectWorkloads(context.Context, Result, []WorkloadFilter, []WorkloadFilter) ([]*WorkloadUpdate, error) Registry() registry.Registry } @@ -70,10 +71,10 @@ func (s ReleaseImageSpec) ReleaseType() ReleaseType { } } -func (s ReleaseImageSpec) CalculateRelease(rc ReleaseContext, logger log.Logger) ([]*WorkloadUpdate, Result, error) { +func (s ReleaseImageSpec) CalculateRelease(ctx context.Context, rc ReleaseContext, logger log.Logger) ([]*WorkloadUpdate, Result, error) { results := Result{} timer := NewStageTimer("select_workloads") - updates, err := s.selectWorkloads(rc, results) + updates, err := s.selectWorkloads(ctx, rc, results) timer.ObserveDuration() if err != nil { return nil, nil, err @@ -105,14 +106,14 @@ func (s ReleaseImageSpec) CommitMessage(result Result) string { // Take the spec given in the job, and figure out which workloads are // in question based on the running workloads and those defined in the // repo. Fill in the release results along the way. 
-func (s ReleaseImageSpec) selectWorkloads(rc ReleaseContext, results Result) ([]*WorkloadUpdate, error) { +func (s ReleaseImageSpec) selectWorkloads(ctx context.Context, rc ReleaseContext, results Result) ([]*WorkloadUpdate, error) { // Build list of filters prefilters, postfilters, err := s.filters(rc) if err != nil { return nil, err } // Find and filter workloads - return rc.SelectWorkloads(results, prefilters, postfilters) + return rc.SelectWorkloads(ctx, results, prefilters, postfilters) } func (s ReleaseImageSpec) filters(rc ReleaseContext) ([]WorkloadFilter, []WorkloadFilter, error) { diff --git a/update/workload.go b/update/workload.go index f7314c82f..7eeacbc49 100644 --- a/update/workload.go +++ b/update/workload.go @@ -7,11 +7,10 @@ import ( ) type WorkloadUpdate struct { - ResourceID flux.ResourceID - Workload cluster.Workload - Resource resource.Workload - ManifestPath string - Updates []ContainerUpdate + ResourceID flux.ResourceID + Workload cluster.Workload + Resource resource.Workload + Updates []ContainerUpdate } type WorkloadFilter interface {