diff --git a/.bingo/prometheus.mod b/.bingo/prometheus.mod index 3a37d55077..15cd0edab7 100644 --- a/.bingo/prometheus.mod +++ b/.bingo/prometheus.mod @@ -22,4 +22,44 @@ replace ( k8s.io/klog => github.com/simonpasquier/klog-gokit v0.1.0 ) -require github.com/prometheus/prometheus v2.4.3+incompatible // cmd/prometheus +require ( + github.com/Azure/azure-sdk-for-go v0.0.0-00010101000000-000000000000 // indirect + github.com/Azure/go-autorest v0.0.0-00010101000000-000000000000 // indirect + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/aws/aws-sdk-go v1.44.55 // indirect + github.com/cockroachdb/cmux v0.0.0-00010101000000-000000000000 // indirect + github.com/cockroachdb/cockroach v0.0.0-00010101000000-000000000000 // indirect + github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect + github.com/go-kit/kit v0.12.0 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/googleapis/gnostic v0.0.0-00010101000000-000000000000 // indirect + github.com/gophercloud/gophercloud v0.0.0-00010101000000-000000000000 // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/hashicorp/consul/api v1.13.1 // indirect + github.com/jpillora/backoff v1.0.0 // indirect + github.com/julienschmidt/httprouter v1.3.0 // indirect + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/oklog/oklog v0.3.2 // indirect + github.com/oklog/run v1.1.0 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/prometheus/prometheus v2.4.3+incompatible // cmd/prometheus + github.com/prometheus/tsdb v0.0.0-00010101000000-000000000000 // indirect + github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414 // indirect + 
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect + github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect + golang.org/x/net v0.0.0-20220708220712-1185a9018129 // indirect + golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 // indirect + golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect + google.golang.org/api v0.87.0 // indirect + google.golang.org/genproto v0.0.0-20220714211235-042d03aeabc9 // indirect + google.golang.org/grpc v1.48.0 // indirect + gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect + gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + k8s.io/api v0.0.0-00010101000000-000000000000 // indirect + k8s.io/apimachinery v0.0.0-00010101000000-000000000000 // indirect + k8s.io/client-go v0.0.0-00010101000000-000000000000 // indirect +) diff --git a/.gitignore b/.gitignore index 9a29eebf67..e5068f1580 100644 --- a/.gitignore +++ b/.gitignore @@ -37,3 +37,6 @@ examples/tmp/ # Ignore the MacOS Trash (DS-Store) .DS_Store + +# Ignore generated Cortex files +internal/cortex/querier/active-query-tracker diff --git a/cmd/thanos/compact.go b/cmd/thanos/compact.go index 6de07405b5..ca9fba5543 100644 --- a/cmd/thanos/compact.go +++ b/cmd/thanos/compact.go @@ -26,6 +26,7 @@ import ( "github.com/prometheus/common/route" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/objstore/client" blocksAPI "github.com/thanos-io/thanos/pkg/api/blocks" "github.com/thanos-io/thanos/pkg/block" @@ -38,7 +39,6 @@ import ( "github.com/thanos-io/thanos/pkg/extprom" extpromhttp "github.com/thanos-io/thanos/pkg/extprom/http" "github.com/thanos-io/thanos/pkg/logging" - "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/runutil" httpserver "github.com/thanos-io/thanos/pkg/server/http" diff --git a/cmd/thanos/downsample.go b/cmd/thanos/downsample.go index 
bd299c1e3e..232d3ac49d 100644 --- a/cmd/thanos/downsample.go +++ b/cmd/thanos/downsample.go @@ -21,6 +21,8 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/client" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" @@ -28,8 +30,6 @@ import ( "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/runutil" httpserver "github.com/thanos-io/thanos/pkg/server/http" diff --git a/cmd/thanos/main_test.go b/cmd/thanos/main_test.go index ce383c81f7..48ef8c263d 100644 --- a/cmd/thanos/main_test.go +++ b/cmd/thanos/main_test.go @@ -19,11 +19,11 @@ import ( "github.com/prometheus/client_golang/prometheus" promtest "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/prometheus/model/labels" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact/downsample" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/cmd/thanos/receive.go b/cmd/thanos/receive.go index 445717d8ea..b55bc68622 100644 --- a/cmd/thanos/receive.go +++ b/cmd/thanos/receive.go @@ -25,6 +25,8 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/client" "gopkg.in/yaml.v2" "github.com/thanos-io/thanos/pkg/block/metadata" @@ -36,8 +38,6 @@ import ( 
"github.com/thanos-io/thanos/pkg/info" "github.com/thanos-io/thanos/pkg/info/infopb" "github.com/thanos-io/thanos/pkg/logging" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/receive" "github.com/thanos-io/thanos/pkg/runutil" diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go index bf626dc721..900a38bbd6 100644 --- a/cmd/thanos/rule.go +++ b/cmd/thanos/rule.go @@ -38,6 +38,7 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/agent" "github.com/prometheus/prometheus/util/strutil" + "github.com/thanos-io/objstore/client" "gopkg.in/yaml.v2" "github.com/thanos-io/thanos/pkg/alert" @@ -53,7 +54,6 @@ import ( "github.com/thanos-io/thanos/pkg/info" "github.com/thanos-io/thanos/pkg/info/infopb" "github.com/thanos-io/thanos/pkg/logging" - "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/promclient" thanosrules "github.com/thanos-io/thanos/pkg/rules" diff --git a/cmd/thanos/sidecar.go b/cmd/thanos/sidecar.go index afc4b5d852..c5389a3fa1 100644 --- a/cmd/thanos/sidecar.go +++ b/cmd/thanos/sidecar.go @@ -22,6 +22,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" + "github.com/thanos-io/objstore/client" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/component" @@ -34,7 +35,6 @@ import ( "github.com/thanos-io/thanos/pkg/logging" meta "github.com/thanos-io/thanos/pkg/metadata" thanosmodel "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/reloader" diff --git a/cmd/thanos/store.go b/cmd/thanos/store.go index b257e202b1..08a1904393 100644 --- 
a/cmd/thanos/store.go +++ b/cmd/thanos/store.go @@ -18,6 +18,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/route" + "github.com/thanos-io/objstore/client" commonmodel "github.com/prometheus/common/model" @@ -36,7 +37,6 @@ import ( "github.com/thanos-io/thanos/pkg/info/infopb" "github.com/thanos-io/thanos/pkg/logging" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/runutil" grpcserver "github.com/thanos-io/thanos/pkg/server/grpc" diff --git a/cmd/thanos/tools_bucket.go b/cmd/thanos/tools_bucket.go index f1107706aa..855540cacc 100644 --- a/cmd/thanos/tools_bucket.go +++ b/cmd/thanos/tools_bucket.go @@ -35,6 +35,8 @@ import ( "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/client" extflag "github.com/efficientgo/tools/extkingpin" "golang.org/x/text/language" @@ -53,8 +55,6 @@ import ( extpromhttp "github.com/thanos-io/thanos/pkg/extprom/http" "github.com/thanos-io/thanos/pkg/logging" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/replicate" "github.com/thanos-io/thanos/pkg/runutil" diff --git a/docs/storage.md b/docs/storage.md index 0544d5276f..b088ca1f0e 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -4,7 +4,7 @@ Thanos uses object storage as primary storage for metrics and metadata related t ## Configuring Access to Object Storage -Thanos supports any object stores that can be implemented against Thanos [objstore.Bucket interface](../pkg/objstore/objstore.go). 
+Thanos supports any object stores that can be implemented against Thanos [objstore.Bucket interface](https://github.com/thanos-io/objstore/blob/main/objstore.go). All clients can be configured using `--objstore.config-file` to reference to the configuration file or `--objstore.config` to put yaml config directly. @@ -495,18 +495,22 @@ prefix: "" ### How to add a new client to Thanos? +objstore.go + Following checklist allows adding new Go code client to supported providers: 1. Create new directory under `pkg/objstore/` -2. Implement [objstore.Bucket interface](../pkg/objstore/objstore.go) +2. Implement [objstore.Bucket interface](https://github.com/thanos-io/objstore/blob/main/objstore.go) 3. Add `NewTestBucket` constructor for testing purposes, that creates and deletes temporary bucket. -4. Use created `NewTestBucket` in [ForeachStore method](../pkg/objstore/objtesting/foreach.go) to ensure we can run tests against new provider. (In PR) -5. RUN the [TestObjStoreAcceptanceTest](../pkg/objstore/objtesting/acceptance_e2e_test.go) against your provider to ensure it fits. Fix any found error until test passes. (In PR) -6. Add client implementation to the factory in [factory](../pkg/objstore/client/factory.go) code. (Using as small amount of flags as possible in every command) +4. Use created `NewTestBucket` in [ForeachStore method](https://github.com/thanos-io/objstore/blob/main/objtesting/foreach.go) to ensure we can run tests against new provider. (In PR) +5. RUN the [TestObjStoreAcceptanceTest](https://github.com/thanos-io/objstore/blob/main/objtesting/acceptance_e2e_test.go) against your provider to ensure it fits. Fix any found error until test passes. (In PR) +6. Add client implementation to the factory in [factory](https://github.com/thanos-io/objstore/blob/main/client/factory.go) code. (Using as small amount of flags as possible in every command) 7. Add client struct config to [bucketcfggen](../scripts/cfggen/main.go) to allow config auto generation. 
At that point, anyone can use your provider by spec. +Check the checklist in [thanos-io/objstore](https://github.com/thanos-io/objstore#how-to-add-a-new-client-to-thanos) for more comprehensive information! + ## Data in Object Storage Thanos supports writing and reading data in native Prometheus `TSDB blocks` in [TSDB format](https://github.com/prometheus/prometheus/tree/master/tsdb/docs/format). This is the format used by [Prometheus](https://prometheus.io) TSDB database for persisting data on the local disk. With the efficient index and [chunk](design.md#chunk) binary formats, it also fits well to be used directly from object storage using range GET API. diff --git a/examples/interactive/interactive_test.go b/examples/interactive/interactive_test.go index a635384cf4..20d8db8de6 100644 --- a/examples/interactive/interactive_test.go +++ b/examples/interactive/interactive_test.go @@ -15,12 +15,13 @@ import ( e2einteractive "github.com/efficientgo/e2e/interactive" e2emonitoring "github.com/efficientgo/e2e/monitoring" "github.com/pkg/errors" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/s3" + "github.com/thanos-io/objstore/client" + "github.com/thanos-io/objstore/providers/s3" + "gopkg.in/yaml.v2" + "github.com/thanos-io/thanos/pkg/testutil" tracingclient "github.com/thanos-io/thanos/pkg/tracing/client" "github.com/thanos-io/thanos/pkg/tracing/jaeger" - "gopkg.in/yaml.v2" ) const ( diff --git a/go.mod b/go.mod index ed765d6e78..3b5040a206 100644 --- a/go.mod +++ b/go.mod @@ -1,20 +1,20 @@ module github.com/thanos-io/thanos require ( - cloud.google.com/go/storage v1.10.0 + cloud.google.com/go/storage v1.10.0 // indirect cloud.google.com/go/trace v0.1.0 - github.com/Azure/azure-pipeline-go v0.2.3 - github.com/Azure/azure-storage-blob-go v0.13.0 - github.com/Azure/go-autorest/autorest/adal v0.9.20 - github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 + github.com/Azure/azure-pipeline-go v0.2.3 // indirect + 
github.com/Azure/azure-storage-blob-go v0.14.0 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect + github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.0.0 github.com/NYTimes/gziphandler v1.1.1 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 github.com/alicebob/miniredis/v2 v2.14.3 - github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible - github.com/aws/aws-sdk-go-v2 v1.13.0 - github.com/aws/aws-sdk-go-v2/config v1.13.1 - github.com/baidubce/bce-sdk-go v0.9.81 + github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible // indirect + github.com/aws/aws-sdk-go-v2 v1.16.0 // indirect + github.com/aws/aws-sdk-go-v2/config v1.15.1 // indirect + github.com/baidubce/bce-sdk-go v0.9.111 // indirect github.com/blang/semver/v4 v4.0.0 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/cespare/xxhash v1.1.0 @@ -23,10 +23,10 @@ require ( github.com/chromedp/chromedp v0.5.3 github.com/davecgh/go-spew v1.1.1 github.com/efficientgo/e2e v0.12.1 - github.com/efficientgo/tools/core v0.0.0-20210829154005-c7bad8450208 + github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b github.com/efficientgo/tools/extkingpin v0.0.0-20210609125236-d73259166f20 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb - github.com/fatih/structtag v1.1.0 + github.com/fatih/structtag v1.2.0 github.com/felixge/fgprof v0.9.2 github.com/fortytw2/leaktest v1.3.0 github.com/fsnotify/fsnotify v1.5.4 @@ -52,7 +52,7 @@ require ( github.com/miekg/dns v1.1.49 github.com/minio/minio-go/v7 v7.0.32-0.20220706200439-ef3e45ed9cdb github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f - github.com/ncw/swift v1.0.52 + github.com/ncw/swift v1.0.53 // indirect github.com/oklog/run v1.1.0 github.com/oklog/ulid v1.3.1 github.com/olekukonko/tablewriter v0.0.5 @@ -63,10 +63,10 @@ require ( github.com/prometheus/alertmanager v0.24.0 
github.com/prometheus/client_golang v1.12.2 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.35.0 + github.com/prometheus/common v0.37.0 github.com/prometheus/exporter-toolkit v0.7.1 github.com/prometheus/prometheus v1.8.2-0.20220620125440-d7e7b8e04b5e - github.com/tencentyun/cos-go-sdk-v5 v0.7.31 + github.com/tencentyun/cos-go-sdk-v5 v0.7.34 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/uber/jaeger-lib v2.4.1+incompatible github.com/vimeo/galaxycache v0.0.0-20210323154928-b7e5d71c067a @@ -83,10 +83,10 @@ require ( go.uber.org/goleak v1.1.12 golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 golang.org/x/net v0.0.0-20220607020251-c690dde0001d - golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401 + golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401 // indirect golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f golang.org/x/text v0.3.7 - google.golang.org/api v0.83.0 + google.golang.org/api v0.83.0 // indirect google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 google.golang.org/grpc v1.47.0 google.golang.org/grpc/examples v0.0.0-20211119005141-f45e61797429 @@ -111,6 +111,7 @@ require ( github.com/opentracing-contrib/go-stdlib v1.0.0 github.com/sony/gobreaker v0.4.1 github.com/stretchr/testify v1.7.1 + github.com/thanos-io/objstore v0.0.0-20220715165016-ce338803bc1e go.etcd.io/etcd/api/v3 v3.5.4 go.etcd.io/etcd/client/pkg/v3 v3.5.4 go.etcd.io/etcd/client/v3 v3.5.4 @@ -133,17 +134,17 @@ require ( github.com/armon/go-radix v1.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect github.com/aws/aws-sdk-go v1.44.29 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.10.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.2.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.5 // 
indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.7.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.9.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.14.0 // indirect - github.com/aws/smithy-go v1.10.0 // indirect - github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.11.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.7 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.8 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.11.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.16.1 // indirect + github.com/aws/smithy-go v1.11.1 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/clbanning/mxj v1.8.4 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/dennwc/varint v1.0.0 // indirect @@ -210,7 +211,6 @@ require ( github.com/rivo/uniseg v0.2.0 // indirect github.com/rs/xid v1.4.0 // indirect github.com/santhosh-tekuri/jsonschema v1.2.4 // indirect - github.com/satori/go.uuid v1.2.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sercand/kuberesolver v2.4.0+incompatible // indirect github.com/sirupsen/logrus v1.8.1 // indirect @@ -227,7 +227,7 @@ require ( go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect + golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect golang.org/x/tools v0.1.10 // indirect golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect 
google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index 261532f153..fee6904a56 100644 --- a/go.sum +++ b/go.sum @@ -43,6 +43,7 @@ cloud.google.com/go/compute v1.6.1 h1:2sMmt8prCn7DPaG4Pmh0N3Inmc8cT8ae5k1M6VJ9Wq cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -62,8 +63,8 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= -github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= +github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= +github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= @@ -71,7 
+72,6 @@ github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgq github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= -github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg= @@ -133,8 +133,8 @@ github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZp github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis/v2 v2.14.3 h1:QWoo2wchYmLgOB6ctlTt2dewQ1Vu6phl+iQbwT8SYGo= github.com/alicebob/miniredis/v2 v2.14.3/go.mod h1:gquAfGbzn92jvtrSC69+6zZnwSODVXVpYDRaGhWaL6I= -github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible h1:EaK5256H3ELiyaq5O/Zwd6fnghD6DqmZDQmmzzJklUU= -github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible h1:9gWa46nstkJ9miBReJcN8Gq34cBFbzSpQZVVT9N09TM= +github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -162,32 +162,32 @@ github.com/aws/aws-sdk-go v1.44.29 
h1:53YWlelsMiYmGxuTRpAq7Xp+pE+0esAVqNFiNyekU+ github.com/aws/aws-sdk-go v1.44.29/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2 v1.13.0 h1:1XIXAfxsEmbhbj5ry3D3vX+6ZcUYvIqSm4CWWEuGZCA= -github.com/aws/aws-sdk-go-v2 v1.13.0/go.mod h1:L6+ZpqHaLbAaxsqV0L4cvxZY7QupWJB4fhkf8LXvC7w= -github.com/aws/aws-sdk-go-v2/config v1.13.1 h1:yLv8bfNoT4r+UvUKQKqRtdnvuWGMK5a82l4ru9Jvnuo= -github.com/aws/aws-sdk-go-v2/config v1.13.1/go.mod h1:Ba5Z4yL/UGbjQUzsiaN378YobhFo0MLfueXGiOsYtEs= -github.com/aws/aws-sdk-go-v2/credentials v1.8.0 h1:8Ow0WcyDesGNL0No11jcgb1JAtE+WtubqXjgxau+S0o= -github.com/aws/aws-sdk-go-v2/credentials v1.8.0/go.mod h1:gnMo58Vwx3Mu7hj1wpcG8DI0s57c9o42UQ6wgTQT5to= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.10.0 h1:NITDuUZO34mqtOwFWZiXo7yAHj7kf+XPE+EiKuCBNUI= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.10.0/go.mod h1:I6/fHT/fH460v09eg2gVrd8B/IqskhNdpcLH0WNO3QI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.4 h1:CRiQJ4E2RhfDdqbie1ZYDo8QtIo75Mk7oTdJSfwJTMQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.4/go.mod h1:XHgQ7Hz2WY2GAn//UXHofLfPXWh+s62MbMOijrg12Lw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.2.0 h1:3ADoioDMOtF4uiK59vCpplpCwugEU+v4ZFD29jDL3RQ= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.2.0/go.mod h1:BsCSJHx5DnDXIrOcqB8KN1/B+hXLG/bi4Y6Vjcx/x9E= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.5 h1:ixotxbfTCFpqbuwFv/RcZwyzhkxPSYDYEMcj4niB5Uk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.5/go.mod h1:R3sWUqPcfXSiF/LSFJhjyJmpg9uV6yP2yv3YZZjldVI= +github.com/aws/aws-sdk-go-v2 v1.16.0 h1:cBAYjiiexRAg9v2z9vb6IdxAa7ef4KCtjW7w7e3GxGo= +github.com/aws/aws-sdk-go-v2 v1.16.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI= +github.com/aws/aws-sdk-go-v2/config v1.15.1 
h1:hTIZFepYESYyowQUBo47lu69WSxsYqGUILY9Nu8+7pY= +github.com/aws/aws-sdk-go-v2/config v1.15.1/go.mod h1:MZHGbuW2WnqIOQQBKu2ZkhTjuutZSTnn56TDq4QyydE= +github.com/aws/aws-sdk-go-v2/credentials v1.11.0 h1:gc4Uhs80s60nmLon5Z4JXWinX2BkAGT0YROoUT8h8U4= +github.com/aws/aws-sdk-go-v2/credentials v1.11.0/go.mod h1:EdV1ZFgtZ4XM5RDHWcRWK8H+xW5duNVBqWj2oLu7tRo= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.1 h1:F9Je1nq5YXfMOv6451NHvMf6U0iTWeMnsG0MMIQoUmk= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.1/go.mod h1:Yph0XsTbQ5GGZ2+mO1a03P/SO9fdX3t1nejIp2tq79g= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.7 h1:KUErSJgdqmqAPBWAp6Zx9CjL0YXfytXJeXcsWnuCM1c= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.7/go.mod h1:oB9nZcxH1cGq7NPGurVJwxrO2vmJ9mmEBayCwcAlmT8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.1 h1:feVfa9eJonhJiss7g51ikjNB2DrUzbNZNvPL8pw/54k= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.1/go.mod h1:K4vz7lRYCyLYpYAMCLObODahFgARdD3YVa0MvQte9Co= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.8 h1:adr3PfiggFtqgFofAMUFCtdvwzpf3QxPES4ezK4M3iI= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.8/go.mod h1:wLbQYt36AJqaRZUQiCNXzbtkNigyPfKHrotHuIDiCy8= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.7.0 h1:4QAOB3KrvI1ApJK14sliGr3Ie2pjyvNypn/lfzDHfUw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.7.0/go.mod h1:K/qPe6AP2TGYv4l6n7c88zh9jWBDf6nHhvg1fx/EWfU= -github.com/aws/aws-sdk-go-v2/service/sso v1.9.0 h1:1qLJeQGBmNQW3mBNzK2CFmrQNmoXWrscPqsrAaU1aTA= -github.com/aws/aws-sdk-go-v2/service/sso v1.9.0/go.mod h1:vCV4glupK3tR7pw7ks7Y4jYRL86VvxS+g5qk04YeWrU= -github.com/aws/aws-sdk-go-v2/service/sts v1.14.0 h1:ksiDXhvNYg0D2/UFkLejsaz3LqpW5yjNQ8Nx9Sn2c0E= -github.com/aws/aws-sdk-go-v2/service/sts v1.14.0/go.mod h1:u0xMJKDvvfocRjiozsoZglVNXRG19043xzp3r2ivLIk= 
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.1 h1:B/SPX7J+Y0Yrcjv60Nhbh1gC2uBN47SfN8JYre6Mp4M= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.1/go.mod h1:2Hhr9Eh1gJzDatwACX/ozAZ/ljq5vzvPRu5cdu25tzc= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.1 h1:DyHctRsJIAWIvom1Itb4T84D2jwpIu+KIi3d0SFaswg= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.1/go.mod h1:CvFTucADIx7U/M44vjLs/ZttpQHdpxwK+62+dUGhDeY= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.1 h1:xsOtPAvHqhvQvBza5ohaUcfq1LceH2lZKMUGZJKiZiM= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.1/go.mod h1:Aq2/Qggh2oemSfyHH+EO4UBbgWG6zFCXLHYI4ILTY7w= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.10.0 h1:gsoZQMNHnX+PaghNw4ynPsyGP7aUCqx5sY2dlPQsZ0w= -github.com/aws/smithy-go v1.10.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/baidubce/bce-sdk-go v0.9.81 h1:n8KfThLG9fvGv3A+RtTt/jKhg/FPPRpo+iNnS2r+iPI= -github.com/baidubce/bce-sdk-go v0.9.81/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= +github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g= +github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= +github.com/baidubce/bce-sdk-go v0.9.111 h1:yGgtPpZYUZW4uoVorQ4xnuEgVeddACydlcJKW87MDV4= +github.com/baidubce/bce-sdk-go v0.9.111/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= @@ -198,6 +198,7 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod 
h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= @@ -225,6 +226,7 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -244,6 +246,7 @@ github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzA github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -284,8 +287,8 @@ github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8E github.com/efficientgo/e2e v0.12.1 h1:ZYNTf09ptlba0I3ZStYaF7gCbevWdalriiX7usOSiFM= github.com/efficientgo/e2e v0.12.1/go.mod h1:xDHUyIqAWyVWU29Lf+BaZoavW7xAbDEvTwHWWI/3bhk= github.com/efficientgo/tools/core v0.0.0-20210129205121-421d0828c9a6/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M= -github.com/efficientgo/tools/core v0.0.0-20210829154005-c7bad8450208 h1:jIALuFymwBqVsF32JhgzVsbCB6QsWvXqhetn8QgyrZ4= -github.com/efficientgo/tools/core v0.0.0-20210829154005-c7bad8450208/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M= +github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b h1:ZHiD4/yE4idlbqvAO6iYCOYRzOMRpxkW+FKasRA3tsQ= +github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M= github.com/efficientgo/tools/extkingpin v0.0.0-20210609125236-d73259166f20 h1:kM/ALyvAnTrwSB+nlKqoKaDnZbInp1YImZvW+gtHwc8= github.com/efficientgo/tools/extkingpin v0.0.0-20210609125236-d73259166f20/go.mod h1:ZV0utlglOczUWv3ih2AbqPSoLoFzdplUYxwV62eZi6Q= github.com/elastic/go-sysinfo v1.1.1 h1:ZVlaLDyhVkDfjwPGU55CQRCRolNpc7P0BbyhhQZQmMI= @@ -313,8 +316,8 @@ github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGE github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/structtag v1.1.0 h1:6j4mUV/ES2duvnAzKMFkN6/A5mCaNYPD3xfbAkLLOF8= -github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag 
v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/fgprof v0.9.2 h1:tAMHtWMyl6E0BimjVbFt7fieU6FpjttsZN7j0wT5blc= github.com/felixge/fgprof v0.9.2/go.mod h1:+VNi+ZXtHIQ6wIw6bUT8nXQRefQflWECoFyRealT5sg= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -570,6 +573,7 @@ github.com/googleinterns/cloud-operations-api-mock v0.0.0-20200709193332-a1e58c2 github.com/gophercloud/gophercloud v0.24.0 h1:jDsIMGJ1KZpAjYfQgGI2coNQj5Q83oPzuiGJRFWgMzw= github.com/gophercloud/gophercloud v0.24.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20220221023154-0b2280d3ff96/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -596,11 +600,13 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.2/go.mod h1:chrfS3YoLAlKTRE5cFWvCbt8uGAjshktT4PveTUpsFQ= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/api v1.13.0 
h1:2hnLQ0GjQvw7f3O61jMO8gbasZviZTrt9R8WzgiirHc= github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= @@ -646,6 +652,7 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= @@ -754,6 +761,7 @@ github.com/lovoo/gcloud-opentracing v0.3.0 h1:nAeKG70rIsog0TelcEtt6KU0Y1s5qXtsDL github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod 
h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= @@ -805,6 +813,7 @@ github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLT github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= +github.com/minio/minio-go/v7 v7.0.23/go.mod h1:ei5JjmxwHaMrgsMrn4U/+Nmg+d8MKS1U2DAn1ou4+Do= github.com/minio/minio-go/v7 v7.0.32-0.20220706200439-ef3e45ed9cdb h1:J7jRWqlD+K3Tp4YbLWcyBKiHoNRy49JR5HA4RetFrAY= github.com/minio/minio-go/v7 v7.0.32-0.20220706200439-ef3e45ed9cdb/go.mod h1:/sjRKkKIA75CKh1iu8E3qBy7ktBmCCDGII0zbXGwbUk= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= @@ -859,8 +868,10 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/ncw/swift v1.0.52 h1:ACF3JufDGgeKp/9mrDgQlEgS8kRYC4XKcuzj/8EJjQU= -github.com/ncw/swift v1.0.52/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/ncw/swift v1.0.53 h1:luHjjTNtekIEvHg5KdAFIBaH7bWfNkefwFnpDffSIks= +github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 
h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= @@ -919,6 +930,7 @@ github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0Mw github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -967,8 +979,9 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.35.0 h1:Eyr+Pw2VymWejHqCugNaQXkAi6KayVNxaHeu6khmFBE= github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= @@ -1015,6 +1028,7 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt github.com/sean-/seed 
v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8= github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= +github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= @@ -1032,6 +1046,7 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.2.1/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1 h1:oMnRNZXX5j85zso6xCPRNPtmAycat+WcoKbklScLDgQ= @@ -1045,10 +1060,13 @@ github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY52 github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/jwalterweatherman 
v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1067,12 +1085,15 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.194/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.194/go.mod h1:yrBKWhChnDqNz1xuXdSbWXG56XawEq0G5j1lg4VwBD4= -github.com/tencentyun/cos-go-sdk-v5 v0.7.31 h1:NujkkOKMJ3IFs1+trCwXOKRCIPQ8qI5Lxul9JkhTg6M= -github.com/tencentyun/cos-go-sdk-v5 v0.7.31/go.mod h1:4E4+bQ2gBVJcgEC9Cufwylio4mXOct2iu05WjgEBx1o= +github.com/tencentyun/cos-go-sdk-v5 v0.7.34 h1:xm+Pg+6m486y4eugRI7/E4WasbVmpY1hp9QBSRErgp8= +github.com/tencentyun/cos-go-sdk-v5 v0.7.34/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e 
h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= +github.com/thanos-io/objstore v0.0.0-20220715165016-ce338803bc1e h1:IhC7gP1u/uA+yf9RYwhRVBq+2+HV1xRGcrY/C6WBaPY= +github.com/thanos-io/objstore v0.0.0-20220715165016-ce338803bc1e/go.mod h1:Fp62HaCG8R+5ak2g6+foU/Jag9JhtmpftVpubyS3S5s= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= @@ -1222,6 +1243,7 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -1351,6 +1373,7 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= @@ -1480,8 +1503,9 @@ golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= @@ -1533,6 +1557,7 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1614,6 +1639,7 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= @@ -1761,6 +1787,7 @@ gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= diff --git a/internal/cortex/chunk/purger/tenant_deletion_api.go b/internal/cortex/chunk/purger/tenant_deletion_api.go index 
a2fabaa1dc..c19244665d 100644 --- a/internal/cortex/chunk/purger/tenant_deletion_api.go +++ b/internal/cortex/chunk/purger/tenant_deletion_api.go @@ -14,7 +14,7 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/bucket" cortex_tsdb "github.com/thanos-io/thanos/internal/cortex/storage/tsdb" diff --git a/internal/cortex/chunk/purger/tenant_deletion_api_test.go b/internal/cortex/chunk/purger/tenant_deletion_api_test.go index 03a9c8986b..1bca835a34 100644 --- a/internal/cortex/chunk/purger/tenant_deletion_api_test.go +++ b/internal/cortex/chunk/purger/tenant_deletion_api_test.go @@ -13,7 +13,7 @@ import ( "github.com/go-kit/log" "github.com/stretchr/testify/require" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/weaveworks/common/user" "github.com/thanos-io/thanos/internal/cortex/storage/tsdb" diff --git a/internal/cortex/querier/blocks_finder_bucket_index.go b/internal/cortex/querier/blocks_finder_bucket_index.go index c2d504cc24..3bdd03e70c 100644 --- a/internal/cortex/querier/blocks_finder_bucket_index.go +++ b/internal/cortex/querier/blocks_finder_bucket_index.go @@ -11,7 +11,7 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/bucket" "github.com/thanos-io/thanos/internal/cortex/storage/tsdb/bucketindex" diff --git a/internal/cortex/querier/blocks_finder_bucket_index_test.go b/internal/cortex/querier/blocks_finder_bucket_index_test.go index 81922a045d..5ac485d80b 100644 --- a/internal/cortex/querier/blocks_finder_bucket_index_test.go +++ b/internal/cortex/querier/blocks_finder_bucket_index_test.go @@ -14,7 +14,7 @@ import ( "github.com/oklog/ulid" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/tsdb/bucketindex" cortex_testutil "github.com/thanos-io/thanos/internal/cortex/storage/tsdb/testutil" diff --git a/internal/cortex/querier/blocks_finder_bucket_scan.go b/internal/cortex/querier/blocks_finder_bucket_scan.go index da2a60521d..03bd14bb45 100644 --- a/internal/cortex/querier/blocks_finder_bucket_scan.go +++ b/internal/cortex/querier/blocks_finder_bucket_scan.go @@ -19,9 +19,10 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/bucket" cortex_tsdb "github.com/thanos-io/thanos/internal/cortex/storage/tsdb" diff --git a/internal/cortex/querier/blocks_finder_bucket_scan_test.go b/internal/cortex/querier/blocks_finder_bucket_scan_test.go index d8e39c0782..b1efa3d4d4 100644 --- a/internal/cortex/querier/blocks_finder_bucket_scan_test.go +++ b/internal/cortex/querier/blocks_finder_bucket_scan_test.go @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/bucket" cortex_tsdb "github.com/thanos-io/thanos/internal/cortex/storage/tsdb" @@ -91,7 +91,7 @@ func TestBucketScanBlocksFinder_InitialScanFailure(t *testing.T) { s := NewBucketScanBlocksFinder(cfg, bucket, nil, log.NewNopLogger(), reg) defer func() { s.StopAsync() - s.AwaitTerminated(context.Background()) //nolint: errcheck + 
s.AwaitTerminated(context.Background()) // nolint: errcheck }() // Mock the storage to simulate a failure when reading objects. diff --git a/internal/cortex/storage/bucket/azure/bucket_client.go b/internal/cortex/storage/bucket/azure/bucket_client.go index 08c90ca03b..e6bfbc953d 100644 --- a/internal/cortex/storage/bucket/azure/bucket_client.go +++ b/internal/cortex/storage/bucket/azure/bucket_client.go @@ -6,9 +6,9 @@ package azure import ( "github.com/go-kit/log" "github.com/prometheus/common/model" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/azure" - yaml "gopkg.in/yaml.v2" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/providers/azure" + "gopkg.in/yaml.v2" ) func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucket, error) { diff --git a/internal/cortex/storage/bucket/bucket_util.go b/internal/cortex/storage/bucket/bucket_util.go index 0385bb88db..99844ab80b 100644 --- a/internal/cortex/storage/bucket/bucket_util.go +++ b/internal/cortex/storage/bucket/bucket_util.go @@ -9,7 +9,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) // DeletePrefix removes all objects with given prefix, recursively. 
diff --git a/internal/cortex/storage/bucket/bucket_util_test.go b/internal/cortex/storage/bucket/bucket_util_test.go index 156c5f9b95..1fc3fbd4b7 100644 --- a/internal/cortex/storage/bucket/bucket_util_test.go +++ b/internal/cortex/storage/bucket/bucket_util_test.go @@ -11,7 +11,7 @@ import ( "github.com/go-kit/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) func TestDeletePrefix(t *testing.T) { diff --git a/internal/cortex/storage/bucket/client.go b/internal/cortex/storage/bucket/client.go index 8dca49d3c8..f7661ea130 100644 --- a/internal/cortex/storage/bucket/client.go +++ b/internal/cortex/storage/bucket/client.go @@ -12,7 +12,7 @@ import ( "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/bucket/azure" "github.com/thanos-io/thanos/internal/cortex/storage/bucket/filesystem" diff --git a/internal/cortex/storage/bucket/client_mock.go b/internal/cortex/storage/bucket/client_mock.go index 481b41f05a..0fc612f4f0 100644 --- a/internal/cortex/storage/bucket/client_mock.go +++ b/internal/cortex/storage/bucket/client_mock.go @@ -12,7 +12,7 @@ import ( "time" "github.com/stretchr/testify/mock" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) var errObjectDoesNotExist = errors.New("object does not exist") diff --git a/internal/cortex/storage/bucket/filesystem/bucket_client.go b/internal/cortex/storage/bucket/filesystem/bucket_client.go index 910c866f7f..9db2de32e2 100644 --- a/internal/cortex/storage/bucket/filesystem/bucket_client.go +++ b/internal/cortex/storage/bucket/filesystem/bucket_client.go @@ -4,8 +4,8 @@ package filesystem import ( - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/filesystem" + "github.com/thanos-io/objstore" + 
"github.com/thanos-io/objstore/providers/filesystem" ) // NewBucketClient creates a new filesystem bucket client diff --git a/internal/cortex/storage/bucket/gcs/bucket_client.go b/internal/cortex/storage/bucket/gcs/bucket_client.go index 42568de409..d75438e811 100644 --- a/internal/cortex/storage/bucket/gcs/bucket_client.go +++ b/internal/cortex/storage/bucket/gcs/bucket_client.go @@ -7,9 +7,9 @@ import ( "context" "github.com/go-kit/log" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/gcs" - yaml "gopkg.in/yaml.v2" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/providers/gcs" + "gopkg.in/yaml.v2" ) // NewBucketClient creates a new GCS bucket client diff --git a/internal/cortex/storage/bucket/prefixed_bucket_client.go b/internal/cortex/storage/bucket/prefixed_bucket_client.go index 3c30bcd793..4ce908c4c8 100644 --- a/internal/cortex/storage/bucket/prefixed_bucket_client.go +++ b/internal/cortex/storage/bucket/prefixed_bucket_client.go @@ -8,7 +8,7 @@ import ( "io" "strings" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) type PrefixedBucketClient struct { diff --git a/internal/cortex/storage/bucket/s3/bucket_client.go b/internal/cortex/storage/bucket/s3/bucket_client.go index 06a307f6a6..0d49690c82 100644 --- a/internal/cortex/storage/bucket/s3/bucket_client.go +++ b/internal/cortex/storage/bucket/s3/bucket_client.go @@ -6,8 +6,8 @@ package s3 import ( "github.com/go-kit/log" "github.com/prometheus/common/model" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/s3" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/providers/s3" ) // NewBucketClient creates a new S3 bucket client diff --git a/internal/cortex/storage/bucket/s3/config.go b/internal/cortex/storage/bucket/s3/config.go index 07a88440ad..bf5450b32e 100644 --- a/internal/cortex/storage/bucket/s3/config.go +++ b/internal/cortex/storage/bucket/s3/config.go @@ 
-12,7 +12,7 @@ import ( "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/pkg/errors" - "github.com/thanos-io/thanos/pkg/objstore/s3" + "github.com/thanos-io/objstore/providers/s3" bucket_http "github.com/thanos-io/thanos/internal/cortex/storage/bucket/http" "github.com/thanos-io/thanos/internal/cortex/util" diff --git a/internal/cortex/storage/bucket/sse_bucket_client.go b/internal/cortex/storage/bucket/sse_bucket_client.go index e9f86a62db..570013240b 100644 --- a/internal/cortex/storage/bucket/sse_bucket_client.go +++ b/internal/cortex/storage/bucket/sse_bucket_client.go @@ -9,8 +9,8 @@ import ( "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/pkg/errors" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/s3" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/providers/s3" cortex_s3 "github.com/thanos-io/thanos/internal/cortex/storage/bucket/s3" ) diff --git a/internal/cortex/storage/bucket/sse_bucket_client_test.go b/internal/cortex/storage/bucket/sse_bucket_client_test.go index cde83a20a8..2add694de6 100644 --- a/internal/cortex/storage/bucket/sse_bucket_client_test.go +++ b/internal/cortex/storage/bucket/sse_bucket_client_test.go @@ -14,7 +14,7 @@ import ( "github.com/go-kit/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/bucket/s3" "github.com/thanos-io/thanos/internal/cortex/util/flagext" diff --git a/internal/cortex/storage/bucket/swift/bucket_client.go b/internal/cortex/storage/bucket/swift/bucket_client.go index 0fa699d08b..c4d3b270e0 100644 --- a/internal/cortex/storage/bucket/swift/bucket_client.go +++ b/internal/cortex/storage/bucket/swift/bucket_client.go @@ -6,9 +6,9 @@ package swift import ( "github.com/go-kit/log" "github.com/prometheus/common/model" - "github.com/thanos-io/thanos/pkg/objstore" - 
"github.com/thanos-io/thanos/pkg/objstore/swift" - yaml "gopkg.in/yaml.v2" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/providers/swift" + "gopkg.in/yaml.v2" ) // NewBucketClient creates a new Swift bucket client diff --git a/internal/cortex/storage/bucket/user_bucket_client.go b/internal/cortex/storage/bucket/user_bucket_client.go index af2cf33c10..986f2d0a61 100644 --- a/internal/cortex/storage/bucket/user_bucket_client.go +++ b/internal/cortex/storage/bucket/user_bucket_client.go @@ -4,7 +4,7 @@ package bucket import ( - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) // NewUserBucketClient returns a bucket client to use to access the storage on behalf of the provided user. diff --git a/internal/cortex/storage/tsdb/bucketindex/loader.go b/internal/cortex/storage/tsdb/bucketindex/loader.go index 71b1ec5a1c..4986f79f8f 100644 --- a/internal/cortex/storage/tsdb/bucketindex/loader.go +++ b/internal/cortex/storage/tsdb/bucketindex/loader.go @@ -13,7 +13,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "go.uber.org/atomic" "github.com/thanos-io/thanos/internal/cortex/storage/bucket" diff --git a/internal/cortex/storage/tsdb/bucketindex/markers.go b/internal/cortex/storage/tsdb/bucketindex/markers.go index 835b363d9f..d45d7601d4 100644 --- a/internal/cortex/storage/tsdb/bucketindex/markers.go +++ b/internal/cortex/storage/tsdb/bucketindex/markers.go @@ -13,9 +13,10 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/bucket" ) diff --git 
a/internal/cortex/storage/tsdb/bucketindex/markers_bucket_client.go b/internal/cortex/storage/tsdb/bucketindex/markers_bucket_client.go index b5216e7903..50f0a9e95a 100644 --- a/internal/cortex/storage/tsdb/bucketindex/markers_bucket_client.go +++ b/internal/cortex/storage/tsdb/bucketindex/markers_bucket_client.go @@ -10,8 +10,9 @@ import ( "io/ioutil" "path" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/objstore" ) // globalMarkersBucket is a bucket client which stores markers (eg. block deletion marks) in a per-tenant diff --git a/internal/cortex/storage/tsdb/bucketindex/markers_bucket_client_test.go b/internal/cortex/storage/tsdb/bucketindex/markers_bucket_client_test.go index d0f6ee0455..2a52725faf 100644 --- a/internal/cortex/storage/tsdb/bucketindex/markers_bucket_client_test.go +++ b/internal/cortex/storage/tsdb/bucketindex/markers_bucket_client_test.go @@ -14,8 +14,9 @@ import ( "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/bucket" cortex_testutil "github.com/thanos-io/thanos/internal/cortex/storage/tsdb/testutil" diff --git a/internal/cortex/storage/tsdb/bucketindex/markers_test.go b/internal/cortex/storage/tsdb/bucketindex/markers_test.go index 5c9f09fd41..7f1fb7a29c 100644 --- a/internal/cortex/storage/tsdb/bucketindex/markers_test.go +++ b/internal/cortex/storage/tsdb/bucketindex/markers_test.go @@ -15,8 +15,9 @@ import ( "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" cortex_testutil 
"github.com/thanos-io/thanos/internal/cortex/storage/tsdb/testutil" ) diff --git a/internal/cortex/storage/tsdb/bucketindex/storage.go b/internal/cortex/storage/tsdb/bucketindex/storage.go index 9fce267235..dacb92c2bb 100644 --- a/internal/cortex/storage/tsdb/bucketindex/storage.go +++ b/internal/cortex/storage/tsdb/bucketindex/storage.go @@ -11,7 +11,7 @@ import ( "github.com/go-kit/log" "github.com/pkg/errors" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/bucket" "github.com/thanos-io/thanos/internal/cortex/util/runutil" diff --git a/internal/cortex/storage/tsdb/bucketindex/updater.go b/internal/cortex/storage/tsdb/bucketindex/updater.go index 48f4993908..b952af3950 100644 --- a/internal/cortex/storage/tsdb/bucketindex/updater.go +++ b/internal/cortex/storage/tsdb/bucketindex/updater.go @@ -14,9 +14,10 @@ import ( "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/pkg/errors" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/bucket" util_log "github.com/thanos-io/thanos/internal/cortex/util/log" diff --git a/internal/cortex/storage/tsdb/bucketindex/updater_test.go b/internal/cortex/storage/tsdb/bucketindex/updater_test.go index 56248be180..611bbd77a0 100644 --- a/internal/cortex/storage/tsdb/bucketindex/updater_test.go +++ b/internal/cortex/storage/tsdb/bucketindex/updater_test.go @@ -16,9 +16,10 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/bucket" 
"github.com/thanos-io/thanos/internal/cortex/storage/tsdb/testutil" diff --git a/internal/cortex/storage/tsdb/caching_bucket.go b/internal/cortex/storage/tsdb/caching_bucket.go index 6c3053be01..aed7d8941b 100644 --- a/internal/cortex/storage/tsdb/caching_bucket.go +++ b/internal/cortex/storage/tsdb/caching_bucket.go @@ -16,11 +16,12 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/cache" "github.com/thanos-io/thanos/pkg/cacheutil" - "github.com/thanos-io/thanos/pkg/objstore" storecache "github.com/thanos-io/thanos/pkg/store/cache" ) diff --git a/internal/cortex/storage/tsdb/tenant_deletion_mark.go b/internal/cortex/storage/tsdb/tenant_deletion_mark.go index 0d1b3829ea..14d8dcb003 100644 --- a/internal/cortex/storage/tsdb/tenant_deletion_mark.go +++ b/internal/cortex/storage/tsdb/tenant_deletion_mark.go @@ -12,7 +12,7 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/bucket" util_log "github.com/thanos-io/thanos/internal/cortex/util/log" diff --git a/internal/cortex/storage/tsdb/tenant_deletion_mark_test.go b/internal/cortex/storage/tsdb/tenant_deletion_mark_test.go index 68da7400aa..28bd0bea2e 100644 --- a/internal/cortex/storage/tsdb/tenant_deletion_mark_test.go +++ b/internal/cortex/storage/tsdb/tenant_deletion_mark_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/stretchr/testify/require" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) func TestTenantDeletionMarkExists(t *testing.T) { diff --git a/internal/cortex/storage/tsdb/testutil/block_mock.go b/internal/cortex/storage/tsdb/testutil/block_mock.go index 14f13cec68..f24df0c4d5 100644 --- 
a/internal/cortex/storage/tsdb/testutil/block_mock.go +++ b/internal/cortex/storage/tsdb/testutil/block_mock.go @@ -15,8 +15,9 @@ import ( "github.com/oklog/ulid" "github.com/prometheus/prometheus/tsdb" "github.com/stretchr/testify/require" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" ) func MockStorageBlock(t testing.TB, bucket objstore.Bucket, userID string, minT, maxT int64) tsdb.BlockMeta { diff --git a/internal/cortex/storage/tsdb/testutil/objstore.go b/internal/cortex/storage/tsdb/testutil/objstore.go index 86fbcf287b..a5e73bfe77 100644 --- a/internal/cortex/storage/tsdb/testutil/objstore.go +++ b/internal/cortex/storage/tsdb/testutil/objstore.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/stretchr/testify/require" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/bucket/filesystem" ) diff --git a/internal/cortex/storage/tsdb/users_scanner.go b/internal/cortex/storage/tsdb/users_scanner.go index 0d2eac686d..fec3571a1d 100644 --- a/internal/cortex/storage/tsdb/users_scanner.go +++ b/internal/cortex/storage/tsdb/users_scanner.go @@ -9,7 +9,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) // AllUsers returns true to each call and should be used whenever the UsersScanner should not filter out diff --git a/internal/cortex/storegateway/bucket_index_metadata_fetcher.go b/internal/cortex/storegateway/bucket_index_metadata_fetcher.go index d4d123fac9..9535ac269f 100644 --- a/internal/cortex/storegateway/bucket_index_metadata_fetcher.go +++ b/internal/cortex/storegateway/bucket_index_metadata_fetcher.go @@ -12,9 +12,10 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" 
"github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/bucket" "github.com/thanos-io/thanos/internal/cortex/storage/tsdb/bucketindex" diff --git a/internal/cortex/storegateway/bucket_stores.go b/internal/cortex/storegateway/bucket_stores.go index 0a465a88d0..9e1cc463f3 100644 --- a/internal/cortex/storegateway/bucket_stores.go +++ b/internal/cortex/storegateway/bucket_stores.go @@ -22,18 +22,19 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/thanos-io/objstore" + "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/logging" + "google.golang.org/grpc/metadata" + "github.com/thanos-io/thanos/pkg/block" thanos_metadata "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/gate" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/pool" "github.com/thanos-io/thanos/pkg/store" storecache "github.com/thanos-io/thanos/pkg/store/cache" "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/logging" - "google.golang.org/grpc/metadata" "github.com/thanos-io/thanos/internal/cortex/storage/bucket" "github.com/thanos-io/thanos/internal/cortex/storage/tsdb" diff --git a/internal/cortex/storegateway/bucket_stores_test.go b/internal/cortex/storegateway/bucket_stores_test.go index 7612a0ef64..12e6728d0a 100644 --- a/internal/cortex/storegateway/bucket_stores_test.go +++ b/internal/cortex/storegateway/bucket_stores_test.go @@ -27,15 +27,16 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/thanos-io/objstore" + "github.com/weaveworks/common/logging" + "go.uber.org/atomic" + 
"google.golang.org/grpc/metadata" + thanos_metadata "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/store" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/weaveworks/common/logging" - "go.uber.org/atomic" - "google.golang.org/grpc/metadata" "github.com/thanos-io/thanos/internal/cortex/storage/bucket" "github.com/thanos-io/thanos/internal/cortex/storage/bucket/filesystem" diff --git a/internal/cortex/storegateway/gateway.go b/internal/cortex/storegateway/gateway.go index b639e2e00e..e77ce3cdd1 100644 --- a/internal/cortex/storegateway/gateway.go +++ b/internal/cortex/storegateway/gateway.go @@ -15,10 +15,11 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/thanos-io/objstore" + "github.com/weaveworks/common/logging" + "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/weaveworks/common/logging" "github.com/thanos-io/thanos/internal/cortex/ring" "github.com/thanos-io/thanos/internal/cortex/ring/kv" diff --git a/internal/cortex/storegateway/gateway_test.go b/internal/cortex/storegateway/gateway_test.go index 3a59da6e32..1a50ffc334 100644 --- a/internal/cortex/storegateway/gateway_test.go +++ b/internal/cortex/storegateway/gateway_test.go @@ -29,13 +29,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/thanos-io/objstore" + "google.golang.org/grpc/status" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/store/labelpb" 
"github.com/thanos-io/thanos/pkg/store/storepb" - "google.golang.org/grpc/status" "github.com/thanos-io/thanos/internal/cortex/ring" "github.com/thanos-io/thanos/internal/cortex/ring/kv/consul" diff --git a/internal/cortex/storegateway/metadata_fetcher_filters.go b/internal/cortex/storegateway/metadata_fetcher_filters.go index 91c217345f..6576311844 100644 --- a/internal/cortex/storegateway/metadata_fetcher_filters.go +++ b/internal/cortex/storegateway/metadata_fetcher_filters.go @@ -9,10 +9,11 @@ import ( "github.com/go-kit/log" "github.com/oklog/ulid" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/tsdb/bucketindex" ) diff --git a/internal/cortex/storegateway/metadata_fetcher_filters_test.go b/internal/cortex/storegateway/metadata_fetcher_filters_test.go index ee2ce18dae..167209eaf8 100644 --- a/internal/cortex/storegateway/metadata_fetcher_filters_test.go +++ b/internal/cortex/storegateway/metadata_fetcher_filters_test.go @@ -17,10 +17,11 @@ import ( promtest "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/internal/cortex/storage/bucket" "github.com/thanos-io/thanos/internal/cortex/storage/tsdb/bucketindex" diff --git a/internal/cortex/storegateway/sharding_strategy.go b/internal/cortex/storegateway/sharding_strategy.go index b8de349d46..c9d054c42b 100644 --- a/internal/cortex/storegateway/sharding_strategy.go +++ b/internal/cortex/storegateway/sharding_strategy.go @@ -9,10 +9,11 @@ import ( "github.com/go-kit/log" 
"github.com/go-kit/log/level" "github.com/oklog/ulid" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/internal/cortex/ring" cortex_tsdb "github.com/thanos-io/thanos/internal/cortex/storage/tsdb" diff --git a/pkg/api/blocks/v1.go b/pkg/api/blocks/v1.go index 858ecdb49a..d546f78468 100644 --- a/pkg/api/blocks/v1.go +++ b/pkg/api/blocks/v1.go @@ -14,13 +14,13 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/route" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/api" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" extpromhttp "github.com/thanos-io/thanos/pkg/extprom/http" "github.com/thanos-io/thanos/pkg/logging" - "github.com/thanos-io/thanos/pkg/objstore" ) // BlocksAPI is a very simple API used by Thanos Block Viewer. 
diff --git a/pkg/api/blocks/v1_test.go b/pkg/api/blocks/v1_test.go index c2a349da76..ec0ba0f598 100644 --- a/pkg/api/blocks/v1_test.go +++ b/pkg/api/blocks/v1_test.go @@ -21,11 +21,11 @@ import ( "github.com/oklog/ulid" "github.com/prometheus/common/route" "github.com/prometheus/prometheus/model/labels" + "github.com/thanos-io/objstore" baseAPI "github.com/thanos-io/thanos/pkg/api" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/pkg/block/block.go b/pkg/block/block.go index 0b6c201434..8889ce29da 100644 --- a/pkg/block/block.go +++ b/pkg/block/block.go @@ -22,9 +22,9 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/runutil" ) diff --git a/pkg/block/block_test.go b/pkg/block/block_test.go index 066f95616f..86cbb60ff8 100644 --- a/pkg/block/block_test.go +++ b/pkg/block/block_test.go @@ -23,9 +23,9 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" promtest "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/prometheus/model/labels" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/pkg/block/fetcher.go b/pkg/block/fetcher.go index 24967c05c4..78b9ae324b 100644 --- a/pkg/block/fetcher.go +++ b/pkg/block/fetcher.go @@ -23,6 +23,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" + "github.com/thanos-io/objstore" 
"golang.org/x/sync/errgroup" "gopkg.in/yaml.v2" @@ -30,7 +31,6 @@ import ( "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/runutil" ) diff --git a/pkg/block/fetcher_test.go b/pkg/block/fetcher_test.go index a826856b04..c041d73c24 100644 --- a/pkg/block/fetcher_test.go +++ b/pkg/block/fetcher_test.go @@ -24,12 +24,12 @@ import ( "github.com/prometheus/client_golang/prometheus" promtest "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/objtesting" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/objtesting" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/block/indexheader/binary_reader.go b/pkg/block/indexheader/binary_reader.go index a945d53fa2..7ba2e8c2ac 100644 --- a/pkg/block/indexheader/binary_reader.go +++ b/pkg/block/indexheader/binary_reader.go @@ -26,9 +26,9 @@ import ( "github.com/prometheus/prometheus/tsdb/encoding" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/index" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/runutil" ) diff --git a/pkg/block/indexheader/header_test.go b/pkg/block/indexheader/header_test.go index a53842077e..29700a61c3 100644 --- a/pkg/block/indexheader/header_test.go +++ b/pkg/block/indexheader/header_test.go @@ -20,11 +20,11 @@ import ( "github.com/prometheus/prometheus/tsdb/encoding" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/index" + "github.com/thanos-io/objstore" + 
"github.com/thanos-io/objstore/providers/filesystem" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/filesystem" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/pkg/block/indexheader/lazy_binary_reader.go b/pkg/block/indexheader/lazy_binary_reader.go index 30069e2cd7..13bf476812 100644 --- a/pkg/block/indexheader/lazy_binary_reader.go +++ b/pkg/block/indexheader/lazy_binary_reader.go @@ -17,10 +17,10 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/tsdb/index" + "github.com/thanos-io/objstore" "go.uber.org/atomic" "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/objstore" ) var ( diff --git a/pkg/block/indexheader/lazy_binary_reader_test.go b/pkg/block/indexheader/lazy_binary_reader_test.go index 1fcb65a631..ac105b815d 100644 --- a/pkg/block/indexheader/lazy_binary_reader_test.go +++ b/pkg/block/indexheader/lazy_binary_reader_test.go @@ -16,10 +16,10 @@ import ( "github.com/oklog/ulid" promtestutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/prometheus/model/labels" + "github.com/thanos-io/objstore/providers/filesystem" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore/filesystem" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/pkg/block/indexheader/reader_pool.go b/pkg/block/indexheader/reader_pool.go index c1742d5d4c..535ee952da 100644 --- a/pkg/block/indexheader/reader_pool.go +++ b/pkg/block/indexheader/reader_pool.go @@ -13,8 +13,7 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - - 
"github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) // ReaderPoolMetrics holds metrics tracked by ReaderPool. diff --git a/pkg/block/indexheader/reader_pool_test.go b/pkg/block/indexheader/reader_pool_test.go index 2beaecf9ff..fa769167d2 100644 --- a/pkg/block/indexheader/reader_pool_test.go +++ b/pkg/block/indexheader/reader_pool_test.go @@ -14,10 +14,10 @@ import ( "github.com/go-kit/log" promtestutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/prometheus/model/labels" + "github.com/thanos-io/objstore/providers/filesystem" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore/filesystem" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/pkg/block/metadata/markers.go b/pkg/block/metadata/markers.go index 81480652fc..a93bd9a0ba 100644 --- a/pkg/block/metadata/markers.go +++ b/pkg/block/metadata/markers.go @@ -12,8 +12,8 @@ import ( "github.com/go-kit/log" "github.com/oklog/ulid" "github.com/pkg/errors" + "github.com/thanos-io/objstore" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/runutil" ) diff --git a/pkg/block/metadata/markers_test.go b/pkg/block/metadata/markers_test.go index 1da73e2c98..c3d5105494 100644 --- a/pkg/block/metadata/markers_test.go +++ b/pkg/block/metadata/markers_test.go @@ -16,9 +16,9 @@ import ( "github.com/go-kit/log" "github.com/oklog/ulid" "github.com/pkg/errors" + "github.com/thanos-io/objstore" "go.uber.org/goleak" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/cache/caching_bucket_config.go b/pkg/cache/caching_bucket_config.go index d8a22db297..3422783d6b 100644 --- a/pkg/cache/caching_bucket_config.go +++ b/pkg/cache/caching_bucket_config.go @@ -6,7 +6,7 @@ package cache import ( "time" - "github.com/thanos-io/thanos/pkg/objstore" + 
"github.com/thanos-io/objstore" ) // Codec for encoding and decoding results of Iter call. diff --git a/pkg/cache/groupcache.go b/pkg/cache/groupcache.go index 063c768c94..5e43622d12 100644 --- a/pkg/cache/groupcache.go +++ b/pkg/cache/groupcache.go @@ -20,16 +20,17 @@ import ( "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/route" + "github.com/thanos-io/objstore" + "github.com/vimeo/galaxycache" + galaxyhttp "github.com/vimeo/galaxycache/http" + "golang.org/x/net/http2" + "gopkg.in/yaml.v2" + "github.com/thanos-io/thanos/pkg/discovery/dns" "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/store/cache/cachekey" - "github.com/vimeo/galaxycache" - galaxyhttp "github.com/vimeo/galaxycache/http" - "golang.org/x/net/http2" - "gopkg.in/yaml.v2" ) type Groupcache struct { diff --git a/pkg/cache/groupcache_test.go b/pkg/cache/groupcache_test.go index 06df4cf642..93d4197aff 100644 --- a/pkg/cache/groupcache_test.go +++ b/pkg/cache/groupcache_test.go @@ -17,16 +17,17 @@ import ( "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/route" + "github.com/thanos-io/objstore" + galaxyhttp "github.com/vimeo/galaxycache/http" + "golang.org/x/net/http2" + "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/discovery/dns" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/prober" httpserver "github.com/thanos-io/thanos/pkg/server/http" "github.com/thanos-io/thanos/pkg/store/cache/cachekey" "github.com/thanos-io/thanos/pkg/testutil" - galaxyhttp "github.com/vimeo/galaxycache/http" - "golang.org/x/net/http2" ) const basePath = `/_groupcache/` diff --git a/pkg/compact/blocks_cleaner.go b/pkg/compact/blocks_cleaner.go index 
6b86c15f6c..5ae9120a8c 100644 --- a/pkg/compact/blocks_cleaner.go +++ b/pkg/compact/blocks_cleaner.go @@ -11,9 +11,9 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/objstore" ) // BlocksCleaner is a struct that deletes blocks from bucket which are marked for deletion. diff --git a/pkg/compact/clean.go b/pkg/compact/clean.go index 9a7cce5b92..a398ba9e3c 100644 --- a/pkg/compact/clean.go +++ b/pkg/compact/clean.go @@ -11,9 +11,9 @@ import ( "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/objstore" ) const ( diff --git a/pkg/compact/clean_test.go b/pkg/compact/clean_test.go index 1493846ae5..ce88e3365f 100644 --- a/pkg/compact/clean_test.go +++ b/pkg/compact/clean_test.go @@ -16,10 +16,10 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" promtest "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/compact/compact.go b/pkg/compact/compact.go index e9a5dcdd34..549cf61d8a 100644 --- a/pkg/compact/compact.go +++ b/pkg/compact/compact.go @@ -23,6 +23,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/objstore" "golang.org/x/sync/errgroup" "github.com/thanos-io/thanos/pkg/block" @@ -30,7 +31,6 @@ import ( "github.com/thanos-io/thanos/pkg/compact/downsample" "github.com/thanos-io/thanos/pkg/errutil" 
"github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/tracing" ) diff --git a/pkg/compact/compact_e2e_test.go b/pkg/compact/compact_e2e_test.go index 8f12ed5f69..649470b666 100644 --- a/pkg/compact/compact_e2e_test.go +++ b/pkg/compact/compact_e2e_test.go @@ -24,12 +24,12 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/objtesting" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/dedup" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/objtesting" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/pkg/compact/compact_test.go b/pkg/compact/compact_test.go index 889c39a97c..927738964e 100644 --- a/pkg/compact/compact_test.go +++ b/pkg/compact/compact_test.go @@ -20,12 +20,12 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" promtestutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact/downsample" "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/compact/planner.go b/pkg/compact/planner.go index d316994f54..5c2a93df8d 100644 --- a/pkg/compact/planner.go +++ b/pkg/compact/planner.go @@ -13,10 +13,10 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block" 
"github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" ) type tsdbBasedPlanner struct { diff --git a/pkg/compact/planner_test.go b/pkg/compact/planner_test.go index a54ca1af0f..11690cd70d 100644 --- a/pkg/compact/planner_test.go +++ b/pkg/compact/planner_test.go @@ -19,10 +19,10 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" promtest "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/compact/retention.go b/pkg/compact/retention.go index 691fd8532d..40ea0a1a72 100644 --- a/pkg/compact/retention.go +++ b/pkg/compact/retention.go @@ -13,10 +13,10 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" ) // ApplyRetentionPolicyByResolution removes blocks depending on the specified retentionByResolution based on blocks MaxTime. 
diff --git a/pkg/compact/retention_test.go b/pkg/compact/retention_test.go index cc5cf4d215..6b9efff911 100644 --- a/pkg/compact/retention_test.go +++ b/pkg/compact/retention_test.go @@ -18,11 +18,11 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" promtest "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/objstore/azure/azure.go b/pkg/objstore/azure/azure.go deleted file mode 100644 index 0f7cb1ff6b..0000000000 --- a/pkg/objstore/azure/azure.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package azure - -import ( - "context" - "io" - "os" - "strings" - "testing" - "time" - - blob "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/exthttp" - "github.com/thanos-io/thanos/pkg/objstore" -) - -const ( - azureDefaultEndpoint = "blob.core.windows.net" -) - -// DefaultConfig for Azure objstore client. -var DefaultConfig = Config{ - HTTPConfig: exthttp.HTTPConfig{ - IdleConnTimeout: model.Duration(90 * time.Second), - ResponseHeaderTimeout: model.Duration(2 * time.Minute), - TLSHandshakeTimeout: model.Duration(10 * time.Second), - ExpectContinueTimeout: model.Duration(1 * time.Second), - MaxIdleConns: 100, - MaxIdleConnsPerHost: 100, - MaxConnsPerHost: 0, - DisableCompression: false, - }, -} - -// Config Azure storage configuration. 
-type Config struct { - StorageAccountName string `yaml:"storage_account"` - StorageAccountKey string `yaml:"storage_account_key"` - ContainerName string `yaml:"container"` - Endpoint string `yaml:"endpoint"` - MaxRetries int `yaml:"max_retries"` - MSIResource string `yaml:"msi_resource"` - UserAssignedID string `yaml:"user_assigned_id"` - PipelineConfig PipelineConfig `yaml:"pipeline_config"` - ReaderConfig ReaderConfig `yaml:"reader_config"` - HTTPConfig exthttp.HTTPConfig `yaml:"http_config"` -} - -type ReaderConfig struct { - MaxRetryRequests int `yaml:"max_retry_requests"` -} - -type PipelineConfig struct { - MaxTries int32 `yaml:"max_tries"` - TryTimeout model.Duration `yaml:"try_timeout"` - RetryDelay model.Duration `yaml:"retry_delay"` - MaxRetryDelay model.Duration `yaml:"max_retry_delay"` -} - -// Bucket implements the store.Bucket interface against Azure APIs. -type Bucket struct { - logger log.Logger - containerURL blob.ContainerURL - config *Config -} - -// Validate checks to see if any of the config options are set. 
-func (conf *Config) validate() error { - - var errMsg []string - if conf.MSIResource == "" { - if conf.UserAssignedID == "" { - if conf.StorageAccountName == "" || - conf.StorageAccountKey == "" { - errMsg = append(errMsg, "invalid Azure storage configuration") - } - if conf.StorageAccountName == "" && conf.StorageAccountKey != "" { - errMsg = append(errMsg, "no Azure storage_account specified while storage_account_key is present in config file; both should be present") - } - if conf.StorageAccountName != "" && conf.StorageAccountKey == "" { - errMsg = append(errMsg, "no Azure storage_account_key specified while storage_account is present in config file; both should be present") - } - } else { - if conf.StorageAccountName == "" { - errMsg = append(errMsg, "UserAssignedID is configured but storage account name is missing") - } - if conf.StorageAccountKey != "" { - errMsg = append(errMsg, "UserAssignedID is configured but storage account key is used") - } - } - } else { - if conf.StorageAccountName == "" { - errMsg = append(errMsg, "MSI resource is configured but storage account name is missing") - } - if conf.StorageAccountKey != "" { - errMsg = append(errMsg, "MSI resource is configured but storage account key is used") - } - } - - if conf.ContainerName == "" { - errMsg = append(errMsg, "no Azure container specified") - } - if conf.Endpoint == "" { - conf.Endpoint = azureDefaultEndpoint - } - - if conf.PipelineConfig.MaxTries < 0 { - errMsg = append(errMsg, "The value of max_tries must be greater than or equal to 0 in the config file") - } - - if conf.ReaderConfig.MaxRetryRequests < 0 { - errMsg = append(errMsg, "The value of max_retry_requests must be greater than or equal to 0 in the config file") - } - - if len(errMsg) > 0 { - return errors.New(strings.Join(errMsg, ", ")) - } - - return nil -} - -// HTTPConfig exists here only because Cortex depends on it, and we depend on Cortex. -// Deprecated. 
-// TODO(bwplotka): Remove it, once we remove Cortex cycle dep, or Cortex stops using this. -type HTTPConfig = exthttp.HTTPConfig - -// parseConfig unmarshals a buffer into a Config with default values. -func parseConfig(conf []byte) (Config, error) { - config := DefaultConfig - if err := yaml.UnmarshalStrict(conf, &config); err != nil { - return Config{}, err - } - - // If we don't have config specific retry values but we do have the generic MaxRetries. - // This is for backwards compatibility but also ease of configuration. - if config.MaxRetries > 0 { - if config.PipelineConfig.MaxTries == 0 { - config.PipelineConfig.MaxTries = int32(config.MaxRetries) - } - if config.ReaderConfig.MaxRetryRequests == 0 { - config.ReaderConfig.MaxRetryRequests = config.MaxRetries - } - } - - return config, nil -} - -// NewBucket returns a new Bucket using the provided Azure config. -func NewBucket(logger log.Logger, azureConfig []byte, component string) (*Bucket, error) { - level.Debug(logger).Log("msg", "creating new Azure bucket connection", "component", component) - - conf, err := parseConfig(azureConfig) - if err != nil { - return nil, err - } - - return NewBucketWithConfig(logger, conf, component) -} - -// NewBucketWithConfig returns a new Bucket using the provided Azure config struct. 
-func NewBucketWithConfig(logger log.Logger, conf Config, component string) (*Bucket, error) { - if err := conf.validate(); err != nil { - return nil, err - } - - ctx := context.Background() - container, err := createContainer(ctx, logger, conf) - if err != nil { - ret, ok := err.(blob.StorageError) - if !ok { - return nil, errors.Wrapf(err, "Azure API return unexpected error: %T\n", err) - } - if ret.ServiceCode() == "ContainerAlreadyExists" { - level.Debug(logger).Log("msg", "Getting connection to existing Azure blob container", "container", conf.ContainerName) - container, err = getContainer(ctx, logger, conf) - if err != nil { - return nil, errors.Wrapf(err, "cannot get existing Azure blob container: %s", container) - } - } else { - return nil, errors.Wrapf(err, "error creating Azure blob container: %s", container) - } - } else { - level.Info(logger).Log("msg", "Azure blob container successfully created", "address", container) - } - - bkt := &Bucket{ - logger: logger, - containerURL: container, - config: &conf, - } - return bkt, nil -} - -// Iter calls f for each entry in the given directory. The argument to f is the full -// object name including the prefix of the inspected directory. -func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { - prefix := dir - if prefix != "" && !strings.HasSuffix(prefix, DirDelim) { - prefix += DirDelim - } - - marker := blob.Marker{} - params := objstore.ApplyIterOptions(options...) 
- listOptions := blob.ListBlobsSegmentOptions{Prefix: prefix} - - for i := 1; ; i++ { - var ( - blobPrefixes []blob.BlobPrefix - blobItems []blob.BlobItemInternal - ) - - if params.Recursive { - list, err := b.containerURL.ListBlobsFlatSegment(ctx, marker, listOptions) - if err != nil { - return errors.Wrapf(err, "cannot list flat blobs with prefix %s (iteration #%d)", dir, i) - } - - marker = list.NextMarker - blobItems = list.Segment.BlobItems - blobPrefixes = nil - } else { - list, err := b.containerURL.ListBlobsHierarchySegment(ctx, marker, DirDelim, listOptions) - if err != nil { - return errors.Wrapf(err, "cannot list hierarchy blobs with prefix %s (iteration #%d)", dir, i) - } - - marker = list.NextMarker - blobItems = list.Segment.BlobItems - blobPrefixes = list.Segment.BlobPrefixes - } - - var listNames []string - - for _, blob := range blobItems { - listNames = append(listNames, blob.Name) - } - - for _, blobPrefix := range blobPrefixes { - listNames = append(listNames, blobPrefix.Name) - } - - for _, name := range listNames { - if err := f(name); err != nil { - return err - } - } - - // Continue iterating if we are not done. - if !marker.NotDone() { - break - } - - level.Debug(b.logger).Log("msg", "requesting next iteration of listing blobs", "last_entries", len(listNames), "iteration", i) - } - - return nil -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. 
-func (b *Bucket) IsObjNotFoundErr(err error) bool { - if err == nil { - return false - } - - errorCode := parseError(err.Error()) - if errorCode == "InvalidUri" || errorCode == "BlobNotFound" { - return true - } - - return false -} - -func (b *Bucket) getBlobReader(ctx context.Context, name string, offset, length int64) (io.ReadCloser, error) { - level.Debug(b.logger).Log("msg", "getting blob", "blob", name, "offset", offset, "length", length) - if name == "" { - return nil, errors.New("X-Ms-Error-Code: [EmptyContainerName]") - } - exists, err := b.Exists(ctx, name) - if err != nil { - return nil, errors.Wrapf(err, "cannot get blob reader: %s", name) - } - - if !exists { - return nil, errors.New("X-Ms-Error-Code: [BlobNotFound]") - } - - blobURL := getBlobURL(name, b.containerURL) - if err != nil { - return nil, errors.Wrapf(err, "cannot get Azure blob URL, address: %s", name) - } - - dl, err := blobURL.Download(ctx, offset, length, blob.BlobAccessConditions{}, false, blob.ClientProvidedKeyOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "cannot download Azure blob, address: %s", name) - } - - return dl.Body(blob.RetryReaderOptions{ - MaxRetryRequests: b.config.ReaderConfig.MaxRetryRequests, - }), nil -} - -// Get returns a reader for the given object name. -func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return b.getBlobReader(ctx, name, 0, blob.CountToEnd) -} - -// GetRange returns a new range reader for the given object name and range. -func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - return b.getBlobReader(ctx, name, off, length) -} - -// Attributes returns information about the specified object. 
-func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { - blobURL := getBlobURL(name, b.containerURL) - - props, err := blobURL.GetProperties(ctx, blob.BlobAccessConditions{}, blob.ClientProvidedKeyOptions{}) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - return objstore.ObjectAttributes{ - Size: props.ContentLength(), - LastModified: props.LastModified(), - }, nil -} - -// Exists checks if the given object exists. -func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) { - level.Debug(b.logger).Log("msg", "check if blob exists", "blob", name) - blobURL := getBlobURL(name, b.containerURL) - - if _, err := blobURL.GetProperties(ctx, blob.BlobAccessConditions{}, blob.ClientProvidedKeyOptions{}); err != nil { - if b.IsObjNotFoundErr(err) { - return false, nil - } - return false, errors.Wrapf(err, "cannot get properties for Azure blob, address: %s", name) - } - - return true, nil -} - -// Upload the contents of the reader as an object into the bucket. -func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { - level.Debug(b.logger).Log("msg", "Uploading blob", "blob", name) - blobURL := getBlobURL(name, b.containerURL) - - if _, err := blob.UploadStreamToBlockBlob(ctx, r, blobURL, - blob.UploadStreamToBlockBlobOptions{ - BufferSize: 3 * 1024 * 1024, - MaxBuffers: 4, - }, - ); err != nil { - return errors.Wrapf(err, "cannot upload Azure blob, address: %s", name) - } - return nil -} - -// Delete removes the object with the given name. -func (b *Bucket) Delete(ctx context.Context, name string) error { - level.Debug(b.logger).Log("msg", "Deleting blob", "blob", name) - blobURL := getBlobURL(name, b.containerURL) - - if _, err := blobURL.Delete(ctx, blob.DeleteSnapshotsOptionInclude, blob.BlobAccessConditions{}); err != nil { - return errors.Wrapf(err, "error deleting blob, address: %s", name) - } - return nil -} - -// Name returns Azure container name. 
-func (b *Bucket) Name() string { - return b.config.ContainerName -} - -// NewTestBucket creates test bkt client that before returning creates temporary bucket. -// In a close function it empties and deletes the bucket. -func NewTestBucket(t testing.TB, component string) (objstore.Bucket, func(), error) { - t.Log("Using test Azure bucket.") - - conf := &Config{ - StorageAccountName: os.Getenv("AZURE_STORAGE_ACCOUNT"), - StorageAccountKey: os.Getenv("AZURE_STORAGE_ACCESS_KEY"), - ContainerName: objstore.CreateTemporaryTestBucketName(t), - } - - bc, err := yaml.Marshal(conf) - if err != nil { - return nil, nil, err - } - - ctx := context.Background() - - bkt, err := NewBucket(log.NewNopLogger(), bc, component) - if err != nil { - t.Errorf("Cannot create Azure storage container:") - return nil, nil, err - } - - return bkt, func() { - objstore.EmptyBucket(t, ctx, bkt) - err = bkt.Delete(ctx, conf.ContainerName) - if err != nil { - t.Logf("deleting bucket failed: %s", err) - } - }, nil -} - -// Close bucket. -func (b *Bucket) Close() error { - return nil -} diff --git a/pkg/objstore/azure/azure_test.go b/pkg/objstore/azure/azure_test.go deleted file mode 100644 index fcf51e0a10..0000000000 --- a/pkg/objstore/azure/azure_test.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
- -package azure - -import ( - "testing" - "time" - - "github.com/thanos-io/thanos/pkg/exthttp" - "github.com/thanos-io/thanos/pkg/testutil" -) - -type TestCase struct { - name string - config []byte - wantFailParse bool - wantFailValidate bool -} - -var validConfig = []byte(`storage_account: "myStorageAccount" -storage_account_key: "abc123" -container: "MyContainer" -endpoint: "blob.core.windows.net" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "try_timeout": 0`) - -var tests = []TestCase{ - { - name: "validConfig", - config: validConfig, - wantFailParse: false, - wantFailValidate: false, - }, - { - name: "Missing storage account", - config: []byte(`storage_account: "" -storage_account_key: "abc123" -container: "MyContainer" -endpoint: "blob.core.windows.net" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "try_timeout": 0`), - wantFailParse: false, - wantFailValidate: true, - }, - { - name: "Missing storage account key", - config: []byte(`storage_account: "asdfasdf" -storage_account_key: "" -container: "MyContainer" -endpoint: "blob.core.windows.net" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "try_timeout": 0`), - wantFailParse: false, - wantFailValidate: true, - }, - { - name: "Negative max_tries", - config: []byte(`storage_account: "asdfasdf" -storage_account_key: "asdfsdf" -container: "MyContainer" -endpoint: "not.valid" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "max_tries": -1 - "try_timeout": 0`), - wantFailParse: false, - wantFailValidate: true, - }, - { - name: "Negative max_retry_requests", - config: []byte(`storage_account: "asdfasdf" -storage_account_key: "asdfsdf" -container: "MyContainer" -endpoint: "not.valid" -reader_config: - "max_retry_requests": -100 -pipeline_config: - "try_timeout": 0`), - wantFailParse: false, - wantFailValidate: true, - }, - { - name: "Not a Duration", - config: []byte(`storage_account: "asdfasdf" -storage_account_key: "asdfsdf" -container: 
"MyContainer" -endpoint: "not.valid" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "try_timeout": 10`), - wantFailParse: true, - wantFailValidate: true, - }, - { - name: "Valid Duration", - config: []byte(`storage_account: "asdfasdf" -storage_account_key: "asdfsdf" -container: "MyContainer" -endpoint: "not.valid" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "try_timeout": "10s"`), - wantFailParse: false, - wantFailValidate: false, - }, - { - name: "msi resource used with storage accounts", - config: []byte(`storage_account: "asdfasdf" -storage_account_key: "asdfsdf" -msi_resource: "https://example.blob.core.windows.net" -container: "MyContainer" -endpoint: "not.valid" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "try_timeout": "10s"`), - wantFailParse: false, - wantFailValidate: true, - }, - { - name: "Valid MSI Resource", - config: []byte(`storage_account: "myAccount" -storage_account_key: "" -msi_resource: "https://example.blob.core.windows.net" -container: "MyContainer" -endpoint: "not.valid" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "try_timeout": "10s"`), - wantFailParse: false, - wantFailValidate: false, - }, - { - name: "Valid User Assigned Identity Config without Resource", - config: []byte(`storage_account: "myAccount" -storage_account_key: "" -user_assigned_id: "1234-56578678-655" -container: "MyContainer"`), - wantFailParse: false, - wantFailValidate: false, - }, - { - name: "Valid User Assigned Identity Config with Resource", - config: []byte(`storage_account: "myAccount" -storage_account_key: "" -user_assigned_id: "1234-56578678-655" -msi_resource: "https://example.blob.core.windows.net" -container: "MyContainer"`), - wantFailParse: false, - wantFailValidate: false, - }, -} - -func TestConfig_validate(t *testing.T) { - - for _, testCase := range tests { - - conf, err := parseConfig(testCase.config) - - if (err != nil) != testCase.wantFailParse { - t.Errorf("%s error = %v, 
wantFailParse %v", testCase.name, err, testCase.wantFailParse) - continue - } - - validateErr := conf.validate() - if (validateErr != nil) != testCase.wantFailValidate { - t.Errorf("%s error = %v, wantFailValidate %v", testCase.name, validateErr, testCase.wantFailValidate) - } - } - -} - -func TestParseConfig_DefaultHTTPConfig(t *testing.T) { - - cfg, err := parseConfig(validConfig) - testutil.Ok(t, err) - - if time.Duration(cfg.HTTPConfig.IdleConnTimeout) != time.Duration(90*time.Second) { - t.Errorf("parsing of idle_conn_timeout failed: got %v, expected %v", - time.Duration(cfg.HTTPConfig.IdleConnTimeout), time.Duration(90*time.Second)) - } - - if time.Duration(cfg.HTTPConfig.ResponseHeaderTimeout) != time.Duration(2*time.Minute) { - t.Errorf("parsing of response_header_timeout failed: got %v, expected %v", - time.Duration(cfg.HTTPConfig.IdleConnTimeout), time.Duration(2*time.Minute)) - } - - if cfg.HTTPConfig.InsecureSkipVerify { - t.Errorf("parsing of insecure_skip_verify failed: got %v, expected %v", cfg.HTTPConfig.InsecureSkipVerify, false) - } -} - -func TestParseConfig_CustomHTTPConfigWithTLS(t *testing.T) { - input := []byte(`storage_account: "myStorageAccount" -storage_account_key: "abc123" -container: "MyContainer" -endpoint: "blob.core.windows.net" -http_config: - tls_config: - ca_file: /certs/ca.crt - cert_file: /certs/cert.crt - key_file: /certs/key.key - server_name: server - insecure_skip_verify: false - `) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - - testutil.Equals(t, "/certs/ca.crt", cfg.HTTPConfig.TLSConfig.CAFile) - testutil.Equals(t, "/certs/cert.crt", cfg.HTTPConfig.TLSConfig.CertFile) - testutil.Equals(t, "/certs/key.key", cfg.HTTPConfig.TLSConfig.KeyFile) - testutil.Equals(t, "server", cfg.HTTPConfig.TLSConfig.ServerName) - testutil.Equals(t, false, cfg.HTTPConfig.TLSConfig.InsecureSkipVerify) -} - -func TestParseConfig_CustomLegacyInsecureSkipVerify(t *testing.T) { - input := []byte(`storage_account: "myStorageAccount" 
-storage_account_key: "abc123" -container: "MyContainer" -endpoint: "blob.core.windows.net" -http_config: - insecure_skip_verify: true - tls_config: - insecure_skip_verify: false - `) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - transport, err := exthttp.DefaultTransport(cfg.HTTPConfig) - testutil.Ok(t, err) - testutil.Equals(t, true, transport.TLSClientConfig.InsecureSkipVerify) -} diff --git a/pkg/objstore/azure/helpers.go b/pkg/objstore/azure/helpers.go deleted file mode 100644 index cebcd27dd4..0000000000 --- a/pkg/objstore/azure/helpers.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package azure - -import ( - "context" - "fmt" - "net/http" - "net/url" - "regexp" - "time" - - "github.com/Azure/azure-pipeline-go/pipeline" - blob "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/azure/auth" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/thanos-io/thanos/pkg/exthttp" -) - -// DirDelim is the delimiter used to model a directory structure in an object store bucket. -const DirDelim = "/" - -var errorCodeRegex = regexp.MustCompile(`X-Ms-Error-Code:\D*\[(\w+)\]`) - -func init() { - // Disable `ForceLog` in Azure storage module - // As the time of this patch, the logging function in the storage module isn't correctly - // detecting expected REST errors like 404 and so outputs them to syslog along with a stacktrace. - // https://github.com/Azure/azure-storage-blob-go/issues/214 - // - // This needs to be done at startup because the underlying variable is not thread safe. 
- // https://github.com/Azure/azure-pipeline-go/blob/dc95902f1d32034f8f743ccc6c3f2eb36b84da27/pipeline/core.go#L276-L283 - pipeline.SetForceLogEnabled(false) -} - -func getAzureStorageCredentials(logger log.Logger, conf Config) (blob.Credential, error) { - if conf.MSIResource != "" || conf.UserAssignedID != "" { - spt, err := getServicePrincipalToken(logger, conf) - if err != nil { - return nil, err - } - if err := spt.Refresh(); err != nil { - return nil, err - } - - return blob.NewTokenCredential(spt.Token().AccessToken, func(tc blob.TokenCredential) time.Duration { - err := spt.Refresh() - if err != nil { - level.Error(logger).Log("msg", "could not refresh MSI token", "err", err) - // Retry later as the error can be related to API throttling - return 30 * time.Second - } - tc.SetToken(spt.Token().AccessToken) - return spt.Token().Expires().Sub(time.Now().Add(2 * time.Minute)) - }), nil - } - - credential, err := blob.NewSharedKeyCredential(conf.StorageAccountName, conf.StorageAccountKey) - if err != nil { - return nil, err - } - return credential, nil -} - -func getServicePrincipalToken(logger log.Logger, conf Config) (*adal.ServicePrincipalToken, error) { - resource := conf.MSIResource - if resource == "" { - resource = fmt.Sprintf("https://%s.%s", conf.StorageAccountName, conf.Endpoint) - } - - msiConfig := auth.MSIConfig{ - Resource: resource, - } - - if conf.UserAssignedID != "" { - level.Debug(logger).Log("msg", "using user assigned identity", "clientId", conf.UserAssignedID) - msiConfig.ClientID = conf.UserAssignedID - } else { - level.Debug(logger).Log("msg", "using system assigned identity") - } - - return msiConfig.ServicePrincipalToken() -} - -func getContainerURL(ctx context.Context, logger log.Logger, conf Config) (blob.ContainerURL, error) { - credentials, err := getAzureStorageCredentials(logger, conf) - - if err != nil { - return blob.ContainerURL{}, err - } - - retryOptions := blob.RetryOptions{ - MaxTries: conf.PipelineConfig.MaxTries, - 
TryTimeout: time.Duration(conf.PipelineConfig.TryTimeout), - RetryDelay: time.Duration(conf.PipelineConfig.RetryDelay), - MaxRetryDelay: time.Duration(conf.PipelineConfig.MaxRetryDelay), - } - - if deadline, ok := ctx.Deadline(); ok { - retryOptions.TryTimeout = time.Until(deadline) - } - - dt, err := exthttp.DefaultTransport(conf.HTTPConfig) - if err != nil { - return blob.ContainerURL{}, err - } - client := http.Client{ - Transport: dt, - } - - p := blob.NewPipeline(credentials, blob.PipelineOptions{ - Retry: retryOptions, - Telemetry: blob.TelemetryOptions{Value: "Thanos"}, - RequestLog: blob.RequestLogOptions{ - // Log a warning if an operation takes longer than the specified duration. - // (-1=no logging; 0=default 3s threshold) - LogWarningIfTryOverThreshold: -1, - }, - Log: pipeline.LogOptions{ - ShouldLog: nil, - }, - HTTPSender: pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { - return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - resp, err := client.Do(request.WithContext(ctx)) - - return pipeline.NewHTTPResponse(resp), err - } - }), - }) - u, err := url.Parse(fmt.Sprintf("https://%s.%s", conf.StorageAccountName, conf.Endpoint)) - if err != nil { - return blob.ContainerURL{}, err - } - service := blob.NewServiceURL(*u, p) - - return service.NewContainerURL(conf.ContainerName), nil -} - -func getContainer(ctx context.Context, logger log.Logger, conf Config) (blob.ContainerURL, error) { - c, err := getContainerURL(ctx, logger, conf) - if err != nil { - return blob.ContainerURL{}, err - } - // Getting container properties to check if it exists or not. Returns error which will be parsed further. 
- _, err = c.GetProperties(ctx, blob.LeaseAccessConditions{}) - return c, err -} - -func createContainer(ctx context.Context, logger log.Logger, conf Config) (blob.ContainerURL, error) { - c, err := getContainerURL(ctx, logger, conf) - if err != nil { - return blob.ContainerURL{}, err - } - _, err = c.Create( - ctx, - blob.Metadata{}, - blob.PublicAccessNone) - return c, err -} - -func getBlobURL(blobName string, c blob.ContainerURL) blob.BlockBlobURL { - return c.NewBlockBlobURL(blobName) -} - -func parseError(errorCode string) string { - match := errorCodeRegex.FindStringSubmatch(errorCode) - if len(match) == 2 { - return match[1] - } - return errorCode -} diff --git a/pkg/objstore/azure/helpers_test.go b/pkg/objstore/azure/helpers_test.go deleted file mode 100644 index c3139a8c07..0000000000 --- a/pkg/objstore/azure/helpers_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package azure - -import ( - "context" - "testing" - - "github.com/go-kit/log" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -func Test_getContainerURL(t *testing.T) { - type args struct { - conf Config - } - tests := []struct { - name string - args args - want string - wantErr bool - }{ - { - name: "default", - args: args{ - conf: Config{ - StorageAccountName: "foo", - StorageAccountKey: "Zm9vCg==", - ContainerName: "roo", - Endpoint: azureDefaultEndpoint, - }, - }, - want: "https://foo.blob.core.windows.net/roo", - wantErr: false, - }, - { - name: "azure china", - args: args{ - conf: Config{ - StorageAccountName: "foo", - StorageAccountKey: "Zm9vCg==", - ContainerName: "roo", - Endpoint: "blob.core.chinacloudapi.cn", - }, - }, - want: "https://foo.blob.core.chinacloudapi.cn/roo", - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - got, err := getContainerURL(ctx, log.NewNopLogger(), tt.args.conf) - if (err != nil) != tt.wantErr { - 
t.Errorf("getContainerURL() error = %v, wantErr %v", err, tt.wantErr) - return - } - testutil.Equals(t, tt.want, got.String()) - }) - } -} diff --git a/pkg/objstore/bos/bos.go b/pkg/objstore/bos/bos.go deleted file mode 100644 index a34f3eccda..0000000000 --- a/pkg/objstore/bos/bos.go +++ /dev/null @@ -1,393 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package bos - -import ( - "context" - "fmt" - "io" - "math" - "math/rand" - "net/http" - "os" - "strings" - "testing" - "time" - - "github.com/baidubce/bce-sdk-go/bce" - "github.com/baidubce/bce-sdk-go/services/bos" - "github.com/baidubce/bce-sdk-go/services/bos/api" - "github.com/go-kit/log" - "github.com/pkg/errors" - "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/objstore" -) - -// partSize 128MB. -const partSize = 1024 * 1024 * 128 - -// Bucket implements the store.Bucket interface against bos-compatible(Baidu Object Storage) APIs. -type Bucket struct { - logger log.Logger - client *bos.Client - name string -} - -// Config encapsulates the necessary config values to instantiate an bos client. -type Config struct { - Bucket string `yaml:"bucket"` - Endpoint string `yaml:"endpoint"` - AccessKey string `yaml:"access_key"` - SecretKey string `yaml:"secret_key"` -} - -func (conf *Config) validate() error { - if conf.Bucket == "" || - conf.Endpoint == "" || - conf.AccessKey == "" || - conf.SecretKey == "" { - return errors.New("insufficient BOS configuration information") - } - - return nil -} - -// parseConfig unmarshal a buffer into a Config with default HTTPConfig values. -func parseConfig(conf []byte) (Config, error) { - config := Config{} - if err := yaml.Unmarshal(conf, &config); err != nil { - return Config{}, err - } - - return config, nil -} - -// NewBucket new bos bucket. 
-func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) { - if logger == nil { - logger = log.NewNopLogger() - } - - config, err := parseConfig(conf) - if err != nil { - return nil, errors.Wrap(err, "parsing BOS configuration") - } - - return NewBucketWithConfig(logger, config, component) -} - -// NewBucketWithConfig returns a new Bucket using the provided bos config struct. -func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) { - if err := config.validate(); err != nil { - return nil, errors.Wrap(err, "validating BOS configuration") - } - - client, err := bos.NewClient(config.AccessKey, config.SecretKey, config.Endpoint) - if err != nil { - return nil, errors.Wrap(err, "creating BOS client") - } - - client.Config.UserAgent = fmt.Sprintf("thanos-%s", component) - - bkt := &Bucket{ - logger: logger, - client: client, - name: config.Bucket, - } - return bkt, nil -} - -// Name returns the bucket name for the provider. -func (b *Bucket) Name() string { - return b.name -} - -// Delete removes the object with the given name. -func (b *Bucket) Delete(_ context.Context, name string) error { - return b.client.DeleteObject(b.name, name) -} - -// Upload the contents of the reader as an object into the bucket. 
-func (b *Bucket) Upload(_ context.Context, name string, r io.Reader) error { - size, err := objstore.TryToGetSize(r) - if err != nil { - return errors.Wrapf(err, "getting size of %s", name) - } - - partNums, lastSlice := int(math.Floor(float64(size)/partSize)), size%partSize - if partNums == 0 { - body, err := bce.NewBodyFromSizedReader(r, lastSlice) - if err != nil { - return errors.Wrapf(err, "failed to create SizedReader for %s", name) - } - - if _, err := b.client.PutObject(b.name, name, body, nil); err != nil { - return errors.Wrapf(err, "failed to upload %s", name) - } - - return nil - } - - result, err := b.client.BasicInitiateMultipartUpload(b.name, name) - if err != nil { - return errors.Wrapf(err, "failed to initiate MultipartUpload for %s", name) - } - - uploadEveryPart := func(partSize int64, part int, uploadId string) (string, error) { - body, err := bce.NewBodyFromSizedReader(r, partSize) - if err != nil { - return "", err - } - - etag, err := b.client.UploadPart(b.name, name, uploadId, part, body, nil) - if err != nil { - if err := b.client.AbortMultipartUpload(b.name, name, uploadId); err != nil { - return etag, err - } - return etag, err - } - return etag, nil - } - - var parts []api.UploadInfoType - - for part := 1; part <= partNums; part++ { - etag, err := uploadEveryPart(partSize, part, result.UploadId) - if err != nil { - return errors.Wrapf(err, "failed to upload part %d for %s", part, name) - } - parts = append(parts, api.UploadInfoType{PartNumber: part, ETag: etag}) - } - - if lastSlice != 0 { - etag, err := uploadEveryPart(lastSlice, partNums+1, result.UploadId) - if err != nil { - return errors.Wrapf(err, "failed to upload the last part for %s", name) - } - parts = append(parts, api.UploadInfoType{PartNumber: partNums + 1, ETag: etag}) - } - - if _, err := b.client.CompleteMultipartUploadFromStruct(b.name, name, result.UploadId, &api.CompleteMultipartUploadArgs{Parts: parts}); err != nil { - return errors.Wrapf(err, "failed to set %s 
upload completed", name) - } - return nil -} - -// Iter calls f for each entry in the given directory (not recursive). The argument to f is the full -// object name including the prefix of the inspected directory. -func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, opt ...objstore.IterOption) error { - if dir != "" { - dir = strings.TrimSuffix(dir, objstore.DirDelim) + objstore.DirDelim - } - - delimiter := objstore.DirDelim - - if objstore.ApplyIterOptions(opt...).Recursive { - delimiter = "" - } - - var marker string - for { - if err := ctx.Err(); err != nil { - return err - } - - objects, err := b.client.ListObjects(b.name, &api.ListObjectsArgs{ - Delimiter: delimiter, - Marker: marker, - MaxKeys: 1000, - Prefix: dir, - }) - if err != nil { - return err - } - - marker = objects.NextMarker - for _, object := range objects.Contents { - if err := f(object.Key); err != nil { - return err - } - } - - for _, object := range objects.CommonPrefixes { - if err := f(object.Prefix); err != nil { - return err - } - } - if !objects.IsTruncated { - break - } - } - return nil -} - -// Get returns a reader for the given object name. -func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return b.getRange(ctx, b.name, name, 0, -1) -} - -// GetRange returns a new range reader for the given object name and range. -func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - return b.getRange(ctx, b.name, name, off, length) -} - -// Exists checks if the given object exists in the bucket. -func (b *Bucket) Exists(_ context.Context, name string) (bool, error) { - _, err := b.client.GetObjectMeta(b.name, name) - if err != nil { - if b.IsObjNotFoundErr(err) { - return false, nil - } - return false, errors.Wrapf(err, "getting object metadata of %s", name) - } - return true, nil -} - -func (b *Bucket) Close() error { - return nil -} - -// ObjectSize returns the size of the specified object. 
-func (b *Bucket) ObjectSize(_ context.Context, name string) (uint64, error) { - objMeta, err := b.client.GetObjectMeta(b.name, name) - if err != nil { - return 0, err - } - return uint64(objMeta.ContentLength), nil -} - -// Attributes returns information about the specified object. -func (b *Bucket) Attributes(_ context.Context, name string) (objstore.ObjectAttributes, error) { - objMeta, err := b.client.GetObjectMeta(b.name, name) - if err != nil { - return objstore.ObjectAttributes{}, errors.Wrapf(err, "gettting objectmeta of %s", name) - } - - lastModified, err := time.Parse(time.RFC1123, objMeta.LastModified) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - return objstore.ObjectAttributes{ - Size: objMeta.ContentLength, - LastModified: lastModified, - }, nil -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. -func (b *Bucket) IsObjNotFoundErr(err error) bool { - switch bosErr := errors.Cause(err).(type) { - case *bce.BceServiceError: - if bosErr.StatusCode == http.StatusNotFound || bosErr.Code == "NoSuchKey" { - return true - } - } - return false -} - -func (b *Bucket) getRange(_ context.Context, bucketName, objectKey string, off, length int64) (io.ReadCloser, error) { - if len(objectKey) == 0 { - return nil, errors.Errorf("given object name should not empty") - } - - ranges := []int64{off} - if length != -1 { - ranges = append(ranges, off+length-1) - } - - obj, err := b.client.GetObject(bucketName, objectKey, map[string]string{}, ranges...) - if err != nil { - return nil, err - } - - return obj.Body, nil -} - -func configFromEnv() Config { - c := Config{ - Bucket: os.Getenv("BOS_BUCKET"), - Endpoint: os.Getenv("BOS_ENDPOINT"), - AccessKey: os.Getenv("BOS_ACCESS_KEY"), - SecretKey: os.Getenv("BOS_SECRET_KEY"), - } - return c -} - -// NewTestBucket creates test bkt client that before returning creates temporary bucket. -// In a close function it empties and deletes the bucket. 
-func NewTestBucket(t testing.TB) (objstore.Bucket, func(), error) { - c := configFromEnv() - if err := validateForTest(c); err != nil { - return nil, nil, err - } - - if c.Bucket != "" { - if os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" { - return nil, nil, errors.New("BOS_BUCKET is defined. Normally this tests will create temporary bucket " + - "and delete it after test. Unset BOS_BUCKET env variable to use default logic. If you really want to run " + - "tests against provided (NOT USED!) bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: That bucket " + - "needs to be manually cleared. This means that it is only useful to run one test in a time. This is due " + - "to safety (accidentally pointing prod bucket for test) as well as BOS not being fully strong consistent.") - } - - bc, err := yaml.Marshal(c) - if err != nil { - return nil, nil, err - } - - b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test") - if err != nil { - return nil, nil, err - } - - if err := b.Iter(context.Background(), "", func(f string) error { - return errors.Errorf("bucket %s is not empty", c.Bucket) - }); err != nil { - return nil, nil, errors.Wrapf(err, "checking bucket %s", c.Bucket) - } - - t.Log("WARNING. Reusing", c.Bucket, "BOS bucket for BOS tests. 
Manual cleanup afterwards is required") - return b, func() {}, nil - } - - src := rand.NewSource(time.Now().UnixNano()) - tmpBucketName := strings.Replace(fmt.Sprintf("test_%x", src.Int63()), "_", "-", -1) - - if len(tmpBucketName) >= 31 { - tmpBucketName = tmpBucketName[:31] - } - - c.Bucket = tmpBucketName - bc, err := yaml.Marshal(c) - if err != nil { - return nil, nil, err - } - - b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test") - if err != nil { - return nil, nil, err - } - - if _, err := b.client.PutBucket(b.name); err != nil { - return nil, nil, err - } - - t.Log("created temporary BOS bucket for BOS tests with name", tmpBucketName) - return b, func() { - objstore.EmptyBucket(t, context.Background(), b) - if err := b.client.DeleteBucket(b.name); err != nil { - t.Logf("deleting bucket %s failed: %s", tmpBucketName, err) - } - }, nil -} - -func validateForTest(conf Config) error { - if conf.Endpoint == "" || - conf.AccessKey == "" || - conf.SecretKey == "" { - return errors.New("insufficient BOS configuration information") - } - return nil -} diff --git a/pkg/objstore/client/factory.go b/pkg/objstore/client/factory.go deleted file mode 100644 index 2061079014..0000000000 --- a/pkg/objstore/client/factory.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
- -package client - -import ( - "context" - "fmt" - "strings" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - yaml "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/azure" - "github.com/thanos-io/thanos/pkg/objstore/bos" - "github.com/thanos-io/thanos/pkg/objstore/cos" - "github.com/thanos-io/thanos/pkg/objstore/filesystem" - "github.com/thanos-io/thanos/pkg/objstore/gcs" - "github.com/thanos-io/thanos/pkg/objstore/oss" - "github.com/thanos-io/thanos/pkg/objstore/s3" - "github.com/thanos-io/thanos/pkg/objstore/swift" -) - -type ObjProvider string - -const ( - FILESYSTEM ObjProvider = "FILESYSTEM" - GCS ObjProvider = "GCS" - S3 ObjProvider = "S3" - AZURE ObjProvider = "AZURE" - SWIFT ObjProvider = "SWIFT" - COS ObjProvider = "COS" - ALIYUNOSS ObjProvider = "ALIYUNOSS" - BOS ObjProvider = "BOS" -) - -type BucketConfig struct { - Type ObjProvider `yaml:"type"` - Config interface{} `yaml:"config"` - Prefix string `yaml:"prefix" default:""` -} - -// NewBucket initializes and returns new object storage clients. -// NOTE: confContentYaml can contain secrets. 
-func NewBucket(logger log.Logger, confContentYaml []byte, reg prometheus.Registerer, component string) (objstore.InstrumentedBucket, error) { - level.Info(logger).Log("msg", "loading bucket configuration") - bucketConf := &BucketConfig{} - if err := yaml.UnmarshalStrict(confContentYaml, bucketConf); err != nil { - return nil, errors.Wrap(err, "parsing config YAML file") - } - - config, err := yaml.Marshal(bucketConf.Config) - if err != nil { - return nil, errors.Wrap(err, "marshal content of bucket configuration") - } - - var bucket objstore.Bucket - switch strings.ToUpper(string(bucketConf.Type)) { - case string(GCS): - bucket, err = gcs.NewBucket(context.Background(), logger, config, component) - case string(S3): - bucket, err = s3.NewBucket(logger, config, component) - case string(AZURE): - bucket, err = azure.NewBucket(logger, config, component) - case string(SWIFT): - bucket, err = swift.NewContainer(logger, config) - case string(COS): - bucket, err = cos.NewBucket(logger, config, component) - case string(ALIYUNOSS): - bucket, err = oss.NewBucket(logger, config, component) - case string(FILESYSTEM): - bucket, err = filesystem.NewBucketFromConfig(config) - case string(BOS): - bucket, err = bos.NewBucket(logger, config, component) - default: - return nil, errors.Errorf("bucket with type %s is not supported", bucketConf.Type) - } - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("create %s client", bucketConf.Type)) - } - - return objstore.NewTracingBucket(objstore.BucketWithMetrics(bucket.Name(), objstore.NewPrefixedBucket(bucket, bucketConf.Prefix), reg)), nil -} diff --git a/pkg/objstore/client/testconf/blank-gcs.conf.yml b/pkg/objstore/client/testconf/blank-gcs.conf.yml deleted file mode 100644 index cb5ef588cc..0000000000 --- a/pkg/objstore/client/testconf/blank-gcs.conf.yml +++ /dev/null @@ -1 +0,0 @@ -type: GCS \ No newline at end of file diff --git a/pkg/objstore/client/testconf/fake-gcs.conf.yml 
b/pkg/objstore/client/testconf/fake-gcs.conf.yml deleted file mode 100644 index 538c832788..0000000000 --- a/pkg/objstore/client/testconf/fake-gcs.conf.yml +++ /dev/null @@ -1,3 +0,0 @@ -type: FAKE-GCS -config: - bucket: test-bucket \ No newline at end of file diff --git a/pkg/objstore/clientutil/parse.go b/pkg/objstore/clientutil/parse.go deleted file mode 100644 index 759c42d29c..0000000000 --- a/pkg/objstore/clientutil/parse.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package clientutil - -import ( - "net/http" - "strconv" - "time" - - "github.com/pkg/errors" -) - -// ParseContentLength returns the content length (in bytes) parsed from the Content-Length -// HTTP header in input. -func ParseContentLength(m http.Header) (int64, error) { - const name = "Content-Length" - - v, ok := m[name] - if !ok { - return 0, errors.Errorf("%s header not found", name) - } - - if len(v) == 0 { - return 0, errors.Errorf("%s header has no values", name) - } - - ret, err := strconv.ParseInt(v[0], 10, 64) - if err != nil { - return 0, errors.Wrapf(err, "convert %s", name) - } - - return ret, nil -} - -// ParseLastModified returns the timestamp parsed from the Last-Modified -// HTTP header in input. -// Passing an second parameter, named f, to specify the time format. -// If f is empty then RFC3339 will be used as default format. 
-func ParseLastModified(m http.Header, f string) (time.Time, error) { - const ( - name = "Last-Modified" - defaultFormat = time.RFC3339 - ) - - v, ok := m[name] - if !ok { - return time.Time{}, errors.Errorf("%s header not found", name) - } - - if len(v) == 0 { - return time.Time{}, errors.Errorf("%s header has no values", name) - } - - if f == "" { - f = defaultFormat - } - - mod, err := time.Parse(f, v[0]) - if err != nil { - return time.Time{}, errors.Wrapf(err, "parse %s", name) - } - - return mod, nil -} diff --git a/pkg/objstore/clientutil/parse_test.go b/pkg/objstore/clientutil/parse_test.go deleted file mode 100644 index e2c44d8aaa..0000000000 --- a/pkg/objstore/clientutil/parse_test.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package clientutil - -import ( - "net/http" - "testing" - "time" - - alioss "github.com/aliyun/aliyun-oss-go-sdk/oss" - "github.com/thanos-io/thanos/pkg/testutil" -) - -func TestParseLastModified(t *testing.T) { - location, _ := time.LoadLocation("GMT") - tests := map[string]struct { - headerValue string - expectedVal time.Time - expectedErr string - format string - }{ - "no header": { - expectedErr: "Last-Modified header not found", - }, - "empty format string to default RFC3339 format": { - headerValue: "2015-11-06T10:07:11.000Z", - expectedVal: time.Date(2015, time.November, 6, 10, 7, 11, 0, time.UTC), - format: "", - }, - "valid RFC3339 header value": { - headerValue: "2015-11-06T10:07:11.000Z", - expectedVal: time.Date(2015, time.November, 6, 10, 7, 11, 0, time.UTC), - format: time.RFC3339, - }, - "invalid RFC3339 header value": { - headerValue: "invalid", - expectedErr: `parse Last-Modified: parsing time "invalid" as "2006-01-02T15:04:05Z07:00": cannot parse "invalid" as "2006"`, - format: time.RFC3339, - }, - "valid RFC1123 header value": { - headerValue: "Fri, 24 Feb 2012 06:07:48 GMT", - expectedVal: time.Date(2012, time.February, 24, 6, 7, 48, 0, 
location), - format: time.RFC1123, - }, - "invalid RFC1123 header value": { - headerValue: "invalid", - expectedErr: `parse Last-Modified: parsing time "invalid" as "Mon, 02 Jan 2006 15:04:05 MST": cannot parse "invalid" as "Mon"`, - format: time.RFC1123, - }, - } - - for testName, testData := range tests { - t.Run(testName, func(t *testing.T) { - meta := http.Header{} - if testData.headerValue != "" { - meta.Add(alioss.HTTPHeaderLastModified, testData.headerValue) - } - - actual, err := ParseLastModified(meta, testData.format) - - if testData.expectedErr != "" { - testutil.NotOk(t, err) - testutil.Equals(t, testData.expectedErr, err.Error()) - } else { - testutil.Ok(t, err) - testutil.Assert(t, testData.expectedVal.Equal(actual)) - } - }) - } -} - -func TestParseContentLength(t *testing.T) { - tests := map[string]struct { - headerValue string - expectedVal int64 - expectedErr string - }{ - "no header": { - expectedErr: "Content-Length header not found", - }, - "invalid header value": { - headerValue: "invalid", - expectedErr: `convert Content-Length: strconv.ParseInt: parsing "invalid": invalid syntax`, - }, - "valid header value": { - headerValue: "12345", - expectedVal: 12345, - }, - } - - for testName, testData := range tests { - t.Run(testName, func(t *testing.T) { - meta := http.Header{} - if testData.headerValue != "" { - meta.Add(alioss.HTTPHeaderContentLength, testData.headerValue) - } - - actual, err := ParseContentLength(meta) - - if testData.expectedErr != "" { - testutil.NotOk(t, err) - testutil.Equals(t, testData.expectedErr, err.Error()) - } else { - testutil.Ok(t, err) - testutil.Equals(t, testData.expectedVal, actual) - } - }) - } -} diff --git a/pkg/objstore/cos/cos.go b/pkg/objstore/cos/cos.go deleted file mode 100644 index a22010bb72..0000000000 --- a/pkg/objstore/cos/cos.go +++ /dev/null @@ -1,550 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
- -package cos - -import ( - "context" - "fmt" - "io" - "math" - "math/rand" - "net/http" - "net/url" - "os" - "strings" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "github.com/tencentyun/cos-go-sdk-v5" - "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/exthttp" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/clientutil" - "github.com/thanos-io/thanos/pkg/runutil" -) - -// DirDelim is the delimiter used to model a directory structure in an object store bucket. -const dirDelim = "/" - -// Bucket implements the store.Bucket interface against cos-compatible(Tencent Object Storage) APIs. -type Bucket struct { - logger log.Logger - client *cos.Client - name string -} - -// DefaultConfig is the default config for an cos client. default tune the `MaxIdleConnsPerHost`. -var DefaultConfig = Config{ - HTTPConfig: exthttp.HTTPConfig{ - IdleConnTimeout: model.Duration(90 * time.Second), - ResponseHeaderTimeout: model.Duration(2 * time.Minute), - TLSHandshakeTimeout: model.Duration(10 * time.Second), - ExpectContinueTimeout: model.Duration(1 * time.Second), - MaxIdleConns: 100, - MaxIdleConnsPerHost: 100, - MaxConnsPerHost: 0, - }, -} - -// Config encapsulates the necessary config values to instantiate an cos client. -type Config struct { - Bucket string `yaml:"bucket"` - Region string `yaml:"region"` - AppId string `yaml:"app_id"` - Endpoint string `yaml:"endpoint"` - SecretKey string `yaml:"secret_key"` - SecretId string `yaml:"secret_id"` - HTTPConfig exthttp.HTTPConfig `yaml:"http_config"` -} - -// Validate checks to see if mandatory cos config options are set. 
-func (conf *Config) validate() error { - if conf.Endpoint != "" { - if _, err := url.Parse(conf.Endpoint); err != nil { - return errors.Wrap(err, "parse endpoint") - } - if conf.SecretId == "" || - conf.SecretKey == "" { - return errors.New("secret_id or secret_key is empty") - } - return nil - } - if conf.Bucket == "" || - conf.AppId == "" || - conf.Region == "" || - conf.SecretId == "" || - conf.SecretKey == "" { - return errors.New("insufficient cos configuration information") - } - return nil -} - -// parseConfig unmarshal a buffer into a Config with default HTTPConfig values. -func parseConfig(conf []byte) (Config, error) { - config := DefaultConfig - if err := yaml.Unmarshal(conf, &config); err != nil { - return Config{}, err - } - - return config, nil -} - -// NewBucket returns a new Bucket using the provided cos configuration. -func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) { - if logger == nil { - logger = log.NewNopLogger() - } - - config, err := parseConfig(conf) - if err != nil { - return nil, errors.Wrap(err, "parsing cos configuration") - } - - return NewBucketWithConfig(logger, config, component) -} - -// NewBucketWithConfig returns a new Bucket using the provided cos config values. 
-func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) { - if err := config.validate(); err != nil { - return nil, errors.Wrap(err, "validate cos configuration") - } - - var bucketURL *url.URL - var err error - if config.Endpoint != "" { - bucketURL, err = url.Parse(config.Endpoint) - if err != nil { - return nil, errors.Wrap(err, "parse endpoint") - } - } else { - bucketURL = cos.NewBucketURL(fmt.Sprintf("%s-%s", config.Bucket, config.AppId), config.Region, true) - } - b := &cos.BaseURL{BucketURL: bucketURL} - tpt, _ := exthttp.DefaultTransport(config.HTTPConfig) - client := cos.NewClient(b, &http.Client{ - Transport: &cos.AuthorizationTransport{ - SecretID: config.SecretId, - SecretKey: config.SecretKey, - Transport: tpt, - }, - }) - - bkt := &Bucket{ - logger: logger, - client: client, - name: config.Bucket, - } - return bkt, nil -} - -// Name returns the bucket name for COS. -func (b *Bucket) Name() string { - return b.name -} - -// Attributes returns information about the specified object. -func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { - resp, err := b.client.Object.Head(ctx, name, nil) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - size, err := clientutil.ParseContentLength(resp.Header) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - // tencent cos return Last-Modified header in RFC1123 format. 
- // see api doc for details: https://intl.cloud.tencent.com/document/product/436/7729 - mod, err := clientutil.ParseLastModified(resp.Header, time.RFC1123) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - return objstore.ObjectAttributes{ - Size: size, - LastModified: mod, - }, nil -} - -var ( - _ cos.FixedLengthReader = (*fixedLengthReader)(nil) -) - -type fixedLengthReader struct { - io.Reader - size int64 -} - -func newFixedLengthReader(r io.Reader, size int64) io.Reader { - return fixedLengthReader{ - Reader: io.LimitReader(r, size), - size: size, - } -} - -// Size implement cos.FixedLengthReader interface. -func (r fixedLengthReader) Size() int64 { - return r.size -} - -// Upload the contents of the reader as an object into the bucket. -func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { - size, err := objstore.TryToGetSize(r) - if err != nil { - return errors.Wrapf(err, "getting size of %s", name) - } - // partSize 128MB. - const partSize = 1024 * 1024 * 128 - partNums, lastSlice := int(math.Floor(float64(size)/partSize)), size%partSize - if partNums == 0 { - if _, err := b.client.Object.Put(ctx, name, r, nil); err != nil { - return errors.Wrapf(err, "Put object: %s", name) - } - return nil - } - // 1. init. - result, _, err := b.client.Object.InitiateMultipartUpload(ctx, name, nil) - if err != nil { - return errors.Wrapf(err, "InitiateMultipartUpload %s", name) - } - uploadEveryPart := func(partSize int64, part int, uploadID string) (string, error) { - r := newFixedLengthReader(r, partSize) - resp, err := b.client.Object.UploadPart(ctx, name, uploadID, part, r, &cos.ObjectUploadPartOptions{ - ContentLength: partSize, - }) - if err != nil { - if _, err := b.client.Object.AbortMultipartUpload(ctx, name, uploadID); err != nil { - return "", err - } - return "", err - } - etag := resp.Header.Get("ETag") - return etag, nil - } - optcom := &cos.CompleteMultipartUploadOptions{} - // 2. upload parts. 
- for part := 1; part <= partNums; part++ { - etag, err := uploadEveryPart(partSize, part, result.UploadID) - if err != nil { - return errors.Wrapf(err, "uploadPart %d, %s", part, name) - } - optcom.Parts = append(optcom.Parts, cos.Object{ - PartNumber: part, ETag: etag}, - ) - } - // 3. upload last part. - if lastSlice != 0 { - part := partNums + 1 - etag, err := uploadEveryPart(lastSlice, part, result.UploadID) - if err != nil { - return errors.Wrapf(err, "uploadPart %d, %s", part, name) - } - optcom.Parts = append(optcom.Parts, cos.Object{ - PartNumber: part, ETag: etag}, - ) - } - // 4. complete. - if _, _, err := b.client.Object.CompleteMultipartUpload(ctx, name, result.UploadID, optcom); err != nil { - return errors.Wrapf(err, "CompleteMultipartUpload %s", name) - } - return nil -} - -// Delete removes the object with the given name. -func (b *Bucket) Delete(ctx context.Context, name string) error { - if _, err := b.client.Object.Delete(ctx, name); err != nil { - return errors.Wrap(err, "delete cos object") - } - return nil -} - -// Iter calls f for each entry in the given directory (not recursive.). The argument to f is the full -// object name including the prefix of the inspected directory. -func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { - if dir != "" { - dir = strings.TrimSuffix(dir, dirDelim) + dirDelim - } - - for object := range b.listObjects(ctx, dir, options...) 
{ - if object.err != nil { - return object.err - } - if object.key == "" { - continue - } - if err := f(object.key); err != nil { - return err - } - } - - return nil -} - -func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - if name == "" { - return nil, errors.New("given object name should not empty") - } - - opts := &cos.ObjectGetOptions{} - if length != -1 { - if err := setRange(opts, off, off+length-1); err != nil { - return nil, err - } - } else if off > 0 { - if err := setRange(opts, off, 0); err != nil { - return nil, err - } - } - - resp, err := b.client.Object.Get(ctx, name, opts) - if err != nil { - return nil, err - } - if _, err := resp.Body.Read(nil); err != nil { - runutil.ExhaustCloseWithLogOnErr(b.logger, resp.Body, "cos get range obj close") - return nil, err - } - // Add size info into reader to pass it to Upload function. - r := objectSizerReadCloser{ReadCloser: resp.Body, size: resp.ContentLength} - return r, nil -} - -type objectSizerReadCloser struct { - io.ReadCloser - size int64 -} - -// ObjectSize implement objstore.ObjectSizer. -func (o objectSizerReadCloser) ObjectSize() (int64, error) { - return o.size, nil -} - -// Get returns a reader for the given object name. -func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return b.getRange(ctx, name, 0, -1) -} - -// GetRange returns a new range reader for the given object name and range. -func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - return b.getRange(ctx, name, off, length) -} - -// Exists checks if the given object exists in the bucket. 
-func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) { - if _, err := b.client.Object.Head(ctx, name, nil); err != nil { - if b.IsObjNotFoundErr(err) { - return false, nil - } - return false, errors.Wrap(err, "head cos object") - } - - return true, nil -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. -func (b *Bucket) IsObjNotFoundErr(err error) bool { - switch tmpErr := errors.Cause(err).(type) { - case *cos.ErrorResponse: - if tmpErr.Code == "NoSuchKey" || - (tmpErr.Response != nil && tmpErr.Response.StatusCode == http.StatusNotFound) { - return true - } - return false - default: - return false - } -} - -func (b *Bucket) Close() error { return nil } - -type objectInfo struct { - key string - err error -} - -func (b *Bucket) listObjects(ctx context.Context, objectPrefix string, options ...objstore.IterOption) <-chan objectInfo { - objectsCh := make(chan objectInfo, 1) - - // If recursive iteration is enabled we should pass an empty delimiter. - delimiter := dirDelim - if objstore.ApplyIterOptions(options...).Recursive { - delimiter = "" - } - - go func(objectsCh chan<- objectInfo) { - defer close(objectsCh) - var marker string - for { - result, _, err := b.client.Bucket.Get(ctx, &cos.BucketGetOptions{ - Prefix: objectPrefix, - MaxKeys: 1000, - Marker: marker, - Delimiter: delimiter, - }) - if err != nil { - select { - case objectsCh <- objectInfo{ - err: err, - }: - case <-ctx.Done(): - } - return - } - - for _, object := range result.Contents { - select { - case objectsCh <- objectInfo{ - key: object.Key, - }: - case <-ctx.Done(): - return - } - } - - // The result of CommonPrefixes contains the objects - // that have the same keys between Prefix and the key specified by delimiter. 
- for _, obj := range result.CommonPrefixes { - select { - case objectsCh <- objectInfo{ - key: obj, - }: - case <-ctx.Done(): - return - } - } - - if !result.IsTruncated { - return - } - - marker = result.NextMarker - } - }(objectsCh) - return objectsCh -} - -func setRange(opts *cos.ObjectGetOptions, start, end int64) error { - if start == 0 && end < 0 { - opts.Range = fmt.Sprintf("bytes=%d", end) - } else if 0 < start && end == 0 { - opts.Range = fmt.Sprintf("bytes=%d-", start) - } else if 0 <= start && start <= end { - opts.Range = fmt.Sprintf("bytes=%d-%d", start, end) - } else { - return errors.Errorf("Invalid range specified: start=%d end=%d", start, end) - } - return nil -} - -func configFromEnv() Config { - c := Config{ - Bucket: os.Getenv("COS_BUCKET"), - AppId: os.Getenv("COS_APP_ID"), - Region: os.Getenv("COS_REGION"), - Endpoint: os.Getenv("COS_ENDPOINT"), - SecretId: os.Getenv("COS_SECRET_ID"), - SecretKey: os.Getenv("COS_SECRET_KEY"), - } - - return c -} - -// NewTestBucket creates test bkt client that before returning creates temporary bucket. -// In a close function it empties and deletes the bucket. -func NewTestBucket(t testing.TB) (objstore.Bucket, func(), error) { - c := configFromEnv() - if err := validateForTest(c); err != nil { - return nil, nil, err - } - - if c.Bucket != "" { - if os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" { - return nil, nil, errors.New("COS_BUCKET is defined. Normally this tests will create temporary bucket " + - "and delete it after test. Unset COS_BUCKET env variable to use default logic. If you really want to run " + - "tests against provided (NOT USED!) bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: That bucket " + - "needs to be manually cleared. This means that it is only useful to run one test in a time. 
This is due " + - "to safety (accidentally pointing prod bucket for test) as well as COS not being fully strong consistent.") - } - - bc, err := yaml.Marshal(c) - if err != nil { - return nil, nil, err - } - - b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test") - if err != nil { - return nil, nil, err - } - - if err := b.Iter(context.Background(), "", func(f string) error { - return errors.Errorf("bucket %s is not empty", c.Bucket) - }); err != nil { - return nil, nil, errors.Wrapf(err, "cos check bucket %s", c.Bucket) - } - - t.Log("WARNING. Reusing", c.Bucket, "COS bucket for COS tests. Manual cleanup afterwards is required") - return b, func() {}, nil - } - c.Bucket = createTemporaryTestBucketName(t) - - bc, err := yaml.Marshal(c) - if err != nil { - return nil, nil, err - } - - b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test") - if err != nil { - return nil, nil, err - } - - if _, err := b.client.Bucket.Put(context.Background(), nil); err != nil { - return nil, nil, err - } - t.Log("created temporary COS bucket for COS tests with name", c.Bucket) - - return b, func() { - objstore.EmptyBucket(t, context.Background(), b) - if _, err := b.client.Bucket.Delete(context.Background()); err != nil { - t.Logf("deleting bucket %s failed: %s", c.Bucket, err) - } - }, nil -} - -func validateForTest(conf Config) error { - if conf.Endpoint != "" { - if _, err := url.Parse(conf.Endpoint); err != nil { - return errors.Wrap(err, "parse endpoint") - } - if conf.SecretId == "" || - conf.SecretKey == "" { - return errors.New("secret_id or secret_key is empty") - } - return nil - } - if conf.AppId == "" || - conf.Region == "" || - conf.SecretId == "" || - conf.SecretKey == "" { - return errors.New("insufficient cos configuration information") - } - return nil -} - -// createTemporaryTestBucketName create a temp cos bucket for test. 
-// Bucket Naming Conventions: https://intl.cloud.tencent.com/document/product/436/13312#overview -func createTemporaryTestBucketName(t testing.TB) string { - src := rand.New(rand.NewSource(time.Now().UnixNano())) - name := fmt.Sprintf("test_%x_%s", src.Int31(), strings.ToLower(t.Name())) - name = strings.NewReplacer("_", "-", "/", "-").Replace(name) - const maxLength = 50 - if len(name) >= maxLength { - name = name[:maxLength] - } - return strings.TrimSuffix(name, "-") -} diff --git a/pkg/objstore/cos/cos_test.go b/pkg/objstore/cos/cos_test.go deleted file mode 100644 index cc9432e97c..0000000000 --- a/pkg/objstore/cos/cos_test.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package cos - -import ( - "testing" - "time" - - "github.com/prometheus/common/model" - "github.com/thanos-io/thanos/pkg/exthttp" - "github.com/thanos-io/thanos/pkg/testutil" -) - -func Test_parseConfig(t *testing.T) { - type args struct { - conf []byte - } - tests := []struct { - name string - args args - want Config - wantErr bool - }{ - { - name: "empty", - args: args{ - conf: []byte(""), - }, - want: DefaultConfig, - wantErr: false, - }, - { - name: "max_idle_conns", - args: args{ - conf: []byte(` -http_config: - max_idle_conns: 200 -`), - }, - want: Config{ - HTTPConfig: exthttp.HTTPConfig{ - IdleConnTimeout: model.Duration(90 * time.Second), - ResponseHeaderTimeout: model.Duration(2 * time.Minute), - TLSHandshakeTimeout: model.Duration(10 * time.Second), - ExpectContinueTimeout: model.Duration(1 * time.Second), - MaxIdleConns: 200, - MaxIdleConnsPerHost: 100, - MaxConnsPerHost: 0, - }, - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := parseConfig(tt.args.conf) - if (err != nil) != tt.wantErr { - t.Errorf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) - return - } - testutil.Equals(t, tt.want, got) - }) - } -} - -func TestConfig_validate(t 
*testing.T) { - type fields struct { - Bucket string - Region string - AppId string - Endpoint string - SecretKey string - SecretId string - HTTPConfig exthttp.HTTPConfig - } - tests := []struct { - name string - fields fields - wantErr bool - }{ - { - name: "ok endpoint", - fields: fields{ - Endpoint: "http://bucket-123.cos.ap-beijing.myqcloud.com", - SecretId: "sid", - SecretKey: "skey", - }, - wantErr: false, - }, - { - name: "ok bucket-appid-region", - fields: fields{ - Bucket: "bucket", - AppId: "123", - Region: "ap-beijing", - SecretId: "sid", - SecretKey: "skey", - }, - wantErr: false, - }, - { - name: "missing skey", - fields: fields{ - Bucket: "bucket", - AppId: "123", - Region: "ap-beijing", - }, - wantErr: true, - }, - { - name: "missing bucket", - fields: fields{ - AppId: "123", - Region: "ap-beijing", - SecretId: "sid", - SecretKey: "skey", - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - conf := &Config{ - Bucket: tt.fields.Bucket, - Region: tt.fields.Region, - AppId: tt.fields.AppId, - Endpoint: tt.fields.Endpoint, - SecretKey: tt.fields.SecretKey, - SecretId: tt.fields.SecretId, - HTTPConfig: tt.fields.HTTPConfig, - } - if err := conf.validate(); (err != nil) != tt.wantErr { - t.Errorf("validate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/pkg/objstore/filesystem/filesystem.go b/pkg/objstore/filesystem/filesystem.go deleted file mode 100644 index 075a87a3cb..0000000000 --- a/pkg/objstore/filesystem/filesystem.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package filesystem - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/pkg/errors" - "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/runutil" -) - -// Config stores the configuration for storing and accessing blobs in filesystem. 
-type Config struct { - Directory string `yaml:"directory"` -} - -// Bucket implements the objstore.Bucket interfaces against filesystem that binary runs on. -// Methods from Bucket interface are thread-safe. Objects are assumed to be immutable. -// NOTE: It does not follow symbolic links. -type Bucket struct { - rootDir string -} - -// NewBucketFromConfig returns a new filesystem.Bucket from config. -func NewBucketFromConfig(conf []byte) (*Bucket, error) { - var c Config - if err := yaml.Unmarshal(conf, &c); err != nil { - return nil, err - } - if c.Directory == "" { - return nil, errors.New("missing directory for filesystem bucket") - } - return NewBucket(c.Directory) -} - -// NewBucket returns a new filesystem.Bucket. -func NewBucket(rootDir string) (*Bucket, error) { - absDir, err := filepath.Abs(rootDir) - if err != nil { - return nil, err - } - return &Bucket{rootDir: absDir}, nil -} - -// Iter calls f for each entry in the given directory. The argument to f is the full -// object name including the prefix of the inspected directory. -func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { - params := objstore.ApplyIterOptions(options...) - absDir := filepath.Join(b.rootDir, dir) - info, err := os.Stat(absDir) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return errors.Wrapf(err, "stat %s", absDir) - } - if !info.IsDir() { - return nil - } - - files, err := ioutil.ReadDir(absDir) - if err != nil { - return err - } - for _, file := range files { - name := filepath.Join(dir, file.Name()) - - if file.IsDir() { - empty, err := isDirEmpty(filepath.Join(absDir, file.Name())) - if err != nil { - return err - } - - if empty { - // Skip empty directories. - continue - } - - name += objstore.DirDelim - - if params.Recursive { - // Recursively list files in the subdirectory. 
- if err := b.Iter(ctx, name, f, options...); err != nil { - return err - } - - // The callback f() has already been called for the subdirectory - // files so we should skip to next filesystem entry. - continue - } - } - if err := f(name); err != nil { - return err - } - } - return nil -} - -// Get returns a reader for the given object name. -func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return b.GetRange(ctx, name, 0, -1) -} - -type rangeReaderCloser struct { - io.Reader - f *os.File -} - -func (r *rangeReaderCloser) Close() error { - return r.f.Close() -} - -// Attributes returns information about the specified object. -func (b *Bucket) Attributes(_ context.Context, name string) (objstore.ObjectAttributes, error) { - file := filepath.Join(b.rootDir, name) - stat, err := os.Stat(file) - if err != nil { - return objstore.ObjectAttributes{}, errors.Wrapf(err, "stat %s", file) - } - - return objstore.ObjectAttributes{ - Size: stat.Size(), - LastModified: stat.ModTime(), - }, nil -} - -// GetRange returns a new range reader for the given object name and range. -func (b *Bucket) GetRange(_ context.Context, name string, off, length int64) (io.ReadCloser, error) { - if name == "" { - return nil, errors.New("object name is empty") - } - - file := filepath.Join(b.rootDir, name) - if _, err := os.Stat(file); err != nil { - return nil, errors.Wrapf(err, "stat %s", file) - } - - f, err := os.OpenFile(filepath.Clean(file), os.O_RDONLY, 0600) - if err != nil { - return nil, err - } - - if off > 0 { - _, err := f.Seek(off, 0) - if err != nil { - return nil, errors.Wrapf(err, "seek %v", off) - } - } - - if length == -1 { - return f, nil - } - - return &rangeReaderCloser{Reader: io.LimitReader(f, length), f: f}, nil -} - -// Exists checks if the given directory exists in memory. 
-func (b *Bucket) Exists(_ context.Context, name string) (bool, error) { - info, err := os.Stat(filepath.Join(b.rootDir, name)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, errors.Wrapf(err, "stat %s", filepath.Join(b.rootDir, name)) - } - return !info.IsDir(), nil -} - -// Upload writes the file specified in src to into the memory. -func (b *Bucket) Upload(_ context.Context, name string, r io.Reader) (err error) { - file := filepath.Join(b.rootDir, name) - if err := os.MkdirAll(filepath.Dir(file), os.ModePerm); err != nil { - return err - } - - f, err := os.Create(file) - if err != nil { - return err - } - defer runutil.CloseWithErrCapture(&err, f, "close") - - if _, err := io.Copy(f, r); err != nil { - return errors.Wrapf(err, "copy to %s", file) - } - return nil -} - -func isDirEmpty(name string) (ok bool, err error) { - f, err := os.Open(filepath.Clean(name)) - if os.IsNotExist(err) { - // The directory doesn't exist. We don't consider it an error and we treat it like empty. - return true, nil - } - if err != nil { - return false, err - } - defer runutil.CloseWithErrCapture(&err, f, "dir open") - - if _, err = f.Readdir(1); err == io.EOF || os.IsNotExist(err) { - return true, nil - } - return false, err -} - -// Delete removes all data prefixed with the dir. -func (b *Bucket) Delete(_ context.Context, name string) error { - file := filepath.Join(b.rootDir, name) - for file != b.rootDir { - if err := os.RemoveAll(file); err != nil { - return errors.Wrapf(err, "rm %s", file) - } - file = filepath.Dir(file) - empty, err := isDirEmpty(file) - if err != nil { - return err - } - if !empty { - break - } - } - return nil -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. -func (b *Bucket) IsObjNotFoundErr(err error) bool { - return os.IsNotExist(errors.Cause(err)) -} - -func (b *Bucket) Close() error { return nil } - -// Name returns the bucket name. 
-func (b *Bucket) Name() string { - return fmt.Sprintf("fs: %s", b.rootDir) -} diff --git a/pkg/objstore/filesystem/filesystem_test.go b/pkg/objstore/filesystem/filesystem_test.go deleted file mode 100644 index 7dcf3e3ea5..0000000000 --- a/pkg/objstore/filesystem/filesystem_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package filesystem - -import ( - "context" - "strings" - "sync" - "testing" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -func TestDelete_EmptyDirDeletionRaceCondition(t *testing.T) { - const runs = 1000 - - ctx := context.Background() - - for r := 0; r < runs; r++ { - b, err := NewBucket(t.TempDir()) - testutil.Ok(t, err) - - // Upload 2 objects in a subfolder. - testutil.Ok(t, b.Upload(ctx, "subfolder/first", strings.NewReader("first"))) - testutil.Ok(t, b.Upload(ctx, "subfolder/second", strings.NewReader("second"))) - - // Prepare goroutines to concurrently delete the 2 objects (each one deletes a different object) - start := make(chan struct{}) - group := sync.WaitGroup{} - group.Add(2) - - for _, object := range []string{"first", "second"} { - go func(object string) { - defer group.Done() - - <-start - testutil.Ok(t, b.Delete(ctx, "subfolder/"+object)) - }(object) - } - - // Go! - close(start) - group.Wait() - } -} diff --git a/pkg/objstore/gcs/gcs.go b/pkg/objstore/gcs/gcs.go deleted file mode 100644 index ce93f42c0c..0000000000 --- a/pkg/objstore/gcs/gcs.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -// Package gcs implements common object storage abstractions against Google Cloud Storage. 
-package gcs - -import ( - "context" - "fmt" - "io" - "runtime" - "strings" - "testing" - - "cloud.google.com/go/storage" - "github.com/go-kit/log" - "github.com/pkg/errors" - "github.com/prometheus/common/version" - "golang.org/x/oauth2/google" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/objstore" -) - -// DirDelim is the delimiter used to model a directory structure in an object store bucket. -const DirDelim = "/" - -// Config stores the configuration for gcs bucket. -type Config struct { - Bucket string `yaml:"bucket"` - ServiceAccount string `yaml:"service_account"` -} - -// Bucket implements the store.Bucket and shipper.Bucket interfaces against GCS. -type Bucket struct { - logger log.Logger - bkt *storage.BucketHandle - name string - - closer io.Closer -} - -// NewBucket returns a new Bucket against the given bucket handle. -func NewBucket(ctx context.Context, logger log.Logger, conf []byte, component string) (*Bucket, error) { - var gc Config - if err := yaml.Unmarshal(conf, &gc); err != nil { - return nil, err - } - - return NewBucketWithConfig(ctx, logger, gc, component) -} - -// NewBucketWithConfig returns a new Bucket with gcs Config struct. -func NewBucketWithConfig(ctx context.Context, logger log.Logger, gc Config, component string) (*Bucket, error) { - if gc.Bucket == "" { - return nil, errors.New("missing Google Cloud Storage bucket name for stored blocks") - } - - var opts []option.ClientOption - - // If ServiceAccount is provided, use them in GCS client, otherwise fallback to Google default logic. 
- if gc.ServiceAccount != "" { - credentials, err := google.CredentialsFromJSON(ctx, []byte(gc.ServiceAccount), storage.ScopeFullControl) - if err != nil { - return nil, errors.Wrap(err, "failed to create credentials from JSON") - } - opts = append(opts, option.WithCredentials(credentials)) - } - - opts = append(opts, - option.WithUserAgent(fmt.Sprintf("thanos-%s/%s (%s)", component, version.Version, runtime.Version())), - ) - - gcsClient, err := storage.NewClient(ctx, opts...) - if err != nil { - return nil, err - } - bkt := &Bucket{ - logger: logger, - bkt: gcsClient.Bucket(gc.Bucket), - closer: gcsClient, - name: gc.Bucket, - } - return bkt, nil -} - -// Name returns the bucket name for gcs. -func (b *Bucket) Name() string { - return b.name -} - -// Iter calls f for each entry in the given directory. The argument to f is the full -// object name including the prefix of the inspected directory. -func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { - // Ensure the object name actually ends with a dir suffix. Otherwise we'll just iterate the - // object itself as one prefix item. - if dir != "" { - dir = strings.TrimSuffix(dir, DirDelim) + DirDelim - } - - // If recursive iteration is enabled we should pass an empty delimiter. - delimiter := DirDelim - if objstore.ApplyIterOptions(options...).Recursive { - delimiter = "" - } - - it := b.bkt.Objects(ctx, &storage.Query{ - Prefix: dir, - Delimiter: delimiter, - }) - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - attrs, err := it.Next() - if err == iterator.Done { - return nil - } - if err != nil { - return err - } - if err := f(attrs.Prefix + attrs.Name); err != nil { - return err - } - } -} - -// Get returns a reader for the given object name. 
-func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return b.bkt.Object(name).NewReader(ctx) -} - -// GetRange returns a new range reader for the given object name and range. -func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - return b.bkt.Object(name).NewRangeReader(ctx, off, length) -} - -// Attributes returns information about the specified object. -func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { - attrs, err := b.bkt.Object(name).Attrs(ctx) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - return objstore.ObjectAttributes{ - Size: attrs.Size, - LastModified: attrs.Updated, - }, nil -} - -// Handle returns the underlying GCS bucket handle. -// Used for testing purposes (we return handle, so it is not instrumented). -func (b *Bucket) Handle() *storage.BucketHandle { - return b.bkt -} - -// Exists checks if the given object exists. -func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) { - if _, err := b.bkt.Object(name).Attrs(ctx); err == nil { - return true, nil - } else if err != storage.ErrObjectNotExist { - return false, err - } - return false, nil -} - -// Upload writes the file specified in src to remote GCS location specified as target. -func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { - w := b.bkt.Object(name).NewWriter(ctx) - - if _, err := io.Copy(w, r); err != nil { - return err - } - return w.Close() -} - -// Delete removes the object with the given name. -func (b *Bucket) Delete(ctx context.Context, name string) error { - return b.bkt.Object(name).Delete(ctx) -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. 
-func (b *Bucket) IsObjNotFoundErr(err error) bool { - return errors.Is(err, storage.ErrObjectNotExist) -} - -func (b *Bucket) Close() error { - return b.closer.Close() -} - -// NewTestBucket creates test bkt client that before returning creates temporary bucket. -// In a close function it empties and deletes the bucket. -func NewTestBucket(t testing.TB, project string) (objstore.Bucket, func(), error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - gTestConfig := Config{ - Bucket: objstore.CreateTemporaryTestBucketName(t), - } - - bc, err := yaml.Marshal(gTestConfig) - if err != nil { - return nil, nil, err - } - - b, err := NewBucket(ctx, log.NewNopLogger(), bc, "thanos-e2e-test") - if err != nil { - return nil, nil, err - } - - if err = b.bkt.Create(ctx, project, nil); err != nil { - _ = b.Close() - return nil, nil, err - } - - t.Log("created temporary GCS bucket for GCS tests with name", b.name, "in project", project) - return b, func() { - objstore.EmptyBucket(t, ctx, b) - if err := b.bkt.Delete(ctx); err != nil { - t.Logf("deleting bucket failed: %s", err) - } - if err := b.Close(); err != nil { - t.Logf("closing bucket failed: %s", err) - } - }, nil -} diff --git a/pkg/objstore/gcs/gcs_test.go b/pkg/objstore/gcs/gcs_test.go deleted file mode 100644 index 417c1fe4b1..0000000000 --- a/pkg/objstore/gcs/gcs_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
- -package gcs - -import ( - "context" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "testing" - - "github.com/go-kit/log" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -func TestBucket_Get_ShouldReturnErrorIfServerTruncateResponse(t *testing.T) { - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT") - w.Header().Set("Content-Length", "100") - - // Write less bytes than the content length. - _, err := w.Write([]byte("12345")) - testutil.Ok(t, err) - })) - defer srv.Close() - - os.Setenv("STORAGE_EMULATOR_HOST", srv.Listener.Addr().String()) - - cfg := Config{ - Bucket: "test-bucket", - ServiceAccount: "", - } - - bkt, err := NewBucketWithConfig(context.Background(), log.NewNopLogger(), cfg, "test") - testutil.Ok(t, err) - - reader, err := bkt.Get(context.Background(), "test") - testutil.Ok(t, err) - - // We expect an error when reading back. - _, err = ioutil.ReadAll(reader) - testutil.Equals(t, io.ErrUnexpectedEOF, err) -} diff --git a/pkg/objstore/inmem.go b/pkg/objstore/inmem.go deleted file mode 100644 index f90ed6d90c..0000000000 --- a/pkg/objstore/inmem.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package objstore - -import ( - "bytes" - "context" - "io" - "io/ioutil" - "sort" - "strings" - "sync" - "time" - - "github.com/pkg/errors" -) - -var errNotFound = errors.New("inmem: object not found") - -// InMemBucket implements the objstore.Bucket interfaces against local memory. -// Methods from Bucket interface are thread-safe. Objects are assumed to be immutable. -type InMemBucket struct { - mtx sync.RWMutex - objects map[string][]byte - attrs map[string]ObjectAttributes -} - -// NewInMemBucket returns a new in memory Bucket. -// NOTE: Returned bucket is just a naive in memory bucket implementation. For test use cases only. 
-func NewInMemBucket() *InMemBucket { - return &InMemBucket{ - objects: map[string][]byte{}, - attrs: map[string]ObjectAttributes{}, - } -} - -// Objects returns internally stored objects. -// NOTE: For assert purposes. -func (b *InMemBucket) Objects() map[string][]byte { - return b.objects -} - -// Iter calls f for each entry in the given directory. The argument to f is the full -// object name including the prefix of the inspected directory. -func (b *InMemBucket) Iter(_ context.Context, dir string, f func(string) error, options ...IterOption) error { - unique := map[string]struct{}{} - params := ApplyIterOptions(options...) - - var dirPartsCount int - dirParts := strings.SplitAfter(dir, DirDelim) - for _, p := range dirParts { - if p == "" { - continue - } - dirPartsCount++ - } - - b.mtx.RLock() - for filename := range b.objects { - if !strings.HasPrefix(filename, dir) || dir == filename { - continue - } - - if params.Recursive { - // Any object matching the prefix should be included. - unique[filename] = struct{}{} - continue - } - - parts := strings.SplitAfter(filename, DirDelim) - unique[strings.Join(parts[:dirPartsCount+1], "")] = struct{}{} - } - b.mtx.RUnlock() - - var keys []string - for n := range unique { - keys = append(keys, n) - } - sort.Slice(keys, func(i, j int) bool { - if strings.HasSuffix(keys[i], DirDelim) && strings.HasSuffix(keys[j], DirDelim) { - return strings.Compare(keys[i], keys[j]) < 0 - } - if strings.HasSuffix(keys[i], DirDelim) { - return false - } - if strings.HasSuffix(keys[j], DirDelim) { - return true - } - - return strings.Compare(keys[i], keys[j]) < 0 - }) - - for _, k := range keys { - if err := f(k); err != nil { - return err - } - } - return nil -} - -// Get returns a reader for the given object name. 
-func (b *InMemBucket) Get(_ context.Context, name string) (io.ReadCloser, error) { - if name == "" { - return nil, errors.New("inmem: object name is empty") - } - - b.mtx.RLock() - file, ok := b.objects[name] - b.mtx.RUnlock() - if !ok { - return nil, errNotFound - } - - return ioutil.NopCloser(bytes.NewReader(file)), nil -} - -// GetRange returns a new range reader for the given object name and range. -func (b *InMemBucket) GetRange(_ context.Context, name string, off, length int64) (io.ReadCloser, error) { - if name == "" { - return nil, errors.New("inmem: object name is empty") - } - - b.mtx.RLock() - file, ok := b.objects[name] - b.mtx.RUnlock() - if !ok { - return nil, errNotFound - } - - if int64(len(file)) < off { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - - if length == -1 { - return ioutil.NopCloser(bytes.NewReader(file[off:])), nil - } - - if length <= 0 { - return ioutil.NopCloser(bytes.NewReader(nil)), errors.New("length cannot be smaller or equal 0") - } - - if int64(len(file)) <= off+length { - // Just return maximum of what we have. - length = int64(len(file)) - off - } - - return ioutil.NopCloser(bytes.NewReader(file[off : off+length])), nil -} - -// Exists checks if the given directory exists in memory. -func (b *InMemBucket) Exists(_ context.Context, name string) (bool, error) { - b.mtx.RLock() - defer b.mtx.RUnlock() - _, ok := b.objects[name] - return ok, nil -} - -// Attributes returns information about the specified object. -func (b *InMemBucket) Attributes(_ context.Context, name string) (ObjectAttributes, error) { - b.mtx.RLock() - attrs, ok := b.attrs[name] - b.mtx.RUnlock() - if !ok { - return ObjectAttributes{}, errNotFound - } - return attrs, nil -} - -// Upload writes the file specified in src to into the memory. 
-func (b *InMemBucket) Upload(_ context.Context, name string, r io.Reader) error { - b.mtx.Lock() - defer b.mtx.Unlock() - body, err := ioutil.ReadAll(r) - if err != nil { - return err - } - b.objects[name] = body - b.attrs[name] = ObjectAttributes{ - Size: int64(len(body)), - LastModified: time.Now(), - } - return nil -} - -// Delete removes all data prefixed with the dir. -func (b *InMemBucket) Delete(_ context.Context, name string) error { - b.mtx.Lock() - defer b.mtx.Unlock() - if _, ok := b.objects[name]; !ok { - return errNotFound - } - delete(b.objects, name) - delete(b.attrs, name) - return nil -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. -func (b *InMemBucket) IsObjNotFoundErr(err error) bool { - return errors.Is(err, errNotFound) -} - -func (b *InMemBucket) Close() error { return nil } - -// Name returns the bucket name. -func (b *InMemBucket) Name() string { - return "inmem" -} diff --git a/pkg/objstore/objstore.go b/pkg/objstore/objstore.go deleted file mode 100644 index 8bf665d105..0000000000 --- a/pkg/objstore/objstore.go +++ /dev/null @@ -1,660 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package objstore - -import ( - "bytes" - "context" - "io" - "io/fs" - "os" - "path" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "golang.org/x/sync/errgroup" - - "github.com/thanos-io/thanos/pkg/runutil" -) - -const ( - OpIter = "iter" - OpGet = "get" - OpGetRange = "get_range" - OpExists = "exists" - OpUpload = "upload" - OpDelete = "delete" - OpAttributes = "attributes" -) - -// Bucket provides read and write access to an object storage bucket. -// NOTE: We assume strong consistency for write-read flow. 
-type Bucket interface { - io.Closer - BucketReader - - // Upload the contents of the reader as an object into the bucket. - // Upload should be idempotent. - Upload(ctx context.Context, name string, r io.Reader) error - - // Delete removes the object with the given name. - // If object does not exists in the moment of deletion, Delete should throw error. - Delete(ctx context.Context, name string) error - - // Name returns the bucket name for the provider. - Name() string -} - -// InstrumentedBucket is a Bucket with optional instrumentation control on reader. -type InstrumentedBucket interface { - Bucket - - // WithExpectedErrs allows to specify a filter that marks certain errors as expected, so it will not increment - // thanos_objstore_bucket_operation_failures_total metric. - WithExpectedErrs(IsOpFailureExpectedFunc) Bucket - - // ReaderWithExpectedErrs allows to specify a filter that marks certain errors as expected, so it will not increment - // thanos_objstore_bucket_operation_failures_total metric. - // TODO(bwplotka): Remove this when moved to Go 1.14 and replace with InstrumentedBucketReader. - ReaderWithExpectedErrs(IsOpFailureExpectedFunc) BucketReader -} - -// BucketReader provides read access to an object storage bucket. -type BucketReader interface { - // Iter calls f for each entry in the given directory (not recursive.). The argument to f is the full - // object name including the prefix of the inspected directory. - // Entries are passed to function in sorted order. - Iter(ctx context.Context, dir string, f func(string) error, options ...IterOption) error - - // Get returns a reader for the given object name. - Get(ctx context.Context, name string) (io.ReadCloser, error) - - // GetRange returns a new range reader for the given object name and range. - GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) - - // Exists checks if the given object exists in the bucket. 
- Exists(ctx context.Context, name string) (bool, error) - - // IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. - IsObjNotFoundErr(err error) bool - - // Attributes returns information about the specified object. - Attributes(ctx context.Context, name string) (ObjectAttributes, error) -} - -// InstrumentedBucket is a BucketReader with optional instrumentation control. -type InstrumentedBucketReader interface { - BucketReader - - // ReaderWithExpectedErrs allows to specify a filter that marks certain errors as expected, so it will not increment - // thanos_objstore_bucket_operation_failures_total metric. - ReaderWithExpectedErrs(IsOpFailureExpectedFunc) BucketReader -} - -// IterOption configures the provided params. -type IterOption func(params *IterParams) - -// WithRecursiveIter is an option that can be applied to Iter() to recursively list objects -// in the bucket. -func WithRecursiveIter(params *IterParams) { - params.Recursive = true -} - -// IterParams holds the Iter() parameters and is used by objstore clients implementations. -type IterParams struct { - Recursive bool -} - -func ApplyIterOptions(options ...IterOption) IterParams { - out := IterParams{} - for _, opt := range options { - opt(&out) - } - return out -} - -// DownloadOption configures the provided params. -type DownloadOption func(params *downloadParams) - -// downloadParams holds the DownloadDir() parameters and is used by objstore clients implementations. -type downloadParams struct { - concurrency int - ignoredPaths []string -} - -// WithDownloadIgnoredPaths is an option to set the paths to not be downloaded. -func WithDownloadIgnoredPaths(ignoredPaths ...string) DownloadOption { - return func(params *downloadParams) { - params.ignoredPaths = ignoredPaths - } -} - -// WithFetchConcurrency is an option to set the concurrency of the download operation. 
-func WithFetchConcurrency(concurrency int) DownloadOption { - return func(params *downloadParams) { - params.concurrency = concurrency - } -} - -func applyDownloadOptions(options ...DownloadOption) downloadParams { - out := downloadParams{ - concurrency: 1, - } - for _, opt := range options { - opt(&out) - } - return out -} - -// UploadOption configures the provided params. -type UploadOption func(params *uploadParams) - -// uploadParams holds the UploadDir() parameters and is used by objstore clients implementations. -type uploadParams struct { - concurrency int -} - -// WithUploadConcurrency is an option to set the concurrency of the upload operation. -func WithUploadConcurrency(concurrency int) UploadOption { - return func(params *uploadParams) { - params.concurrency = concurrency - } -} - -func applyUploadOptions(options ...UploadOption) uploadParams { - out := uploadParams{ - concurrency: 1, - } - for _, opt := range options { - opt(&out) - } - return out -} - -type ObjectAttributes struct { - // Size is the object size in bytes. - Size int64 `json:"size"` - - // LastModified is the timestamp the object was last modified. - LastModified time.Time `json:"last_modified"` -} - -// TryToGetSize tries to get upfront size from reader. -// Some implementations may return only size of unread data in the reader, so it's best to call this method before -// doing any reading. -// -// TODO(https://github.com/thanos-io/thanos/issues/678): Remove guessing length when minio provider will support multipart upload without this. -func TryToGetSize(r io.Reader) (int64, error) { - switch f := r.(type) { - case *os.File: - fileInfo, err := f.Stat() - if err != nil { - return 0, errors.Wrap(err, "os.File.Stat()") - } - return fileInfo.Size(), nil - case *bytes.Buffer: - return int64(f.Len()), nil - case *bytes.Reader: - // Returns length of unread data only. 
- return int64(f.Len()), nil - case *strings.Reader: - return f.Size(), nil - case ObjectSizer: - return f.ObjectSize() - } - return 0, errors.Errorf("unsupported type of io.Reader: %T", r) -} - -// ObjectSizer can return size of object. -type ObjectSizer interface { - // ObjectSize returns the size of the object in bytes, or error if it is not available. - ObjectSize() (int64, error) -} - -type nopCloserWithObjectSize struct{ io.Reader } - -func (nopCloserWithObjectSize) Close() error { return nil } -func (n nopCloserWithObjectSize) ObjectSize() (int64, error) { return TryToGetSize(n.Reader) } - -// NopCloserWithSize returns a ReadCloser with a no-op Close method wrapping -// the provided Reader r. Returned ReadCloser also implements Size method. -func NopCloserWithSize(r io.Reader) io.ReadCloser { - return nopCloserWithObjectSize{r} -} - -// UploadDir uploads all files in srcdir to the bucket with into a top-level directory -// named dstdir. It is a caller responsibility to clean partial upload in case of failure. -func UploadDir(ctx context.Context, logger log.Logger, bkt Bucket, srcdir, dstdir string, options ...UploadOption) error { - df, err := os.Stat(srcdir) - opts := applyUploadOptions(options...) - - // The derived Context is canceled the first time a function passed to Go returns a non-nil error or the first - // time Wait returns, whichever occurs first. 
- g, ctx := errgroup.WithContext(ctx) - g.SetLimit(opts.concurrency) - - if err != nil { - return errors.Wrap(err, "stat dir") - } - if !df.IsDir() { - return errors.Errorf("%s is not a directory", srcdir) - } - err = filepath.WalkDir(srcdir, func(src string, d fs.DirEntry, err error) error { - g.Go(func() error { - if err != nil { - return err - } - if d.IsDir() { - return nil - } - srcRel, err := filepath.Rel(srcdir, src) - if err != nil { - return errors.Wrap(err, "getting relative path") - } - - dst := path.Join(dstdir, filepath.ToSlash(srcRel)) - return UploadFile(ctx, logger, bkt, src, dst) - }) - - return nil - }) - - if err == nil { - err = g.Wait() - } - - return err -} - -// UploadFile uploads the file with the given name to the bucket. -// It is a caller responsibility to clean partial upload in case of failure. -func UploadFile(ctx context.Context, logger log.Logger, bkt Bucket, src, dst string) error { - r, err := os.Open(filepath.Clean(src)) - if err != nil { - return errors.Wrapf(err, "open file %s", src) - } - defer runutil.CloseWithLogOnErr(logger, r, "close file %s", src) - - if err := bkt.Upload(ctx, dst, r); err != nil { - return errors.Wrapf(err, "upload file %s as %s", src, dst) - } - level.Debug(logger).Log("msg", "uploaded file", "from", src, "dst", dst, "bucket", bkt.Name()) - return nil -} - -// DirDelim is the delimiter used to model a directory structure in an object store bucket. -const DirDelim = "/" - -// DownloadFile downloads the src file from the bucket to dst. If dst is an existing -// directory, a file with the same name as the source is created in dst. -// If destination file is already existing, download file will overwrite it. 
-func DownloadFile(ctx context.Context, logger log.Logger, bkt BucketReader, src, dst string) (err error) { - if fi, err := os.Stat(dst); err == nil { - if fi.IsDir() { - dst = filepath.Join(dst, filepath.Base(src)) - } - } else if !os.IsNotExist(err) { - return err - } - - rc, err := bkt.Get(ctx, src) - if err != nil { - return errors.Wrapf(err, "get file %s", src) - } - defer runutil.CloseWithLogOnErr(logger, rc, "download block's file reader") - - f, err := os.Create(dst) - if err != nil { - return errors.Wrap(err, "create file") - } - defer func() { - if err != nil { - if rerr := os.Remove(dst); rerr != nil { - level.Warn(logger).Log("msg", "failed to remove partially downloaded file", "file", dst, "err", rerr) - } - } - }() - defer runutil.CloseWithLogOnErr(logger, f, "download block's output file") - - if _, err = io.Copy(f, rc); err != nil { - return errors.Wrap(err, "copy object to file") - } - return nil -} - -// DownloadDir downloads all object found in the directory into the local directory. -func DownloadDir(ctx context.Context, logger log.Logger, bkt BucketReader, originalSrc, src, dst string, options ...DownloadOption) error { - if err := os.MkdirAll(dst, 0750); err != nil { - return errors.Wrap(err, "create dir") - } - opts := applyDownloadOptions(options...) - - // The derived Context is canceled the first time a function passed to Go returns a non-nil error or the first - // time Wait returns, whichever occurs first. 
- g, ctx := errgroup.WithContext(ctx) - g.SetLimit(opts.concurrency) - - var downloadedFiles []string - var m sync.Mutex - - err := bkt.Iter(ctx, src, func(name string) error { - g.Go(func() error { - dst := filepath.Join(dst, filepath.Base(name)) - if strings.HasSuffix(name, DirDelim) { - if err := DownloadDir(ctx, logger, bkt, originalSrc, name, dst, options...); err != nil { - return err - } - m.Lock() - downloadedFiles = append(downloadedFiles, dst) - m.Unlock() - return nil - } - for _, ignoredPath := range opts.ignoredPaths { - if ignoredPath == strings.TrimPrefix(name, string(originalSrc)+DirDelim) { - level.Debug(logger).Log("msg", "not downloading again because a provided path matches this one", "file", name) - return nil - } - } - if err := DownloadFile(ctx, logger, bkt, name, dst); err != nil { - return err - } - - m.Lock() - downloadedFiles = append(downloadedFiles, dst) - m.Unlock() - return nil - }) - return nil - }) - - if err == nil { - err = g.Wait() - } - - if err != nil { - downloadedFiles = append(downloadedFiles, dst) // Last, clean up the root dst directory. - // Best-effort cleanup if the download failed. - for _, f := range downloadedFiles { - if rerr := os.Remove(f); rerr != nil { - level.Warn(logger).Log("msg", "failed to remove file on partial dir download error", "file", f, "err", rerr) - } - } - return err - } - - return nil -} - -// IsOpFailureExpectedFunc allows to mark certain errors as expected, so they will not increment thanos_objstore_bucket_operation_failures_total metric. -type IsOpFailureExpectedFunc func(error) bool - -var _ InstrumentedBucket = &metricBucket{} - -// BucketWithMetrics takes a bucket and registers metrics with the given registry for -// operations run against the bucket. 
-func BucketWithMetrics(name string, b Bucket, reg prometheus.Registerer) *metricBucket { - bkt := &metricBucket{ - bkt: b, - isOpFailureExpected: func(err error) bool { return false }, - ops: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_objstore_bucket_operations_total", - Help: "Total number of all attempted operations against a bucket.", - ConstLabels: prometheus.Labels{"bucket": name}, - }, []string{"operation"}), - - opsFailures: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_objstore_bucket_operation_failures_total", - Help: "Total number of operations against a bucket that failed, but were not expected to fail in certain way from caller perspective. Those errors have to be investigated.", - ConstLabels: prometheus.Labels{"bucket": name}, - }, []string{"operation"}), - - opsDuration: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ - Name: "thanos_objstore_bucket_operation_duration_seconds", - Help: "Duration of successful operations against the bucket", - ConstLabels: prometheus.Labels{"bucket": name}, - Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, - }, []string{"operation"}), - - lastSuccessfulUploadTime: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ - Name: "thanos_objstore_bucket_last_successful_upload_time", - Help: "Second timestamp of the last successful upload to the bucket.", - }, []string{"bucket"}), - } - for _, op := range []string{ - OpIter, - OpGet, - OpGetRange, - OpExists, - OpUpload, - OpDelete, - OpAttributes, - } { - bkt.ops.WithLabelValues(op) - bkt.opsFailures.WithLabelValues(op) - bkt.opsDuration.WithLabelValues(op) - } - bkt.lastSuccessfulUploadTime.WithLabelValues(b.Name()) - return bkt -} - -type metricBucket struct { - bkt Bucket - - ops *prometheus.CounterVec - opsFailures *prometheus.CounterVec - isOpFailureExpected IsOpFailureExpectedFunc - - opsDuration *prometheus.HistogramVec - lastSuccessfulUploadTime 
*prometheus.GaugeVec -} - -func (b *metricBucket) WithExpectedErrs(fn IsOpFailureExpectedFunc) Bucket { - return &metricBucket{ - bkt: b.bkt, - ops: b.ops, - opsFailures: b.opsFailures, - isOpFailureExpected: fn, - opsDuration: b.opsDuration, - lastSuccessfulUploadTime: b.lastSuccessfulUploadTime, - } -} - -func (b *metricBucket) ReaderWithExpectedErrs(fn IsOpFailureExpectedFunc) BucketReader { - return b.WithExpectedErrs(fn) -} - -func (b *metricBucket) Iter(ctx context.Context, dir string, f func(name string) error, options ...IterOption) error { - const op = OpIter - b.ops.WithLabelValues(op).Inc() - - err := b.bkt.Iter(ctx, dir, f, options...) - if err != nil { - if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { - b.opsFailures.WithLabelValues(op).Inc() - } - } - return err -} - -func (b *metricBucket) Attributes(ctx context.Context, name string) (ObjectAttributes, error) { - const op = OpAttributes - b.ops.WithLabelValues(op).Inc() - - start := time.Now() - attrs, err := b.bkt.Attributes(ctx, name) - if err != nil { - if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { - b.opsFailures.WithLabelValues(op).Inc() - } - return attrs, err - } - b.opsDuration.WithLabelValues(op).Observe(time.Since(start).Seconds()) - return attrs, nil -} - -func (b *metricBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - const op = OpGet - b.ops.WithLabelValues(op).Inc() - - rc, err := b.bkt.Get(ctx, name) - if err != nil { - if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { - b.opsFailures.WithLabelValues(op).Inc() - } - return nil, err - } - return newTimingReadCloser( - rc, - op, - b.opsDuration, - b.opsFailures, - b.isOpFailureExpected, - ), nil -} - -func (b *metricBucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - const op = OpGetRange - b.ops.WithLabelValues(op).Inc() - - rc, err := b.bkt.GetRange(ctx, name, off, length) - if err != nil { - if 
!b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { - b.opsFailures.WithLabelValues(op).Inc() - } - return nil, err - } - return newTimingReadCloser( - rc, - op, - b.opsDuration, - b.opsFailures, - b.isOpFailureExpected, - ), nil -} - -func (b *metricBucket) Exists(ctx context.Context, name string) (bool, error) { - const op = OpExists - b.ops.WithLabelValues(op).Inc() - - start := time.Now() - ok, err := b.bkt.Exists(ctx, name) - if err != nil { - if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { - b.opsFailures.WithLabelValues(op).Inc() - } - return false, err - } - b.opsDuration.WithLabelValues(op).Observe(time.Since(start).Seconds()) - return ok, nil -} - -func (b *metricBucket) Upload(ctx context.Context, name string, r io.Reader) error { - const op = OpUpload - b.ops.WithLabelValues(op).Inc() - - start := time.Now() - if err := b.bkt.Upload(ctx, name, r); err != nil { - if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { - b.opsFailures.WithLabelValues(op).Inc() - } - return err - } - b.lastSuccessfulUploadTime.WithLabelValues(b.bkt.Name()).SetToCurrentTime() - b.opsDuration.WithLabelValues(op).Observe(time.Since(start).Seconds()) - return nil -} - -func (b *metricBucket) Delete(ctx context.Context, name string) error { - const op = OpDelete - b.ops.WithLabelValues(op).Inc() - - start := time.Now() - if err := b.bkt.Delete(ctx, name); err != nil { - if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { - b.opsFailures.WithLabelValues(op).Inc() - } - return err - } - b.opsDuration.WithLabelValues(op).Observe(time.Since(start).Seconds()) - - return nil -} - -func (b *metricBucket) IsObjNotFoundErr(err error) bool { - return b.bkt.IsObjNotFoundErr(err) -} - -func (b *metricBucket) Close() error { - return b.bkt.Close() -} - -func (b *metricBucket) Name() string { - return b.bkt.Name() -} - -type timingReadCloser struct { - io.ReadCloser - objSize int64 - objSizeErr error - - alreadyGotErr bool - - start 
time.Time - op string - duration *prometheus.HistogramVec - failed *prometheus.CounterVec - isFailureExpected IsOpFailureExpectedFunc -} - -func newTimingReadCloser(rc io.ReadCloser, op string, dur *prometheus.HistogramVec, failed *prometheus.CounterVec, isFailureExpected IsOpFailureExpectedFunc) *timingReadCloser { - // Initialize the metrics with 0. - dur.WithLabelValues(op) - failed.WithLabelValues(op) - objSize, objSizeErr := TryToGetSize(rc) - return &timingReadCloser{ - ReadCloser: rc, - objSize: objSize, - objSizeErr: objSizeErr, - start: time.Now(), - op: op, - duration: dur, - failed: failed, - isFailureExpected: isFailureExpected, - } -} - -func (t *timingReadCloser) ObjectSize() (int64, error) { - return t.objSize, t.objSizeErr -} - -func (rc *timingReadCloser) Close() error { - err := rc.ReadCloser.Close() - if !rc.alreadyGotErr && err != nil { - rc.failed.WithLabelValues(rc.op).Inc() - } - if !rc.alreadyGotErr && err == nil { - rc.duration.WithLabelValues(rc.op).Observe(time.Since(rc.start).Seconds()) - rc.alreadyGotErr = true - } - return err -} - -func (rc *timingReadCloser) Read(b []byte) (n int, err error) { - n, err = rc.ReadCloser.Read(b) - // Report metric just once. - if !rc.alreadyGotErr && err != nil && err != io.EOF { - if !rc.isFailureExpected(err) { - rc.failed.WithLabelValues(rc.op).Inc() - } - rc.alreadyGotErr = true - } - return n, err -} diff --git a/pkg/objstore/objstore_test.go b/pkg/objstore/objstore_test.go deleted file mode 100644 index 950fd82eb1..0000000000 --- a/pkg/objstore/objstore_test.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
- -package objstore - -import ( - "bytes" - "context" - "io" - "io/ioutil" - "os" - "strings" - "testing" - - "github.com/go-kit/log" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - promtest "github.com/prometheus/client_golang/prometheus/testutil" - "go.uber.org/atomic" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -func TestMetricBucket_Close(t *testing.T) { - bkt := BucketWithMetrics("abc", NewInMemBucket(), nil) - // Expected initialized metrics. - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.ops)) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.opsFailures)) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.opsDuration)) - - AcceptanceTest(t, bkt.WithExpectedErrs(bkt.IsObjNotFoundErr)) - testutil.Equals(t, float64(9), promtest.ToFloat64(bkt.ops.WithLabelValues(OpIter))) - testutil.Equals(t, float64(2), promtest.ToFloat64(bkt.ops.WithLabelValues(OpAttributes))) - testutil.Equals(t, float64(3), promtest.ToFloat64(bkt.ops.WithLabelValues(OpGet))) - testutil.Equals(t, float64(3), promtest.ToFloat64(bkt.ops.WithLabelValues(OpGetRange))) - testutil.Equals(t, float64(2), promtest.ToFloat64(bkt.ops.WithLabelValues(OpExists))) - testutil.Equals(t, float64(9), promtest.ToFloat64(bkt.ops.WithLabelValues(OpUpload))) - testutil.Equals(t, float64(3), promtest.ToFloat64(bkt.ops.WithLabelValues(OpDelete))) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.ops)) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpIter))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpAttributes))) - testutil.Equals(t, float64(1), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpGet))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpGetRange))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpExists))) - testutil.Equals(t, float64(0), 
promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpUpload))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpDelete))) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.opsFailures)) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.opsDuration)) - lastUpload := promtest.ToFloat64(bkt.lastSuccessfulUploadTime) - testutil.Assert(t, lastUpload > 0, "last upload not greater than 0, val: %f", lastUpload) - - // Clear bucket, but don't clear metrics to ensure we use same. - bkt.bkt = NewInMemBucket() - AcceptanceTest(t, bkt) - testutil.Equals(t, float64(18), promtest.ToFloat64(bkt.ops.WithLabelValues(OpIter))) - testutil.Equals(t, float64(4), promtest.ToFloat64(bkt.ops.WithLabelValues(OpAttributes))) - testutil.Equals(t, float64(6), promtest.ToFloat64(bkt.ops.WithLabelValues(OpGet))) - testutil.Equals(t, float64(6), promtest.ToFloat64(bkt.ops.WithLabelValues(OpGetRange))) - testutil.Equals(t, float64(4), promtest.ToFloat64(bkt.ops.WithLabelValues(OpExists))) - testutil.Equals(t, float64(18), promtest.ToFloat64(bkt.ops.WithLabelValues(OpUpload))) - testutil.Equals(t, float64(6), promtest.ToFloat64(bkt.ops.WithLabelValues(OpDelete))) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.ops)) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpIter))) - // Not expected not found error here. - testutil.Equals(t, float64(1), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpAttributes))) - // Not expected not found errors, this should increment failure metric on get for not found as well, so +2. 
- testutil.Equals(t, float64(3), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpGet))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpGetRange))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpExists))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpUpload))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpDelete))) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.opsFailures)) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.opsDuration)) - testutil.Assert(t, promtest.ToFloat64(bkt.lastSuccessfulUploadTime) > lastUpload) -} - -func TestTracingReader(t *testing.T) { - r := bytes.NewReader([]byte("hello world")) - tr := newTracingReadCloser(NopCloserWithSize(r), nil) - - size, err := TryToGetSize(tr) - - testutil.Ok(t, err) - testutil.Equals(t, int64(11), size) - - smallBuf := make([]byte, 4) - n, err := io.ReadFull(tr, smallBuf) - testutil.Ok(t, err) - testutil.Equals(t, 4, n) - - // Verify that size is still the same, after reading 4 bytes. - size, err = TryToGetSize(tr) - - testutil.Ok(t, err) - testutil.Equals(t, int64(11), size) -} - -func TestDownloadUploadDirConcurrency(t *testing.T) { - r := prometheus.NewRegistry() - m := BucketWithMetrics("", NewInMemBucket(), r) - tempDir := t.TempDir() - - testutil.Ok(t, m.Upload(context.Background(), "dir/obj1", bytes.NewReader([]byte("1")))) - testutil.Ok(t, m.Upload(context.Background(), "dir/obj2", bytes.NewReader([]byte("2")))) - testutil.Ok(t, m.Upload(context.Background(), "dir/obj3", bytes.NewReader([]byte("3")))) - - testutil.Ok(t, promtest.GatherAndCompare(r, strings.NewReader(` - # HELP thanos_objstore_bucket_operations_total Total number of all attempted operations against a bucket. 
- # TYPE thanos_objstore_bucket_operations_total counter - thanos_objstore_bucket_operations_total{bucket="",operation="attributes"} 0 - thanos_objstore_bucket_operations_total{bucket="",operation="delete"} 0 - thanos_objstore_bucket_operations_total{bucket="",operation="exists"} 0 - thanos_objstore_bucket_operations_total{bucket="",operation="get"} 0 - thanos_objstore_bucket_operations_total{bucket="",operation="get_range"} 0 - thanos_objstore_bucket_operations_total{bucket="",operation="iter"} 0 - thanos_objstore_bucket_operations_total{bucket="",operation="upload"} 3 - `), `thanos_objstore_bucket_operations_total`)) - - testutil.Ok(t, DownloadDir(context.Background(), log.NewNopLogger(), m, "dir/", "dir/", tempDir, WithFetchConcurrency(10))) - i, err := ioutil.ReadDir(tempDir) - testutil.Ok(t, err) - testutil.Assert(t, len(i) == 3) - testutil.Ok(t, promtest.GatherAndCompare(r, strings.NewReader(` - # HELP thanos_objstore_bucket_operations_total Total number of all attempted operations against a bucket. - # TYPE thanos_objstore_bucket_operations_total counter - thanos_objstore_bucket_operations_total{bucket="",operation="attributes"} 0 - thanos_objstore_bucket_operations_total{bucket="",operation="delete"} 0 - thanos_objstore_bucket_operations_total{bucket="",operation="exists"} 0 - thanos_objstore_bucket_operations_total{bucket="",operation="get"} 3 - thanos_objstore_bucket_operations_total{bucket="",operation="get_range"} 0 - thanos_objstore_bucket_operations_total{bucket="",operation="iter"} 1 - thanos_objstore_bucket_operations_total{bucket="",operation="upload"} 3 - `), `thanos_objstore_bucket_operations_total`)) - - testutil.Ok(t, UploadDir(context.Background(), log.NewNopLogger(), m, tempDir, "/dir-copy", WithUploadConcurrency(10))) - - testutil.Ok(t, promtest.GatherAndCompare(r, strings.NewReader(` - # HELP thanos_objstore_bucket_operations_total Total number of all attempted operations against a bucket. 
- # TYPE thanos_objstore_bucket_operations_total counter - thanos_objstore_bucket_operations_total{bucket="",operation="attributes"} 0 - thanos_objstore_bucket_operations_total{bucket="",operation="delete"} 0 - thanos_objstore_bucket_operations_total{bucket="",operation="exists"} 0 - thanos_objstore_bucket_operations_total{bucket="",operation="get"} 3 - thanos_objstore_bucket_operations_total{bucket="",operation="get_range"} 0 - thanos_objstore_bucket_operations_total{bucket="",operation="iter"} 1 - thanos_objstore_bucket_operations_total{bucket="",operation="upload"} 6 - `), `thanos_objstore_bucket_operations_total`)) -} - -func TestTimingTracingReader(t *testing.T) { - m := BucketWithMetrics("", NewInMemBucket(), nil) - r := bytes.NewReader([]byte("hello world")) - - tr := NopCloserWithSize(r) - tr = newTimingReadCloser(tr, "", m.opsDuration, m.opsFailures, func(err error) bool { - return false - }) - tr = newTracingReadCloser(tr, nil) - - size, err := TryToGetSize(tr) - - testutil.Ok(t, err) - testutil.Equals(t, int64(11), size) - - smallBuf := make([]byte, 4) - n, err := io.ReadFull(tr, smallBuf) - testutil.Ok(t, err) - testutil.Equals(t, 4, n) - - // Verify that size is still the same, after reading 4 bytes. 
- size, err = TryToGetSize(tr) - - testutil.Ok(t, err) - testutil.Equals(t, int64(11), size) -} - -func TestDownloadDir_CleanUp(t *testing.T) { - b := unreliableBucket{ - Bucket: NewInMemBucket(), - n: 3, - current: atomic.NewInt32(0), - } - tempDir := t.TempDir() - - testutil.Ok(t, b.Upload(context.Background(), "dir/obj1", bytes.NewReader([]byte("1")))) - testutil.Ok(t, b.Upload(context.Background(), "dir/obj2", bytes.NewReader([]byte("2")))) - testutil.Ok(t, b.Upload(context.Background(), "dir/obj3", bytes.NewReader([]byte("3")))) - - // We exapect the third Get to fail - testutil.NotOk(t, DownloadDir(context.Background(), log.NewNopLogger(), b, "dir/", "dir/", tempDir)) - _, err := os.Stat(tempDir) - testutil.Assert(t, os.IsNotExist(err)) -} - -// unreliableBucket implements Bucket and returns an error on every n-th Get. -type unreliableBucket struct { - Bucket - - n int32 - current *atomic.Int32 -} - -func (b unreliableBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - if b.current.Inc()%b.n == 0 { - return nil, errors.Errorf("some error message") - } - return b.Bucket.Get(ctx, name) -} diff --git a/pkg/objstore/objtesting/acceptance_e2e_test.go b/pkg/objstore/objtesting/acceptance_e2e_test.go deleted file mode 100644 index 4b2c6a2030..0000000000 --- a/pkg/objstore/objtesting/acceptance_e2e_test.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package objtesting - -import ( - "testing" - - "github.com/thanos-io/thanos/pkg/objstore" -) - -// TestObjStoreAcceptanceTest_e2e tests all known implementation against interface behavior contract we agreed on. -// This ensures consistent behavior across all implementations. -// NOTE: This test assumes strong consistency, but in the same way it does not guarantee that if it passes, the -// used object store is strongly consistent. 
-func TestObjStore_AcceptanceTest_e2e(t *testing.T) { - ForeachStore(t, objstore.AcceptanceTest) -} diff --git a/pkg/objstore/objtesting/foreach.go b/pkg/objstore/objtesting/foreach.go deleted file mode 100644 index 139e724271..0000000000 --- a/pkg/objstore/objtesting/foreach.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package objtesting - -import ( - "io/ioutil" - "os" - "strings" - "testing" - - "github.com/thanos-io/thanos/pkg/objstore/bos" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/filesystem" - - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/azure" - "github.com/thanos-io/thanos/pkg/objstore/cos" - "github.com/thanos-io/thanos/pkg/objstore/gcs" - "github.com/thanos-io/thanos/pkg/objstore/oss" - "github.com/thanos-io/thanos/pkg/objstore/s3" - "github.com/thanos-io/thanos/pkg/objstore/swift" - "github.com/thanos-io/thanos/pkg/testutil" -) - -// IsObjStoreSkipped returns true if given provider ID is found in THANOS_TEST_OBJSTORE_SKIP array delimited by comma e.g: -// THANOS_TEST_OBJSTORE_SKIP=GCS,S3,AZURE,SWIFT,COS,ALIYUNOSS,BOS. -func IsObjStoreSkipped(t *testing.T, provider client.ObjProvider) bool { - if e, ok := os.LookupEnv("THANOS_TEST_OBJSTORE_SKIP"); ok { - obstores := strings.Split(e, ",") - for _, objstore := range obstores { - if objstore == string(provider) { - t.Logf("%s found in THANOS_TEST_OBJSTORE_SKIP array. Skipping.", provider) - return true - } - } - } - - return false -} - -// ForeachStore runs given test using all available objstore implementations. -// For each it creates a new bucket with a random name and a cleanup function -// that deletes it after test was run. -// Use THANOS_TEST_OBJSTORE_SKIP to skip explicitly certain object storages. -func ForeachStore(t *testing.T, testFn func(t *testing.T, bkt objstore.Bucket)) { - t.Parallel() - - // Mandatory Inmem. 
Not parallel, to detect problem early. - if ok := t.Run("inmem", func(t *testing.T) { - testFn(t, objstore.NewInMemBucket()) - }); !ok { - return - } - - // Mandatory Filesystem. - t.Run("filesystem", func(t *testing.T) { - t.Parallel() - - dir, err := ioutil.TempDir("", "filesystem-foreach-store-test") - testutil.Ok(t, err) - defer testutil.Ok(t, os.RemoveAll(dir)) - - b, err := filesystem.NewBucket(dir) - testutil.Ok(t, err) - testFn(t, b) - testFn(t, objstore.NewPrefixedBucket(b, "some_prefix")) - }) - - // Optional GCS. - if !IsObjStoreSkipped(t, client.GCS) { - t.Run("gcs", func(t *testing.T) { - bkt, closeFn, err := gcs.NewTestBucket(t, os.Getenv("GCP_PROJECT")) - testutil.Ok(t, err) - - t.Parallel() - defer closeFn() - - // TODO(bwplotka): Add goleak when https://github.com/GoogleCloudPlatform/google-cloud-go/issues/1025 is resolved. - testFn(t, bkt) - testFn(t, objstore.NewPrefixedBucket(bkt, "some_prefix")) - }) - } - - // Optional S3. - if !IsObjStoreSkipped(t, client.S3) { - t.Run("aws s3", func(t *testing.T) { - // TODO(bwplotka): Allow taking location from envvar. - bkt, closeFn, err := s3.NewTestBucket(t, "us-west-2") - testutil.Ok(t, err) - - t.Parallel() - defer closeFn() - - // TODO(bwplotka): Add goleak when we fix potential leak in minio library. - // We cannot use goleak for detecting our own potential leaks, when goleak detects leaks in minio itself. - // This needs to be investigated more. - - testFn(t, bkt) - testFn(t, objstore.NewPrefixedBucket(bkt, "some_prefix")) - }) - } - - // Optional Azure. - if !IsObjStoreSkipped(t, client.AZURE) { - t.Run("azure", func(t *testing.T) { - bkt, closeFn, err := azure.NewTestBucket(t, "e2e-tests") - testutil.Ok(t, err) - - t.Parallel() - defer closeFn() - - testFn(t, bkt) - testFn(t, objstore.NewPrefixedBucket(bkt, "some_prefix")) - }) - } - - // Optional SWIFT. 
- if !IsObjStoreSkipped(t, client.SWIFT) { - t.Run("swift", func(t *testing.T) { - container, closeFn, err := swift.NewTestContainer(t) - testutil.Ok(t, err) - - t.Parallel() - defer closeFn() - - testFn(t, container) - testFn(t, objstore.NewPrefixedBucket(container, "some_prefix")) - }) - } - - // Optional COS. - if !IsObjStoreSkipped(t, client.COS) { - t.Run("Tencent cos", func(t *testing.T) { - bkt, closeFn, err := cos.NewTestBucket(t) - testutil.Ok(t, err) - - t.Parallel() - defer closeFn() - - testFn(t, bkt) - testFn(t, objstore.NewPrefixedBucket(bkt, "some_prefix")) - }) - } - - // Optional OSS. - if !IsObjStoreSkipped(t, client.ALIYUNOSS) { - t.Run("AliYun oss", func(t *testing.T) { - bkt, closeFn, err := oss.NewTestBucket(t) - testutil.Ok(t, err) - - t.Parallel() - defer closeFn() - - testFn(t, bkt) - testFn(t, objstore.NewPrefixedBucket(bkt, "some_prefix")) - }) - } - - // Optional BOS. - if !IsObjStoreSkipped(t, client.BOS) { - t.Run("Baidu BOS", func(t *testing.T) { - bkt, closeFn, err := bos.NewTestBucket(t) - testutil.Ok(t, err) - - t.Parallel() - defer closeFn() - - testFn(t, bkt) - testFn(t, objstore.NewPrefixedBucket(bkt, "some_prefix")) - }) - } -} diff --git a/pkg/objstore/oss/oss.go b/pkg/objstore/oss/oss.go deleted file mode 100644 index 5e96c3ddf1..0000000000 --- a/pkg/objstore/oss/oss.go +++ /dev/null @@ -1,380 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package oss - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "math" - "math/rand" - "net/http" - "os" - "strconv" - "strings" - "testing" - "time" - - alioss "github.com/aliyun/aliyun-oss-go-sdk/oss" - "github.com/go-kit/log" - "github.com/pkg/errors" - "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/clientutil" -) - -// Part size for multi part upload. -const PartSize = 1024 * 1024 * 128 - -// Config stores the configuration for oss bucket. 
-type Config struct { - Endpoint string `yaml:"endpoint"` - Bucket string `yaml:"bucket"` - AccessKeyID string `yaml:"access_key_id"` - AccessKeySecret string `yaml:"access_key_secret"` -} - -// Bucket implements the store.Bucket interface. -type Bucket struct { - name string - logger log.Logger - client *alioss.Client - config Config - bucket *alioss.Bucket -} - -func NewTestBucket(t testing.TB) (objstore.Bucket, func(), error) { - c := Config{ - Endpoint: os.Getenv("ALIYUNOSS_ENDPOINT"), - Bucket: os.Getenv("ALIYUNOSS_BUCKET"), - AccessKeyID: os.Getenv("ALIYUNOSS_ACCESS_KEY_ID"), - AccessKeySecret: os.Getenv("ALIYUNOSS_ACCESS_KEY_SECRET"), - } - - if c.Endpoint == "" || c.AccessKeyID == "" || c.AccessKeySecret == "" { - return nil, nil, errors.New("aliyun oss endpoint or access_key_id or access_key_secret " + - "is not present in config file") - } - if c.Bucket != "" && os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "true" { - t.Log("ALIYUNOSS_BUCKET is defined. Normally this tests will create temporary bucket " + - "and delete it after test. Unset ALIYUNOSS_BUCKET env variable to use default logic. If you really want to run " + - "tests against provided (NOT USED!) bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true.") - return NewTestBucketFromConfig(t, c, true) - } - return NewTestBucketFromConfig(t, c, false) -} - -// Upload the contents of the reader as an object into the bucket. -func (b *Bucket) Upload(_ context.Context, name string, r io.Reader) error { - // TODO(https://github.com/thanos-io/thanos/issues/678): Remove guessing length when minio provider will support multipart upload without this. 
- size, err := objstore.TryToGetSize(r) - if err != nil { - return errors.Wrapf(err, "failed to get size apriori to upload %s", name) - } - - chunksnum, lastslice := int(math.Floor(float64(size)/PartSize)), size%PartSize - - ncloser := ioutil.NopCloser(r) - switch chunksnum { - case 0: - if err := b.bucket.PutObject(name, ncloser); err != nil { - return errors.Wrap(err, "failed to upload oss object") - } - default: - { - init, err := b.bucket.InitiateMultipartUpload(name) - if err != nil { - return errors.Wrap(err, "failed to initiate multi-part upload") - } - chunk := 0 - uploadEveryPart := func(everypartsize int64, cnk int) (alioss.UploadPart, error) { - prt, err := b.bucket.UploadPart(init, ncloser, everypartsize, cnk) - if err != nil { - if err := b.bucket.AbortMultipartUpload(init); err != nil { - return prt, errors.Wrap(err, "failed to abort multi-part upload") - } - - return prt, errors.Wrap(err, "failed to upload multi-part chunk") - } - return prt, nil - } - var parts []alioss.UploadPart - for ; chunk < chunksnum; chunk++ { - part, err := uploadEveryPart(PartSize, chunk+1) - if err != nil { - return errors.Wrap(err, "failed to upload every part") - } - parts = append(parts, part) - } - if lastslice != 0 { - part, err := uploadEveryPart(lastslice, chunksnum+1) - if err != nil { - return errors.Wrap(err, "failed to upload the last chunk") - } - parts = append(parts, part) - } - if _, err := b.bucket.CompleteMultipartUpload(init, parts); err != nil { - return errors.Wrap(err, "failed to set multi-part upload completive") - } - } - } - return nil -} - -// Delete removes the object with the given name. -func (b *Bucket) Delete(ctx context.Context, name string) error { - if err := b.bucket.DeleteObject(name); err != nil { - return errors.Wrap(err, "delete oss object") - } - return nil -} - -// Attributes returns information about the specified object. 
-func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { - m, err := b.bucket.GetObjectMeta(name) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - size, err := clientutil.ParseContentLength(m) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - // aliyun oss return Last-Modified header in RFC1123 format. - // see api doc for details: https://www.alibabacloud.com/help/doc-detail/31985.htm - mod, err := clientutil.ParseLastModified(m, time.RFC1123) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - return objstore.ObjectAttributes{ - Size: size, - LastModified: mod, - }, nil -} - -// NewBucket returns a new Bucket using the provided oss config values. -func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) { - var config Config - if err := yaml.Unmarshal(conf, &config); err != nil { - return nil, errors.Wrap(err, "parse aliyun oss config file failed") - } - - return NewBucketWithConfig(logger, config, component) -} - -// NewBucketWithConfig returns a new Bucket using the provided oss config struct. -func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) { - if err := validate(config); err != nil { - return nil, err - } - - client, err := alioss.New(config.Endpoint, config.AccessKeyID, config.AccessKeySecret) - if err != nil { - return nil, errors.Wrap(err, "create aliyun oss client failed") - } - bk, err := client.Bucket(config.Bucket) - if err != nil { - return nil, errors.Wrapf(err, "use aliyun oss bucket %s failed", config.Bucket) - } - - bkt := &Bucket{ - logger: logger, - client: client, - name: config.Bucket, - config: config, - bucket: bk, - } - return bkt, nil -} - -// validate checks to see the config options are set. 
-func validate(config Config) error { - if config.Endpoint == "" || config.Bucket == "" { - return errors.New("aliyun oss endpoint or bucket is not present in config file") - } - if config.AccessKeyID == "" || config.AccessKeySecret == "" { - return errors.New("aliyun oss access_key_id or access_key_secret is not present in config file") - } - - return nil -} - -// Iter calls f for each entry in the given directory (not recursive). The argument to f is the full -// object name including the prefix of the inspected directory. -func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { - if dir != "" { - dir = strings.TrimSuffix(dir, objstore.DirDelim) + objstore.DirDelim - } - - delimiter := alioss.Delimiter(objstore.DirDelim) - if objstore.ApplyIterOptions(options...).Recursive { - delimiter = nil - } - - marker := alioss.Marker("") - for { - if err := ctx.Err(); err != nil { - return errors.Wrap(err, "context closed while iterating bucket") - } - objects, err := b.bucket.ListObjects(alioss.Prefix(dir), delimiter, marker) - if err != nil { - return errors.Wrap(err, "listing aliyun oss bucket failed") - } - marker = alioss.Marker(objects.NextMarker) - - for _, object := range objects.Objects { - if err := f(object.Key); err != nil { - return errors.Wrapf(err, "callback func invoke for object %s failed ", object.Key) - } - } - - for _, object := range objects.CommonPrefixes { - if err := f(object); err != nil { - return errors.Wrapf(err, "callback func invoke for directory %s failed", object) - } - } - if !objects.IsTruncated { - break - } - } - - return nil -} - -func (b *Bucket) Name() string { - return b.name -} - -func NewTestBucketFromConfig(t testing.TB, c Config, reuseBucket bool) (objstore.Bucket, func(), error) { - if c.Bucket == "" { - src := rand.NewSource(time.Now().UnixNano()) - - bktToCreate := strings.ReplaceAll(fmt.Sprintf("test_%s_%x", strings.ToLower(t.Name()), src.Int63()), "_", "-") - if 
len(bktToCreate) >= 63 { - bktToCreate = bktToCreate[:63] - } - testclient, err := alioss.New(c.Endpoint, c.AccessKeyID, c.AccessKeySecret) - if err != nil { - return nil, nil, errors.Wrap(err, "create aliyun oss client failed") - } - - if err := testclient.CreateBucket(bktToCreate); err != nil { - return nil, nil, errors.Wrapf(err, "create aliyun oss bucket %s failed", bktToCreate) - } - c.Bucket = bktToCreate - } - - bc, err := yaml.Marshal(c) - if err != nil { - return nil, nil, err - } - - b, err := NewBucket(log.NewNopLogger(), bc, "thanos-aliyun-oss-test") - if err != nil { - return nil, nil, err - } - - if reuseBucket { - if err := b.Iter(context.Background(), "", func(f string) error { - return errors.Errorf("bucket %s is not empty", c.Bucket) - }); err != nil { - return nil, nil, errors.Wrapf(err, "oss check bucket %s", c.Bucket) - } - - t.Log("WARNING. Reusing", c.Bucket, "Aliyun OSS bucket for OSS tests. Manual cleanup afterwards is required") - return b, func() {}, nil - } - - return b, func() { - objstore.EmptyBucket(t, context.Background(), b) - if err := b.client.DeleteBucket(c.Bucket); err != nil { - t.Logf("deleting bucket %s failed: %s", c.Bucket, err) - } - }, nil -} - -func (b *Bucket) Close() error { return nil } - -func (b *Bucket) setRange(start, end int64, name string) (alioss.Option, error) { - var opt alioss.Option - if 0 <= start && start <= end { - header, err := b.bucket.GetObjectMeta(name) - if err != nil { - return nil, err - } - - size, err := strconv.ParseInt(header["Content-Length"][0], 10, 64) - if err != nil { - return nil, err - } - - if end > size { - end = size - 1 - } - - opt = alioss.Range(start, end) - } else { - return nil, errors.Errorf("Invalid range specified: start=%d end=%d", start, end) - } - return opt, nil -} - -func (b *Bucket) getRange(_ context.Context, name string, off, length int64) (io.ReadCloser, error) { - if name == "" { - return nil, errors.New("given object name should not empty") - } - - var opts 
[]alioss.Option - if length != -1 { - opt, err := b.setRange(off, off+length-1, name) - if err != nil { - return nil, err - } - opts = append(opts, opt) - } - - resp, err := b.bucket.GetObject(name, opts...) - if err != nil { - return nil, err - } - - return resp, nil -} - -// Get returns a reader for the given object name. -func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return b.getRange(ctx, name, 0, -1) -} - -func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - return b.getRange(ctx, name, off, length) -} - -// Exists checks if the given object exists in the bucket. -func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) { - exists, err := b.bucket.IsObjectExist(name) - if err != nil { - if b.IsObjNotFoundErr(err) { - return false, nil - } - return false, errors.Wrap(err, "cloud not check if object exists") - } - - return exists, nil -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. -func (b *Bucket) IsObjNotFoundErr(err error) bool { - switch aliErr := errors.Cause(err).(type) { - case alioss.ServiceError: - if aliErr.StatusCode == http.StatusNotFound { - return true - } - } - return false -} diff --git a/pkg/objstore/prefixed_bucket.go b/pkg/objstore/prefixed_bucket.go deleted file mode 100644 index 130f14d439..0000000000 --- a/pkg/objstore/prefixed_bucket.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
- -package objstore - -import ( - "context" - "io" - "strings" -) - -type PrefixedBucket struct { - bkt Bucket - prefix string -} - -func NewPrefixedBucket(bkt Bucket, prefix string) Bucket { - if validPrefix(prefix) { - return &PrefixedBucket{bkt: bkt, prefix: strings.Trim(prefix, DirDelim)} - } - - return bkt -} - -func validPrefix(prefix string) bool { - prefix = strings.Replace(prefix, "/", "", -1) - return len(prefix) > 0 -} - -func conditionalPrefix(prefix, name string) string { - if len(name) > 0 { - return withPrefix(prefix, name) - } - - return name -} - -func withPrefix(prefix, name string) string { - return prefix + DirDelim + name -} - -func (p *PrefixedBucket) Close() error { - return p.bkt.Close() -} - -// Iter calls f for each entry in the given directory (not recursive.). The argument to f is the full -// object name including the prefix of the inspected directory. -// Entries are passed to function in sorted order. -func (p *PrefixedBucket) Iter(ctx context.Context, dir string, f func(string) error, options ...IterOption) error { - pdir := withPrefix(p.prefix, dir) - - return p.bkt.Iter(ctx, pdir, func(s string) error { - return f(strings.TrimPrefix(s, p.prefix+DirDelim)) - }, options...) -} - -// Get returns a reader for the given object name. -func (p *PrefixedBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return p.bkt.Get(ctx, conditionalPrefix(p.prefix, name)) -} - -// GetRange returns a new range reader for the given object name and range. -func (p *PrefixedBucket) GetRange(ctx context.Context, name string, off int64, length int64) (io.ReadCloser, error) { - return p.bkt.GetRange(ctx, conditionalPrefix(p.prefix, name), off, length) -} - -// Exists checks if the given object exists in the bucket. -func (p *PrefixedBucket) Exists(ctx context.Context, name string) (bool, error) { - return p.bkt.Exists(ctx, conditionalPrefix(p.prefix, name)) -} - -// IsObjNotFoundErr returns true if error means that object is not found. 
Relevant to Get operations. -func (p *PrefixedBucket) IsObjNotFoundErr(err error) bool { - return p.bkt.IsObjNotFoundErr(err) -} - -// Attributes returns information about the specified object. -func (p PrefixedBucket) Attributes(ctx context.Context, name string) (ObjectAttributes, error) { - return p.bkt.Attributes(ctx, conditionalPrefix(p.prefix, name)) -} - -// Upload the contents of the reader as an object into the bucket. -// Upload should be idempotent. -func (p *PrefixedBucket) Upload(ctx context.Context, name string, r io.Reader) error { - return p.bkt.Upload(ctx, conditionalPrefix(p.prefix, name), r) -} - -// Delete removes the object with the given name. -// If object does not exists in the moment of deletion, Delete should throw error. -func (p *PrefixedBucket) Delete(ctx context.Context, name string) error { - return p.bkt.Delete(ctx, conditionalPrefix(p.prefix, name)) -} - -// Name returns the bucket name for the provider. -func (p *PrefixedBucket) Name() string { - return p.bkt.Name() -} diff --git a/pkg/objstore/prefixed_bucket_test.go b/pkg/objstore/prefixed_bucket_test.go deleted file mode 100644 index 6e93583052..0000000000 --- a/pkg/objstore/prefixed_bucket_test.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
- -package objstore - -import ( - "context" - "io/ioutil" - "sort" - "strings" - "testing" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -func TestPrefixedBucket_Acceptance(t *testing.T) { - - prefixes := []string{ - "/someprefix/anotherprefix/", - "someprefix/anotherprefix/", - "someprefix/anotherprefix", - "someprefix/", - "someprefix"} - - for _, prefix := range prefixes { - AcceptanceTest(t, NewPrefixedBucket(NewInMemBucket(), prefix)) - UsesPrefixTest(t, NewInMemBucket(), prefix) - } -} - -func UsesPrefixTest(t *testing.T, bkt Bucket, prefix string) { - testutil.Ok(t, bkt.Upload(context.Background(), strings.Trim(prefix, "/")+"/file1.jpg", strings.NewReader("test-data1"))) - - pBkt := NewPrefixedBucket(bkt, prefix) - rc1, err := pBkt.Get(context.Background(), "file1.jpg") - testutil.Ok(t, err) - - testutil.Ok(t, err) - defer func() { testutil.Ok(t, rc1.Close()) }() - content, err := ioutil.ReadAll(rc1) - testutil.Ok(t, err) - testutil.Equals(t, "test-data1", string(content)) - - testutil.Ok(t, pBkt.Upload(context.Background(), "file2.jpg", strings.NewReader("test-data2"))) - rc2, err := bkt.Get(context.Background(), strings.Trim(prefix, "/")+"/file2.jpg") - testutil.Ok(t, err) - defer func() { testutil.Ok(t, rc2.Close()) }() - contentUpload, err := ioutil.ReadAll(rc2) - testutil.Ok(t, err) - testutil.Equals(t, "test-data2", string(contentUpload)) - - testutil.Ok(t, pBkt.Delete(context.Background(), "file2.jpg")) - _, err = bkt.Get(context.Background(), strings.Trim(prefix, "/")+"/file2.jpg") - testutil.NotOk(t, err) - testutil.Assert(t, pBkt.IsObjNotFoundErr(err), "expected not found error got %s", err) - - rc3, err := pBkt.GetRange(context.Background(), "file1.jpg", 1, 3) - testutil.Ok(t, err) - defer func() { testutil.Ok(t, rc3.Close()) }() - content, err = ioutil.ReadAll(rc3) - testutil.Ok(t, err) - testutil.Equals(t, "est", string(content)) - - ok, err := pBkt.Exists(context.Background(), "file1.jpg") - testutil.Ok(t, err) - testutil.Assert(t, ok, 
"expected exits") - - attrs, err := pBkt.Attributes(context.Background(), "file1.jpg") - testutil.Ok(t, err) - testutil.Assert(t, attrs.Size == 10, "expected size to be equal to 10") - - testutil.Ok(t, bkt.Upload(context.Background(), strings.Trim(prefix, "/")+"/dir/file1.jpg", strings.NewReader("test-data1"))) - seen := []string{} - testutil.Ok(t, pBkt.Iter(context.Background(), "", func(fn string) error { - seen = append(seen, fn) - return nil - }, WithRecursiveIter)) - expected := []string{"dir/file1.jpg", "file1.jpg"} - sort.Strings(expected) - sort.Strings(seen) - testutil.Equals(t, expected, seen) - - seen = []string{} - testutil.Ok(t, pBkt.Iter(context.Background(), "", func(fn string) error { - seen = append(seen, fn) - return nil - })) - expected = []string{"dir/", "file1.jpg"} - sort.Strings(expected) - sort.Strings(seen) - testutil.Equals(t, expected, seen) -} diff --git a/pkg/objstore/s3/s3.go b/pkg/objstore/s3/s3.go deleted file mode 100644 index 1fdc5347dc..0000000000 --- a/pkg/objstore/s3/s3.go +++ /dev/null @@ -1,616 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -// Package s3 implements common object storage abstractions against s3-compatible APIs. 
-package s3 - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "runtime" - "strconv" - "strings" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/minio/minio-go/v7/pkg/encrypt" - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "github.com/prometheus/common/version" - "github.com/thanos-io/thanos/pkg/exthttp" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/runutil" - "gopkg.in/yaml.v2" -) - -type ctxKey int - -type BucketLookupType int - -func (blt BucketLookupType) String() string { - return []string{"auto", "virtual-hosted", "path"}[blt] -} - -func (blt BucketLookupType) MinioType() minio.BucketLookupType { - return []minio.BucketLookupType{ - minio.BucketLookupAuto, - minio.BucketLookupDNS, - minio.BucketLookupPath, - }[blt] -} - -func (blt BucketLookupType) MarshalYAML() (interface{}, error) { - return blt.String(), nil -} - -func (blt *BucketLookupType) UnmarshalYAML(unmarshal func(interface{}) error) error { - var lookupType string - if err := unmarshal(&lookupType); err != nil { - return err - } - - switch lookupType { - case "auto": - *blt = AutoLookup - return nil - case "virtual-hosted": - *blt = VirtualHostLookup - return nil - case "path": - *blt = PathLookup - return nil - } - - return fmt.Errorf("unsupported bucket lookup type: %s", lookupType) -} - -const ( - AutoLookup BucketLookupType = iota - VirtualHostLookup - PathLookup - - // DirDelim is the delimiter used to model a directory structure in an object store bucket. - DirDelim = "/" - - // SSEKMS is the name of the SSE-KMS method for objectstore encryption. - SSEKMS = "SSE-KMS" - - // SSEC is the name of the SSE-C method for objstore encryption. - SSEC = "SSE-C" - - // SSES3 is the name of the SSE-S3 method for objstore encryption. 
- SSES3 = "SSE-S3" - - // sseConfigKey is the context key to override SSE config. This feature is used by downstream - // projects (eg. Cortex) to inject custom SSE config on a per-request basis. Future work or - // refactoring can introduce breaking changes as far as the functionality is preserved. - // NOTE: we're using a context value only because it's a very specific S3 option. If SSE will - // be available to wider set of backends we should probably add a variadic option to Get() and Upload(). - sseConfigKey = ctxKey(0) -) - -var DefaultConfig = Config{ - PutUserMetadata: map[string]string{}, - HTTPConfig: exthttp.HTTPConfig{ - IdleConnTimeout: model.Duration(90 * time.Second), - ResponseHeaderTimeout: model.Duration(2 * time.Minute), - TLSHandshakeTimeout: model.Duration(10 * time.Second), - ExpectContinueTimeout: model.Duration(1 * time.Second), - MaxIdleConns: 100, - MaxIdleConnsPerHost: 100, - MaxConnsPerHost: 0, - }, - PartSize: 1024 * 1024 * 64, // 64MB. - BucketLookupType: AutoLookup, -} - -// HTTPConfig exists here only because Cortex depends on it, and we depend on Cortex. -// Deprecated. -// TODO(bwplotka): Remove it, once we remove Cortex cycle dep, or Cortex stops using this. -type HTTPConfig = exthttp.HTTPConfig - -// Config stores the configuration for s3 bucket. -type Config struct { - Bucket string `yaml:"bucket"` - Endpoint string `yaml:"endpoint"` - Region string `yaml:"region"` - AWSSDKAuth bool `yaml:"aws_sdk_auth"` - AccessKey string `yaml:"access_key"` - Insecure bool `yaml:"insecure"` - SignatureV2 bool `yaml:"signature_version2"` - SecretKey string `yaml:"secret_key"` - PutUserMetadata map[string]string `yaml:"put_user_metadata"` - HTTPConfig exthttp.HTTPConfig `yaml:"http_config"` - TraceConfig TraceConfig `yaml:"trace"` - ListObjectsVersion string `yaml:"list_objects_version"` - BucketLookupType BucketLookupType `yaml:"bucket_lookup_type"` - // PartSize used for multipart upload. 
Only used if uploaded object size is known and larger than configured PartSize. - // NOTE we need to make sure this number does not produce more parts than 10 000. - PartSize uint64 `yaml:"part_size"` - SSEConfig SSEConfig `yaml:"sse_config"` - STSEndpoint string `yaml:"sts_endpoint"` -} - -// SSEConfig deals with the configuration of SSE for Minio. The following options are valid: -// kmsencryptioncontext == https://docs.aws.amazon.com/kms/latest/developerguide/services-s3.html#s3-encryption-context -type SSEConfig struct { - Type string `yaml:"type"` - KMSKeyID string `yaml:"kms_key_id"` - KMSEncryptionContext map[string]string `yaml:"kms_encryption_context"` - EncryptionKey string `yaml:"encryption_key"` -} - -type TraceConfig struct { - Enable bool `yaml:"enable"` -} - -// Bucket implements the store.Bucket interface against s3-compatible APIs. -type Bucket struct { - logger log.Logger - name string - client *minio.Client - defaultSSE encrypt.ServerSide - putUserMetadata map[string]string - partSize uint64 - listObjectsV1 bool -} - -// parseConfig unmarshals a buffer into a Config with default values. -func parseConfig(conf []byte) (Config, error) { - config := DefaultConfig - if err := yaml.UnmarshalStrict(conf, &config); err != nil { - return Config{}, err - } - - return config, nil -} - -// NewBucket returns a new Bucket using the provided s3 config values. 
-func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) { - config, err := parseConfig(conf) - if err != nil { - return nil, err - } - - return NewBucketWithConfig(logger, config, component) -} - -type overrideSignerType struct { - credentials.Provider - signerType credentials.SignatureType -} - -func (s *overrideSignerType) Retrieve() (credentials.Value, error) { - v, err := s.Provider.Retrieve() - if err != nil { - return v, err - } - if !v.SignerType.IsAnonymous() { - v.SignerType = s.signerType - } - return v, nil -} - -// NewBucketWithConfig returns a new Bucket using the provided s3 config values. -func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) { - var chain []credentials.Provider - - // TODO(bwplotka): Don't do flags as they won't scale, use actual params like v2, v4 instead - wrapCredentialsProvider := func(p credentials.Provider) credentials.Provider { return p } - if config.SignatureV2 { - wrapCredentialsProvider = func(p credentials.Provider) credentials.Provider { - return &overrideSignerType{Provider: p, signerType: credentials.SignatureV2} - } - } - - if err := validate(config); err != nil { - return nil, err - } - - if config.AWSSDKAuth { - chain = []credentials.Provider{ - wrapCredentialsProvider(&AWSSDKAuth{Region: config.Region}), - } - } else if config.AccessKey != "" { - chain = []credentials.Provider{wrapCredentialsProvider(&credentials.Static{ - Value: credentials.Value{ - AccessKeyID: config.AccessKey, - SecretAccessKey: config.SecretKey, - SignerType: credentials.SignatureV4, - }, - })} - } else { - chain = []credentials.Provider{ - wrapCredentialsProvider(&credentials.EnvAWS{}), - wrapCredentialsProvider(&credentials.FileAWSCredentials{}), - wrapCredentialsProvider(&credentials.IAM{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - Endpoint: config.STSEndpoint, - }), - } - } - - // Check if a roundtripper has been set in the config - // otherwise 
build the default transport. - var rt http.RoundTripper - if config.HTTPConfig.Transport != nil { - rt = config.HTTPConfig.Transport - } else { - var err error - rt, err = exthttp.DefaultTransport(config.HTTPConfig) - if err != nil { - return nil, err - } - } - - client, err := minio.New(config.Endpoint, &minio.Options{ - Creds: credentials.NewChainCredentials(chain), - Secure: !config.Insecure, - Region: config.Region, - Transport: rt, - BucketLookup: config.BucketLookupType.MinioType(), - }) - if err != nil { - return nil, errors.Wrap(err, "initialize s3 client") - } - client.SetAppInfo(fmt.Sprintf("thanos-%s", component), fmt.Sprintf("%s (%s)", version.Version, runtime.Version())) - - var sse encrypt.ServerSide - if config.SSEConfig.Type != "" { - switch config.SSEConfig.Type { - case SSEKMS: - // If the KMSEncryptionContext is a nil map the header that is - // constructed by the encrypt.ServerSide object will be base64 - // encoded "nil" which is not accepted by AWS. - if config.SSEConfig.KMSEncryptionContext == nil { - config.SSEConfig.KMSEncryptionContext = make(map[string]string) - } - sse, err = encrypt.NewSSEKMS(config.SSEConfig.KMSKeyID, config.SSEConfig.KMSEncryptionContext) - if err != nil { - return nil, errors.Wrap(err, "initialize s3 client SSE-KMS") - } - - case SSEC: - key, err := ioutil.ReadFile(config.SSEConfig.EncryptionKey) - if err != nil { - return nil, err - } - - sse, err = encrypt.NewSSEC(key) - if err != nil { - return nil, errors.Wrap(err, "initialize s3 client SSE-C") - } - - case SSES3: - sse = encrypt.NewSSE() - - default: - sseErrMsg := errors.Errorf("Unsupported type %q was provided. 
Supported types are SSE-S3, SSE-KMS, SSE-C", config.SSEConfig.Type) - return nil, errors.Wrap(sseErrMsg, "Initialize s3 client SSE Config") - } - } - - if config.TraceConfig.Enable { - logWriter := log.NewStdlibAdapter(level.Debug(logger), log.MessageKey("s3TraceMsg")) - client.TraceOn(logWriter) - } - - if config.ListObjectsVersion != "" && config.ListObjectsVersion != "v1" && config.ListObjectsVersion != "v2" { - return nil, errors.Errorf("Initialize s3 client list objects version: Unsupported version %q was provided. Supported values are v1, v2", config.ListObjectsVersion) - } - - bkt := &Bucket{ - logger: logger, - name: config.Bucket, - client: client, - defaultSSE: sse, - putUserMetadata: config.PutUserMetadata, - partSize: config.PartSize, - listObjectsV1: config.ListObjectsVersion == "v1", - } - return bkt, nil -} - -// Name returns the bucket name for s3. -func (b *Bucket) Name() string { - return b.name -} - -// validate checks to see the config options are set. -func validate(conf Config) error { - if conf.Endpoint == "" { - return errors.New("no s3 endpoint in config file") - } - - if conf.AWSSDKAuth && conf.AccessKey != "" { - return errors.New("aws_sdk_auth and access_key are mutually exclusive configurations") - } - - if conf.AccessKey == "" && conf.SecretKey != "" { - return errors.New("no s3 access_key specified while secret_key is present in config file; either both should be present in config or envvars/IAM should be used.") - } - - if conf.AccessKey != "" && conf.SecretKey == "" { - return errors.New("no s3 secret_key specified while access_key is present in config file; either both should be present in config or envvars/IAM should be used.") - } - - if conf.SSEConfig.Type == SSEC && conf.SSEConfig.EncryptionKey == "" { - return errors.New("encryption_key must be set if sse_config.type is set to 'SSE-C'") - } - - if conf.SSEConfig.Type == SSEKMS && conf.SSEConfig.KMSKeyID == "" { - return errors.New("kms_key_id must be set if sse_config.type is 
set to 'SSE-KMS'") - } - - return nil -} - -// ValidateForTests checks to see the config options for tests are set. -func ValidateForTests(conf Config) error { - if conf.Endpoint == "" || - conf.AccessKey == "" || - conf.SecretKey == "" { - return errors.New("insufficient s3 test configuration information") - } - return nil -} - -// Iter calls f for each entry in the given directory. The argument to f is the full -// object name including the prefix of the inspected directory. -func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { - // Ensure the object name actually ends with a dir suffix. Otherwise we'll just iterate the - // object itself as one prefix item. - if dir != "" { - dir = strings.TrimSuffix(dir, DirDelim) + DirDelim - } - - opts := minio.ListObjectsOptions{ - Prefix: dir, - Recursive: objstore.ApplyIterOptions(options...).Recursive, - UseV1: b.listObjectsV1, - } - - for object := range b.client.ListObjects(ctx, b.name, opts) { - // Catch the error when failed to list objects. - if object.Err != nil { - return object.Err - } - // This sometimes happens with empty buckets. - if object.Key == "" { - continue - } - // The s3 client can also return the directory itself in the ListObjects call above. 
- if object.Key == dir { - continue - } - if err := f(object.Key); err != nil { - return err - } - } - - return nil -} - -func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - sse, err := b.getServerSideEncryption(ctx) - if err != nil { - return nil, err - } - - opts := &minio.GetObjectOptions{ServerSideEncryption: sse} - if length != -1 { - if err := opts.SetRange(off, off+length-1); err != nil { - return nil, err - } - } else if off > 0 { - if err := opts.SetRange(off, 0); err != nil { - return nil, err - } - } - r, err := b.client.GetObject(ctx, b.name, name, *opts) - if err != nil { - return nil, err - } - - // NotFoundObject error is revealed only after first Read. This does the initial GetRequest. Prefetch this here - // for convenience. - if _, err := r.Read(nil); err != nil { - runutil.CloseWithLogOnErr(b.logger, r, "s3 get range obj close") - - // First GET Object request error. - return nil, err - } - - return r, nil -} - -// Get returns a reader for the given object name. -func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return b.getRange(ctx, name, 0, -1) -} - -// GetRange returns a new range reader for the given object name and range. -func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - return b.getRange(ctx, name, off, length) -} - -// Exists checks if the given object exists. -func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) { - _, err := b.client.StatObject(ctx, b.name, name, minio.StatObjectOptions{}) - if err != nil { - if b.IsObjNotFoundErr(err) { - return false, nil - } - return false, errors.Wrap(err, "stat s3 object") - } - - return true, nil -} - -// Upload the contents of the reader as an object into the bucket. 
-func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { - sse, err := b.getServerSideEncryption(ctx) - if err != nil { - return err - } - - // TODO(https://github.com/thanos-io/thanos/issues/678): Remove guessing length when minio provider will support multipart upload without this. - size, err := objstore.TryToGetSize(r) - if err != nil { - level.Warn(b.logger).Log("msg", "could not guess file size for multipart upload; upload might be not optimized", "name", name, "err", err) - size = -1 - } - - partSize := b.partSize - if size < int64(partSize) { - partSize = 0 - } - if _, err := b.client.PutObject( - ctx, - b.name, - name, - r, - size, - minio.PutObjectOptions{ - PartSize: partSize, - ServerSideEncryption: sse, - UserMetadata: b.putUserMetadata, - // 4 is what minio-go have as the default. To be certain we do micro benchmark before any changes we - // ensure we pin this number to four. - // TODO(bwplotka): Consider adjusting this number to GOMAXPROCS or to expose this in config if it becomes bottleneck. - NumThreads: 4, - }, - ); err != nil { - return errors.Wrap(err, "upload s3 object") - } - - return nil -} - -// Attributes returns information about the specified object. -func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { - objInfo, err := b.client.StatObject(ctx, b.name, name, minio.StatObjectOptions{}) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - return objstore.ObjectAttributes{ - Size: objInfo.Size, - LastModified: objInfo.LastModified, - }, nil -} - -// Delete removes the object with the given name. -func (b *Bucket) Delete(ctx context.Context, name string) error { - return b.client.RemoveObject(ctx, b.name, name, minio.RemoveObjectOptions{}) -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. 
-func (b *Bucket) IsObjNotFoundErr(err error) bool { - return minio.ToErrorResponse(errors.Cause(err)).Code == "NoSuchKey" -} - -func (b *Bucket) Close() error { return nil } - -// getServerSideEncryption returns the SSE to use. -func (b *Bucket) getServerSideEncryption(ctx context.Context) (encrypt.ServerSide, error) { - if value := ctx.Value(sseConfigKey); value != nil { - if sse, ok := value.(encrypt.ServerSide); ok { - return sse, nil - } - return nil, errors.New("invalid SSE config override provided in the context") - } - - return b.defaultSSE, nil -} - -func configFromEnv() Config { - c := Config{ - Bucket: os.Getenv("S3_BUCKET"), - Endpoint: os.Getenv("S3_ENDPOINT"), - AccessKey: os.Getenv("S3_ACCESS_KEY"), - SecretKey: os.Getenv("S3_SECRET_KEY"), - } - - c.Insecure, _ = strconv.ParseBool(os.Getenv("S3_INSECURE")) - c.HTTPConfig.InsecureSkipVerify, _ = strconv.ParseBool(os.Getenv("S3_INSECURE_SKIP_VERIFY")) - c.SignatureV2, _ = strconv.ParseBool(os.Getenv("S3_SIGNATURE_VERSION2")) - return c -} - -// NewTestBucket creates test bkt client that before returning creates temporary bucket. -// In a close function it empties and deletes the bucket. -func NewTestBucket(t testing.TB, location string) (objstore.Bucket, func(), error) { - c := configFromEnv() - if err := ValidateForTests(c); err != nil { - return nil, nil, err - } - - if c.Bucket != "" && os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" { - return nil, nil, errors.New("S3_BUCKET is defined. Normally this tests will create temporary bucket " + - "and delete it after test. Unset S3_BUCKET env variable to use default logic. If you really want to run " + - "tests against provided (NOT USED!) bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: That bucket " + - "needs to be manually cleared. This means that it is only useful to run one test in a time. 
This is due " + - "to safety (accidentally pointing prod bucket for test) as well as aws s3 not being fully strong consistent.") - } - - return NewTestBucketFromConfig(t, location, c, true) -} - -func NewTestBucketFromConfig(t testing.TB, location string, c Config, reuseBucket bool) (objstore.Bucket, func(), error) { - ctx := context.Background() - - bc, err := yaml.Marshal(c) - if err != nil { - return nil, nil, err - } - b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test") - if err != nil { - return nil, nil, err - } - - bktToCreate := c.Bucket - if c.Bucket != "" && reuseBucket { - if err := b.Iter(ctx, "", func(f string) error { - return errors.Errorf("bucket %s is not empty", c.Bucket) - }); err != nil { - return nil, nil, errors.Wrapf(err, "s3 check bucket %s", c.Bucket) - } - - t.Log("WARNING. Reusing", c.Bucket, "AWS bucket for AWS tests. Manual cleanup afterwards is required") - return b, func() {}, nil - } - - if c.Bucket == "" { - bktToCreate = objstore.CreateTemporaryTestBucketName(t) - } - - if err := b.client.MakeBucket(ctx, bktToCreate, minio.MakeBucketOptions{Region: location}); err != nil { - return nil, nil, err - } - b.name = bktToCreate - t.Log("created temporary AWS bucket for AWS tests with name", bktToCreate, "in", location) - - return b, func() { - objstore.EmptyBucket(t, ctx, b) - if err := b.client.RemoveBucket(ctx, bktToCreate); err != nil { - t.Logf("deleting bucket %s failed: %s", bktToCreate, err) - } - }, nil -} - -// ContextWithSSEConfig returns a context with a custom SSE config set. The returned context should be -// provided to S3 objstore client functions to override the default SSE config. 
-func ContextWithSSEConfig(ctx context.Context, value encrypt.ServerSide) context.Context { - return context.WithValue(ctx, sseConfigKey, value) -} diff --git a/pkg/objstore/s3/s3_aws_sdk_auth.go b/pkg/objstore/s3/s3_aws_sdk_auth.go deleted file mode 100644 index 393a931d17..0000000000 --- a/pkg/objstore/s3/s3_aws_sdk_auth.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package s3 - -import ( - "context" - - aws "github.com/aws/aws-sdk-go-v2/aws" - awsconfig "github.com/aws/aws-sdk-go-v2/config" - "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/pkg/errors" -) - -// AWSSDKAuth retrieves credentials from the aws-sdk-go. -type AWSSDKAuth struct { - Region string - creds aws.Credentials -} - -// NewAWSSDKAuth returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewAWSSDKAuth(region string) *credentials.Credentials { - return credentials.New(&AWSSDKAuth{ - Region: region, - }) -} - -// Retrieve retrieves the keys from the environment. -func (a *AWSSDKAuth) Retrieve() (credentials.Value, error) { - cfg, err := awsconfig.LoadDefaultConfig(context.TODO(), awsconfig.WithRegion(a.Region)) - if err != nil { - return credentials.Value{}, errors.Wrap(err, "load AWS SDK config") - } - - creds, err := cfg.Credentials.Retrieve(context.TODO()) - if err != nil { - return credentials.Value{}, errors.Wrap(err, "retrieve AWS SDK credentials") - } - - a.creds = creds - - return credentials.Value{ - AccessKeyID: creds.AccessKeyID, - SecretAccessKey: creds.SecretAccessKey, - SessionToken: creds.SessionToken, - SignerType: credentials.SignatureV4, - }, nil -} - -// IsExpired returns if the credentials have been retrieved. 
-func (a *AWSSDKAuth) IsExpired() bool { - return a.creds.Expired() -} diff --git a/pkg/objstore/s3/s3_e2e_test.go b/pkg/objstore/s3/s3_e2e_test.go deleted file mode 100644 index 0fef4a71e4..0000000000 --- a/pkg/objstore/s3/s3_e2e_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package s3_test - -import ( - "bytes" - "context" - "strings" - "testing" - - "github.com/efficientgo/e2e" - "github.com/go-kit/log" - - "github.com/thanos-io/thanos/pkg/objstore/s3" - "github.com/thanos-io/thanos/test/e2e/e2ethanos" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -// Regression benchmark for https://github.com/thanos-io/thanos/issues/3917 and https://github.com/thanos-io/thanos/issues/3967. -// $ export ver=v1 && go test ./pkg/objstore/s3/... -run '^$' -bench '^BenchmarkUpload' -benchtime 5s -count 5 \ -// -memprofile=${ver}.mem.pprof -cpuprofile=${ver}.cpu.pprof | tee ${ver}.txt . -func BenchmarkUpload(b *testing.B) { - b.ReportAllocs() - ctx := context.Background() - - e, err := e2e.NewDockerEnvironment("e2e_bench_mino_client", e2e.WithLogger(log.NewNopLogger())) - testutil.Ok(b, err) - b.Cleanup(e2ethanos.CleanScenario(b, e)) - - const bucket = "benchmark" - m := e2ethanos.NewMinio(e, "benchmark", bucket) - testutil.Ok(b, e2e.StartAndWaitReady(m)) - - bkt, err := s3.NewBucketWithConfig( - log.NewNopLogger(), - e2ethanos.NewS3Config(bucket, m.Endpoint("https"), m.Dir()), - "test-feed", - ) - testutil.Ok(b, err) - - buf := bytes.Buffer{} - buf.Grow(1028 * 1028 * 100) // 100MB. 
- word := "abcdefghij" - for i := 0; i < buf.Cap()/len(word); i++ { - _, _ = buf.WriteString(word) - } - str := buf.String() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - testutil.Ok(b, bkt.Upload(ctx, "test", strings.NewReader(str))) - } -} diff --git a/pkg/objstore/s3/s3_test.go b/pkg/objstore/s3/s3_test.go deleted file mode 100644 index d9734ccf83..0000000000 --- a/pkg/objstore/s3/s3_test.go +++ /dev/null @@ -1,421 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package s3 - -import ( - "context" - "encoding/base64" - "encoding/json" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/minio/minio-go/v7/pkg/encrypt" - "github.com/thanos-io/thanos/pkg/exthttp" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -const endpoint string = "localhost:80" - -func TestParseConfig(t *testing.T) { - input := []byte(`bucket: abcd -insecure: false`) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - - if cfg.Bucket != "abcd" { - t.Errorf("parsing of bucket failed: got %v, expected %v", cfg.Bucket, "abcd") - } - if cfg.Insecure { - t.Errorf("parsing of insecure failed: got %v, expected %v", cfg.Insecure, false) - } -} - -func TestParseConfig_SSEConfig(t *testing.T) { - input := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-S3`) - - cfg, err := parseConfig(input) - testutil.Ok(t, err) - testutil.Ok(t, validate(cfg)) - - input2 := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-C`) - - cfg, err = parseConfig(input2) - testutil.Ok(t, err) - testutil.NotOk(t, validate(cfg)) - - input3 := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-C - kms_key_id: qweasd`) - - cfg, err = parseConfig(input3) - testutil.Ok(t, err) - testutil.NotOk(t, validate(cfg)) - - input4 := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-C - encryption_key: /some/file`) - - cfg, err = 
parseConfig(input4) - testutil.Ok(t, err) - testutil.Ok(t, validate(cfg)) - - input5 := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-KMS`) - - cfg, err = parseConfig(input5) - testutil.Ok(t, err) - testutil.NotOk(t, validate(cfg)) - - input6 := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-KMS - kms_key_id: abcd1234-ab12-cd34-1234567890ab`) - - cfg, err = parseConfig(input6) - testutil.Ok(t, err) - testutil.Ok(t, validate(cfg)) - - input7 := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-KMS - kms_key_id: abcd1234-ab12-cd34-1234567890ab - kms_encryption_context: - key: value - something: else - a: b`) - - cfg, err = parseConfig(input7) - testutil.Ok(t, err) - testutil.Ok(t, validate(cfg)) - - input8 := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-MagicKey - kms_key_id: abcd1234-ab12-cd34-1234567890ab - encryption_key: /some/file`) - - cfg, err = parseConfig(input8) - testutil.Ok(t, err) - // Since the error handling for "proper type" if done as we're setting up the bucket. 
- testutil.Ok(t, validate(cfg)) -} - -func TestParseConfig_DefaultHTTPConfig(t *testing.T) { - input := []byte(`bucket: abcd -insecure: false`) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - - if time.Duration(cfg.HTTPConfig.IdleConnTimeout) != time.Duration(90*time.Second) { - t.Errorf("parsing of idle_conn_timeout failed: got %v, expected %v", - time.Duration(cfg.HTTPConfig.IdleConnTimeout), time.Duration(90*time.Second)) - } - - if time.Duration(cfg.HTTPConfig.ResponseHeaderTimeout) != time.Duration(2*time.Minute) { - t.Errorf("parsing of response_header_timeout failed: got %v, expected %v", - time.Duration(cfg.HTTPConfig.IdleConnTimeout), time.Duration(2*time.Minute)) - } - - if cfg.HTTPConfig.InsecureSkipVerify { - t.Errorf("parsing of insecure_skip_verify failed: got %v, expected %v", cfg.HTTPConfig.InsecureSkipVerify, false) - } -} - -func TestParseConfig_CustomHTTPConfig(t *testing.T) { - input := []byte(`bucket: abcd -insecure: false -http_config: - insecure_skip_verify: true - idle_conn_timeout: 50s - response_header_timeout: 1m`) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - - if time.Duration(cfg.HTTPConfig.IdleConnTimeout) != time.Duration(50*time.Second) { - t.Errorf("parsing of idle_conn_timeout failed: got %v, expected %v", - time.Duration(cfg.HTTPConfig.IdleConnTimeout), time.Duration(50*time.Second)) - } - - if time.Duration(cfg.HTTPConfig.ResponseHeaderTimeout) != time.Duration(1*time.Minute) { - t.Errorf("parsing of response_header_timeout failed: got %v, expected %v", - time.Duration(cfg.HTTPConfig.IdleConnTimeout), time.Duration(1*time.Minute)) - } - - if !cfg.HTTPConfig.InsecureSkipVerify { - t.Errorf("parsing of insecure_skip_verify failed: got %v, expected %v", cfg.HTTPConfig.InsecureSkipVerify, false) - } -} - -func TestParseConfig_CustomHTTPConfigWithTLS(t *testing.T) { - input := []byte(`bucket: abcd -insecure: false -http_config: - tls_config: - ca_file: /certs/ca.crt - cert_file: /certs/cert.crt - key_file: 
/certs/key.key - server_name: server - insecure_skip_verify: false - `) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - - testutil.Equals(t, "/certs/ca.crt", cfg.HTTPConfig.TLSConfig.CAFile) - testutil.Equals(t, "/certs/cert.crt", cfg.HTTPConfig.TLSConfig.CertFile) - testutil.Equals(t, "/certs/key.key", cfg.HTTPConfig.TLSConfig.KeyFile) - testutil.Equals(t, "server", cfg.HTTPConfig.TLSConfig.ServerName) - testutil.Equals(t, false, cfg.HTTPConfig.TLSConfig.InsecureSkipVerify) -} - -func TestParseConfig_CustomLegacyInsecureSkipVerify(t *testing.T) { - input := []byte(`bucket: abcd -insecure: false -http_config: - insecure_skip_verify: true - tls_config: - insecure_skip_verify: false - `) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - transport, err := exthttp.DefaultTransport(cfg.HTTPConfig) - testutil.Ok(t, err) - testutil.Equals(t, true, transport.TLSClientConfig.InsecureSkipVerify) -} - -func TestValidate_OK(t *testing.T) { - input := []byte(`bucket: "bucket-name" -endpoint: "s3-endpoint" -access_key: "access_key" -insecure: false -signature_version2: false -secret_key: "secret_key" -http_config: - insecure_skip_verify: false - idle_conn_timeout: 50s`) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - testutil.Ok(t, validate(cfg)) - testutil.Assert(t, cfg.PutUserMetadata != nil, "map should not be nil") - - input2 := []byte(`bucket: "bucket-name" -endpoint: "s3-endpoint" -access_key: "access_key" -insecure: false -signature_version2: false -secret_key: "secret_key" -put_user_metadata: - "X-Amz-Acl": "bucket-owner-full-control" -http_config: - idle_conn_timeout: 0s`) - cfg2, err := parseConfig(input2) - testutil.Ok(t, err) - testutil.Ok(t, validate(cfg2)) - - testutil.Equals(t, "bucket-owner-full-control", cfg2.PutUserMetadata["X-Amz-Acl"]) -} - -func TestParseConfig_PartSize(t *testing.T) { - input := []byte(`bucket: "bucket-name" -endpoint: "s3-endpoint" -access_key: "access_key" -insecure: false -signature_version2: false -secret_key: 
"secret_key" -http_config: - insecure_skip_verify: false - idle_conn_timeout: 50s`) - - cfg, err := parseConfig(input) - testutil.Ok(t, err) - testutil.Assert(t, cfg.PartSize == 1024*1024*64, "when part size not set it should default to 128MiB") - - input2 := []byte(`bucket: "bucket-name" -endpoint: "s3-endpoint" -access_key: "access_key" -insecure: false -signature_version2: false -secret_key: "secret_key" -part_size: 104857600 -http_config: - insecure_skip_verify: false - idle_conn_timeout: 50s`) - cfg2, err := parseConfig(input2) - testutil.Ok(t, err) - testutil.Assert(t, cfg2.PartSize == 1024*1024*100, "when part size should be set to 100MiB") -} - -func TestParseConfig_OldSEEncryptionFieldShouldFail(t *testing.T) { - input := []byte(`bucket: "bucket-name" -endpoint: "s3-endpoint" -access_key: "access_key" -insecure: false -signature_version2: false -encrypt_sse: false -secret_key: "secret_key" -see_encryption: true -put_user_metadata: - "X-Amz-Acl": "bucket-owner-full-control" -http_config: - idle_conn_timeout: 0s`) - _, err := parseConfig(input) - testutil.NotOk(t, err) -} - -func TestParseConfig_ListObjectsV1(t *testing.T) { - input := []byte(`bucket: "bucket-name" -endpoint: "s3-endpoint"`) - - cfg, err := parseConfig(input) - testutil.Ok(t, err) - - if cfg.ListObjectsVersion != "" { - t.Errorf("when list_objects_version not set, it should default to empty") - } - - input2 := []byte(`bucket: "bucket-name" -endpoint: "s3-endpoint" -list_objects_version: "abcd"`) - - cfg2, err := parseConfig(input2) - testutil.Ok(t, err) - - if cfg2.ListObjectsVersion != "abcd" { - t.Errorf("parsing of list_objects_version failed: got %v, expected %v", cfg.ListObjectsVersion, "abcd") - } -} - -func TestBucket_getServerSideEncryption(t *testing.T) { - // Default config should return no SSE config. 
- cfg := DefaultConfig - cfg.Endpoint = endpoint - bkt, err := NewBucketWithConfig(log.NewNopLogger(), cfg, "test") - testutil.Ok(t, err) - - sse, err := bkt.getServerSideEncryption(context.Background()) - testutil.Ok(t, err) - testutil.Equals(t, nil, sse) - - // If SSE is configured in the client config it should be used. - cfg = DefaultConfig - cfg.Endpoint = endpoint - cfg.SSEConfig = SSEConfig{Type: SSES3} - bkt, err = NewBucketWithConfig(log.NewNopLogger(), cfg, "test") - testutil.Ok(t, err) - - sse, err = bkt.getServerSideEncryption(context.Background()) - testutil.Ok(t, err) - testutil.Equals(t, encrypt.S3, sse.Type()) - - // SSE-KMS can be configured in the client config with an optional - // KMSEncryptionContext - In this case the encryptionContextHeader should be - // a base64 encoded string which represents a string-string map "{}" - cfg = DefaultConfig - cfg.Endpoint = endpoint - cfg.SSEConfig = SSEConfig{ - Type: SSEKMS, - KMSKeyID: "key", - } - bkt, err = NewBucketWithConfig(log.NewNopLogger(), cfg, "test") - testutil.Ok(t, err) - - sse, err = bkt.getServerSideEncryption(context.Background()) - testutil.Ok(t, err) - testutil.Equals(t, encrypt.KMS, sse.Type()) - - encryptionContextHeader := "X-Amz-Server-Side-Encryption-Context" - headers := make(http.Header) - sse.Marshal(headers) - wantJson, err := json.Marshal(make(map[string]string)) - testutil.Ok(t, err) - want := base64.StdEncoding.EncodeToString(wantJson) - testutil.Equals(t, want, headers.Get(encryptionContextHeader)) - - // If the KMSEncryptionContext is set then the header should reflect it's - // value. 
- cfg = DefaultConfig - cfg.Endpoint = endpoint - cfg.SSEConfig = SSEConfig{ - Type: SSEKMS, - KMSKeyID: "key", - KMSEncryptionContext: map[string]string{"foo": "bar"}, - } - bkt, err = NewBucketWithConfig(log.NewNopLogger(), cfg, "test") - testutil.Ok(t, err) - - sse, err = bkt.getServerSideEncryption(context.Background()) - testutil.Ok(t, err) - testutil.Equals(t, encrypt.KMS, sse.Type()) - - headers = make(http.Header) - sse.Marshal(headers) - wantJson, err = json.Marshal(cfg.SSEConfig.KMSEncryptionContext) - testutil.Ok(t, err) - want = base64.StdEncoding.EncodeToString(wantJson) - testutil.Equals(t, want, headers.Get(encryptionContextHeader)) - - // If SSE is configured in the context it should win. - cfg = DefaultConfig - cfg.Endpoint = endpoint - cfg.SSEConfig = SSEConfig{Type: SSES3} - override, err := encrypt.NewSSEKMS("test", nil) - testutil.Ok(t, err) - - bkt, err = NewBucketWithConfig(log.NewNopLogger(), cfg, "test") - testutil.Ok(t, err) - - sse, err = bkt.getServerSideEncryption(context.WithValue(context.Background(), sseConfigKey, override)) - testutil.Ok(t, err) - testutil.Equals(t, encrypt.KMS, sse.Type()) -} - -func TestBucket_Get_ShouldReturnErrorIfServerTruncateResponse(t *testing.T) { - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT") - w.Header().Set("Content-Length", "100") - - // Write less bytes than the content length. - _, err := w.Write([]byte("12345")) - testutil.Ok(t, err) - })) - defer srv.Close() - - cfg := DefaultConfig - cfg.Bucket = "test-bucket" - cfg.Endpoint = srv.Listener.Addr().String() - cfg.Insecure = true - cfg.Region = "test" - cfg.AccessKey = "test" - cfg.SecretKey = "test" - - bkt, err := NewBucketWithConfig(log.NewNopLogger(), cfg, "test") - testutil.Ok(t, err) - - reader, err := bkt.Get(context.Background(), "test") - testutil.Ok(t, err) - - // We expect an error when reading back. 
- _, err = ioutil.ReadAll(reader) - testutil.Equals(t, io.ErrUnexpectedEOF, err) -} diff --git a/pkg/objstore/swift/swift.go b/pkg/objstore/swift/swift.go deleted file mode 100644 index eb757ae9ac..0000000000 --- a/pkg/objstore/swift/swift.go +++ /dev/null @@ -1,377 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -// Package swift implements common object storage abstractions against OpenStack swift APIs. -package swift - -import ( - "context" - "fmt" - "io" - "os" - "strconv" - "strings" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/ncw/swift" - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/runutil" -) - -const ( - // DirDelim is the delimiter used to model a directory structure in an object store bucket. - DirDelim = '/' - // Name of the directory in bucket, where to store file parts of SLO and DLO. - SegmentsDir = "segments/" -) - -var DefaultConfig = Config{ - AuthVersion: 0, // Means autodetect of the auth API version by the library. 
- ChunkSize: 1024 * 1024 * 1024, - Retries: 3, - ConnectTimeout: model.Duration(10 * time.Second), - Timeout: model.Duration(5 * time.Minute), -} - -type Config struct { - AuthVersion int `yaml:"auth_version"` - AuthUrl string `yaml:"auth_url"` - Username string `yaml:"username"` - UserDomainName string `yaml:"user_domain_name"` - UserDomainID string `yaml:"user_domain_id"` - UserId string `yaml:"user_id"` - Password string `yaml:"password"` - DomainId string `yaml:"domain_id"` - DomainName string `yaml:"domain_name"` - ProjectID string `yaml:"project_id"` - ProjectName string `yaml:"project_name"` - ProjectDomainID string `yaml:"project_domain_id"` - ProjectDomainName string `yaml:"project_domain_name"` - RegionName string `yaml:"region_name"` - ContainerName string `yaml:"container_name"` - ChunkSize int64 `yaml:"large_object_chunk_size"` - SegmentContainerName string `yaml:"large_object_segments_container_name"` - Retries int `yaml:"retries"` - ConnectTimeout model.Duration `yaml:"connect_timeout"` - Timeout model.Duration `yaml:"timeout"` - UseDynamicLargeObjects bool `yaml:"use_dynamic_large_objects"` -} - -func parseConfig(conf []byte) (*Config, error) { - sc := DefaultConfig - err := yaml.UnmarshalStrict(conf, &sc) - return &sc, err -} - -func configFromEnv() (*Config, error) { - c := swift.Connection{} - if err := c.ApplyEnvironment(); err != nil { - return nil, err - } - - config := Config{ - AuthVersion: c.AuthVersion, - AuthUrl: c.AuthUrl, - Password: c.ApiKey, - Username: c.UserName, - UserId: c.UserId, - DomainId: c.DomainId, - DomainName: c.Domain, - ProjectID: c.TenantId, - ProjectName: c.Tenant, - ProjectDomainID: c.TenantDomainId, - ProjectDomainName: c.TenantDomain, - RegionName: c.Region, - ContainerName: os.Getenv("OS_CONTAINER_NAME"), - ChunkSize: DefaultConfig.ChunkSize, - SegmentContainerName: os.Getenv("SWIFT_SEGMENTS_CONTAINER_NAME"), - Retries: c.Retries, - ConnectTimeout: model.Duration(c.ConnectTimeout), - Timeout: 
model.Duration(c.Timeout), - UseDynamicLargeObjects: false, - } - if os.Getenv("SWIFT_CHUNK_SIZE") != "" { - var err error - config.ChunkSize, err = strconv.ParseInt(os.Getenv("SWIFT_CHUNK_SIZE"), 10, 64) - if err != nil { - return nil, errors.Wrap(err, "parsing chunk size") - } - } - if strings.ToLower(os.Getenv("SWIFT_USE_DYNAMIC_LARGE_OBJECTS")) == "true" { - config.UseDynamicLargeObjects = true - } - return &config, nil -} - -func connectionFromConfig(sc *Config) *swift.Connection { - connection := swift.Connection{ - Domain: sc.DomainName, - DomainId: sc.DomainId, - UserName: sc.Username, - UserId: sc.UserId, - ApiKey: sc.Password, - AuthUrl: sc.AuthUrl, - Retries: sc.Retries, - Region: sc.RegionName, - AuthVersion: sc.AuthVersion, - Tenant: sc.ProjectName, - TenantId: sc.ProjectID, - TenantDomain: sc.ProjectDomainName, - TenantDomainId: sc.ProjectDomainID, - ConnectTimeout: time.Duration(sc.ConnectTimeout), - Timeout: time.Duration(sc.Timeout), - } - return &connection -} - -type Container struct { - logger log.Logger - name string - connection *swift.Connection - chunkSize int64 - useDynamicLargeObjects bool - segmentsContainer string -} - -func NewContainer(logger log.Logger, conf []byte) (*Container, error) { - sc, err := parseConfig(conf) - if err != nil { - return nil, errors.Wrap(err, "parse config") - } - return NewContainerFromConfig(logger, sc, false) -} - -func ensureContainer(connection *swift.Connection, name string, createIfNotExist bool) error { - if _, _, err := connection.Container(name); err != nil { - if err != swift.ContainerNotFound { - return errors.Wrapf(err, "verify container %s", name) - } - if !createIfNotExist { - return fmt.Errorf("unable to find the expected container %s", name) - } - if err = connection.ContainerCreate(name, swift.Headers{}); err != nil { - return errors.Wrapf(err, "create container %s", name) - } - return nil - } - return nil -} - -func NewContainerFromConfig(logger log.Logger, sc *Config, createContainer bool) 
(*Container, error) { - connection := connectionFromConfig(sc) - if err := connection.Authenticate(); err != nil { - return nil, errors.Wrap(err, "authentication") - } - - if err := ensureContainer(connection, sc.ContainerName, createContainer); err != nil { - return nil, err - } - if sc.SegmentContainerName == "" { - sc.SegmentContainerName = sc.ContainerName - } else if err := ensureContainer(connection, sc.SegmentContainerName, createContainer); err != nil { - return nil, err - } - - return &Container{ - logger: logger, - name: sc.ContainerName, - connection: connection, - chunkSize: sc.ChunkSize, - useDynamicLargeObjects: sc.UseDynamicLargeObjects, - segmentsContainer: sc.SegmentContainerName, - }, nil -} - -// Name returns the container name for swift. -func (c *Container) Name() string { - return c.name -} - -// Iter calls f for each entry in the given directory. The argument to f is the full -// object name including the prefix of the inspected directory. -func (c *Container) Iter(_ context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { - if dir != "" { - dir = strings.TrimSuffix(dir, string(DirDelim)) + string(DirDelim) - } - - listOptions := &swift.ObjectsOpts{ - Prefix: dir, - Delimiter: DirDelim, - } - if objstore.ApplyIterOptions(options...).Recursive { - listOptions.Delimiter = rune(0) - } - - return c.connection.ObjectsWalk(c.name, listOptions, func(opts *swift.ObjectsOpts) (interface{}, error) { - objects, err := c.connection.ObjectNames(c.name, opts) - if err != nil { - return objects, errors.Wrap(err, "list object names") - } - for _, object := range objects { - if object == SegmentsDir { - continue - } - if err := f(object); err != nil { - return objects, errors.Wrap(err, "iteration over objects") - } - } - return objects, nil - }) -} - -func (c *Container) get(name string, headers swift.Headers, checkHash bool) (io.ReadCloser, error) { - if name == "" { - return nil, errors.New("object name cannot be empty") - 
} - file, _, err := c.connection.ObjectOpen(c.name, name, checkHash, headers) - if err != nil { - return nil, errors.Wrap(err, "open object") - } - return file, err -} - -// Get returns a reader for the given object name. -func (c *Container) Get(_ context.Context, name string) (io.ReadCloser, error) { - return c.get(name, swift.Headers{}, true) -} - -func (c *Container) GetRange(_ context.Context, name string, off, length int64) (io.ReadCloser, error) { - // Set Range HTTP header, see the docs https://docs.openstack.org/api-ref/object-store/?expanded=show-container-details-and-list-objects-detail,get-object-content-and-metadata-detail#id76. - bytesRange := fmt.Sprintf("bytes=%d-", off) - if length != -1 { - bytesRange = fmt.Sprintf("%s%d", bytesRange, off+length-1) - } - return c.get(name, swift.Headers{"Range": bytesRange}, false) -} - -// Attributes returns information about the specified object. -func (c *Container) Attributes(_ context.Context, name string) (objstore.ObjectAttributes, error) { - if name == "" { - return objstore.ObjectAttributes{}, errors.New("object name cannot be empty") - } - info, _, err := c.connection.Object(c.name, name) - if err != nil { - return objstore.ObjectAttributes{}, errors.Wrap(err, "get object attributes") - } - return objstore.ObjectAttributes{ - Size: info.Bytes, - LastModified: info.LastModified, - }, nil -} - -// Exists checks if the given object exists. -func (c *Container) Exists(_ context.Context, name string) (bool, error) { - found := true - _, _, err := c.connection.Object(c.name, name) - if c.IsObjNotFoundErr(err) { - err = nil - found = false - } - return found, err -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. -func (c *Container) IsObjNotFoundErr(err error) bool { - return errors.Is(err, swift.ObjectNotFound) -} - -// Upload writes the contents of the reader as an object into the container. 
-func (c *Container) Upload(_ context.Context, name string, r io.Reader) (err error) { - size, err := objstore.TryToGetSize(r) - if err != nil { - level.Warn(c.logger).Log("msg", "could not guess file size, using large object to avoid issues if the file is larger than limit", "name", name, "err", err) - // Anything higher or equal to chunk size so the SLO is used. - size = c.chunkSize - } - var file io.WriteCloser - if size >= c.chunkSize { - opts := swift.LargeObjectOpts{ - Container: c.name, - ObjectName: name, - ChunkSize: c.chunkSize, - SegmentContainer: c.segmentsContainer, - CheckHash: true, - } - if c.useDynamicLargeObjects { - if file, err = c.connection.DynamicLargeObjectCreateFile(&opts); err != nil { - return errors.Wrap(err, "create DLO file") - } - } else { - if file, err = c.connection.StaticLargeObjectCreateFile(&opts); err != nil { - return errors.Wrap(err, "create SLO file") - } - } - } else { - if file, err = c.connection.ObjectCreate(c.name, name, true, "", "", swift.Headers{}); err != nil { - return errors.Wrap(err, "create file") - } - } - defer runutil.CloseWithErrCapture(&err, file, "upload object close") - if _, err := io.Copy(file, r); err != nil { - return errors.Wrap(err, "uploading object") - } - return nil -} - -// Delete removes the object with the given name. -func (c *Container) Delete(_ context.Context, name string) error { - return errors.Wrap(c.connection.LargeObjectDelete(c.name, name), "delete object") -} - -func (*Container) Close() error { - // Nothing to close. - return nil -} - -// NewTestContainer creates test objStore client that before returning creates temporary container. -// In a close function it empties and deletes the container. 
-func NewTestContainer(t testing.TB) (objstore.Bucket, func(), error) { - config, err := configFromEnv() - if err != nil { - return nil, nil, errors.Wrap(err, "loading config from ENV") - } - if config.ContainerName != "" { - if os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" { - return nil, nil, errors.New("OS_CONTAINER_NAME is defined. Normally this tests will create temporary container " + - "and delete it after test. Unset OS_CONTAINER_NAME env variable to use default logic. If you really want to run " + - "tests against provided (NOT USED!) container, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: That container " + - "needs to be manually cleared. This means that it is only useful to run one test in a time. This is due " + - "to safety (accidentally pointing prod container for test) as well as swift not being fully strong consistent.") - } - c, err := NewContainerFromConfig(log.NewNopLogger(), config, false) - if err != nil { - return nil, nil, errors.Wrap(err, "initializing new container") - } - if err := c.Iter(context.Background(), "", func(f string) error { - return errors.Errorf("container %s is not empty", c.Name()) - }); err != nil { - return nil, nil, errors.Wrapf(err, "check container %s", c.Name()) - } - t.Log("WARNING. Reusing", c.Name(), "container for Swift tests. 
Manual cleanup afterwards is required") - return c, func() {}, nil - } - config.ContainerName = objstore.CreateTemporaryTestBucketName(t) - config.SegmentContainerName = config.ContainerName - c, err := NewContainerFromConfig(log.NewNopLogger(), config, true) - if err != nil { - return nil, nil, errors.Wrap(err, "initializing new container") - } - t.Log("created temporary container for swift tests with name", c.Name()) - - return c, func() { - objstore.EmptyBucket(t, context.Background(), c) - if err := c.connection.ContainerDelete(c.name); err != nil { - t.Logf("deleting container %s failed: %s", c.Name(), err) - } - if err := c.connection.ContainerDelete(c.segmentsContainer); err != nil { - t.Logf("deleting segments container %s failed: %s", c.segmentsContainer, err) - } - }, nil -} diff --git a/pkg/objstore/swift/swift_test.go b/pkg/objstore/swift/swift_test.go deleted file mode 100644 index c4aac41860..0000000000 --- a/pkg/objstore/swift/swift_test.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package swift - -import ( - "testing" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -func TestParseConfig(t *testing.T) { - input := []byte(`auth_url: http://identity.something.com/v3 -username: thanos -user_domain_name: userDomain -project_name: thanosProject -project_domain_name: projectDomain`) - - cfg, err := parseConfig(input) - testutil.Ok(t, err) - - testutil.Equals(t, "http://identity.something.com/v3", cfg.AuthUrl) - testutil.Equals(t, "thanos", cfg.Username) - testutil.Equals(t, "userDomain", cfg.UserDomainName) - testutil.Equals(t, "thanosProject", cfg.ProjectName) - testutil.Equals(t, "projectDomain", cfg.ProjectDomainName) -} - -func TestParseConfigFail(t *testing.T) { - input := []byte(`auth_url: http://identity.something.com/v3 -tenant_name: something`) - - _, err := parseConfig(input) - // Must result in unmarshal error as there's no `tenant_name` in SwiftConfig. 
- testutil.NotOk(t, err) -} diff --git a/pkg/objstore/testing.go b/pkg/objstore/testing.go deleted file mode 100644 index d3dfcde959..0000000000 --- a/pkg/objstore/testing.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package objstore - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "math/rand" - "sort" - "strings" - "sync" - "testing" - "time" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -func CreateTemporaryTestBucketName(t testing.TB) string { - src := rand.NewSource(time.Now().UnixNano()) - - // Bucket name need to conform: https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html. - name := strings.ReplaceAll(strings.Replace(fmt.Sprintf("test_%x_%s", src.Int63(), strings.ToLower(t.Name())), "_", "-", -1), "/", "-") - if len(name) >= 63 { - name = name[:63] - } - return name -} - -// EmptyBucket deletes all objects from bucket. This operation is required to properly delete bucket as a whole. -// It is used for testing only. -// TODO(bplotka): Add retries. 
-func EmptyBucket(t testing.TB, ctx context.Context, bkt Bucket) { - var wg sync.WaitGroup - - queue := []string{""} - for len(queue) > 0 { - elem := queue[0] - queue = queue[1:] - - err := bkt.Iter(ctx, elem, func(p string) error { - if strings.HasSuffix(p, DirDelim) { - queue = append(queue, p) - return nil - } - - wg.Add(1) - go func() { - if err := bkt.Delete(ctx, p); err != nil { - t.Logf("deleting object %s failed: %s", p, err) - } - wg.Done() - }() - return nil - }) - if err != nil { - t.Logf("iterating over bucket objects failed: %s", err) - wg.Wait() - return - } - } - wg.Wait() -} - -func WithNoopInstr(bkt Bucket) InstrumentedBucket { - return noopInstrumentedBucket{Bucket: bkt} -} - -type noopInstrumentedBucket struct { - Bucket -} - -func (b noopInstrumentedBucket) WithExpectedErrs(IsOpFailureExpectedFunc) Bucket { - return b -} - -func (b noopInstrumentedBucket) ReaderWithExpectedErrs(IsOpFailureExpectedFunc) BucketReader { - return b -} - -func AcceptanceTest(t *testing.T, bkt Bucket) { - ctx := context.Background() - - _, err := bkt.Get(ctx, "") - testutil.NotOk(t, err) - testutil.Assert(t, !bkt.IsObjNotFoundErr(err), "expected user error got not found %s", err) - - _, err = bkt.Get(ctx, "id1/obj_1.some") - testutil.NotOk(t, err) - testutil.Assert(t, bkt.IsObjNotFoundErr(err), "expected not found error got %s", err) - - ok, err := bkt.Exists(ctx, "id1/obj_1.some") - testutil.Ok(t, err) - testutil.Assert(t, !ok, "expected not exits") - - _, err = bkt.Attributes(ctx, "id1/obj_1.some") - testutil.NotOk(t, err) - testutil.Assert(t, bkt.IsObjNotFoundErr(err), "expected not found error but got %s", err) - - // Upload first object. - testutil.Ok(t, bkt.Upload(ctx, "id1/obj_1.some", strings.NewReader("@test-data@"))) - - // Double check we can immediately read it. 
- rc1, err := bkt.Get(ctx, "id1/obj_1.some") - testutil.Ok(t, err) - defer func() { testutil.Ok(t, rc1.Close()) }() - content, err := ioutil.ReadAll(rc1) - testutil.Ok(t, err) - testutil.Equals(t, "@test-data@", string(content)) - - // Check if we can get the correct size. - attrs, err := bkt.Attributes(ctx, "id1/obj_1.some") - testutil.Ok(t, err) - testutil.Assert(t, attrs.Size == 11, "expected size to be equal to 11") - - rc2, err := bkt.GetRange(ctx, "id1/obj_1.some", 1, 3) - testutil.Ok(t, err) - defer func() { testutil.Ok(t, rc2.Close()) }() - content, err = ioutil.ReadAll(rc2) - testutil.Ok(t, err) - testutil.Equals(t, "tes", string(content)) - - // Unspecified range with offset. - rcUnspecifiedLen, err := bkt.GetRange(ctx, "id1/obj_1.some", 1, -1) - testutil.Ok(t, err) - defer func() { testutil.Ok(t, rcUnspecifiedLen.Close()) }() - content, err = ioutil.ReadAll(rcUnspecifiedLen) - testutil.Ok(t, err) - testutil.Equals(t, "test-data@", string(content)) - - // Out of band offset. Do not rely on outcome. - // NOTE: For various providers we have different outcome. - // * GCS is giving 416 status code - // * S3 errors immdiately with invalid range error. - // * inmem and filesystem are returning 0 bytes. - //rcOffset, err := bkt.GetRange(ctx, "id1/obj_1.some", 124141, 3) - - // Out of band length. We expect to read file fully. - rcLength, err := bkt.GetRange(ctx, "id1/obj_1.some", 3, 9999) - testutil.Ok(t, err) - defer func() { testutil.Ok(t, rcLength.Close()) }() - content, err = ioutil.ReadAll(rcLength) - testutil.Ok(t, err) - testutil.Equals(t, "st-data@", string(content)) - - ok, err = bkt.Exists(ctx, "id1/obj_1.some") - testutil.Ok(t, err) - testutil.Assert(t, ok, "expected exits") - - // Upload other objects. - testutil.Ok(t, bkt.Upload(ctx, "id1/obj_2.some", strings.NewReader("@test-data2@"))) - // Upload should be idempotent. 
- testutil.Ok(t, bkt.Upload(ctx, "id1/obj_2.some", strings.NewReader("@test-data2@"))) - testutil.Ok(t, bkt.Upload(ctx, "id1/obj_3.some", strings.NewReader("@test-data3@"))) - testutil.Ok(t, bkt.Upload(ctx, "id1/sub/subobj_1.some", strings.NewReader("@test-data4@"))) - testutil.Ok(t, bkt.Upload(ctx, "id1/sub/subobj_2.some", strings.NewReader("@test-data5@"))) - testutil.Ok(t, bkt.Upload(ctx, "id2/obj_4.some", strings.NewReader("@test-data6@"))) - testutil.Ok(t, bkt.Upload(ctx, "obj_5.some", strings.NewReader("@test-data7@"))) - - // Can we iter over items from top dir? - var seen []string - testutil.Ok(t, bkt.Iter(ctx, "", func(fn string) error { - seen = append(seen, fn) - return nil - })) - expected := []string{"obj_5.some", "id1/", "id2/"} - sort.Strings(expected) - sort.Strings(seen) - testutil.Equals(t, expected, seen) - - // Can we iter over items from top dir recursively? - seen = []string{} - testutil.Ok(t, bkt.Iter(ctx, "", func(fn string) error { - seen = append(seen, fn) - return nil - }, WithRecursiveIter)) - expected = []string{"id1/obj_1.some", "id1/obj_2.some", "id1/obj_3.some", "id1/sub/subobj_1.some", "id1/sub/subobj_2.some", "id2/obj_4.some", "obj_5.some"} - sort.Strings(expected) - sort.Strings(seen) - testutil.Equals(t, expected, seen) - - // Can we iter over items from id1/ dir? - seen = []string{} - testutil.Ok(t, bkt.Iter(ctx, "id1/", func(fn string) error { - seen = append(seen, fn) - return nil - })) - testutil.Equals(t, []string{"id1/obj_1.some", "id1/obj_2.some", "id1/obj_3.some", "id1/sub/"}, seen) - - // Can we iter over items from id1/ dir recursively? - seen = []string{} - testutil.Ok(t, bkt.Iter(ctx, "id1/", func(fn string) error { - seen = append(seen, fn) - return nil - }, WithRecursiveIter)) - testutil.Equals(t, []string{"id1/obj_1.some", "id1/obj_2.some", "id1/obj_3.some", "id1/sub/subobj_1.some", "id1/sub/subobj_2.some"}, seen) - - // Can we iter over items from id1 dir? 
- seen = []string{} - testutil.Ok(t, bkt.Iter(ctx, "id1", func(fn string) error { - seen = append(seen, fn) - return nil - })) - testutil.Equals(t, []string{"id1/obj_1.some", "id1/obj_2.some", "id1/obj_3.some", "id1/sub/"}, seen) - - // Can we iter over items from id1 dir recursively? - seen = []string{} - testutil.Ok(t, bkt.Iter(ctx, "id1", func(fn string) error { - seen = append(seen, fn) - return nil - }, WithRecursiveIter)) - testutil.Equals(t, []string{"id1/obj_1.some", "id1/obj_2.some", "id1/obj_3.some", "id1/sub/subobj_1.some", "id1/sub/subobj_2.some"}, seen) - - // Can we iter over items from not existing dir? - testutil.Ok(t, bkt.Iter(ctx, "id0", func(fn string) error { - t.Error("Not expected to loop through not existing directory") - t.FailNow() - - return nil - })) - - testutil.Ok(t, bkt.Delete(ctx, "id1/obj_2.some")) - - // Delete is expected to fail on non existing object. - // NOTE: Don't rely on this. S3 is not complying with this as GCS is. - // testutil.NotOk(t, bkt.Delete(ctx, "id1/obj_2.some")) - - // Can we iter over items from id1/ dir and see obj2 being deleted? 
- seen = []string{} - testutil.Ok(t, bkt.Iter(ctx, "id1/", func(fn string) error { - seen = append(seen, fn) - return nil - })) - testutil.Equals(t, []string{"id1/obj_1.some", "id1/obj_3.some", "id1/sub/"}, seen) - - testutil.Ok(t, bkt.Delete(ctx, "id2/obj_4.some")) - - seen = []string{} - testutil.Ok(t, bkt.Iter(ctx, "", func(fn string) error { - seen = append(seen, fn) - return nil - })) - expected = []string{"obj_5.some", "id1/"} - sort.Strings(expected) - sort.Strings(seen) - testutil.Equals(t, expected, seen) - - testutil.Ok(t, bkt.Upload(ctx, "obj_6.som", bytes.NewReader(make([]byte, 1024*1024*200)))) - testutil.Ok(t, bkt.Delete(ctx, "obj_6.som")) -} - -type delayingBucket struct { - bkt Bucket - delay time.Duration -} - -func WithDelay(bkt Bucket, delay time.Duration) Bucket { - return &delayingBucket{bkt: bkt, delay: delay} -} - -func (d *delayingBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - time.Sleep(d.delay) - return d.bkt.Get(ctx, name) -} - -func (d *delayingBucket) Attributes(ctx context.Context, name string) (ObjectAttributes, error) { - time.Sleep(d.delay) - return d.bkt.Attributes(ctx, name) -} - -func (d *delayingBucket) Iter(ctx context.Context, dir string, f func(string) error, options ...IterOption) error { - time.Sleep(d.delay) - return d.bkt.Iter(ctx, dir, f, options...) 
-} - -func (d *delayingBucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - time.Sleep(d.delay) - return d.bkt.GetRange(ctx, name, off, length) -} - -func (d *delayingBucket) Exists(ctx context.Context, name string) (bool, error) { - time.Sleep(d.delay) - return d.bkt.Exists(ctx, name) -} - -func (d *delayingBucket) Upload(ctx context.Context, name string, r io.Reader) error { - time.Sleep(d.delay) - return d.bkt.Upload(ctx, name, r) -} - -func (d *delayingBucket) Delete(ctx context.Context, name string) error { - time.Sleep(d.delay) - return d.bkt.Delete(ctx, name) -} - -func (d *delayingBucket) Name() string { - time.Sleep(d.delay) - return d.bkt.Name() -} - -func (d *delayingBucket) Close() error { - // No delay for a local operation. - return d.bkt.Close() -} -func (d *delayingBucket) IsObjNotFoundErr(err error) bool { - // No delay for a local operation. - return d.bkt.IsObjNotFoundErr(err) -} diff --git a/pkg/objstore/tracing.go b/pkg/objstore/tracing.go deleted file mode 100644 index 0dccab724e..0000000000 --- a/pkg/objstore/tracing.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package objstore - -import ( - "context" - "io" - - "github.com/opentracing/opentracing-go" - - "github.com/thanos-io/thanos/pkg/tracing" -) - -// TracingBucket includes bucket operations in the traces. -type TracingBucket struct { - bkt Bucket -} - -func NewTracingBucket(bkt Bucket) InstrumentedBucket { - return TracingBucket{bkt: bkt} -} - -func (t TracingBucket) Iter(ctx context.Context, dir string, f func(string) error, options ...IterOption) (err error) { - tracing.DoWithSpan(ctx, "bucket_iter", func(spanCtx context.Context, span opentracing.Span) { - span.LogKV("dir", dir) - err = t.bkt.Iter(spanCtx, dir, f, options...) 
- }) - return -} - -func (t TracingBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - span, spanCtx := tracing.StartSpan(ctx, "bucket_get") - span.LogKV("name", name) - - r, err := t.bkt.Get(spanCtx, name) - if err != nil { - span.LogKV("err", err) - span.Finish() - return nil, err - } - - return newTracingReadCloser(r, span), nil -} - -func (t TracingBucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - span, spanCtx := tracing.StartSpan(ctx, "bucket_getrange") - span.LogKV("name", name, "offset", off, "length", length) - - r, err := t.bkt.GetRange(spanCtx, name, off, length) - if err != nil { - span.LogKV("err", err) - span.Finish() - return nil, err - } - - return newTracingReadCloser(r, span), nil -} - -func (t TracingBucket) Exists(ctx context.Context, name string) (exists bool, err error) { - tracing.DoWithSpan(ctx, "bucket_exists", func(spanCtx context.Context, span opentracing.Span) { - span.LogKV("name", name) - exists, err = t.bkt.Exists(spanCtx, name) - }) - return -} - -func (t TracingBucket) Attributes(ctx context.Context, name string) (attrs ObjectAttributes, err error) { - tracing.DoWithSpan(ctx, "bucket_attributes", func(spanCtx context.Context, span opentracing.Span) { - span.LogKV("name", name) - attrs, err = t.bkt.Attributes(spanCtx, name) - }) - return -} - -func (t TracingBucket) Upload(ctx context.Context, name string, r io.Reader) (err error) { - tracing.DoWithSpan(ctx, "bucket_upload", func(spanCtx context.Context, span opentracing.Span) { - span.LogKV("name", name) - err = t.bkt.Upload(spanCtx, name, r) - }) - return -} - -func (t TracingBucket) Delete(ctx context.Context, name string) (err error) { - tracing.DoWithSpan(ctx, "bucket_delete", func(spanCtx context.Context, span opentracing.Span) { - span.LogKV("name", name) - err = t.bkt.Delete(spanCtx, name) - }) - return -} - -func (t TracingBucket) Name() string { - return "tracing: " + t.bkt.Name() -} - -func (t 
TracingBucket) Close() error { - return t.bkt.Close() -} - -func (t TracingBucket) IsObjNotFoundErr(err error) bool { - return t.bkt.IsObjNotFoundErr(err) -} - -func (t TracingBucket) WithExpectedErrs(expectedFunc IsOpFailureExpectedFunc) Bucket { - if ib, ok := t.bkt.(InstrumentedBucket); ok { - return TracingBucket{bkt: ib.WithExpectedErrs(expectedFunc)} - } - return t -} - -func (t TracingBucket) ReaderWithExpectedErrs(expectedFunc IsOpFailureExpectedFunc) BucketReader { - return t.WithExpectedErrs(expectedFunc) -} - -type tracingReadCloser struct { - r io.ReadCloser - s opentracing.Span - - objSize int64 - objSizeErr error - - read int -} - -func newTracingReadCloser(r io.ReadCloser, span opentracing.Span) io.ReadCloser { - // Since TryToGetSize can only reliably return size before doing any read calls, - // we call during "construction" and remember the results. - objSize, objSizeErr := TryToGetSize(r) - - return &tracingReadCloser{r: r, s: span, objSize: objSize, objSizeErr: objSizeErr} -} - -func (t *tracingReadCloser) ObjectSize() (int64, error) { - return t.objSize, t.objSizeErr -} - -func (t *tracingReadCloser) Read(p []byte) (int, error) { - n, err := t.r.Read(p) - if n > 0 { - t.read += n - } - if err != nil && err != io.EOF && t.s != nil { - t.s.LogKV("err", err) - } - return n, err -} - -func (t *tracingReadCloser) Close() error { - err := t.r.Close() - if t.s != nil { - t.s.LogKV("read", t.read) - if err != nil { - t.s.LogKV("close err", err) - } - t.s.Finish() - t.s = nil - } - return err -} diff --git a/pkg/receive/multitsdb.go b/pkg/receive/multitsdb.go index 5ffacd7518..b503f6e697 100644 --- a/pkg/receive/multitsdb.go +++ b/pkg/receive/multitsdb.go @@ -20,15 +20,17 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" - "github.com/thanos-io/thanos/pkg/api/status" "go.uber.org/atomic" "golang.org/x/sync/errgroup" + "github.com/thanos-io/thanos/pkg/api/status" 
+ + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/exemplars" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/shipper" "github.com/thanos-io/thanos/pkg/store" "github.com/thanos-io/thanos/pkg/store/labelpb" diff --git a/pkg/receive/multitsdb_test.go b/pkg/receive/multitsdb_test.go index 917353d074..e0498bf436 100644 --- a/pkg/receive/multitsdb_test.go +++ b/pkg/receive/multitsdb_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/go-kit/log" "github.com/gogo/protobuf/types" diff --git a/pkg/replicate/replicator.go b/pkg/replicate/replicator.go index db7992c5ea..4116bdb57c 100644 --- a/pkg/replicate/replicator.go +++ b/pkg/replicate/replicator.go @@ -15,20 +15,21 @@ import ( "github.com/go-kit/log/level" "github.com/oklog/run" "github.com/oklog/ulid" - opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/client" + thanosblock "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/compact" "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/extprom" thanosmodel "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/server/http" diff --git a/pkg/replicate/scheme.go b/pkg/replicate/scheme.go index f9b1067b8e..9e9c7e9fa3 100644 --- 
a/pkg/replicate/scheme.go +++ b/pkg/replicate/scheme.go @@ -20,10 +20,11 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/model/labels" + "github.com/thanos-io/objstore" + thanosblock "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/runutil" ) diff --git a/pkg/replicate/scheme_test.go b/pkg/replicate/scheme_test.go index 4d6c38c5a1..c0ce55e37f 100644 --- a/pkg/replicate/scheme_test.go +++ b/pkg/replicate/scheme_test.go @@ -20,10 +20,11 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/shipper/shipper.go b/pkg/shipper/shipper.go index a1184a9421..08a0ae352f 100644 --- a/pkg/shipper/shipper.go +++ b/pkg/shipper/shipper.go @@ -25,9 +25,10 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/runutil" ) diff --git a/pkg/shipper/shipper_e2e_test.go b/pkg/shipper/shipper_e2e_test.go index 641c1bf9ff..0bc0a2806f 100644 --- a/pkg/shipper/shipper_e2e_test.go +++ b/pkg/shipper/shipper_e2e_test.go @@ -24,10 +24,11 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/objtesting" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - 
"github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/objtesting" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/pkg/shipper/shipper_test.go b/pkg/shipper/shipper_test.go index 8844d57f4a..1e423aa725 100644 --- a/pkg/shipper/shipper_test.go +++ b/pkg/shipper/shipper_test.go @@ -19,9 +19,10 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index 679c245fe6..61acbb2fe1 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -38,6 +38,8 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/indexheader" "github.com/thanos-io/thanos/pkg/block/metadata" @@ -46,7 +48,6 @@ import ( "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/gate" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/pool" "github.com/thanos-io/thanos/pkg/runutil" storecache "github.com/thanos-io/thanos/pkg/store/cache" diff --git a/pkg/store/bucket_e2e_test.go b/pkg/store/bucket_e2e_test.go index 3f0a2ed81a..6a979127e8 100644 --- a/pkg/store/bucket_e2e_test.go +++ b/pkg/store/bucket_e2e_test.go @@ -25,11 +25,12 @@ import ( "github.com/weaveworks/common/httpgrpc" "google.golang.org/grpc/codes" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/objtesting" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" - 
"github.com/thanos-io/thanos/pkg/objstore/objtesting" storecache "github.com/thanos-io/thanos/pkg/store/cache" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index 381b0f5053..d0557718d4 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -36,16 +36,17 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/encoding" + "github.com/thanos-io/objstore/providers/filesystem" "go.uber.org/atomic" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/indexheader" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact" "github.com/thanos-io/thanos/pkg/compact/downsample" "github.com/thanos-io/thanos/pkg/gate" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/filesystem" "github.com/thanos-io/thanos/pkg/pool" storecache "github.com/thanos-io/thanos/pkg/store/cache" "github.com/thanos-io/thanos/pkg/store/hintspb" diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go index a8fbde25b2..27e0fd8a58 100644 --- a/pkg/store/cache/caching_bucket.go +++ b/pkg/store/cache/caching_bucket.go @@ -20,8 +20,9 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "golang.org/x/sync/errgroup" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/cache" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/store/cache/cachekey" ) diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go index c92b072090..28bbf79dcf 100644 --- a/pkg/store/cache/caching_bucket_factory.go +++ b/pkg/store/cache/caching_bucket_factory.go @@ -15,11 +15,12 @@ import ( "github.com/prometheus/common/route" 
"gopkg.in/yaml.v2" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block/metadata" - cache "github.com/thanos-io/thanos/pkg/cache" + "github.com/thanos-io/thanos/pkg/cache" "github.com/thanos-io/thanos/pkg/cacheutil" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" ) // BucketCacheProvider is a type used to evaluate all bucket cache providers. diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go index 549afa5710..dca908101b 100644 --- a/pkg/store/cache/caching_bucket_test.go +++ b/pkg/store/cache/caching_bucket_test.go @@ -19,8 +19,9 @@ import ( "github.com/pkg/errors" promtest "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/thanos-io/objstore" + thanoscache "github.com/thanos-io/thanos/pkg/cache" - "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/store/cache/cachekey" "github.com/thanos-io/thanos/pkg/testutil" diff --git a/pkg/verifier/index_issue.go b/pkg/verifier/index_issue.go index 61e84293c1..20a7b4c989 100644 --- a/pkg/verifier/index_issue.go +++ b/pkg/verifier/index_issue.go @@ -16,8 +16,9 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/objstore" ) // IndexKnownIssues verifies any known index issue. 
diff --git a/pkg/verifier/safe_delete.go b/pkg/verifier/safe_delete.go index 26eb0be2fa..dd44167ae6 100644 --- a/pkg/verifier/safe_delete.go +++ b/pkg/verifier/safe_delete.go @@ -15,9 +15,10 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" ) // TSDBBlockExistsInBucket checks to see if a given TSDB block ID exists in a diff --git a/pkg/verifier/verify.go b/pkg/verifier/verify.go index 12fb4dcc21..dd2fb2769f 100644 --- a/pkg/verifier/verify.go +++ b/pkg/verifier/verify.go @@ -17,7 +17,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) type Verifier interface { diff --git a/scripts/cfggen/main.go b/scripts/cfggen/main.go index 9571f159f1..070bb84fbd 100644 --- a/scripts/cfggen/main.go +++ b/scripts/cfggen/main.go @@ -15,6 +15,15 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/pkg/errors" + "github.com/thanos-io/objstore/client" + "github.com/thanos-io/objstore/providers/azure" + "github.com/thanos-io/objstore/providers/bos" + "github.com/thanos-io/objstore/providers/cos" + "github.com/thanos-io/objstore/providers/filesystem" + "github.com/thanos-io/objstore/providers/gcs" + "github.com/thanos-io/objstore/providers/oss" + "github.com/thanos-io/objstore/providers/s3" + "github.com/thanos-io/objstore/providers/swift" "gopkg.in/alecthomas/kingpin.v2" "gopkg.in/yaml.v2" @@ -23,15 +32,6 @@ import ( "github.com/thanos-io/thanos/pkg/alert" "github.com/thanos-io/thanos/pkg/cacheutil" "github.com/thanos-io/thanos/pkg/logging" - "github.com/thanos-io/thanos/pkg/objstore/azure" - "github.com/thanos-io/thanos/pkg/objstore/bos" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/cos" 
- "github.com/thanos-io/thanos/pkg/objstore/filesystem" - "github.com/thanos-io/thanos/pkg/objstore/gcs" - "github.com/thanos-io/thanos/pkg/objstore/oss" - "github.com/thanos-io/thanos/pkg/objstore/s3" - "github.com/thanos-io/thanos/pkg/objstore/swift" "github.com/thanos-io/thanos/pkg/queryfrontend" storecache "github.com/thanos-io/thanos/pkg/store/cache" trclient "github.com/thanos-io/thanos/pkg/tracing/client" diff --git a/test/e2e/compact_test.go b/test/e2e/compact_test.go index c9be0f0537..dee48020b1 100644 --- a/test/e2e/compact_test.go +++ b/test/e2e/compact_test.go @@ -25,12 +25,13 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" + "github.com/thanos-io/objstore/providers/s3" + + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/client" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/s3" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/testutil" diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index 41fccca04d..7a354f41a3 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -27,13 +27,14 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/relabel" + "github.com/thanos-io/objstore/exthttp" + "github.com/thanos-io/objstore/providers/s3" "gopkg.in/yaml.v2" + "github.com/thanos-io/objstore/client" + "github.com/thanos-io/thanos/pkg/alert" - "github.com/thanos-io/thanos/pkg/exthttp" "github.com/thanos-io/thanos/pkg/httpconfig" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/s3" 
"github.com/thanos-io/thanos/pkg/queryfrontend" "github.com/thanos-io/thanos/pkg/receive" diff --git a/test/e2e/info_api_test.go b/test/e2e/info_api_test.go index e22fc0495c..0a54bb0cf8 100644 --- a/test/e2e/info_api_test.go +++ b/test/e2e/info_api_test.go @@ -16,7 +16,8 @@ import ( "github.com/efficientgo/e2e" "github.com/prometheus/prometheus/model/labels" - "github.com/thanos-io/thanos/pkg/objstore/client" + "github.com/thanos-io/objstore/client" + "github.com/thanos-io/thanos/pkg/query" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/testutil" diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index 131f04bd64..34d792efa4 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -24,9 +24,11 @@ import ( config_util "github.com/prometheus/common/config" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage/remote" + "github.com/thanos-io/objstore/providers/s3" + "google.golang.org/grpc" + "github.com/thanos-io/thanos/pkg/api/query/querypb" prompb_copy "github.com/thanos-io/thanos/pkg/store/storepb/prompb" - "google.golang.org/grpc" "github.com/chromedp/cdproto/network" "github.com/chromedp/chromedp" @@ -38,12 +40,12 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/rules" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/client" + "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/exemplars/exemplarspb" "github.com/thanos-io/thanos/pkg/metadata/metadatapb" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/s3" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/rules/rulespb" "github.com/thanos-io/thanos/pkg/runutil" diff --git a/test/e2e/store_gateway_test.go b/test/e2e/store_gateway_test.go index 5fe6958141..20c69396c3 100644 --- a/test/e2e/store_gateway_test.go +++ 
b/test/e2e/store_gateway_test.go @@ -20,12 +20,13 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/model/timestamp" + "github.com/thanos-io/objstore/providers/s3" + + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/client" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/s3" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" diff --git a/test/e2e/tools_bucket_web_test.go b/test/e2e/tools_bucket_web_test.go index 10289963e6..9958dd9390 100644 --- a/test/e2e/tools_bucket_web_test.go +++ b/test/e2e/tools_bucket_web_test.go @@ -19,11 +19,12 @@ import ( "github.com/go-kit/log" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" + "github.com/thanos-io/objstore/providers/s3" + + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/client" v1 "github.com/thanos-io/thanos/pkg/api/blocks" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/s3" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/test/e2e/e2ethanos"